Compare commits: v0.33.6...experiment

424 commits (SHA1s):

230474b620 cf09912280 cd317a1910 11a168442d e8d99369bc 921469383e ccbf6bb222 c68d510564
ce1b393682 78ba0e7ab8 416c671474 31425d82a3 678ad155a2 47e26f5a4d d102e19e47 80cac86b2c
0c05da2e2e 828f18bd8b a267c2e3ed 884a561447 f5faf6bc14 10cdf519aa 65b793c5a1 cc2cf2da97
f6cbef6332 4285c818ec ceca3b2f30 9548dd9586 0bb273db07 3da9781c98 d75db3df59 ab4526a153
8b1affe7d5 835779f7fb df758e155d a51288e5d6 b5d92d4d46 4f8bb633c7 bf648c37e7 4b60c969d8
0c4dc6fd76 c1efcd7c6a 83a5f459aa 0869566ad3 924c82ca16 5d02704822 9ca1215582 d86826277d
bca3b91c2d db5a1c059a dc59ad5334 d44dea0223 4f93abd62d 30dd27afff 3cecf5340d 9bce065a53
d3fa6194f7 0f3f0a64bf 91d96759c9 7b22421a7b abaa93c158 c70809a275 5ebed18692 94896d7ffe
06c3d8050f b1a22b24ab 9417986f77 0a1fc52971 de6223836e 2b075fb03a 264cb14402 b3708830b8
c8ba79327b 2904d133f3 e62f7f17b3 0f5e51f726 f1087106cf efdcbbe46b 5a63589e80 bc80b3f454
0467384d2f 90d713b8c6 76cd7de108 b86d05a279 cb7a6b2379 efb9343c8c 00f12e00f8 b199534518
1cc6671ec4 350f654e7b 50e328d1e7 f05d97e283 54aec35867 552f090f62 642505abc3 3149d55b7d
c68aab1536 1b21e771d0 62d683161e b3dd6fa981 073d400b84 907e6da5be d0ebe82871 aa98e38896
a8c9faa9a2 a8d41c6aff d1e7b9c44c 1729ba1650 4ecb8b7de8 0f8591a5a8 94c7fadc98 9b827c40ca
60f128a401 e3758c8c92 916efc8249 f79f454485 a2d8bff0dc 0f6ec6d1ae e615e95590 67c1924899
086e1a8f3e 3bade14ec0 2e223a8c22 0794504bce 0dce9e1379 e0934acdbb 12941f5f8b 2f0f911c52
4eacf0f200 64fa557f80 563f9b61b1 169851b412 00fdfbc213 4f0fa7a120 39f419868f 88e5ffe6fe
a163b748a5 ad88460e0d 664b192a3b f4f223aa44 b2399f6281 4cd1c9f2ff 7fbfea062e 56ca578f77
bf33eed609 c4b3698a80 3b0a85fc8e 2b791865c4 db24d7f15e 5caf79b312 54bbe71867 193cadc988
03e634dad4 77d70a7646 474810d9d5 6cb2e2448a 68c0ce62d8 e6babc27d5 3a263bf3ae 1b9f253e20
4cda300058 0f7d1c9906 e1948175ee 7f7b2cd3de 379376e5e6 871c4abfec cb53ce9d64 e5da60d75d
c85e063302 95ad128851 fcbd488e9a b94a43d5b5 e5481b22aa c99b6c66bf f8fe98812b f7f487e14c
edd2d82809 46f98a6a29 fc33e81323 77d3b5772f a5468eaadf 81880beff4 4acb6fe8a3 9283987f7e
54def42c19 ea69a84bbb 663d9db8e7 07126e43a4 9ec2186586 9532caf6ef 83d9ca7122 480d98c91f
ab96ee29c9 0f4fb537ce 3e438bfec8 56a05583ae 94a49e0636 9f72c209ee 78e8d4c3a5 3ad359e5be
7328039117 3904cbf307 7e07d25ed6 ef771cc4c2 b313b9b009 47a9ba435d e0b9d5f0af dacbeb2e03
810715f79a cb23aa4c42 c573794b22 e564306e31 a67d8ace9b 43c3f0b02f 3e704822be 329d18b39c
6105c6101f b3f6dddad2 5c445114d3 1fe6bbb555 047ac0cbba 6340141300 abd9914683 026cd91ac8
f749607c91 e7a16c6210 c7273c11bc 5110f4e425 04277d0ed8 3e8b02c939 7aea00069c 911db96658
058934b1cf a6f421e812 e1728dfcbe 5c3d6ea9c7 3f357583ce 9c2f99a3b7 08760b0d9a b85fe45f46
e5b52d0f94 81d4f51524 593389a077 eba48c0f16 f62c597d14 67f7b9cb50 056f099126 47a9da28ca
cc325c7069 e404ba9aac b69216f768 6a4d01ee94 3c580c2b47 1b4bf232b9 9fafdfa97d f9d6c677ea
084046456e 0d31109ed5 74e7617083 1d17fc52ae a36b0ec195 6190abe8da c69026a758 dba84fa69c
88c5ffec33 2baebace6a c00f4d237b 49a044aa5f 03287c350e c632bc8654 926da4dda8 e77f24d80a
8c2b8d7f0b c7d0f34a3c 52e3d3813b 0fd2321629 f1bfe6167a 4e726783ea a5aea15a6b c8f2c19991
1519572961 3a5d8d5891 f6a0a02a62 1af16acd4c df33c164de d6a7797dd1 6ec9d8ba0a c6584f4b5f
80736fd8ed fc0f13dd03 10405153c2 017eb9d17a 4a28d3d36f 15133477ee fc954960e9 947c7443eb
6bd856caa2 e238013c44 a94967bc5f b8a5b0097c a8ed93a4b5 442734ff9e 762a0982aa f293d124b6
24bc15eab4 4e50fe3edb f726f2dc6c 03c11032c3 f9ce1b4eb0 c187638ee9 06bc8d2fe5 fb216a22db
1ccafb0c5e dd99db846d 5119818e9d 22a2004428 7ede650956 164f8e4843 7bb651de6a e3586f7c06
a2bfb778c8 a45f2c3a00 381d2cfdf0 67a1e315cc 8c0ff0287a 306361b31b bddfad253a 86ef9760a7
83e72bb2f0 8ddd0f273c e97d93948d 7e561b5c1a 49840f5ab2 3cbe8331e6 395276b405 b8d9e108be
20733857ab bdc27d6716 d34657e1f2 d3464ce708 9eb1a79100 dc045ef202 2418e7811a f4a4dbcad1
0c905ee015 6982320572 495975e231 8a1817f0d2 497444f1fd 8164f6daf3 f7199e8734 ed82043efb
2dadc092b8 c6dbd216e6 d9f3db5081 4917ff5523 17d585753f d86794325f 158d6c75b6 537d0b7b36
f9d34a763c dfcad5fad5 3099d96dba 149c4f1765 fd99787162 8935ec5a93 81e2813948 52e6e815be
01afcfc4e9 93a8603904 69e857853f 6e0c66f651 495a9d06bb c69faf8c4a 7c570bff74 9693625e55
2a4ea3baa8 3e39783d5d ae61ade891 faa462ef79 23b6a0537f 5b68f29f48 8f646f2d04 07340cdaca
82fa31799c 2a7b3439de b4c3bc1734 219606a6ed bc74925c5b f8825748dd a40802bcbc 3801b8aa03
16a31c6fce 83caead95a 42a394caa2 8550a7e9c2 4f7064f6b5 f0cede5556 54ac18e832 66a4ca1d28
72788cf9c1 edc427a351 fe87890b18 f6a3067868 15d513f16f 174be586e5 b5eee511c7 93d174bcc4
5e42c45c96 982edca380 234611f347 14b3da63a3 9f0791b7bd 9f500cb39e 8d14598e90 ca0b052307
cac0253799 0abb205b47 69e51c7ba4 8ae64b270f cf1e2000f6 6b8c07abc2 0bc4627a73 53ace904b2
.circleci/config.yml

@@ -23,99 +23,106 @@ jobs:
       - run: docker push matrixdotorg/synapse:latest
       - run: docker push matrixdotorg/synapse:latest-py3
   sytestpy2:
-    machine: true
+    docker:
+      - image: matrixdotorg/sytest-synapsepy2
+    working_directory: /src
     steps:
       - checkout
-      - run: docker pull matrixdotorg/sytest-synapsepy2
-      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy2
+      - run: /synapse_sytest.sh
       - store_artifacts:
-          path: ~/project/logs
+          path: /logs
           destination: logs
       - store_test_results:
-          path: logs
+          path: /logs
   sytestpy2postgres:
-    machine: true
+    docker:
+      - image: matrixdotorg/sytest-synapsepy2
+    working_directory: /src
     steps:
       - checkout
-      - run: docker pull matrixdotorg/sytest-synapsepy2
-      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy2
+      - run: POSTGRES=1 /synapse_sytest.sh
       - store_artifacts:
-          path: ~/project/logs
+          path: /logs
           destination: logs
       - store_test_results:
-          path: logs
+          path: /logs
   sytestpy2merged:
-    machine: true
+    docker:
+      - image: matrixdotorg/sytest-synapsepy2
+    working_directory: /src
     steps:
       - checkout
       - run: bash .circleci/merge_base_branch.sh
-      - run: docker pull matrixdotorg/sytest-synapsepy2
-      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy2
+      - run: /synapse_sytest.sh
       - store_artifacts:
-          path: ~/project/logs
+          path: /logs
           destination: logs
       - store_test_results:
-          path: logs
+          path: /logs
   sytestpy2postgresmerged:
-    machine: true
+    docker:
+      - image: matrixdotorg/sytest-synapsepy2
+    working_directory: /src
     steps:
       - checkout
       - run: bash .circleci/merge_base_branch.sh
-      - run: docker pull matrixdotorg/sytest-synapsepy2
-      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy2
+      - run: POSTGRES=1 /synapse_sytest.sh
       - store_artifacts:
-          path: ~/project/logs
+          path: /logs
           destination: logs
       - store_test_results:
-          path: logs
+          path: /logs
 
   sytestpy3:
-    machine: true
+    docker:
+      - image: matrixdotorg/sytest-synapsepy3
+    working_directory: /src
     steps:
       - checkout
-      - run: docker pull matrixdotorg/sytest-synapsepy3
-      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy3
+      - run: /synapse_sytest.sh
       - store_artifacts:
-          path: ~/project/logs
+          path: /logs
           destination: logs
       - store_test_results:
-          path: logs
+          path: /logs
   sytestpy3postgres:
-    machine: true
+    docker:
+      - image: matrixdotorg/sytest-synapsepy3
+    working_directory: /src
     steps:
       - checkout
-      - run: docker pull matrixdotorg/sytest-synapsepy3
-      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy3
+      - run: POSTGRES=1 /synapse_sytest.sh
       - store_artifacts:
-          path: ~/project/logs
+          path: /logs
           destination: logs
       - store_test_results:
-          path: logs
+          path: /logs
   sytestpy3merged:
-    machine: true
+    docker:
+      - image: matrixdotorg/sytest-synapsepy3
+    working_directory: /src
     steps:
       - checkout
       - run: bash .circleci/merge_base_branch.sh
-      - run: docker pull matrixdotorg/sytest-synapsepy3
-      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy3
+      - run: /synapse_sytest.sh
       - store_artifacts:
-          path: ~/project/logs
+          path: /logs
           destination: logs
       - store_test_results:
-          path: logs
+          path: /logs
   sytestpy3postgresmerged:
-    machine: true
+    docker:
+      - image: matrixdotorg/sytest-synapsepy3
+    working_directory: /src
     steps:
       - checkout
       - run: bash .circleci/merge_base_branch.sh
-      - run: docker pull matrixdotorg/sytest-synapsepy3
-      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy3
+      - run: POSTGRES=1 /synapse_sytest.sh
       - store_artifacts:
-          path: ~/project/logs
+          path: /logs
           destination: logs
       - store_test_results:
-          path: logs
+          path: /logs
 
 workflows:
   version: 2
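The removed `machine: true` jobs above double as a recipe for reproducing a sytest run outside CI. A sketch using only the image names and mounts from the old config (the py2 and postgres variants follow the same pattern):

```bash
# Run the py3 sytest image against a local synapse checkout, as the old
# machine-executor jobs did; add -e POSTGRES=1 for the postgres variants.
mkdir -p logs
docker pull matrixdotorg/sytest-synapsepy3
docker run --rm -it -v "$(pwd):/src" -v "$(pwd)/logs:/logs" \
    matrixdotorg/sytest-synapsepy3
```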
.circleci/merge_base_branch.sh

@@ -16,7 +16,7 @@ then
     GITBASE="develop"
 else
     # Get the reference, using the GitHub API
-    GITBASE=`curl -q https://api.github.com/repos/matrix-org/synapse/pulls/${CIRCLE_PR_NUMBER} | jq -r '.base.ref'`
+    GITBASE=`wget -O- https://api.github.com/repos/matrix-org/synapse/pulls/${CIRCLE_PR_NUMBER} | jq -r '.base.ref'`
 fi
 
 # Show what we are before
@@ -31,4 +31,4 @@ git fetch -u origin $GITBASE
 git merge --no-edit origin/$GITBASE
 
 # Show what we are after.
 git show -s
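The API call above maps a PR number to its base branch. As a sketch, using PR 4182 (a PR that appears in this compare's changelog entries) as the example number:

```bash
# Resolve the base branch of a PR via the GitHub API, as the script does.
wget -O- https://api.github.com/repos/matrix-org/synapse/pulls/4182 \
    | jq -r '.base.ref'
# prints the base branch name, e.g. "develop"
```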
12 .coveragerc Normal file

@@ -0,0 +1,12 @@
[run]
branch = True
parallel = True
source = synapse

[paths]
source=
   coverage

[report]
precision = 2
ignore_errors = True
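Since `parallel = True` writes one data file per process, results have to be combined before reporting. A minimal sketch of exercising this file via the `py36-coverage` tox target mentioned in `changelog.d/4180.misc` below — the `coverage` commands are standard coverage.py usage, assumed rather than shown in this diff:

```bash
# Run the test suite under coverage via the new tox environment, then
# merge the per-process data files (required by parallel = True) and report.
pip install tox coverage
tox -e py36-coverage
coverage combine
coverage report   # prints per-file coverage with 2-digit precision
```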
.github/ISSUE_TEMPLATE/BUG_REPORT.md

@@ -1,3 +1,9 @@
+---
+name: Bug report
+about: Create a report to help us improve
+
+---
+
 <!--
 
 **IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**:
@@ -11,38 +17,50 @@ the necessary data to fix your issue.
 You can also preview your report before submitting it. You may remove sections
 that aren't relevant to your particular case.
 
 Text between <!-- and --> marks will be invisible in the report.
 
 -->
 
 ### Description
 
-Describe here the problem that you are experiencing, or the feature you are requesting.
+<!-- Describe here the problem that you are experiencing -->
 
 ### Steps to reproduce
 
-- For bugs, list the steps
+- list the steps
 - that reproduce the bug
 - using hyphens as bullet points
 
+<!--
 Describe how what happens differs from what you expected.
 
-<!-- If you can identify any relevant log snippets from _homeserver.log_, please include
+If you can identify any relevant log snippets from _homeserver.log_, please include
 those (please be careful to remove any personal or private data). Please surround them with
-``` (three backticks, on a line on their own), so that they are formatted legibly. -->
+``` (three backticks, on a line on their own), so that they are formatted legibly.
+-->
 
 ### Version information
 
 <!-- IMPORTANT: please answer the following questions, to help us narrow down the problem -->
 
-- **Homeserver**: Was this issue identified on matrix.org or another homeserver?
+<!-- Was this issue identified on matrix.org or another homeserver? -->
+- **Homeserver**:
 
 If not matrix.org:
-- **Version**: What version of Synapse is running? <!--
+
+- **Version**:
+<!--
+What version of Synapse is running?
 You can find the Synapse version by inspecting the server headers (replace matrix.org with
 your own homeserver domain):
 $ curl -v https://matrix.org/_matrix/client/versions 2>&1 | grep "Server:"
 -->
-- **Install method**: package manager/git clone/pip
-- **Platform**: Tell us about the environment in which your homeserver is operating
-  - distro, hardware, if it's running in a vm/container, etc.
+
+- **Install method**:
+<!-- examples: package manager/git clone/pip -->
+
+- **Platform**:
+<!--
+Tell us about the environment in which your homeserver is operating
+distro, hardware, if it's running in a vm/container, etc.
+-->
9 .github/ISSUE_TEMPLATE/FEATURE_REQUEST.md vendored Normal file

@@ -0,0 +1,9 @@
---
name: Feature request
about: Suggest an idea for this project

---

**Description:**

<!-- Describe here the feature you are requesting. -->
9 .github/ISSUE_TEMPLATE/SUPPORT_REQUEST.md vendored Normal file

@@ -0,0 +1,9 @@
---
name: Support request
about: I need support for Synapse

---

# Please ask for support in [**#matrix:matrix.org**](https://matrix.to/#/#matrix:matrix.org)

## Don't file an issue as a support request.
7 .github/PULL_REQUEST_TEMPLATE.md vendored Normal file

@@ -0,0 +1,7 @@
### Pull Request Checklist

<!-- Please read CONTRIBUTING.rst before submitting your pull request -->

* [ ] Pull request is based on the develop branch
* [ ] Pull request includes a [changelog file](CONTRIBUTING.rst#changelog)
* [ ] Pull request includes a [sign off](CONTRIBUTING.rst#sign-off)
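The changelog-file checkbox refers to the `changelog.d` newsfragments added later in this compare (e.g. `changelog.d/4176.bugfix`). A sketch with a hypothetical PR number 1234:

```bash
# Create a newsfragment named after the PR number, with an extension matching
# the change type (.feature, .bugfix or .misc, as seen in this diff).
git checkout develop
echo "Fix the thing that was broken." > changelog.d/1234.bugfix
git add changelog.d/1234.bugfix
```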
3 .github/SUPPORT.md vendored Normal file

@@ -0,0 +1,3 @@
[**#matrix:matrix.org**](https://matrix.to/#/#matrix:matrix.org) is the official support room for Matrix, and can be accessed by any client from https://matrix.org/docs/projects/try-matrix-now.html

It can also be accessed via IRC bridge at irc://irc.freenode.net/matrix or on the web here: https://webchat.freenode.net/?channels=matrix
51 .travis.yml

@@ -1,27 +1,45 @@
 sudo: false
 language: python
 
-# tell travis to cache ~/.cache/pip
-cache: pip
+cache:
+  directories:
+    # we only bother to cache the wheels; parts of the http cache get
+    # invalidated every build (because they get served with a max-age of 600
+    # seconds), which means that we end up re-uploading the whole cache for
+    # every build, which is time-consuming In any case, it's not obvious that
+    # downloading the cache from S3 would be much faster than downloading the
+    # originals from pypi.
+    #
+    - $HOME/.cache/pip/wheels
 
-before_script:
-  - git remote set-branches --add origin develop
-  - git fetch origin develop
+# don't clone the whole repo history, one commit will do
+git:
+  depth: 1
+
+# only build branches we care about (PRs are built seperately)
+branches:
+  only:
+    - master
+    - develop
+    - /^release-v/
+
+# When running the tox environments that call Twisted Trial, we can pass the -j
+# flag to run the tests concurrently. We set this to 2 for CPU bound tests
+# (SQLite) and 4 for I/O bound tests (PostgreSQL).
 matrix:
   fast_finish: true
   include:
   - python: 2.7
     env: TOX_ENV=packaging
 
-  - python: 2.7
-    env: TOX_ENV=pep8
+  - python: 3.6
+    env: TOX_ENV="pep8,check_isort"
 
   - python: 2.7
-    env: TOX_ENV=py27
+    env: TOX_ENV=py27 TRIAL_FLAGS="-j 2"
 
   - python: 2.7
-    env: TOX_ENV=py27-old
+    env: TOX_ENV=py27-old TRIAL_FLAGS="-j 2"
 
   - python: 2.7
     env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4"
@@ -29,21 +47,24 @@ matrix:
       - postgresql
 
   - python: 3.5
-    env: TOX_ENV=py35
+    env: TOX_ENV=py35 TRIAL_FLAGS="-j 2"
 
   - python: 3.6
-    env: TOX_ENV=py36
+    env: TOX_ENV=py36 TRIAL_FLAGS="-j 2"
 
   - python: 3.6
     env: TOX_ENV=py36-postgres TRIAL_FLAGS="-j 4"
     services:
       - postgresql
 
-  - python: 3.6
-    env: TOX_ENV=check_isort
-
-  - python: 3.6
+  # we only need to check for the newsfragment if it's a PR build
+  - if: type = pull_request
+    python: 3.6
     env: TOX_ENV=check-newsfragment
+    script:
+      - git remote set-branches --add origin develop
+      - git fetch origin develop
+      - tox -e $TOX_ENV
 
 install:
   - pip install tox
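A sketch of reproducing the concurrent trial runs locally; this assumes — it is not shown in this diff — that the project's tox.ini forwards `TRIAL_FLAGS` through to trial:

```bash
# Mirror the TOX_ENV=py36 TRIAL_FLAGS="-j 2" build: run the SQLite-backed
# suite with two concurrent trial workers.
pip install tox
TRIAL_FLAGS="-j 2" tox -e py36
```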
179 CHANGES.md

@@ -1,3 +1,182 @@
Synapse 0.33.9 (2018-11-19)
===========================

No significant changes.


Synapse 0.33.9rc1 (2018-11-14)
==============================

Features
--------

- Include flags to optionally add `m.login.terms` to the registration flow when consent tracking is enabled. ([\#4004](https://github.com/matrix-org/synapse/issues/4004), [\#4133](https://github.com/matrix-org/synapse/issues/4133), [\#4142](https://github.com/matrix-org/synapse/issues/4142), [\#4184](https://github.com/matrix-org/synapse/issues/4184))
- Support for replacing rooms with new ones ([\#4091](https://github.com/matrix-org/synapse/issues/4091), [\#4099](https://github.com/matrix-org/synapse/issues/4099), [\#4100](https://github.com/matrix-org/synapse/issues/4100), [\#4101](https://github.com/matrix-org/synapse/issues/4101))


Bugfixes
--------

- Fix exceptions when using the email mailer on Python 3. ([\#4095](https://github.com/matrix-org/synapse/issues/4095))
- Fix e2e key backup with more than 9 backup versions ([\#4113](https://github.com/matrix-org/synapse/issues/4113))
- Searches that request profile info now no longer fail with a 500. ([\#4122](https://github.com/matrix-org/synapse/issues/4122))
- fix return code of empty key backups ([\#4123](https://github.com/matrix-org/synapse/issues/4123))
- If the typing stream ID goes backwards (as on a worker when the master restarts), the worker's typing handler will no longer erroneously report rooms containing new typing events. ([\#4127](https://github.com/matrix-org/synapse/issues/4127))
- Fix table lock of device_lists_remote_cache which could freeze the application ([\#4132](https://github.com/matrix-org/synapse/issues/4132))
- Fix exception when using state res v2 algorithm ([\#4135](https://github.com/matrix-org/synapse/issues/4135))
- Generating the user consent URI no longer fails on Python 3. ([\#4140](https://github.com/matrix-org/synapse/issues/4140), [\#4163](https://github.com/matrix-org/synapse/issues/4163))
- Loading URL previews from the DB cache on Postgres will no longer cause Unicode type errors when responding to the request, and URL previews will no longer fail if the remote server returns a Content-Type header with the chartype in quotes. ([\#4157](https://github.com/matrix-org/synapse/issues/4157))
- The hash_password script now works on Python 3. ([\#4161](https://github.com/matrix-org/synapse/issues/4161))
- Fix noop checks when updating device keys, reducing spurious device list update notifications. ([\#4164](https://github.com/matrix-org/synapse/issues/4164))


Deprecations and Removals
-------------------------

- The disused and un-specced identicon generator has been removed. ([\#4106](https://github.com/matrix-org/synapse/issues/4106))
- The obsolete and non-functional /pull federation endpoint has been removed. ([\#4118](https://github.com/matrix-org/synapse/issues/4118))
- The deprecated v1 key exchange endpoints have been removed. ([\#4119](https://github.com/matrix-org/synapse/issues/4119))
- Synapse will no longer fetch keys using the fallback deprecated v1 key exchange method and will now always use v2. ([\#4120](https://github.com/matrix-org/synapse/issues/4120))


Internal Changes
----------------

- Fix build of Docker image with docker-compose ([\#3778](https://github.com/matrix-org/synapse/issues/3778))
- Delete unreferenced state groups during history purge ([\#4006](https://github.com/matrix-org/synapse/issues/4006))
- The "Received rdata" log messages on workers is now logged at DEBUG, not INFO. ([\#4108](https://github.com/matrix-org/synapse/issues/4108))
- Reduce replication traffic for device lists ([\#4109](https://github.com/matrix-org/synapse/issues/4109))
- Fix `synapse_replication_tcp_protocol_*_commands` metric label to be full command name, rather than just the first character ([\#4110](https://github.com/matrix-org/synapse/issues/4110))
- Log some bits about room creation ([\#4121](https://github.com/matrix-org/synapse/issues/4121))
- Fix `tox` failure on old systems ([\#4124](https://github.com/matrix-org/synapse/issues/4124))
- Add STATE_V2_TEST room version ([\#4128](https://github.com/matrix-org/synapse/issues/4128))
- Clean up event accesses and tests ([\#4137](https://github.com/matrix-org/synapse/issues/4137))
- The default logging config will now set an explicit log file encoding of UTF-8. ([\#4138](https://github.com/matrix-org/synapse/issues/4138))
- Add helpers functions for getting prev and auth events of an event ([\#4139](https://github.com/matrix-org/synapse/issues/4139))
- Add some tests for the HTTP pusher. ([\#4149](https://github.com/matrix-org/synapse/issues/4149))
- add purge_history.sh and purge_remote_media.sh scripts to contrib/ ([\#4155](https://github.com/matrix-org/synapse/issues/4155))
- HTTP tests have been refactored to contain less boilerplate. ([\#4156](https://github.com/matrix-org/synapse/issues/4156))
- Drop incoming events from federation for unknown rooms ([\#4165](https://github.com/matrix-org/synapse/issues/4165))


Synapse 0.33.8 (2018-11-01)
===========================

No significant changes.


Synapse 0.33.8rc2 (2018-10-31)
==============================

Bugfixes
--------

- Searches that request profile info now no longer fail with a 500. Fixes
  a regression in 0.33.8rc1. ([\#4122](https://github.com/matrix-org/synapse/issues/4122))


Synapse 0.33.8rc1 (2018-10-29)
==============================

Features
--------

- Servers with auto-join rooms will now automatically create those rooms when the first user registers ([\#3975](https://github.com/matrix-org/synapse/issues/3975))
- Add config option to control alias creation ([\#4051](https://github.com/matrix-org/synapse/issues/4051))
- The register_new_matrix_user script is now ported to Python 3. ([\#4085](https://github.com/matrix-org/synapse/issues/4085))
- Configure Docker image to listen on both ipv4 and ipv6. ([\#4089](https://github.com/matrix-org/synapse/issues/4089))


Bugfixes
--------

- Fix HTTP error response codes for federated group requests. ([\#3969](https://github.com/matrix-org/synapse/issues/3969))
- Fix issue where Python 3 users couldn't paginate /publicRooms ([\#4046](https://github.com/matrix-org/synapse/issues/4046))
- Fix URL previewing to work in Python 3.7 ([\#4050](https://github.com/matrix-org/synapse/issues/4050))
- synctl will use the right python executable to run worker processes ([\#4057](https://github.com/matrix-org/synapse/issues/4057))
- Manhole now works again on Python 3, instead of failing with a "couldn't match all kex parts" when connecting. ([\#4060](https://github.com/matrix-org/synapse/issues/4060), [\#4067](https://github.com/matrix-org/synapse/issues/4067))
- Fix some metrics being racy and causing exceptions when polled by Prometheus. ([\#4061](https://github.com/matrix-org/synapse/issues/4061))
- Fix bug which prevented email notifications from being sent unless an absolute path was given for `email_templates`. ([\#4068](https://github.com/matrix-org/synapse/issues/4068))
- Correctly account for cpu usage by background threads ([\#4074](https://github.com/matrix-org/synapse/issues/4074))
- Fix race condition where config defined reserved users were not being added to
  the monthly active user list prior to the homeserver reactor firing up ([\#4081](https://github.com/matrix-org/synapse/issues/4081))
- Fix bug which prevented backslashes being used in event field filters ([\#4083](https://github.com/matrix-org/synapse/issues/4083))


Internal Changes
----------------

- Add information about the [matrix-docker-ansible-deploy](https://github.com/spantaleev/matrix-docker-ansible-deploy) playbook ([\#3698](https://github.com/matrix-org/synapse/issues/3698))
- Add initial implementation of new state resolution algorithm ([\#3786](https://github.com/matrix-org/synapse/issues/3786))
- Reduce database load when fetching state groups ([\#4011](https://github.com/matrix-org/synapse/issues/4011))
- Various cleanups in the federation client code ([\#4031](https://github.com/matrix-org/synapse/issues/4031))
- Run the CircleCI builds in docker containers ([\#4041](https://github.com/matrix-org/synapse/issues/4041))
- Only colourise synctl output when attached to tty ([\#4049](https://github.com/matrix-org/synapse/issues/4049))
- Refactor room alias creation code ([\#4063](https://github.com/matrix-org/synapse/issues/4063))
- Make the Python scripts in the top-level scripts folders meet pep8 and pass flake8. ([\#4068](https://github.com/matrix-org/synapse/issues/4068))
- The README now contains example for the Caddy web server. Contributed by steamp0rt. ([\#4072](https://github.com/matrix-org/synapse/issues/4072))
- Add psutil as an explicit dependency ([\#4073](https://github.com/matrix-org/synapse/issues/4073))
- Clean up threading and logcontexts in pushers ([\#4075](https://github.com/matrix-org/synapse/issues/4075))
- Correctly manage logcontexts during startup to fix some "Unexpected logging context" warnings ([\#4076](https://github.com/matrix-org/synapse/issues/4076))
- Give some more things logcontexts ([\#4077](https://github.com/matrix-org/synapse/issues/4077))
- Clean up some bits of code which were flagged by the linter ([\#4082](https://github.com/matrix-org/synapse/issues/4082))


Synapse 0.33.7 (2018-10-18)
===========================

**Warning**: This release removes the example email notification templates from
`res/templates` (they are now internal to the python package). This should only
affect you if you (a) deploy your Synapse instance from a git checkout or a
github snapshot URL, and (b) have email notifications enabled.

If you have email notifications enabled, you should ensure that
`email.template_dir` is either configured to point at a directory where you
have installed customised templates, or leave it unset to use the default
templates.

Synapse 0.33.7rc2 (2018-10-17)
==============================

Features
--------

- Ship the example email templates as part of the package ([\#4052](https://github.com/matrix-org/synapse/issues/4052))

Bugfixes
--------

- Fix bug which made get_missing_events return too few events ([\#4045](https://github.com/matrix-org/synapse/issues/4045))


Synapse 0.33.7rc1 (2018-10-15)
==============================

Features
--------

- Add support for end-to-end key backup (MSC1687) ([\#4019](https://github.com/matrix-org/synapse/issues/4019))


Bugfixes
--------

- Fix bug in event persistence logic which caused 'NoneType is not iterable' ([\#3995](https://github.com/matrix-org/synapse/issues/3995))
- Fix exception in background metrics collection ([\#3996](https://github.com/matrix-org/synapse/issues/3996))
- Fix exception handling in fetching remote profiles ([\#3997](https://github.com/matrix-org/synapse/issues/3997))
- Fix handling of rejected threepid invites ([\#3999](https://github.com/matrix-org/synapse/issues/3999))
- Workers now start on Python 3. ([\#4027](https://github.com/matrix-org/synapse/issues/4027))
- Synapse now starts on Python 3.7. ([\#4033](https://github.com/matrix-org/synapse/issues/4033))


Internal Changes
----------------

- Log exceptions in looping calls ([\#4008](https://github.com/matrix-org/synapse/issues/4008))
- Optimisation for serving federation requests ([\#4017](https://github.com/matrix-org/synapse/issues/4017))
- Add metric to count number of non-empty sync responses ([\#4022](https://github.com/matrix-org/synapse/issues/4022))


Synapse 0.33.6 (2018-10-04)
===========================
MANIFEST.in

@@ -12,12 +12,12 @@ recursive-include synapse/storage/schema *.sql
 recursive-include synapse/storage/schema *.py
 
 recursive-include docs *
-recursive-include res *
 recursive-include scripts *
 recursive-include scripts-dev *
 recursive-include synapse *.pyi
 recursive-include tests *.py
 
+recursive-include synapse/res *
 recursive-include synapse/static *.css
 recursive-include synapse/static *.gif
 recursive-include synapse/static *.html
@@ -34,6 +34,7 @@ prune .github
 prune demo/etc
 prune docker
 prune .circleci
+prune .coveragerc
 
 exclude jenkins*
 recursive-exclude jenkins *.sh
28 README.rst

@@ -142,7 +142,7 @@ Installing prerequisites on openSUSE::
 Installing prerequisites on OpenBSD::
 
     doas pkg_add python libffi py-pip py-setuptools sqlite3 py-virtualenv \
-                  libxslt
+                  libxslt jpeg
 
 To install the Synapse homeserver run::
 
@@ -174,6 +174,12 @@ Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
 Dockerfile to automate a synapse server in a single Docker image, at
 https://hub.docker.com/r/avhost/docker-matrix/tags/
 
+Slavi Pantaleev has created an Ansible playbook,
+which installs the offical Docker image of Matrix Synapse
+along with many other Matrix-related services (Postgres database, riot-web, coturn, mxisd, SSL support, etc.).
+For more details, see
+https://github.com/spantaleev/matrix-docker-ansible-deploy
+
 Configuring Synapse
 -------------------
 
@@ -651,7 +657,8 @@ Using a reverse proxy with Synapse
 
 It is recommended to put a reverse proxy such as
 `nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
-`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_ or
+`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
+`Caddy <https://caddyserver.com/docs/proxy>`_ or
 `HAProxy <https://www.haproxy.org/>`_ in front of Synapse. One advantage of
 doing so is that it means that you can expose the default https port (443) to
 Matrix clients without needing to run Synapse with root privileges.
@@ -682,7 +689,15 @@ so an example nginx configuration might look like::
     }
 }
 
-and an example apache configuration may look like::
+an example Caddy configuration might look like::
+
+    matrix.example.com {
+      proxy /_matrix http://localhost:8008 {
+        transparent
+      }
+    }
+
+and an example Apache configuration might look like::
 
     <VirtualHost *:443>
         SSLEngine on
@@ -714,9 +729,10 @@ port:
 
 .. __: `key_management`_
 
-* Synapse does not currently support SNI on the federation protocol
-  (`bug #1491 <https://github.com/matrix-org/synapse/issues/1491>`_), which
-  means that using name-based virtual hosting is unreliable.
+* Until v0.33.3, Synapse did not support SNI on the federation port
+  (`bug #1491 <https://github.com/matrix-org/synapse/issues/1491>`_). This bug
+  is now fixed, but means that federating with older servers can be unreliable
+  when using name-based virtual hosting.
 
 Furthermore, a number of the normal reasons for using a reverse-proxy do not
 apply:
13 UPGRADE.rst

@@ -48,6 +48,19 @@ returned by the Client-Server API:
     # configured on port 443.
     curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"
 
+Upgrading to v0.33.7
+====================
+
+This release removes the example email notification templates from
+``res/templates`` (they are now internal to the python package). This should
+only affect you if you (a) deploy your Synapse instance from a git checkout or
+a github snapshot URL, and (b) have email notifications enabled.
+
+If you have email notifications enabled, you should ensure that
+``email.template_dir`` is either configured to point at a directory where you
+have installed customised templates, or leave it unset to use the default
+templates.
+
 Upgrading to v0.27.3
 ====================
1 changelog.d/3830.feature Normal file

@@ -0,0 +1 @@
Add option to track MAU stats (but not limit people)

1 changelog.d/4176.bugfix Normal file

@@ -0,0 +1 @@
The media repository now no longer fails to decode UTF-8 filenames when downloading remote media.

1 changelog.d/4180.misc Normal file

@@ -0,0 +1 @@
A coveragerc file, as well as the py36-coverage tox target, have been added.

1 changelog.d/4182.misc Normal file

@@ -0,0 +1 @@
Add a GitHub pull request template and add multiple issue templates

1 changelog.d/4183.bugfix Normal file

@@ -0,0 +1 @@
URL previews now correctly decode non-UTF-8 text if the header contains a `<meta http-equiv="Content-Type"` header.

1 changelog.d/4188.misc Normal file

@@ -0,0 +1 @@
Update README to reflect the fact that #1491 is fixed

1 changelog.d/4192.bugfix Normal file

@@ -0,0 +1 @@
Fix an issue where public consent URLs had two slashes.

1 changelog.d/4193.misc Normal file

@@ -0,0 +1 @@
Add missing `jpeg` package prerequisite for OpenBSD in README.

1 changelog.d/4197.bugfix Normal file

@@ -0,0 +1 @@
Fallback auth now accepts the session parameter on Python 3.

1 changelog.d/4200.misc Normal file

@@ -0,0 +1 @@
Add a note saying you need to manually reclaim disk space after using the Purge History API

1 changelog.d/4204.misc Normal file

@@ -0,0 +1 @@
Fix logcontext leaks in EmailPusher and in tests

1 changelog.d/4207.bugfix Normal file

@@ -0,0 +1 @@
Remove riot.im from the list of trusted Identity Servers in the default configuration
contrib/docker/docker-compose.yml

@@ -6,9 +6,11 @@ version: '3'
 services:
 
   synapse:
-    build: ../..
+    build:
+      context: ../..
+      dockerfile: docker/Dockerfile
     image: docker.io/matrixdotorg/synapse:latest
-    # Since snyapse does not retry to connect to the database, restart upon
+    # Since synapse does not retry to connect to the database, restart upon
     # failure
     restart: unless-stopped
     # See the readme for a full documentation of the environment settings
@@ -47,4 +49,4 @@ services:
       # You may store the database tables in a local folder..
       - ./schemas:/var/lib/postgresql/data
       # .. or store them on some high performance storage for better results
-      # - /path/to/ssd/storage:/var/lib/postfesql/data
+      # - /path/to/ssd/storage:/var/lib/postgresql/data
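A sketch of bringing this stack up; plain docker-compose usage, nothing beyond what the file above implies:

```bash
# Build the image from the repository root using docker/Dockerfile (per the
# build: stanza above), then start the services in the background.
cd contrib/docker
docker-compose up -d --build
docker-compose logs -f synapse   # watch synapse restart on DB-connect failures
```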
16 contrib/purge_api/README.md Normal file

@@ -0,0 +1,16 @@
Purge history API examples
==========================

# `purge_history.sh`

A bash file, that uses the [purge history API](/docs/admin_api/README.rst) to
purge all messages in a list of rooms up to a certain event. You can select a
timeframe or a number of messages that you want to keep in the room.

Just configure the variables DOMAIN, ADMIN, ROOMS_ARRAY and TIME at the top of
the script.

# `purge_remote_media.sh`

A bash file, that uses the [purge history API](/docs/admin_api/README.rst) to
purge all old cached remote media.
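For reference, a sketch of the admin API call that `purge_history.sh` (below) drives, with placeholder values; the endpoint and payload are taken from the script itself:

```bash
# Purge everything in a room up to a given event, including local events.
# $TOKEN must belong to a server admin; $ROOM and $EVENT_ID are placeholders.
curl --header "Authorization: Bearer $TOKEN" -X POST \
    "yourserver.tld:8008/_matrix/client/r0/admin/purge_history/$ROOM/$EVENT_ID" \
    -d '{"delete_local_events": true}'
```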
141 contrib/purge_api/purge_history.sh Normal file

@@ -0,0 +1,141 @@
#!/bin/bash

# this script will use the api:
# https://github.com/matrix-org/synapse/blob/master/docs/admin_api/purge_history_api.rst
#
# It will purge all messages in a list of rooms up to a cetrain event

###################################################################################################
# define your domain and admin user
###################################################################################################
# add this user as admin in your home server:
DOMAIN=yourserver.tld
# add this user as admin in your home server:
ADMIN="@you_admin_username:$DOMAIN"

API_URL="$DOMAIN:8008/_matrix/client/r0"

###################################################################################################
#choose the rooms to prune old messages from (add a free comment at the end)
###################################################################################################
# the room_id's you can get e.g. from your Riot clients "View Source" button on each message
ROOMS_ARRAY=(
'!DgvjtOljKujDBrxyHk:matrix.org#riot:matrix.org'
'!QtykxKocfZaZOUrTwp:matrix.org#Matrix HQ'
)

# ALTERNATIVELY:
# you can select all the rooms that are not encrypted and loop over the result:
# SELECT room_id FROM rooms WHERE room_id NOT IN (SELECT DISTINCT room_id FROM events WHERE type ='m.room.encrypted')
# or
# select all rooms with at least 100 members:
# SELECT q.room_id FROM (select count(*) as numberofusers, room_id FROM current_state_events WHERE type ='m.room.member'
#   GROUP BY room_id) AS q LEFT JOIN room_aliases a ON q.room_id=a.room_id WHERE q.numberofusers > 100 ORDER BY numberofusers desc

###################################################################################################
# evaluate the EVENT_ID before which should be pruned
###################################################################################################
# choose a time before which the messages should be pruned:
TIME='12 months ago'
# ALTERNATIVELY:
# a certain time:
# TIME='2016-08-31 23:59:59'

# creates a timestamp from the given time string:
UNIX_TIMESTAMP=$(date +%s%3N --date='TZ="UTC+2" '"$TIME")

# ALTERNATIVELY:
# prune all messages that are older than 1000 messages ago:
# LAST_MESSAGES=1000
# SQL_GET_EVENT="SELECT event_id from events WHERE type='m.room.message' AND room_id ='$ROOM' ORDER BY received_ts DESC LIMIT 1 offset $(($LAST_MESSAGES - 1))"

# ALTERNATIVELY:
# select the EVENT_ID manually:
#EVENT_ID='$1471814088343495zpPNI:matrix.org' # an example event from 21st of Aug 2016 by Matthew

###################################################################################################
# make the admin user a server admin in the database with
###################################################################################################
# psql -A -t --dbname=synapse -c "UPDATE users SET admin=1 WHERE name LIKE '$ADMIN'"

###################################################################################################
# database function
###################################################################################################
sql (){
  # for sqlite3:
  #sqlite3 homeserver.db "pragma busy_timeout=20000;$1" | awk '{print $2}'
  # for postgres:
  psql -A -t --dbname=synapse -c "$1" | grep -v 'Pager'
}

###################################################################################################
# get an access token
###################################################################################################
# for example externally by watching Riot in your browser's network inspector
# or internally on the server locally, use this:
TOKEN=$(sql "SELECT token FROM access_tokens WHERE user_id='$ADMIN' ORDER BY id DESC LIMIT 1")
AUTH="Authorization: Bearer $TOKEN"

###################################################################################################
# check, if your TOKEN works. For example this works:
###################################################################################################
# $ curl --header "$AUTH" "$API_URL/rooms/$ROOM/state/m.room.power_levels"

###################################################################################################
# finally start pruning the room:
###################################################################################################
POSTDATA='{"delete_local_events":"true"}' # this will really delete local events, so the messages in the room really disappear unless they are restored by remote federation

for ROOM in "${ROOMS_ARRAY[@]}"; do
    echo "########################################### $(date) ################# "
    echo "pruning room: $ROOM ..."
    ROOM=${ROOM%#*}
    #set -x
    echo "check for alias in db..."
    # for postgres:
    sql "SELECT * FROM room_aliases WHERE room_id='$ROOM'"
    echo "get event..."
    # for postgres:
    EVENT_ID=$(sql "SELECT event_id FROM events WHERE type='m.room.message' AND received_ts<'$UNIX_TIMESTAMP' AND room_id='$ROOM' ORDER BY received_ts DESC LIMIT 1;")
    if [ "$EVENT_ID" == "" ]; then
        echo "no event $TIME"
    else
        echo "event: $EVENT_ID"
        SLEEP=2
        set -x
        # call purge
        OUT=$(curl --header "$AUTH" -s -d $POSTDATA POST "$API_URL/admin/purge_history/$ROOM/$EVENT_ID")
        PURGE_ID=$(echo "$OUT" |grep purge_id|cut -d'"' -f4 )
        if [ "$PURGE_ID" == "" ]; then
            # probably the history purge is already in progress for $ROOM
            : "continuing with next room"
        else
            while : ; do
                # get status of purge and sleep longer each time if still active
                sleep $SLEEP
                STATUS=$(curl --header "$AUTH" -s GET "$API_URL/admin/purge_history_status/$PURGE_ID" |grep status|cut -d'"' -f4)
                : "$ROOM --> Status: $STATUS"
                [[ "$STATUS" == "active" ]] || break
                SLEEP=$((SLEEP + 1))
            done
        fi
        set +x
        sleep 1
    fi
done


###################################################################################################
# additionally
###################################################################################################
# to benefit from pruning large amounts of data, you need to call VACUUM to free the unused space.
# This can take a very long time (hours) and the client have to be stopped while you do so:
# $ synctl stop
# $ sqlite3 -line homeserver.db "vacuum;"
# $ synctl start

# This could be set, so you don't need to prune every time after deleting some rows:
# $ sqlite3 homeserver.db "PRAGMA auto_vacuum = FULL;"
# be cautious, it could make the database somewhat slow if there are a lot of deletions

exit
54 contrib/purge_api/purge_remote_media.sh Normal file

@@ -0,0 +1,54 @@
#!/bin/bash

DOMAIN=yourserver.tld
# add this user as admin in your home server:
ADMIN="@you_admin_username:$DOMAIN"

API_URL="$DOMAIN:8008/_matrix/client/r0"

# choose a time before which the messages should be pruned:
# TIME='2016-08-31 23:59:59'
TIME='12 months ago'

# creates a timestamp from the given time string:
UNIX_TIMESTAMP=$(date +%s%3N --date='TZ="UTC+2" '"$TIME")


###################################################################################################
# database function
###################################################################################################
sql (){
  # for sqlite3:
  #sqlite3 homeserver.db "pragma busy_timeout=20000;$1" | awk '{print $2}'
  # for postgres:
  psql -A -t --dbname=synapse -c "$1" | grep -v 'Pager'
}

###############################################################################
# make the admin user a server admin in the database with
###############################################################################
# sql "UPDATE users SET admin=1 WHERE name LIKE '$ADMIN'"

###############################################################################
# get an access token
###############################################################################
# for example externally by watching Riot in your browser's network inspector
# or internally on the server locally, use this:
TOKEN=$(sql "SELECT token FROM access_tokens WHERE user_id='$ADMIN' ORDER BY id DESC LIMIT 1")

###############################################################################
# check, if your TOKEN works. For example this works:
###############################################################################
# curl --header "Authorization: Bearer $TOKEN" "$API_URL/rooms/$ROOM/state/m.room.power_levels"

###############################################################################
# optional check size before
###############################################################################
# echo calculate used storage before ...
# du -shc ../.synapse/media_store/*

###############################################################################
# finally start pruning media:
###############################################################################
set -x # for debugging the generated string
curl --header "Authorization: Bearer $TOKEN" -v POST "$API_URL/admin/purge_media_cache/?before_ts=$UNIX_TIMESTAMP"
docker/conf/homeserver.yaml

@@ -21,7 +21,7 @@ listeners:
 {% if not SYNAPSE_NO_TLS %}
   -
     port: 8448
-    bind_addresses: ['0.0.0.0']
+    bind_addresses: ['::']
     type: http
     tls: true
     x_forwarded: false
@@ -34,7 +34,7 @@ listeners:
 
   - port: 8008
     tls: false
-    bind_addresses: ['0.0.0.0']
+    bind_addresses: ['::']
     type: http
     x_forwarded: false
 
@@ -150,10 +150,12 @@ enable_group_creation: true
 
 # The list of identity servers trusted to verify third party
 # identifiers by this server.
+#
+# Also defines the ID server which will be called when an account is
+# deactivated (one will be picked arbitrarily).
 trusted_third_party_id_servers:
     - matrix.org
     - vector.im
-    - riot.im
 
 ## Metrics ###
 
@@ -211,7 +213,9 @@ email:
    require_transport_security: False
    notif_from: "{{ SYNAPSE_SMTP_FROM or "hostmaster@" + SYNAPSE_SERVER_NAME }}"
    app_name: Matrix
-   template_dir: res/templates
+   # if template_dir is unset, uses the example templates that are part of
+   # the Synapse distribution.
+   #template_dir: res/templates
    notif_template_html: notif_mail.html
    notif_template_text: notif_mail.txt
    notif_for_new_users: True
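A quick sanity check of the `['::']` binding — on dual-stack hosts this usually accepts IPv4 as well, via v4-mapped addresses, though that depends on kernel settings (an assumption, not something this diff states):

```bash
# Both of these should reach the client API once the listener binds to '::'.
curl http://127.0.0.1:8008/_matrix/client/versions
curl -g 'http://[::1]:8008/_matrix/client/versions'
```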
docs/admin_api/purge_history_api.rst

@@ -61,3 +61,11 @@ the following:
     }
 
 The status will be one of ``active``, ``complete``, or ``failed``.
+
+Reclaim disk space (Postgres)
+-----------------------------
+
+To reclaim the disk space and return it to the operating system, you need to run
+`VACUUM FULL;` on the database.
+
+https://www.postgresql.org/docs/current/sql-vacuum.html
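A sketch of acting on that advice with the tooling used elsewhere in this compare (`synctl` and `psql --dbname=synapse` as in the contrib scripts); `VACUUM FULL` takes exclusive locks, so stop Synapse first:

```bash
# Stop synapse, reclaim space, restart. VACUUM FULL rewrites tables and
# locks them exclusively, so do not run it while the server is up.
synctl stop
psql --dbname=synapse -c "VACUUM FULL;"
synctl start
```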
docs/consent_tracking.md

@@ -31,7 +31,7 @@ Note that the templates must be stored under a name giving the language of the
 template - currently this must always be `en` (for "English");
 internationalisation support is intended for the future.
 
 The template for the policy itself should be versioned and named according to
 the version: for example `1.0.html`. The version of the policy which the user
 has agreed to is stored in the database.
@@ -85,6 +85,37 @@ Once this is complete, and the server has been restarted, try visiting
 an error "Missing string query parameter 'u'". It is now possible to manually
 construct URIs where users can give their consent.
 
+### Enabling consent tracking at registration
+
+1. Add the following to your configuration:
+
+   ```yaml
+   user_consent:
+     require_at_registration: true
+     policy_name: "Privacy Policy" # or whatever you'd like to call the policy
+   ```
+
+2. In your consent templates, make use of the `public_version` variable to
+   see if an unauthenticated user is viewing the page. This is typically
+   wrapped around the form that would be used to actually agree to the document:
+
+   ```
+   {% if not public_version %}
+     <!-- The variables used here are only provided when the 'u' param is given to the homeserver -->
+     <form method="post" action="consent">
+       <input type="hidden" name="v" value="{{version}}"/>
+       <input type="hidden" name="u" value="{{user}}"/>
+       <input type="hidden" name="h" value="{{userhmac}}"/>
+       <input type="submit" value="Sure thing!"/>
+     </form>
+   {% endif %}
+   ```
+
+3. Restart Synapse to apply the changes.
+
+Visiting `https://<server>/_matrix/consent` should now give you a view of the privacy
+document. This is what users will be able to see when registering for accounts.
+
 ### Constructing the consent URI
 
 It may be useful to manually construct the "consent URI" for a given user - for
@@ -106,6 +137,12 @@ query parameters:
 `https://<server>/_matrix/consent?u=<user>&h=68a152465a4d...`.
 
 
+Note that not providing a `u` parameter will be interpreted as wanting to view
+the document from an unauthenticated perspective, such as prior to registration.
+Therefore, the `h` parameter is not required in this scenario. To enable this
+behaviour, set `require_at_registration` to `true` in your `user_consent` config.
+
+
 Sending users a server notice asking them to agree to the policy
 ----------------------------------------------------------------
@@ -12,12 +12,15 @@
<p>
All your base are belong to us.
</p>
<form method="post" action="consent">
<input type="hidden" name="v" value="{{version}}"/>
<input type="hidden" name="u" value="{{user}}"/>
<input type="hidden" name="h" value="{{userhmac}}"/>
<input type="submit" value="Sure thing!"/>
</form>
{% if not public_version %}
<!-- The variables used here are only provided when the 'u' param is given to the homeserver -->
<form method="post" action="consent">
<input type="hidden" name="v" value="{{version}}"/>
<input type="hidden" name="u" value="{{user}}"/>
<input type="hidden" name="h" value="{{userhmac}}"/>
<input type="submit" value="Sure thing!"/>
</form>
{% endif %}
{% endif %}
</body>
</html>

@@ -14,22 +14,3 @@ fi

# set up the virtualenv
tox -e py27 --notest -v

TOX_BIN=$TOX_DIR/py27/bin

# cryptography 2.2 requires setuptools >= 18.5.
#
# older versions of virtualenv (?) give us a virtualenv with the same version
# of setuptools as is installed on the system python (and tox runs virtualenv
# under python3, so we get the version of setuptools that is installed on that).
#
# anyway, make sure that we have a recent enough setuptools.
$TOX_BIN/pip install 'setuptools>=18.5'

# we also need a semi-recent version of pip, because old ones fail to install
# the "enum34" dependency of cryptography.
$TOX_BIN/pip install 'pip>=10'

{ python synapse/python_dependencies.py
echo lxml
} | xargs $TOX_BIN/pip install

@@ -1,21 +1,20 @@
from synapse.events import FrozenEvent
from synapse.api.auth import Auth

from mock import Mock
from __future__ import print_function

import argparse
import itertools
import json
import sys

from mock import Mock

from synapse.api.auth import Auth
from synapse.events import FrozenEvent


def check_auth(auth, auth_chain, events):
auth_chain.sort(key=lambda e: e.depth)

auth_map = {
e.event_id: e
for e in auth_chain
}
auth_map = {e.event_id: e for e in auth_chain}

create_events = {}
for e in auth_chain:
@@ -25,31 +24,26 @@ def check_auth(auth, auth_chain, events):
for e in itertools.chain(auth_chain, events):
auth_events_list = [auth_map[i] for i, _ in e.auth_events]

auth_events = {
(e.type, e.state_key): e
for e in auth_events_list
}
auth_events = {(e.type, e.state_key): e for e in auth_events_list}

auth_events[("m.room.create", "")] = create_events[e.room_id]

try:
auth.check(e, auth_events=auth_events)
except Exception as ex:
print "Failed:", e.event_id, e.type, e.state_key
print "Auth_events:", auth_events
print ex
print json.dumps(e.get_dict(), sort_keys=True, indent=4)
print("Failed:", e.event_id, e.type, e.state_key)
print("Auth_events:", auth_events)
print(ex)
print(json.dumps(e.get_dict(), sort_keys=True, indent=4))
# raise
print "Success:", e.event_id, e.type, e.state_key
print("Success:", e.event_id, e.type, e.state_key)


if __name__ == '__main__':
parser = argparse.ArgumentParser()

parser.add_argument(
'json',
nargs='?',
type=argparse.FileType('r'),
default=sys.stdin,
'json', nargs='?', type=argparse.FileType('r'), default=sys.stdin
)

args = parser.parse_args()

@@ -1,10 +1,15 @@
from synapse.crypto.event_signing import *
from unpaddedbase64 import encode_base64

import argparse
import hashlib
import sys
import json
import logging
import sys

from unpaddedbase64 import encode_base64

from synapse.crypto.event_signing import (
check_event_content_hash,
compute_event_reference_hash,
)


class dictobj(dict):
@@ -24,27 +29,26 @@ class dictobj(dict):

def main():
parser = argparse.ArgumentParser()
parser.add_argument("input_json", nargs="?", type=argparse.FileType('r'),
default=sys.stdin)
parser.add_argument(
"input_json", nargs="?", type=argparse.FileType('r'), default=sys.stdin
)
args = parser.parse_args()
logging.basicConfig()

event_json = dictobj(json.load(args.input_json))

algorithms = {
"sha256": hashlib.sha256,
}
algorithms = {"sha256": hashlib.sha256}

for alg_name in event_json.hashes:
if check_event_content_hash(event_json, algorithms[alg_name]):
print "PASS content hash %s" % (alg_name,)
print("PASS content hash %s" % (alg_name,))
else:
print "FAIL content hash %s" % (alg_name,)
print("FAIL content hash %s" % (alg_name,))

for algorithm in algorithms.values():
name, h_bytes = compute_event_reference_hash(event_json, algorithm)
print "Reference hash %s: %s" % (name, encode_base64(h_bytes))
print("Reference hash %s: %s" % (name, encode_base64(h_bytes)))

if __name__=="__main__":

if __name__ == "__main__":
main()

@@ -1,15 +1,15 @@

from signedjson.sign import verify_signed_json
import argparse
import json
import logging
import sys
import urllib2

import dns.resolver
from signedjson.key import decode_verify_key_bytes, write_signing_keys
from signedjson.sign import verify_signed_json
from unpaddedbase64 import decode_base64

import urllib2
import json
import sys
import dns.resolver
import pprint
import argparse
import logging

def get_targets(server_name):
if ":" in server_name:
@@ -23,6 +23,7 @@ def get_targets(server_name):
except dns.resolver.NXDOMAIN:
yield (server_name, 8448)


def get_server_keys(server_name, target, port):
url = "https://%s:%i/_matrix/key/v1" % (target, port)
keys = json.load(urllib2.urlopen(url))
@@ -33,12 +34,14 @@ def get_server_keys(server_name, target, port):
verify_keys[key_id] = verify_key
return verify_keys


def main():

parser = argparse.ArgumentParser()
parser.add_argument("signature_name")
parser.add_argument("input_json", nargs="?", type=argparse.FileType('r'),
default=sys.stdin)
parser.add_argument(
"input_json", nargs="?", type=argparse.FileType('r'), default=sys.stdin
)

args = parser.parse_args()
logging.basicConfig()
@@ -48,24 +51,23 @@ def main():
for target, port in get_targets(server_name):
try:
keys = get_server_keys(server_name, target, port)
print "Using keys from https://%s:%s/_matrix/key/v1" % (target, port)
print("Using keys from https://%s:%s/_matrix/key/v1" % (target, port))
write_signing_keys(sys.stdout, keys.values())
break
except:
except Exception:
logging.exception("Error talking to %s:%s", target, port)

json_to_check = json.load(args.input_json)
print "Checking JSON:"
print("Checking JSON:")
for key_id in json_to_check["signatures"][args.signature_name]:
try:
key = keys[key_id]
verify_signed_json(json_to_check, args.signature_name, key)
print "PASS %s" % (key_id,)
except:
print("PASS %s" % (key_id,))
except Exception:
logging.exception("Check for key %s failed" % (key_id,))
print "FAIL %s" % (key_id,)
print("FAIL %s" % (key_id,))


if __name__ == '__main__':
main()

@@ -1,13 +1,21 @@
import hashlib
import json
import sys
import time

import six

import psycopg2
import yaml
import sys
import json
import time
import hashlib
from unpaddedbase64 import encode_base64
from canonicaljson import encode_canonical_json
from signedjson.key import read_signing_keys
from signedjson.sign import sign_json
from canonicaljson import encode_canonical_json
from unpaddedbase64 import encode_base64

if six.PY2:
db_type = six.moves.builtins.buffer
else:
db_type = memoryview


def select_v1_keys(connection):
@@ -39,7 +47,9 @@ def select_v2_json(connection):
cursor.close()
results = {}
for server_name, key_id, key_json in rows:
results.setdefault(server_name, {})[key_id] = json.loads(str(key_json).decode("utf-8"))
results.setdefault(server_name, {})[key_id] = json.loads(
str(key_json).decode("utf-8")
)
return results


@@ -47,10 +57,7 @@ def convert_v1_to_v2(server_name, valid_until, keys, certificate):
return {
"old_verify_keys": {},
"server_name": server_name,
"verify_keys": {
key_id: {"key": key}
for key_id, key in keys.items()
},
"verify_keys": {key_id: {"key": key} for key_id, key in keys.items()},
"valid_until_ts": valid_until,
"tls_fingerprints": [fingerprint(certificate)],
}
@@ -65,7 +72,7 @@ def rows_v2(server, json):
valid_until = json["valid_until_ts"]
key_json = encode_canonical_json(json)
for key_id in json["verify_keys"]:
yield (server, key_id, "-", valid_until, valid_until, buffer(key_json))
yield (server, key_id, "-", valid_until, valid_until, db_type(key_json))


def main():
@@ -87,7 +94,7 @@ def main():

result = {}
for server in keys:
if not server in json:
if server not in json:
v2_json = convert_v1_to_v2(
server, valid_until, keys[server], certificates[server]
)
@@ -96,10 +103,7 @@ def main():

yaml.safe_dump(result, sys.stdout, default_flow_style=False)

rows = list(
row for server, json in result.items()
for row in rows_v2(server, json)
)
rows = list(row for server, json in result.items() for row in rows_v2(server, json))

cursor = connection.cursor()
cursor.executemany(
@@ -107,7 +111,7 @@ def main():
" server_name, key_id, from_server,"
" ts_added_ms, ts_valid_until_ms, key_json"
") VALUES (%s, %s, %s, %s, %s, %s)",
rows
rows,
)
connection.commit()

@@ -1,8 +1,16 @@
#! /usr/bin/python

from __future__ import print_function

import argparse
import ast
import os
import re
import sys

import yaml


class DefinitionVisitor(ast.NodeVisitor):
def __init__(self):
super(DefinitionVisitor, self).__init__()
@@ -42,15 +50,18 @@ def non_empty(defs):
functions = {name: non_empty(f) for name, f in defs['def'].items()}
classes = {name: non_empty(f) for name, f in defs['class'].items()}
result = {}
if functions: result['def'] = functions
if classes: result['class'] = classes
if functions:
result['def'] = functions
if classes:
result['class'] = classes
names = defs['names']
uses = []
for name in names.get('Load', ()):
if name not in names.get('Param', ()) and name not in names.get('Store', ()):
uses.append(name)
uses.extend(defs['attrs'])
if uses: result['uses'] = uses
if uses:
result['uses'] = uses
result['names'] = names
result['attrs'] = defs['attrs']
return result
@@ -95,7 +106,6 @@ def used_names(prefix, item, defs, names):


if __name__ == '__main__':
import sys, os, argparse, re

parser = argparse.ArgumentParser(description='Find definitions.')
parser.add_argument(
@@ -105,24 +115,28 @@ if __name__ == '__main__':
"--ignore", action="append", metavar="REGEXP", help="Ignore a pattern"
)
parser.add_argument(
"--pattern", action="append", metavar="REGEXP",
help="Search for a pattern"
"--pattern", action="append", metavar="REGEXP", help="Search for a pattern"
)
parser.add_argument(
"directories", nargs='+', metavar="DIR",
help="Directories to search for definitions"
"directories",
nargs='+',
metavar="DIR",
help="Directories to search for definitions",
)
parser.add_argument(
"--referrers", default=0, type=int,
help="Include referrers up to the given depth"
"--referrers",
default=0,
type=int,
help="Include referrers up to the given depth",
)
parser.add_argument(
"--referred", default=0, type=int,
help="Include referred down to the given depth"
"--referred",
default=0,
type=int,
help="Include referred down to the given depth",
)
parser.add_argument(
"--format", default="yaml",
help="Output format, one of 'yaml' or 'dot'"
"--format", default="yaml", help="Output format, one of 'yaml' or 'dot'"
)
args = parser.parse_args()

@@ -162,7 +176,7 @@ if __name__ == '__main__':
for used_by in entry.get("used", ()):
referrers.add(used_by)
for name, definition in names.items():
if not name in referrers:
if name not in referrers:
continue
if ignore and any(pattern.match(name) for pattern in ignore):
continue
@@ -176,7 +190,7 @@ if __name__ == '__main__':
for uses in entry.get("uses", ()):
referred.add(uses)
for name, definition in names.items():
if not name in referred:
if name not in referred:
continue
if ignore and any(pattern.match(name) for pattern in ignore):
continue
@@ -185,12 +199,12 @@ if __name__ == '__main__':
if args.format == 'yaml':
yaml.dump(result, sys.stdout, default_flow_style=False)
elif args.format == 'dot':
print "digraph {"
print("digraph {")
for name, entry in result.items():
print name
print(name)
for used_by in entry.get("used", ()):
if used_by in result:
print used_by, "->", name
print "}"
print(used_by, "->", name)
print("}")
else:
raise ValueError("Unknown format %r" % (args.format))

@@ -1,8 +1,11 @@
#!/usr/bin/env python2

import pymacaroons
from __future__ import print_function

import sys

import pymacaroons

if len(sys.argv) == 1:
sys.stderr.write("usage: %s macaroon [key]\n" % (sys.argv[0],))
sys.exit(1)
@@ -11,14 +14,14 @@ macaroon_string = sys.argv[1]
key = sys.argv[2] if len(sys.argv) > 2 else None

macaroon = pymacaroons.Macaroon.deserialize(macaroon_string)
print macaroon.inspect()
print(macaroon.inspect())

print ""
print("")

verifier = pymacaroons.Verifier()
verifier.satisfy_general(lambda c: True)
try:
verifier.verify(macaroon, key)
print "Signature is correct"
print("Signature is correct")
except Exception as e:
print str(e)
print(str(e))

@@ -18,21 +18,21 @@
from __future__ import print_function

import argparse
import base64
import json
import sys
from urlparse import urlparse, urlunparse

import nacl.signing
import json
import base64
import requests
import sys

from requests.adapters import HTTPAdapter
import srvlookup
import yaml
from requests.adapters import HTTPAdapter

# uncomment the following to enable debug logging of http requests
#from httplib import HTTPConnection
#HTTPConnection.debuglevel = 1
# from httplib import HTTPConnection
# HTTPConnection.debuglevel = 1


def encode_base64(input_bytes):
"""Encode bytes as a base64 string without any padding."""
@@ -58,15 +58,15 @@ def decode_base64(input_string):

def encode_canonical_json(value):
return json.dumps(
value,
# Encode code-points outside of ASCII as UTF-8 rather than \u escapes
ensure_ascii=False,
# Remove unecessary white space.
separators=(',',':'),
# Sort the keys of dictionaries.
sort_keys=True,
# Encode the resulting unicode as UTF-8 bytes.
).encode("UTF-8")
value,
# Encode code-points outside of ASCII as UTF-8 rather than \u escapes
ensure_ascii=False,
# Remove unecessary white space.
separators=(',', ':'),
# Sort the keys of dictionaries.
sort_keys=True,
# Encode the resulting unicode as UTF-8 bytes.
).encode("UTF-8")


def sign_json(json_object, signing_key, signing_name):
@@ -88,6 +88,7 @@ def sign_json(json_object, signing_key, signing_name):

NACL_ED25519 = "ed25519"


def decode_signing_key_base64(algorithm, version, key_base64):
"""Decode a base64 encoded signing key
Args:
@@ -143,25 +144,25 @@ def request_json(method, origin_name, origin_key, destination, path, content):
authorization_headers = []

for key, sig in signed_json["signatures"][origin_name].items():
header = "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (
origin_name, key, sig,
)
header = "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (origin_name, key, sig)
authorization_headers.append(bytes(header))
print ("Authorization: %s" % header, file=sys.stderr)
print("Authorization: %s" % header, file=sys.stderr)

dest = "matrix://%s%s" % (destination, path)
print ("Requesting %s" % dest, file=sys.stderr)
print("Requesting %s" % dest, file=sys.stderr)

s = requests.Session()
s.mount("matrix://", MatrixConnectionAdapter())

headers = {"Host": destination, "Authorization": authorization_headers[0]}

if method == "POST":
headers["Content-Type"] = "application/json"

result = s.request(
method=method,
url=dest,
headers={
"Host": destination,
"Authorization": authorization_headers[0]
},
headers=headers,
verify=False,
data=content,
)
@@ -171,50 +172,50 @@ def request_json(method, origin_name, origin_key, destination, path, content):

def main():
parser = argparse.ArgumentParser(
description=
"Signs and sends a federation request to a matrix homeserver",
description="Signs and sends a federation request to a matrix homeserver"
)

parser.add_argument(
"-N", "--server-name",
"-N",
"--server-name",
help="Name to give as the local homeserver. If unspecified, will be "
"read from the config file.",
"read from the config file.",
)

parser.add_argument(
"-k", "--signing-key-path",
"-k",
"--signing-key-path",
help="Path to the file containing the private ed25519 key to sign the "
"request with.",
"request with.",
)

parser.add_argument(
"-c", "--config",
"-c",
"--config",
default="homeserver.yaml",
help="Path to server config file. Ignored if --server-name and "
"--signing-key-path are both given.",
"--signing-key-path are both given.",
)

parser.add_argument(
"-d", "--destination",
"-d",
"--destination",
default="matrix.org",
help="name of the remote homeserver. We will do SRV lookups and "
"connect appropriately.",
"connect appropriately.",
)

parser.add_argument(
"-X", "--method",
help="HTTP method to use for the request. Defaults to GET if --data is"
"unspecified, POST if it is."
"-X",
"--method",
help="HTTP method to use for the request. Defaults to GET if --body is"
"unspecified, POST if it is.",
)

parser.add_argument(
"--body",
help="Data to send as the body of the HTTP request"
)
parser.add_argument("--body", help="Data to send as the body of the HTTP request")

parser.add_argument(
"path",
help="request path. We will add '/_matrix/federation/v1/' to this."
"path", help="request path. We will add '/_matrix/federation/v1/' to this."
)

args = parser.parse_args()
@@ -227,13 +228,15 @@ def main():

result = request_json(
args.method,
args.server_name, key, args.destination,
args.server_name,
key,
args.destination,
"/_matrix/federation/v1/" + args.path,
content=args.body,
)

json.dump(result, sys.stdout)
print ("")
print("")


def read_args_from_config(args):
@@ -253,7 +256,7 @@ class MatrixConnectionAdapter(HTTPAdapter):
return s, 8448

if ":" in s:
out = s.rsplit(":",1)
out = s.rsplit(":", 1)
try:
port = int(out[1])
except ValueError:
@@ -263,7 +266,7 @@ class MatrixConnectionAdapter(HTTPAdapter):
try:
srv = srvlookup.lookup("matrix", "tcp", s)[0]
return srv.host, srv.port
except:
except Exception:
return s, 8448

def get_connection(self, url, proxies=None):
@@ -272,10 +275,9 @@ class MatrixConnectionAdapter(HTTPAdapter):
(host, port) = self.lookup(parsed.netloc)
netloc = "%s:%d" % (host, port)
print("Connecting to %s" % (netloc,), file=sys.stderr)
url = urlunparse((
"https", netloc, parsed.path, parsed.params, parsed.query,
parsed.fragment,
))
url = urlunparse(
("https", netloc, parsed.path, parsed.params, parsed.query, parsed.fragment)
)
return super(MatrixConnectionAdapter, self).get_connection(url, proxies)

@@ -1,23 +1,31 @@
from synapse.storage.pdu import PduStore
from synapse.storage.signatures import SignatureStore
from synapse.storage._base import SQLBaseStore
from synapse.federation.units import Pdu
from synapse.crypto.event_signing import (
add_event_pdu_content_hash, compute_pdu_event_reference_hash
)
from synapse.api.events.utils import prune_pdu
from unpaddedbase64 import encode_base64, decode_base64
from canonicaljson import encode_canonical_json
from __future__ import print_function

import sqlite3
import sys

from unpaddedbase64 import decode_base64, encode_base64

from synapse.crypto.event_signing import (
add_event_pdu_content_hash,
compute_pdu_event_reference_hash,
)
from synapse.federation.units import Pdu
from synapse.storage._base import SQLBaseStore
from synapse.storage.pdu import PduStore
from synapse.storage.signatures import SignatureStore


class Store(object):
_get_pdu_tuples = PduStore.__dict__["_get_pdu_tuples"]
_get_pdu_content_hashes_txn = SignatureStore.__dict__["_get_pdu_content_hashes_txn"]
_get_prev_pdu_hashes_txn = SignatureStore.__dict__["_get_prev_pdu_hashes_txn"]
_get_pdu_origin_signatures_txn = SignatureStore.__dict__["_get_pdu_origin_signatures_txn"]
_get_pdu_origin_signatures_txn = SignatureStore.__dict__[
"_get_pdu_origin_signatures_txn"
]
_store_pdu_content_hash_txn = SignatureStore.__dict__["_store_pdu_content_hash_txn"]
_store_pdu_reference_hash_txn = SignatureStore.__dict__["_store_pdu_reference_hash_txn"]
_store_pdu_reference_hash_txn = SignatureStore.__dict__[
"_store_pdu_reference_hash_txn"
]
_store_prev_pdu_hash_txn = SignatureStore.__dict__["_store_prev_pdu_hash_txn"]
_simple_insert_txn = SQLBaseStore.__dict__["_simple_insert_txn"]

@@ -26,9 +34,7 @@ store = Store()


def select_pdus(cursor):
cursor.execute(
"SELECT pdu_id, origin FROM pdus ORDER BY depth ASC"
)
cursor.execute("SELECT pdu_id, origin FROM pdus ORDER BY depth ASC")

ids = cursor.fetchall()

@@ -41,23 +47,30 @@ def select_pdus(cursor):
for pdu in pdus:
try:
if pdu.prev_pdus:
print "PROCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus
print("PROCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus)
for pdu_id, origin, hashes in pdu.prev_pdus:
ref_alg, ref_hsh = reference_hashes[(pdu_id, origin)]
hashes[ref_alg] = encode_base64(ref_hsh)
store._store_prev_pdu_hash_txn(cursor, pdu.pdu_id, pdu.origin, pdu_id, origin, ref_alg, ref_hsh)
print "SUCCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus
store._store_prev_pdu_hash_txn(
cursor, pdu.pdu_id, pdu.origin, pdu_id, origin, ref_alg, ref_hsh
)
print("SUCCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus)
pdu = add_event_pdu_content_hash(pdu)
ref_alg, ref_hsh = compute_pdu_event_reference_hash(pdu)
reference_hashes[(pdu.pdu_id, pdu.origin)] = (ref_alg, ref_hsh)
store._store_pdu_reference_hash_txn(cursor, pdu.pdu_id, pdu.origin, ref_alg, ref_hsh)
store._store_pdu_reference_hash_txn(
cursor, pdu.pdu_id, pdu.origin, ref_alg, ref_hsh
)

for alg, hsh_base64 in pdu.hashes.items():
print alg, hsh_base64
store._store_pdu_content_hash_txn(cursor, pdu.pdu_id, pdu.origin, alg, decode_base64(hsh_base64))
print(alg, hsh_base64)
store._store_pdu_content_hash_txn(
cursor, pdu.pdu_id, pdu.origin, alg, decode_base64(hsh_base64)
)

except Exception:
print("FAILED_", pdu.pdu_id, pdu.origin, pdu.prev_pdus)

except:
print "FAILED_", pdu.pdu_id, pdu.origin, pdu.prev_pdus

def main():
conn = sqlite3.connect(sys.argv[1])
@@ -65,5 +78,6 @@ def main():
select_pdus(cursor)
conn.commit()

if __name__=='__main__':

if __name__ == '__main__':
main()

@@ -1,18 +1,17 @@
#! /usr/bin/python

import ast
import argparse
import ast
import os
import sys

import yaml

PATTERNS_V1 = []
PATTERNS_V2 = []

RESULT = {
"v1": PATTERNS_V1,
"v2": PATTERNS_V2,
}
RESULT = {"v1": PATTERNS_V1, "v2": PATTERNS_V2}


class CallVisitor(ast.NodeVisitor):
def visit_Call(self, node):
@@ -21,7 +20,6 @@ class CallVisitor(ast.NodeVisitor):
else:
return


if name == "client_path_patterns":
PATTERNS_V1.append(node.args[0].s)
elif name == "client_v2_patterns":
@@ -42,8 +40,10 @@ def find_patterns_in_file(filepath):
parser = argparse.ArgumentParser(description='Find url patterns.')

parser.add_argument(
"directories", nargs='+', metavar="DIR",
help="Directories to search for definitions"
"directories",
nargs='+',
metavar="DIR",
help="Directories to search for definitions",
)

args = parser.parse_args()

@@ -1,39 +0,0 @@
#!/usr/bin/env perl

use strict;
use warnings;

use DBI;
use DBD::SQLite;
use JSON;
use Getopt::Long;

my $db; # = "homeserver.db";
my $server = "http://localhost:8008";
my $size = 320;

GetOptions("db|d=s", \$db,
"server|s=s", \$server,
"width|w=i", \$size) or usage();

usage() unless $db;

my $dbh = DBI->connect("dbi:SQLite:dbname=$db","","") || die $DBI::errstr;

my $res = $dbh->selectall_arrayref("select token, name from access_tokens, users where access_tokens.user_id = users.id group by user_id") || die $DBI::errstr;

foreach (@$res) {
my ($token, $mxid) = ($_->[0], $_->[1]);
my ($user_id) = ($mxid =~ m/@(.*):/);
my ($url) = $dbh->selectrow_array("select avatar_url from profiles where user_id=?", undef, $user_id);
if (!$url || $url =~ /#auto$/) {
`curl -s -o tmp.png "$server/_matrix/media/v1/identicon?name=${mxid}&width=$size&height=$size"`;
my $json = `curl -s -X POST -H "Content-Type: image/png" -T "tmp.png" $server/_matrix/media/v1/upload?access_token=$token`;
my $content_uri = from_json($json)->{content_uri};
`curl -X PUT -H "Content-Type: application/json" --data '{ "avatar_url": "${content_uri}#auto"}' $server/_matrix/client/api/v1/profile/${mxid}/avatar_url?access_token=$token`;
}
}

sub usage {
die "usage: ./make-identicons.pl\n\t-d database [e.g. homeserver.db]\n\t-s homeserver (default: http://localhost:8008)\n\t-w identicon size in pixels (default 320)";
}
@@ -1,8 +1,9 @@
import requests
import collections
import json
import sys
import time
import json

import requests

Entry = collections.namedtuple("Entry", "name position rows")

@@ -30,11 +31,11 @@ def parse_response(content):


def replicate(server, streams):
return parse_response(requests.get(
server + "/_synapse/replication",
verify=False,
params=streams
).content)
return parse_response(
requests.get(
server + "/_synapse/replication", verify=False, params=streams
).content
)


def main():
@@ -45,16 +46,16 @@ def main():
try:
streams = {
row.name: row.position
for row in replicate(server, {"streams":"-1"})["streams"].rows
for row in replicate(server, {"streams": "-1"})["streams"].rows
}
except requests.exceptions.ConnectionError as e:
except requests.exceptions.ConnectionError:
time.sleep(0.1)

while True:
try:
results = replicate(server, streams)
except:
sys.stdout.write("connection_lost("+ repr(streams) + ")\n")
except Exception:
sys.stdout.write("connection_lost(" + repr(streams) + ")\n")
break
for update in results.values():
for row in update.rows:
@@ -62,6 +63,5 @@ def main():
streams[update.name] = update.position



if __name__=='__main__':
if __name__ == '__main__':
main()

@@ -1,17 +1,17 @@
#!/usr/bin/env python

import argparse

import getpass
import sys
import unicodedata

import bcrypt
import getpass

import yaml

bcrypt_rounds=12
bcrypt_rounds = 12
password_pepper = ""


def prompt_for_pass():
password = getpass.getpass("Password: ")

@@ -25,19 +25,27 @@ def prompt_for_pass():

return password


if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Calculate the hash of a new password, so that passwords"
" can be reset")
description=(
"Calculate the hash of a new password, so that passwords can be reset"
)
)
parser.add_argument(
"-p", "--password",
"-p",
"--password",
default=None,
help="New password for user. Will prompt if omitted.",
)
parser.add_argument(
"-c", "--config",
"-c",
"--config",
type=argparse.FileType('r'),
help="Path to server config file. Used to read in bcrypt_rounds and password_pepper.",
help=(
"Path to server config file. "
"Used to read in bcrypt_rounds and password_pepper."
),
)

args = parser.parse_args()
@@ -51,5 +59,21 @@ if __name__ == "__main__":
if not password:
password = prompt_for_pass()

print bcrypt.hashpw(password + password_pepper, bcrypt.gensalt(bcrypt_rounds))
# On Python 2, make sure we decode it to Unicode before we normalise it
if isinstance(password, bytes):
try:
password = password.decode(sys.stdin.encoding)
except UnicodeDecodeError:
print(
"ERROR! Your password is not decodable using your terminal encoding (%s)."
% (sys.stdin.encoding,)
)

pw = unicodedata.normalize("NFKC", password)

hashed = bcrypt.hashpw(
pw.encode('utf8') + password_pepper.encode("utf8"),
bcrypt.gensalt(bcrypt_rounds),
).decode('ascii')

print(hashed)

@@ -36,12 +36,9 @@ from __future__ import print_function

import argparse
import logging

import sys

import os

import shutil
import sys

from synapse.rest.media.v1.filepath import MediaFilePaths

@@ -77,24 +74,23 @@ def move_media(origin_server, file_id, src_paths, dest_paths):
if not os.path.exists(original_file):
logger.warn(
"Original for %s/%s (%s) does not exist",
origin_server, file_id, original_file,
origin_server,
file_id,
original_file,
)
else:
mkdir_and_move(
original_file,
dest_paths.remote_media_filepath(origin_server, file_id),
original_file, dest_paths.remote_media_filepath(origin_server, file_id)
)

# now look for thumbnails
original_thumb_dir = src_paths.remote_media_thumbnail_dir(
origin_server, file_id,
)
original_thumb_dir = src_paths.remote_media_thumbnail_dir(origin_server, file_id)
if not os.path.exists(original_thumb_dir):
return

mkdir_and_move(
original_thumb_dir,
dest_paths.remote_media_thumbnail_dir(origin_server, file_id)
dest_paths.remote_media_thumbnail_dir(origin_server, file_id),
)


@@ -109,24 +105,16 @@ def mkdir_and_move(original_file, dest_file):

if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class = argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
"-v", action='store_true', help='enable debug logging')
parser.add_argument(
"src_repo",
help="Path to source content repo",
)
parser.add_argument(
"dest_repo",
help="Path to source content repo",
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("-v", action='store_true', help='enable debug logging')
parser.add_argument("src_repo", help="Path to source content repo")
parser.add_argument("dest_repo", help="Path to source content repo")
args = parser.parse_args()

logging_config = {
"level": logging.DEBUG if args.v else logging.INFO,
"format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s"
"format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
}
logging.basicConfig(**logging_config)

@@ -14,197 +14,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import argparse
import getpass
import hashlib
import hmac
import json
import sys
import urllib2
import yaml


def request_registration(user, password, server_location, shared_secret, admin=False):
req = urllib2.Request(
"%s/_matrix/client/r0/admin/register" % (server_location,),
headers={'Content-Type': 'application/json'}
)

try:
if sys.version_info[:3] >= (2, 7, 9):
# As of version 2.7.9, urllib2 now checks SSL certs
import ssl
f = urllib2.urlopen(req, context=ssl.SSLContext(ssl.PROTOCOL_SSLv23))
else:
f = urllib2.urlopen(req)
body = f.read()
f.close()
nonce = json.loads(body)["nonce"]
except urllib2.HTTPError as e:
print "ERROR! Received %d %s" % (e.code, e.reason,)
if 400 <= e.code < 500:
if e.info().type == "application/json":
resp = json.load(e)
if "error" in resp:
print resp["error"]
sys.exit(1)

mac = hmac.new(
key=shared_secret,
digestmod=hashlib.sha1,
)

mac.update(nonce)
mac.update("\x00")
mac.update(user)
mac.update("\x00")
mac.update(password)
mac.update("\x00")
mac.update("admin" if admin else "notadmin")

mac = mac.hexdigest()

data = {
"nonce": nonce,
"username": user,
"password": password,
"mac": mac,
"admin": admin,
}

server_location = server_location.rstrip("/")

print "Sending registration request..."

req = urllib2.Request(
"%s/_matrix/client/r0/admin/register" % (server_location,),
data=json.dumps(data),
headers={'Content-Type': 'application/json'}
)
try:
if sys.version_info[:3] >= (2, 7, 9):
# As of version 2.7.9, urllib2 now checks SSL certs
import ssl
f = urllib2.urlopen(req, context=ssl.SSLContext(ssl.PROTOCOL_SSLv23))
else:
f = urllib2.urlopen(req)
f.read()
f.close()
print "Success."
except urllib2.HTTPError as e:
print "ERROR! Received %d %s" % (e.code, e.reason,)
if 400 <= e.code < 500:
if e.info().type == "application/json":
resp = json.load(e)
if "error" in resp:
print resp["error"]
sys.exit(1)


def register_new_user(user, password, server_location, shared_secret, admin):
if not user:
try:
default_user = getpass.getuser()
except:
default_user = None

if default_user:
user = raw_input("New user localpart [%s]: " % (default_user,))
if not user:
user = default_user
else:
user = raw_input("New user localpart: ")

if not user:
print "Invalid user name"
sys.exit(1)

if not password:
password = getpass.getpass("Password: ")

if not password:
print "Password cannot be blank."
sys.exit(1)

confirm_password = getpass.getpass("Confirm password: ")

if password != confirm_password:
print "Passwords do not match"
sys.exit(1)

if admin is None:
admin = raw_input("Make admin [no]: ")
if admin in ("y", "yes", "true"):
admin = True
else:
admin = False

request_registration(user, password, server_location, shared_secret, bool(admin))

from synapse._scripts.register_new_matrix_user import main

if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Used to register new users with a given home server when"
" registration has been disabled. The home server must be"
" configured with the 'registration_shared_secret' option"
" set.",
)
parser.add_argument(
"-u", "--user",
default=None,
help="Local part of the new user. Will prompt if omitted.",
)
parser.add_argument(
"-p", "--password",
default=None,
help="New password for user. Will prompt if omitted.",
)
admin_group = parser.add_mutually_exclusive_group()
admin_group.add_argument(
"-a", "--admin",
action="store_true",
help="Register new user as an admin. Will prompt if --no-admin is not set either.",
)
admin_group.add_argument(
"--no-admin",
action="store_true",
help="Register new user as a regular user. Will prompt if --admin is not set either.",
)

group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
"-c", "--config",
type=argparse.FileType('r'),
help="Path to server config file. Used to read in shared secret.",
)

group.add_argument(
"-k", "--shared-secret",
help="Shared secret as defined in server config file.",
)

parser.add_argument(
"server_url",
default="https://localhost:8448",
nargs='?',
help="URL to use to talk to the home server. Defaults to "
" 'https://localhost:8448'.",
)

args = parser.parse_args()

if "config" in args and args.config:
config = yaml.safe_load(args.config)
secret = config.get("registration_shared_secret", None)
if not secret:
print "No 'registration_shared_secret' defined in config."
sys.exit(1)
else:
secret = args.shared_secret

admin = None
if args.admin or args.no_admin:
admin = args.admin

register_new_user(args.user, args.password, args.server_url, secret, admin)
main()

@@ -15,23 +15,23 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from twisted.internet import defer, reactor
|
||||
from twisted.enterprise import adbapi
|
||||
|
||||
from synapse.storage._base import LoggingTransaction, SQLBaseStore
|
||||
from synapse.storage.engines import create_engine
|
||||
from synapse.storage.prepare_database import prepare_database
|
||||
|
||||
import argparse
|
||||
import curses
|
||||
import logging
|
||||
import sys
|
||||
import time
|
||||
import traceback
|
||||
import yaml
|
||||
|
||||
from six import string_types
|
||||
|
||||
import yaml
|
||||
|
||||
from twisted.enterprise import adbapi
|
||||
from twisted.internet import defer, reactor
|
||||
|
||||
from synapse.storage._base import LoggingTransaction, SQLBaseStore
|
||||
from synapse.storage.engines import create_engine
|
||||
from synapse.storage.prepare_database import prepare_database
|
||||
|
||||
logger = logging.getLogger("synapse_port_db")
|
||||
|
||||
@@ -105,6 +105,7 @@ class Store(object):
|
||||
|
||||
*All* database interactions should go through this object.
|
||||
"""
|
||||
|
||||
def __init__(self, db_pool, engine):
|
||||
self.db_pool = db_pool
|
||||
self.database_engine = engine
|
||||
@@ -135,7 +136,8 @@ class Store(object):
|
||||
txn = conn.cursor()
|
||||
return func(
|
||||
LoggingTransaction(txn, desc, self.database_engine, [], []),
|
||||
*args, **kwargs
|
||||
*args,
|
||||
**kwargs
|
||||
)
|
||||
except self.database_engine.module.DatabaseError as e:
|
||||
if self.database_engine.is_deadlock(e):
|
||||
@@ -158,22 +160,20 @@ class Store(object):
|
||||
def r(txn):
|
||||
txn.execute(sql, args)
|
||||
return txn.fetchall()
|
||||
|
||||
return self.runInteraction("execute_sql", r)
|
||||
|
||||
def insert_many_txn(self, txn, table, headers, rows):
|
||||
sql = "INSERT INTO %s (%s) VALUES (%s)" % (
|
||||
table,
|
||||
", ".join(k for k in headers),
|
||||
", ".join("%s" for _ in headers)
|
||||
", ".join("%s" for _ in headers),
|
||||
)
|
||||
|
||||
try:
|
||||
txn.executemany(sql, rows)
|
||||
except:
|
||||
logger.exception(
|
||||
"Failed to insert: %s",
|
||||
table,
|
||||
)
|
||||
except Exception:
|
||||
logger.exception("Failed to insert: %s", table)
|
||||
raise
|
||||
|
||||
|
||||
@@ -206,7 +206,7 @@ class Porter(object):
|
||||
"table_name": table,
|
||||
"forward_rowid": 1,
|
||||
"backward_rowid": 0,
|
||||
}
|
||||
},
|
||||
)
|
||||
|
||||
forward_chunk = 1
|
||||
@@ -221,10 +221,10 @@ class Porter(object):
|
||||
table, forward_chunk, backward_chunk
|
||||
)
|
||||
else:
|
||||
|
||||
def delete_all(txn):
|
||||
txn.execute(
|
||||
"DELETE FROM port_from_sqlite3 WHERE table_name = %s",
|
||||
(table,)
|
||||
"DELETE FROM port_from_sqlite3 WHERE table_name = %s", (table,)
|
||||
)
|
||||
txn.execute("TRUNCATE %s CASCADE" % (table,))
|
||||
|
||||
@@ -232,11 +232,7 @@ class Porter(object):
|
||||
|
||||
yield self.postgres_store._simple_insert(
|
||||
table="port_from_sqlite3",
|
||||
values={
|
||||
"table_name": table,
|
||||
"forward_rowid": 1,
|
||||
"backward_rowid": 0,
|
||||
}
|
||||
values={"table_name": table, "forward_rowid": 1, "backward_rowid": 0},
|
||||
)
|
||||
|
||||
forward_chunk = 1
|
||||
@@ -251,12 +247,16 @@ class Porter(object):
|
||||
)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def handle_table(self, table, postgres_size, table_size, forward_chunk,
|
||||
backward_chunk):
|
||||
def handle_table(
|
||||
self, table, postgres_size, table_size, forward_chunk, backward_chunk
|
||||
):
|
||||
logger.info(
|
||||
"Table %s: %i/%i (rows %i-%i) already ported",
|
||||
table, postgres_size, table_size,
|
||||
backward_chunk+1, forward_chunk-1,
|
||||
table,
|
||||
postgres_size,
|
||||
table_size,
|
||||
backward_chunk + 1,
|
||||
forward_chunk - 1,
|
||||
)
|
||||
|
||||
if not table_size:
|
||||
@@ -271,7 +271,9 @@ class Porter(object):
|
||||
return
|
||||
|
||||
if table in (
|
||||
"user_directory", "user_directory_search", "users_who_share_rooms",
|
||||
"user_directory",
|
||||
"user_directory_search",
|
||||
"users_who_share_rooms",
|
||||
"users_in_pubic_room",
|
||||
):
|
||||
# We don't port these tables, as they're a faff and we can regenreate
|
||||
@@ -283,37 +285,35 @@ class Porter(object):
|
||||
# We need to make sure there is a single row, `(X, null), as that is
|
||||
# what synapse expects to be there.
|
||||
yield self.postgres_store._simple_insert(
|
||||
table=table,
|
||||
values={"stream_id": None},
|
||||
table=table, values={"stream_id": None}
|
||||
)
|
||||
self.progress.update(table, table_size) # Mark table as done
|
||||
return
|
||||
|
||||
forward_select = (
|
||||
"SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?"
|
||||
% (table,)
|
||||
"SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?" % (table,)
|
||||
)
|
||||
|
||||
backward_select = (
|
||||
"SELECT rowid, * FROM %s WHERE rowid <= ? ORDER BY rowid LIMIT ?"
|
||||
% (table,)
|
||||
"SELECT rowid, * FROM %s WHERE rowid <= ? ORDER BY rowid LIMIT ?" % (table,)
|
||||
)
|
||||
|
||||
do_forward = [True]
|
||||
do_backward = [True]
|
||||
|
||||
while True:
|
||||
|
||||
def r(txn):
|
||||
forward_rows = []
|
||||
backward_rows = []
|
||||
if do_forward[0]:
|
||||
txn.execute(forward_select, (forward_chunk, self.batch_size,))
|
||||
txn.execute(forward_select, (forward_chunk, self.batch_size))
|
||||
forward_rows = txn.fetchall()
|
||||
if not forward_rows:
|
||||
do_forward[0] = False
|
||||
|
||||
if do_backward[0]:
|
||||
txn.execute(backward_select, (backward_chunk, self.batch_size,))
|
||||
txn.execute(backward_select, (backward_chunk, self.batch_size))
|
||||
backward_rows = txn.fetchall()
|
||||
if not backward_rows:
|
||||
do_backward[0] = False
|
||||
@@ -325,9 +325,7 @@ class Porter(object):
|
||||
|
||||
return headers, forward_rows, backward_rows
|
||||
|
||||
headers, frows, brows = yield self.sqlite_store.runInteraction(
|
||||
"select", r
|
||||
)
|
||||
headers, frows, brows = yield self.sqlite_store.runInteraction("select", r)
|
||||
|
||||
if frows or brows:
|
||||
if frows:
|
||||
@@ -339,9 +337,7 @@ class Porter(object):
|
||||
rows = self._convert_rows(table, headers, rows)
|
||||
|
||||
def insert(txn):
|
||||
self.postgres_store.insert_many_txn(
|
||||
txn, table, headers[1:], rows
|
||||
)
|
||||
self.postgres_store.insert_many_txn(txn, table, headers[1:], rows)
|
||||
|
||||
self.postgres_store._simple_update_one_txn(
|
||||
txn,
|
||||
@@ -362,8 +358,9 @@ class Porter(object):
|
||||
return
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def handle_search_table(self, postgres_size, table_size, forward_chunk,
|
||||
backward_chunk):
|
||||
def handle_search_table(
|
||||
self, postgres_size, table_size, forward_chunk, backward_chunk
|
||||
):
|
||||
select = (
|
||||
"SELECT es.rowid, es.*, e.origin_server_ts, e.stream_ordering"
|
||||
" FROM event_search as es"
|
||||
@@ -373,8 +370,9 @@ class Porter(object):
|
||||
)
|
||||
|
||||
while True:
|
||||
|
||||
def r(txn):
|
||||
txn.execute(select, (forward_chunk, self.batch_size,))
|
||||
txn.execute(select, (forward_chunk, self.batch_size))
|
||||
rows = txn.fetchall()
|
||||
headers = [column[0] for column in txn.description]
|
||||
|
||||
@@ -402,18 +400,21 @@ class Porter(object):
|
||||
else:
|
||||
rows_dict.append(d)
|
||||
|
||||
txn.executemany(sql, [
|
||||
(
|
||||
row["event_id"],
|
||||
row["room_id"],
|
||||
row["key"],
|
||||
row["sender"],
|
||||
row["value"],
|
||||
row["origin_server_ts"],
|
||||
row["stream_ordering"],
|
||||
)
|
||||
for row in rows_dict
|
||||
])
|
||||
txn.executemany(
|
||||
sql,
|
||||
[
|
||||
(
|
||||
row["event_id"],
|
||||
row["room_id"],
|
||||
row["key"],
|
||||
row["sender"],
|
||||
row["value"],
|
||||
row["origin_server_ts"],
|
||||
row["stream_ordering"],
|
||||
)
|
||||
for row in rows_dict
|
||||
],
|
||||
)
|
||||
|
||||
self.postgres_store._simple_update_one_txn(
|
||||
txn,
|
||||
@@ -437,7 +438,8 @@ class Porter(object):
|
||||
def setup_db(self, db_config, database_engine):
|
||||
db_conn = database_engine.module.connect(
|
||||
**{
|
||||
k: v for k, v in db_config.get("args", {}).items()
|
||||
k: v
|
||||
for k, v in db_config.get("args", {}).items()
|
||||
if not k.startswith("cp_")
|
||||
}
|
||||
)
|
||||
@@ -450,13 +452,11 @@ class Porter(object):
|
||||
def run(self):
|
||||
try:
|
||||
sqlite_db_pool = adbapi.ConnectionPool(
|
||||
self.sqlite_config["name"],
|
||||
**self.sqlite_config["args"]
|
||||
self.sqlite_config["name"], **self.sqlite_config["args"]
|
||||
)
|
||||
|
||||
postgres_db_pool = adbapi.ConnectionPool(
|
||||
self.postgres_config["name"],
|
||||
**self.postgres_config["args"]
|
||||
self.postgres_config["name"], **self.postgres_config["args"]
|
||||
)
|
||||
|
||||
sqlite_engine = create_engine(sqlite_config)
|
||||
@@ -465,9 +465,7 @@ class Porter(object):
|
||||
self.sqlite_store = Store(sqlite_db_pool, sqlite_engine)
|
||||
self.postgres_store = Store(postgres_db_pool, postgres_engine)
|
||||
|
||||
yield self.postgres_store.execute(
|
||||
postgres_engine.check_database
|
||||
)
|
||||
yield self.postgres_store.execute(postgres_engine.check_database)
|
||||
|
||||
# Step 1. Set up databases.
|
||||
self.progress.set_state("Preparing SQLite3")
|
||||
@@ -477,6 +475,7 @@ class Porter(object):
|
||||
self.setup_db(postgres_config, postgres_engine)
|
||||
|
||||
self.progress.set_state("Creating port tables")
|
||||
|
||||
def create_port_table(txn):
|
||||
txn.execute(
|
||||
"CREATE TABLE IF NOT EXISTS port_from_sqlite3 ("
|
||||
@@ -501,10 +500,9 @@ class Porter(object):
|
||||
)
|
||||
|
||||
try:
|
||||
yield self.postgres_store.runInteraction(
|
||||
"alter_table", alter_table
|
||||
)
|
||||
except Exception as e:
|
||||
yield self.postgres_store.runInteraction("alter_table", alter_table)
|
||||
except Exception:
|
||||
# On Error Resume Next
|
||||
pass
|
||||
|
||||
yield self.postgres_store.runInteraction(
|
||||
@@ -514,11 +512,7 @@ class Porter(object):
|
||||
# Step 2. Get tables.
|
||||
self.progress.set_state("Fetching tables")
|
||||
sqlite_tables = yield self.sqlite_store._simple_select_onecol(
|
||||
table="sqlite_master",
|
||||
keyvalues={
|
||||
"type": "table",
|
||||
},
|
||||
retcol="name",
|
||||
table="sqlite_master", keyvalues={"type": "table"}, retcol="name"
|
||||
)
|
||||
|
||||
postgres_tables = yield self.postgres_store._simple_select_onecol(
|
||||
@@ -545,18 +539,14 @@ class Porter(object):
|
||||
# Step 4. Do the copying.
|
||||
self.progress.set_state("Copying to postgres")
|
||||
yield defer.gatherResults(
|
||||
[
|
||||
self.handle_table(*res)
|
||||
for res in setup_res
|
||||
],
|
||||
consumeErrors=True,
|
||||
[self.handle_table(*res) for res in setup_res], consumeErrors=True
|
||||
)
|
||||
|
||||
# Step 5. Do final post-processing
|
||||
yield self._setup_state_group_id_seq()
|
||||
|
||||
self.progress.done()
|
||||
except:
|
||||
except Exception:
|
||||
global end_error_exec_info
|
||||
end_error_exec_info = sys.exc_info()
|
||||
logger.exception("")
|
||||
@@ -566,9 +556,7 @@ class Porter(object):
|
||||
def _convert_rows(self, table, headers, rows):
|
||||
bool_col_names = BOOLEAN_COLUMNS.get(table, [])
|
||||
|
||||
bool_cols = [
|
||||
i for i, h in enumerate(headers) if h in bool_col_names
|
||||
]
|
||||
bool_cols = [i for i, h in enumerate(headers) if h in bool_col_names]
|
||||
|
||||
class BadValueException(Exception):
|
||||
pass
|
||||
@@ -577,18 +565,21 @@ class Porter(object):
|
||||
if j in bool_cols:
|
||||
return bool(col)
|
||||
elif isinstance(col, string_types) and "\0" in col:
|
||||
logger.warn("DROPPING ROW: NUL value in table %s col %s: %r", table, headers[j], col)
|
||||
raise BadValueException();
|
||||
logger.warn(
|
||||
"DROPPING ROW: NUL value in table %s col %s: %r",
|
||||
table,
|
||||
headers[j],
|
||||
col,
|
||||
)
|
||||
raise BadValueException()
|
||||
return col
|
||||
|
||||
outrows = []
|
||||
for i, row in enumerate(rows):
|
||||
try:
|
||||
outrows.append(tuple(
|
||||
conv(j, col)
|
||||
for j, col in enumerate(row)
|
||||
if j > 0
|
||||
))
|
||||
outrows.append(
|
||||
tuple(conv(j, col) for j, col in enumerate(row) if j > 0)
|
||||
)
|
||||
except BadValueException:
|
||||
pass
|
||||
|
||||
@@ -616,9 +607,7 @@ class Porter(object):
|
||||
|
||||
return headers, [r for r in rows if r[ts_ind] < yesterday]
|
||||
|
||||
headers, rows = yield self.sqlite_store.runInteraction(
|
||||
"select", r,
|
||||
)
|
||||
headers, rows = yield self.sqlite_store.runInteraction("select", r)
|
||||
|
||||
rows = self._convert_rows("sent_transactions", headers, rows)
|
||||
|
||||
@@ -639,7 +628,7 @@ class Porter(object):
|
||||
txn.execute(
|
||||
"SELECT rowid FROM sent_transactions WHERE ts >= ?"
|
||||
" ORDER BY rowid ASC LIMIT 1",
|
||||
(yesterday,)
|
||||
(yesterday,),
|
||||
)
|
||||
|
||||
rows = txn.fetchall()
|
||||
@@ -657,21 +646,17 @@ class Porter(object):
|
||||
"table_name": "sent_transactions",
|
||||
"forward_rowid": next_chunk,
|
||||
"backward_rowid": 0,
|
||||
}
|
||||
},
|
||||
)
|
||||
|
||||
def get_sent_table_size(txn):
|
||||
txn.execute(
|
||||
"SELECT count(*) FROM sent_transactions"
|
||||
" WHERE ts >= ?",
|
||||
(yesterday,)
|
||||
"SELECT count(*) FROM sent_transactions" " WHERE ts >= ?", (yesterday,)
|
||||
)
|
||||
size, = txn.fetchone()
|
||||
return int(size)
|
||||
|
||||
remaining_count = yield self.sqlite_store.execute(
|
||||
get_sent_table_size
|
||||
)
|
||||
remaining_count = yield self.sqlite_store.execute(get_sent_table_size)
|
||||
|
||||
total_count = remaining_count + inserted_rows
|
||||
|
||||
@@ -680,13 +665,11 @@ class Porter(object):
|
||||
@defer.inlineCallbacks
|
||||
def _get_remaining_count_to_port(self, table, forward_chunk, backward_chunk):
|
||||
frows = yield self.sqlite_store.execute_sql(
|
||||
"SELECT count(*) FROM %s WHERE rowid >= ?" % (table,),
|
||||
forward_chunk,
|
||||
"SELECT count(*) FROM %s WHERE rowid >= ?" % (table,), forward_chunk
|
||||
)
|
||||
|
||||
brows = yield self.sqlite_store.execute_sql(
|
||||
"SELECT count(*) FROM %s WHERE rowid <= ?" % (table,),
|
||||
backward_chunk,
|
||||
"SELECT count(*) FROM %s WHERE rowid <= ?" % (table,), backward_chunk
|
||||
)
|
||||
|
||||
defer.returnValue(frows[0][0] + brows[0][0])
|
||||
@@ -694,7 +677,7 @@ class Porter(object):
|
||||
@defer.inlineCallbacks
|
||||
def _get_already_ported_count(self, table):
|
||||
rows = yield self.postgres_store.execute_sql(
|
||||
"SELECT count(*) FROM %s" % (table,),
|
||||
"SELECT count(*) FROM %s" % (table,)
|
||||
)
|
||||
|
||||
defer.returnValue(rows[0][0])
|
||||
@@ -717,22 +700,21 @@ class Porter(object):
|
||||
def _setup_state_group_id_seq(self):
|
||||
def r(txn):
|
||||
txn.execute("SELECT MAX(id) FROM state_groups")
|
||||
next_id = txn.fetchone()[0]+1
|
||||
txn.execute(
|
||||
"ALTER SEQUENCE state_group_id_seq RESTART WITH %s",
|
||||
(next_id,),
|
||||
)
|
||||
next_id = txn.fetchone()[0] + 1
|
||||
txn.execute("ALTER SEQUENCE state_group_id_seq RESTART WITH %s", (next_id,))
|
||||
|
||||
return self.postgres_store.runInteraction("setup_state_group_id_seq", r)
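
Restarting the sequence matters because rows were copied over with their original IDs, so a freshly created state_group_id_seq would otherwise hand out IDs that already exist. A standalone equivalent, assuming a plain psycopg2 connection:

    def setup_state_group_id_seq(conn):
        with conn.cursor() as txn:
            txn.execute("SELECT MAX(id) FROM state_groups")
            next_id = txn.fetchone()[0] + 1
            # psycopg2 interpolates parameters client-side, which is why
            # %s works here even though ALTER SEQUENCE is DDL.
            txn.execute(
                "ALTER SEQUENCE state_group_id_seq RESTART WITH %s", (next_id,)
            )
        conn.commit()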
##############################################
|
||||
###### The following is simply UI stuff ######
|
||||
# The following is simply UI stuff
|
||||
##############################################
|
||||
|
||||
|
||||
class Progress(object):
|
||||
"""Used to report progress of the port
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.tables = {}
|
||||
|
||||
@@ -758,6 +740,7 @@ class Progress(object):
|
||||
class CursesProgress(Progress):
|
||||
"""Reports progress to a curses window
|
||||
"""
|
||||
|
||||
def __init__(self, stdscr):
|
||||
self.stdscr = stdscr
|
||||
|
||||
@@ -801,7 +784,7 @@ class CursesProgress(Progress):
|
||||
duration = int(now) - int(self.start_time)
|
||||
|
||||
minutes, seconds = divmod(duration, 60)
|
||||
duration_str = '%02dm %02ds' % (minutes, seconds,)
|
||||
duration_str = '%02dm %02ds' % (minutes, seconds)
|
||||
|
||||
if self.finished:
|
||||
status = "Time spent: %s (Done!)" % (duration_str,)
|
||||
@@ -814,16 +797,12 @@ class CursesProgress(Progress):
|
||||
est_remaining_str = '%02dm %02ds remaining' % divmod(est_remaining, 60)
|
||||
else:
|
||||
est_remaining_str = "Unknown"
|
||||
status = (
|
||||
"Time spent: %s (est. remaining: %s)"
|
||||
% (duration_str, est_remaining_str,)
|
||||
status = "Time spent: %s (est. remaining: %s)" % (
|
||||
duration_str,
|
||||
est_remaining_str,
|
||||
)
|
||||
|
||||
self.stdscr.addstr(
|
||||
0, 0,
|
||||
status,
|
||||
curses.A_BOLD,
|
||||
)
|
||||
self.stdscr.addstr(0, 0, status, curses.A_BOLD)
|
||||
|
||||
max_len = max([len(t) for t in self.tables.keys()])
|
||||
|
||||
@@ -831,9 +810,7 @@ class CursesProgress(Progress):
|
||||
middle_space = 1
|
||||
|
||||
items = self.tables.items()
|
||||
items.sort(
|
||||
key=lambda i: (i[1]["perc"], i[0]),
|
||||
)
|
||||
items.sort(key=lambda i: (i[1]["perc"], i[0]))
|
||||
|
||||
for i, (table, data) in enumerate(items):
|
||||
if i + 2 >= rows:
|
||||
@@ -844,9 +821,7 @@ class CursesProgress(Progress):
|
||||
color = curses.color_pair(2) if perc == 100 else curses.color_pair(1)
|
||||
|
||||
self.stdscr.addstr(
|
||||
i + 2, left_margin + max_len - len(table),
|
||||
table,
|
||||
curses.A_BOLD | color,
|
||||
i + 2, left_margin + max_len - len(table), table, curses.A_BOLD | color
|
||||
)
|
||||
|
||||
size = 20
|
||||
@@ -857,15 +832,13 @@ class CursesProgress(Progress):
|
||||
)
|
||||
|
||||
self.stdscr.addstr(
|
||||
i + 2, left_margin + max_len + middle_space,
|
||||
i + 2,
|
||||
left_margin + max_len + middle_space,
|
||||
"%s %3d%% (%d/%d)" % (progress, perc, data["num_done"], data["total"]),
|
||||
)
|
||||
|
||||
if self.finished:
|
||||
self.stdscr.addstr(
|
||||
rows - 1, 0,
|
||||
"Press any key to exit...",
|
||||
)
|
||||
self.stdscr.addstr(rows - 1, 0, "Press any key to exit...")
|
||||
|
||||
self.stdscr.refresh()
|
||||
self.last_update = time.time()
|
||||
@@ -877,29 +850,25 @@ class CursesProgress(Progress):
|
||||
|
||||
def set_state(self, state):
|
||||
self.stdscr.clear()
|
||||
self.stdscr.addstr(
|
||||
0, 0,
|
||||
state + "...",
|
||||
curses.A_BOLD,
|
||||
)
|
||||
self.stdscr.addstr(0, 0, state + "...", curses.A_BOLD)
|
||||
self.stdscr.refresh()
|
||||
|
||||
|
||||
class TerminalProgress(Progress):
|
||||
"""Just prints progress to the terminal
|
||||
"""
|
||||
|
||||
def update(self, table, num_done):
|
||||
super(TerminalProgress, self).update(table, num_done)
|
||||
|
||||
data = self.tables[table]
|
||||
|
||||
print "%s: %d%% (%d/%d)" % (
|
||||
table, data["perc"],
|
||||
data["num_done"], data["total"],
|
||||
print(
|
||||
"%s: %d%% (%d/%d)" % (table, data["perc"], data["num_done"], data["total"])
|
||||
)
|
||||
|
||||
def set_state(self, state):
|
||||
print state + "..."
|
||||
print(state + "...")
|
||||
|
||||
|
||||
##############################################
|
||||
@@ -909,34 +878,38 @@ class TerminalProgress(Progress):
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(
|
||||
description="A script to port an existing synapse SQLite database to"
|
||||
" a new PostgreSQL database."
|
||||
" a new PostgreSQL database."
|
||||
)
|
||||
parser.add_argument("-v", action='store_true')
|
||||
parser.add_argument(
|
||||
"--sqlite-database", required=True,
|
||||
"--sqlite-database",
|
||||
required=True,
|
||||
help="The snapshot of the SQLite database file. This must not be"
|
||||
" currently used by a running synapse server"
|
||||
" currently used by a running synapse server",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--postgres-config", type=argparse.FileType('r'), required=True,
|
||||
help="The database config file for the PostgreSQL database"
|
||||
"--postgres-config",
|
||||
type=argparse.FileType('r'),
|
||||
required=True,
|
||||
help="The database config file for the PostgreSQL database",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--curses", action='store_true',
|
||||
help="display a curses based progress UI"
|
||||
"--curses", action='store_true', help="display a curses based progress UI"
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--batch-size", type=int, default=1000,
|
||||
"--batch-size",
|
||||
type=int,
|
||||
default=1000,
|
||||
help="The number of rows to select from the SQLite table each"
|
||||
" iteration [default=1000]",
|
||||
" iteration [default=1000]",
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
logging_config = {
|
||||
"level": logging.DEBUG if args.v else logging.INFO,
|
||||
"format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s"
|
||||
"format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
|
||||
}
|
||||
|
||||
if args.curses:
|
||||
|
||||
17
setup.cfg
@@ -14,17 +14,16 @@ ignore =
|
||||
pylint.cfg
|
||||
tox.ini
|
||||
|
||||
[pep8]
|
||||
max-line-length = 90
|
||||
# W503 requires that binary operators be at the end, not start, of lines. Erik
|
||||
# doesn't like it. E203 is contrary to PEP8. E731 is silly.
|
||||
ignore = W503,E203,E731
|
||||
|
||||
[flake8]
|
||||
# note that flake8 inherits the "ignore" settings from "pep8" (because it uses
|
||||
# pep8 to do those checks), but not the "max-line-length" setting
|
||||
max-line-length = 90
|
||||
ignore=W503,E203,E731
|
||||
|
||||
# see https://pycodestyle.readthedocs.io/en/latest/intro.html#error-codes
|
||||
# for error codes. The ones we ignore are:
|
||||
# W503: line break before binary operator
|
||||
# W504: line break after binary operator
|
||||
# E203: whitespace before ':' (which is contrary to pep8?)
|
||||
# E731: do not assign a lambda expression, use a def
|
||||
ignore=W503,W504,E203,E731
|
||||
|
||||
[isort]
|
||||
line_length = 89
|
||||
|
||||
6
setup.py
@@ -1,6 +1,8 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
# Copyright 2014-2017 OpenMarket Ltd
|
||||
# Copyright 2017 Vector Creations Ltd
|
||||
# Copyright 2017-2018 New Vector Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -86,7 +88,7 @@ setup(
|
||||
name="matrix-synapse",
|
||||
version=version,
|
||||
packages=find_packages(exclude=["tests", "tests.*"]),
|
||||
description="Reference Synapse Home Server",
|
||||
description="Reference homeserver for the Matrix decentralised comms protocol",
|
||||
install_requires=dependencies['requirements'](include_conditional=True).keys(),
|
||||
dependency_links=dependencies["DEPENDENCY_LINKS"].values(),
|
||||
include_package_data=True,
|
||||
|
||||
@@ -27,4 +27,4 @@ try:
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
__version__ = "0.33.6"
|
||||
__version__ = "0.33.9"
|
||||
|
||||
0
synapse/_scripts/__init__.py
Normal file
215
synapse/_scripts/register_new_matrix_user.py
Normal file
@@ -0,0 +1,215 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2015, 2016 OpenMarket Ltd
|
||||
# Copyright 2018 New Vector
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import argparse
|
||||
import getpass
|
||||
import hashlib
|
||||
import hmac
|
||||
import logging
|
||||
import sys
|
||||
|
||||
from six.moves import input
|
||||
|
||||
import requests as _requests
|
||||
import yaml
|
||||
|
||||
|
||||
def request_registration(
|
||||
user,
|
||||
password,
|
||||
server_location,
|
||||
shared_secret,
|
||||
admin=False,
|
||||
requests=_requests,
|
||||
_print=print,
|
||||
exit=sys.exit,
|
||||
):
|
||||
|
||||
url = "%s/_matrix/client/r0/admin/register" % (server_location,)
|
||||
|
||||
# Get the nonce
|
||||
r = requests.get(url, verify=False)
|
||||
|
||||
if r.status_code != 200:
|
||||
_print("ERROR! Received %d %s" % (r.status_code, r.reason))
|
||||
if 400 <= r.status_code < 500:
|
||||
try:
|
||||
_print(r.json()["error"])
|
||||
except Exception:
|
||||
pass
|
||||
return exit(1)
|
||||
|
||||
nonce = r.json()["nonce"]
|
||||
|
||||
mac = hmac.new(key=shared_secret.encode('utf8'), digestmod=hashlib.sha1)
|
||||
|
||||
mac.update(nonce.encode('utf8'))
|
||||
mac.update(b"\x00")
|
||||
mac.update(user.encode('utf8'))
|
||||
mac.update(b"\x00")
|
||||
mac.update(password.encode('utf8'))
|
||||
mac.update(b"\x00")
|
||||
mac.update(b"admin" if admin else b"notadmin")
|
||||
|
||||
mac = mac.hexdigest()
|
||||
|
||||
data = {
|
||||
"nonce": nonce,
|
||||
"username": user,
|
||||
"password": password,
|
||||
"mac": mac,
|
||||
"admin": admin,
|
||||
}
|
||||
|
||||
_print("Sending registration request...")
|
||||
r = requests.post(url, json=data, verify=False)
|
||||
|
||||
if r.status_code != 200:
|
||||
_print("ERROR! Received %d %s" % (r.status_code, r.reason))
|
||||
if 400 <= r.status_code < 500:
|
||||
try:
|
||||
_print(r.json()["error"])
|
||||
except Exception:
|
||||
pass
|
||||
return exit(1)
|
||||
|
||||
_print("Success!")
def register_new_user(user, password, server_location, shared_secret, admin):
|
||||
if not user:
|
||||
try:
|
||||
default_user = getpass.getuser()
|
||||
except Exception:
|
||||
default_user = None
|
||||
|
||||
if default_user:
|
||||
user = input("New user localpart [%s]: " % (default_user,))
|
||||
if not user:
|
||||
user = default_user
|
||||
else:
|
||||
user = input("New user localpart: ")
|
||||
|
||||
if not user:
|
||||
print("Invalid user name")
|
||||
sys.exit(1)
|
||||
|
||||
if not password:
|
||||
password = getpass.getpass("Password: ")
|
||||
|
||||
if not password:
|
||||
print("Password cannot be blank.")
|
||||
sys.exit(1)
|
||||
|
||||
confirm_password = getpass.getpass("Confirm password: ")
|
||||
|
||||
if password != confirm_password:
|
||||
print("Passwords do not match")
|
||||
sys.exit(1)
|
||||
|
||||
if admin is None:
|
||||
admin = input("Make admin [no]: ")
|
||||
if admin in ("y", "yes", "true"):
|
||||
admin = True
|
||||
else:
|
||||
admin = False
|
||||
|
||||
request_registration(user, password, server_location, shared_secret, bool(admin))
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
logging.captureWarnings(True)
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Used to register new users with a given home server when"
|
||||
" registration has been disabled. The home server must be"
|
||||
" configured with the 'registration_shared_secret' option"
|
||||
" set."
|
||||
)
|
||||
parser.add_argument(
|
||||
"-u",
|
||||
"--user",
|
||||
default=None,
|
||||
help="Local part of the new user. Will prompt if omitted.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-p",
|
||||
"--password",
|
||||
default=None,
|
||||
help="New password for user. Will prompt if omitted.",
|
||||
)
|
||||
admin_group = parser.add_mutually_exclusive_group()
|
||||
admin_group.add_argument(
|
||||
"-a",
|
||||
"--admin",
|
||||
action="store_true",
|
||||
help=(
|
||||
"Register new user as an admin. "
|
||||
"Will prompt if --no-admin is not set either."
|
||||
),
|
||||
)
|
||||
admin_group.add_argument(
|
||||
"--no-admin",
|
||||
action="store_true",
|
||||
help=(
|
||||
"Register new user as a regular user. "
|
||||
"Will prompt if --admin is not set either."
|
||||
),
|
||||
)
|
||||
|
||||
group = parser.add_mutually_exclusive_group(required=True)
|
||||
group.add_argument(
|
||||
"-c",
|
||||
"--config",
|
||||
type=argparse.FileType('r'),
|
||||
help="Path to server config file. Used to read in shared secret.",
|
||||
)
|
||||
|
||||
group.add_argument(
|
||||
"-k", "--shared-secret", help="Shared secret as defined in server config file."
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"server_url",
|
||||
default="https://localhost:8448",
|
||||
nargs='?',
|
||||
help="URL to use to talk to the home server. Defaults to "
|
||||
" 'https://localhost:8448'.",
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
if "config" in args and args.config:
|
||||
config = yaml.safe_load(args.config)
|
||||
secret = config.get("registration_shared_secret", None)
|
||||
if not secret:
|
||||
print("No 'registration_shared_secret' defined in config.")
|
||||
sys.exit(1)
|
||||
else:
|
||||
secret = args.shared_secret
|
||||
|
||||
admin = None
|
||||
if args.admin or args.no_admin:
|
||||
admin = args.admin
|
||||
|
||||
register_new_user(args.user, args.password, args.server_url, secret, admin)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -51,6 +51,7 @@ class LoginType(object):
|
||||
EMAIL_IDENTITY = u"m.login.email.identity"
|
||||
MSISDN = u"m.login.msisdn"
|
||||
RECAPTCHA = u"m.login.recaptcha"
|
||||
TERMS = u"m.login.terms"
|
||||
DUMMY = u"m.login.dummy"
|
||||
|
||||
# Only for C/S API v1
|
||||
@@ -61,6 +62,7 @@ class LoginType(object):
|
||||
class EventTypes(object):
|
||||
Member = "m.room.member"
|
||||
Create = "m.room.create"
|
||||
Tombstone = "m.room.tombstone"
|
||||
JoinRules = "m.room.join_rules"
|
||||
PowerLevels = "m.room.power_levels"
|
||||
Aliases = "m.room.aliases"
|
||||
@@ -101,6 +103,7 @@ class ThirdPartyEntityKind(object):
|
||||
class RoomVersions(object):
|
||||
V1 = "1"
|
||||
VDH_TEST = "vdh-test-version"
|
||||
STATE_V2_TEST = "state-v2-test"
|
||||
|
||||
|
||||
# the version we will give rooms which are created on this server
|
||||
@@ -108,7 +111,11 @@ DEFAULT_ROOM_VERSION = RoomVersions.V1
|
||||
|
||||
# vdh-test-version is a placeholder to get room versioning support working and tested
|
||||
# until we have a working v2.
|
||||
KNOWN_ROOM_VERSIONS = {RoomVersions.V1, RoomVersions.VDH_TEST}
|
||||
KNOWN_ROOM_VERSIONS = {
|
||||
RoomVersions.V1,
|
||||
RoomVersions.VDH_TEST,
|
||||
RoomVersions.STATE_V2_TEST,
|
||||
}
|
||||
|
||||
ServerNoticeMsgType = "m.server_notice"
|
||||
ServerNoticeLimitReached = "m.server_notice.usage_limit_reached"
|
||||
|
||||
@@ -59,6 +59,7 @@ class Codes(object):
|
||||
RESOURCE_LIMIT_EXCEEDED = "M_RESOURCE_LIMIT_EXCEEDED"
|
||||
UNSUPPORTED_ROOM_VERSION = "M_UNSUPPORTED_ROOM_VERSION"
|
||||
INCOMPATIBLE_ROOM_VERSION = "M_INCOMPATIBLE_ROOM_VERSION"
|
||||
WRONG_ROOM_KEYS_VERSION = "M_WRONG_ROOM_KEYS_VERSION"
|
||||
|
||||
|
||||
class CodeMessageException(RuntimeError):
|
||||
@@ -312,6 +313,20 @@ class LimitExceededError(SynapseError):
|
||||
)
|
||||
|
||||
|
||||
class RoomKeysVersionError(SynapseError):
|
||||
"""A client has tried to upload to a non-current version of the room_keys store
|
||||
"""
|
||||
def __init__(self, current_version):
|
||||
"""
|
||||
Args:
|
||||
current_version (str): the current version of the store they should have used
|
||||
"""
|
||||
super(RoomKeysVersionError, self).__init__(
|
||||
403, "Wrong room_keys version", Codes.WRONG_ROOM_KEYS_VERSION
|
||||
)
|
||||
self.current_version = current_version
|
||||
|
||||
|
||||
class IncompatibleRoomVersionError(SynapseError):
|
||||
"""A server is trying to join a room whose version it does not support."""
|
||||
|
||||
|
||||
@@ -172,7 +172,10 @@ USER_FILTER_SCHEMA = {
|
||||
# events a lot easier as we can then use a negative lookbehind
|
||||
# assertion to split '\.' If we allowed \\ then it would
|
||||
# incorrectly split '\\.' See synapse.events.utils.serialize_event
|
||||
"pattern": "^((?!\\\).)*$"
|
||||
#
|
||||
# Note that because this is a regular expression, we have to escape
|
||||
# each backslash in the pattern.
|
||||
"pattern": r"^((?!\\\\).)*$"
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
@@ -28,7 +28,6 @@ FEDERATION_PREFIX = "/_matrix/federation/v1"
|
||||
STATIC_PREFIX = "/_matrix/static"
|
||||
WEB_CLIENT_PREFIX = "/_matrix/client"
|
||||
CONTENT_REPO_PREFIX = "/_matrix/content"
|
||||
SERVER_KEY_PREFIX = "/_matrix/key/v1"
|
||||
SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
|
||||
MEDIA_PREFIX = "/_matrix/media/r0"
|
||||
LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"
|
||||
|
||||
@@ -17,6 +17,7 @@ import gc
|
||||
import logging
|
||||
import sys
|
||||
|
||||
import psutil
|
||||
from daemonize import Daemonize
|
||||
|
||||
from twisted.internet import error, reactor
|
||||
@@ -24,12 +25,6 @@ from twisted.internet import error, reactor
|
||||
from synapse.util import PreserveLoggingContext
|
||||
from synapse.util.rlimit import change_resource_limit
|
||||
|
||||
try:
|
||||
import affinity
|
||||
except Exception:
|
||||
affinity = None
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@@ -89,15 +84,20 @@ def start_reactor(
|
||||
with PreserveLoggingContext():
|
||||
logger.info("Running")
|
||||
if cpu_affinity is not None:
|
||||
if not affinity:
|
||||
quit_with_error(
|
||||
"Missing package 'affinity' required for cpu_affinity\n"
|
||||
"option\n\n"
|
||||
"Install by running:\n\n"
|
||||
" pip install affinity\n\n"
|
||||
)
|
||||
logger.info("Setting CPU affinity to %s" % cpu_affinity)
|
||||
affinity.set_process_affinity_mask(0, cpu_affinity)
|
||||
# Turn the bitmask into bits, reverse it so we go from 0 up
|
||||
mask_to_bits = bin(cpu_affinity)[2:][::-1]
|
||||
|
||||
cpus = []
|
||||
cpu_num = 0
|
||||
|
||||
for i in mask_to_bits:
|
||||
if i == "1":
|
||||
cpus.append(cpu_num)
|
||||
cpu_num += 1
|
||||
|
||||
p = psutil.Process()
|
||||
p.cpu_affinity(cpus)
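
This hunk replaces the unmaintained affinity package with psutil, whose cpu_affinity() takes a list of CPU indices rather than a bitmask; the bin()/reverse trick above does the conversion. For example, a mask of 0x5 (binary 101) should pin the process to CPUs 0 and 2:

    def mask_to_cpus(cpu_affinity):
        # bin(5) == '0b101'; drop the prefix and reverse the string so
        # that string index 0 corresponds to CPU 0.
        bits = bin(cpu_affinity)[2:][::-1]
        return [cpu_num for cpu_num, bit in enumerate(bits) if bit == "1"]

    assert mask_to_cpus(0x5) == [0, 2]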
change_resource_limit(soft_file_limit)
|
||||
if gc_thresholds:
|
||||
gc.set_threshold(*gc_thresholds)
|
||||
|
||||
@@ -178,6 +178,9 @@ def start(config_options):
|
||||
|
||||
setup_logging(config, use_worker_options=True)
|
||||
|
||||
# This should only be done on the user directory worker or the master
|
||||
config.update_user_directory = False
|
||||
|
||||
events.USE_FROZEN_DICTS = config.use_frozen_dicts
|
||||
|
||||
database_engine = create_engine(config.database_config)
|
||||
|
||||
@@ -68,7 +68,7 @@ class PresenceStatusStubServlet(ClientV1RestServlet):
|
||||
"Authorization": auth_headers,
|
||||
}
|
||||
result = yield self.http_client.get_json(
|
||||
self.main_uri + request.uri,
|
||||
self.main_uri + request.uri.decode('ascii'),
|
||||
headers=headers,
|
||||
)
|
||||
defer.returnValue((200, result))
|
||||
@@ -125,7 +125,7 @@ class KeyUploadServlet(RestServlet):
|
||||
"Authorization": auth_headers,
|
||||
}
|
||||
result = yield self.http_client.post_json_get_json(
|
||||
self.main_uri + request.uri,
|
||||
self.main_uri + request.uri.decode('ascii'),
|
||||
body,
|
||||
headers=headers,
|
||||
)
|
||||
|
||||
@@ -20,6 +20,7 @@ import sys
|
||||
|
||||
from six import iteritems
|
||||
|
||||
import psutil
|
||||
from prometheus_client import Gauge
|
||||
|
||||
from twisted.application import service
|
||||
@@ -36,7 +37,6 @@ from synapse.api.urls import (
|
||||
FEDERATION_PREFIX,
|
||||
LEGACY_MEDIA_PREFIX,
|
||||
MEDIA_PREFIX,
|
||||
SERVER_KEY_PREFIX,
|
||||
SERVER_KEY_V2_PREFIX,
|
||||
STATIC_PREFIX,
|
||||
WEB_CLIENT_PREFIX,
|
||||
@@ -58,7 +58,6 @@ from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, check_requirem
|
||||
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
|
||||
from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
|
||||
from synapse.rest import ClientRestResource
|
||||
from synapse.rest.key.v1.server_key_resource import LocalKey
|
||||
from synapse.rest.key.v2 import KeyApiV2Resource
|
||||
from synapse.rest.media.v0.content_repository import ContentRepoResource
|
||||
from synapse.server import HomeServer
|
||||
@@ -235,10 +234,7 @@ class SynapseHomeServer(HomeServer):
|
||||
)
|
||||
|
||||
if name in ["keys", "federation"]:
|
||||
resources.update({
|
||||
SERVER_KEY_PREFIX: LocalKey(self),
|
||||
SERVER_KEY_V2_PREFIX: KeyApiV2Resource(self),
|
||||
})
|
||||
resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
|
||||
|
||||
if name == "webclient":
|
||||
resources[WEB_CLIENT_PREFIX] = build_resource_for_web_client(self)
|
||||
@@ -502,7 +498,6 @@ def run(hs):
|
||||
|
||||
def performance_stats_init():
|
||||
try:
|
||||
import psutil
|
||||
process = psutil.Process()
|
||||
# Ensure we can fetch both, and make the initial request for cpu_percent
|
||||
# so the next request will use this as the initial point.
|
||||
@@ -510,12 +505,9 @@ def run(hs):
|
||||
process.cpu_percent(interval=None)
|
||||
logger.info("report_stats can use psutil")
|
||||
stats_process.append(process)
|
||||
except (ImportError, AttributeError):
|
||||
logger.warn(
|
||||
"report_stats enabled but psutil is not installed or incorrect version."
|
||||
" Disabling reporting of memory/cpu stats."
|
||||
" Ensuring psutil is available will help matrix.org track performance"
|
||||
" changes across releases."
|
||||
except (AttributeError):
|
||||
logger.warning(
|
||||
"Unable to read memory/cpu stats. Disabling reporting."
|
||||
)
|
||||
|
||||
def generate_user_daily_visit_stats():
|
||||
@@ -530,29 +522,35 @@ def run(hs):
|
||||
clock.looping_call(generate_user_daily_visit_stats, 5 * 60 * 1000)
|
||||
|
||||
# monthly active user limiting functionality
|
||||
clock.looping_call(
|
||||
hs.get_datastore().reap_monthly_active_users, 1000 * 60 * 60
|
||||
)
|
||||
hs.get_datastore().reap_monthly_active_users()
|
||||
def reap_monthly_active_users():
|
||||
return run_as_background_process(
|
||||
"reap_monthly_active_users",
|
||||
hs.get_datastore().reap_monthly_active_users,
|
||||
)
|
||||
clock.looping_call(reap_monthly_active_users, 1000 * 60 * 60)
|
||||
reap_monthly_active_users()
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def generate_monthly_active_users():
|
||||
current_mau_count = 0
|
||||
reserved_count = 0
|
||||
store = hs.get_datastore()
|
||||
if hs.config.limit_usage_by_mau:
|
||||
if hs.config.limit_usage_by_mau or hs.config.mau_stats_only:
|
||||
current_mau_count = yield store.get_monthly_active_count()
|
||||
reserved_count = yield store.get_registered_reserved_users_count()
|
||||
current_mau_gauge.set(float(current_mau_count))
|
||||
registered_reserved_users_mau_gauge.set(float(reserved_count))
|
||||
max_mau_gauge.set(float(hs.config.max_mau_value))
|
||||
|
||||
hs.get_datastore().initialise_reserved_users(
|
||||
hs.config.mau_limits_reserved_threepids
|
||||
)
|
||||
generate_monthly_active_users()
|
||||
def start_generate_monthly_active_users():
|
||||
return run_as_background_process(
|
||||
"generate_monthly_active_users",
|
||||
generate_monthly_active_users,
|
||||
)
|
||||
|
||||
start_generate_monthly_active_users()
|
||||
if hs.config.limit_usage_by_mau:
|
||||
clock.looping_call(generate_monthly_active_users, 5 * 60 * 1000)
|
||||
clock.looping_call(start_generate_monthly_active_users, 5 * 60 * 1000)
|
||||
# End of monthly active user settings
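
The shape of both changes above is the same: a bare function handed to clock.looping_call gets wrapped so each run goes through run_as_background_process, which gives the task its own logcontext and metrics. Schematically (make_periodic is a hypothetical helper, not part of this diff):

    from synapse.metrics.background_process_metrics import run_as_background_process

    def make_periodic(clock, desc, func, interval_ms):
        def tick():
            # Each tick runs under run_as_background_process, so it is
            # tracked in metrics and logged with its own context.
            return run_as_background_process(desc, func)

        clock.looping_call(tick, interval_ms)
        tick()  # also fire once at startup, as the MAU code above does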
if hs.config.report_stats:
|
||||
@@ -568,7 +566,7 @@ def run(hs):
|
||||
clock.call_later(5 * 60, start_phone_stats_home)
|
||||
|
||||
if hs.config.daemonize and hs.config.print_pidfile:
|
||||
print (hs.config.pid_file)
|
||||
print(hs.config.pid_file)
|
||||
|
||||
_base.start_reactor(
|
||||
"synapse-homeserver",
|
||||
|
||||
@@ -28,6 +28,7 @@ from synapse.config.logger import setup_logging
|
||||
from synapse.http.site import SynapseSite
|
||||
from synapse.metrics import RegistryProxy
|
||||
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
||||
from synapse.replication.slave.storage._base import __func__
|
||||
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
|
||||
from synapse.replication.slave.storage.events import SlavedEventStore
|
||||
from synapse.replication.slave.storage.pushers import SlavedPusherStore
|
||||
@@ -49,31 +50,31 @@ class PusherSlaveStore(
|
||||
SlavedAccountDataStore
|
||||
):
|
||||
update_pusher_last_stream_ordering_and_success = (
|
||||
DataStore.update_pusher_last_stream_ordering_and_success.__func__
|
||||
__func__(DataStore.update_pusher_last_stream_ordering_and_success)
|
||||
)
|
||||
|
||||
update_pusher_failing_since = (
|
||||
DataStore.update_pusher_failing_since.__func__
|
||||
__func__(DataStore.update_pusher_failing_since)
|
||||
)
|
||||
|
||||
update_pusher_last_stream_ordering = (
|
||||
DataStore.update_pusher_last_stream_ordering.__func__
|
||||
__func__(DataStore.update_pusher_last_stream_ordering)
|
||||
)
|
||||
|
||||
get_throttle_params_by_room = (
|
||||
DataStore.get_throttle_params_by_room.__func__
|
||||
__func__(DataStore.get_throttle_params_by_room)
|
||||
)
|
||||
|
||||
set_throttle_params = (
|
||||
DataStore.set_throttle_params.__func__
|
||||
__func__(DataStore.set_throttle_params)
|
||||
)
|
||||
|
||||
get_time_of_last_push_action_before = (
|
||||
DataStore.get_time_of_last_push_action_before.__func__
|
||||
__func__(DataStore.get_time_of_last_push_action_before)
|
||||
)
|
||||
|
||||
get_profile_displayname = (
|
||||
DataStore.get_profile_displayname.__func__
|
||||
__func__(DataStore.get_profile_displayname)
|
||||
)
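
These rewrites are Python 3 preparation: on Python 2 a class attribute like DataStore.set_throttle_params is an unbound method whose real function lives in .__func__, while on Python 3 it is already a plain function. The __func__ helper imported from _base presumably reduces to the usual six-style shim:

    import six

    def __func__(inp):
        # Py3 class attributes are plain functions already; Py2 unbound
        # methods wrap the real function in .__func__.
        if six.PY3:
            return inp
        else:
            return inp.__func__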
@@ -160,11 +161,11 @@ class PusherReplicationHandler(ReplicationClientHandler):
|
||||
else:
|
||||
yield self.start_pusher(row.user_id, row.app_id, row.pushkey)
|
||||
elif stream_name == "events":
|
||||
self.pusher_pool.on_new_notifications(
|
||||
yield self.pusher_pool.on_new_notifications(
|
||||
token, token,
|
||||
)
|
||||
elif stream_name == "receipts":
|
||||
self.pusher_pool.on_new_receipts(
|
||||
yield self.pusher_pool.on_new_receipts(
|
||||
token, token, set(row.room_id for row in rows)
|
||||
)
|
||||
except Exception:
|
||||
@@ -182,7 +183,7 @@ class PusherReplicationHandler(ReplicationClientHandler):
|
||||
def start_pusher(self, user_id, app_id, pushkey):
|
||||
key = "%s:%s" % (app_id, pushkey)
|
||||
logger.info("Starting pusher %r / %r", user_id, key)
|
||||
return self.pusher_pool._refresh_pusher(app_id, pushkey, user_id)
|
||||
return self.pusher_pool.start_pusher_by_id(app_id, pushkey, user_id)
|
||||
|
||||
|
||||
def start(config_options):
|
||||
|
||||
@@ -33,7 +33,7 @@ from synapse.http.server import JsonResource
|
||||
from synapse.http.site import SynapseSite
|
||||
from synapse.metrics import RegistryProxy
|
||||
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
||||
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||
from synapse.replication.slave.storage._base import BaseSlavedStore, __func__
|
||||
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
|
||||
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
||||
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
|
||||
@@ -147,7 +147,7 @@ class SynchrotronPresence(object):
|
||||
and haven't come back yet. If there are, poke the master about them.
|
||||
"""
|
||||
now = self.clock.time_msec()
|
||||
for user_id, last_sync_ms in self.users_going_offline.items():
|
||||
for user_id, last_sync_ms in list(self.users_going_offline.items()):
|
||||
if now - last_sync_ms > 10 * 1000:
|
||||
self.users_going_offline.pop(user_id, None)
|
||||
self.send_user_sync(user_id, False, last_sync_ms)
|
||||
@@ -156,9 +156,9 @@ class SynchrotronPresence(object):
|
||||
# TODO How's this supposed to work?
|
||||
pass
|
||||
|
||||
get_states = PresenceHandler.get_states.__func__
|
||||
get_state = PresenceHandler.get_state.__func__
|
||||
current_state_for_users = PresenceHandler.current_state_for_users.__func__
|
||||
get_states = __func__(PresenceHandler.get_states)
|
||||
get_state = __func__(PresenceHandler.get_state)
|
||||
current_state_for_users = __func__(PresenceHandler.current_state_for_users)
|
||||
|
||||
def user_syncing(self, user_id, affect_presence):
|
||||
if affect_presence:
|
||||
@@ -208,7 +208,7 @@ class SynchrotronPresence(object):
|
||||
) for row in rows]
|
||||
|
||||
for state in states:
|
||||
self.user_to_current_state[row.user_id] = state
|
||||
self.user_to_current_state[state.user_id] = state
|
||||
|
||||
stream_id = token
|
||||
yield self.notify_from_replication(states, stream_id)
|
||||
@@ -226,7 +226,15 @@ class SynchrotronPresence(object):
|
||||
class SynchrotronTyping(object):
|
||||
def __init__(self, hs):
|
||||
self._latest_room_serial = 0
|
||||
self._reset()
|
||||
|
||||
def _reset(self):
|
||||
"""
|
||||
Reset the typing handler's data caches.
|
||||
"""
|
||||
# map room IDs to serial numbers
|
||||
self._room_serials = {}
|
||||
# map room IDs to sets of users currently typing
|
||||
self._room_typing = {}
|
||||
|
||||
def stream_positions(self):
|
||||
@@ -236,6 +244,12 @@ class SynchrotronTyping(object):
|
||||
return {"typing": self._latest_room_serial}
|
||||
|
||||
def process_replication_rows(self, token, rows):
|
||||
if self._latest_room_serial > token:
|
||||
# The master has gone backwards. To prevent inconsistent data, just
|
||||
# clear everything.
|
||||
self._reset()
|
||||
|
||||
# Set the latest serial token to whatever the server gave us.
|
||||
self._latest_room_serial = token
|
||||
|
||||
for row in rows:
|
||||
|
||||
@@ -28,7 +28,7 @@ if __name__ == "__main__":
|
||||
sys.stderr.write("\n" + str(e) + "\n")
|
||||
sys.exit(1)
|
||||
|
||||
print (getattr(config, key))
|
||||
print(getattr(config, key))
|
||||
sys.exit(0)
|
||||
else:
|
||||
sys.stderr.write("Unknown command %r\n" % (action,))
|
||||
|
||||
@@ -106,10 +106,7 @@ class Config(object):
|
||||
@classmethod
|
||||
def check_file(cls, file_path, config_name):
|
||||
if file_path is None:
|
||||
raise ConfigError(
|
||||
"Missing config for %s."
|
||||
% (config_name,)
|
||||
)
|
||||
raise ConfigError("Missing config for %s." % (config_name,))
|
||||
try:
|
||||
os.stat(file_path)
|
||||
except OSError as e:
|
||||
@@ -128,9 +125,7 @@ class Config(object):
|
||||
if e.errno != errno.EEXIST:
|
||||
raise
|
||||
if not os.path.isdir(dir_path):
|
||||
raise ConfigError(
|
||||
"%s is not a directory" % (dir_path,)
|
||||
)
|
||||
raise ConfigError("%s is not a directory" % (dir_path,))
|
||||
return dir_path
|
||||
|
||||
@classmethod
|
||||
@@ -156,21 +151,20 @@ class Config(object):
|
||||
return results
|
||||
|
||||
def generate_config(
|
||||
self,
|
||||
config_dir_path,
|
||||
server_name,
|
||||
is_generating_file,
|
||||
report_stats=None,
|
||||
self, config_dir_path, server_name, is_generating_file, report_stats=None
|
||||
):
|
||||
default_config = "# vim:ft=yaml\n"
|
||||
|
||||
default_config += "\n\n".join(dedent(conf) for conf in self.invoke_all(
|
||||
"default_config",
|
||||
config_dir_path=config_dir_path,
|
||||
server_name=server_name,
|
||||
is_generating_file=is_generating_file,
|
||||
report_stats=report_stats,
|
||||
))
|
||||
default_config += "\n\n".join(
|
||||
dedent(conf)
|
||||
for conf in self.invoke_all(
|
||||
"default_config",
|
||||
config_dir_path=config_dir_path,
|
||||
server_name=server_name,
|
||||
is_generating_file=is_generating_file,
|
||||
report_stats=report_stats,
|
||||
)
|
||||
)
|
||||
|
||||
config = yaml.load(default_config)
|
||||
|
||||
@@ -178,23 +172,22 @@ class Config(object):
|
||||
|
||||
@classmethod
|
||||
def load_config(cls, description, argv):
|
||||
config_parser = argparse.ArgumentParser(
|
||||
description=description,
|
||||
)
|
||||
config_parser = argparse.ArgumentParser(description=description)
|
||||
config_parser.add_argument(
|
||||
"-c", "--config-path",
|
||||
"-c",
|
||||
"--config-path",
|
||||
action="append",
|
||||
metavar="CONFIG_FILE",
|
||||
help="Specify config file. Can be given multiple times and"
|
||||
" may specify directories containing *.yaml files."
|
||||
" may specify directories containing *.yaml files.",
|
||||
)
|
||||
|
||||
config_parser.add_argument(
|
||||
"--keys-directory",
|
||||
metavar="DIRECTORY",
|
||||
help="Where files such as certs and signing keys are stored when"
|
||||
" their location is given explicitly in the config."
|
||||
" Defaults to the directory containing the last config file",
|
||||
" their location is given explicitly in the config."
|
||||
" Defaults to the directory containing the last config file",
|
||||
)
|
||||
|
||||
config_args = config_parser.parse_args(argv)
|
||||
@@ -203,9 +196,7 @@ class Config(object):
|
||||
|
||||
obj = cls()
|
||||
obj.read_config_files(
|
||||
config_files,
|
||||
keys_directory=config_args.keys_directory,
|
||||
generate_keys=False,
|
||||
config_files, keys_directory=config_args.keys_directory, generate_keys=False
|
||||
)
|
||||
return obj
|
||||
|
||||
@@ -213,38 +204,38 @@ class Config(object):
|
||||
def load_or_generate_config(cls, description, argv):
|
||||
config_parser = argparse.ArgumentParser(add_help=False)
|
||||
config_parser.add_argument(
|
||||
"-c", "--config-path",
|
||||
"-c",
|
||||
"--config-path",
|
||||
action="append",
|
||||
metavar="CONFIG_FILE",
|
||||
help="Specify config file. Can be given multiple times and"
|
||||
" may specify directories containing *.yaml files."
|
||||
" may specify directories containing *.yaml files.",
|
||||
)
|
||||
config_parser.add_argument(
|
||||
"--generate-config",
|
||||
action="store_true",
|
||||
help="Generate a config file for the server name"
|
||||
help="Generate a config file for the server name",
|
||||
)
|
||||
config_parser.add_argument(
|
||||
"--report-stats",
|
||||
action="store",
|
||||
help="Whether the generated config reports anonymized usage statistics",
|
||||
choices=["yes", "no"]
|
||||
choices=["yes", "no"],
|
||||
)
|
||||
config_parser.add_argument(
|
||||
"--generate-keys",
|
||||
action="store_true",
|
||||
help="Generate any missing key files then exit"
|
||||
help="Generate any missing key files then exit",
|
||||
)
|
||||
config_parser.add_argument(
|
||||
"--keys-directory",
|
||||
metavar="DIRECTORY",
|
||||
help="Used with 'generate-*' options to specify where files such as"
|
||||
" certs and signing keys should be stored in, unless explicitly"
|
||||
" specified in the config."
|
||||
" certs and signing keys should be stored in, unless explicitly"
|
||||
" specified in the config.",
|
||||
)
|
||||
config_parser.add_argument(
|
||||
"-H", "--server-name",
|
||||
help="The server name to generate a config file for"
|
||||
"-H", "--server-name", help="The server name to generate a config file for"
|
||||
)
|
||||
config_args, remaining_args = config_parser.parse_known_args(argv)
|
||||
|
||||
@@ -257,8 +248,8 @@ class Config(object):
|
||||
if config_args.generate_config:
|
||||
if config_args.report_stats is None:
|
||||
config_parser.error(
|
||||
"Please specify either --report-stats=yes or --report-stats=no\n\n" +
|
||||
MISSING_REPORT_STATS_SPIEL
|
||||
"Please specify either --report-stats=yes or --report-stats=no\n\n"
|
||||
+ MISSING_REPORT_STATS_SPIEL
|
||||
)
|
||||
if not config_files:
|
||||
config_parser.error(
|
||||
@@ -287,26 +278,32 @@ class Config(object):
|
||||
config_dir_path=config_dir_path,
|
||||
server_name=server_name,
|
||||
report_stats=(config_args.report_stats == "yes"),
|
||||
is_generating_file=True
|
||||
is_generating_file=True,
|
||||
)
|
||||
obj.invoke_all("generate_files", config)
|
||||
config_file.write(config_str)
|
||||
print((
|
||||
"A config file has been generated in %r for server name"
|
||||
" %r with corresponding SSL keys and self-signed"
|
||||
" certificates. Please review this file and customise it"
|
||||
" to your needs."
|
||||
) % (config_path, server_name))
|
||||
print(
|
||||
(
|
||||
"A config file has been generated in %r for server name"
|
||||
" %r with corresponding SSL keys and self-signed"
|
||||
" certificates. Please review this file and customise it"
|
||||
" to your needs."
|
||||
)
|
||||
% (config_path, server_name)
|
||||
)
|
||||
print(
|
||||
"If this server name is incorrect, you will need to"
|
||||
" regenerate the SSL certificates"
|
||||
)
|
||||
return
|
||||
else:
|
||||
print((
|
||||
"Config file %r already exists. Generating any missing key"
|
||||
" files."
|
||||
) % (config_path,))
|
||||
print(
|
||||
(
|
||||
"Config file %r already exists. Generating any missing key"
|
||||
" files."
|
||||
)
|
||||
% (config_path,)
|
||||
)
|
||||
generate_keys = True
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
@@ -338,8 +335,7 @@ class Config(object):
|
||||
|
||||
return obj
|
||||
|
||||
def read_config_files(self, config_files, keys_directory=None,
|
||||
generate_keys=False):
|
||||
def read_config_files(self, config_files, keys_directory=None, generate_keys=False):
|
||||
if not keys_directory:
|
||||
keys_directory = os.path.dirname(config_files[-1])
|
||||
|
||||
@@ -364,8 +360,9 @@ class Config(object):
|
||||
|
||||
if "report_stats" not in config:
|
||||
raise ConfigError(
|
||||
MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + "\n" +
|
||||
MISSING_REPORT_STATS_SPIEL
|
||||
MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS
|
||||
+ "\n"
|
||||
+ MISSING_REPORT_STATS_SPIEL
|
||||
)
|
||||
|
||||
if generate_keys:
|
||||
@@ -399,16 +396,16 @@ def find_config_files(search_paths):
|
||||
for entry in os.listdir(config_path):
|
||||
entry_path = os.path.join(config_path, entry)
|
||||
if not os.path.isfile(entry_path):
|
||||
print (
|
||||
"Found subdirectory in config directory: %r. IGNORING."
|
||||
) % (entry_path, )
|
||||
err = "Found subdirectory in config directory: %r. IGNORING."
|
||||
print(err % (entry_path,))
|
||||
continue
|
||||
|
||||
if not entry.endswith(".yaml"):
|
||||
print (
|
||||
"Found file in config directory that does not"
|
||||
" end in '.yaml': %r. IGNORING."
|
||||
) % (entry_path, )
|
||||
err = (
|
||||
"Found file in config directory that does not end in "
|
||||
"'.yaml': %r. IGNORING."
|
||||
)
|
||||
print(err % (entry_path,))
|
||||
continue
|
||||
|
||||
files.append(entry_path)
|
||||
|
||||
@@ -42,6 +42,14 @@ DEFAULT_CONFIG = """\
|
||||
# until the user consents to the privacy policy. The value of the setting is
|
||||
# used as the text of the error.
|
||||
#
|
||||
# 'require_at_registration', if enabled, will add a step to the registration
|
||||
# process, similar to how captcha works. Users will be required to accept the
|
||||
# policy before their account is created.
|
||||
#
|
||||
# 'policy_name' is the display name of the policy users will see when registering
|
||||
# for an account. Has no effect unless `require_at_registration` is enabled.
|
||||
# Defaults to "Privacy Policy".
|
||||
#
|
||||
# user_consent:
|
||||
# template_dir: res/templates/privacy
|
||||
# version: 1.0
|
||||
@@ -54,6 +62,8 @@ DEFAULT_CONFIG = """\
|
||||
# block_events_error: >-
|
||||
# To continue using this homeserver you must review and agree to the
|
||||
# terms and conditions at %(consent_uri)s
|
||||
# require_at_registration: False
|
||||
# policy_name: Privacy Policy
|
||||
#
|
||||
"""
|
||||
|
||||
@@ -67,6 +77,8 @@ class ConsentConfig(Config):
|
||||
self.user_consent_server_notice_content = None
|
||||
self.user_consent_server_notice_to_guests = False
|
||||
self.block_events_without_consent_error = None
|
||||
self.user_consent_at_registration = False
|
||||
self.user_consent_policy_name = "Privacy Policy"
|
||||
|
||||
def read_config(self, config):
|
||||
consent_config = config.get("user_consent")
|
||||
@@ -83,6 +95,12 @@ class ConsentConfig(Config):
|
||||
self.user_consent_server_notice_to_guests = bool(consent_config.get(
|
||||
"send_server_notice_to_guests", False,
|
||||
))
|
||||
self.user_consent_at_registration = bool(consent_config.get(
|
||||
"require_at_registration", False,
|
||||
))
|
||||
self.user_consent_policy_name = consent_config.get(
|
||||
"policy_name", "Privacy Policy",
|
||||
)
|
||||
|
||||
def default_config(self, **kwargs):
|
||||
return DEFAULT_CONFIG
|
||||
|
||||
@@ -13,10 +13,18 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
# This file can't be called email.py because if it is, we cannot:
|
||||
import email.utils
|
||||
import logging
|
||||
import os
|
||||
|
||||
from ._base import Config
|
||||
import pkg_resources
|
||||
|
||||
from ._base import Config, ConfigError
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class EmailConfig(Config):
|
||||
@@ -38,7 +46,6 @@ class EmailConfig(Config):
|
||||
"smtp_host",
|
||||
"smtp_port",
|
||||
"notif_from",
|
||||
"template_dir",
|
||||
"notif_template_html",
|
||||
"notif_template_text",
|
||||
]
|
||||
@@ -62,9 +69,26 @@ class EmailConfig(Config):
|
||||
self.email_smtp_host = email_config["smtp_host"]
|
||||
self.email_smtp_port = email_config["smtp_port"]
|
||||
self.email_notif_from = email_config["notif_from"]
|
||||
self.email_template_dir = email_config["template_dir"]
|
||||
self.email_notif_template_html = email_config["notif_template_html"]
|
||||
self.email_notif_template_text = email_config["notif_template_text"]
|
||||
|
||||
template_dir = email_config.get("template_dir")
|
||||
# we need an absolute path, because we change directory after starting (and
|
||||
# we don't yet know what auxiliary templates like mail.css we will need).
|
||||
# (Note that loading as package_resources with jinja.PackageLoader doesn't
|
||||
# work for the same reason.)
|
||||
if not template_dir:
|
||||
template_dir = pkg_resources.resource_filename(
|
||||
'synapse', 'res/templates'
|
||||
)
|
||||
template_dir = os.path.abspath(template_dir)
|
||||
|
||||
for f in self.email_notif_template_text, self.email_notif_template_html:
|
||||
p = os.path.join(template_dir, f)
|
||||
if not os.path.isfile(p):
|
||||
raise ConfigError("Unable to find email template file %s" % (p, ))
|
||||
self.email_template_dir = template_dir
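
pkg_resources.resource_filename is what lets the packaged default templates work even when synapse is installed as an egg or wheel, and the abspath matters because the daemon changes directory after startup. The resolution logic, isolated:

    import os
    import pkg_resources

    def resolve_template_dir(configured_dir=None):
        if not configured_dir:
            # Fall back to the templates shipped inside the package.
            configured_dir = pkg_resources.resource_filename(
                'synapse', 'res/templates'
            )
        # Absolute, so later lookups (e.g. mail.css) survive the chdir.
        return os.path.abspath(configured_dir)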
self.email_notif_for_new_users = email_config.get(
|
||||
"notif_for_new_users", True
|
||||
)
|
||||
@@ -113,7 +137,9 @@ class EmailConfig(Config):
|
||||
# require_transport_security: False
|
||||
# notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
|
||||
# app_name: Matrix
|
||||
# template_dir: res/templates
|
||||
# # if template_dir is unset, uses the example templates that are part of
|
||||
# # the Synapse distribution.
|
||||
# #template_dir: res/templates
|
||||
# notif_template_html: notif_mail.html
|
||||
# notif_template_text: notif_mail.txt
|
||||
# notif_for_new_users: True
|
||||
|
||||
@@ -31,6 +31,7 @@ from .push import PushConfig
|
||||
from .ratelimiting import RatelimitConfig
|
||||
from .registration import RegistrationConfig
|
||||
from .repository import ContentRepositoryConfig
|
||||
from .room_directory import RoomDirectoryConfig
|
||||
from .saml2 import SAML2Config
|
||||
from .server import ServerConfig
|
||||
from .server_notices_config import ServerNoticesConfig
|
||||
@@ -49,7 +50,7 @@ class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
|
||||
WorkerConfig, PasswordAuthProviderConfig, PushConfig,
|
||||
SpamCheckerConfig, GroupsConfig, UserDirectoryConfig,
|
||||
ConsentConfig,
|
||||
ServerNoticesConfig,
|
||||
ServerNoticesConfig, RoomDirectoryConfig,
|
||||
):
|
||||
pass
|
||||
|
||||
|
||||
@@ -50,6 +50,7 @@ handlers:
|
||||
maxBytes: 104857600
|
||||
backupCount: 10
|
||||
filters: [context]
|
||||
encoding: utf8
|
||||
console:
|
||||
class: logging.StreamHandler
|
||||
formatter: precise
|
||||
|
||||
@@ -15,10 +15,10 @@
|
||||
|
||||
from distutils.util import strtobool
|
||||
|
||||
from synapse.config._base import Config, ConfigError
|
||||
from synapse.types import RoomAlias
|
||||
from synapse.util.stringutils import random_string_with_symbols
|
||||
|
||||
from ._base import Config
|
||||
|
||||
|
||||
class RegistrationConfig(Config):
|
||||
|
||||
@@ -44,6 +44,10 @@ class RegistrationConfig(Config):
|
||||
)
|
||||
|
||||
self.auto_join_rooms = config.get("auto_join_rooms", [])
|
||||
for room_alias in self.auto_join_rooms:
|
||||
if not RoomAlias.is_valid(room_alias):
|
||||
raise ConfigError('Invalid auto_join_rooms entry %s' % (room_alias,))
|
||||
self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True)
|
||||
|
||||
def default_config(self, **kwargs):
|
||||
registration_shared_secret = random_string_with_symbols(50)
|
||||
@@ -89,15 +93,24 @@ class RegistrationConfig(Config):
|
||||
|
||||
# The list of identity servers trusted to verify third party
|
||||
# identifiers by this server.
|
||||
#
|
||||
# Also defines the ID server which will be called when an account is
|
||||
# deactivated (one will be picked arbitrarily).
|
||||
trusted_third_party_id_servers:
|
||||
- matrix.org
|
||||
- vector.im
|
||||
- riot.im
|
||||
|
||||
# Users who register on this homeserver will automatically be joined
|
||||
# to these rooms
|
||||
#auto_join_rooms:
|
||||
# - "#example:example.com"
|
||||
|
||||
# Where auto_join_rooms are specified, setting this flag ensures that the
|
||||
# rooms exist by creating them when the first user on the
|
||||
# homeserver registers.
|
||||
# Setting to false means that if the rooms are not manually created,
|
||||
# users cannot be auto-joined since they do not exist.
|
||||
autocreate_auto_join_rooms: true
|
||||
""" % locals()
|
||||
|
||||
def add_arguments(self, parser):
|
||||
|
||||
@@ -178,7 +178,7 @@ class ContentRepositoryConfig(Config):
|
||||
def default_config(self, **kwargs):
|
||||
media_store = self.default_path("media_store")
|
||||
uploads_path = self.default_path("uploads")
|
||||
return """
|
||||
return r"""
|
||||
# Directory where uploaded images and attachments are stored.
|
||||
media_store_path: "%(media_store)s"
|
||||
|
||||
|
||||
102
synapse/config/room_directory.py
Normal file
@@ -0,0 +1,102 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2018 New Vector Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from synapse.util import glob_to_regex
|
||||
|
||||
from ._base import Config, ConfigError
|
||||
|
||||
|
||||
class RoomDirectoryConfig(Config):
|
||||
def read_config(self, config):
|
||||
alias_creation_rules = config["alias_creation_rules"]
|
||||
|
||||
self._alias_creation_rules = [
|
||||
_AliasRule(rule)
|
||||
for rule in alias_creation_rules
|
||||
]
|
||||
|
||||
def default_config(self, config_dir_path, server_name, **kwargs):
|
||||
return """
|
||||
# The `alias_creation` option controls who's allowed to create aliases
|
||||
# on this server.
|
||||
#
|
||||
# The format of this option is a list of rules that contain globs that
|
||||
# match against user_id and the new alias (fully qualified with server
|
||||
# name). The action in the first rule that matches is taken, which can
|
||||
# currently either be "allow" or "deny".
|
||||
#
|
||||
# If no rules match the request is denied.
|
||||
alias_creation_rules:
|
||||
- user_id: "*"
|
||||
alias: "*"
|
||||
action: allow
|
||||
"""
|
||||
|
||||
def is_alias_creation_allowed(self, user_id, alias):
|
||||
"""Checks if the given user is allowed to create the given alias
|
||||
|
||||
Args:
|
||||
user_id (str)
|
||||
alias (str)
|
||||
|
||||
Returns:
|
||||
boolean: True if the user is allowed to create the alias
|
||||
"""
|
||||
for rule in self._alias_creation_rules:
|
||||
if rule.matches(user_id, alias):
|
||||
return rule.action == "allow"
|
||||
|
||||
return False
|
||||
|
||||
|
||||
class _AliasRule(object):
|
||||
def __init__(self, rule):
|
||||
action = rule["action"]
|
||||
user_id = rule["user_id"]
|
||||
alias = rule["alias"]
|
||||
|
||||
if action in ("allow", "deny"):
|
||||
self.action = action
|
||||
else:
|
||||
raise ConfigError(
|
||||
"alias_creation_rules rules can only have action of 'allow'"
|
||||
" or 'deny'"
|
||||
)
|
||||
|
||||
try:
|
||||
self._user_id_regex = glob_to_regex(user_id)
|
||||
self._alias_regex = glob_to_regex(alias)
|
||||
except Exception as e:
|
||||
raise ConfigError("Failed to parse glob into regex: %s", e)
|
||||
|
||||
def matches(self, user_id, alias):
|
||||
"""Tests if this rule matches the given user_id and alias.
|
||||
|
||||
Args:
|
||||
user_id (str)
|
||||
alias (str)
|
||||
|
||||
Returns:
|
||||
boolean
|
||||
"""
|
||||
|
||||
# Note: The regexes are anchored at both ends
|
||||
if not self._user_id_regex.match(user_id):
|
||||
return False
|
||||
|
||||
if not self._alias_regex.match(alias):
|
||||
return False
|
||||
|
||||
return True
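
Rules are evaluated top to bottom with globs anchored at both ends, so an allow-list for admins with a default deny would look like this (a sketch; assumes RoomDirectoryConfig can be instantiated bare, as other config classes in this tree can):

    config = RoomDirectoryConfig()
    config.read_config({
        "alias_creation_rules": [
            {"user_id": "@admin:*", "alias": "*", "action": "allow"},
            {"user_id": "*", "alias": "*", "action": "deny"},
        ]
    })

    assert config.is_alias_creation_allowed("@admin:hs", "#room:hs")
    assert not config.is_alias_creation_allowed("@bob:hs", "#room:hs")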
@@ -77,6 +77,7 @@ class ServerConfig(Config):
|
||||
self.max_mau_value = config.get(
|
||||
"max_mau_value", 0,
|
||||
)
|
||||
self.mau_stats_only = config.get("mau_stats_only", False)
|
||||
|
||||
self.mau_limits_reserved_threepids = config.get(
|
||||
"mau_limit_reserved_threepids", []
|
||||
@@ -372,6 +373,11 @@ class ServerConfig(Config):
|
||||
# max_mau_value: 50
|
||||
# mau_trial_days: 2
|
||||
#
|
||||
# If enabled, the metrics for the number of monthly active users will
|
||||
# be populated, however no one will be limited. If limit_usage_by_mau
|
||||
# is true, this is implied to be true.
|
||||
# mau_stats_only: False
|
||||
#
|
||||
# Sometimes the server admin will want to ensure certain accounts are
|
||||
# never blocked by mau checking. These accounts are specified here.
|
||||
#
|
||||
|
||||
@@ -15,6 +15,8 @@
|
||||
|
||||
import logging
|
||||
|
||||
from six.moves import urllib
|
||||
|
||||
from canonicaljson import json
|
||||
|
||||
from twisted.internet import defer, reactor
|
||||
@@ -28,15 +30,15 @@ from synapse.util import logcontext
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
KEY_API_V1 = b"/_matrix/key/v1/"
|
||||
KEY_API_V2 = "/_matrix/key/v2/server/%s"
|
||||
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def fetch_server_key(server_name, tls_client_options_factory, path=KEY_API_V1):
|
||||
def fetch_server_key(server_name, tls_client_options_factory, key_id):
|
||||
"""Fetch the keys for a remote server."""
|
||||
|
||||
factory = SynapseKeyClientFactory()
|
||||
factory.path = path
|
||||
factory.path = KEY_API_V2 % (urllib.parse.quote(key_id), )
|
||||
factory.host = server_name
|
||||
endpoint = matrix_federation_endpoint(
|
||||
reactor, server_name, tls_client_options_factory, timeout=30
|
||||
@@ -55,7 +57,7 @@ def fetch_server_key(server_name, tls_client_options_factory, path=KEY_API_V1):
|
||||
raise IOError("Cannot get key for %r" % server_name)
|
||||
except (ConnectError, DomainError) as e:
|
||||
logger.warn("Error getting key for %r: %s", server_name, e)
|
||||
except Exception as e:
|
||||
except Exception:
|
||||
logger.exception("Error getting key for %r", server_name)
|
||||
raise IOError("Cannot get key for %r" % server_name)
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
# Copyright 2017 New Vector Ltd.
|
||||
# Copyright 2017, 2018 New Vector Ltd.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -18,8 +18,6 @@ import hashlib
|
||||
import logging
|
||||
from collections import namedtuple
|
||||
|
||||
from six.moves import urllib
|
||||
|
||||
from signedjson.key import (
|
||||
decode_verify_key_bytes,
|
||||
encode_verify_key_base64,
|
||||
@@ -395,32 +393,13 @@ class Keyring(object):
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_keys_from_server(self, server_name_and_key_ids):
|
||||
@defer.inlineCallbacks
|
||||
def get_key(server_name, key_ids):
|
||||
keys = None
|
||||
try:
|
||||
keys = yield self.get_server_verify_key_v2_direct(
|
||||
server_name, key_ids
|
||||
)
|
||||
except Exception as e:
|
||||
logger.info(
|
||||
"Unable to get key %r for %r directly: %s %s",
|
||||
key_ids, server_name,
|
||||
type(e).__name__, str(e),
|
||||
)
|
||||
|
||||
if not keys:
|
||||
keys = yield self.get_server_verify_key_v1_direct(
|
||||
server_name, key_ids
|
||||
)
|
||||
|
||||
keys = {server_name: keys}
|
||||
|
||||
defer.returnValue(keys)
|
||||
|
||||
results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
|
||||
[
|
||||
run_in_background(get_key, server_name, key_ids)
|
||||
run_in_background(
|
||||
self.get_server_verify_key_v2_direct,
|
||||
server_name,
|
||||
key_ids,
|
||||
)
|
||||
for server_name, key_ids in server_name_and_key_ids
|
||||
],
|
||||
consumeErrors=True,
|
||||
@@ -525,10 +504,7 @@ class Keyring(object):
|
||||
continue
|
||||
|
||||
(response, tls_certificate) = yield fetch_server_key(
|
||||
server_name, self.hs.tls_client_options_factory,
|
||||
path=("/_matrix/key/v2/server/%s" % (
|
||||
urllib.parse.quote(requested_key_id),
|
||||
)).encode("ascii"),
|
||||
server_name, self.hs.tls_client_options_factory, requested_key_id
|
||||
)
|
||||
|
||||
if (u"signatures" not in response
@@ -657,78 +633,6 @@ class Keyring(object):

         defer.returnValue(results)

-    @defer.inlineCallbacks
-    def get_server_verify_key_v1_direct(self, server_name, key_ids):
-        """Finds a verification key for the server with one of the key ids.
-        Args:
-            server_name (str): The name of the server to fetch a key for.
-            keys_ids (list of str): The key_ids to check for.
-        """
-
-        # Try to fetch the key from the remote server.
-
-        (response, tls_certificate) = yield fetch_server_key(
-            server_name, self.hs.tls_client_options_factory
-        )
-
-        # Check the response.
-
-        x509_certificate_bytes = crypto.dump_certificate(
-            crypto.FILETYPE_ASN1, tls_certificate
-        )
-
-        if ("signatures" not in response
-                or server_name not in response["signatures"]):
-            raise KeyLookupError("Key response not signed by remote server")
-
-        if "tls_certificate" not in response:
-            raise KeyLookupError("Key response missing TLS certificate")
-
-        tls_certificate_b64 = response["tls_certificate"]
-
-        if encode_base64(x509_certificate_bytes) != tls_certificate_b64:
-            raise KeyLookupError("TLS certificate doesn't match")
-
-        # Cache the result in the datastore.
-
-        time_now_ms = self.clock.time_msec()
-
-        verify_keys = {}
-        for key_id, key_base64 in response["verify_keys"].items():
-            if is_signing_algorithm_supported(key_id):
-                key_bytes = decode_base64(key_base64)
-                verify_key = decode_verify_key_bytes(key_id, key_bytes)
-                verify_key.time_added = time_now_ms
-                verify_keys[key_id] = verify_key
-
-        for key_id in response["signatures"][server_name]:
-            if key_id not in response["verify_keys"]:
-                raise KeyLookupError(
-                    "Key response must include verification keys for all"
-                    " signatures"
-                )
-            if key_id in verify_keys:
-                verify_signed_json(
-                    response,
-                    server_name,
-                    verify_keys[key_id]
-                )
-
-        yield self.store.store_server_certificate(
-            server_name,
-            server_name,
-            time_now_ms,
-            tls_certificate,
-        )
-
-        yield self.store_keys(
-            server_name=server_name,
-            from_server=server_name,
-            verify_keys=verify_keys,
-        )
-
-        defer.returnValue(verify_keys)
-
     def store_keys(self, server_name, from_server, verify_keys):
         """Store a collection of verify keys for a given server
         Args:
@@ -155,10 +155,7 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):

         if user_level < invite_level:
             raise AuthError(
-                403, (
-                    "You cannot issue a third party invite for %s." %
-                    (event.content.display_name,)
-                )
+                403, "You don't have permission to invite users",
             )
         else:
             logger.debug("Allowing! %s", event)
@@ -203,11 +200,11 @@ def _is_membership_change_allowed(event, auth_events):
     membership = event.content["membership"]

     # Check if this is the room creator joining:
-    if len(event.prev_events) == 1 and Membership.JOIN == membership:
+    if len(event.prev_event_ids()) == 1 and Membership.JOIN == membership:
         # Get room creation event:
         key = (EventTypes.Create, "", )
         create = auth_events.get(key)
-        if create and event.prev_events[0][0] == create.event_id:
+        if create and event.prev_event_ids()[0] == create.event_id:
             if create.content["creator"] == event.state_key:
                 return

@@ -305,7 +302,7 @@ def _is_membership_change_allowed(event, auth_events):

         if user_level < invite_level:
             raise AuthError(
-                403, "You cannot invite user %s." % target_user_id
+                403, "You don't have permission to invite users",
             )
     elif Membership.JOIN == membership:
         # Joins are valid iff caller == target and they were:
@@ -693,7 +690,7 @@ def auth_types_for_event(event):
     auth_types = []

     auth_types.append((EventTypes.PowerLevels, "", ))
-    auth_types.append((EventTypes.Member, event.user_id, ))
+    auth_types.append((EventTypes.Member, event.sender, ))
     auth_types.append((EventTypes.Create, "", ))

     if event.type == EventTypes.Member:

@@ -159,6 +159,24 @@ class EventBase(object):
     def keys(self):
         return six.iterkeys(self._event_dict)

+    def prev_event_ids(self):
+        """Returns the list of prev event IDs. The order matches the order
+        specified in the event, though there is no meaning to it.
+
+        Returns:
+            list[str]: The list of event IDs of this event's prev_events
+        """
+        return [e for e, _ in self.prev_events]
+
+    def auth_event_ids(self):
+        """Returns the list of auth event IDs. The order matches the order
+        specified in the event, though there is no meaning to it.
+
+        Returns:
+            list[str]: The list of event IDs of this event's auth_events
+        """
+        return [e for e, _ in self.auth_events]
+

 class FrozenEvent(EventBase):
     def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None):
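
# Aside (not part of the diff): in this event format, prev_events and
# auth_events are lists of (event_id, hashes) pairs, so the new accessors
# simply project out the IDs. Hypothetical data for illustration:
prev_events = [
    ("$abc123:example.org", {"sha256": "hash1"}),
    ("$def456:example.org", {"sha256": "hash2"}),
]

# what prev_event_ids() computes for such a list:
ids = [e for e, _ in prev_events]
assert ids == ["$abc123:example.org", "$def456:example.org"]
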

@@ -14,7 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-import re

 import six
 from six import iteritems
@@ -44,6 +43,7 @@ from synapse.replication.http.federation import (
     ReplicationGetQueryRestServlet,
 )
 from synapse.types import get_domain_from_id
+from synapse.util import glob_to_regex
 from synapse.util.async_helpers import Linearizer, concurrently_execute
 from synapse.util.caches.response_cache import ResponseCache
 from synapse.util.logcontext import nested_logging_context
@@ -162,8 +162,30 @@ class FederationServer(FederationBase):
                 p["age_ts"] = request_time - int(p["age"])
                 del p["age"]

+            # We try and pull out an event ID so that if later checks fail we
+            # can log something sensible. We don't mandate an event ID here in
+            # case future event formats get rid of the key.
+            possible_event_id = p.get("event_id", "<Unknown>")
+
+            # Now we get the room ID so that we can check that we know the
+            # version of the room.
+            room_id = p.get("room_id")
+            if not room_id:
+                logger.info(
+                    "Ignoring PDU as does not have a room_id. Event ID: %s",
+                    possible_event_id,
+                )
+                continue
+
+            try:
+                # In future we will actually use the room version to parse the
+                # PDU into an event.
+                yield self.store.get_room_version(room_id)
+            except NotFoundError:
+                logger.info("Ignoring PDU for unknown room_id: %s", room_id)
+                continue
+
             event = event_from_pdu_json(p)
-            room_id = event.room_id
             pdus_by_room.setdefault(room_id, []).append(event)

         pdu_results = {}
@@ -323,11 +345,6 @@ class FederationServer(FederationBase):
         else:
             defer.returnValue((404, ""))

-    @defer.inlineCallbacks
-    @log_function
-    def on_pull_request(self, origin, versions):
-        raise NotImplementedError("Pull transactions not implemented")
-
     @defer.inlineCallbacks
     def on_query_request(self, query_type, args):
         received_queries_counter.labels(query_type).inc()
@@ -507,19 +524,19 @@ class FederationServer(FederationBase):
     @defer.inlineCallbacks
     @log_function
     def on_get_missing_events(self, origin, room_id, earliest_events,
-                              latest_events, limit, min_depth):
+                              latest_events, limit):
         with (yield self._server_linearizer.queue((origin, room_id))):
             origin_host, _ = parse_server_name(origin)
             yield self.check_server_matches_acl(origin_host, room_id)

             logger.info(
                 "on_get_missing_events: earliest_events: %r, latest_events: %r,"
-                " limit: %d, min_depth: %d",
-                earliest_events, latest_events, limit, min_depth
+                " limit: %d",
+                earliest_events, latest_events, limit,
             )

             missing_events = yield self.handler.on_get_missing_events(
-                origin, room_id, earliest_events, latest_events, limit, min_depth
+                origin, room_id, earliest_events, latest_events, limit,
             )

             if len(missing_events) < 5:
@@ -729,22 +746,10 @@ def _acl_entry_matches(server_name, acl_entry):
     if not isinstance(acl_entry, six.string_types):
         logger.warn("Ignoring non-str ACL entry '%s' (is %s)", acl_entry, type(acl_entry))
         return False
-    regex = _glob_to_regex(acl_entry)
+    regex = glob_to_regex(acl_entry)
     return regex.match(server_name)


-def _glob_to_regex(glob):
-    res = ''
-    for c in glob:
-        if c == '*':
-            res = res + '.*'
-        elif c == '?':
-            res = res + '.'
-        else:
-            res = res + re.escape(c)
-    return re.compile(res + "\\Z", re.IGNORECASE)
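
# Aside (not part of the diff): the module-local helper above is replaced by
# the shared synapse.util.glob_to_regex, but the semantics are unchanged:
# '*' matches any run of characters, '?' matches a single character, all
# other characters are escaped, and matching is case-insensitive.
import re

def glob_to_regex_sketch(glob):
    res = ''.join(
        '.*' if c == '*' else '.' if c == '?' else re.escape(c)
        for c in glob
    )
    return re.compile(res + r"\Z", re.IGNORECASE)

# an ACL entry of "*.example.com" matches any subdomain of example.com:
assert glob_to_regex_sketch("*.example.com").match("host.example.com")
assert not glob_to_regex_sketch("*.example.com").match("example.org")
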


 class FederationHandlerRegistry(object):
     """Allows classes to register themselves as handlers for a given EDU or
     query type for incoming federation traffic.
@@ -800,7 +805,7 @@ class FederationHandlerRegistry(object):
             yield handler(origin, content)
         except SynapseError as e:
             logger.info("Failed to handle edu %r: %r", edu_type, e)
-        except Exception as e:
+        except Exception:
             logger.exception("Failed to handle edu %r", edu_type)

     def on_query(self, query_type, args):

@@ -183,9 +183,7 @@ class TransactionQueue(object):
                     # banned then it won't receive the event because it won't
                     # be in the room after the ban.
                     destinations = yield self.state.get_current_hosts_in_room(
-                        event.room_id, latest_event_ids=[
-                            prev_id for prev_id, _ in event.prev_events
-                        ],
+                        event.room_id, latest_event_ids=event.prev_event_ids(),
                     )
                 except Exception:
                     logger.exception(
@@ -633,14 +631,6 @@ class TransactionQueue(object):
                 transaction, json_data_cb
             )
             code = 200

-            if response:
-                for e_id, r in response.get("pdus", {}).items():
-                    if "error" in r:
-                        logger.warn(
-                            "Transaction returned error for %s: %s",
-                            e_id, r,
-                        )
         except HttpResponseException as e:
             code = e.code
             response = e.response
@@ -657,19 +647,24 @@ class TransactionQueue(object):
             destination, txn_id, code
         )

-        logger.debug("TX [%s] Sent transaction", destination)
-        logger.debug("TX [%s] Marking as delivered...", destination)

         yield self.transaction_actions.delivered(
             transaction, code, response
         )

-        logger.debug("TX [%s] Marked as delivered", destination)
+        logger.debug("TX [%s] {%s} Marked as delivered", destination, txn_id)

-        if code != 200:
+        if code == 200:
+            for e_id, r in response.get("pdus", {}).items():
+                if "error" in r:
+                    logger.warn(
+                        "TX [%s] {%s} Remote returned error for %s: %s",
+                        destination, txn_id, e_id, r,
+                    )
+        else:
             for p in pdus:
-                logger.info(
-                    "Failed to send event %s to %s", p.event_id, destination
+                logger.warn(
+                    "TX [%s] {%s} Failed to send event %s",
+                    destination, txn_id, p.event_id,
                 )
                 success = False
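
# Aside (not part of the diff): the transaction response inspected above maps
# each PDU's event ID to a per-event result, so an HTTP 200 can still carry
# per-PDU failures. Hypothetical response shape:
response = {
    "pdus": {
        "$event1:example.org": {},  # accepted
        "$event2:example.org": {"error": "Signature verification failed"},
    },
}

failed = [e_id for e_id, r in response.get("pdus", {}).items() if "error" in r]
assert failed == ["$event2:example.org"]
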


@@ -143,9 +143,17 @@ class TransportLayerClient(object):
             transaction (Transaction)

         Returns:
-            Deferred: Results of the deferred is a tuple in the form of
-            (response_code, response_body) where the response_body is a
-            python dict decoded from json
+            Deferred: Succeeds when we get a 2xx HTTP response. The result
+            will be the decoded JSON body.
+
+            Fails with ``HTTPRequestException`` if we get an HTTP response
+            code >= 300.
+
+            Fails with ``NotRetryingDestination`` if we are not yet ready
+            to retry this server.
+
+            Fails with ``FederationDeniedError`` if this destination
+            is not on our federation whitelist
         """
         logger.debug(
             "send_data dest=%s, txid=%s",
@@ -170,11 +178,6 @@ class TransportLayerClient(object):
             backoff_on_404=True,  # If we get a 404 the other side has gone
         )

-        logger.debug(
-            "send_data dest=%s, txid=%s, got response: 200",
-            transaction.destination, transaction.transaction_id,
-        )
-
         defer.returnValue(response)

     @defer.inlineCallbacks

@@ -362,14 +362,6 @@ class FederationSendServlet(BaseFederationServlet):
         defer.returnValue((code, response))


-class FederationPullServlet(BaseFederationServlet):
-    PATH = "/pull/"
-
-    # This is for when someone asks us for everything since version X
-    def on_GET(self, origin, content, query):
-        return self.handler.on_pull_request(query["origin"][0], query["v"])
-
-
 class FederationEventServlet(BaseFederationServlet):
     PATH = "/event/(?P<event_id>[^/]*)/"

@@ -560,7 +552,6 @@ class FederationGetMissingEventsServlet(BaseFederationServlet):
     @defer.inlineCallbacks
     def on_POST(self, origin, content, query, room_id):
         limit = int(content.get("limit", 10))
-        min_depth = int(content.get("min_depth", 0))
         earliest_events = content.get("earliest_events", [])
         latest_events = content.get("latest_events", [])

@@ -569,7 +560,6 @@ class FederationGetMissingEventsServlet(BaseFederationServlet):
             room_id=room_id,
             earliest_events=earliest_events,
             latest_events=latest_events,
-            min_depth=min_depth,
             limit=limit,
         )

@@ -1263,7 +1253,6 @@ class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet):

 FEDERATION_SERVLET_CLASSES = (
     FederationSendServlet,
-    FederationPullServlet,
     FederationEventServlet,
     FederationStateServlet,
     FederationStateIdsServlet,

@@ -117,9 +117,6 @@ class Transaction(JsonEncodedObject):
                 "Require 'transaction_id' to construct a Transaction"
             )

-        for p in pdus:
-            p.transaction_id = kwargs["transaction_id"]
-
         kwargs["pdus"] = [p.get_pdu_json() for p in pdus]

         return Transaction(**kwargs)

@@ -28,6 +28,7 @@ from synapse.metrics import (
     event_processing_loop_room_count,
 )
 from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.util import log_failure
 from synapse.util.logcontext import make_deferred_yieldable, run_in_background
 from synapse.util.metrics import Measure

@@ -36,17 +37,6 @@ logger = logging.getLogger(__name__)
 events_processed_counter = Counter("synapse_handlers_appservice_events_processed", "")


-def log_failure(failure):
-    logger.error(
-        "Application Services Failure",
-        exc_info=(
-            failure.type,
-            failure.value,
-            failure.getTracebackObject()
-        )
-    )


 class ApplicationServicesHandler(object):

     def __init__(self, hs):
@@ -112,7 +102,10 @@ class ApplicationServicesHandler(object):

         if not self.started_scheduler:
             def start_scheduler():
-                return self.scheduler.start().addErrback(log_failure)
+                return self.scheduler.start().addErrback(
+                    log_failure, "Application Services Failure",
+                )

             run_as_background_process("as_scheduler", start_scheduler)
             self.started_scheduler = True
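
# Aside (not part of the diff): addErrback forwards extra positional
# arguments to the errback, which is how the shared synapse.util.log_failure
# can take a custom message where the old module-local helper hard-coded one.
# log_failure below is a simplified stand-in for the real utility.
from twisted.internet import defer

def log_failure(failure, msg):
    print("%s: %s" % (msg, failure.value))  # the real helper calls logger.error

d = defer.fail(RuntimeError("scheduler died"))
d.addErrback(log_failure, "Application Services Failure")
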

@@ -22,7 +22,7 @@ import bcrypt
 import pymacaroons
 from canonicaljson import json

-from twisted.internet import defer, threads
+from twisted.internet import defer
 from twisted.web.client import PartialDownloadError

 import synapse.util.stringutils as stringutils
@@ -37,8 +37,8 @@ from synapse.api.errors import (
 )
 from synapse.module_api import ModuleApi
 from synapse.types import UserID
+from synapse.util import logcontext
 from synapse.util.caches.expiringcache import ExpiringCache
-from synapse.util.logcontext import make_deferred_yieldable

 from ._base import BaseHandler

@@ -59,6 +59,7 @@ class AuthHandler(BaseHandler):
             LoginType.EMAIL_IDENTITY: self._check_email_identity,
             LoginType.MSISDN: self._check_msisdn,
             LoginType.DUMMY: self._check_dummy_auth,
+            LoginType.TERMS: self._check_terms_auth,
         }
         self.bcrypt_rounds = hs.config.bcrypt_rounds

@@ -431,6 +432,9 @@ class AuthHandler(BaseHandler):
     def _check_dummy_auth(self, authdict, _):
         return defer.succeed(True)

+    def _check_terms_auth(self, authdict, _):
+        return defer.succeed(True)
+
     @defer.inlineCallbacks
     def _check_threepid(self, medium, authdict):
         if 'threepid_creds' not in authdict:
@@ -462,6 +466,22 @@ class AuthHandler(BaseHandler):
     def _get_params_recaptcha(self):
         return {"public_key": self.hs.config.recaptcha_public_key}

+    def _get_params_terms(self):
+        return {
+            "policies": {
+                "privacy_policy": {
+                    "version": self.hs.config.user_consent_version,
+                    "en": {
+                        "name": self.hs.config.user_consent_policy_name,
+                        "url": "%s_matrix/consent?v=%s" % (
+                            self.hs.config.public_baseurl,
+                            self.hs.config.user_consent_version,
+                        ),
+                    },
+                },
+            },
+        }
+
     def _auth_dict_for_flows(self, flows, session):
         public_flows = []
         for f in flows:
@@ -469,6 +489,7 @@ class AuthHandler(BaseHandler):

         get_params = {
             LoginType.RECAPTCHA: self._get_params_recaptcha,
+            LoginType.TERMS: self._get_params_terms,
         }

         params = {}
@@ -884,11 +905,7 @@ class AuthHandler(BaseHandler):
                 bcrypt.gensalt(self.bcrypt_rounds),
             ).decode('ascii')

-        return make_deferred_yieldable(
-            threads.deferToThreadPool(
-                self.hs.get_reactor(), self.hs.get_reactor().getThreadPool(), _do_hash
-            ),
-        )
+        return logcontext.defer_to_thread(self.hs.get_reactor(), _do_hash)

     def validate_hash(self, password, stored_hash):
         """Validates that self.hash(password) == stored_hash.
@@ -913,13 +930,7 @@ class AuthHandler(BaseHandler):
             if not isinstance(stored_hash, bytes):
                 stored_hash = stored_hash.encode('ascii')

-            return make_deferred_yieldable(
-                threads.deferToThreadPool(
-                    self.hs.get_reactor(),
-                    self.hs.get_reactor().getThreadPool(),
-                    _do_validate_hash,
-                ),
-            )
+            return logcontext.defer_to_thread(self.hs.get_reactor(), _do_validate_hash)
         else:
             return defer.succeed(False)
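
# Aside (not part of the diff): logcontext.defer_to_thread wraps the
# deferToThreadPool incantation it replaces -- run a blocking function (here
# bcrypt) on the reactor's thread pool and get a Deferred back, while
# preserving Synapse's log contexts. A plain-Twisted sketch of the same idea:
import bcrypt
from twisted.internet import threads

def hash_password_off_thread(password, rounds=12):
    def _do_hash():
        # bcrypt is deliberately slow and CPU-bound, so it must not run on
        # the reactor thread
        return bcrypt.hashpw(
            password.encode("utf-8"), bcrypt.gensalt(rounds)
        ).decode("ascii")

    # threads.deferToThread is the stock Twisted helper; defer_to_thread
    # layers logcontext bookkeeping on top of the same mechanism.
    return threads.deferToThread(_do_hash)
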


@@ -17,8 +17,8 @@ import logging
 from twisted.internet import defer

 from synapse.api.errors import SynapseError
+from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.types import UserID, create_requester
-from synapse.util.logcontext import run_in_background

 from ._base import BaseHandler

@@ -121,7 +121,7 @@ class DeactivateAccountHandler(BaseHandler):
             None
         """
         if not self._user_parter_running:
-            run_in_background(self._user_parter_loop)
+            run_as_background_process("user_parter_loop", self._user_parter_loop)

     @defer.inlineCallbacks
     def _user_parter_loop(self):

@@ -43,6 +43,7 @@ class DirectoryHandler(BaseHandler):
         self.state = hs.get_state_handler()
         self.appservice_handler = hs.get_application_service_handler()
         self.event_creation_handler = hs.get_event_creation_handler()
+        self.config = hs.config

         self.federation = hs.get_federation_client()
         hs.get_federation_registry().register_query_handler(
@@ -80,41 +81,88 @@ class DirectoryHandler(BaseHandler):
         )

     @defer.inlineCallbacks
-    def create_association(self, user_id, room_alias, room_id, servers=None):
-        # association creation for human users
-        # TODO(erikj): Do user auth.
+    def create_association(self, requester, room_alias, room_id, servers=None,
+                           send_event=True):
+        """Attempt to create a new alias

-        if not self.spam_checker.user_may_create_room_alias(user_id, room_alias):
-            raise SynapseError(
-                403, "This user is not permitted to create this alias",
-            )
+        Args:
+            requester (Requester)
+            room_alias (RoomAlias)
+            room_id (str)
+            servers (list[str]|None): List of servers that others servers
+                should try and join via
+            send_event (bool): Whether to send an updated m.room.aliases event

-        can_create = yield self.can_modify_alias(
-            room_alias,
-            user_id=user_id
-        )
-        if not can_create:
-            raise SynapseError(
-                400, "This alias is reserved by an application service.",
-                errcode=Codes.EXCLUSIVE
-            )
+        Returns:
+            Deferred
+        """
+
+        user_id = requester.user.to_string()
+
+        service = requester.app_service
+        if service:
+            if not service.is_interested_in_alias(room_alias.to_string()):
+                raise SynapseError(
+                    400, "This application service has not reserved"
+                    " this kind of alias.", errcode=Codes.EXCLUSIVE
+                )
+        else:
+            if not self.spam_checker.user_may_create_room_alias(user_id, room_alias):
+                raise AuthError(
+                    403, "This user is not permitted to create this alias",
+                )
+
+            if not self.config.is_alias_creation_allowed(user_id, room_alias.to_string()):
+                # Lets just return a generic message, as there may be all sorts of
+                # reasons why we said no. TODO: Allow configurable error messages
+                # per alias creation rule?
+                raise SynapseError(
+                    403, "Not allowed to create alias",
+                )
+
+            can_create = yield self.can_modify_alias(
+                room_alias,
+                user_id=user_id
+            )
+            if not can_create:
+                raise AuthError(
+                    400, "This alias is reserved by an application service.",
+                    errcode=Codes.EXCLUSIVE
+                )

         yield self._create_association(room_alias, room_id, servers, creator=user_id)

-    @defer.inlineCallbacks
-    def create_appservice_association(self, service, room_alias, room_id,
-                                      servers=None):
-        if not service.is_interested_in_alias(room_alias.to_string()):
-            raise SynapseError(
-                400, "This application service has not reserved"
-                " this kind of alias.", errcode=Codes.EXCLUSIVE
-            )
-
-        # association creation for app services
-        yield self._create_association(room_alias, room_id, servers)
+        if send_event:
+            yield self.send_room_alias_update_event(
+                requester,
+                room_id
+            )

     @defer.inlineCallbacks
-    def delete_association(self, requester, user_id, room_alias):
-        # association deletion for human users
+    def delete_association(self, requester, room_alias, send_event=True):
+        """Remove an alias from the directory
+
+        (this is only meant for human users; AS users should call
+        delete_appservice_association)
+
+        Args:
+            requester (Requester):
+            room_alias (RoomAlias):
+            send_event (bool): Whether to send an updated m.room.aliases event.
+                Note that, if we delete the canonical alias, we will always attempt
+                to send an m.room.canonical_alias event
+
+        Returns:
+            Deferred[unicode]: room id that the alias used to point to
+
+        Raises:
+            NotFoundError: if the alias doesn't exist
+
+            AuthError: if the user doesn't have perms to delete the alias (ie, the user
+                is neither the creator of the alias, nor a server admin.
+
+            SynapseError: if the alias belongs to an AS
+        """
+        user_id = requester.user.to_string()

         try:
             can_delete = yield self._user_can_delete_alias(room_alias, user_id)
@@ -141,11 +189,11 @@ class DirectoryHandler(BaseHandler):
         room_id = yield self._delete_association(room_alias)

         try:
-            yield self.send_room_alias_update_event(
-                requester,
-                requester.user.to_string(),
-                room_id
-            )
+            if send_event:
+                yield self.send_room_alias_update_event(
+                    requester,
+                    room_id
+                )

             yield self._update_canonical_alias(
                 requester,
@@ -211,10 +259,8 @@ class DirectoryHandler(BaseHandler):
             servers = result["servers"]

         if not room_id:
-            raise SynapseError(
-                404,
+            raise NotFoundError(
                 "Room alias %s not found" % (room_alias.to_string(),),
-                Codes.NOT_FOUND
             )

         users = yield self.state.get_current_user_in_room(room_id)
@@ -254,14 +300,12 @@ class DirectoryHandler(BaseHandler):
                 "servers": result.servers,
             })
         else:
-            raise SynapseError(
-                404,
+            raise NotFoundError(
                 "Room alias %r not found" % (room_alias.to_string(),),
-                Codes.NOT_FOUND
             )

     @defer.inlineCallbacks
-    def send_room_alias_update_event(self, requester, user_id, room_id):
+    def send_room_alias_update_event(self, requester, room_id):
         aliases = yield self.store.get_aliases_for_room(room_id)

         yield self.event_creation_handler.create_and_send_nonmember_event(
@@ -270,7 +314,7 @@ class DirectoryHandler(BaseHandler):
                 "type": EventTypes.Aliases,
                 "state_key": self.hs.hostname,
                 "room_id": room_id,
-                "sender": user_id,
+                "sender": requester.user.to_string(),
                 "content": {"aliases": aliases},
             },
             ratelimit=False
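
# Aside (not part of the diff): the event built by
# send_room_alias_update_event is an m.room.aliases state event keyed by the
# local server's name. Hypothetical example of the resulting event:
aliases_event = {
    "type": "m.room.aliases",
    "state_key": "example.org",               # self.hs.hostname
    "room_id": "!somewhere:example.org",
    "sender": "@alice:example.org",           # requester.user.to_string()
    "content": {"aliases": ["#pretty-name:example.org"]},
}
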
297  synapse/handlers/e2e_room_keys.py  (new file)
@@ -0,0 +1,297 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017, 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from six import iteritems
+
+from twisted.internet import defer
+
+from synapse.api.errors import NotFoundError, RoomKeysVersionError, StoreError
+from synapse.util.async_helpers import Linearizer
+
+logger = logging.getLogger(__name__)
+
+
+class E2eRoomKeysHandler(object):
+    """
+    Implements an optional realtime backup mechanism for encrypted E2E megolm room keys.
+    This gives a way for users to store and recover their megolm keys if they lose all
+    their clients. It should also extend easily to future room key mechanisms.
+    The actual payload of the encrypted keys is completely opaque to the handler.
+    """
+
+    def __init__(self, hs):
+        self.store = hs.get_datastore()
+
+        # Used to lock whenever a client is uploading key data. This prevents collisions
+        # between clients trying to upload the details of a new session, given all
+        # clients belonging to a user will receive and try to upload a new session at
+        # roughly the same time. Also used to lock out uploads when the key is being
+        # changed.
+        self._upload_linearizer = Linearizer("upload_room_keys_lock")
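
# Aside (not part of the diff): Linearizer serialises work per key, so two
# concurrent uploads for the same user queue behind one another while
# different users proceed in parallel. The methods below all use the pattern:
from twisted.internet import defer
from synapse.util.async_helpers import Linearizer

linearizer = Linearizer("example_lock")

@defer.inlineCallbacks
def critical_section(user_id, do_work):
    # do_work is a hypothetical Deferred-returning callable
    with (yield linearizer.queue(user_id)):
        # only one caller per user_id runs this block at a time
        result = yield do_work(user_id)
    defer.returnValue(result)
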
+
+    @defer.inlineCallbacks
+    def get_room_keys(self, user_id, version, room_id=None, session_id=None):
+        """Bulk get the E2E room keys for a given backup, optionally filtered to a given
+        room, or a given session.
+        See EndToEndRoomKeyStore.get_e2e_room_keys for full details.
+
+        Args:
+            user_id(str): the user whose keys we're getting
+            version(str): the version ID of the backup we're getting keys from
+            room_id(string): room ID to get keys for, for None to get keys for all rooms
+            session_id(string): session ID to get keys for, for None to get keys for all
+                sessions
+        Raises:
+            NotFoundError: if the backup version does not exist
+        Returns:
+            A deferred list of dicts giving the session_data and message metadata for
+            these room keys.
+        """
+
+        # we deliberately take the lock to get keys so that changing the version
+        # works atomically
+        with (yield self._upload_linearizer.queue(user_id)):
+            # make sure the backup version exists
+            try:
+                yield self.store.get_e2e_room_keys_version_info(user_id, version)
+            except StoreError as e:
+                if e.code == 404:
+                    raise NotFoundError("Unknown backup version")
+                else:
+                    raise
+
+            results = yield self.store.get_e2e_room_keys(
+                user_id, version, room_id, session_id
+            )
+
+            defer.returnValue(results)
+
+    @defer.inlineCallbacks
+    def delete_room_keys(self, user_id, version, room_id=None, session_id=None):
+        """Bulk delete the E2E room keys for a given backup, optionally filtered to a given
+        room or a given session.
+        See EndToEndRoomKeyStore.delete_e2e_room_keys for full details.
+
+        Args:
+            user_id(str): the user whose backup we're deleting
+            version(str): the version ID of the backup we're deleting
+            room_id(string): room ID to delete keys for, for None to delete keys for all
+                rooms
+            session_id(string): session ID to delete keys for, for None to delete keys
+                for all sessions
+        Returns:
+            A deferred of the deletion transaction
+        """
+
+        # lock for consistency with uploading
+        with (yield self._upload_linearizer.queue(user_id)):
+            yield self.store.delete_e2e_room_keys(user_id, version, room_id, session_id)
+
+    @defer.inlineCallbacks
+    def upload_room_keys(self, user_id, version, room_keys):
+        """Bulk upload a list of room keys into a given backup version, asserting
+        that the given version is the current backup version. room_keys are merged
+        into the current backup as described in RoomKeysServlet.on_PUT().
+
+        Args:
+            user_id(str): the user whose backup we're setting
+            version(str): the version ID of the backup we're updating
+            room_keys(dict): a nested dict describing the room_keys we're setting:
+
+        {
+            "rooms": {
+                "!abc:matrix.org": {
+                    "sessions": {
+                        "c0ff33": {
+                            "first_message_index": 1,
+                            "forwarded_count": 1,
+                            "is_verified": false,
+                            "session_data": "SSBBTSBBIEZJU0gK"
+                        }
+                    }
+                }
+            }
+        }
+
+        Raises:
+            NotFoundError: if there are no versions defined
+            RoomKeysVersionError: if the uploaded version is not the current version
+        """
+
+        # TODO: Validate the JSON to make sure it has the right keys.
+
+        # XXX: perhaps we should use a finer grained lock here?
+        with (yield self._upload_linearizer.queue(user_id)):
+
+            # Check that the version we're trying to upload is the current version
+            try:
+                version_info = yield self.store.get_e2e_room_keys_version_info(user_id)
+            except StoreError as e:
+                if e.code == 404:
+                    raise NotFoundError("Version '%s' not found" % (version,))
+                else:
+                    raise
+
+            if version_info['version'] != version:
+                # Check that the version we're trying to upload actually exists
+                try:
+                    version_info = yield self.store.get_e2e_room_keys_version_info(
+                        user_id, version,
+                    )
+                    # if we get this far, the version must exist
+                    raise RoomKeysVersionError(current_version=version_info['version'])
+                except StoreError as e:
+                    if e.code == 404:
+                        raise NotFoundError("Version '%s' not found" % (version,))
+                    else:
+                        raise
+
+            # go through the room_keys.
+            # XXX: this should/could be done concurrently, given we're in a lock.
+            for room_id, room in iteritems(room_keys['rooms']):
+                for session_id, session in iteritems(room['sessions']):
+                    yield self._upload_room_key(
+                        user_id, version, room_id, session_id, session
+                    )
+
+    @defer.inlineCallbacks
+    def _upload_room_key(self, user_id, version, room_id, session_id, room_key):
+        """Upload a given room_key for a given room and session into a given
+        version of the backup. Merges the key with any which might already exist.
+
+        Args:
+            user_id(str): the user whose backup we're setting
+            version(str): the version ID of the backup we're updating
+            room_id(str): the ID of the room whose keys we're setting
+            session_id(str): the session whose room_key we're setting
+            room_key(dict): the room_key being set
+        """
+
+        # get the room_key for this particular row
+        current_room_key = None
+        try:
+            current_room_key = yield self.store.get_e2e_room_key(
+                user_id, version, room_id, session_id
+            )
+        except StoreError as e:
+            if e.code == 404:
+                pass
+            else:
+                raise
+
+        if self._should_replace_room_key(current_room_key, room_key):
+            yield self.store.set_e2e_room_key(
+                user_id, version, room_id, session_id, room_key
+            )
+
+    @staticmethod
+    def _should_replace_room_key(current_room_key, room_key):
+        """
+        Determine whether to replace a given current_room_key (if any)
+        with a newly uploaded room_key backup
+
+        Args:
+            current_room_key (dict): Optional, the current room_key dict if any
+            room_key (dict): The new room_key dict which may or may not be fit to
+                replace the current_room_key
+
+        Returns:
+            True if current_room_key should be replaced by room_key in the backup
+        """
+
+        if current_room_key:
+            # spelt out with if/elifs rather than nested boolean expressions
+            # purely for legibility.
+
+            if room_key['is_verified'] and not current_room_key['is_verified']:
+                return True
+            elif (
+                room_key['first_message_index'] <
+                current_room_key['first_message_index']
+            ):
+                return True
+            elif room_key['forwarded_count'] < current_room_key['forwarded_count']:
+                return True
+            else:
+                return False
+        return True
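
# Aside (not part of the diff): the replacement rules above, exercised on
# plain dicts. A candidate key wins if it is better verified, or otherwise
# covers earlier messages, or has been forwarded fewer times.
def should_replace(current_room_key, room_key):
    # mirrors _should_replace_room_key
    if current_room_key:
        if room_key["is_verified"] and not current_room_key["is_verified"]:
            return True
        elif room_key["first_message_index"] < current_room_key["first_message_index"]:
            return True
        elif room_key["forwarded_count"] < current_room_key["forwarded_count"]:
            return True
        else:
            return False
    return True

current = {"is_verified": False, "first_message_index": 5, "forwarded_count": 3}
assert should_replace(current, dict(current, is_verified=True))
assert should_replace(current, dict(current, first_message_index=2))
assert should_replace(current, dict(current, forwarded_count=1))
assert not should_replace(current, dict(current))
assert should_replace(None, dict(current))  # nothing stored yet: always keep
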
+
+    @defer.inlineCallbacks
+    def create_version(self, user_id, version_info):
+        """Create a new backup version. This automatically becomes the new
+        backup version for the user's keys; previous backups will no longer be
+        writeable to.
+
+        Args:
+            user_id(str): the user whose backup version we're creating
+            version_info(dict): metadata about the new version being created
+
+        {
+            "algorithm": "m.megolm_backup.v1",
+            "auth_data": "dGhpcyBzaG91bGQgYWN0dWFsbHkgYmUgZW5jcnlwdGVkIGpzb24K"
+        }
+
+        Returns:
+            A deferred of a string that gives the new version number.
+        """
+
+        # TODO: Validate the JSON to make sure it has the right keys.
+
+        # lock everyone out until we've switched version
+        with (yield self._upload_linearizer.queue(user_id)):
+            new_version = yield self.store.create_e2e_room_keys_version(
+                user_id, version_info
+            )
+            defer.returnValue(new_version)
+
+    @defer.inlineCallbacks
+    def get_version_info(self, user_id, version=None):
+        """Get the info about a given version of the user's backup
+
+        Args:
+            user_id(str): the user whose current backup version we're querying
+            version(str): Optional; if None gives the most recent version
+                otherwise a historical one.
+        Raises:
+            StoreError: code 404 if the requested backup version doesn't exist
+        Returns:
+            A deferred of a info dict that gives the info about the new version.
+
+        {
+            "version": "1234",
+            "algorithm": "m.megolm_backup.v1",
+            "auth_data": "dGhpcyBzaG91bGQgYWN0dWFsbHkgYmUgZW5jcnlwdGVkIGpzb24K"
+        }
+        """
+
+        with (yield self._upload_linearizer.queue(user_id)):
+            res = yield self.store.get_e2e_room_keys_version_info(user_id, version)
+            defer.returnValue(res)
+
+    @defer.inlineCallbacks
+    def delete_version(self, user_id, version=None):
+        """Deletes a given version of the user's e2e_room_keys backup
+
+        Args:
+            user_id(str): the user whose current backup version we're deleting
+            version(str): the version id of the backup being deleted
+        Raises:
+            StoreError: code 404 if this backup version doesn't exist
+        """
+
+        with (yield self._upload_linearizer.queue(user_id)):
+            yield self.store.delete_e2e_room_keys_version(user_id, version)
@@ -48,13 +48,14 @@ from synapse.crypto.event_signing import (
     compute_event_signature,
 )
 from synapse.events.validator import EventValidator
+from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.replication.http.federation import (
     ReplicationCleanRoomRestServlet,
     ReplicationFederationSendEventsRestServlet,
 )
 from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet
-from synapse.state import resolve_events_with_factory
-from synapse.types import UserID, get_domain_from_id
+from synapse.state import StateResolutionStore, resolve_events_with_store
+from synapse.types import UserID, create_requester, get_domain_from_id
 from synapse.util import logcontext, unwrapFirstError
 from synapse.util.async_helpers import Linearizer
 from synapse.util.distributor import user_joined_room
@@ -105,6 +106,7 @@ class FederationHandler(BaseHandler):

         self.hs = hs

+        self.clock = hs.get_clock()
         self.store = hs.get_datastore()  # type: synapse.storage.DataStore
         self.federation_client = hs.get_federation_client()
         self.state_handler = hs.get_state_handler()
@@ -202,27 +204,22 @@ class FederationHandler(BaseHandler):
             self.room_queues[room_id].append((pdu, origin))
             return

-        # If we're no longer in the room just ditch the event entirely. This
-        # is probably an old server that has come back and thinks we're still
-        # in the room (or we've been rejoined to the room by a state reset).
+        # If we're not in the room just ditch the event entirely. This is
+        # probably an old server that has come back and thinks we're still in
+        # the room (or we've been rejoined to the room by a state reset).
         #
-        # If we were never in the room then maybe our database got vaped and
-        # we should check if we *are* in fact in the room. If we are then we
-        # can magically rejoin the room.
+        # Note that if we were never in the room then we would have already
+        # dropped the event, since we wouldn't know the room version.
         is_in_room = yield self.auth.check_host_in_room(
             room_id,
             self.server_name
         )
         if not is_in_room:
-            was_in_room = yield self.store.was_host_joined(
-                pdu.room_id, self.server_name,
+            logger.info(
+                "[%s %s] Ignoring PDU from %s as we're not in the room",
+                room_id, event_id, origin,
             )
-            if was_in_room:
-                logger.info(
-                    "[%s %s] Ignoring PDU from %s as we've left the room",
-                    room_id, event_id, origin,
-                )
-                defer.returnValue(None)
+            defer.returnValue(None)

         state = None
         auth_chain = []
@@ -239,7 +236,7 @@ class FederationHandler(BaseHandler):
                 room_id, event_id, min_depth,
             )

-            prevs = {e_id for e_id, _ in pdu.prev_events}
+            prevs = set(pdu.prev_event_ids())
             seen = yield self.store.have_seen_events(prevs)

             if min_depth and pdu.depth < min_depth:
@@ -309,8 +306,8 @@ class FederationHandler(BaseHandler):

                 if sent_to_us_directly:
                     logger.warn(
-                        "[%s %s] Failed to fetch %d prev events: rejecting",
-                        room_id, event_id, len(prevs - seen),
+                        "[%s %s] Rejecting: failed to fetch %d prev events: %s",
+                        room_id, event_id, len(prevs - seen), shortstr(prevs - seen)
                     )
                     raise FederationError(
                         "ERROR",
@@ -384,24 +381,24 @@ class FederationHandler(BaseHandler):
             for x in remote_state:
                 event_map[x.event_id] = x

-            # Resolve any conflicting state
-            @defer.inlineCallbacks
-            def fetch(ev_ids):
-                fetched = yield self.store.get_events(
-                    ev_ids, get_prev_content=False, check_redacted=False,
-                )
-                # add any events we fetch here to the `event_map` so that we
-                # can use them to build the state event list below.
-                event_map.update(fetched)
-                defer.returnValue(fetched)
-
             room_version = yield self.store.get_room_version(room_id)
-            state_map = yield resolve_events_with_factory(
-                room_version, state_maps, event_map, fetch,
+            state_map = yield resolve_events_with_store(
+                room_version, state_maps, event_map,
+                state_res_store=StateResolutionStore(self.store),
             )

-            # we need to give _process_received_pdu the actual state events
+            # We need to give _process_received_pdu the actual state events
             # rather than event ids, so generate that now.
+
+            # First though we need to fetch all the events that are in
+            # state_map, so we can build up the state below.
+            evs = yield self.store.get_events(
+                list(state_map.values()),
+                get_prev_content=False,
+                check_redacted=False,
+            )
+            event_map.update(evs)
+
             state = [
                 event_map[e] for e in six.itervalues(state_map)
             ]
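
# Aside (not part of the diff): state_map maps (event_type, state_key) pairs
# to event IDs and event_map maps IDs to the events fetched just above, so
# building the state list is a double lookup. Hypothetical data:
state_map = {
    ("m.room.create", ""): "$create:example.org",
    ("m.room.member", "@alice:example.org"): "$join:example.org",
}
event_map = {
    "$create:example.org": {"type": "m.room.create"},  # stand-ins for events
    "$join:example.org": {"type": "m.room.member"},
}

state = [event_map[e] for e in state_map.values()]
assert len(state) == 2
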
@@ -452,8 +449,8 @@ class FederationHandler(BaseHandler):
         latest |= seen

         logger.info(
-            "[%s %s]: Requesting %d prev_events: %s",
-            room_id, event_id, len(prevs - seen), shortstr(prevs - seen)
+            "[%s %s]: Requesting missing events between %s and %s",
+            room_id, event_id, shortstr(latest), event_id,
         )

         # XXX: we set timeout to 10s to help workaround
@@ -557,86 +554,54 @@ class FederationHandler(BaseHandler):
             room_id, event_id, event,
         )

-        # FIXME (erikj): Awful hack to make the case where we are not currently
-        # in the room work
-        # If state and auth_chain are None, then we don't need to do this check
-        # as we already know we have enough state in the DB to handle this
-        # event.
-        if state and auth_chain and not event.internal_metadata.is_outlier():
-            is_in_room = yield self.auth.check_host_in_room(
-                room_id,
-                self.server_name
-            )
-        else:
-            is_in_room = True
-
-        if not is_in_room:
-            logger.info(
-                "[%s %s] Got event for room we're not in",
-                room_id, event_id,
-            )
-
-            try:
-                yield self._persist_auth_tree(
-                    origin, auth_chain, state, event
-                )
-            except AuthError as e:
-                raise FederationError(
-                    "ERROR",
-                    e.code,
-                    e.msg,
-                    affected=event_id,
-                )
-
-        else:
-            event_ids = set()
-            if state:
-                event_ids |= {e.event_id for e in state}
-            if auth_chain:
-                event_ids |= {e.event_id for e in auth_chain}
-
-            seen_ids = yield self.store.have_seen_events(event_ids)
-
-            if state and auth_chain is not None:
-                # If we have any state or auth_chain given to us by the replication
-                # layer, then we should handle them (if we haven't before.)
-
-                event_infos = []
-
-                for e in itertools.chain(auth_chain, state):
-                    if e.event_id in seen_ids:
-                        continue
-                    e.internal_metadata.outlier = True
-                    auth_ids = [e_id for e_id, _ in e.auth_events]
-                    auth = {
-                        (e.type, e.state_key): e for e in auth_chain
-                        if e.event_id in auth_ids or e.type == EventTypes.Create
-                    }
-                    event_infos.append({
-                        "event": e,
-                        "auth_events": auth,
-                    })
-                    seen_ids.add(e.event_id)
-
-                logger.info(
-                    "[%s %s] persisting newly-received auth/state events %s",
-                    room_id, event_id, [e["event"].event_id for e in event_infos]
-                )
-                yield self._handle_new_events(origin, event_infos)
-
-            try:
-                context = yield self._handle_new_event(
-                    origin,
-                    event,
-                    state=state,
-                )
-            except AuthError as e:
-                raise FederationError(
-                    "ERROR",
-                    e.code,
-                    e.msg,
-                    affected=event.event_id,
-                )
+        event_ids = set()
+        if state:
+            event_ids |= {e.event_id for e in state}
+        if auth_chain:
+            event_ids |= {e.event_id for e in auth_chain}
+
+        seen_ids = yield self.store.have_seen_events(event_ids)
+
+        if state and auth_chain is not None:
+            # If we have any state or auth_chain given to us by the replication
+            # layer, then we should handle them (if we haven't before.)
+
+            event_infos = []
+
+            for e in itertools.chain(auth_chain, state):
+                if e.event_id in seen_ids:
+                    continue
+                e.internal_metadata.outlier = True
+                auth_ids = e.auth_event_ids()
+                auth = {
+                    (e.type, e.state_key): e for e in auth_chain
+                    if e.event_id in auth_ids or e.type == EventTypes.Create
+                }
+                event_infos.append({
+                    "event": e,
+                    "auth_events": auth,
+                })
+                seen_ids.add(e.event_id)
+
+            logger.info(
+                "[%s %s] persisting newly-received auth/state events %s",
+                room_id, event_id, [e["event"].event_id for e in event_infos]
+            )
+            yield self._handle_new_events(origin, event_infos)
+
+        try:
+            context = yield self._handle_new_event(
+                origin,
+                event,
+                state=state,
+            )
+        except AuthError as e:
+            raise FederationError(
+                "ERROR",
+                e.code,
+                e.msg,
+                affected=event.event_id,
+            )

         room = yield self.store.get_room(room_id)
@@ -726,7 +691,7 @@ class FederationHandler(BaseHandler):
         edges = [
             ev.event_id
             for ev in events
-            if set(e_id for e_id, _ in ev.prev_events) - event_ids
+            if set(ev.prev_event_ids()) - event_ids
         ]

         logger.info(
@@ -753,7 +718,7 @@ class FederationHandler(BaseHandler):
         required_auth = set(
             a_id
             for event in events + list(state_events.values()) + list(auth_events.values())
-            for a_id, _ in event.auth_events
+            for a_id in event.auth_event_ids()
         )
         auth_events.update({
             e_id: event_map[e_id] for e_id in required_auth if e_id in event_map
@@ -769,7 +734,7 @@ class FederationHandler(BaseHandler):
             auth_events.update(ret_events)

             required_auth.update(
-                a_id for event in ret_events.values() for a_id, _ in event.auth_events
+                a_id for event in ret_events.values() for a_id in event.auth_event_ids()
             )
             missing_auth = required_auth - set(auth_events)

@@ -796,7 +761,7 @@ class FederationHandler(BaseHandler):
                 required_auth.update(
                     a_id
                     for event in results if event
-                    for a_id, _ in event.auth_events
+                    for a_id in event.auth_event_ids()
                 )
                 missing_auth = required_auth - set(auth_events)

@@ -816,7 +781,7 @@ class FederationHandler(BaseHandler):
                     "auth_events": {
                         (auth_events[a_id].type, auth_events[a_id].state_key):
                         auth_events[a_id]
-                        for a_id, _ in a.auth_events
+                        for a_id in a.auth_event_ids()
                         if a_id in auth_events
                     }
                 })
@@ -828,7 +793,7 @@ class FederationHandler(BaseHandler):
                     "auth_events": {
                         (auth_events[a_id].type, auth_events[a_id].state_key):
                         auth_events[a_id]
-                        for a_id, _ in event_map[e_id].auth_events
+                        for a_id in event_map[e_id].auth_event_ids()
                         if a_id in auth_events
                     }
                 })
@@ -1041,17 +1006,17 @@ class FederationHandler(BaseHandler):
         Raises:
             SynapseError if the event does not pass muster
         """
-        if len(ev.prev_events) > 20:
+        if len(ev.prev_event_ids()) > 20:
             logger.warn("Rejecting event %s which has %i prev_events",
-                        ev.event_id, len(ev.prev_events))
+                        ev.event_id, len(ev.prev_event_ids()))
             raise SynapseError(
                 http_client.BAD_REQUEST,
                 "Too many prev_events",
             )

-        if len(ev.auth_events) > 10:
+        if len(ev.auth_event_ids()) > 10:
             logger.warn("Rejecting event %s which has %i auth_events",
-                        ev.event_id, len(ev.auth_events))
+                        ev.event_id, len(ev.auth_event_ids()))
             raise SynapseError(
                 http_client.BAD_REQUEST,
                 "Too many auth_events",
@@ -1076,7 +1041,7 @@ class FederationHandler(BaseHandler):
     def on_event_auth(self, event_id):
         event = yield self.store.get_event(event_id)
         auth = yield self.store.get_auth_chain(
-            [auth_id for auth_id, _ in event.auth_events],
+            [auth_id for auth_id in event.auth_event_ids()],
             include_given=True
         )
         defer.returnValue([e for e in auth])
@@ -1337,8 +1302,37 @@ class FederationHandler(BaseHandler):
         context = yield self.state_handler.compute_event_context(event)
         yield self.persist_events_and_notify([(event, context)])

+        sender = UserID.from_string(event.sender)
+        target = UserID.from_string(event.state_key)
+        if (sender.localpart == target.localpart):
+            run_as_background_process(
+                "_auto_accept_invite",
+                self._auto_accept_invite,
+                sender, target, event.room_id,
+            )
+
         defer.returnValue(event)

+    @defer.inlineCallbacks
+    def _auto_accept_invite(self, sender, target, room_id):
+        joined = False
+        for attempt in range(0, 10):
+            try:
+                yield self.hs.get_room_member_handler().update_membership(
+                    requester=create_requester(target.to_string()),
+                    target=target,
+                    room_id=room_id,
+                    action="join",
+                )
+                joined = True
+                break
+            except Exception:
+                # We're going to retry, but we should log the error
+                logger.exception("Error auto-accepting invite on attempt %d" % attempt)
+                yield self.clock.sleep(1)
+        if not joined:
+            logger.error("Giving up on trying to auto-accept invite: too many attempts")
+
     @defer.inlineCallbacks
     def do_remotely_reject_invite(self, target_hosts, room_id, user_id):
         origin, event = yield self._make_and_verify_event(
@@ -1698,7 +1692,7 @@ class FederationHandler(BaseHandler):

         missing_auth_events = set()
         for e in itertools.chain(auth_events, state, [event]):
-            for e_id, _ in e.auth_events:
+            for e_id in e.auth_event_ids():
                 if e_id not in event_map:
                     missing_auth_events.add(e_id)

@@ -1717,7 +1711,7 @@ class FederationHandler(BaseHandler):
         for e in itertools.chain(auth_events, state, [event]):
             auth_for_e = {
                 (event_map[e_id].type, event_map[e_id].state_key): event_map[e_id]
-                for e_id, _ in e.auth_events
+                for e_id in e.auth_event_ids()
                 if e_id in event_map
             }
             if create_event:
@@ -1785,10 +1779,10 @@ class FederationHandler(BaseHandler):

         # This is a hack to fix some old rooms where the initial join event
         # didn't reference the create event in its auth events.
-        if event.type == EventTypes.Member and not event.auth_events:
-            if len(event.prev_events) == 1 and event.depth < 5:
+        if event.type == EventTypes.Member and not event.auth_event_ids():
+            if len(event.prev_event_ids()) == 1 and event.depth < 5:
                 c = yield self.store.get_event(
-                    event.prev_events[0][0],
+                    event.prev_event_ids()[0],
                     allow_none=True,
                 )
                 if c and c.type == EventTypes.Create:
@@ -1835,7 +1829,7 @@ class FederationHandler(BaseHandler):

         # Now get the current auth_chain for the event.
         local_auth_chain = yield self.store.get_auth_chain(
-            [auth_id for auth_id, _ in event.auth_events],
+            [auth_id for auth_id in event.auth_event_ids()],
             include_given=True
         )

@@ -1852,7 +1846,7 @@ class FederationHandler(BaseHandler):

     @defer.inlineCallbacks
     def on_get_missing_events(self, origin, room_id, earliest_events,
-                              latest_events, limit, min_depth):
+                              latest_events, limit):
         in_room = yield self.auth.check_host_in_room(
             room_id,
             origin
@@ -1861,14 +1855,12 @@ class FederationHandler(BaseHandler):
             raise AuthError(403, "Host not in room.")

         limit = min(limit, 20)
-        min_depth = max(min_depth, 0)

         missing_events = yield self.store.get_missing_events(
             room_id=room_id,
             earliest_events=earliest_events,
             latest_events=latest_events,
             limit=limit,
-            min_depth=min_depth,
         )

         missing_events = yield filter_events_for_server(
@@ -1893,7 +1885,7 @@ class FederationHandler(BaseHandler):
         """
         # Check if we have all the auth events.
         current_state = set(e.event_id for e in auth_events.values())
-        event_auth_events = set(e_id for e_id, _ in event.auth_events)
+        event_auth_events = set(event.auth_event_ids())

         if event.is_state():
             event_key = (event.type, event.state_key)
@@ -1937,7 +1929,7 @@ class FederationHandler(BaseHandler):
                     continue

                 try:
-                    auth_ids = [e_id for e_id, _ in e.auth_events]
+                    auth_ids = e.auth_event_ids()
                     auth = {
                         (e.type, e.state_key): e for e in remote_auth_chain
                         if e.event_id in auth_ids or e.type == EventTypes.Create
@@ -1958,7 +1950,7 @@ class FederationHandler(BaseHandler):
                 pass

             have_events = yield self.store.get_seen_events_with_rejections(
-                [e_id for e_id, _ in event.auth_events]
+                event.auth_event_ids()
             )
             seen_events = set(have_events.keys())
         except Exception:
@@ -2060,7 +2052,7 @@ class FederationHandler(BaseHandler):
                     continue

                 try:
-                    auth_ids = [e_id for e_id, _ in ev.auth_events]
+                    auth_ids = ev.auth_event_ids()
                     auth = {
                         (e.type, e.state_key): e
                         for e in result["auth_chain"]
@@ -2252,7 +2244,7 @@ class FederationHandler(BaseHandler):
         missing_remote_ids = [e.event_id for e in missing_remotes]
         base_remote_rejected = list(missing_remotes)
         for e in missing_remotes:
-            for e_id, _ in e.auth_events:
+            for e_id in e.auth_event_ids():
                 if e_id in missing_remote_ids:
                     try:
                         base_remote_rejected.remove(e)
@@ -2522,7 +2514,7 @@ class FederationHandler(BaseHandler):

         if not backfilled:  # Never notify for backfilled events
             for event, _ in event_and_contexts:
-                self._notify_persisted_event(event, max_stream_id)
+                yield self._notify_persisted_event(event, max_stream_id)

     def _notify_persisted_event(self, event, max_stream_id):
         """Checks to see if notifier/pushers should be notified about the
@@ -2555,7 +2547,7 @@ class FederationHandler(BaseHandler):
             extra_users=extra_users
         )

-        self.pusher_pool.on_new_notifications(
+        return self.pusher_pool.on_new_notifications(
             event_stream_id, max_stream_id,
         )


@@ -20,7 +20,7 @@ from six import iteritems

 from twisted.internet import defer

-from synapse.api.errors import SynapseError
+from synapse.api.errors import HttpResponseException, SynapseError
 from synapse.types import get_domain_from_id

 logger = logging.getLogger(__name__)
@@ -37,9 +37,23 @@ def _create_rerouter(func_name):
             )
         else:
             destination = get_domain_from_id(group_id)
-            return getattr(self.transport_client, func_name)(
+            d = getattr(self.transport_client, func_name)(
                 destination, group_id, *args, **kwargs
             )

+            # Capture errors returned by the remote homeserver and
+            # re-throw specific errors as SynapseErrors. This is so
+            # when the remote end responds with things like 403 Not
+            # In Group, we can communicate that to the client instead
+            # of a 500.
+            def h(failure):
+                failure.trap(HttpResponseException)
+                e = failure.value
+                if e.code == 403:
+                    raise e.to_synapse_error()
+                return failure
+            d.addErrback(h)
+            return d
     return f
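
# Aside (not part of the diff): how the errback above behaves. failure.trap
# re-raises anything that is not an HttpResponseException; a 403 is converted
# via to_synapse_error(); other HTTP errors propagate unchanged. The exception
# class here is a simplified stand-in for Synapse's real one.
from twisted.internet import defer

class FakeHttpResponseException(Exception):
    def __init__(self, code):
        self.code = code

    def to_synapse_error(self):
        return RuntimeError("converted %d" % self.code)

def handle(failure):
    failure.trap(FakeHttpResponseException)
    if failure.value.code == 403:
        raise failure.value.to_synapse_error()
    return failure

d = defer.fail(FakeHttpResponseException(403))
d.addErrback(handle)
# d now fails with the converted error instead of the raw HTTP exception
d.addErrback(lambda f: None)  # swallow it so the example exits cleanly
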
|
||||
|
||||
|
||||
|
||||
@@ -156,7 +156,7 @@ class InitialSyncHandler(BaseHandler):
                 room_end_token = "s%d" % (event.stream_ordering,)
                 deferred_room_state = run_in_background(
                     self.store.get_state_for_events,
-                    [event.event_id], None,
+                    [event.event_id],
                 )
                 deferred_room_state.addCallback(
                     lambda states: states[event.event_id]
@@ -301,7 +301,7 @@ class InitialSyncHandler(BaseHandler):
     def _room_initial_sync_parted(self, user_id, room_id, pagin_config,
                                   membership, member_event_id, is_peeking):
         room_state = yield self.store.get_state_for_events(
-            [member_event_id], None
+            [member_event_id],
         )
 
         room_state = room_state[member_event_id]
@@ -35,6 +35,7 @@ from synapse.crypto.event_signing import add_hashes_and_signatures
 from synapse.events.utils import serialize_event
 from synapse.events.validator import EventValidator
 from synapse.replication.http.send_event import ReplicationSendEventRestServlet
+from synapse.storage.state import StateFilter
 from synapse.types import RoomAlias, UserID
 from synapse.util.async_helpers import Linearizer
 from synapse.util.frozenutils import frozendict_json_encoder
@@ -80,7 +81,7 @@ class MessageHandler(object):
         elif membership == Membership.LEAVE:
             key = (event_type, state_key)
             room_state = yield self.store.get_state_for_events(
-                [membership_event_id], [key]
+                [membership_event_id], StateFilter.from_types([key])
             )
             data = room_state[membership_event_id].get(key)
 
@@ -88,7 +89,7 @@ class MessageHandler(object):
 
     @defer.inlineCallbacks
     def get_state_events(
-        self, user_id, room_id, types=None, filtered_types=None,
+        self, user_id, room_id, state_filter=StateFilter.all(),
         at_token=None, is_guest=False,
     ):
         """Retrieve all state events for a given room. If the user is
@@ -100,13 +101,8 @@ class MessageHandler(object):
         Args:
             user_id(str): The user requesting state events.
            room_id(str): The room ID to get all state events from.
-            types(list[(str, str|None)]|None): List of (type, state_key) tuples
-                which are used to filter the state fetched. If `state_key` is None,
-                all events are returned of the given type.
-                May be None, which matches any key.
-            filtered_types(list[str]|None): Only apply filtering via `types` to this
-                list of event types. Other types of events are returned unfiltered.
-                If None, `types` filtering is applied to all events.
+            state_filter (StateFilter): The state filter used to fetch state
+                from the database.
             at_token(StreamToken|None): the stream token at which we are requesting
                 the state. If the user is not allowed to view the state as of that
                 stream token, we raise a 403 SynapseError. If None, returns the current
@@ -139,7 +135,7 @@ class MessageHandler(object):
             event = last_events[0]
             if visible_events:
                 room_state = yield self.store.get_state_for_events(
-                    [event.event_id], types, filtered_types=filtered_types,
+                    [event.event_id], state_filter=state_filter,
                 )
                 room_state = room_state[event.event_id]
             else:
@@ -158,12 +154,12 @@ class MessageHandler(object):
 
         if membership == Membership.JOIN:
             state_ids = yield self.store.get_filtered_current_state_ids(
-                room_id, types, filtered_types=filtered_types,
+                room_id, state_filter=state_filter,
             )
             room_state = yield self.store.get_events(state_ids.values())
         elif membership == Membership.LEAVE:
             room_state = yield self.store.get_state_for_events(
-                [membership_event_id], types, filtered_types=filtered_types,
+                [membership_event_id], state_filter=state_filter,
             )
             room_state = room_state[membership_event_id]
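These hunks replace the old `types`/`filtered_types` argument pair with a single `StateFilter` object. The rule being encapsulated is simple: keep a state event when its `(type, state_key)` matches one of the requested pairs, where a `None` state key acts as a wildcard for that type. An illustrative re-implementation of that matching rule (a sketch of the semantics, not Synapse's actual `StateFilter` class):

    def filter_state(state, types):
        """Keep entries of a {(type, state_key): event} map that match one of
        the (type, state_key) pairs in `types`; a None state_key is a wildcard.
        """
        return {
            key: ev
            for key, ev in state.items()
            if any(
                key[0] == t and (sk is None or key[1] == sk)
                for t, sk in types
            )
        }


    state = {
        ("m.room.member", "@a:hs"): "member-a",
        ("m.room.member", "@b:hs"): "member-b",
        ("m.room.name", ""): "name",
    }
    assert filter_state(state, [("m.room.member", "@a:hs")]) == {
        ("m.room.member", "@a:hs"): "member-a",
    }
    assert len(filter_state(state, [("m.room.member", None)])) == 2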
@@ -431,6 +427,9 @@ class EventCreationHandler(object):
 
         if event.is_state():
             prev_state = yield self.deduplicate_state_event(event, context)
+            logger.info(
+                "Not bothering to persist duplicate state event %s", event.event_id,
+            )
             if prev_state is not None:
                 defer.returnValue(prev_state)
 
@@ -779,7 +778,7 @@ class EventCreationHandler(object):
             event, context=context
         )
 
-        self.pusher_pool.on_new_notifications(
+        yield self.pusher_pool.on_new_notifications(
            event_stream_id, max_stream_id,
        )
@@ -21,6 +21,7 @@ from twisted.python.failure import Failure
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import SynapseError
 from synapse.events.utils import serialize_event
+from synapse.storage.state import StateFilter
 from synapse.types import RoomStreamToken
 from synapse.util.async_helpers import ReadWriteLock
 from synapse.util.logcontext import run_in_background
@@ -255,16 +256,14 @@ class PaginationHandler(object):
         if event_filter and event_filter.lazy_load_members():
             # TODO: remove redundant members
 
-            types = [
-                (EventTypes.Member, state_key)
-                for state_key in set(
-                    event.sender  # FIXME: we also care about invite targets etc.
-                    for event in events
-                )
-            ]
+            # FIXME: we also care about invite targets etc.
+            state_filter = StateFilter.from_types(
+                (EventTypes.Member, event.sender)
+                for event in events
+            )
 
             state_ids = yield self.store.get_state_ids_for_event(
-                events[0].event_id, types=types,
+                events[0].event_id, state_filter=state_filter,
             )
 
         if state_ids:
@@ -142,10 +142,8 @@ class BaseProfileHandler(BaseHandler):
                 if e.code != 404:
                     logger.exception("Failed to get displayname")
                 raise
-            except Exception:
-                logger.exception("Failed to get displayname")
-            else:
-                defer.returnValue(result["displayname"])
+
+            defer.returnValue(result["displayname"])
 
     @defer.inlineCallbacks
     def set_displayname(self, target_user, requester, new_displayname, by_admin=False):
@@ -199,8 +197,6 @@ class BaseProfileHandler(BaseHandler):
                 if e.code != 404:
                     logger.exception("Failed to get avatar_url")
                 raise
-            except Exception:
-                logger.exception("Failed to get avatar_url")
 
             defer.returnValue(result["avatar_url"])
@@ -119,7 +119,7 @@ class ReceiptsHandler(BaseHandler):
             "receipt_key", max_batch_id, rooms=affected_room_ids
         )
         # Note that the min here shouldn't be relied upon to be accurate.
-        self.hs.get_pusherpool().on_new_receipts(
+        yield self.hs.get_pusherpool().on_new_receipts(
             min_batch_id, max_batch_id, affected_room_ids,
         )
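As with the pusher calls in the federation and message handlers above, the Deferred returned by the pusher pool is now yielded (or returned) rather than dropped, so failures propagate to the caller and the work is tracked to completion. A minimal sketch of the difference inside `@defer.inlineCallbacks` code, using a stand-in for the pusher-pool call:

    from twisted.internet import defer


    def on_new_receipts():
        # Stand-in for the pusher-pool call; fails asynchronously.
        return defer.fail(RuntimeError("pusher exploded"))


    @defer.inlineCallbacks
    def handler():
        # Before: on_new_receipts() discarded the Deferred, so this failure
        # never reached the caller. Yielding it propagates the error like
        # an ordinary exception.
        yield on_new_receipts()


    caught = []
    handler().addErrback(lambda f: caught.append(str(f.value)))
    assert caught == ["pusher exploded"]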
@@ -220,15 +220,42 @@ class RegistrationHandler(BaseHandler):
 
         # auto-join the user to any rooms we're supposed to dump them into
         fake_requester = create_requester(user_id)
+
+        # try to create the room if we're the first user on the server
+        should_auto_create_rooms = False
+        if self.hs.config.autocreate_auto_join_rooms:
+            count = yield self.store.count_all_users()
+            should_auto_create_rooms = count == 1
+
         for r in self.hs.config.auto_join_rooms:
             try:
-                yield self._join_user_to_room(fake_requester, r)
+                if should_auto_create_rooms:
+                    room_alias = RoomAlias.from_string(r)
+                    if self.hs.hostname != room_alias.domain:
+                        logger.warning(
+                            'Cannot create room alias %s, '
+                            'it does not match server domain',
+                            r,
+                        )
+                    else:
+                        # create room expects the localpart of the room alias
+                        room_alias_localpart = room_alias.localpart
+
+                        # getting the RoomCreationHandler during init gives a dependency
+                        # loop
+                        yield self.hs.get_room_creation_handler().create_room(
+                            fake_requester,
+                            config={
+                                "preset": "public_chat",
+                                "room_alias_name": room_alias_localpart
+                            },
+                            ratelimit=False,
+                        )
+                else:
+                    yield self._join_user_to_room(fake_requester, r)
             except Exception as e:
                 logger.error("Failed to join new user to %r: %r", r, e)
 
         # We used to generate default identicons here, but nowadays
         # we want clients to generate their own as part of their branding
         # rather than there being consistent matrix-wide ones, so we don't.
         defer.returnValue((user_id, token))
 
     @defer.inlineCallbacks
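The auto-create branch only creates a room when the alias's domain matches our own hostname, since a server can only create aliases in its own namespace, and `create_room` takes only the localpart. A standalone sketch of the localpart/domain split that `RoomAlias.from_string` performs (an illustrative parser, not Synapse's implementation):

    def split_alias(alias):
        """Split '#room:example.com' into ('room', 'example.com')."""
        if not alias.startswith("#") or ":" not in alias:
            raise ValueError("not a room alias: %r" % (alias,))
        localpart, domain = alias[1:].split(":", 1)
        return localpart, domain


    localpart, domain = split_alias("#general:example.com")
    assert (localpart, domain) == ("general", "example.com")

    # The handler above compares `domain` against hs.hostname and only
    # creates the room (using `localpart` as room_alias_name) on a match.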
@@ -21,7 +21,7 @@ import math
 import string
 from collections import OrderedDict
 
-from six import string_types
+from six import iteritems, string_types
 
 from twisted.internet import defer
 
@@ -32,9 +32,11 @@ from synapse.api.constants import (
     JoinRules,
     RoomCreationPreset,
 )
-from synapse.api.errors import AuthError, Codes, StoreError, SynapseError
+from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, SynapseError
+from synapse.storage.state import StateFilter
 from synapse.types import RoomAlias, RoomID, RoomStreamToken, StreamToken, UserID
 from synapse.util import stringutils
+from synapse.util.async_helpers import Linearizer
 from synapse.visibility import filter_events_for_client
 
 from ._base import BaseHandler
@@ -72,6 +74,334 @@ class RoomCreationHandler(BaseHandler):
 
         self.spam_checker = hs.get_spam_checker()
         self.event_creation_handler = hs.get_event_creation_handler()
+        self.room_member_handler = hs.get_room_member_handler()
+
+        # linearizer to stop two upgrades happening at once
+        self._upgrade_linearizer = Linearizer("room_upgrade_linearizer")
+
+    @defer.inlineCallbacks
+    def upgrade_room(self, requester, old_room_id, new_version):
+        """Replace a room with a new room with a different version
+
+        Args:
+            requester (synapse.types.Requester): the user requesting the upgrade
+            old_room_id (unicode): the id of the room to be replaced
+            new_version (unicode): the new room version to use
+
+        Returns:
+            Deferred[unicode]: the new room id
+        """
+        yield self.ratelimit(requester)
+
+        user_id = requester.user.to_string()
+
+        with (yield self._upgrade_linearizer.queue(old_room_id)):
+            # start by allocating a new room id
+            r = yield self.store.get_room(old_room_id)
+            if r is None:
+                raise NotFoundError("Unknown room id %s" % (old_room_id,))
+            new_room_id = yield self._generate_room_id(
+                creator_id=user_id, is_public=r["is_public"],
+            )
+
+            logger.info("Creating new room %s to replace %s", new_room_id, old_room_id)
+
+            # we create and auth the tombstone event before properly creating the new
+            # room, to check our user has perms in the old room.
+            tombstone_event, tombstone_context = (
+                yield self.event_creation_handler.create_event(
+                    requester, {
+                        "type": EventTypes.Tombstone,
+                        "state_key": "",
+                        "room_id": old_room_id,
+                        "sender": user_id,
+                        "content": {
+                            "body": "This room has been replaced",
+                            "replacement_room": new_room_id,
+                        }
+                    },
+                    token_id=requester.access_token_id,
+                )
+            )
+            yield self.auth.check_from_context(tombstone_event, tombstone_context)
+
+            yield self.clone_existing_room(
+                requester,
+                old_room_id=old_room_id,
+                new_room_id=new_room_id,
+                new_room_version=new_version,
+                tombstone_event_id=tombstone_event.event_id,
+            )
+
+            # now send the tombstone
+            yield self.event_creation_handler.send_nonmember_event(
+                requester, tombstone_event, tombstone_context,
+            )
+
+            old_room_state = yield tombstone_context.get_current_state_ids(self.store)
+
+            # update any aliases
+            yield self._move_aliases_to_new_room(
+                requester, old_room_id, new_room_id, old_room_state,
+            )
+
+            # and finally, shut down the PLs in the old room, and update them in the new
+            # room.
+            yield self._update_upgraded_room_pls(
+                requester, old_room_id, new_room_id, old_room_state,
+            )
+
+            defer.returnValue(new_room_id)
+
+    @defer.inlineCallbacks
+    def _update_upgraded_room_pls(
+        self, requester, old_room_id, new_room_id, old_room_state,
+    ):
+        """Send updated power levels in both rooms after an upgrade
+
+        Args:
+            requester (synapse.types.Requester): the user requesting the upgrade
+            old_room_id (unicode): the id of the room to be replaced
+            new_room_id (unicode): the id of the replacement room
+            old_room_state (dict[tuple[str, str], str]): the state map for the old room
+
+        Returns:
+            Deferred
+        """
+        old_room_pl_event_id = old_room_state.get((EventTypes.PowerLevels, ""))
+
+        if old_room_pl_event_id is None:
+            logger.warning(
+                "Not supported: upgrading a room with no PL event. Not setting PLs "
+                "in old room.",
+            )
+            return
+
+        old_room_pl_state = yield self.store.get_event(old_room_pl_event_id)
+
+        # we try to stop regular users from speaking by setting the PL required
+        # to send regular events and invites to 'Moderator' level. That's normally
+        # 50, but if the default PL in a room is 50 or more, then we set the
+        # required PL above that.
+
+        pl_content = dict(old_room_pl_state.content)
+        users_default = int(pl_content.get("users_default", 0))
+        restricted_level = max(users_default + 1, 50)
+
+        updated = False
+        for v in ("invite", "events_default"):
+            current = int(pl_content.get(v, 0))
+            if current < restricted_level:
+                logger.info(
+                    "Setting level for %s in %s to %i (was %i)",
+                    v, old_room_id, restricted_level, current,
+                )
+                pl_content[v] = restricted_level
+                updated = True
+            else:
+                logger.info(
+                    "Not setting level for %s (already %i)",
+                    v, current,
+                )
+
+        if updated:
+            try:
+                yield self.event_creation_handler.create_and_send_nonmember_event(
+                    requester, {
+                        "type": EventTypes.PowerLevels,
+                        "state_key": '',
+                        "room_id": old_room_id,
+                        "sender": requester.user.to_string(),
+                        "content": pl_content,
+                    }, ratelimit=False,
+                )
+            except AuthError as e:
+                logger.warning("Unable to update PLs in old room: %s", e)
+
+        logger.info("Setting correct PLs in new room")
+        yield self.event_creation_handler.create_and_send_nonmember_event(
+            requester, {
+                "type": EventTypes.PowerLevels,
+                "state_key": '',
+                "room_id": new_room_id,
+                "sender": requester.user.to_string(),
+                "content": old_room_pl_state.content,
+            }, ratelimit=False,
+        )
+
+    @defer.inlineCallbacks
+    def clone_existing_room(
+        self, requester, old_room_id, new_room_id, new_room_version,
+        tombstone_event_id,
+    ):
+        """Populate a new room based on an old room
+
+        Args:
+            requester (synapse.types.Requester): the user requesting the upgrade
+            old_room_id (unicode): the id of the room to be replaced
+            new_room_id (unicode): the id to give the new room (should already have been
+                created with _generate_room_id())
+            new_room_version (unicode): the new room version to use
+            tombstone_event_id (unicode|str): the ID of the tombstone event in the old
+                room.
+        Returns:
+            Deferred[None]
+        """
+        user_id = requester.user.to_string()
+
+        if not self.spam_checker.user_may_create_room(user_id):
+            raise SynapseError(403, "You are not permitted to create rooms")
+
+        creation_content = {
+            "room_version": new_room_version,
+            "predecessor": {
+                "room_id": old_room_id,
+                "event_id": tombstone_event_id,
+            }
+        }
+
+        initial_state = dict()
+
+        types_to_copy = (
+            (EventTypes.JoinRules, ""),
+            (EventTypes.Name, ""),
+            (EventTypes.Topic, ""),
+            (EventTypes.RoomHistoryVisibility, ""),
+            (EventTypes.GuestAccess, ""),
+            (EventTypes.RoomAvatar, ""),
+        )
+
+        old_room_state_ids = yield self.store.get_filtered_current_state_ids(
+            old_room_id, StateFilter.from_types(types_to_copy),
+        )
+        # map from event_id to BaseEvent
+        old_room_state_events = yield self.store.get_events(old_room_state_ids.values())
+
+        for k, old_event_id in iteritems(old_room_state_ids):
+            old_event = old_room_state_events.get(old_event_id)
+            if old_event:
+                initial_state[k] = old_event.content
+
+        yield self._send_events_for_new_room(
+            requester,
+            new_room_id,
+
+            # we expect to override all the presets with initial_state, so this is
+            # somewhat arbitrary.
+            preset_config=RoomCreationPreset.PRIVATE_CHAT,
+
+            invite_list=[],
+            initial_state=initial_state,
+            creation_content=creation_content,
+        )
+
+        # XXX invites/joins
+        # XXX 3pid invites
+
+    @defer.inlineCallbacks
+    def _move_aliases_to_new_room(
+        self, requester, old_room_id, new_room_id, old_room_state,
+    ):
+        directory_handler = self.hs.get_handlers().directory_handler
+
+        aliases = yield self.store.get_aliases_for_room(old_room_id)
+
+        # check to see if we have a canonical alias.
+        canonical_alias = None
+        canonical_alias_event_id = old_room_state.get((EventTypes.CanonicalAlias, ""))
+        if canonical_alias_event_id:
+            canonical_alias_event = yield self.store.get_event(canonical_alias_event_id)
+            if canonical_alias_event:
+                canonical_alias = canonical_alias_event.content.get("alias", "")
+
+        # first we try to remove the aliases from the old room (we suppress sending
+        # the room_aliases event until the end).
+        #
+        # Note that we'll only be able to remove aliases that (a) aren't owned by an AS,
+        # and (b) were created by the user, unless they are a server admin.
+        #
+        # This is probably correct - given we don't allow such aliases to be deleted
+        # normally, it would be odd to allow it in the case of doing a room upgrade -
+        # but it makes the upgrade less effective, and you have to wonder why a room
+        # admin can't remove aliases that point to that room anyway.
+        # (cf https://github.com/matrix-org/synapse/issues/2360)
+        #
+        removed_aliases = []
+        for alias_str in aliases:
+            alias = RoomAlias.from_string(alias_str)
+            try:
+                yield directory_handler.delete_association(
+                    requester, alias, send_event=False,
+                )
+                removed_aliases.append(alias_str)
+            except SynapseError as e:
+                logger.warning(
+                    "Unable to remove alias %s from old room: %s",
+                    alias, e,
+                )
+
+        # if we didn't find any aliases, or couldn't remove anyway, we can skip the rest
+        # of this.
+        if not removed_aliases:
+            return
+
+        try:
+            # this can fail if, for some reason, our user doesn't have perms to send
+            # m.room.aliases events in the old room (note that we've already checked that
+            # they have perms to send a tombstone event, so that's not terribly likely).
+            #
+            # If that happens, it's regrettable, but we should carry on: it's the same
+            # as when you remove an alias from the directory normally - it just means that
+            # the aliases event gets out of sync with the directory
+            # (cf https://github.com/vector-im/riot-web/issues/2369)
+            yield directory_handler.send_room_alias_update_event(
+                requester, old_room_id,
+            )
+        except AuthError as e:
+            logger.warning(
+                "Failed to send updated alias event on old room: %s", e,
+            )
+
+        # we can now add any aliases we successfully removed to the new room.
+        for alias in removed_aliases:
+            try:
+                yield directory_handler.create_association(
+                    requester, RoomAlias.from_string(alias),
+                    new_room_id, servers=(self.hs.hostname, ),
+                    send_event=False,
+                )
+                logger.info("Moved alias %s to new room", alias)
+            except SynapseError as e:
+                # I'm not really expecting this to happen, but it could if the spam
+                # checking module decides it shouldn't, or similar.
+                logger.error(
+                    "Error adding alias %s to new room: %s",
+                    alias, e,
+                )
+
+        try:
+            if canonical_alias and (canonical_alias in removed_aliases):
+                yield self.event_creation_handler.create_and_send_nonmember_event(
+                    requester,
+                    {
+                        "type": EventTypes.CanonicalAlias,
+                        "state_key": "",
+                        "room_id": new_room_id,
+                        "sender": requester.user.to_string(),
+                        "content": {"alias": canonical_alias, },
+                    },
+                    ratelimit=False
+                )
+
+            yield directory_handler.send_room_alias_update_event(
+                requester, new_room_id,
+            )
+        except SynapseError as e:
+            # again I'm not really expecting this to fail, but if it does, I'd rather
+            # we returned the new room to the client at this point.
+            logger.error(
+                "Unable to send updated alias events in new room: %s", e,
+            )
+
     @defer.inlineCallbacks
     def create_room(self, requester, config, ratelimit=True,
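The `_update_upgraded_room_pls` helper added above mutes the old room by raising the power level required to send events or invites to `max(users_default + 1, 50)`: normally Moderator level (50), but strictly above the users' default when that default is already 50 or more. A couple of worked values, as a self-contained sketch:

    def restricted_level(users_default):
        # Moderator level is normally 50, but if ordinary users already have
        # PL >= 50 we must go one above their default to actually mute them.
        return max(users_default + 1, 50)


    assert restricted_level(0) == 50    # typical room: moderators-only
    assert restricted_level(50) == 51   # default PL 50: need to go above it
    assert restricted_level(75) == 76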
@@ -164,36 +494,16 @@ class RoomCreationHandler(BaseHandler):
         visibility = config.get("visibility", None)
         is_public = visibility == "public"
 
-        # autogen room IDs and try to create it. We may clash, so just
-        # try a few times till one goes through, giving up eventually.
-        attempts = 0
-        room_id = None
-        while attempts < 5:
-            try:
-                random_string = stringutils.random_string(18)
-                gen_room_id = RoomID(
-                    random_string,
-                    self.hs.hostname,
-                )
-                yield self.store.store_room(
-                    room_id=gen_room_id.to_string(),
-                    room_creator_user_id=user_id,
-                    is_public=is_public
-                )
-                room_id = gen_room_id.to_string()
-                break
-            except StoreError:
-                attempts += 1
-        if not room_id:
-            raise StoreError(500, "Couldn't generate a room ID.")
+        room_id = yield self._generate_room_id(creator_id=user_id, is_public=is_public)
 
         if room_alias:
             directory_handler = self.hs.get_handlers().directory_handler
             yield directory_handler.create_association(
-                user_id=user_id,
+                requester=requester,
                 room_id=room_id,
                 room_alias=room_alias,
                 servers=[self.hs.hostname],
                 send_event=False,
             )
 
         preset_config = config.get(
@@ -214,18 +524,15 @@ class RoomCreationHandler(BaseHandler):
         # override any attempt to set room versions via the creation_content
         creation_content["room_version"] = room_version
 
-        room_member_handler = self.hs.get_room_member_handler()
-
         yield self._send_events_for_new_room(
             requester,
             room_id,
-            room_member_handler,
             preset_config=preset_config,
             invite_list=invite_list,
             initial_state=initial_state,
             creation_content=creation_content,
             room_alias=room_alias,
-            power_level_content_override=config.get("power_level_content_override", {}),
+            power_level_content_override=config.get("power_level_content_override"),
             creator_join_profile=creator_join_profile,
         )
@@ -261,7 +568,7 @@ class RoomCreationHandler(BaseHandler):
             if is_direct:
                 content["is_direct"] = is_direct
 
-            yield room_member_handler.update_membership(
+            yield self.room_member_handler.update_membership(
                 requester,
                 UserID.from_string(invitee),
                 room_id,
@@ -289,7 +596,7 @@ class RoomCreationHandler(BaseHandler):
         if room_alias:
             result["room_alias"] = room_alias.to_string()
             yield directory_handler.send_room_alias_update_event(
-                requester, user_id, room_id
+                requester, room_id
             )
 
         defer.returnValue(result)
@@ -299,14 +606,13 @@ class RoomCreationHandler(BaseHandler):
             self,
             creator,  # A Requester object.
             room_id,
-            room_member_handler,
             preset_config,
             invite_list,
             initial_state,
             creation_content,
-            room_alias,
-            power_level_content_override,
-            creator_join_profile,
+            room_alias=None,
+            power_level_content_override=None,
+            creator_join_profile=None,
     ):
         def create(etype, content, **kwargs):
             e = {
@@ -322,6 +628,7 @@ class RoomCreationHandler(BaseHandler):
         @defer.inlineCallbacks
         def send(etype, content, **kwargs):
             event = create(etype, content, **kwargs)
+            logger.info("Sending %s in new room", etype)
             yield self.event_creation_handler.create_and_send_nonmember_event(
                 creator,
                 event,
@@ -344,7 +651,8 @@ class RoomCreationHandler(BaseHandler):
             content=creation_content,
         )
 
-        yield room_member_handler.update_membership(
+        logger.info("Sending %s in new room", EventTypes.Member)
+        yield self.room_member_handler.update_membership(
             creator,
             creator.user,
             room_id,
@@ -386,7 +694,8 @@ class RoomCreationHandler(BaseHandler):
         for invitee in invite_list:
             power_level_content["users"][invitee] = 100
 
-        power_level_content.update(power_level_content_override)
+        if power_level_content_override:
+            power_level_content.update(power_level_content_override)
 
         yield send(
             etype=EventTypes.PowerLevels,
@@ -425,6 +734,30 @@ class RoomCreationHandler(BaseHandler):
             content=content,
         )
 
+    @defer.inlineCallbacks
+    def _generate_room_id(self, creator_id, is_public):
+        # autogen room IDs and try to create it. We may clash, so just
+        # try a few times till one goes through, giving up eventually.
+        attempts = 0
+        while attempts < 5:
+            try:
+                random_string = stringutils.random_string(18)
+                gen_room_id = RoomID(
+                    random_string,
+                    self.hs.hostname,
+                ).to_string()
+                if isinstance(gen_room_id, bytes):
+                    gen_room_id = gen_room_id.decode('utf-8')
+                yield self.store.store_room(
+                    room_id=gen_room_id,
+                    room_creator_user_id=creator_id,
+                    is_public=is_public,
+                )
+                defer.returnValue(gen_room_id)
+            except StoreError:
+                attempts += 1
+        raise StoreError(500, "Couldn't generate a room ID.")
+
 
 class RoomContextHandler(object):
     def __init__(self, hs):
@@ -488,23 +821,24 @@ class RoomContextHandler(object):
         else:
             last_event_id = event_id
 
-        types = None
-        filtered_types = None
         if event_filter and event_filter.lazy_load_members():
-            members = set(ev.sender for ev in itertools.chain(
-                results["events_before"],
-                (results["event"],),
-                results["events_after"],
-            ))
-            filtered_types = [EventTypes.Member]
-            types = [(EventTypes.Member, member) for member in members]
+            state_filter = StateFilter.from_lazy_load_member_list(
+                ev.sender
+                for ev in itertools.chain(
+                    results["events_before"],
+                    (results["event"],),
+                    results["events_after"],
+                )
+            )
+        else:
+            state_filter = StateFilter.all()
 
         # XXX: why do we return the state as of the last event rather than the
         # first? Shouldn't we be consistent with /sync?
         # https://github.com/matrix-org/matrix-doc/issues/687
 
         state = yield self.store.get_state_for_events(
-            [last_event_id], types, filtered_types=filtered_types,
+            [last_event_id], state_filter=state_filter,
         )
         results["state"] = list(state[last_event_id].values())
@@ -16,7 +16,7 @@
 import logging
 from collections import namedtuple
 
-from six import iteritems
+from six import PY3, iteritems
 from six.moves import range
 
 import msgpack
@@ -444,9 +444,16 @@ class RoomListNextBatch(namedtuple("RoomListNextBatch", (
 
     @classmethod
     def from_token(cls, token):
+        if PY3:
+            # The argument raw=False is only available on new versions of
+            # msgpack, and only really needed on Python 3. Gate it behind
+            # a PY3 check to avoid causing issues on Debian-packaged versions.
+            decoded = msgpack.loads(decode_base64(token), raw=False)
+        else:
+            decoded = msgpack.loads(decode_base64(token))
         return RoomListNextBatch(**{
             cls.REVERSE_KEY_DICT[key]: val
-            for key, val in msgpack.loads(decode_base64(token)).items()
+            for key, val in decoded.items()
         })
 
     def to_token(self):
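`raw=False` makes msgpack deserialise strings (including map keys) as `str` rather than `bytes` on Python 3, which the `REVERSE_KEY_DICT` lookup depends on. A round-trip sketch of the token encoding; the standard `base64` module stands in here for Synapse's unpadded-base64 helpers, and the field names are invented for illustration:

    import base64

    import msgpack

    token_fields = {"current_limit": 20, "direction_is_forward": True}

    # to_token(): serialise and base64-encode the pagination state
    token = base64.b64encode(msgpack.dumps(token_fields))

    # from_token(): raw=False gives str keys back on Python 3, so they can
    # be looked up in a dict keyed by str
    decoded = msgpack.loads(base64.b64decode(token), raw=False)
    assert decoded == token_fields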
@@ -28,8 +28,9 @@ from twisted.internet import defer
 import synapse.server
 import synapse.types
 from synapse.api.constants import EventTypes, Membership
-from synapse.api.errors import AuthError, Codes, SynapseError
-from synapse.types import RoomID, UserID
+from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
+from synapse.types import RoomAlias, RoomID, UserID
 from synapse.util import logcontext
 from synapse.util.async_helpers import Linearizer
 from synapse.util.distributor import user_joined_room, user_left_room
@@ -416,6 +417,10 @@ class RoomMemberHandler(object):
                 ret = yield self._remote_join(
                     requester, remote_room_hosts, room_id, target, content
                 )
+                logcontext.run_in_background(
+                    self._send_merged_user_invites,
+                    requester, room_id,
+                )
                 defer.returnValue(ret)
 
             elif effective_membership_state == Membership.LEAVE:
@@ -450,8 +455,58 @@ class RoomMemberHandler(object):
             prev_events_and_hashes=prev_events_and_hashes,
             content=content,
         )
+        if effective_membership_state == Membership.JOIN:
+            logcontext.run_in_background(
+                self._send_merged_user_invites,
+                requester, room_id,
+            )
         defer.returnValue(res)
 
+    @defer.inlineCallbacks
+    def _send_merged_user_invites(self, requester, room_id):
+        try:
+            profile_alias = "#_profile_%s:%s" % (
+                requester.user.localpart, self.hs.hostname,
+            )
+            profile_alias = RoomAlias.from_string(profile_alias)
+            try:
+                profile_room_id, remote_room_hosts = yield self.lookup_room_alias(
+                    profile_alias,
+                )
+            except NotFoundError:
+                logger.info(
+                    "Not sending merged invites as %s does not exist",
+                    profile_alias
+                )
+                return
+
+            linked_accounts = yield self.state_handler.get_current_state(
+                room_id=profile_room_id.to_string(),
+                event_type="m.linked_accounts",
+                state_key="",
+            )
+            if not linked_accounts or not linked_accounts.content.get('all_children'):
+                return
+            for child_id in linked_accounts.content['all_children']:
+                child = UserID.from_string(child_id)
+                if self.hs.is_mine(child) or child_id == requester.user.to_string():
+                    # TODO: Handle auto-invite for local users (not a priority)
+                    continue
+                try:
+                    yield self.update_membership(
+                        requester=requester,
+                        target=child,
+                        room_id=room_id,
+                        action="invite",
+                    )
+                except Exception:
+                    logger.exception("Failed to invite %s to %s", child_id, room_id)
+        except Exception:
+            logger.exception(
+                "Failed to send invites to children of %s in %s",
+                requester.user.to_string(), room_id,
+            )
+
     @defer.inlineCallbacks
     def send_membership_event(
         self,
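`_send_merged_user_invites` is experiment-branch logic: after a join it resolves the user's profile room via the `#_profile_<localpart>:<server>` alias and invites each remote user listed in that room's `m.linked_accounts` state event. A sketch of the content shape this code expects and the selection rule it applies; the event content is inferred from the accessors above, not from a published spec, and the domain-suffix check below is a simplification of `hs.is_mine`:

    # Expected content of the m.linked_accounts state event (state_key "")
    linked_accounts_content = {
        "all_children": [
            "@alice:local.example.com",   # local: skipped (TODO in the code)
            "@alice:remote.example.org",  # remote: gets an invite
        ],
    }


    def children_to_invite(content, requester_id, local_domain):
        # Mirror of the loop above: skip local users and the requester itself.
        return [
            child_id
            for child_id in content.get("all_children", [])
            if not child_id.endswith(":" + local_domain)
            and child_id != requester_id
        ]


    assert children_to_invite(
        linked_accounts_content, "@alice:local.example.com", "local.example.com"
    ) == ["@alice:remote.example.org"]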
@@ -578,7 +633,7 @@ class RoomMemberHandler(object):
         mapping = yield directory_handler.get_association(room_alias)
 
         if not mapping:
-            raise SynapseError(404, "No such room alias")
+            raise NotFoundError("No such room alias")
 
         room_id = mapping["room_id"]
         servers = mapping["servers"]
@@ -24,6 +24,7 @@ from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import SynapseError
 from synapse.api.filtering import Filter
 from synapse.events.utils import serialize_event
+from synapse.storage.state import StateFilter
 from synapse.visibility import filter_events_for_client
 
 from ._base import BaseHandler
@@ -324,9 +325,12 @@ class SearchHandler(BaseHandler):
             else:
                 last_event_id = event.event_id
 
+            state_filter = StateFilter.from_types(
+                [(EventTypes.Member, sender) for sender in senders]
+            )
+
             state = yield self.store.get_state_for_event(
-                last_event_id,
-                types=[(EventTypes.Member, sender) for sender in senders]
+                last_event_id, state_filter
             )
 
             res["profile_info"] = {
Some files were not shown because too many files have changed in this diff.