Compare commits: v1.68.0...release-v1 (332 commits)
| SHA1 |
|---|
| 6a5e42121c |
| f0dec49f01 |
| 1d1ab0e41f |
| 404404733c |
| af592d7d4c |
| b00294b8b1 |
| 78909f5028 |
| 2e2cffe1a2 |
| b1379a7ca8 |
| 6546308c1e |
| 051402d1df |
| ddbba28d52 |
| 9473ebb9e7 |
| b922b54b61 |
| dbfc9b803e |
| cc3a52b33d |
| 15bdb0da52 |
| b2890369cd |
| 278f8543be |
| 00d108fce4 |
| 2bb2c32e8e |
| 7911e2835d |
| 730b13dbc9 |
| 81815e0561 |
| 453914b472 |
| d1efa7b3a4 |
| 1335367ca7 |
| 44f0d573cf |
| e0d9013adf |
| cc3a04876f |
| 6a6e1e8c07 |
| aa70556699 |
| 67583281e3 |
| 1357ae869f |
| 4dc05f3019 |
| cbe01ccc3f |
| 40fa8294e3 |
| 0d59ae706a |
| 0cfbb35131 |
| 04fd6221de |
| 86b7d9b886 |
| 8756d5c87e |
| 23fa636ed7 |
| d902181de9 |
| 85fcbba595 |
| 9192d74b0b |
| 2d0ba3f89a |
| 0f1befd0b1 |
| c9dffd5b33 |
| d125919963 |
| 8c8fcdb87d |
| 8c94dd3a27 |
| 581b37b5d6 |
| 19c0e55ef7 |
| 872ea2f4de |
| 1e73effebf |
| 09b588854e |
| 386e72a22d |
| c6987f65fe |
| 1469fed0e3 |
| 6c82b3759f |
| 94f239d911 |
| 673970bb5a |
| cb76892c7d |
| cd02bfc026 |
| 5f06488418 |
| 278b530875 |
| b7a7ff6ee3 |
| 1d45ad8b2a |
| d24346f530 |
| 1c642156d7 |
| 5f77b74215 |
| 4dd7aa371b |
| 1433b5d5b6 |
| fab495a9e1 |
| cacda2d1f5 |
| 7fe3b908a5 |
| 755bfeee3a |
| da2c93d4b6 |
| 09c602b558 |
| 70b3396506 |
| 3841900aaa |
| 0b7830e457 |
| 695a85d1bc |
| fe50738e59 |
| 7f6f3ff337 |
| 6fcdda68d3 |
| 15a240f135 |
| 04d7f56f53 |
| fa8616e65c |
| 2a76a7369f |
| 1c777ef1e8 |
| 06b0c4edfe |
| 85aa0f513b |
| 847e2393f3 |
| 2b940d2668 |
| f91b547a07 |
| 4eaf3eb840 |
| 844ce47b9b |
| b951d6bd4c |
| dbf18f514e |
| e440f9674a |
| 8e50299d8b |
| a8677bc9b8 |
| 6c5082f3e0 |
| c3a4780080 |
| 4af93bd7f6 |
| dc02d9f8c5 |
| 828b5502cf |
| 2c63cdcc3f |
| 6fee2f49f3 |
| c101fc6568 |
| 130668b66c |
| 4283bd1cf9 |
| 6b24235142 |
| 2fa1bf598d |
| ccce8cdfc5 |
| 2c2c3f8b2c |
| cd01a1d3b5 |
| 1eb8dcf4c9 |
| 6b097a3e17 |
| 40bb37eb27 |
| 616dcc1d18 |
| 8cd22674a1 |
| c75836fe77 |
| bc2bd92b93 |
| d1bdeccb50 |
| b43be004b4 |
| d241a1350d |
| 022f25b309 |
| 5a983cccfb |
| 97b3d037c0 |
| 126a15794c |
| c7446906bd |
| c3e4edb4d6 |
| 9ff4155f6c |
| 6e0dde3215 |
| 424d1d28cc |
| f3f303aa22 |
| 29ee4b6698 |
| 16c5d95b59 |
| 2019b60f3b |
| 7d59a515bb |
| 3bbe532abb |
| b6baa46db0 |
| e6e876b9b1 |
| 87099b6ea5 |
| e4e55f8eef |
| 9c23442ac9 |
| c152e58116 |
| 10a432a5f0 |
| 821f74a8c0 |
| 19eb23bf32 |
| c604d2c218 |
| f9bc5428c4 |
| 09be8ab5f9 |
| 3f057e4c54 |
| a86b2f6837 |
| 6a92944854 |
| 6136768e76 |
| 02086e1da0 |
| a9934d48c1 |
| 422cff7df6 |
| 17c031b251 |
| d94bcbced3 |
| e580f03e02 |
| 544cc400e3 |
| 475bc8acb9 |
| 1bf2832714 |
| e03d7c5fd0 |
| ab8047b4bf |
| 8074430d25 |
| dc37b68a25 |
| 8faf7245fd |
| f1673866ed |
| cb72c65609 |
| 5e2cfb64d7 |
| c0e868e423 |
| 00c93d2e7e |
| 66a7857334 |
| 0adeccafc6 |
| 2295095c97 |
| 1fa2e58772 |
| d6ae14e60e |
| 0c853e0970 |
| cb20b885cb |
| 44741aa85b |
| b753f63000 |
| a98ac3cc1e |
| b42177f94f |
| bb69dbf3e3 |
| e9a0419c8d |
| 720b12c209 |
| 79c592cec6 |
| f6f6bdc7b3 |
| a09a7d40e3 |
| 7b7478e8b6 |
| 51436c8dd5 |
| 0b037d6c91 |
| e3d4755454 |
| dcced5a8d7 |
| c3b0e5e178 |
| 0506bb100e |
| 2b6d41ebd6 |
| 3e74ad20db |
| a7ba457b2b |
| f0019f3f3b |
| d8663f5e63 |
| 04ce2edddc |
| b4ec4f5e71 |
| 94017e867d |
| 17bc4ecff2 |
| 6ec9fcc808 |
| b95d8b463e |
| b64b7297f2 |
| 0e5106a0cc |
| 23a4973b83 |
| e70c6b720e |
| 27fa0fa698 |
| 471e13a103 |
| 1613857b90 |
| 70a4317692 |
| 92ae90aca2 |
| 0c9c159f45 |
| a34638c126 |
| 5a6d025246 |
| b381701f8c |
| 4cb0f5a99d |
| a2c7259827 |
| 8e8638ac40 |
| b706111b78 |
| d42541733d |
| 9f218b73e9 |
| 2d5ce8c087 |
| 4cceb6ba66 |
| ddcb52e455 |
| 6c85796769 |
| 3ac0e76b79 |
| a52d27a68b |
| 2c237debd3 |
| cc8a3582e0 |
| 719488dda8 |
| a423f45294 |
| 7f4f2a3782 |
| 606b2d9009 |
| d65862c41f |
| 061739d10f |
| 7a441c4f30 |
| 343038c3c3 |
| 2769ef4df1 |
| a52c40e2a6 |
| ad4c14e4b0 |
| 535f8c8f7d |
| 285d72556b |
| 8e52cb0bce |
| 6d543d6d9f |
| b2aadd81a8 |
| 285b9e9b6c |
| 5507bfa769 |
| 4490697b98 |
| 3dfc4a08dc |
| e8f30a76ca |
| 1cc2ca81ba |
| 6f0c3e669d |
| 15754d720f |
| ebd9e2dac6 |
| a466164647 |
| be76cd8200 |
| 8625ad8099 |
| e5fdf16d46 |
| 568016929f |
| 99a7e7e023 |
| 73ecff7e9e |
| 5f659d4a88 |
| df8b91ed2b |
| d768c50c0e |
| 1386ce4735 |
| 7766bd5b35 |
| 25c0e69392 |
| 220b21936e |
| 4b17a5ace8 |
| 6caa303083 |
| 5c429b86b4 |
| 8ab16a92ed |
| a2cf66a94d |
| 29269d9d3f |
| 35e9d6a616 |
| f5aaa55e27 |
| 87fe9db467 |
| 299b00d968 |
| ac1b0d03a5 |
| 9bd442e202 |
| 42dd992bb7 |
| 50c92f3a69 |
| a95ce6dd08 |
| e8318a4333 |
| 85e161631a |
| d6b85a2a7d |
| 2fae1a3f78 |
| 58ab96747c |
| 0a38c7ec6d |
| 41461fd4d6 |
| 6b4593a80f |
| f34b0bc262 |
| c5defa4cba |
| dcdd50e458 |
| ac1a31740b |
| f49f73c0da |
| db868db594 |
| e3512a7719 |
| efd108b45d |
| 03c2bfb7f8 |
| c06b2b7142 |
| ac7e5683d6 |
| c9316f9f76 |
| f7c89c44c5 |
| 8c3dcdf1b9 |
| b7272b73aa |
| 1a1abdda42 |
| efabf44c76 |
| ccca14140a |
| 0fd2f2d460 |
| 269eddad6f |
| 8ae42ab8fa |
| 6bd8763804 |
| 16e1a9d9a7 |
| e0804ef898 |
| a35842caec |
| 2b522cceb6 |
| 85fc7ea1a1 |
| fff9b955fa |
| 42d261c32f |
.ci/scripts/auditwheel_wrapper.py (new executable file, 132 lines)

```python
#!/usr/bin/env python
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Wraps `auditwheel repair` to first check if we're repairing a potentially abi3
# compatible wheel, if so rename the wheel before repairing it.

import argparse
import os
import subprocess
from typing import Optional
from zipfile import ZipFile

from packaging.tags import Tag
from packaging.utils import parse_wheel_filename
from packaging.version import Version


def check_is_abi3_compatible(wheel_file: str) -> None:
    """Check the contents of the built wheel for any `.so` files that are *not*
    abi3 compatible.
    """

    with ZipFile(wheel_file, "r") as wheel:
        for file in wheel.namelist():
            if not file.endswith(".so"):
                continue

            if not file.endswith(".abi3.so"):
                raise Exception(f"Found non-abi3 lib: {file}")


def cpython(wheel_file: str, name: str, version: Version, tag: Tag) -> str:
    """Replaces the cpython wheel file with an ABI3 compatible wheel"""

    if tag.abi == "abi3":
        # Nothing to do.
        return wheel_file

    check_is_abi3_compatible(wheel_file)

    abi3_tag = Tag(tag.interpreter, "abi3", tag.platform)

    dirname = os.path.dirname(wheel_file)
    new_wheel_file = os.path.join(
        dirname,
        f"{name}-{version}-{abi3_tag}.whl",
    )

    os.rename(wheel_file, new_wheel_file)

    print("Renamed wheel to", new_wheel_file)

    return new_wheel_file


def main(wheel_file: str, dest_dir: str, archs: Optional[str]) -> None:
    """Entry point"""

    # Parse the wheel file name into its parts. Note that `parse_wheel_filename`
    # normalizes the package name (i.e. it converts matrix_synapse ->
    # matrix-synapse), which is not what we want.
    _, version, build, tags = parse_wheel_filename(os.path.basename(wheel_file))
    name = os.path.basename(wheel_file).split("-")[0]

    if len(tags) != 1:
        # We expect only a wheel file with only a single tag
        raise Exception(f"Unexpectedly found multiple tags: {tags}")

    tag = next(iter(tags))

    if build:
        # We don't use build tags in Synapse
        raise Exception(f"Unexpected build tag: {build}")

    # If the wheel is for cpython then convert it into an abi3 wheel.
    if tag.interpreter.startswith("cp"):
        wheel_file = cpython(wheel_file, name, version, tag)

    # Finally, repair the wheel.
    if archs is not None:
        # If we are given archs then we are on macos and need to use
        # `delocate-listdeps`.
        subprocess.run(["delocate-listdeps", wheel_file], check=True)
        subprocess.run(
            ["delocate-wheel", "--require-archs", archs, "-w", dest_dir, wheel_file],
            check=True,
        )
    else:
        subprocess.run(["auditwheel", "repair", "-w", dest_dir, wheel_file], check=True)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tag wheel as abi3 and repair it.")

    parser.add_argument(
        "--wheel-dir",
        "-w",
        metavar="WHEEL_DIR",
        help="Directory to store delocated wheels",
        required=True,
    )

    parser.add_argument(
        "--require-archs",
        metavar="archs",
        default=None,
    )

    parser.add_argument(
        "wheel_file",
        metavar="WHEEL_FILE",
    )

    args = parser.parse_args()

    wheel_file = args.wheel_file
    wheel_dir = args.wheel_dir
    archs = args.require_archs

    main(wheel_file, wheel_dir, archs)
```
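As a sketch of the renaming this wrapper performs, the same `packaging` helpers can be used standalone; the wheel filename below is a hypothetical example of the shape the wrapper receives, not output from this CI run:

```python
from packaging.tags import Tag
from packaging.utils import parse_wheel_filename

# Hypothetical cibuildwheel output name.
filename = "matrix_synapse-1.71.0-cp37-cp37m-manylinux_2_17_x86_64.whl"

_, version, _, tags = parse_wheel_filename(filename)
tag = next(iter(tags))  # Tag("cp37", "cp37m", "manylinux_2_17_x86_64")

# Re-tag against the stable ABI; str(Tag) renders "interpreter-abi-platform".
abi3_tag = Tag(tag.interpreter, "abi3", tag.platform)
print(f"matrix_synapse-{version}-{abi3_tag}.whl")
# -> matrix_synapse-1.71.0-cp37-abi3-manylinux_2_17_x86_64.whl
```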
.ci/scripts/calculate_jobs.py

```diff
@@ -18,6 +18,13 @@
 import json
 import os
 
+
+def set_output(key: str, value: str):
+    # See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-an-output-parameter
+    with open(os.environ["GITHUB_OUTPUT"], "at") as f:
+        print(f"{key}={value}", file=f)
+
+
 IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")
 
 # First calculate the various trial jobs.
@@ -39,7 +46,7 @@ if not IS_PR:
             "database": "sqlite",
             "extras": "all",
         }
-        for version in ("3.8", "3.9", "3.10")
+        for version in ("3.8", "3.9", "3.10", "3.11")
     )
 
 
@@ -55,7 +62,7 @@ trial_postgres_tests = [
 if not IS_PR:
     trial_postgres_tests.append(
         {
-            "python-version": "3.10",
+            "python-version": "3.11",
            "database": "postgres",
            "postgres-version": "14",
            "extras": "all",
@@ -81,7 +88,7 @@ print("::endgroup::")
 test_matrix = json.dumps(
     trial_sqlite_tests + trial_postgres_tests + trial_no_extra_tests
 )
-print(f"::set-output name=trial_test_matrix::{test_matrix}")
+set_output("trial_test_matrix", test_matrix)
 
 
 # First calculate the various sytest jobs.
@@ -125,4 +132,4 @@ print(json.dumps(sytest_tests, indent=4))
 print("::endgroup::")
 
 test_matrix = json.dumps(sytest_tests)
-print(f"::set-output name=sytest_test_matrix::{test_matrix}")
+set_output("sytest_test_matrix", test_matrix)
```
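The effect of the version bump on the emitted matrix can be previewed in isolation; a minimal sketch using the same dict shape as the hunk above (the surrounding job definitions are assumed):

```python
import json

# Mirrors the full-run (non-PR) sqlite matrix, now including Python 3.11.
trial_sqlite_tests = [
    {"python-version": version, "database": "sqlite", "extras": "all"}
    for version in ("3.8", "3.9", "3.10", "3.11")
]
print(json.dumps(trial_sqlite_tests, indent=4))
```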
.ci/scripts/setup_complement_prerequisites.sh

```diff
@@ -21,7 +21,7 @@ endblock
 
 block Install Complement Dependencies
   sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev
-  go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest
+  go get -v github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest
 endblock
 
 block Install custom gotestfmt template
```
.dockerignore

```diff
@@ -9,6 +9,7 @@
 !pyproject.toml
 !poetry.lock
 !Cargo.lock
 !Cargo.toml
+!build_rust.py
 
 rust/target
```
.flake8 (9 lines changed)

```diff
@@ -8,4 +8,11 @@
 # E203: whitespace before ':' (which is contrary to pep8?)
 # E731: do not assign a lambda expression, use a def
 # E501: Line too long (black enforces this for us)
-ignore=W503,W504,E203,E731,E501
+#
+# flake8-bugbear runs extra checks. Its error codes are described at
+# https://github.com/PyCQA/flake8-bugbear#list-of-warnings
+# B019: Use of functools.lru_cache or functools.cache on methods can lead to memory leaks
+# B023: Functions defined inside a loop must not use variables redefined in the loop
+# B024: Abstract base class with no abstract method.
+
+ignore=W503,W504,E203,E731,E501,B019,B023,B024
```
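B023 is the subtlest of the newly ignored bugbear codes; a minimal sketch of the pattern it flags:

```python
# Functions defined inside a loop capture the loop *variable*,
# not its value at definition time.
funcs = [lambda: i for i in range(3)]
print([f() for f in funcs])  # [2, 2, 2], not [0, 1, 2]

# Binding the current value as a default argument fixes it:
funcs = [lambda i=i: i for i in range(3)]
print([f() for f in funcs])  # [0, 1, 2]
```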
.github/dependabot.yml (new file, 22 lines)

```yaml
version: 2
updates:
  - # "pip" is the correct setting for poetry, per https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem
    package-ecosystem: "pip"
    directory: "/"
    schedule:
      interval: "weekly"

  - package-ecosystem: "docker"
    directory: "/docker"
    schedule:
      interval: "weekly"

  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"

  - package-ecosystem: "cargo"
    directory: "/"
    schedule:
      interval: "weekly"
```
.github/workflows/dependabot_changelog.yml (new file, 46 lines)

```yaml
name: Write changelog for dependabot PR
on:
  pull_request:
    types:
      - opened
      - reopened  # For debugging!

permissions:
  # Needed to be able to push the commit. See
  # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions#enable-auto-merge-on-a-pull-request
  # for a similar example
  contents: write

jobs:
  add-changelog:
    runs-on: 'ubuntu-latest'
    if: ${{ github.actor == 'dependabot[bot]' }}
    steps:
      - uses: actions/checkout@v3
        with:
          ref: ${{ github.event.pull_request.head.ref }}
      - name: Write, commit and push changelog
        run: |
          echo "${{ github.event.pull_request.title }}." > "changelog.d/${{ github.event.pull_request.number }}".misc
          git add changelog.d
          git config user.email "github-actions[bot]@users.noreply.github.com"
          git config user.name "GitHub Actions"
          git commit -m "Changelog"
          git push
        shell: bash
      # The `git push` above does not trigger CI on the dependabot PR.
      #
      # By default, workflows can't trigger other workflows when they're just using the
      # default `GITHUB_TOKEN` access token. (This is intended to stop you from writing
      # recursive workflow loops by accident, because that'll get very expensive very
      # quickly.) Instead, you have to manually call out to another workflow, or else
      # make your changes (i.e. the `git push` above) using a personal access token.
      # See
      # https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow
      #
      # I have tried and failed to find a way to trigger CI on the "merge ref" of the PR.
      # See git commit history for previous attempts. If anyone desperately wants to try
      # again in the future, make a matrix-bot account and use its access token to git push.

# THIS WORKFLOW HAS WRITE PERMISSIONS---do not add other jobs here unless they
# are sufficiently locked down to dependabot only as above.
```
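A minimal sketch of the changelog file the `run` step above produces; the PR number and title are example values taken from the dependency list later in this changelog:

```python
from pathlib import Path

# Example values; the workflow takes these from the pull_request event payload.
pr_number = 14278
pr_title = "Bump anyhow from 1.0.65 to 1.0.66"

Path("changelog.d").mkdir(exist_ok=True)
Path(f"changelog.d/{pr_number}.misc").write_text(f"{pr_title}.\n")
# Produces changelog.d/14278.misc containing "Bump anyhow from 1.0.65 to 1.0.66."
```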
.github/workflows/docker.yml (15 lines changed)

```diff
@@ -17,19 +17,19 @@ jobs:
     steps:
       - name: Set up QEMU
         id: qemu
-        uses: docker/setup-qemu-action@v1
+        uses: docker/setup-qemu-action@v2
         with:
           platforms: arm64
 
       - name: Set up Docker Buildx
         id: buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
 
       - name: Inspect builder
         run: docker buildx inspect
 
       - name: Log in to DockerHub
-        uses: docker/login-action@v1
+        uses: docker/login-action@v2
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
@@ -48,10 +48,15 @@ jobs:
           type=pep440,pattern={{raw}}
 
       - name: Build and push all platforms
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@v3
         with:
           push: true
           labels: "gitsha1=${{ github.sha }}"
           tags: "${{ steps.set-tag.outputs.tags }}"
           file: "docker/Dockerfile"
           platforms: linux/amd64,linux/arm64
+
+          # arm64 builds OOM without the git fetch setting. c.f.
+          # https://github.com/rust-lang/cargo/issues/10583
+          build-args: |
+            CARGO_NET_GIT_FETCH_WITH_CLI=true
```
.github/workflows/docs.yaml (8 lines changed)

```diff
@@ -17,10 +17,10 @@ jobs:
     name: GitHub Pages
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
 
       - name: Setup mdbook
-        uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14
+        uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0
         with:
           mdbook-version: '0.4.17'
 
@@ -54,11 +54,11 @@ jobs:
           esac
 
           # finally, set the 'branch-version' var.
-          echo "::set-output name=branch-version::$branch"
+          echo "branch-version=$branch" >> "$GITHUB_OUTPUT"
 
       # Deploy to the target directory.
       - name: Deploy to gh pages
-        uses: peaceiris/actions-gh-pages@068dc23d9710f1ba62e86896f84735d869951305 # v3.8.0
+        uses: peaceiris/actions-gh-pages@de7ea6f8efb354206b205ef54722213d99067935 # v3.9.0
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           publish_dir: ./book
```
.github/workflows/latest_deps.yml (19 lines changed)

```diff
@@ -25,7 +25,7 @@ jobs:
   mypy:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - name: Install Rust
         uses: actions-rs/toolchain@v1
         with:
@@ -59,7 +59,7 @@ jobs:
           postgres-version: "14"
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
 
       - name: Install Rust
         uses: actions-rs/toolchain@v1
@@ -76,7 +76,7 @@ jobs:
             -e POSTGRES_PASSWORD=postgres \
             -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
             postgres:${{ matrix.postgres-version }}
-      - uses: actions/setup-python@v2
+      - uses: actions/setup-python@v4
         with:
           python-version: "3.x"
      - run: pip install .[all,test]
@@ -133,7 +133,7 @@ jobs:
       BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
 
       - name: Install Rust
         uses: actions-rs/toolchain@v1
@@ -155,7 +155,7 @@ jobs:
         if: ${{ always() }}
         run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
       - name: Upload SyTest logs
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         if: ${{ always() }}
         with:
           name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
@@ -182,8 +182,8 @@ jobs:
           database: Postgres
 
     steps:
-      - name: Run actions/checkout@v2 for synapse
-        uses: actions/checkout@v2
+      - name: Run actions/checkout@v3 for synapse
+        uses: actions/checkout@v3
         with:
           path: synapse
 
@@ -201,15 +201,16 @@ jobs:
   open-issue:
     if: "failure() && github.event_name != 'push' && github.event_name != 'pull_request'"
     needs:
-      # TODO: should mypy be included here? It feels more brittle than the other two.
+      # TODO: should mypy be included here? It feels more brittle than the others.
       - mypy
       - trial
       - sytest
+      - complement
 
     runs-on: ubuntu-latest
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
.github/workflows/release-artifacts.yml (65 lines changed)

```diff
@@ -11,6 +11,7 @@ on:
 
   # we do the full build on tags.
   tags: ["v*"]
+  workflow_dispatch:
 
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
@@ -24,8 +25,10 @@ jobs:
     name: "Calculate list of debian distros"
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-python@v2
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
+        with:
+          python-version: '3.x'
       - id: set-distros
         run: |
           # if we're running from a tag, get the full list of distros; otherwise just use debian:sid
@@ -33,7 +36,7 @@ jobs:
           if [[ $GITHUB_REF == refs/tags/* ]]; then
             dists=$(scripts-dev/build_debian_packages.py --show-dists-json)
           fi
-          echo "::set-output name=distros::$dists"
+          echo "distros=$dists" >> "$GITHUB_OUTPUT"
       # map the step outputs to job outputs
       outputs:
         distros: ${{ steps.set-distros.outputs.distros }}
@@ -49,18 +52,18 @@ jobs:
 
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
        with:
          path: src
 
       - name: Set up Docker Buildx
         id: buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
        with:
          install: true
 
       - name: Set up docker layer caching
-        uses: actions/cache@v2
+        uses: actions/cache@v3
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-${{ github.sha }}
@@ -68,7 +71,9 @@ jobs:
           ${{ runner.os }}-buildx-
 
       - name: Set up python
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.x'
 
       - name: Build the packages
         # see https://github.com/docker/build-push-action/issues/252
@@ -84,38 +89,59 @@ jobs:
           mv /tmp/.buildx-cache-new /tmp/.buildx-cache
 
       - name: Upload debs as artifacts
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: debs
           path: debs/*
 
   build-wheels:
-    name: Build wheels on ${{ matrix.os }}
+    name: Build wheels on ${{ matrix.os }} for ${{ matrix.arch }}
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ubuntu-20.04, macos-10.15]
+        os: [ubuntu-20.04, macos-11]
+        arch: [x86_64, aarch64]
         # is_pr is a flag used to exclude certain jobs from the matrix on PRs.
         # It is not read by the rest of the workflow.
         is_pr:
           - ${{ startsWith(github.ref, 'refs/pull/') }}
 
         exclude:
           # Don't build macos wheels on PR CI.
           - is_pr: true
-            os: "macos-10.15"
+            os: "macos-11"
+          # Don't build aarch64 wheels on mac.
+          - os: "macos-11"
+            arch: aarch64
+          # Don't build aarch64 wheels on PR CI.
+          - is_pr: true
+            arch: aarch64
 
     steps:
       - uses: actions/checkout@v3
 
-      - uses: actions/setup-python@v3
+      - uses: actions/setup-python@v4
         with:
+          # setup-python@v4 doesn't impose a default python version. Need to use 3.x
+          # here, because `python` on osx points to Python 2.7.
           python-version: "3.x"
 
       - name: Install cibuildwheel
         run: python -m pip install cibuildwheel==2.9.0 poetry==1.2.0
 
-      # Only build a single wheel in CI.
-      - name: Set env vars.
-        run: |
-          echo "CIBW_BUILD="cp37-manylinux_x86_64"" >> $GITHUB_ENV
+      - name: Set up QEMU to emulate aarch64
+        if: matrix.arch == 'aarch64'
+        uses: docker/setup-qemu-action@v2
+        with:
+          platforms: arm64
+
+      - name: Build aarch64 wheels
+        if: matrix.arch == 'aarch64'
+        run: echo 'CIBW_ARCHS_LINUX=aarch64' >> $GITHUB_ENV
+
+      - name: Only build a single wheel on PR
+        if: startsWith(github.ref, 'refs/pull/')
+        run: echo "CIBW_BUILD="cp37-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
 
       - name: Build wheels
         run: python -m cibuildwheel --output-dir wheelhouse
@@ -123,6 +149,9 @@ jobs:
           # Skip testing for platforms which various libraries don't have wheels
           # for, and so need extra build deps.
           CIBW_TEST_SKIP: pp39-* *i686* *musl* pp37-macosx*
+          # Fix Rust OOM errors on emulated aarch64: https://github.com/rust-lang/cargo/issues/10583
+          CARGO_NET_GIT_FETCH_WITH_CLI: true
+          CIBW_ENVIRONMENT_PASS_LINUX: CARGO_NET_GIT_FETCH_WITH_CLI
 
       - uses: actions/upload-artifact@v3
         with:
@@ -145,7 +174,7 @@ jobs:
       - name: Build sdist
         run: python -m build --sdist
 
-      - uses: actions/upload-artifact@v2
+      - uses: actions/upload-artifact@v3
         with:
           name: Sdist
           path: dist/*.tar.gz
@@ -162,7 +191,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Download all workflow run artifacts
-        uses: actions/download-artifact@v2
+        uses: actions/download-artifact@v3
       - name: Build a tarball for the debs
         run: tar -cvJf debs.tar.xz debs
       - name: Attach to release
```
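To see what the new `matrix`/`exclude` stanza leaves to run, here is a sketch (not GitHub's own matrix expansion, just the same pruning rules) of the wheel jobs on a PR versus a tagged release:

```python
from itertools import product

def wheel_jobs(is_pr: bool) -> list:
    """Expand the os/arch matrix and apply the three exclude rules above."""
    jobs = []
    for os_, arch in product(["ubuntu-20.04", "macos-11"], ["x86_64", "aarch64"]):
        if is_pr and os_ == "macos-11":
            continue  # don't build macos wheels on PR CI
        if os_ == "macos-11" and arch == "aarch64":
            continue  # don't build aarch64 wheels on mac
        if is_pr and arch == "aarch64":
            continue  # don't build aarch64 wheels on PR CI
        jobs.append({"os": os_, "arch": arch})
    return jobs

print(wheel_jobs(is_pr=True))   # [{'os': 'ubuntu-20.04', 'arch': 'x86_64'}]
print(wheel_jobs(is_pr=False))  # ubuntu x86_64, ubuntu aarch64, macos x86_64
```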
.github/workflows/tests.yml (57 lines changed)

```diff
@@ -4,6 +4,7 @@ on:
   push:
     branches: ["develop", "release-*"]
   pull_request:
+  workflow_dispatch:
 
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
@@ -30,8 +31,8 @@ jobs:
   check-sampleconfig:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-python@v2
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
       - uses: matrix-org/setup-python-poetry@v1
         with:
           extras: "all"
@@ -41,8 +42,8 @@ jobs:
   check-schema-delta:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-python@v2
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
       - run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
       - run: scripts-dev/check_schema_delta.py --force-colors
 
@@ -54,19 +55,19 @@ jobs:
   lint-crlf:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - name: Check line endings
         run: scripts-dev/check_line_terminators.sh
 
   lint-newsfile:
-    if: ${{ github.base_ref == 'develop' || contains(github.base_ref, 'release-') }}
+    if: ${{ (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.actor != 'dependabot[bot]' }}
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
         with:
          ref: ${{ github.event.pull_request.head.sha }}
          fetch-depth: 0
-      - uses: actions/setup-python@v2
+      - uses: actions/setup-python@v4
       - run: "pip install 'towncrier>=18.6.0rc1'"
       - run: scripts-dev/check-newsfragment.sh
         env:
@@ -75,7 +76,7 @@ jobs:
   lint-pydantic:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
         with:
           ref: ${{ github.event.pull_request.head.sha }}
       - uses: matrix-org/setup-python-poetry@v1
@@ -89,7 +90,7 @@ jobs:
     if: ${{ needs.changes.outputs.rust == 'true' }}
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
 
       - name: Install Rust
         uses: actions-rs/toolchain@v1
@@ -107,7 +108,7 @@ jobs:
     if: ${{ needs.changes.outputs.rust == 'true' }}
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
 
       - name: Install Rust
         uses: actions-rs/toolchain@v1
@@ -140,8 +141,8 @@ jobs:
     needs: linting-done
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions/setup-python@v2
+      - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
       - id: get-matrix
         run: .ci/scripts/calculate_jobs.py
     outputs:
@@ -157,7 +158,7 @@ jobs:
         job: ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }}
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - run: sudo apt-get -qq install xmlsec1
       - name: Set up PostgreSQL ${{ matrix.job.postgres-version }}
         if: ${{ matrix.job.postgres-version }}
@@ -166,6 +167,14 @@ jobs:
             -e POSTGRES_PASSWORD=postgres \
             -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
             postgres:${{ matrix.job.postgres-version }}
+
+      - name: Install Rust
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: 1.58.1
+          override: true
+      - uses: Swatinem/rust-cache@v2
+
       - uses: matrix-org/setup-python-poetry@v1
         with:
           python-version: ${{ matrix.job.python-version }}
@@ -199,7 +208,7 @@ jobs:
     needs: linting-done
     runs-on: ubuntu-20.04
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
 
       - name: Install Rust
         uses: actions-rs/toolchain@v1
@@ -270,7 +279,7 @@ jobs:
         extras: ["all"]
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       # Install libs necessary for PyPy to build binary wheels for dependencies
       - run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
       - uses: matrix-org/setup-python-poetry@v1
@@ -313,7 +322,7 @@ jobs:
         job: ${{ fromJson(needs.calculate-test-jobs.outputs.sytest_test_matrix) }}
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - name: Prepare test blacklist
         run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
 
@@ -331,7 +340,7 @@ jobs:
         if: ${{ always() }}
         run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
       - name: Upload SyTest logs
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         if: ${{ always() }}
         with:
           name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.job.*, ', ') }})
@@ -361,7 +370,7 @@ jobs:
           --health-retries 5
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - run: sudo apt-get -qq install xmlsec1 postgresql-client
       - uses: matrix-org/setup-python-poetry@v1
         with:
@@ -384,7 +393,7 @@ jobs:
           - python-version: "3.7"
             postgres-version: "10"
 
-          - python-version: "3.10"
+          - python-version: "3.11"
             postgres-version: "14"
 
     services:
@@ -402,7 +411,7 @@ jobs:
           --health-retries 5
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - run: sudo apt-get -qq install xmlsec1 postgresql-client
       - uses: matrix-org/setup-python-poetry@v1
         with:
@@ -444,8 +453,8 @@ jobs:
           database: Postgres
 
     steps:
-      - name: Run actions/checkout@v2 for synapse
-        uses: actions/checkout@v2
+      - name: Run actions/checkout@v3 for synapse
+        uses: actions/checkout@v3
         with:
           path: synapse
 
@@ -473,7 +482,7 @@ jobs:
       - changes
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
 
       - name: Install Rust
         uses: actions-rs/toolchain@v1
```
.github/workflows/triage-incoming.yml (29 lines changed)

```diff
@@ -5,24 +5,11 @@ on:
     types: [ opened ]
 
 jobs:
-  add_new_issues:
-    name: Add new issues to the triage board
-    runs-on: ubuntu-latest
-    steps:
-      - uses: octokit/graphql-action@v2.x
-        id: add_to_project
-        with:
-          headers: '{"GraphQL-Features": "projects_next_graphql"}'
-          query: |
-            mutation add_to_project($projectid:ID!,$contentid:ID!) {
-              addProjectV2ItemById(input: {projectId: $projectid contentId: $contentid}) {
-                item {
-                  id
-                }
-              }
-            }
-          projectid: ${{ env.PROJECT_ID }}
-          contentid: ${{ github.event.issue.node_id }}
-        env:
-          PROJECT_ID: "PVT_kwDOAIB0Bs4AFDdZ"
-          GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
+  triage:
+    uses: matrix-org/backend-meta/.github/workflows/triage-incoming.yml@v1
+    with:
+      project_id: 'PVT_kwDOAIB0Bs4AFDdZ'
+      content_id: ${{ github.event.issue.node_id }}
+    secrets:
+      github_access_token: ${{ secrets.ELEMENT_BOT_TOKEN }}
+
```
.github/workflows/twisted_trunk.yml (17 lines changed)

```diff
@@ -15,7 +15,7 @@ jobs:
     runs-on: ubuntu-latest
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
 
       - name: Install Rust
         uses: actions-rs/toolchain@v1
@@ -40,7 +40,7 @@ jobs:
     runs-on: ubuntu-latest
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - run: sudo apt-get -qq install xmlsec1
 
       - name: Install Rust
@@ -81,7 +81,7 @@ jobs:
       - ${{ github.workspace }}:/src
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
 
       - name: Install Rust
         uses: actions-rs/toolchain@v1
@@ -112,7 +112,7 @@ jobs:
         if: ${{ always() }}
         run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
       - name: Upload SyTest logs
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         if: ${{ always() }}
         with:
           name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
@@ -138,8 +138,8 @@ jobs:
           database: Postgres
 
     steps:
-      - name: Run actions/checkout@v2 for synapse
-        uses: actions/checkout@v2
+      - name: Run actions/checkout@v3 for synapse
+        uses: actions/checkout@v3
         with:
           path: synapse
 
@@ -151,12 +151,11 @@ jobs:
       run: |
         set -x
         DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx
-        pipx install poetry==1.1.14
+        pipx install poetry==1.2.0
 
        poetry remove -n twisted
        poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk
        poetry lock --no-update
-        # NOT IN 1.1.14 poetry lock --check
       working-directory: synapse
 
     - run: |
@@ -177,7 +176,7 @@ jobs:
     runs-on: ubuntu-latest
 
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
.rustfmt.toml (new file, 1 line)

```toml
group_imports = "StdExternalCrate"
```
413
CHANGES.md
413
CHANGES.md
@@ -1,3 +1,416 @@
|
||||
Synapse 1.71.0 (2022-11-08)
|
||||
===========================
|
||||
|
||||
Please note that, as announced in the release notes for Synapse 1.69.0, legacy Prometheus metric names are now disabled by default.
|
||||
They will be removed altogether in Synapse 1.73.0.
|
||||
If not already done, server administrators should update their dashboards and alerting rules to avoid using the deprecated metric names.
|
||||
See the [upgrade notes](https://matrix-org.github.io/synapse/v1.71/upgrade.html#upgrading-to-v1710) for more details.
|
||||
|
||||
**Note:** in line with our [deprecation policy](https://matrix-org.github.io/synapse/latest/deprecation_policy.html) for platform dependencies, this will be the last release to support PostgreSQL 10, which reaches upstream end-of-life on November 10th, 2022. Future releases of Synapse will require PostgreSQL 11+.
|
||||
|
||||
No significant changes since 1.71.0rc2.
|
||||
|
||||
|
||||
Synapse 1.71.0rc2 (2022-11-04)
|
||||
==============================
|
||||
|
||||
Improved Documentation
|
||||
----------------------
|
||||
|
||||
- Document the changes to monthly active user metrics due to deprecation of legacy Prometheus metric names. ([\#14358](https://github.com/matrix-org/synapse/issues/14358), [\#14360](https://github.com/matrix-org/synapse/issues/14360))
|
||||
|
||||
|
||||
Deprecations and Removals
|
||||
-------------------------
|
||||
|
||||
- Disable legacy Prometheus metric names by default. They can still be re-enabled for now, but they will be removed altogether in Synapse 1.73.0. ([\#14353](https://github.com/matrix-org/synapse/issues/14353))
|
||||
|
||||
|
||||
Internal Changes
|
||||
----------------
|
||||
|
||||
- Run unit tests against Python 3.11. ([\#13812](https://github.com/matrix-org/synapse/issues/13812))
|
||||
|
||||
|
||||
Synapse 1.71.0rc1 (2022-11-01)
|
||||
==============================
|
||||
|
||||
Features
|
||||
--------
|
||||
|
||||
- Support back-channel logouts from OpenID Connect providers. ([\#11414](https://github.com/matrix-org/synapse/issues/11414))
|
||||
- Allow use of Postgres and SQLlite full-text search operators in search queries. ([\#11635](https://github.com/matrix-org/synapse/issues/11635), [\#14310](https://github.com/matrix-org/synapse/issues/14310), [\#14311](https://github.com/matrix-org/synapse/issues/14311))
|
||||
- Implement [MSC3664](https://github.com/matrix-org/matrix-doc/pull/3664), Pushrules for relations. Contributed by Nico. ([\#11804](https://github.com/matrix-org/synapse/issues/11804))
|
||||
- Improve aesthetics of HTML templates. Note that these changes do not retroactively apply to templates which have been [customised](https://matrix-org.github.io/synapse/latest/templates.html#templates) by server admins. ([\#13652](https://github.com/matrix-org/synapse/issues/13652))
|
||||
- Enable write-ahead logging for SQLite installations. Contributed by [@asymmetric](https://github.com/asymmetric). ([\#13897](https://github.com/matrix-org/synapse/issues/13897))
|
||||
- Show erasure status when [listing users](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#query-user-account) in the Admin API. ([\#14205](https://github.com/matrix-org/synapse/issues/14205))
|
||||
- Provide a specific error code when a `/sync` request provides a filter which doesn't represent a JSON object. ([\#14262](https://github.com/matrix-org/synapse/issues/14262))
|
||||
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- Fix a long-standing bug where the `update_synapse_database` script could not be run with multiple databases. Contributed by @thefinn93 @ Beeper. ([\#13422](https://github.com/matrix-org/synapse/issues/13422))
|
||||
- Fix a bug which prevented setting an avatar on homeservers which have an explicit port in their `server_name` and have `max_avatar_size` and/or `allowed_avatar_mimetypes` configuration. Contributed by @ashfame. ([\#13927](https://github.com/matrix-org/synapse/issues/13927))
|
||||
- Check appservice user interest against the local users instead of all users in the room to align with [MSC3905](https://github.com/matrix-org/matrix-spec-proposals/pull/3905). ([\#13958](https://github.com/matrix-org/synapse/issues/13958))
|
||||
- Fix a long-standing bug where Synapse would accidentally include extra information in the response to [`PUT /_matrix/federation/v2/invite/{roomId}/{eventId}`](https://spec.matrix.org/v1.4/server-server-api/#put_matrixfederationv2inviteroomideventid). ([\#14064](https://github.com/matrix-org/synapse/issues/14064))
|
||||
- Fix a bug introduced in Synapse 1.64.0 where presence updates could be missing from `/sync` responses. ([\#14243](https://github.com/matrix-org/synapse/issues/14243))
|
||||
- Fix a bug introduced in Synapse 1.60.0 which caused an error to be logged when Synapse received a SIGHUP signal if debug logging was enabled. ([\#14258](https://github.com/matrix-org/synapse/issues/14258))
|
||||
- Prevent history insertion ([MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716)) during an partial join ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706)). ([\#14291](https://github.com/matrix-org/synapse/issues/14291))
|
||||
- Fix a bug introduced in Synapse 1.34.0 where device names would be returned via a federation user key query request when `allow_device_name_lookup_over_federation` was set to `false`. ([\#14304](https://github.com/matrix-org/synapse/issues/14304))
|
||||
- Fix a bug introduced in Synapse 0.34.0 where logs could include error spam when background processes are measured as taking a negative amount of time. ([\#14323](https://github.com/matrix-org/synapse/issues/14323))
|
||||
- Fix a bug introduced in Synapse 1.70.0 where clients were unable to PUT new [dehydrated devices](https://github.com/matrix-org/matrix-spec-proposals/pull/2697). ([\#14336](https://github.com/matrix-org/synapse/issues/14336))
|
||||
|
||||
|
||||
Improved Documentation
|
||||
----------------------
|
||||
|
||||
- Explain how to disable the use of [`trusted_key_servers`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#trusted_key_servers). ([\#13999](https://github.com/matrix-org/synapse/issues/13999))
|
||||
- Add workers settings to [configuration manual](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#individual-worker-configuration). ([\#14086](https://github.com/matrix-org/synapse/issues/14086))
|
||||
- Correct the name of the config option [`encryption_enabled_by_default_for_room_type`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#encryption_enabled_by_default_for_room_type). ([\#14110](https://github.com/matrix-org/synapse/issues/14110))
|
||||
- Update docstrings of `SynapseError` and `FederationError` to bettter describe what they are used for and the effects of using them are. ([\#14191](https://github.com/matrix-org/synapse/issues/14191))
|
||||
|
||||
|
||||
Internal Changes
|
||||
----------------
|
||||
|
||||
- Remove unused `@lru_cache` decorator. ([\#13595](https://github.com/matrix-org/synapse/issues/13595))
|
||||
- Save login tokens in database and prevent login token reuse. ([\#13844](https://github.com/matrix-org/synapse/issues/13844))
|
||||
- Refactor OIDC tests to better mimic an actual OIDC provider. ([\#13910](https://github.com/matrix-org/synapse/issues/13910))
|
||||
- Fix type annotation causing import time error in the Complement forking launcher. ([\#14084](https://github.com/matrix-org/synapse/issues/14084))
|
||||
- Refactor [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to loop over federation destinations with standard pattern and error handling. ([\#14096](https://github.com/matrix-org/synapse/issues/14096))
|
||||
- Add initial power level event to batch of bulk persisted events when creating a new room. ([\#14228](https://github.com/matrix-org/synapse/issues/14228))
|
||||
- Refactor `/key/` endpoints to use `RestServlet` classes. ([\#14229](https://github.com/matrix-org/synapse/issues/14229))
|
||||
- Switch to using the `matrix-org/backend-meta` version of `triage-incoming` for new issues in CI. ([\#14230](https://github.com/matrix-org/synapse/issues/14230))
|
||||
- Build wheels on macos 11, not 10.15. ([\#14249](https://github.com/matrix-org/synapse/issues/14249))
|
||||
- Add debugging to help diagnose lost device list updates. ([\#14268](https://github.com/matrix-org/synapse/issues/14268))
|
||||
- Add Rust cache to CI for `trial` runs. ([\#14287](https://github.com/matrix-org/synapse/issues/14287))
|
||||
- Improve type hinting of `RawHeaders`. ([\#14303](https://github.com/matrix-org/synapse/issues/14303))
|
||||
- Use Poetry 1.2.0 in the Twisted Trunk CI job. ([\#14305](https://github.com/matrix-org/synapse/issues/14305))
|
||||
|
||||
<details>
|
||||
<summary>Dependency updates</summary>
|
||||
|
||||
Runtime:
|
||||
|
||||
- Bump anyhow from 1.0.65 to 1.0.66. ([\#14278](https://github.com/matrix-org/synapse/issues/14278))
|
||||
- Bump jinja2 from 3.0.3 to 3.1.2. ([\#14271](https://github.com/matrix-org/synapse/issues/14271))
|
||||
- Bump prometheus-client from 0.14.0 to 0.15.0. ([\#14274](https://github.com/matrix-org/synapse/issues/14274))
|
||||
- Bump psycopg2 from 2.9.4 to 2.9.5. ([\#14331](https://github.com/matrix-org/synapse/issues/14331))
|
||||
- Bump pysaml2 from 7.1.2 to 7.2.1. ([\#14270](https://github.com/matrix-org/synapse/issues/14270))
|
||||
- Bump sentry-sdk from 1.5.11 to 1.10.1. ([\#14330](https://github.com/matrix-org/synapse/issues/14330))
|
||||
- Bump serde from 1.0.145 to 1.0.147. ([\#14277](https://github.com/matrix-org/synapse/issues/14277))
|
||||
- Bump serde_json from 1.0.86 to 1.0.87. ([\#14279](https://github.com/matrix-org/synapse/issues/14279))
|
||||
|
||||
Tooling and CI:
|
||||
|
||||
- Bump black from 22.3.0 to 22.10.0. ([\#14328](https://github.com/matrix-org/synapse/issues/14328))
|
||||
- Bump flake8-bugbear from 21.3.2 to 22.9.23. ([\#14042](https://github.com/matrix-org/synapse/issues/14042))
|
||||
- Bump peaceiris/actions-gh-pages from 3.8.0 to 3.9.0. ([\#14276](https://github.com/matrix-org/synapse/issues/14276))
|
||||
- Bump peaceiris/actions-mdbook from 1.1.14 to 1.2.0. ([\#14275](https://github.com/matrix-org/synapse/issues/14275))
|
||||
- Bump setuptools-rust from 1.5.1 to 1.5.2. ([\#14273](https://github.com/matrix-org/synapse/issues/14273))
|
||||
- Bump twine from 3.8.0 to 4.0.1. ([\#14332](https://github.com/matrix-org/synapse/issues/14332))
|
||||
- Bump types-opentracing from 2.4.7 to 2.4.10. ([\#14133](https://github.com/matrix-org/synapse/issues/14133))
|
||||
- Bump types-requests from 2.28.11 to 2.28.11.2. ([\#14272](https://github.com/matrix-org/synapse/issues/14272))
|
||||
</details>
|
||||
|
||||
Synapse 1.70.1 (2022-10-28)
|
||||
===========================
|
||||
|
||||
This release fixes some regressions that were discovered in 1.70.0.
|
||||
|
||||
[#14300](https://github.com/matrix-org/synapse/issues/14300)
|
||||
was previously reported to be a regression in 1.70.0 as well. However, we have
|
||||
since concluded that it was limited to the reporter and thus have not needed
|
||||
to include any fix for it in 1.70.1.
|
||||
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- Fix a bug introduced in Synapse 1.70.0rc1 where the access tokens sent to application services as headers were malformed. Application services which were obtaining access tokens from query parameters were not affected. ([\#14301](https://github.com/matrix-org/synapse/issues/14301))
|
||||
- Fix room creation being rate limited too aggressively since Synapse v1.69.0. ([\#14314](https://github.com/matrix-org/synapse/issues/14314))
|
||||
|
||||
|
||||
Synapse 1.70.0 (2022-10-26)
|
||||
===========================
|
||||
|
||||
No significant changes since 1.70.0rc2.
|
||||
|
||||
|
||||
Synapse 1.70.0rc2 (2022-10-25)
|
||||
==============================
|
||||
|
||||
Bugfixes
|
||||
--------
|
||||
|
||||
- Fix a bug introduced in Synapse 1.70.0rc1 where the information returned from the `/threads` API could be stale when threaded events are redacted. ([\#14248](https://github.com/matrix-org/synapse/issues/14248))
|
||||
- Fix a bug introduced in Synapse 1.70.0rc1 leading to broken outbound federation when using Python 3.7. ([\#14280](https://github.com/matrix-org/synapse/issues/14280))
|
||||
- Fix a bug introduced in Synapse 1.70.0rc1 where edits to non-message events were aggregated by the homeserver. ([\#14283](https://github.com/matrix-org/synapse/issues/14283))
|
||||
|
||||
|
||||
Internal Changes
|
||||
----------------
|
||||
|
||||
- Build ABI3 wheels for CPython. ([\#14253](https://github.com/matrix-org/synapse/issues/14253))
|
||||
- For the aarch64 architecture, only build wheels for CPython manylinux. ([\#14259](https://github.com/matrix-org/synapse/issues/14259))
|
||||
|
||||
|
||||
Synapse 1.70.0rc1 (2022-10-19)
|
||||
==============================
|
||||
|
||||
Features
|
||||
--------
|
||||
|
||||
- Support for [MSC3856](https://github.com/matrix-org/matrix-spec-proposals/pull/3856): threads list API. ([\#13394](https://github.com/matrix-org/synapse/issues/13394), [\#14171](https://github.com/matrix-org/synapse/issues/14171), [\#14175](https://github.com/matrix-org/synapse/issues/14175))
|
||||
- Support for thread-specific notifications & receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771) and [MSC3773](https://github.com/matrix-org/matrix-spec-proposals/pull/3773)). ([\#13776](https://github.com/matrix-org/synapse/issues/13776), [\#13824](https://github.com/matrix-org/synapse/issues/13824), [\#13877](https://github.com/matrix-org/synapse/issues/13877), [\#13878](https://github.com/matrix-org/synapse/issues/13878), [\#14050](https://github.com/matrix-org/synapse/issues/14050), [\#14140](https://github.com/matrix-org/synapse/issues/14140), [\#14159](https://github.com/matrix-org/synapse/issues/14159), [\#14163](https://github.com/matrix-org/synapse/issues/14163), [\#14174](https://github.com/matrix-org/synapse/issues/14174), [\#14222](https://github.com/matrix-org/synapse/issues/14222))
|
||||
- Stop fetching missing `prev_events` after we already know their signature is invalid. ([\#13816](https://github.com/matrix-org/synapse/issues/13816))
|
||||
- Send application service access tokens as a header (and query parameter). Implements [MSC2832](https://github.com/matrix-org/matrix-spec-proposals/pull/2832). ([\#13996](https://github.com/matrix-org/synapse/issues/13996))
|
||||
- Ignore server ACL changes when generating pushes. Implements [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786). ([\#13997](https://github.com/matrix-org/synapse/issues/13997))
|
||||
- Experimental support for redirecting to an implementation of a [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886) HTTP rendezvous service. ([\#14018](https://github.com/matrix-org/synapse/issues/14018))
|
||||
- The `/relations` endpoint can now be used on workers. ([\#14028](https://github.com/matrix-org/synapse/issues/14028))
|
||||
- Advertise support for Matrix 1.3 and 1.4 on `/_matrix/client/versions`. ([\#14032](https://github.com/matrix-org/synapse/issues/14032), [\#14184](https://github.com/matrix-org/synapse/issues/14184))
|
||||
- Improve validation of request bodies for the [Device Management](https://spec.matrix.org/v1.4/client-server-api/#device-management) and [MSC2697 Device Dehyrdation](https://github.com/matrix-org/matrix-spec-proposals/pull/2697) client-server API endpoints. ([\#14054](https://github.com/matrix-org/synapse/issues/14054))
|
||||
- Experimental support for [MSC3874](https://github.com/matrix-org/matrix-spec-proposals/pull/3874): Filtering threads from the `/messages` endpoint. ([\#14148](https://github.com/matrix-org/synapse/issues/14148))
|
||||
- Improve the validation of the following PUT endpoints: [`/directory/room/{roomAlias}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3directoryroomroomalias), [`/directory/list/room/{roomId}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3directorylistroomroomid) and [`/directory/list/appservice/{networkId}/{roomId}`](https://spec.matrix.org/v1.4/application-service-api/#put_matrixclientv3directorylistappservicenetworkidroomid). ([\#14179](https://github.com/matrix-org/synapse/issues/14179))
|
||||
- Build and publish binary wheels for `aarch64` platforms. ([\#14212](https://github.com/matrix-org/synapse/issues/14212))
|
||||
|
||||
|
||||
Bugfixes
--------

- Prevent device names from appearing in device list updates in some situations when `allow_device_name_lookup_over_federation` is `false`. (This is not comprehensive: see [\#13114](https://github.com/matrix-org/synapse/issues/13114).) ([\#10015](https://github.com/matrix-org/synapse/issues/10015))
- Fix a long-standing bug where redactions were not being sent over federation if we did not have the original event. ([\#13813](https://github.com/matrix-org/synapse/issues/13813))
- Fix a long-standing bug where edits of non-`m.room.message` events would not be correctly bundled or have their new content applied. ([\#14034](https://github.com/matrix-org/synapse/issues/14034))
- Fix a bug introduced in Synapse 1.53.0 when querying `/publicRooms` with both a `room_type` filter and a `third_party_instance_id`. ([\#14053](https://github.com/matrix-org/synapse/issues/14053))
- Fix a bug introduced in Synapse 1.35.0 where errors parsing a `/send_join` or `/state` response would produce excessive, low-quality Sentry events. ([\#14065](https://github.com/matrix-org/synapse/issues/14065))
- Fix a long-standing bug where Synapse would error on the optional `invite_room_state` field not being provided to [`PUT /_matrix/federation/v2/invite/{roomId}/{eventId}`](https://spec.matrix.org/v1.4/server-server-api/#put_matrixfederationv2inviteroomideventid). ([\#14083](https://github.com/matrix-org/synapse/issues/14083))
- Fix a bug introduced in Synapse 1.18.0 where invalid oEmbed fields would cause the entire response to be discarded. ([\#14089](https://github.com/matrix-org/synapse/issues/14089))
- Fix a bug introduced in Synapse 1.37.0 in which an incorrect key name was used for sending and receiving room metadata when knocking on a room. ([\#14102](https://github.com/matrix-org/synapse/issues/14102))
- Fix a bug introduced in v1.69.0rc1 where the joined hosts for a given event were not being properly cached. ([\#14125](https://github.com/matrix-org/synapse/issues/14125))
- Fix a bug introduced in Synapse 1.30.0 where purging and rejoining a room without restarting in between would result in a broken room. ([\#14161](https://github.com/matrix-org/synapse/issues/14161), [\#14164](https://github.com/matrix-org/synapse/issues/14164))
- Fix the [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint returning potentially inaccurate closest events when `outliers` are present (see the example after this list). ([\#14215](https://github.com/matrix-org/synapse/issues/14215))

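For reference, a hedged sketch of calling the MSC3030 jump-to-date endpoint fixed above; the unstable path prefix is an assumption from the MSC, and the homeserver, token and room are hypothetical:

```python
import requests

HOMESERVER = "https://matrix.example.org"  # hypothetical
ACCESS_TOKEN = "syt_..."                   # hypothetical
ROOM_ID = "!room:example.org"              # hypothetical

resp = requests.get(
    f"{HOMESERVER}/_matrix/client/unstable/org.matrix.msc3030"
    f"/rooms/{ROOM_ID}/timestamp_to_event",
    # Find the closest event at or after this millisecond timestamp.
    params={"ts": 1666000000000, "dir": "f"},
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
)
resp.raise_for_status()
print(resp.json())  # e.g. {"event_id": "$...", "origin_server_ts": ...}
```
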
Updates to the Docker image
---------------------------

- Update the version of frozendict in Docker images and Debian packages from 2.3.3 to 2.3.4, which may fix memory leak problems. ([\#13955](https://github.com/matrix-org/synapse/issues/13955))
- Use the `minimal` Rust profile when building Synapse. ([\#14141](https://github.com/matrix-org/synapse/issues/14141))
- Prevent a class of database sharding errors when using `Dockerfile-workers` to spawn multiple instances of the same worker. Contributed by Jason Little. ([\#14165](https://github.com/matrix-org/synapse/issues/14165))
- Set `LD_PRELOAD` to use the jemalloc memory allocator in `Dockerfile-workers` (see the sketch after this list). ([\#14182](https://github.com/matrix-org/synapse/issues/14182))
- Fix pre-startup logging being lost when using the `Dockerfile-workers` image. ([\#14195](https://github.com/matrix-org/synapse/issues/14195))

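A minimal sketch of the `LD_PRELOAD` mechanism referenced above, mirroring the startup-script change included later in this compare: preload jemalloc only if the shared library shipped in the image exists. The path assumes Debian's multiarch layout and may differ on other base images.

```python
import os
import platform

# Debian multiarch path to the jemalloc shared library.
jemalloc = "/usr/lib/%s-linux-gnu/libjemalloc.so.2" % (platform.machine(),)

if os.path.isfile(jemalloc):
    # Inherited by any process exec'd with this environment.
    os.environ["LD_PRELOAD"] = jemalloc
else:
    print("Could not find %s, will not use" % (jemalloc,))
```
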
Improved Documentation
----------------------

- Add sample worker files for `pusher` and `federation_sender`. ([\#14077](https://github.com/matrix-org/synapse/issues/14077))
- Improve the listener example in the metrics documentation. ([\#14078](https://github.com/matrix-org/synapse/issues/14078))
- Expand the Google OpenID Connect example config to map the email attribute. Contributed by @ptman. ([\#14081](https://github.com/matrix-org/synapse/issues/14081))
- Document that ending a changelog entry with a full stop or exclamation mark is not optional. ([\#14087](https://github.com/matrix-org/synapse/issues/14087))
- Fix links to the jemalloc documentation, which were broken in [#13491](https://github.com/matrix-org/synapse/pull/13491). ([\#14093](https://github.com/matrix-org/synapse/issues/14093))
- Remove an unneeded `replication` listener from the Docker Compose example. ([\#14107](https://github.com/matrix-org/synapse/issues/14107))
- Fix the name of the `alias_creation_rules` option in the configuration manual. ([\#14124](https://github.com/matrix-org/synapse/issues/14124))
- Clarify a comment on event contexts. ([\#14145](https://github.com/matrix-org/synapse/issues/14145))
- Fix a dead link to the [Admin Registration API](https://matrix-org.github.io/synapse/latest/admin_api/register_api.html). ([\#14189](https://github.com/matrix-org/synapse/issues/14189))

Deprecations and Removals
-------------------------

- Remove the experimental implementation of [MSC3772](https://github.com/matrix-org/matrix-spec-proposals/pull/3772). ([\#14094](https://github.com/matrix-org/synapse/issues/14094))
- Remove the unstable identifier for [MSC3715](https://github.com/matrix-org/matrix-doc/pull/3715). ([\#14106](https://github.com/matrix-org/synapse/issues/14106), [\#14146](https://github.com/matrix-org/synapse/issues/14146))

Internal Changes
----------------

- Optimise the queries used to get a user's rooms during sync. Contributed by Nick @ Beeper (@fizzadar). ([\#13991](https://github.com/matrix-org/synapse/issues/13991))
- Update authlib from 0.15.5 to 1.1.0. ([\#14006](https://github.com/matrix-org/synapse/issues/14006))
- Make `parse_server_name` consistent in handling invalid server names. ([\#14007](https://github.com/matrix-org/synapse/issues/14007))
- Don't repeatedly wake up the same users for batched events. ([\#14033](https://github.com/matrix-org/synapse/issues/14033))
- Complement test image: capture logs from nginx. ([\#14063](https://github.com/matrix-org/synapse/issues/14063))
- Don't create noisy Sentry events when a requester drops the connection to the metrics server mid-request. ([\#14072](https://github.com/matrix-org/synapse/issues/14072))
- Run the integration test suites with the asyncio reactor enabled in CI. ([\#14092](https://github.com/matrix-org/synapse/issues/14092))
- Add debug logs to figure out why an event was filtered out of the client response. ([\#14095](https://github.com/matrix-org/synapse/issues/14095))
- Indicate which endpoint came back with a JSON response we were unable to parse. ([\#14097](https://github.com/matrix-org/synapse/issues/14097))
- Break up calls to fetch rooms for many users. Contributed by Nick @ Beeper (@fizzadar). ([\#14109](https://github.com/matrix-org/synapse/issues/14109))
- Faster joins: prioritise the server we joined by when restarting a partial-join resync. ([\#14126](https://github.com/matrix-org/synapse/issues/14126))
- Cache the Rust build artifacts when building Docker images. ([\#14130](https://github.com/matrix-org/synapse/issues/14130))
- Enable dependabot for Rust dependencies. ([\#14132](https://github.com/matrix-org/synapse/issues/14132))
- Bump typing-extensions from 4.1.1 to 4.4.0. ([\#14134](https://github.com/matrix-org/synapse/issues/14134))
- Use the `minimal` Rust profile when building Synapse. ([\#14141](https://github.com/matrix-org/synapse/issues/14141))
- Remove unused configuration code. ([\#14142](https://github.com/matrix-org/synapse/issues/14142))
- Prepare for the [`gotestfmt` repository move](https://github.com/GoTestTools/gotestfmt/discussions/46). ([\#14144](https://github.com/matrix-org/synapse/issues/14144))
- Invalidate the rooms-for-user caches on replicated events, fixing a sync cache race in Synapse workers. Contributed by Nick @ Beeper (@fizzadar). ([\#14155](https://github.com/matrix-org/synapse/issues/14155))
- Enable URL previews when testing with Complement. ([\#14198](https://github.com/matrix-org/synapse/issues/14198))
- When authenticating batched events, check for auth events in the batch as well as in the DB. ([\#14214](https://github.com/matrix-org/synapse/issues/14214))
- Update the CI config to avoid GitHub Actions deprecation warnings. ([\#14216](https://github.com/matrix-org/synapse/issues/14216), [\#14224](https://github.com/matrix-org/synapse/issues/14224))
- Update dependency requirements to allow building with poetry-core 1.3.2. ([\#14217](https://github.com/matrix-org/synapse/issues/14217))
- Rename the `cache_memory` extra to `cache-memory`, for compatibility with poetry-core 1.3.0 and [PEP 685](https://peps.python.org/pep-0685/). From-source installations using this extra will need to install using the new name. ([\#14221](https://github.com/matrix-org/synapse/issues/14221))
- Specify dev-dependencies using lower bounds, to reduce the likelihood of a dependabot merge conflict. The lockfile continues to pin to specific versions. ([\#14227](https://github.com/matrix-org/synapse/issues/14227))

Synapse 1.69.0 (2022-10-17)
===========================

Please note that legacy Prometheus metric names are now deprecated and will be removed in Synapse 1.73.0.
Server administrators should update their dashboards and alerting rules to avoid using the deprecated metric names.
See the [upgrade notes](https://matrix-org.github.io/synapse/v1.69/upgrade.html#upgrading-to-v1690) for more details.

No significant changes since 1.69.0rc4.

Synapse 1.69.0rc4 (2022-10-14)
==============================

Bugfixes
--------

- Fix poor performance of the `event_push_backfill_thread_id` background update, which was introduced in Synapse 1.68.0rc1. ([\#14172](https://github.com/matrix-org/synapse/issues/14172), [\#14181](https://github.com/matrix-org/synapse/issues/14181))

Updates to the Docker image
---------------------------

- Fix the Docker build OOMing in CI for arm64 builds. ([\#14173](https://github.com/matrix-org/synapse/issues/14173))

Synapse 1.69.0rc3 (2022-10-12)
==============================

Bugfixes
--------

- Fix an issue with Docker images causing the Rust dependencies to not be pinned correctly. Introduced in v1.68.0. ([\#14129](https://github.com/matrix-org/synapse/issues/14129))
- Fix a bug introduced in Synapse 1.69.0rc1 which would cause registration replication requests to fail if the worker sending the request was not running Synapse 1.69. ([\#14135](https://github.com/matrix-org/synapse/issues/14135))
- Fix an error in the background update when rotating existing notifications. Introduced in v1.69.0rc2. ([\#14138](https://github.com/matrix-org/synapse/issues/14138))

Internal Changes
----------------

- Rename the `url_preview` extra to `url-preview`, for compatibility with poetry-core 1.3.0 and [PEP 685](https://peps.python.org/pep-0685/). From-source installations using this extra will need to install using the new name. ([\#14085](https://github.com/matrix-org/synapse/issues/14085))

Synapse 1.69.0rc2 (2022-10-06)
==============================

Deprecations and Removals
-------------------------

- Deprecate the `generate_short_term_login_token` method in favor of an async `create_login_token` method in the Module API (see the sketch below). ([\#13842](https://github.com/matrix-org/synapse/issues/13842))

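A minimal sketch of a Synapse module using the new async method; the exact keyword arguments (e.g. `duration_in_ms`) are assumptions here, so check the Module API documentation for the release you run:

```python
from synapse.module_api import ModuleApi


class LoginTokenIssuer:
    def __init__(self, config: dict, api: ModuleApi):
        self._api = api

    async def issue_token(self, user_id: str) -> str:
        # Replaces the deprecated, synchronous generate_short_term_login_token.
        return await self._api.create_login_token(user_id, duration_in_ms=120_000)
```
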
Internal Changes
----------------

- Ensure Synapse v1.69 works with upcoming database changes in v1.70. ([\#14045](https://github.com/matrix-org/synapse/issues/14045))
- Fix a bug introduced in Synapse v1.68.0 where messages could not be sent in rooms with a non-integer `notifications` power level. ([\#14073](https://github.com/matrix-org/synapse/issues/14073))
- Temporarily pin build-system requirements to work around an incompatibility with poetry-core 1.3.0. This will be reverted before the v1.69.0 release proper; see [\#14079](https://github.com/matrix-org/synapse/issues/14079). ([\#14080](https://github.com/matrix-org/synapse/issues/14080))

Synapse 1.69.0rc1 (2022-10-04)
==============================

Features
--------

- Allow application services to set the `origin_server_ts` of a state event by providing the query parameter `ts` in [`PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}/{stateKey}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3roomsroomidstateeventtypestatekey), per [MSC3316](https://github.com/matrix-org/matrix-doc/pull/3316). Contributed by @lukasdenk (see the sketch after this list). ([\#11866](https://github.com/matrix-org/synapse/issues/11866))
- Allow server admins to require a manual approval process before new accounts can be used (using [MSC3866](https://github.com/matrix-org/matrix-spec-proposals/pull/3866)). ([\#13556](https://github.com/matrix-org/synapse/issues/13556))
- Exponentially back off from backfilling the same event over and over. ([\#13635](https://github.com/matrix-org/synapse/issues/13635), [\#13936](https://github.com/matrix-org/synapse/issues/13936))
- Add cache invalidation across workers to the module API. ([\#13667](https://github.com/matrix-org/synapse/issues/13667), [\#13947](https://github.com/matrix-org/synapse/issues/13947))
- Experimental implementation of [MSC3882](https://github.com/matrix-org/matrix-spec-proposals/pull/3882) to allow an existing device/session to generate a login token for use on a new device/session. ([\#13722](https://github.com/matrix-org/synapse/issues/13722), [\#13868](https://github.com/matrix-org/synapse/issues/13868))
- Experimental support for thread-specific receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771)). ([\#13782](https://github.com/matrix-org/synapse/issues/13782), [\#13893](https://github.com/matrix-org/synapse/issues/13893), [\#13932](https://github.com/matrix-org/synapse/issues/13932), [\#13937](https://github.com/matrix-org/synapse/issues/13937), [\#13939](https://github.com/matrix-org/synapse/issues/13939))
- Add experimental support for [MSC3881: Remotely toggle push notifications for another client](https://github.com/matrix-org/matrix-spec-proposals/pull/3881). ([\#13799](https://github.com/matrix-org/synapse/issues/13799), [\#13831](https://github.com/matrix-org/synapse/issues/13831), [\#13860](https://github.com/matrix-org/synapse/issues/13860))
- Keep track of when an event pulled over federation fails its signature check, so that we can intelligently back off in the future. ([\#13815](https://github.com/matrix-org/synapse/issues/13815))
- Improve validation for the unspecced, internal-only `_matrix/client/unstable/add_threepid/msisdn/submit_token` endpoint. ([\#13832](https://github.com/matrix-org/synapse/issues/13832))
- Faster remote room joins: record _when_ we first partial-join to a room. ([\#13892](https://github.com/matrix-org/synapse/issues/13892))
- Support a `dir` parameter on the `/relations` endpoint, per [MSC3715](https://github.com/matrix-org/matrix-doc/pull/3715). ([\#13920](https://github.com/matrix-org/synapse/issues/13920))
- Ask mail servers receiving emails from Synapse not to send automatic replies (e.g. out-of-office responses). ([\#13957](https://github.com/matrix-org/synapse/issues/13957))

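An illustrative application-service request for the MSC3316 entry above, setting the `origin_server_ts` of a state event via the `ts` query parameter. The server, token and room are hypothetical, and the request is shown here against the `v3` path:

```python
import requests

HOMESERVER = "https://matrix.example.org"  # hypothetical
AS_TOKEN = "as_..."                        # hypothetical appservice token
ROOM_ID = "!room:example.org"              # hypothetical

resp = requests.put(
    # m.room.topic uses an empty state key, so the final path segment is omitted.
    f"{HOMESERVER}/_matrix/client/v3/rooms/{ROOM_ID}/state/m.room.topic",
    params={"ts": 1262304000000},  # origin_server_ts to record, in milliseconds
    headers={"Authorization": f"Bearer {AS_TOKEN}"},
    json={"topic": "Backfilled topic"},
)
resp.raise_for_status()
```
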
Bugfixes
--------

- Send push notifications for invites received over federation. ([\#13719](https://github.com/matrix-org/synapse/issues/13719), [\#14014](https://github.com/matrix-org/synapse/issues/14014))
- Fix a long-standing bug where typing events would be accepted from remote servers not present in a room. Also fix a bug where incoming typing events would cause other incoming events to get stuck during a fast join. ([\#13830](https://github.com/matrix-org/synapse/issues/13830))
- Fix a bug introduced in Synapse v1.53.0 where the experimental implementation of [MSC3715](https://github.com/matrix-org/matrix-spec-proposals/pull/3715) would give incorrect results when paginating forward. ([\#13840](https://github.com/matrix-org/synapse/issues/13840))
- Fix access tokens leaking into logs from the proxy agent. ([\#13855](https://github.com/matrix-org/synapse/issues/13855))
- Fix the `have_seen_event` cache not being invalidated after we persist an event, which caused inefficiencies such as extra `/state` federation calls. ([\#13863](https://github.com/matrix-org/synapse/issues/13863))
- Faster room joins: fix a bug introduced in 1.66.0 where an error would be logged when syncing after joining a room. ([\#13872](https://github.com/matrix-org/synapse/issues/13872))
- Fix a bug introduced in 1.66.0 where some required fields in the push rules sent to clients were no longer present. Contributed by Nico. ([\#13904](https://github.com/matrix-org/synapse/issues/13904))
- Fix packaging to include `Cargo.lock` in the `sdist`. ([\#13909](https://github.com/matrix-org/synapse/issues/13909))
- Fix a long-standing bug where device updates could cause delays sending out to-device messages over federation. ([\#13922](https://github.com/matrix-org/synapse/issues/13922))
- Fix a bug introduced in v1.68.0 where Synapse would require `setuptools_rust` at runtime, even though the package is only required at build time. ([\#13952](https://github.com/matrix-org/synapse/issues/13952))
- Fix a long-standing bug where `POST /_matrix/client/v3/keys/query` requests could result in excessively large SQL queries. ([\#13956](https://github.com/matrix-org/synapse/issues/13956))
- Fix a performance regression in the `get_users_in_room` database query. Introduced in v1.67.0. ([\#13972](https://github.com/matrix-org/synapse/issues/13972))
- Fix a bug introduced in v1.68.0 where the Rust extension wasn't built in `release` mode when using `poetry install`. ([\#14009](https://github.com/matrix-org/synapse/issues/14009))
- Do not return an unspecified `original_event` field when using the stable `/relations` endpoint. Introduced in Synapse v1.57.0. ([\#14025](https://github.com/matrix-org/synapse/issues/14025))
- Correctly handle a race with device lists when a remote user leaves during a partial join. ([\#13885](https://github.com/matrix-org/synapse/issues/13885))
- Correctly handle sending local device list updates to remote servers during a partial join. ([\#13934](https://github.com/matrix-org/synapse/issues/13934))

Improved Documentation
----------------------

- Add `worker_main_http_uri` to the worker generator bash script. ([\#13772](https://github.com/matrix-org/synapse/issues/13772))
- Update the URL for the NixOS module for Synapse. ([\#13818](https://github.com/matrix-org/synapse/issues/13818))
- Fix a mistake in sso_mapping_providers.md: `map_user_attributes` is expected to return `display_name`, not `displayname`. ([\#13836](https://github.com/matrix-org/synapse/issues/13836))
- Fix a cross-link from the registration admin API to the `registration_shared_secret` configuration documentation. ([\#13870](https://github.com/matrix-org/synapse/issues/13870))
- Update the man page for the `hash_password` script to correct the default number of bcrypt rounds performed. ([\#13911](https://github.com/matrix-org/synapse/issues/13911), [\#13930](https://github.com/matrix-org/synapse/issues/13930))
- Emphasize the right reasons to use `(room_id, event_id)` in a database schema. ([\#13915](https://github.com/matrix-org/synapse/issues/13915))
- Add instructions to the contributing guide for running unit tests in parallel. Contributed by @ashfame. ([\#13928](https://github.com/matrix-org/synapse/issues/13928))
- Clarify that the `auto_join_rooms` config option can also be used with Space aliases. ([\#13931](https://github.com/matrix-org/synapse/issues/13931))
- Add some cross-references to the worker documentation. ([\#13974](https://github.com/matrix-org/synapse/issues/13974))
- Linkify URLs in the config documentation. ([\#14003](https://github.com/matrix-org/synapse/issues/14003))

Deprecations and Removals
-------------------------

- Remove the `complete_sso_login` method from the Module API, which was deprecated in Synapse 1.13.0. ([\#13843](https://github.com/matrix-org/synapse/issues/13843))
- Announce that legacy metric names are deprecated, will be turned off by default in Synapse v1.71.0 and removed altogether in Synapse v1.73.0. See the upgrade notes for more information. ([\#14024](https://github.com/matrix-org/synapse/issues/14024))

Internal Changes
----------------

- Speed up creation of DM rooms. ([\#13487](https://github.com/matrix-org/synapse/issues/13487), [\#13800](https://github.com/matrix-org/synapse/issues/13800))
- Port push rules to Rust. ([\#13768](https://github.com/matrix-org/synapse/issues/13768), [\#13838](https://github.com/matrix-org/synapse/issues/13838), [\#13889](https://github.com/matrix-org/synapse/issues/13889))
- Optimise get-rooms-for-user calls. Contributed by Nick @ Beeper (@fizzadar). ([\#13787](https://github.com/matrix-org/synapse/issues/13787))
- Update the script which makes full schema dumps. ([\#13792](https://github.com/matrix-org/synapse/issues/13792))
- Use shared methods for cache invalidation when persisting events and remove duplicate code paths. Contributed by Nick @ Beeper (@fizzadar). ([\#13796](https://github.com/matrix-org/synapse/issues/13796))
- Improve the `synapse.api.auth.Auth` mock used in unit tests. ([\#13809](https://github.com/matrix-org/synapse/issues/13809))
- Faster remote room joins: tell remote homeservers that we are unable to authorise them if they query a room which has partial state on our server. ([\#13823](https://github.com/matrix-org/synapse/issues/13823))
- Carry IdP session IDs through user-mapping sessions. ([\#13839](https://github.com/matrix-org/synapse/issues/13839))
- Fix the release script not publishing binary wheels. ([\#13850](https://github.com/matrix-org/synapse/issues/13850))
- Raise an issue if Complement fails with latest deps. ([\#13859](https://github.com/matrix-org/synapse/issues/13859))
- Correct the comments in the Complement dockerfile. ([\#13867](https://github.com/matrix-org/synapse/issues/13867))
- Create a new snapshot of the database schema. ([\#13873](https://github.com/matrix-org/synapse/issues/13873))
- Faster room joins: send device list updates to most servers in rooms with partial state. ([\#13874](https://github.com/matrix-org/synapse/issues/13874), [\#14013](https://github.com/matrix-org/synapse/issues/14013))
- Add comments to the Prometheus recording rules to make it clear which set of rules you need for Grafana or the Prometheus Console. ([\#13876](https://github.com/matrix-org/synapse/issues/13876))
- Only pull relevant backfill points from the database, based on the current depth and limit (instead of all of them), every time we want to `/backfill`. ([\#13879](https://github.com/matrix-org/synapse/issues/13879))
- Faster room joins: avoid waiting for full state when processing `/keys/changes` requests. ([\#13888](https://github.com/matrix-org/synapse/issues/13888))
- Improve backfill robustness by trying more servers when we get a `4xx` error back. ([\#13890](https://github.com/matrix-org/synapse/issues/13890))
- Fix mypy errors with canonicaljson 1.6.3. ([\#13905](https://github.com/matrix-org/synapse/issues/13905))
- Faster remote room joins: correctly handle remote device list updates during a partial join. ([\#13913](https://github.com/matrix-org/synapse/issues/13913))
- Complement image: propagate SIGTERM to all workers. ([\#13914](https://github.com/matrix-org/synapse/issues/13914))
- Update an inaccurate comment in Synapse's upsert database helper. ([\#13924](https://github.com/matrix-org/synapse/issues/13924))
- Update mypy (0.950 -> 0.981) and mypy-zope (0.3.7 -> 0.3.11). ([\#13925](https://github.com/matrix-org/synapse/issues/13925), [\#13993](https://github.com/matrix-org/synapse/issues/13993))
- Use a dedicated `get_local_users_in_room(room_id)` function to find local users when calculating the users to copy over during a room upgrade. ([\#13960](https://github.com/matrix-org/synapse/issues/13960))
- Refactor language in the user directory `_track_user_joined_room` code to make it clearer that we use both local and remote users. ([\#13966](https://github.com/matrix-org/synapse/issues/13966))
- Revert catch-all exceptions being recorded as event pull attempt failures (only handle what we know about). ([\#13969](https://github.com/matrix-org/synapse/issues/13969))
- Speed up calculating push actions in large rooms. ([\#13973](https://github.com/matrix-org/synapse/issues/13973), [\#13992](https://github.com/matrix-org/synapse/issues/13992))
- Enable update notifications from GitHub's dependabot. ([\#13976](https://github.com/matrix-org/synapse/issues/13976))
- Prototype a workflow to automatically add changelogs to dependabot PRs. ([\#13998](https://github.com/matrix-org/synapse/issues/13998), [\#14011](https://github.com/matrix-org/synapse/issues/14011), [\#14017](https://github.com/matrix-org/synapse/issues/14017), [\#14021](https://github.com/matrix-org/synapse/issues/14021), [\#14027](https://github.com/matrix-org/synapse/issues/14027))
- Fix type annotations to be compatible with new annotations in development versions of Twisted. ([\#14012](https://github.com/matrix-org/synapse/issues/14012))
- Clear out stale entries in the `event_push_actions_staging` table. ([\#14020](https://github.com/matrix-org/synapse/issues/14020))
- Bump versions of GitHub Actions. ([\#13978](https://github.com/matrix-org/synapse/issues/13978), [\#13979](https://github.com/matrix-org/synapse/issues/13979), [\#13980](https://github.com/matrix-org/synapse/issues/13980), [\#13982](https://github.com/matrix-org/synapse/issues/13982), [\#14015](https://github.com/matrix-org/synapse/issues/14015), [\#14019](https://github.com/matrix-org/synapse/issues/14019), [\#14022](https://github.com/matrix-org/synapse/issues/14022), [\#14023](https://github.com/matrix-org/synapse/issues/14023))

Synapse 1.68.0 (2022-09-27)
===========================

`Cargo.lock` (generated; 198 lines changed). The lockfile churn reflects the Rust dependency updates in this range: `pyo3` 0.16.6 -> 0.17.2 (together with `pyo3-build-config`, `pyo3-ffi`, `pyo3-macros` and `pyo3-macros-backend`), new entries for `pyo3-log` 0.7.0, `pythonize` 0.17.0, `serde` 1.0.147, `serde_derive` 1.0.147, `serde_json` 1.0.87, `regex` 1.6.0, `regex-syntax` 0.6.27, `aho-corasick` 0.7.19, `anyhow` 1.0.66, `arc-swap` 1.5.1, `itoa` 1.0.4, `lazy_static` 1.4.0, `log` 0.4.17, `memchr` 2.5.0, `memoffset` 0.6.5 and `ryu` 1.0.11, plus bumps for `libc` (0.2.132 -> 0.2.135), `lock_api` (0.4.7 -> 0.4.9), `once_cell` (1.13.1 -> 1.15.0), `proc-macro2` (1.0.43 -> 1.0.46), `smallvec` (1.9.0 -> 1.10.0), `syn` (1.0.99 -> 1.0.102) and `unicode-ident` (1.0.3 -> 1.0.5). The `synapse` crate's dependency list grows to include `anyhow`, `blake2`, `hex`, `lazy_static`, `log`, `pyo3`, `pyo3-log`, `pythonize`, `regex`, `serde` and `serde_json`.

`book.toml` (12 lines changed):

```diff
@@ -34,6 +34,14 @@ additional-css = [
    "docs/website_files/table-of-contents.css",
    "docs/website_files/remove-nav-buttons.css",
    "docs/website_files/indent-section-headers.css",
    "docs/website_files/version-picker.css",
]
additional-js = ["docs/website_files/table-of-contents.js"]
theme = "docs/website_files/theme"
additional-js = [
    "docs/website_files/table-of-contents.js",
    "docs/website_files/version-picker.js",
    "docs/website_files/version.js",
]
theme = "docs/website_files/theme"

[preprocessor.schema_versions]
command = "./scripts-dev/schema_versions.py"
```

```diff
@@ -15,6 +15,9 @@ def build(setup_kwargs: Dict[str, Any]) -> None:
        path=cargo_toml_path,
        binding=Binding.PyO3,
        py_limited_api=True,
        # We force always building in release mode, as we can't tell the
        # difference between using `poetry` in development vs production.
        debug=False,
    )
    setup_kwargs.setdefault("rust_extensions", []).append(extension)
    setup_kwargs["zip_safe"] = False
```

````diff
@@ -94,20 +94,6 @@ worker_replication_host: synapse
worker_replication_http_port: 9093
```

### Add Workers to `instance_map`

Locate the `instance_map` section of your `homeserver.yaml` and populate it with your workers:

```yaml
instance_map:
  synapse-generic-worker-1: # The worker_name setting in your worker configuration file
    host: synapse-generic-worker-1 # The name of the worker service in your Docker Compose file
    port: 8034 # The port assigned to the replication listener in your worker config file
  synapse-federation-sender-1:
    host: synapse-federation-sender-1
    port: 8034
```

### Configure Federation Senders

This section is applicable if you are using Federation senders (synapse.app.federation_sender). Locate the `send_federation` and `federation_sender_instances` settings in your `homeserver.yaml` and configure them:
@@ -122,4 +108,4 @@ federation_sender_instances:

## Other Worker types

Using the concepts shown here it is possible to create other worker types in Docker Compose. See the [Workers](https://matrix-org.github.io/synapse/latest/workers.html#available-worker-applications) documentation for a list of available workers.
Using the concepts shown here it is possible to create other worker types in Docker Compose. See the [Workers](https://matrix-org.github.io/synapse/latest/workers.html#available-worker-applications) documentation for a list of available workers.
````

```diff
@@ -5,10 +5,4 @@ worker_name: synapse-federation-sender-1
worker_replication_host: synapse
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: 8034
    resources:
      - names: [replication]

worker_log_config: /data/federation_sender.log.config
```

```diff
@@ -6,10 +6,6 @@ worker_replication_host: synapse
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: 8034
    resources:
      - names: [replication]
  - type: http
    port: 8081
    x_forwarded: true
```

```diff
@@ -1,7 +1,12 @@
groups:
- name: synapse
  rules:
    # These 3 rules are used in the included Prometheus console

    ###
    ### Prometheus Console Only
    ### The following rules are only needed if you use the Prometheus Console
    ### in contrib/prometheus/consoles/synapse.html
    ###
    - record: 'synapse_federation_client_sent'
      labels:
        type: "EDU"
@@ -15,7 +20,6 @@ groups:
        type: "Query"
      expr: 'sum(synapse_federation_client_sent_queries) by (job)'

    # These 3 rules are used in the included Prometheus console
    - record: 'synapse_federation_server_received'
      labels:
        type: "EDU"
@@ -29,7 +33,6 @@ groups:
        type: "Query"
      expr: 'sum(synapse_federation_server_received_queries) by (job)'

    # These 2 rules are used in the included Prometheus console
    - record: 'synapse_federation_transaction_queue_pending'
      labels:
        type: "EDU"
@@ -38,8 +41,16 @@ groups:
      labels:
        type: "PDU"
      expr: 'synapse_federation_transaction_queue_pending_pdus + 0'
    ###
    ### End of 'Prometheus Console Only' rules block
    ###

    # These 3 rules are used in the included Grafana dashboard

    ###
    ### Grafana Only
    ### The following rules are only needed if you use the Grafana dashboard
    ### in contrib/grafana/synapse.json
    ###
    - record: synapse_storage_events_persisted_by_source_type
      expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_type="remote"})
      labels:
@@ -53,11 +64,11 @@ groups:
      labels:
        type: bridges

    # This rule is used in the included Grafana dashboard
    - record: synapse_storage_events_persisted_by_event_type
      expr: sum without(origin_entity, origin_type) (synapse_storage_events_persisted_events_sep_total)

    # This rule is used in the included Grafana dashboard
    - record: synapse_storage_events_persisted_by_origin
      expr: sum without(type) (synapse_storage_events_persisted_events_sep_total)

    ###
    ### End of 'Grafana Only' rules block
    ###
```

```diff
@@ -7,7 +7,7 @@ You can alternatively create multiple worker configuration files with a simple `
#!/bin/bash
for i in {1..5}
do
  cat << EOF >> generic_worker$i.yaml
  cat << EOF > generic_worker$i.yaml
worker_app: synapse.app.generic_worker
worker_name: generic_worker$i

@@ -15,6 +15,8 @@ worker_name: generic_worker$i
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_main_http_uri: http://localhost:8008/

worker_listeners:
  - type: http
    port: 808$i
```

`debian/changelog` (vendored; 74 lines added). Each new entry reads "New Synapse release X" against `stable; urgency=medium`, signed off by the Synapse Packaging team (`packages@matrix.org`):

| Version | Date |
|---------|------|
| 1.71.0 | Tue, 08 Nov 2022 10:38:10 +0000 |
| 1.71.0~rc2 | Fri, 04 Nov 2022 12:00:33 +0000 |
| 1.71.0~rc1 | Tue, 01 Nov 2022 12:10:17 +0000 |
| 1.70.1 | Fri, 28 Oct 2022 12:10:21 +0100 |
| 1.70.0 | Wed, 26 Oct 2022 11:11:50 +0100 |
| 1.70.0~rc2 | Tue, 25 Oct 2022 10:59:47 +0100 |
| 1.70.0~rc1 | Wed, 19 Oct 2022 14:11:57 +0100 |
| 1.69.0 | Mon, 17 Oct 2022 11:31:03 +0100 |
| 1.69.0~rc4 | Fri, 14 Oct 2022 15:04:47 +0100 |
| 1.69.0~rc3 | Wed, 12 Oct 2022 13:24:04 +0100 |
| 1.69.0~rc2 | Thu, 06 Oct 2022 14:45:00 +0100 |
| 1.69.0~rc1 | Tue, 04 Oct 2022 11:17:16 +0100 |

The 1.69.0~rc1 entry additionally notes that the man page for the `hash_password` script has been updated to reflect the correct default value of `bcrypt_rounds`; the pre-existing 1.68.0 entry follows.

`debian/hash_password.1` (vendored; 2 lines changed):

```diff
@@ -10,7 +10,7 @@
.P
\fBhash_password\fR takes a password as an parameter either on the command line or the \fBSTDIN\fR if not supplied\.
.P
It accepts an YAML file which can be used to specify parameters like the number of rounds for bcrypt and password_config section having the pepper value used for the hashing\. By default \fBbcrypt_rounds\fR is set to \fB10\fR\.
It accepts an YAML file which can be used to specify parameters like the number of rounds for bcrypt and password_config section having the pepper value used for the hashing\. By default \fBbcrypt_rounds\fR is set to \fB12\fR\.
.P
The hashed password is written on the \fBSTDOUT\fR\.
.SH "FILES"
```

`debian/hash_password.ronn` (vendored; 2 lines changed):

```diff
@@ -14,7 +14,7 @@ or the `STDIN` if not supplied.

It accepts an YAML file which can be used to specify parameters like the
number of rounds for bcrypt and password_config section having the pepper
value used for the hashing. By default `bcrypt_rounds` is set to **10**.
value used for the hashing. By default `bcrypt_rounds` is set to **12**.

The hashed password is written on the `STDOUT`.
```

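As a quick check of the corrected default in the man-page diff above (`hash_password` now defaults to 12 bcrypt rounds), here is a standalone sketch using the `bcrypt` package directly; note that Synapse also mixes in an optional pepper, which is omitted here:

```python
import bcrypt

password = b"correct horse battery staple"
# 12 rounds matches the corrected default documented above.
hashed = bcrypt.hashpw(password, bcrypt.gensalt(rounds=12))
print(hashed.decode())  # e.g. $2b$12$...
assert bcrypt.checkpw(password, hashed)
```
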
```diff
@@ -106,7 +106,13 @@ ENV CARGO_HOME=/cargo
ENV PATH=/cargo/bin:/rust/bin:$PATH
RUN mkdir /rust /cargo

RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal


# arm64 builds consume a lot of memory if `CARGO_NET_GIT_FETCH_WITH_CLI` is not
# set to true, so we expose it as a build-arg.
ARG CARGO_NET_GIT_FETCH_WITH_CLI=false
ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_NET_GIT_FETCH_WITH_CLI

# To speed up rebuilds, install all of the dependencies before we copy over
# the whole synapse project, so that this layer in the Docker cache can be
@@ -121,7 +127,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
COPY synapse /synapse/synapse/
COPY rust /synapse/rust/
# ... and what we need to `pip install`.
COPY pyproject.toml README.rst build_rust.py /synapse/
COPY pyproject.toml README.rst build_rust.py Cargo.toml Cargo.lock /synapse/

# Repeat of earlier build argument declaration, as this is a new build stage.
ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
@@ -129,7 +135,9 @@ ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE
# Install the synapse package itself.
# If we have populated requirements.txt, we don't install any dependencies
# as we should already have those from the previous `pip install` step.
RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
RUN --mount=type=cache,target=/synapse/target,sharing=locked \
  --mount=type=cache,target=${CARGO_HOME}/registry,sharing=locked \
  if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
    pip install --prefix="/install" --no-deps --no-warn-script-location /synapse[all]; \
  else \
    pip install --prefix="/install" --no-warn-script-location /synapse[all]; \
```

```diff
@@ -92,7 +92,7 @@ ENV CARGO_HOME=/cargo
ENV PATH=/cargo/bin:/rust/bin:$PATH
RUN mkdir /rust /cargo

RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal


COPY --from=builder /dh-virtualenv_1.2.2-1_all.deb /
```

```diff
@@ -40,7 +40,11 @@ FROM matrixdotorg/synapse:$SYNAPSE_VERSION
COPY --from=deps_base /etc/nginx /etc/nginx
RUN rm /etc/nginx/sites-enabled/default
RUN mkdir /var/log/nginx /var/lib/nginx
RUN chown www-data /var/log/nginx /var/lib/nginx
RUN chown www-data /var/lib/nginx

# have nginx log to stderr/out
RUN ln -sf /dev/stdout /var/log/nginx/access.log
RUN ln -sf /dev/stderr /var/log/nginx/error.log

# Copy Synapse worker, nginx and supervisord configuration template files
COPY ./docker/conf-workers/* /conf/
```

```diff
@@ -241,4 +241,4 @@ healthcheck:

Jemalloc is embedded in the image and will be used instead of the default allocator.
You can read about jemalloc by reading the Synapse
[README](https://github.com/matrix-org/synapse/blob/HEAD/README.rst#help-synapse-is-slow-and-eats-all-my-ram-cpu).
[Admin FAQ](https://matrix-org.github.io/synapse/latest/usage/administration/admin_faq.html#help-synapse-is-slow-and-eats-all-my-ramcpu).
```

```diff
@@ -8,19 +8,15 @@

ARG SYNAPSE_VERSION=latest

# first of all, we create a base image with a postgres server and database,
# which we can copy into the target image. For repeated rebuilds, this is
# much faster than apt installing postgres each time.
#
# This trick only works because (a) the Synapse image happens to have all the
# shared libraries that postgres wants, (b) we use a postgres image based on
# the same debian version as Synapse's docker image (so the versions of the
# shared libraries match).

# now build the final image, based on the Synapse image.

FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION
# copy the postgres installation over from the image we built above
# First of all, we copy postgres server from the official postgres image,
# since for repeated rebuilds, this is much faster than apt installing
# postgres each time.

# This trick only works because (a) the Synapse image happens to have all the
# shared libraries that postgres wants, (b) we use a postgres image based on
# the same debian version as Synapse's docker image (so the versions of the
# shared libraries match).
RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
COPY --from=postgres:13-bullseye /usr/lib/postgresql /usr/lib/postgresql
COPY --from=postgres:13-bullseye /usr/share/postgresql /usr/share/postgresql
@@ -28,7 +24,7 @@ FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION
ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
ENV PGDATA=/var/lib/postgresql/data

# initialise the database cluster in /var/lib/postgresql
# We also initialize the database at build time, rather than runtime, so that it's faster to spin up the image.
RUN gosu postgres initdb --locale=C --encoding=UTF-8 --auth-host password

# Configure a password and create a database for Synapse
```

```diff
@@ -57,6 +57,7 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
    federation_reader, \
    federation_sender, \
    synchrotron, \
    client_reader, \
    appservice, \
    pusher"
```

```diff
@@ -12,6 +12,8 @@ trusted_key_servers: []
enable_registration: true
enable_registration_without_verification: true
bcrypt_rounds: 4
url_preview_enabled: true
url_preview_ip_range_blacklist: []

## Registration ##
```

```diff
@@ -39,6 +39,7 @@
# continue to work if so.

import os
import platform
import subprocess
import sys
from pathlib import Path
@@ -107,6 +108,34 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
        "shared_extra_conf": {},
        "worker_extra_conf": "",
    },
    "client_reader": {
        "app": "synapse.app.generic_worker",
        "listener_resources": ["client"],
        "endpoint_patterns": [
            "^/_matrix/client/(api/v1|r0|v3|unstable)/publicRooms$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/joined_members$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/context/.*$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$",
            "^/_matrix/client/v1/rooms/.*/hierarchy$",
            "^/_matrix/client/(v1|unstable)/rooms/.*/relations/",
            "^/_matrix/client/v1/rooms/.*/threads$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/login$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/account/3pid$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/account/whoami$",
            "^/_matrix/client/versions$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$",
            "^/_matrix/client/(r0|v3|unstable)/register$",
            "^/_matrix/client/(r0|v3|unstable)/auth/.*/fallback/web$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/messages$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms",
            "^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/search",
        ],
        "shared_extra_conf": {},
        "worker_extra_conf": "",
    },
    "federation_reader": {
        "app": "synapse.app.generic_worker",
        "listener_resources": ["federation"],
@@ -201,24 +230,19 @@ upstream {upstream_worker_type} {{

# Utility functions
def log(txt: str) -> None:
    """Log something to the stdout.

    Args:
        txt: The text to log.
    """
    print(txt)


def error(txt: str) -> NoReturn:
    """Log something and exit with an error code.

    Args:
        txt: The text to log in error.
    """
    log(txt)
    print(txt, file=sys.stderr)
    sys.exit(2)


def flush_buffers() -> None:
    sys.stdout.flush()
    sys.stderr.flush()


def convert(src: str, dst: str, **template_vars: object) -> None:
    """Generate a file from a template

@@ -299,7 +323,7 @@ def generate_base_homeserver_config() -> None:
    # start.py already does this for us, so just call that.
    # note that this script is copied in in the official, monolith dockerfile
    os.environ["SYNAPSE_HTTP_PORT"] = str(MAIN_PROCESS_HTTP_LISTENER_PORT)
    subprocess.check_output(["/usr/local/bin/python", "/start.py", "migrate_config"])
    subprocess.run(["/usr/local/bin/python", "/start.py", "migrate_config"], check=True)


def generate_worker_files(
@@ -373,8 +397,8 @@ def generate_worker_files(
        # No workers, just the main process
        worker_types = []
    else:
        # Split type names by comma
        worker_types = worker_types_env.split(",")
        # Split type names by comma, ignoring whitespace.
        worker_types = [x.strip() for x in worker_types_env.split(",")]

    # Create the worker configuration directory if it doesn't already exist
    os.makedirs("/conf/workers", exist_ok=True)
@@ -393,8 +417,6 @@

    # For each worker type specified by the user, create config values
    for worker_type in worker_types:
        worker_type = worker_type.strip()

        worker_config = WORKERS_CONFIG.get(worker_type)
        if worker_config:
            worker_config = worker_config.copy()
@@ -604,14 +626,24 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None:
    with open(mark_filepath, "w") as f:
        f.write("")

    # Lifted right out of start.py
    jemallocpath = "/usr/lib/%s-linux-gnu/libjemalloc.so.2" % (platform.machine(),)

    if os.path.isfile(jemallocpath):
        environ["LD_PRELOAD"] = jemallocpath
    else:
        log("Could not find %s, will not use" % (jemallocpath,))

    # Start supervisord, which will start Synapse, all of the configured worker
    # processes, redis, nginx etc. according to the config we created above.
    log("Starting supervisord")
    os.execl(
    flush_buffers()
    os.execle(
        "/usr/local/bin/supervisord",
        "supervisord",
        "-c",
        "/etc/supervisor/supervisord.conf",
        environ,
    )
```

@@ -13,14 +13,19 @@ import jinja2

# Utility functions
def log(txt: str) -> None:
-    print(txt, file=sys.stderr)
+    print(txt)


def error(txt: str) -> NoReturn:
-    log(txt)
+    print(txt, file=sys.stderr)
    sys.exit(2)


+def flush_buffers() -> None:
+    sys.stdout.flush()
+    sys.stderr.flush()
+
+
def convert(src: str, dst: str, environ: Mapping[str, object]) -> None:
    """Generate a file from a template
@@ -131,10 +136,10 @@ def generate_config_from_template(
    if ownership is not None:
        log(f"Setting ownership on /data to {ownership}")
-        subprocess.check_output(["chown", "-R", ownership, "/data"])
+        subprocess.run(["chown", "-R", ownership, "/data"], check=True)
        args = ["gosu", ownership] + args

-    subprocess.check_output(args)
+    subprocess.run(args, check=True)


def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) -> None:
@@ -158,7 +163,7 @@ def run_generate_config(environ: Mapping[str, str], ownership: Optional[str]) ->
    if ownership is not None:
        # make sure that synapse has perms to write to the data dir.
        log(f"Setting ownership on {data_dir} to {ownership}")
-        subprocess.check_output(["chown", ownership, data_dir])
+        subprocess.run(["chown", ownership, data_dir], check=True)

    # create a suitable log config from our template
    log_config_file = "%s/%s.log.config" % (config_dir, server_name)

@@ -185,6 +190,7 @@
        "--open-private-ports",
    ]
    # log("running %s" % (args, ))
+    flush_buffers()
    os.execv(sys.executable, args)
@@ -267,8 +273,10 @@ running with 'migrate_config'. See the README for more details.
    args = [sys.executable] + args
    if ownership is not None:
        args = ["gosu", ownership] + args
+        flush_buffers()
        os.execve("/usr/sbin/gosu", args, environ)
    else:
+        flush_buffers()
        os.execve(sys.executable, args, environ)
@@ -5,7 +5,7 @@ non-interactive way. This is generally used for bootstrapping a Synapse
instance with administrator accounts.

To authenticate yourself to the server, you will need both the shared secret
-([`registration_shared_secret`](../configuration/config_documentation.md#registration_shared_secret)
+([`registration_shared_secret`](../usage/configuration/config_documentation.md#registration_shared_secret)
in the homeserver configuration), and a one-time nonce. If the registration
shared secret is not configured, this API is not enabled.
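
For concreteness, here is a minimal sketch of the flow using only Python's standard library. It assumes a homeserver reachable at `http://localhost:8008` and the HMAC construction used by Synapse's shared-secret registration (SHA1 over the nonce, username, password and admin flag, joined by NUL bytes and keyed with the shared secret); adjust the names and URL for your deployment:

```python
import hashlib
import hmac
import json
from urllib.request import Request, urlopen

BASE = "http://localhost:8008"          # assumption: local homeserver
SHARED_SECRET = "your-shared-secret"    # the registration_shared_secret value

# 1. Fetch a one-time nonce.
with urlopen(f"{BASE}/_synapse/admin/v1/register") as resp:
    nonce = json.load(resp)["nonce"]

# 2. Compute the MAC over nonce\0username\0password\0admin-flag.
mac = hmac.new(SHARED_SECRET.encode("utf8"), digestmod=hashlib.sha1)
mac.update(b"\x00".join([nonce.encode("utf8"), b"alice", b"wonderland", b"notadmin"]))

# 3. Register the user.
body = json.dumps({
    "nonce": nonce,
    "username": "alice",
    "password": "wonderland",
    "admin": False,
    "mac": mac.hexdigest(),
}).encode("utf8")
req = Request(f"{BASE}/_synapse/admin/v1/register", data=body,
              headers={"Content-Type": "application/json"})
with urlopen(req) as resp:
    print(json.load(resp))
```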

@@ -37,6 +37,7 @@ It returns a JSON body like the following:
    "is_guest": 0,
    "admin": 0,
    "deactivated": 0,
+    "erased": false,
    "shadow_banned": 0,
    "creation_ts": 1560432506,
    "appservice_id": null,

@@ -167,6 +168,7 @@ A response body like the following is returned:
        "admin": 0,
        "user_type": null,
        "deactivated": 0,
+        "erased": false,
        "shadow_banned": 0,
        "displayname": "<User One>",
        "avatar_url": null,

@@ -177,6 +179,7 @@ A response body like the following is returned:
        "admin": 1,
        "user_type": null,
        "deactivated": 0,
+        "erased": false,
        "shadow_banned": 0,
        "displayname": "<User Two>",
        "avatar_url": "<avatar_url>",

@@ -247,6 +250,7 @@ The following fields are returned in the JSON response body:
- `user_type` - string - Type of the user. Normal users are type `None`.
  This allows user type specific behaviour. There are also types `support` and `bot`.
- `deactivated` - bool - Status if that user has been marked as deactivated.
+- `erased` - bool - Status if that user has been marked as erased.
- `shadow_banned` - bool - Status if that user has been marked as shadow banned.
- `displayname` - string - The user's display name if they have set one.
- `avatar_url` - string - The user's avatar URL if they have set one.
@@ -167,6 +167,12 @@ was broken. They are slower than the linters but will typically catch more error
poetry run trial tests
```

You can run unit tests in parallel by specifying the `-jX` argument to `trial`, where `X` is the number of parallel runners you want. To use 4 CPU cores, you would run them like:

```sh
poetry run trial -j4 tests
```

If you wish to only run *some* unit tests, you may specify
another module instead of `tests` - or a test class or a method:
@@ -327,7 +333,7 @@ SYNAPSE_TEST_LOG_LEVEL=DEBUG COMPLEMENT_DIR=../complement ./scripts-dev/compleme
### Prettier formatting with `gotestfmt`

If you want to format the output of the tests the same way as it looks in CI,
-install [gotestfmt](https://github.com/haveyoudebuggedit/gotestfmt).
+install [gotestfmt](https://github.com/GoTestTools/gotestfmt).

You can then use this incantation to format the tests appropriately:

@@ -384,7 +390,7 @@ This file will become part of our [changelog](
https://github.com/matrix-org/synapse/blob/master/CHANGES.md) at the next
release, so the content of the file should be a short description of your
change in the same style as the rest of the changelog. The file can contain Markdown
-formatting, and should end with a full stop (.) or an exclamation mark (!) for
+formatting, and must end with a full stop (.) or an exclamation mark (!) for
consistency.

Adding credits to the changelog is encouraged, we value your
@@ -195,23 +195,24 @@ There are three separate aspects to this:

## `event_id` global uniqueness

-In room versions `1` and `2` it's possible to end up with two events with the
-same `event_id` (in the same or different rooms). After room version `3`, that
-can only happen with a hash collision, which we basically hope will never
-happen.
-
-There are several places in Synapse and even Matrix APIs like [`GET
+`event_id`'s can be considered globally unique although there has been a lot of
+debate on this topic in places like
+[MSC2779](https://github.com/matrix-org/matrix-spec-proposals/issues/2779) and
+[MSC2848](https://github.com/matrix-org/matrix-spec-proposals/pull/2848) which
+has no resolution yet (as of 2022-09-01). There are several places in Synapse
+and even in the Matrix APIs like [`GET
/_matrix/federation/v1/event/{eventId}`](https://spec.matrix.org/v1.1/server-server-api/#get_matrixfederationv1eventeventid)
where we assume that event IDs are globally unique.

-But hash collisions are still possible, and by treating event IDs as room
-scoped, we can reduce the possibility of a hash collision. When scoping
-`event_id` in the database schema, it should be also accompanied by `room_id`
-(`PRIMARY KEY (room_id, event_id)`) and lookups should be done through the pair
-`(room_id, event_id)`.
+When scoping `event_id` in a database schema, it is often nice to accompany it
+with `room_id` (`PRIMARY KEY (room_id, event_id)` and a `FOREIGN KEY(room_id)
+REFERENCES rooms(room_id)`) which makes flexible lookups easy. For example, it
+makes it very easy to find and clean up everything in a room when it needs to be
+purged (no need to use a sub-`select` query or join from the `events` table).
+
+A note on collisions: In room versions `1` and `2` it's possible to end up with
+two events with the same `event_id` (in the same or different rooms). After room
+version `3`, that can only happen with a hash collision, which we basically hope
+will never happen (SHA256 has a massive key space).
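
To put a rough number on that hope, the birthday bound for a 256-bit hash gives, for $n$ events,

$$
P(\text{collision}) \approx \frac{n^2}{2 \cdot 2^{256}},
$$

so even at $n = 10^{15}$ events the probability is on the order of $10^{-48}$: negligible, but not zero, which is one more reason room-scoping remains worthwhile.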

+There has been a lot of debate on this in places like
+https://github.com/matrix-org/matrix-spec-proposals/issues/2779 and
+[MSC2848](https://github.com/matrix-org/matrix-spec-proposals/pull/2848) which
+has no resolution yet (as of 2022-09-01).
@@ -16,14 +16,21 @@
There are two methods of enabling the metrics endpoint in Synapse.

The first serves the metrics as a part of the usual web server and
-can be enabled by adding the "metrics" resource to the existing
-listener as such:
+can be enabled by adding the `metrics` resource to the existing
+listener, as in this example:

```yaml
-resources:
-  - names:
-    - client
-    - metrics
+listeners:
+  - port: 8008
+    tls: false
+    type: http
+    x_forwarded: true
+    bind_addresses: ['::1', '127.0.0.1']
+
+    resources:
+      # added "metrics" in this line
+      - names: [client, federation, metrics]
+        compress: false
```

This provides a simple way of adding metrics to your Synapse
@@ -37,14 +44,24 @@
to just internal networks easier. The served metrics are available
over HTTP only, and will be available at `/_synapse/metrics`.

-Add a new listener to homeserver.yaml:
+Add a new listener to homeserver.yaml, as in this example:

```yaml
-listeners:
-  - type: metrics
-    port: 9000
-    bind_addresses:
-      - '0.0.0.0'
+listeners:
+  - port: 8008
+    tls: false
+    type: http
+    x_forwarded: true
+    bind_addresses: ['::1', '127.0.0.1']
+
+    resources:
+      - names: [client, federation]
+        compress: false
+
+  # beginning of the new metrics listener
+  - port: 9000
+    type: metrics
+    bind_addresses: ['::1', '127.0.0.1']
```

1. Restart Synapse.
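
Before wiring the new listener into Prometheus, it can be handy to confirm it responds. A minimal check, assuming the metrics listener from the example above is bound to port 9000 on localhost:

```python
from urllib.request import urlopen

# Fetch the Prometheus exposition-format payload from the new listener.
with urlopen("http://127.0.0.1:9000/_synapse/metrics") as resp:
    text = resp.read().decode("utf8")

# Print the first real sample line (`metric_name{labels} value`).
for line in text.splitlines():
    if line and not line.startswith("#"):
        print(line)
        break
```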

@@ -135,6 +152,8 @@ Synapse 1.2 updates the Prometheus metrics to match the naming
convention of the upstream `prometheus_client`. The old names are
considered deprecated and will be removed in a future version of
Synapse.
+**The old names will be disabled by default in Synapse v1.71.0 and removed
+altogether in Synapse v1.73.0.**

| New Name | Old Name |
| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------- |

@@ -146,6 +165,13 @@ Synapse.
| synapse_federation_client_events_processed_total | synapse_federation_client_events_processed |
| synapse_event_processing_loop_count_total | synapse_event_processing_loop_count |
| synapse_event_processing_loop_room_count_total | synapse_event_processing_loop_room_count |
+| synapse_util_caches_cache_hits | synapse_util_caches_cache:hits |
+| synapse_util_caches_cache_size | synapse_util_caches_cache:size |
+| synapse_util_caches_cache_evicted_size | synapse_util_caches_cache:evicted_size |
+| synapse_util_caches_cache | synapse_util_caches_cache:total |
+| synapse_util_caches_response_cache_size | synapse_util_caches_response_cache:size |
+| synapse_util_caches_response_cache_hits | synapse_util_caches_response_cache:hits |
+| synapse_util_caches_response_cache_evicted_size | synapse_util_caches_response_cache:evicted_size |
| synapse_util_metrics_block_count_total | synapse_util_metrics_block_count |
| synapse_util_metrics_block_time_seconds_total | synapse_util_metrics_block_time_seconds |
| synapse_util_metrics_block_ru_utime_seconds_total | synapse_util_metrics_block_ru_utime_seconds |

@@ -183,6 +209,9 @@ Synapse.
| synapse_http_httppusher_http_pushes_failed_total | synapse_http_httppusher_http_pushes_failed |
| synapse_http_httppusher_badge_updates_processed_total | synapse_http_httppusher_badge_updates_processed |
| synapse_http_httppusher_badge_updates_failed_total | synapse_http_httppusher_badge_updates_failed |
+| synapse_admin_mau_current | synapse_admin_mau:current |
+| synapse_admin_mau_max | synapse_admin_mau:max |
+| synapse_admin_mau_registered_reserved_users | synapse_admin_mau:registered_reserved_users |

Removal of deprecated metrics & time based counters becoming histograms in 0.31.0
---------------------------------------------------------------------------------
@@ -261,7 +290,7 @@ Standard Metric Names

As of synapse version 0.18.2, the format of the process-wide metrics has
been changed to fit prometheus standard naming conventions. Additionally
-the units have been changed to seconds, from miliseconds.
+the units have been changed to seconds, from milliseconds.

| New name | Old name |
| ---------------------------------------- | --------------------------------- |
@@ -49,6 +49,13 @@ setting in your configuration file.
See the [configuration manual](usage/configuration/config_documentation.md#oidc_providers) for some sample settings, as well as
the text below for example configurations for specific providers.

## OIDC Back-Channel Logout

Synapse supports receiving [OpenID Connect Back-Channel Logout](https://openid.net/specs/openid-connect-backchannel-1_0.html) notifications.

This lets the OpenID Connect Provider notify Synapse when a user logs out, so that Synapse can end that user's session.
This feature can be enabled by setting the `backchannel_logout_enabled` property to `true` in the provider configuration, and setting the following URL as the destination for Back-Channel Logout notifications in your OpenID Connect Provider: `[synapse public baseurl]/_synapse/client/oidc/backchannel_logout`

## Sample configs

Here are a few configs for providers that should work with Synapse.

@@ -123,6 +130,9 @@ oidc_providers:

[Keycloak][keycloak-idp] is an opensource IdP maintained by Red Hat.

Keycloak supports OIDC Back-Channel Logout, which sends a logout notification to Synapse, so that Synapse users get logged out when they log out from Keycloak.
This can be optionally enabled by setting `backchannel_logout_enabled` to `true` in the Synapse configuration, and by setting the "Backchannel Logout URL" in Keycloak.

Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to install Keycloak and set up a realm.

1. Click `Clients` in the sidebar and click `Create`

@@ -144,6 +154,8 @@ Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to
| Client Protocol | `openid-connect` |
| Access Type | `confidential` |
| Valid Redirect URIs | `[synapse public baseurl]/_synapse/client/oidc/callback` |
+| Backchannel Logout URL (optional) | `[synapse public baseurl]/_synapse/client/oidc/backchannel_logout` |
+| Backchannel Logout Session Required (optional) | `On` |

5. Click `Save`
6. On the Credentials tab, update the fields:
@@ -167,7 +179,9 @@ oidc_providers:
    config:
      localpart_template: "{{ user.preferred_username }}"
      display_name_template: "{{ user.name }}"
+      backchannel_logout_enabled: true # Optional
```

### Auth0

[Auth0][auth0] is a hosted SaaS IdP solution.

@@ -336,11 +350,12 @@ oidc_providers:
    issuer: "https://accounts.google.com/"
    client_id: "your-client-id" # TO BE FILLED
    client_secret: "your-client-secret" # TO BE FILLED
-    scopes: ["openid", "profile"]
+    scopes: ["openid", "profile", "email"] # email is optional, read below
    user_mapping_provider:
      config:
        localpart_template: "{{ user.given_name|lower }}"
        display_name_template: "{{ user.name }}"
+        email_template: "{{ user.email }}" # needs "email" in scopes above
```
4. Back in the Google console, add this Authorized redirect URI: `[synapse
   public baseurl]/_synapse/client/oidc/callback`.
@@ -423,7 +438,7 @@ Synapse config:
    user_mapping_provider:
      config:
        display_name_template: "{{ user.name }}"
-        email_template: "{{ '{{ user.email }}' }}"
+        email_template: "{{ user.email }}"
```

Relevant documents:
@@ -6,7 +6,7 @@
# Synapse also supports structured logging for machine readable logs which can
# be ingested by ELK stacks. See [2] for details.
#
-# [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
+# [1]: https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema
# [2]: https://matrix-org.github.io/synapse/latest/structured_logging.html

version: 1
@@ -181,7 +181,7 @@ doas pkg_add synapse
#### NixOS

Robin Lambertz has packaged Synapse for NixOS at:
-<https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-synapse.nix>
+<https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/matrix/synapse.nix>

### Installing as a Python module from PyPI
@@ -73,8 +73,8 @@ A custom mapping provider must specify the following methods:
* `async def map_user_attributes(self, userinfo, token, failures)`
  - This method must be async.
  - Arguments:
-    - `userinfo` - A `authlib.oidc.core.claims.UserInfo` object to extract user
-      information from.
+    - `userinfo` - An [`authlib.oidc.core.claims.UserInfo`](https://docs.authlib.org/en/latest/specs/oidc.html#authlib.oidc.core.UserInfo)
+      object to extract user information from.
    - `token` - A dictionary which includes information necessary to make
      further requests to the OpenID provider.
    - `failures` - An `int` that represents the amount of times the returned

@@ -91,7 +91,13 @@ A custom mapping provider must specify the following methods:
      `None`, the user is prompted to pick their own username. This is only used
      during a user's first login. Once a localpart has been associated with a
      remote user ID (see `get_remote_user_id`) it cannot be updated.
-    - `displayname`: An optional string, the display name for the user.
+    - `confirm_localpart`: A boolean. If set to `True`, when a `localpart`
+      string is returned from this method, Synapse will prompt the user to
+      either accept this localpart or pick their own username. Otherwise this
+      option has no effect. If omitted, defaults to `False`.
+    - `display_name`: An optional string, the display name for the user.
+    - `emails`: A list of strings, the email address(es) to associate with
+      this user. If omitted, defaults to an empty list.
* `async def get_extra_attributes(self, userinfo, token)`
  - This method must be async.
  - Arguments:
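
For orientation, here is a hedged sketch of what a provider implementing `map_user_attributes` with the fields described above might look like. The class name, config handling, and the claims read from `userinfo` are illustrative assumptions, not part of any shipped provider:

```python
# Sketch of a custom OIDC mapping provider (scaffolding such as
# parse_config() and get_remote_user_id() omitted for brevity).
class ExampleOidcMappingProvider:
    def __init__(self, parsed_config):
        self._config = parsed_config  # hypothetical parsed config object

    async def map_user_attributes(self, userinfo, token, failures):
        # authlib's UserInfo behaves like a dict of OIDC claims.
        localpart = userinfo["preferred_username"]
        if failures:
            # An earlier attempt collided with an existing Matrix ID;
            # disambiguate by appending the failure count.
            localpart += str(failures)
        return {
            "localpart": localpart,
            "display_name": userinfo.get("name"),
            "emails": [userinfo["email"]] if "email" in userinfo else [],
            "confirm_localpart": False,
        }
```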
docs/systemd-with-workers/workers/federation_sender.yaml (new file)
@@ -0,0 +1,8 @@
worker_app: synapse.app.federation_sender
worker_name: federation_sender1

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_log_config: /etc/matrix-synapse/federation-sender-log.yaml

docs/systemd-with-workers/workers/media_worker.yaml (new file)
@@ -0,0 +1,14 @@
worker_app: synapse.app.media_repository
worker_name: media_worker

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: 8085
    resources:
      - names: [media]

worker_log_config: /etc/matrix-synapse/media-worker-log.yaml

docs/systemd-with-workers/workers/pusher_worker.yaml (new file)
@@ -0,0 +1,8 @@
worker_app: synapse.app.pusher
worker_name: pusher_worker1

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_log_config: /etc/matrix-synapse/pusher-worker-log.yaml
docs/upgrade.md
@@ -88,13 +88,147 @@ process, for example:
    dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
    ```

# Upgrading to v1.71.0

## Removal of the `generate_short_term_login_token` module API method

As announced with the release of [Synapse 1.69.0](#deprecation-of-the-generate_short_term_login_token-module-api-method), the deprecated `generate_short_term_login_token` module method has been removed.

Modules relying on it can instead use the `create_login_token` method.


## Changes to the events received by application services (interest)

To align with spec (changed in
[MSC3905](https://github.com/matrix-org/matrix-spec-proposals/pull/3905)), Synapse now
only considers local users to be interesting. In other words, the `users` namespace
regex is only applied against local users of the homeserver.

Please note, this probably doesn't affect the expected behavior of your application
service, since an interesting local user in a room still means all messages in the room
(from local or remote users) will still be considered interesting. And matching a room
with the `rooms` or `aliases` namespace regex will still consider all events sent in the
room to be interesting to the application service.

If one of your application service's `users` regexes was intended to match a remote user,
it will no longer match as you expect; a small illustration follows. The behavioral mismatch between matching all
local users and some remote users is why the spec was changed/clarified and this
caveat is no longer supported.
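
To make the change concrete, a small hypothetical illustration (the regex and user IDs are made up):

```python
import re

# A `users` namespace regex that textually matches a *remote* user ID.
users_regex = re.compile(r"@alice:remote\.example\.com")

remote_user = "@alice:remote.example.com"
print(bool(users_regex.fullmatch(remote_user)))  # True as a pure regex match...
# ...but Synapse now only evaluates `users` namespace regexes against local
# users of the homeserver, so this pattern no longer makes the appservice
# interested in @alice's events.
```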

## Legacy Prometheus metric names are now disabled by default

Synapse v1.71.0 disables legacy Prometheus metric names by default.
For administrators that still rely on them and have not yet had a chance to update their
uses of the metrics, it's still possible to specify `enable_legacy_metrics: true` in
the configuration to re-enable them temporarily.

Synapse v1.73.0 will **remove legacy metric names altogether** and at that point,
it will no longer be possible to re-enable them.

If you do not use metrics or you have already updated your Grafana dashboard(s),
Prometheus console(s) and alerting rule(s), there is no action needed.

See [v1.69.0: Deprecation of legacy Prometheus metric names](#deprecation-of-legacy-prometheus-metric-names).

# Upgrading to v1.69.0

## Changes to the receipts replication streams

Synapse now includes information indicating if a receipt applies to a thread when
replicating it to other workers. This is a forwards- and backwards-incompatible
change: v1.68 and earlier workers cannot process receipts replicated by v1.69 workers, and
vice versa.

Once all workers are upgraded to v1.69 (or downgraded to v1.68), receipts
replication will resume as normal.


## Deprecation of legacy Prometheus metric names

In current versions of Synapse, some Prometheus metrics are emitted under two different names,
with one of the names being older but non-compliant with OpenMetrics and Prometheus conventions
and one of the names being newer but compliant.

Synapse v1.71.0 will turn the old metric names off *by default*.
For administrators that still rely on them and have not had a chance to update their
uses of the metrics, it's possible to specify `enable_legacy_metrics: true` in
the configuration to re-enable them temporarily.

Synapse v1.73.0 will **remove legacy metric names altogether** and it will no longer
be possible to re-enable them.

The Grafana dashboard, Prometheus recording rules and Prometheus Consoles included
in the `contrib` directory in the Synapse repository have been updated to no longer
rely on the legacy names. These can be used on a current version of Synapse
because current versions of Synapse emit both old and new names.

You may need to update your alerting rules or any other rules that depend on
the names of Prometheus metrics.
If you want to test your changes before legacy names are disabled by default,
you may specify `enable_legacy_metrics: false` in your homeserver configuration.

A list of affected metrics is available on the [Metrics How-to page](https://matrix-org.github.io/synapse/v1.69/metrics-howto.html?highlight=metrics%20deprecated#renaming-of-metrics--deprecation-of-old-names-in-12).

## Deprecation of the `generate_short_term_login_token` module API method

The following method of the module API has been deprecated, and is scheduled to
be removed in v1.71.0:

```python
def generate_short_term_login_token(
    self,
    user_id: str,
    duration_in_ms: int = (2 * 60 * 1000),
    auth_provider_id: str = "",
    auth_provider_session_id: Optional[str] = None,
) -> str:
    ...
```

It has been replaced by an asynchronous equivalent:

```python
async def create_login_token(
    self,
    user_id: str,
    duration_in_ms: int = (2 * 60 * 1000),
    auth_provider_id: Optional[str] = None,
    auth_provider_session_id: Optional[str] = None,
) -> str:
    ...
```

Synapse will log a warning when a module uses the deprecated method, to help
administrators find modules using it.
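
As a hedged sketch of the migration for module authors (the `api` object here stands in for the `ModuleApi` instance a module is given; names are illustrative):

```python
# Before (deprecated, removed in v1.71.0):
#     token = api.generate_short_term_login_token(user_id)

# After: the replacement is async and must be awaited.
async def mint_login_token(api, user_id: str) -> str:
    return await api.create_login_token(
        user_id,
        duration_in_ms=2 * 60 * 1000,
    )
```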

# Upgrading to v1.68.0

-As announced in the upgrade notes for v1.67.0, Synapse now requires a SQLite
-version of 3.27.0 or higher if SQLite is in use and source checkouts of Synapse
-now require a recent Rust compiler.
+Two changes announced in the upgrade notes for v1.67.0 have now landed in v1.68.0.

## SQLite version requirement

Synapse now requires a SQLite version of 3.27.0 or higher if SQLite is configured as
Synapse's database.

Installations using

- Docker images [from `matrixdotorg`](https://hub.docker.com/r/matrixdotorg/synapse),
- Debian packages [from Matrix.org](https://packages.matrix.org/), or
- a PostgreSQL database

are not affected.

## Rust requirement when building from source

Building from a source checkout of Synapse now requires a recent Rust compiler
(currently Rust 1.58.1, but see also the
[Platform Dependency Policy](https://matrix-org.github.io/synapse/latest/deprecation_policy.html)).

Installations using

- Docker images [from `matrixdotorg`](https://hub.docker.com/r/matrixdotorg/synapse),
- Debian packages [from Matrix.org](https://packages.matrix.org/), or
@@ -134,12 +268,12 @@ The simplest way of installing Rust is via [rustup.rs](https://rustup.rs/)

## SQLite version requirement in the next release

From the next major release (v1.68.0) Synapse will require SQLite 3.27.0 or
higher. Synapse v1.67.0 will be the last major release supporting SQLite
versions 3.22 to 3.26.

Those using Docker images or Debian packages from Matrix.org will not be
affected. If you have installed from source, you should check the version of
SQLite used by Python with:

```shell
@@ -73,12 +73,12 @@ When a request is blocked, the response will have the `errcode` `M_RESOURCE_LIMI

Synapse records several different prometheus metrics for MAU.

-`synapse_admin_mau:current` records the current MAU figure for native (non-application-service) users.
+`synapse_admin_mau_current` records the current MAU figure for native (non-application-service) users.

-`synapse_admin_mau:max` records the maximum MAU as dictated by the `max_mau_value` config value.
+`synapse_admin_mau_max` records the maximum MAU as dictated by the `max_mau_value` config value.

`synapse_admin_mau_current_mau_by_service` records the current MAU including application service users. The label `app_service` can be used
to filter by a specific service ID. This *also* includes non-application-service users under `app_service=native`.

-`synapse_admin_mau:registered_reserved_users` records the number of users specified in `mau_limits_reserved_threepids` which have
+`synapse_admin_mau_registered_reserved_users` records the number of users specified in `mau_limits_reserved_threepids` which have
registered accounts on the homeserver.
@@ -99,7 +99,7 @@ modules:
    config: {}
```
---
-## Server ##
+## Server

Define your homeserver name and other base options.

@@ -159,7 +159,7 @@ including _matrix/...). This is the same URL a user might enter into the
'Custom Homeserver URL' field on their client. If you use Synapse with a
reverse proxy, this should be the URL to reach Synapse via the proxy.
Otherwise, it should be the URL to reach Synapse's client HTTP listener (see
-'listeners' below).
+['listeners'](#listeners) below).

Defaults to `https://<server_name>/`.

@@ -179,7 +179,7 @@ This will tell other servers to send traffic to port 443 instead.

This option currently defaults to false.

-See https://matrix-org.github.io/synapse/latest/delegate.html for more
+See [Delegation of incoming federation traffic](../../delegate.md) for more
information.

Example configuration:

@@ -570,7 +570,7 @@ Example configuration:
delete_stale_devices_after: 1y
```

-## Homeserver blocking ##
+## Homeserver blocking
Useful options for Synapse admins.

---

@@ -922,7 +922,7 @@ retention:
    interval: 1d
```
---
-## TLS ##
+## TLS

Options related to TLS.
@@ -1012,7 +1012,7 @@ federation_custom_ca_list:
  - myCA3.pem
```
---
-## Federation ##
+## Federation

Options related to federation.

@@ -1071,7 +1071,7 @@ Example configuration:
allow_device_name_lookup_over_federation: true
```
---
-## Caching ##
+## Caching

Options related to caching.

@@ -1139,7 +1139,7 @@ number of entries that can be stored.

* `cache_autotuning` and its sub-options `max_cache_memory_usage`, `target_cache_memory_usage`, and
  `min_cache_ttl` work in conjunction with each other to maintain a balance between cache memory
-  usage and cache entry availability. You must be using [jemalloc](https://github.com/matrix-org/synapse#help-synapse-is-slow-and-eats-all-my-ramcpu)
+  usage and cache entry availability. You must be using [jemalloc](../administration/admin_faq.md#help-synapse-is-slow-and-eats-all-my-ramcpu)
  to utilize this option, and all three of the options must be specified for this feature to work. This option
  defaults to off, enable it by providing values for the sub-options listed below. Please note that the feature will not work
  and may cause unstable behavior (such as excessive emptying of caches or exceptions) if all of the values are not provided.
@@ -1185,7 +1185,7 @@ file in Synapse's `contrib` directory, you can send a `SIGHUP` signal by using
`systemctl reload matrix-synapse`.

---
-## Database ##
+## Database
Config options related to database settings.

---

@@ -1332,20 +1332,21 @@ databases:
    cp_max: 10
```
---
-## Logging ##
+## Logging
Config options related to logging.

---
### `log_config`

-This option specifies a yaml python logging config file as described [here](https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema).
+This option specifies a yaml python logging config file as described
+[here](https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema).

Example configuration:
```yaml
log_config: "CONFDIR/SERVERNAME.log.config"
```
---
-## Ratelimiting ##
+## Ratelimiting
Options related to ratelimiting in Synapse.

Each ratelimiting configuration is made of two parameters:
@@ -1576,7 +1577,7 @@ Example configuration:
federation_rr_transactions_per_room_per_second: 40
```
---
-## Media Store ##
+## Media Store
Config options related to Synapse's media store.

---

@@ -1766,7 +1767,7 @@ url_preview_ip_range_blacklist:
  - 'ff00::/8'
  - 'fec0::/10'
```
-----
+---
### `url_preview_ip_range_whitelist`

This option sets a list of IP address CIDR ranges that the URL preview spider is allowed

@@ -1860,7 +1861,7 @@ Example configuration:
  - 'fr;q=0.8'
  - '*;q=0.7'
```
-----
+---
### `oembed`

oEmbed allows for easier embedding content from a website. It can be

@@ -1877,7 +1878,7 @@ oembed:
  - oembed/my_providers.json
```
---
-## Captcha ##
+## Captcha

See [here](../../CAPTCHA_SETUP.md) for full details on setting up captcha.

@@ -1926,7 +1927,7 @@ Example configuration:
recaptcha_siteverify_api: "https://my.recaptcha.site"
```
---
-## TURN ##
+## TURN
Options related to adding a TURN server to Synapse.

---

@@ -1947,7 +1948,7 @@ Example configuration:
```yaml
turn_shared_secret: "YOUR_SHARED_SECRET"
```
-----
+---
### `turn_username` and `turn_password`

The Username and password if the TURN server needs them and does not use a token.
@@ -2088,7 +2089,7 @@ set.

This is primarily intended for use with the `register_new_matrix_user` script
(see [Registering a user](../../setup/installation.md#registering-a-user));
-however, the interface is [documented](../admin_api/register_api.html).
+however, the interface is [documented](../../admin_api/register_api.html).

See also [`registration_shared_secret_path`](#registration_shared_secret_path).

@@ -2229,6 +2230,9 @@ homeserver. If the room already exists, make certain it is a publicly joinable
room, i.e. the join rule of the room must be set to 'public'. You can find more options
relating to auto-joining rooms below.

As Spaces are just rooms under the hood, Space aliases may also be
used.

Example configuration:
```yaml
auto_join_rooms:

@@ -2240,7 +2244,7 @@ auto_join_rooms:

Where `auto_join_rooms` are specified, setting this flag ensures that
the rooms exist by creating them when the first user on the
-homeserver registers.
+homeserver registers. This option will not create Spaces.

By default the auto-created rooms are publicly joinable from any federated
server. Use the `autocreate_auto_join_rooms_federated` and

@@ -2258,7 +2262,7 @@ autocreate_auto_join_rooms: false
---
### `autocreate_auto_join_rooms_federated`

-Whether the rooms listen in `auto_join_rooms` that are auto-created are available
+Whether the rooms listed in `auto_join_rooms` that are auto-created are available
via federation. Only has an effect if `autocreate_auto_join_rooms` is true.

Note that whether a room is federated cannot be modified after
@@ -2363,7 +2367,7 @@ Example configuration:
```yaml
session_lifetime: 24h
```
-----
+---
### `refresh_access_token_lifetime`

Time that an access token remains valid for, if the session is using refresh tokens.

@@ -2419,7 +2423,7 @@ nonrefreshable_access_token_lifetime: 24h
```

---
-## Metrics ###
+## Metrics
Config options related to metrics.

---

@@ -2433,6 +2437,31 @@ Example configuration:
enable_metrics: true
```
---
### `enable_legacy_metrics`

Set to `true` to publish both legacy and non-legacy Prometheus metric names,
or to `false` to only publish non-legacy Prometheus metric names.
Defaults to `false`. Has no effect if `enable_metrics` is `false`.
**In Synapse v1.67.0 up to and including Synapse v1.70.1, this defaulted to `true`.**

Legacy metric names include:
- metrics containing colons in the name, such as `synapse_util_caches_response_cache:hits`, because colons are supposed to be reserved for user-defined recording rules;
- counters that don't end with the `_total` suffix, such as `synapse_federation_client_sent_edus`, therefore not adhering to the OpenMetrics standard.

These legacy metric names are unconventional and not compliant with OpenMetrics standards.
They are included for backwards compatibility.

Example configuration:
```yaml
enable_legacy_metrics: false
```

See https://github.com/matrix-org/synapse/issues/11106 for context.

*Since v1.67.0.*

**Will be removed in v1.73.0.**
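
As a quick illustration of the two compliance rules just listed, here is a tiny hypothetical checker (not part of Synapse; purely for intuition):

```python
def is_legacy_metric_name(name: str, is_counter: bool = False) -> bool:
    """Heuristic from the rules above: legacy names contain a colon,
    or are counters missing the `_total` suffix."""
    if ":" in name:  # colons are reserved for user-defined recording rules
        return True
    if is_counter and not name.endswith("_total"):
        return True
    return False

print(is_legacy_metric_name("synapse_util_caches_response_cache:hits"))          # True
print(is_legacy_metric_name("synapse_federation_client_sent_edus", True))        # True
print(is_legacy_metric_name("synapse_federation_client_sent_edus_total", True))  # False
```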

---
### `sentry`

Use this option to enable sentry integration. Provide the DSN assigned to you by sentry

@@ -2491,7 +2520,7 @@ Example configuration:
report_stats_endpoint: https://example.com/report-usage-stats/push
```
---
-## API Configuration ##
+## API Configuration
Config settings related to the client/server API

---

@@ -2591,7 +2620,7 @@ Example configuration:
form_secret: <PRIVATE STRING>
```
---
-## Signing Keys ##
+## Signing Keys
Config options relating to signing keys

---

@@ -2652,6 +2681,12 @@ is still supported for backwards-compatibility, but it is deprecated.
warning on start-up. To suppress this warning, set
`suppress_key_server_warning` to true.

If the use of a trusted key server has to be deactivated, e.g. in a private
federation or for privacy reasons, this can be realised by setting
an empty array (`trusted_key_servers: []`). Then Synapse will request the keys
directly from the server that owns the keys. If Synapse does not get keys directly
from the server, the events of this server will be rejected.

Options for each entry in the list include:
* `server_name`: the name of the server. Required.
* `verify_keys`: an optional map from key id to base64-encoded public key.

@@ -2700,7 +2735,7 @@ Example configuration:
key_server_signing_keys_path: "key_server_signing_keys.key"
```
---
-## Single sign-on integration ##
+## Single sign-on integration

The following settings can be used to make Synapse use a single sign-on
provider for authentication, instead of its internal password database.
@@ -2949,7 +2984,7 @@ Options for each entry include:

* `module`: The class name of a custom mapping module. Default is
  `synapse.handlers.oidc.JinjaOidcMappingProvider`.
-  See https://matrix-org.github.io/synapse/latest/sso_mapping_providers.html#openid-mapping-providers
+  See [OpenID Mapping Providers](../../sso_mapping_providers.md#openid-mapping-providers)
  for information on implementing a custom mapping provider.

* `config`: Configuration for the mapping provider module. This section will

@@ -2986,6 +3021,15 @@ Options for each entry include:
  which is set to the claims returned by the UserInfo Endpoint and/or
  in the ID Token.

* `backchannel_logout_enabled`: set to `true` to process OIDC Back-Channel Logout notifications.
  Those notifications are expected to be received on `/_synapse/client/oidc/backchannel_logout`.
  Defaults to `false`.

* `backchannel_logout_ignore_sub`: by default, the OIDC Back-Channel Logout feature checks that the
  `sub` claim matches the subject claim received during login. This check can be disabled by setting
  this to `true`. Defaults to `false`.

  You might want to disable this if the `subject_claim` returned by the mapping provider is not `sub`.

It is possible to configure Synapse to only allow logins if certain attributes
match particular values in the OIDC userinfo. The requirements can be listed under

@@ -3320,7 +3364,7 @@ email:
  email_validation: "[%(server_name)s] Validate your email"
```
---
-## Push ##
+## Push
Configuration settings related to push notifications

---

@@ -3353,11 +3397,11 @@ push:
  group_unread_count_by_room: false
```
---
-## Rooms ##
+## Rooms
Config options relating to rooms.

---
-### `encryption_enabled_by_default`
+### `encryption_enabled_by_default_for_room_type`

Controls whether locally-created rooms should be end-to-end encrypted by
default.
@@ -3390,13 +3434,15 @@ This option has the following sub-options:
  the user directory. If false, search results will only contain users
  visible in public rooms and users sharing a room with the requester.
  Defaults to false.

-  NB. If you set this to true, and the last time the user_directory search
-  indexes were (re)built was before Synapse 1.44, you'll have to
-  rebuild the indexes in order to search through all known users.
-
  These indexes are built the first time Synapse starts; admins can
-  manually trigger a rebuild via API following the instructions at
-  https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/background_updates.html#run
-  Set to true to return search results containing all known users, even if that
+  manually trigger a rebuild via the API following the instructions
+  [for running background updates](../administration/admin_api/background_updates.md#run),
+  set to true to return search results containing all known users, even if that
  user does not share a room with the requester.
* `prefer_local_users`: Defines whether to prefer local users in search query results.
  If set to true, local users are more likely to appear above remote users when searching the

@@ -3511,9 +3557,9 @@ Example configuration:
enable_room_list_search: false
```
---
-### `alias_creation`
+### `alias_creation_rules`

-The `alias_creation` option controls who is allowed to create aliases
+The `alias_creation_rules` option controls who is allowed to create aliases
on this server.

The format of this option is a list of rules that contain globs that
@@ -3597,7 +3643,7 @@ default_power_level_content_override:
```

---
-## Opentracing ##
+## Opentracing
Configuration options related to Opentracing support.

---

@@ -3640,14 +3686,71 @@ opentracing:
    false
```
---
-## Workers ##
-Configuration options related to workers.
+## Coordinating workers
+Configuration options related to workers which belong in the main config file
+(usually called `homeserver.yaml`).
+A Synapse deployment can scale horizontally by running multiple Synapse processes
+called _workers_. Incoming requests are distributed between workers to handle higher
+loads. Some workers are privileged and can accept requests from other workers.
+
+As a result, the worker configuration is divided into two parts.
+
+1. The first part (in this section of the manual) defines which shardable tasks
+   are delegated to privileged workers. This allows unprivileged workers to
+   request a privileged worker to act on their behalf.
+1. [The second part](#individual-worker-configuration)
+   controls the behaviour of individual workers in isolation.
+
+For guidance on setting up workers, see the [worker documentation](../../workers.md).
---
### `worker_replication_secret`

A shared secret used by the replication APIs on the main process to authenticate
HTTP requests from workers.

By default this value is omitted (equivalently `null`), which means that
traffic between the workers and the main process is not authenticated.

Example configuration:
```yaml
worker_replication_secret: "secret_secret"
```
---
### `start_pushers`

Controls sending of push notifications on the main process. Set to `false`
if using a [pusher worker](../../workers.md#synapseapppusher). Defaults to `true`.

Example configuration:
```yaml
start_pushers: false
```
---
### `pusher_instances`

It is possible to run multiple [pusher workers](../../workers.md#synapseapppusher),
in which case the work is balanced across them. Use this setting to list the pushers by
[`worker_name`](#worker_name). Ensure the main process and all pusher workers are
restarted after changing this option.

If no or only one pusher worker is configured, this setting is not necessary.
The main process will send out push notifications by default if you do not disable
it by setting [`start_pushers: false`](#start_pushers).

Example configuration:
```yaml
start_pushers: false
pusher_instances:
  - pusher_worker1
  - pusher_worker2
```
---
### `send_federation`

Controls sending of outbound federation transactions on the main process.
-Set to false if using a federation sender worker. Defaults to true.
+Set to `false` if using a [federation sender worker](../../workers.md#synapseappfederation_sender).
+Defaults to `true`.

Example configuration:
```yaml

@@ -3656,8 +3759,9 @@ send_federation: false
---
### `federation_sender_instances`

-It is possible to run multiple federation sender workers, in which case the
-work is balanced across them. Use this setting to list the senders.
+It is possible to run multiple
+[federation sender workers](../../workers.md#synapseappfederation_sender), in which
+case the work is balanced across them. Use this setting to list the senders.

This configuration setting must be shared between all federation sender workers, and if
changed all federation sender workers must be stopped at the same time and then

@@ -3666,14 +3770,19 @@ events may be dropped).

Example configuration:
```yaml
send_federation: false
federation_sender_instances:
  - federation_sender1
```
---
### `instance_map`

-When using workers this should be a map from worker name to the
+When using workers this should be a map from [`worker_name`](#worker_name) to the
HTTP replication listener of the worker, if configured.
+Each worker declared under [`stream_writers`](../../workers.md#stream-writers) needs
+an HTTP replication listener, and that listener should be included in the `instance_map`.
+(The main process also needs an HTTP replication listener, but it should not be
+listed in the `instance_map`.)

Example configuration:
```yaml
@@ -3686,8 +3795,11 @@ instance_map:
### `stream_writers`

Experimental: When using workers you can define which workers should
-handle event persistence and typing notifications. Any worker
-specified here must also be in the `instance_map`.
+handle writing to streams such as event persistence and typing notifications.
+Any worker specified here must also be in the [`instance_map`](#instance_map).
+
+See the list of available streams in the
+[worker documentation](../../workers.md#stream-writers).

Example configuration:
```yaml

@@ -3698,29 +3810,18 @@ stream_writers:
---
### `run_background_tasks_on`

-The worker that is used to run background tasks (e.g. cleaning up expired
-data). If not provided this defaults to the main process.
+The [worker](../../workers.md#background-tasks) that is used to run
+background tasks (e.g. cleaning up expired data). If not provided this
+defaults to the main process.

Example configuration:
```yaml
run_background_tasks_on: worker1
```
---
-### `worker_replication_secret`
-
-A shared secret used by the replication APIs to authenticate HTTP requests
-from workers.
-
-By default this is unused and traffic is not authenticated.
-
-Example configuration:
-```yaml
-worker_replication_secret: "secret_secret"
-```
### `redis`

-Configuration for Redis when using workers. This *must* be enabled when
-using workers (unless using old style direct TCP configuration).
+Configuration for Redis when using workers. This *must* be enabled when using workers.
This setting has the following sub-options:
* `enabled`: whether to use Redis support. Defaults to false.
* `host` and `port`: Optional host and port to use to connect to redis. Defaults to
@@ -3735,7 +3836,123 @@ redis:
    port: 6379
    password: <secret_password>
```
-## Background Updates ##
---
## Individual worker configuration
These options configure an individual worker, in its worker configuration file.
They should not be provided when configuring the main process.

Note also the configuration above for
[coordinating a cluster of workers](#coordinating-workers).

For guidance on setting up workers, see the [worker documentation](../../workers.md).

---
### `worker_app`

The type of worker. The currently available worker applications are listed
in [worker documentation](../../workers.md#available-worker-applications).

The most common worker is the
[`synapse.app.generic_worker`](../../workers.md#synapseappgeneric_worker).

Example configuration:
```yaml
worker_app: synapse.app.generic_worker
```
---
### `worker_name`

A unique name for the worker. The worker needs a name to be addressed in
further parameters and identification in log files. We strongly recommend
giving each worker a unique `worker_name`.

Example configuration:
```yaml
worker_name: generic_worker1
```
---
### `worker_replication_host`

The HTTP replication endpoint that it should talk to on the main Synapse process.
The main Synapse process defines this with a `replication` resource in
[`listeners` option](#listeners).

Example configuration:
```yaml
worker_replication_host: 127.0.0.1
```
---
### `worker_replication_http_port`

The HTTP replication port that it should talk to on the main Synapse process.
The main Synapse process defines this with a `replication` resource in
[`listeners` option](#listeners).

Example configuration:
```yaml
worker_replication_http_port: 9093
```
---
### `worker_listeners`

A worker can handle HTTP requests. To do so, a `worker_listeners` option
must be declared, in the same way as the [`listeners` option](#listeners)
in the shared config.

Workers declared in [`stream_writers`](#stream_writers) will need to include a
`replication` listener here, in order to accept internal HTTP requests from
other workers.

Example configuration:
```yaml
worker_listeners:
  - type: http
    port: 8083
    resources:
      - names: [client, federation]
```
---
### `worker_daemonize`

Specifies whether the worker should be started as a daemon process.
If Synapse is being managed by [systemd](../../systemd-with-workers/README.md), this option
must be omitted or set to `false`.

Defaults to `false`.

Example configuration:
```yaml
worker_daemonize: true
```
---
### `worker_pid_file`

When running a worker as a daemon, we need a place to store the
[PID](https://en.wikipedia.org/wiki/Process_identifier) of the worker.
This option defines the location of that "pid file".

This option is required if `worker_daemonize` is `true` and ignored
otherwise. It has no default.

See also the [`pid_file`](#pid_file) option for the main Synapse process.

Example configuration:
```yaml
worker_pid_file: DATADIR/generic_worker1.pid
```
---
### `worker_log_config`

This option specifies a yaml python logging config file as described
[here](https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema).
See also the [`log_config`](#log_config) option for the main Synapse process.

Example configuration:
```yaml
worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
```
---
## Background Updates
Configuration settings related to background updates.

---
@@ -24,6 +24,11 @@ Finally, we also stylise the chapter titles in the left sidebar by indenting them
slightly so that they are more visually distinguishable from the section headers
(the bold titles). This is done through the `indent-section-headers.css` file.

In addition to these modifications, we have added a version picker to the documentation.
Users can switch between the documentation for different versions of Synapse.
This functionality was implemented through the `version-picker.js` and
`version-picker.css` files.

More information can be found in mdbook's official documentation for
[injecting page JS/CSS](https://rust-lang.github.io/mdBook/format/config.html)
and
@@ -131,6 +131,18 @@
            <i class="fa fa-search"></i>
        </button>
        {{/if}}
        <div class="version-picker">
            <div class="dropdown">
                <div class="select">
                    <span></span>
                    <i class="fa fa-chevron-down"></i>
                </div>
                <input type="hidden" name="version">
                <ul class="dropdown-menu">
                    <!-- Versions will be added dynamically in version-picker.js -->
                </ul>
            </div>
        </div>
    </div>

    <h1 class="menu-title">{{ book_title }}</h1>

@@ -309,4 +321,4 @@
{{/if}}

</body>
</html>
docs/website_files/version-picker.css (new file, 78 lines)
@@ -0,0 +1,78 @@
.version-picker {
    display: flex;
    align-items: center;
}

.version-picker .dropdown {
    width: 130px;
    max-height: 29px;
    margin-left: 10px;
    display: inline-block;
    border-radius: 4px;
    border: 1px solid var(--theme-popup-border);
    position: relative;
    font-size: 13px;
    color: var(--fg);
    height: 100%;
    text-align: left;
}
.version-picker .dropdown .select {
    cursor: pointer;
    display: block;
    padding: 5px 2px 5px 15px;
}
.version-picker .dropdown .select > i {
    font-size: 10px;
    color: var(--fg);
    cursor: pointer;
    float: right;
    line-height: 20px !important;
}
.version-picker .dropdown:hover {
    border: 1px solid var(--theme-popup-border);
}
.version-picker .dropdown:active {
    background-color: var(--theme-popup-bg);
}
.version-picker .dropdown.active:hover,
.version-picker .dropdown.active {
    border: 1px solid var(--theme-popup-border);
    border-radius: 2px 2px 0 0;
    background-color: var(--theme-popup-bg);
}
.version-picker .dropdown.active .select > i {
    transform: rotate(-180deg);
}
.version-picker .dropdown .dropdown-menu {
    position: absolute;
    background-color: var(--theme-popup-bg);
    width: 100%;
    left: -1px;
    right: 1px;
    margin-top: 1px;
    border: 1px solid var(--theme-popup-border);
    border-radius: 0 0 4px 4px;
    overflow: hidden;
    display: none;
    max-height: 300px;
    overflow-y: auto;
    z-index: 9;
}
.version-picker .dropdown .dropdown-menu li {
    font-size: 12px;
    padding: 6px 20px;
    cursor: pointer;
}
.version-picker .dropdown .dropdown-menu {
    padding: 0;
    list-style: none;
}
.version-picker .dropdown .dropdown-menu li:hover {
    background-color: var(--theme-hover);
}
.version-picker .dropdown .dropdown-menu li.active::before {
    display: inline-block;
    content: "✓";
    margin-inline-start: -14px;
    width: 14px;
}
docs/website_files/version-picker.js (new file, 127 lines)
@@ -0,0 +1,127 @@
const dropdown = document.querySelector('.version-picker .dropdown');
const dropdownMenu = dropdown.querySelector('.dropdown-menu');

fetchVersions(dropdown, dropdownMenu).then(() => {
    initializeVersionDropdown(dropdown, dropdownMenu);
});

/**
 * Initialize the dropdown functionality for version selection.
 *
 * @param {Element} dropdown - The dropdown element.
 * @param {Element} dropdownMenu - The dropdown menu element.
 */
function initializeVersionDropdown(dropdown, dropdownMenu) {
    // Toggle the dropdown menu on click
    dropdown.addEventListener('click', function () {
        this.setAttribute('tabindex', 1);
        this.classList.toggle('active');
        dropdownMenu.style.display = (dropdownMenu.style.display === 'block') ? 'none' : 'block';
    });

    // Remove the 'active' class and hide the dropdown menu on focusout
    dropdown.addEventListener('focusout', function () {
        this.classList.remove('active');
        dropdownMenu.style.display = 'none';
    });

    // Handle item selection within the dropdown menu
    const dropdownMenuItems = dropdownMenu.querySelectorAll('li');
    dropdownMenuItems.forEach(function (item) {
        item.addEventListener('click', function () {
            dropdownMenuItems.forEach(function (item) {
                item.classList.remove('active');
            });
            this.classList.add('active');
            dropdown.querySelector('span').textContent = this.textContent;
            dropdown.querySelector('input').value = this.getAttribute('id');

            window.location.href = changeVersion(window.location.href, this.textContent);
        });
    });
}

/**
 * This function fetches the available versions from a GitHub repository
 * and inserts them into the version picker.
 *
 * @param {Element} dropdown - The dropdown element.
 * @param {Element} dropdownMenu - The dropdown menu element.
 * @returns {Promise<Array<string>>} A promise that resolves with an array of available versions.
 */
function fetchVersions(dropdown, dropdownMenu) {
    return new Promise((resolve, reject) => {
        window.addEventListener("load", () => {

            fetch("https://api.github.com/repos/matrix-org/synapse/git/trees/gh-pages", {
                cache: "force-cache",
            }).then(res =>
                res.json()
            ).then(resObject => {
                const excluded = ['dev-docs', 'v1.91.0', 'v1.80.0', 'v1.69.0'];
                const tree = resObject.tree.filter(item => item.type === "tree" && !excluded.includes(item.path));
                const versions = tree.map(item => item.path).sort(sortVersions);

                // Create a list of <li> items for versions
                versions.forEach((version) => {
                    const li = document.createElement("li");
                    li.textContent = version;
                    li.id = version;

                    if (window.SYNAPSE_VERSION === version) {
                        li.classList.add('active');
                        dropdown.querySelector('span').textContent = version;
                        dropdown.querySelector('input').value = version;
                    }

                    dropdownMenu.appendChild(li);
                });

                resolve(versions);

            }).catch(ex => {
                console.error("Failed to fetch version data", ex);
                reject(ex);
            })
        });
    });
}

/**
 * Custom sorting function to sort an array of version strings.
 *
 * @param {string} a - The first version string to compare.
 * @param {string} b - The second version string to compare.
 * @returns {number} - A negative number if a should come before b, a positive number if b should come before a, or 0 if they are equal.
 */
function sortVersions(a, b) {
    // Put 'develop' and 'latest' at the top
    if (a === 'develop' || a === 'latest') return -1;
    if (b === 'develop' || b === 'latest') return 1;

    const versionA = (a.match(/v\d+(\.\d+)+/) || [])[0];
    const versionB = (b.match(/v\d+(\.\d+)+/) || [])[0];

    return versionB.localeCompare(versionA);
}

/**
 * Change the version in a URL path.
 *
 * @param {string} url - The original URL to be modified.
 * @param {string} newVersion - The new version to replace the existing version in the URL.
 * @returns {string} The updated URL with the new version.
 */
function changeVersion(url, newVersion) {
    const parsedURL = new URL(url);
    const pathSegments = parsedURL.pathname.split('/');

    // Modify the version
    pathSegments[2] = newVersion;

    // Reconstruct the URL
    parsedURL.pathname = pathSegments.join('/');

    return parsedURL.href;
}
docs/website_files/version.js (new file, 1 line)
@@ -0,0 +1 @@
window.SYNAPSE_VERSION = 'v1.71';
docs/workers.md (136 lines changed)
@@ -88,11 +88,12 @@ shared configuration file.
### Shared configuration

Normally, only a couple of changes are needed to make an existing configuration
-file suitable for use with workers. First, you need to enable an "HTTP replication
-listener" for the main process; and secondly, you need to enable redis-based
-replication. Optionally, a shared secret can be used to authenticate HTTP
-traffic between workers. For example:
+file suitable for use with workers. First, you need to enable an
+["HTTP replication listener"](usage/configuration/config_documentation.md#listeners)
+for the main process; and secondly, you need to enable
+[redis-based replication](usage/configuration/config_documentation.md#redis).
+Optionally, a [shared secret](usage/configuration/config_documentation.md#worker_replication_secret)
+can be used to authenticate HTTP traffic between workers. For example:

```yaml
# extend the existing `listeners` section. This defines the ports that the
@@ -112,24 +113,28 @@ redis:
    enabled: true
```

-See the [configuration manual](usage/configuration/config_documentation.html) for the full documentation of each option.
+See the [configuration manual](usage/configuration/config_documentation.md)
+for the full documentation of each option.

Under **no circumstances** should the replication listener be exposed to the
public internet; replication traffic is:

* always unencrypted
-* unauthenticated, unless `worker_replication_secret` is configured
+* unauthenticated, unless [`worker_replication_secret`](usage/configuration/config_documentation.md#worker_replication_secret)
+  is configured


### Worker configuration

In the config file for each worker, you must specify:
-* The type of worker (`worker_app`). The currently available worker applications are listed below.
-* A unique name for the worker (`worker_name`).
+* The type of worker ([`worker_app`](usage/configuration/config_documentation.md#worker_app)).
+  The currently available worker applications are listed [below](#available-worker-applications).
+* A unique name for the worker ([`worker_name`](usage/configuration/config_documentation.md#worker_name)).
* The HTTP replication endpoint that it should talk to on the main synapse process
-  (`worker_replication_host` and `worker_replication_http_port`)
-* If handling HTTP requests, a `worker_listeners` option with an `http`
-  listener, in the same way as the `listeners` option in the shared config.
+  ([`worker_replication_host`](usage/configuration/config_documentation.md#worker_replication_host) and
+  [`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)).
+* If handling HTTP requests, a [`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners) option
+  with an `http` listener.
* If handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
  the main process (`worker_main_http_uri`).
@@ -146,7 +151,6 @@ plain HTTP endpoint on port 8083 separately serving various endpoints, e.g.
Obviously you should configure your reverse-proxy to route the relevant
endpoints to the worker (`localhost:8083` in the above example).

-
### Running Synapse with workers

Finally, you need to start your worker processes. This can be done with either
@@ -203,6 +207,8 @@ information.
    ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$
    ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$
    ^/_matrix/client/v1/rooms/.*/hierarchy$
+    ^/_matrix/client/(v1|unstable)/rooms/.*/relations/
+    ^/_matrix/client/v1/rooms/.*/threads$
    ^/_matrix/client/unstable/org.matrix.msc2716/rooms/.*/batch_send$
    ^/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$
    ^/_matrix/client/(r0|v3|unstable)/account/3pid$
@@ -285,8 +291,10 @@ For multiple workers not handling the SSO endpoints properly, see
[#7530](https://github.com/matrix-org/synapse/issues/7530) and
[#9427](https://github.com/matrix-org/synapse/issues/9427).

-Note that a HTTP listener with `client` and `federation` resources must be
-configured in the `worker_listeners` option in the worker config.
+Note that a [HTTP listener](usage/configuration/config_documentation.md#listeners)
+with `client` and `federation` `resources` must be configured in the
+[`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners)
+option in the worker config.

#### Load balancing

@@ -326,10 +334,12 @@ effects of bursts of events from that bridge on events sent by normal users.
Additionally, the writing of specific streams (such as events) can be moved off
of the main process to a particular worker.

-To enable this, the worker must have a HTTP replication listener configured,
-have a `worker_name` and be listed in the `instance_map` config. The same worker
-can handle multiple streams, but unless otherwise documented, each stream can only
-have a single writer.
+To enable this, the worker must have a
+[HTTP `replication` listener](usage/configuration/config_documentation.md#listeners) configured,
+have a [`worker_name`](usage/configuration/config_documentation.md#worker_name)
+and be listed in the [`instance_map`](usage/configuration/config_documentation.md#instance_map)
+config. The same worker can handle multiple streams, but unless otherwise documented,
+each stream can only have a single writer.

For example, to move event persistence off to a dedicated worker, the shared
configuration would include:
@@ -356,9 +366,26 @@ streams and the endpoints associated with them:

##### The `events` stream

-The `events` stream experimentally supports having multiple writers, where work
-is sharded between them by room ID. Note that you *must* restart all worker
-instances when adding or removing event persisters. An example `stream_writers`
+The `events` stream experimentally supports having multiple writer workers, where load
+is sharded between them by room ID. Each writer is called an _event persister_. They are
+responsible for
+- receiving new events,
+- linking them to those already in the room [DAG](development/room-dag-concepts.md),
+- persisting them to the DB, and finally
+- updating the events stream.
+
+Because load is sharded in this way, you *must* restart all worker instances when
+adding or removing event persisters.
+
+An `event_persister` should not be mistaken for an `event_creator`.
+An `event_creator` listens for requests from clients to create new events and does
+so. It will then pass those events over HTTP replication to any configured event
+persisters (or the main process if none are configured).
+
+Note that `event_creator`s and `event_persister`s are implemented using the same
+[`synapse.app.generic_worker`](#synapse.app.generic_worker).
+
+An example [`stream_writers`](usage/configuration/config_documentation.md#stream_writers)
configuration with multiple writers:

```yaml
@@ -410,18 +437,20 @@ the stream writer for the `presence` stream:
There is also support for moving background tasks to a separate
worker. Background tasks are run periodically or started via replication. Exactly
which tasks are configured to run depends on your Synapse configuration (e.g. if
-stats is enabled).
+stats is enabled). This worker doesn't handle any REST endpoints itself.

-To enable this, the worker must have a `worker_name` and can be configured to run
-background tasks. For example, to move background tasks to a dedicated worker,
-the shared configuration would include:
+To enable this, the worker must have a unique
+[`worker_name`](usage/configuration/config_documentation.md#worker_name)
+and can be configured to run background tasks. For example, to move background tasks
+to a dedicated worker, the shared configuration would include:

```yaml
run_background_tasks_on: background_worker
```

-You might also wish to investigate the `update_user_directory_from_worker` and
-`media_instance_running_background_jobs` settings.
+You might also wish to investigate the
+[`update_user_directory_from_worker`](#updating-the-user-directory) and
+[`media_instance_running_background_jobs`](#synapseappmedia_repository) settings.

An example for a dedicated background worker instance:
@@ -457,8 +486,8 @@ worker application type.
#### Notifying Application Services

You can designate one generic worker to send output traffic to Application Services.
-
-Specify its name in the shared configuration as follows:
+Doesn't handle any REST endpoints itself, but you should specify its name in the
+shared configuration as follows:

```yaml
notify_appservices_from_worker: worker_name
@@ -474,18 +503,28 @@ worker application type.
### `synapse.app.pusher`

Handles sending push notifications to sygnal and email. Doesn't handle any
-REST endpoints itself, but you should set `start_pushers: False` in the
+REST endpoints itself, but you should set
+[`start_pushers: false`](usage/configuration/config_documentation.md#start_pushers) in the
shared configuration file to stop the main synapse sending push notifications.

-To run multiple instances at once the `pusher_instances` option should list all
-pusher instances by their worker name, e.g.:
+To run multiple instances at once the
+[`pusher_instances`](usage/configuration/config_documentation.md#pusher_instances)
+option should list all pusher instances by their
+[`worker_name`](usage/configuration/config_documentation.md#worker_name), e.g.:

```yaml
start_pushers: false
pusher_instances:
    - pusher_worker1
    - pusher_worker2
```

+An example for a pusher instance:
+
+```yaml
+{{#include systemd-with-workers/workers/pusher_worker.yaml}}
+```
+

### `synapse.app.appservice`

@@ -502,20 +541,31 @@ Note this worker cannot be load-balanced: only one instance should be active.
### `synapse.app.federation_sender`

Handles sending federation traffic to other servers. Doesn't handle any
-REST endpoints itself, but you should set `send_federation: False` in the
-shared configuration file to stop the main synapse sending this traffic.
+REST endpoints itself, but you should set
+[`send_federation: false`](usage/configuration/config_documentation.md#send_federation)
+in the shared configuration file to stop the main synapse sending this traffic.

If running multiple federation senders then you must list each
-instance in the `federation_sender_instances` option by their `worker_name`.
+instance in the
+[`federation_sender_instances`](usage/configuration/config_documentation.md#federation_sender_instances)
+option by their
+[`worker_name`](usage/configuration/config_documentation.md#worker_name).
All instances must be stopped and started when adding or removing instances.
For example:

```yaml
send_federation: false
federation_sender_instances:
    - federation_sender1
    - federation_sender2
```

+An example for a federation sender instance:
+
+```yaml
+{{#include systemd-with-workers/workers/federation_sender.yaml}}
+```
+

### `synapse.app.media_repository`

Handles the media repository. It can handle all endpoints starting with:
@@ -531,21 +581,19 @@ Handles the media repository. It can handle all endpoints starting with:
    ^/_synapse/admin/v1/quarantine_media/.*$
    ^/_synapse/admin/v1/users/.*/media$

-You should also set `enable_media_repo: False` in the shared configuration
+You should also set
+[`enable_media_repo: False`](usage/configuration/config_documentation.md#enable_media_repo)
+in the shared configuration
file to stop the main synapse running background jobs related to managing the
media repository. Note that doing so will prevent the main process from being
able to handle the above endpoints.

-In the `media_repository` worker configuration file, configure the http listener to
+In the `media_repository` worker configuration file, configure the
+[HTTP listener](usage/configuration/config_documentation.md#listeners) to
expose the `media` resource. For example:

```yaml
-worker_listeners:
- - type: http
-   port: 8085
-   resources:
-     - names:
-       - media
+{{#include systemd-with-workers/workers/media_worker.yaml}}
```

Note that if running multiple media repositories they must be on the same server
mypy.ini (7 lines changed)
@@ -56,7 +56,6 @@ exclude = (?x)
  |tests/rest/media/v1/test_media_storage.py
  |tests/server.py
  |tests/server_notices/test_resource_limits_server_notices.py
- |tests/test_metrics.py
  |tests/test_state.py
  |tests/test_terms_auth.py
  |tests/util/caches/test_cached_call.py
@@ -106,6 +105,12 @@ disallow_untyped_defs = False
[mypy-tests.handlers.test_user_directory]
disallow_untyped_defs = True

+[mypy-tests.metrics.test_background_process_metrics]
+disallow_untyped_defs = True
+
+[mypy-tests.push.test_bulk_push_rule_evaluator]
+disallow_untyped_defs = True
+
[mypy-tests.test_server]
disallow_untyped_defs = True
poetry.lock (generated, 952 lines changed): file diff suppressed because it is too large.
pyproject.toml
@@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml"

[tool.poetry]
name = "matrix-synapse"
-version = "1.68.0"
+version = "1.71.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@@ -192,7 +192,7 @@ psycopg2 = { version = ">=2.8", markers = "platform_python_implementation != 'Py
psycopg2cffi = { version = ">=2.8", markers = "platform_python_implementation == 'PyPy'", optional = true }
psycopg2cffi-compat = { version = "==1.1", markers = "platform_python_implementation == 'PyPy'", optional = true }
pysaml2 = { version = ">=4.5.0", optional = true }
-authlib = { version = ">=0.14.0", optional = true }
+authlib = { version = ">=0.15.1", optional = true }
# systemd-python is necessary for logging to the systemd journal via
# `systemd.journal.JournalHandler`, as is documented in
# `contrib/systemd/log_config.yaml`.
@@ -219,7 +219,7 @@ oidc = ["authlib"]
# `systemd.journal.JournalHandler`, as is documented in
# `contrib/systemd/log_config.yaml`.
systemd = ["systemd-python"]
-url_preview = ["lxml"]
+url-preview = ["lxml"]
sentry = ["sentry-sdk"]
opentracing = ["jaeger-client", "opentracing"]
jwt = ["authlib"]
@@ -227,7 +227,7 @@ jwt = ["authlib"]
# (if it is not installed, we fall back to slow code.)
redis = ["txredisapi", "hiredis"]
# Required to use experimental `caches.track_memory_usage` config option.
-cache_memory = ["pympler"]
+cache-memory = ["pympler"]
test = ["parameterized", "idna"]

# The duplication here is awful. I hate hate hate hate hate it. However, for now I want
@@ -250,7 +250,7 @@ all = [
    "pysaml2",
    # oidc and jwt
    "authlib",
-    # url_preview
+    # url-preview
    "lxml",
    # sentry
    "sentry-sdk",
@@ -258,7 +258,7 @@ all = [
    "jaeger-client", "opentracing",
    # redis
    "txredisapi", "hiredis",
-    # cache_memory
+    # cache-memory
    "pympler",
    # omitted:
    #   - test: it's useful to have this separate from dev deps in the olddeps job
@@ -267,10 +267,10 @@ all = [

[tool.poetry.dev-dependencies]
## We pin black so that our tests don't start failing on new releases.
-isort = "==5.7.0"
-black = "==22.3.0"
+isort = ">=5.10.1"
+black = ">=22.3.0"
flake8-comprehensions = "*"
-flake8-bugbear = "==21.3.2"
+flake8-bugbear = ">=21.3.2"
flake8 = "*"

# Typechecking
@@ -296,27 +296,32 @@ parameterized = ">=0.7.4"
idna = ">=2.5"

# The following are used by the release script
-click = "==8.1.1"
+click = ">=8.1.3"
# GitPython was == 3.1.14; bumped to 3.1.20, the first release with type hints.
GitPython = ">=3.1.20"
-commonmark = "==0.9.1"
-pygithub = "==1.55"
+commonmark = ">=0.9.1"
+pygithub = ">=1.55"
# The following are executed as commands by the release script.
twine = "*"
# Towncrier min version comes from #3425. Rationale unclear.
towncrier = ">=18.6.0rc1"

[build-system]
-requires = ["poetry-core>=1.0.0", "setuptools_rust>=1.3"]
+# The upper bounds here are defensive, intended to prevent situations like
+# #13849 and #14079 where we see buildtime or runtime errors caused by build
+# system changes.
+# We are happy to raise these upper bounds upon request,
+# provided we check that it's safe to do so (i.e. that CI passes).
+requires = ["poetry-core>=1.0.0,<=1.3.2", "setuptools_rust>=1.3,<=1.5.2"]
build-backend = "poetry.core.masonry.api"


[tool.cibuildwheel]
# Skip unsupported platforms (by us or by Rust).
-skip = "cp36* *-musllinux_i686"
+skip = "cp36* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"

# We need a rust compiler
-before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y"
+before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y --profile minimal"
environment= { PATH = "$PATH:$HOME/.cargo/bin" }

# For some reason if we don't manually clean the build directory we
@@ -325,3 +330,12 @@ environment= { PATH = "$PATH:$HOME/.cargo/bin" }
before-build = "rm -rf {project}/build"
build-frontend = "build"
test-command = "python -c 'from synapse.synapse_rust import sum_as_string; print(sum_as_string(1, 2))'"
+
+
+[tool.cibuildwheel.linux]
+# Wrap the repair command to correctly rename the built cpython wheels as ABI3.
+repair-wheel-command = "./.ci/scripts/auditwheel_wrapper.py -w {dest_dir} {wheel}"
+
+[tool.cibuildwheel.macos]
+# Wrap the repair command to correctly rename the built cpython wheels as ABI3.
+repair-wheel-command = "./.ci/scripts/auditwheel_wrapper.py --require-archs {delocate_archs} -w {dest_dir} {wheel}"
rust/Cargo.toml
@@ -11,14 +11,24 @@ rust-version = "1.58.1"

[lib]
name = "synapse"
-crate-type = ["cdylib"]
+# We generate a `cdylib` for Python and a standard `lib` for running
+# tests/benchmarks.
+crate-type = ["lib", "cdylib"]

[package.metadata.maturin]
# This is where we tell maturin where to place the built library.
name = "synapse.synapse_rust"

[dependencies]
-pyo3 = { version = "0.16.5", features = ["extension-module", "macros", "abi3", "abi3-py37"] }
+anyhow = "1.0.66"
+lazy_static = "1.4.0"
+log = "0.4.17"
+pyo3 = { version = "0.17.1", features = ["extension-module", "macros", "anyhow", "abi3", "abi3-py37"] }
+pyo3-log = "0.7.0"
+pythonize = "0.17.0"
+regex = "1.6.0"
+serde = { version = "1.0.147", features = ["derive"] }
+serde_json = "1.0.87"

[build-dependencies]
blake2 = "0.10.4"
rust/benches/evaluator.rs (new file, 149 lines)
@@ -0,0 +1,149 @@
// Copyright 2022 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#![feature(test)]
use synapse::push::{
    evaluator::PushRuleEvaluator, Condition, EventMatchCondition, FilteredPushRules, PushRules,
};
use test::Bencher;

extern crate test;

#[bench]
fn bench_match_exact(b: &mut Bencher) {
    let flattened_keys = [
        ("type".to_string(), "m.text".to_string()),
        ("room_id".to_string(), "!room:server".to_string()),
        ("content.body".to_string(), "test message".to_string()),
    ]
    .into_iter()
    .collect();

    let eval = PushRuleEvaluator::py_new(
        flattened_keys,
        10,
        Some(0),
        Default::default(),
        Default::default(),
        true,
    )
    .unwrap();

    let condition = Condition::Known(synapse::push::KnownCondition::EventMatch(
        EventMatchCondition {
            key: "room_id".into(),
            pattern: Some("!room:server".into()),
            pattern_type: None,
        },
    ));

    let matched = eval.match_condition(&condition, None, None).unwrap();
    assert!(matched, "Didn't match");

    b.iter(|| eval.match_condition(&condition, None, None).unwrap());
}

#[bench]
fn bench_match_word(b: &mut Bencher) {
    let flattened_keys = [
        ("type".to_string(), "m.text".to_string()),
        ("room_id".to_string(), "!room:server".to_string()),
        ("content.body".to_string(), "test message".to_string()),
    ]
    .into_iter()
    .collect();

    let eval = PushRuleEvaluator::py_new(
        flattened_keys,
        10,
        Some(0),
        Default::default(),
        Default::default(),
        true,
    )
    .unwrap();

    let condition = Condition::Known(synapse::push::KnownCondition::EventMatch(
        EventMatchCondition {
            key: "content.body".into(),
            pattern: Some("test".into()),
            pattern_type: None,
        },
    ));

    let matched = eval.match_condition(&condition, None, None).unwrap();
    assert!(matched, "Didn't match");

    b.iter(|| eval.match_condition(&condition, None, None).unwrap());
}

#[bench]
fn bench_match_word_miss(b: &mut Bencher) {
    let flattened_keys = [
        ("type".to_string(), "m.text".to_string()),
        ("room_id".to_string(), "!room:server".to_string()),
        ("content.body".to_string(), "test message".to_string()),
    ]
    .into_iter()
    .collect();

    let eval = PushRuleEvaluator::py_new(
        flattened_keys,
        10,
        Some(0),
        Default::default(),
        Default::default(),
        true,
    )
    .unwrap();

    let condition = Condition::Known(synapse::push::KnownCondition::EventMatch(
        EventMatchCondition {
            key: "content.body".into(),
            pattern: Some("foobar".into()),
            pattern_type: None,
        },
    ));

    let matched = eval.match_condition(&condition, None, None).unwrap();
    assert!(!matched, "Matched unexpectedly");

    b.iter(|| eval.match_condition(&condition, None, None).unwrap());
}

#[bench]
fn bench_eval_message(b: &mut Bencher) {
    let flattened_keys = [
        ("type".to_string(), "m.text".to_string()),
        ("room_id".to_string(), "!room:server".to_string()),
        ("content.body".to_string(), "test message".to_string()),
    ]
    .into_iter()
    .collect();

    let eval = PushRuleEvaluator::py_new(
        flattened_keys,
        10,
        Some(0),
        Default::default(),
        Default::default(),
        true,
    )
    .unwrap();

    let rules =
        FilteredPushRules::py_new(PushRules::new(Vec::new()), Default::default(), false, false);

    b.iter(|| eval.run(&rules, Some("bob"), Some("person")));
}
rust/benches/glob.rs (new file, 40 lines)
@@ -0,0 +1,40 @@
// Copyright 2022 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#![feature(test)]

use synapse::push::utils::{glob_to_regex, GlobMatchType};
use test::Bencher;

extern crate test;

#[bench]
fn bench_whole(b: &mut Bencher) {
    b.iter(|| glob_to_regex("test", GlobMatchType::Whole));
}

#[bench]
fn bench_word(b: &mut Bencher) {
    b.iter(|| glob_to_regex("test", GlobMatchType::Word));
}

#[bench]
fn bench_whole_wildcard_run(b: &mut Bencher) {
    b.iter(|| glob_to_regex("test***??*?*?foo", GlobMatchType::Whole));
}

#[bench]
fn bench_word_wildcard_run(b: &mut Bencher) {
    b.iter(|| glob_to_regex("test***??*?*?foo", GlobMatchType::Word));
}
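These benchmarks exercise `glob_to_regex` from `rust/src/push/utils.rs`, which is not shown in this diff. As a rough, standalone sketch of the technique being benchmarked (not Synapse's actual implementation), a glob can be compiled to a regex by escaping everything except the wildcards and then anchoring according to the match type; `naive_glob_to_regex` below is a hypothetical stand-in:

```rust
// Standalone sketch of glob-to-regex conversion; `naive_glob_to_regex` is a
// hypothetical stand-in for Synapse's `glob_to_regex`, not the real function.
use regex::Regex;

/// Escape regex metacharacters, then translate the glob wildcards:
/// `*` becomes a lazy `.*?` and `?` becomes `.`.
fn naive_glob_to_regex(glob: &str, whole: bool) -> Regex {
    let mut body = String::new();
    for c in glob.chars() {
        match c {
            '*' => body.push_str(".*?"),
            '?' => body.push('.'),
            _ => body.push_str(&regex::escape(&c.to_string())),
        }
    }
    // `Whole` anchors to the full string; `Word` anchors to word boundaries.
    let pattern = if whole {
        format!(r"\A{}\z", body)
    } else {
        format!(r"\b{}\b", body)
    };
    Regex::new(&pattern).expect("valid regex")
}

fn main() {
    // Whole-value match, as used for keys other than `content.body`.
    assert!(naive_glob_to_regex("m.room.*", true).is_match("m.room.message"));
    // Word match, as used for `content.body` in the evaluator below.
    assert!(naive_glob_to_regex("test", false).is_match("a test message"));
    assert!(!naive_glob_to_regex("test", false).is_match("attestation"));
}
```

The `Word` anchoring is what makes `content.body` conditions behave like whole-word search rather than substring search.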
rust/build.rs
@@ -22,7 +22,7 @@ fn main() -> Result<(), std::io::Error> {

    for entry in entries {
        if entry.is_dir() {
-            dirs.push(entry)
+            dirs.push(entry);
        } else {
            paths.push(entry.to_str().expect("valid rust paths").to_string());
        }
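For context, this build script collects the paths of all Rust sources so they can be hashed with the `blake2` build-dependency added in `Cargo.toml` above; `get_rust_file_digest` (below) then lets the Python side detect a stale build. A standalone sketch of the hashing half, assuming the `blake2` 0.10 API; the environment-variable name and paths here are made up for illustration:

```rust
// Standalone sketch: hash a list of source files with BLAKE2, roughly what a
// build script could do to fingerprint the Rust sources. Paths are examples.
use blake2::{Blake2b512, Digest};
use std::fs;

fn digest_of(paths: &[&str]) -> std::io::Result<String> {
    let mut hasher = Blake2b512::new();
    for path in paths {
        // Feed both the path and its contents, so renames also change the hash.
        hasher.update(path.as_bytes());
        hasher.update(fs::read(path)?);
    }
    // Hex-encode the 64-byte digest.
    Ok(hasher
        .finalize()
        .iter()
        .map(|b| format!("{b:02x}"))
        .collect())
}

fn main() -> std::io::Result<()> {
    let digest = digest_of(&["src/lib.rs"])?;
    // Illustrative only: expose the digest to the compiled crate.
    println!("cargo:rustc-env=RUST_FILE_DIGEST={digest}");
    Ok(())
}
```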
rust/src/lib.rs
@@ -1,5 +1,7 @@
use pyo3::prelude::*;

+pub mod push;
+
/// Returns the hash of all the rust source files at the time it was compiled.
///
/// Used by python to detect if the rust library is outdated.
@@ -17,8 +19,13 @@ fn sum_as_string(a: usize, b: usize) -> PyResult<String> {

/// The entry point for defining the Python module.
#[pymodule]
-fn synapse_rust(_py: Python<'_>, m: &PyModule) -> PyResult<()> {
+fn synapse_rust(py: Python<'_>, m: &PyModule) -> PyResult<()> {
+    pyo3_log::init();
+
    m.add_function(wrap_pyfunction!(sum_as_string, m)?)?;
    m.add_function(wrap_pyfunction!(get_rust_file_digest, m)?)?;

+    push::register_module(py, m)?;
+
    Ok(())
}
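The body of `push::register_module` is not part of this hunk, so the following is only a guess at the conventional pyo3 pattern for attaching a submodule; the `"push"` name and the commented-out class registration are assumptions, not the actual implementation:

```rust
// Hypothetical sketch of a pyo3 submodule-registration helper, in the style
// of the `push::register_module(py, m)?` call above. The real body is not
// shown in this diff.
use pyo3::prelude::*;

pub fn register_module(py: Python<'_>, m: &PyModule) -> PyResult<()> {
    let child_module = PyModule::new(py, "push")?;
    // e.g. expose a #[pyclass] type such as the evaluator:
    // child_module.add_class::<PushRuleEvaluator>()?;
    m.add_submodule(child_module)?;
    Ok(())
}
```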
rust/src/push/base_rules.rs (new file, 340 lines)
@@ -0,0 +1,340 @@
// Copyright 2022 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Contains the definitions of the "base" push rules.

use std::borrow::Cow;
use std::collections::HashMap;

use lazy_static::lazy_static;
use serde_json::Value;

use super::KnownCondition;
use crate::push::Action;
use crate::push::Condition;
use crate::push::EventMatchCondition;
use crate::push::PushRule;
use crate::push::RelatedEventMatchCondition;
use crate::push::SetTweak;
use crate::push::TweakValue;

const HIGHLIGHT_ACTION: Action = Action::SetTweak(SetTweak {
    set_tweak: Cow::Borrowed("highlight"),
    value: None,
    other_keys: Value::Null,
});

const HIGHLIGHT_FALSE_ACTION: Action = Action::SetTweak(SetTweak {
    set_tweak: Cow::Borrowed("highlight"),
    value: Some(TweakValue::Other(Value::Bool(false))),
    other_keys: Value::Null,
});

const SOUND_ACTION: Action = Action::SetTweak(SetTweak {
    set_tweak: Cow::Borrowed("sound"),
    value: Some(TweakValue::String(Cow::Borrowed("default"))),
    other_keys: Value::Null,
});

const RING_ACTION: Action = Action::SetTweak(SetTweak {
    set_tweak: Cow::Borrowed("sound"),
    value: Some(TweakValue::String(Cow::Borrowed("ring"))),
    other_keys: Value::Null,
});

pub const BASE_PREPEND_OVERRIDE_RULES: &[PushRule] = &[PushRule {
    rule_id: Cow::Borrowed("global/override/.m.rule.master"),
    priority_class: 5,
    conditions: Cow::Borrowed(&[]),
    actions: Cow::Borrowed(&[Action::DontNotify]),
    default: true,
    default_enabled: false,
}];

pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
    PushRule {
        rule_id: Cow::Borrowed("global/override/.m.rule.suppress_notices"),
        priority_class: 5,
        conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
            EventMatchCondition {
                key: Cow::Borrowed("content.msgtype"),
                pattern: Some(Cow::Borrowed("m.notice")),
                pattern_type: None,
            },
        ))]),
        actions: Cow::Borrowed(&[Action::DontNotify]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/override/.m.rule.invite_for_me"),
        priority_class: 5,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                pattern: Some(Cow::Borrowed("m.room.member")),
                pattern_type: None,
            })),
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("content.membership"),
                pattern: Some(Cow::Borrowed("invite")),
                pattern_type: None,
            })),
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("state_key"),
                pattern: None,
                pattern_type: Some(Cow::Borrowed("user_id")),
            })),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION, SOUND_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/override/.m.rule.member_event"),
        priority_class: 5,
        conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
            EventMatchCondition {
                key: Cow::Borrowed("type"),
                pattern: Some(Cow::Borrowed("m.room.member")),
                pattern_type: None,
            },
        ))]),
        actions: Cow::Borrowed(&[Action::DontNotify]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/override/.im.nheko.msc3664.reply"),
        priority_class: 5,
        conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::RelatedEventMatch(
            RelatedEventMatchCondition {
                key: Some(Cow::Borrowed("sender")),
                pattern: None,
                pattern_type: Some(Cow::Borrowed("user_id")),
                rel_type: Cow::Borrowed("m.in_reply_to"),
                include_fallbacks: None,
            },
        ))]),
        actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/override/.m.rule.contains_display_name"),
        priority_class: 5,
        conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::ContainsDisplayName)]),
        actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/override/.m.rule.roomnotif"),
        priority_class: 5,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::SenderNotificationPermission {
                key: Cow::Borrowed("room"),
            }),
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("content.body"),
                pattern: Some(Cow::Borrowed("@room")),
                pattern_type: None,
            })),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/override/.m.rule.tombstone"),
        priority_class: 5,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                pattern: Some(Cow::Borrowed("m.room.tombstone")),
                pattern_type: None,
            })),
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("state_key"),
                pattern: Some(Cow::Borrowed("")),
                pattern_type: None,
            })),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/override/.m.rule.reaction"),
        priority_class: 5,
        conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
            EventMatchCondition {
                key: Cow::Borrowed("type"),
                pattern: Some(Cow::Borrowed("m.reaction")),
                pattern_type: None,
            },
        ))]),
        actions: Cow::Borrowed(&[Action::DontNotify]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/override/.m.rule.room.server_acl"),
        priority_class: 5,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                pattern: Some(Cow::Borrowed("m.room.server_acl")),
                pattern_type: None,
            })),
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("state_key"),
                pattern: Some(Cow::Borrowed("")),
                pattern_type: None,
            })),
        ]),
        actions: Cow::Borrowed(&[]),
        default: true,
        default_enabled: true,
    },
];

pub const BASE_APPEND_CONTENT_RULES: &[PushRule] = &[PushRule {
    rule_id: Cow::Borrowed("global/content/.m.rule.contains_user_name"),
    priority_class: 4,
    conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
        EventMatchCondition {
            key: Cow::Borrowed("content.body"),
            pattern: None,
            pattern_type: Some(Cow::Borrowed("user_localpart")),
        },
    ))]),
    actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]),
    default: true,
    default_enabled: true,
}];

pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
    PushRule {
        rule_id: Cow::Borrowed("global/underride/.m.rule.call"),
        priority_class: 1,
        conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
            EventMatchCondition {
                key: Cow::Borrowed("type"),
                pattern: Some(Cow::Borrowed("m.call.invite")),
                pattern_type: None,
            },
        ))]),
        actions: Cow::Borrowed(&[Action::Notify, RING_ACTION, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/underride/.m.rule.room_one_to_one"),
        priority_class: 1,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                pattern: Some(Cow::Borrowed("m.room.message")),
                pattern_type: None,
            })),
            Condition::Known(KnownCondition::RoomMemberCount {
                is: Some(Cow::Borrowed("2")),
            }),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/underride/.m.rule.encrypted_room_one_to_one"),
        priority_class: 1,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                pattern: Some(Cow::Borrowed("m.room.encrypted")),
                pattern_type: None,
            })),
            Condition::Known(KnownCondition::RoomMemberCount {
                is: Some(Cow::Borrowed("2")),
            }),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/underride/.m.rule.message"),
        priority_class: 1,
        conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
            EventMatchCondition {
                key: Cow::Borrowed("type"),
                pattern: Some(Cow::Borrowed("m.room.message")),
                pattern_type: None,
            },
        ))]),
        actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/underride/.m.rule.encrypted"),
        priority_class: 1,
        conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
            EventMatchCondition {
                key: Cow::Borrowed("type"),
                pattern: Some(Cow::Borrowed("m.room.encrypted")),
                pattern_type: None,
            },
        ))]),
        actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
    PushRule {
        rule_id: Cow::Borrowed("global/underride/.im.vector.jitsi"),
        priority_class: 1,
        conditions: Cow::Borrowed(&[
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("type"),
                pattern: Some(Cow::Borrowed("im.vector.modular.widgets")),
                pattern_type: None,
            })),
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("content.type"),
                pattern: Some(Cow::Borrowed("jitsi")),
                pattern_type: None,
            })),
            Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
                key: Cow::Borrowed("state_key"),
                pattern: Some(Cow::Borrowed("*")),
                pattern_type: None,
            })),
        ]),
        actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
        default: true,
        default_enabled: true,
    },
];

lazy_static! {
    pub static ref BASE_RULES_BY_ID: HashMap<&'static str, &'static PushRule> =
        BASE_PREPEND_OVERRIDE_RULES
            .iter()
            .chain(BASE_APPEND_OVERRIDE_RULES.iter())
            .chain(BASE_APPEND_CONTENT_RULES.iter())
            .chain(BASE_APPEND_UNDERRIDE_RULES.iter())
            .map(|rule| { (&*rule.rule_id, rule) })
            .collect();
}
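The `lazy_static` block chains all four rule lists into a single `rule_id`-indexed map, which is what lets callers look up (and later override) a default rule by its full ID. A hypothetical test in this same module, using only items defined in the file above, might look like:

```rust
// Illustrative only: a hypothetical test exercising the BASE_RULES_BY_ID
// index; the rule id and field names come from the file above.
#[cfg(test)]
mod tests {
    use super::BASE_RULES_BY_ID;

    #[test]
    fn master_rule_is_indexed() {
        let rule = BASE_RULES_BY_ID
            .get("global/override/.m.rule.master")
            .expect("master rule present");
        // The master rule is a default rule that ships disabled.
        assert_eq!(rule.priority_class, 5);
        assert!(rule.default);
        assert!(!rule.default_enabled);
    }
}
```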
rust/src/push/evaluator.rs (new file, 370 lines)
@@ -0,0 +1,370 @@
|
||||
// Copyright 2022 The Matrix.org Foundation C.I.C.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use anyhow::{Context, Error};
|
||||
use lazy_static::lazy_static;
|
||||
use log::warn;
|
||||
use pyo3::prelude::*;
|
||||
use regex::Regex;
|
||||
|
||||
use super::{
|
||||
utils::{get_glob_matcher, get_localpart_from_id, GlobMatchType},
|
||||
Action, Condition, EventMatchCondition, FilteredPushRules, KnownCondition,
|
||||
RelatedEventMatchCondition,
|
||||
};
|
||||
|
||||
lazy_static! {
|
||||
/// Used to parse the `is` clause in the room member count condition.
|
||||
static ref INEQUALITY_EXPR: Regex = Regex::new(r"^([=<>]*)([0-9]+)$").expect("valid regex");
|
||||
}
|
||||
|
||||
/// Allows running a set of push rules against a particular event.
|
||||
#[pyclass]
|
||||
pub struct PushRuleEvaluator {
|
||||
/// A mapping of "flattened" keys to string values in the event, e.g.
|
||||
/// includes things like "type" and "content.msgtype".
|
||||
flattened_keys: BTreeMap<String, String>,
|
||||
|
||||
/// The "content.body", if any.
|
||||
body: String,
|
||||
|
||||
/// The number of users in the room.
|
||||
room_member_count: u64,
|
||||
|
||||
/// The `notifications` section of the current power levels in the room.
|
||||
notification_power_levels: BTreeMap<String, i64>,
|
||||
|
||||
/// The power level of the sender of the event, or None if event is an
|
||||
/// outlier.
|
||||
sender_power_level: Option<i64>,
|
||||
|
||||
/// The related events, indexed by relation type. Flattened in the same manner as
|
||||
/// `flattened_keys`.
|
||||
related_events_flattened: BTreeMap<String, BTreeMap<String, String>>,
|
||||
|
||||
/// If msc3664, push rules for related events, is enabled.
|
||||
related_event_match_enabled: bool,
|
||||
}
|
||||
|
||||
#[pymethods]
|
||||
impl PushRuleEvaluator {
|
||||
/// Create a new `PushRuleEvaluator`. See struct docstring for details.
|
||||
#[new]
|
||||
pub fn py_new(
|
||||
flattened_keys: BTreeMap<String, String>,
|
||||
room_member_count: u64,
|
||||
sender_power_level: Option<i64>,
|
||||
notification_power_levels: BTreeMap<String, i64>,
|
||||
related_events_flattened: BTreeMap<String, BTreeMap<String, String>>,
|
||||
related_event_match_enabled: bool,
|
||||
) -> Result<Self, Error> {
|
||||
let body = flattened_keys
|
||||
.get("content.body")
|
||||
.cloned()
|
||||
.unwrap_or_default();
|
||||
|
||||
Ok(PushRuleEvaluator {
|
||||
flattened_keys,
|
||||
body,
|
||||
room_member_count,
|
||||
notification_power_levels,
|
||||
sender_power_level,
|
||||
related_events_flattened,
|
||||
related_event_match_enabled,
|
||||
})
|
||||
}
|
||||
|
||||
/// Run the evaluator with the given push rules, for the given user ID and
|
||||
/// display name of the user.
|
||||
///
|
||||
/// Passing in None will skip evaluating rules matching user ID and display
|
||||
/// name.
|
||||
///
|
||||
/// Returns the set of actions, if any, that match (filtering out any
|
||||
/// `dont_notify` actions).
|
||||
pub fn run(
|
||||
&self,
|
||||
push_rules: &FilteredPushRules,
|
||||
user_id: Option<&str>,
|
||||
display_name: Option<&str>,
|
||||
) -> Vec<Action> {
|
||||
'outer: for (push_rule, enabled) in push_rules.iter() {
|
||||
if !enabled {
|
||||
continue;
|
||||
}
|
||||
|
||||
for condition in push_rule.conditions.iter() {
|
||||
match self.match_condition(condition, user_id, display_name) {
|
||||
Ok(true) => {}
|
||||
Ok(false) => continue 'outer,
|
||||
Err(err) => {
|
||||
warn!("Condition match failed {err}");
|
||||
continue 'outer;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let actions = push_rule
|
||||
.actions
|
||||
.iter()
|
||||
// Filter out "dont_notify" actions, as we don't store them.
|
||||
.filter(|a| **a != Action::DontNotify)
|
||||
.cloned()
|
||||
.collect();
|
||||
|
||||
return actions;
|
||||
}
|
||||
|
||||
Vec::new()
|
||||
}
|
||||
|
||||
/// Check if the given condition matches.
|
||||
fn matches(
|
||||
&self,
|
||||
condition: Condition,
|
||||
user_id: Option<&str>,
|
||||
        display_name: Option<&str>,
    ) -> bool {
        match self.match_condition(&condition, user_id, display_name) {
            Ok(true) => true,
            Ok(false) => false,
            Err(err) => {
                warn!("Condition match failed {err}");
                false
            }
        }
    }
}

impl PushRuleEvaluator {
    /// Match a given `Condition` for a push rule.
    pub fn match_condition(
        &self,
        condition: &Condition,
        user_id: Option<&str>,
        display_name: Option<&str>,
    ) -> Result<bool, Error> {
        let known_condition = match condition {
            Condition::Known(known) => known,
            Condition::Unknown(_) => {
                return Ok(false);
            }
        };

        let result = match known_condition {
            KnownCondition::EventMatch(event_match) => {
                self.match_event_match(event_match, user_id)?
            }
            KnownCondition::RelatedEventMatch(event_match) => {
                self.match_related_event_match(event_match, user_id)?
            }
            KnownCondition::ContainsDisplayName => {
                if let Some(dn) = display_name {
                    if !dn.is_empty() {
                        get_glob_matcher(dn, GlobMatchType::Word)?.is_match(&self.body)?
                    } else {
                        // We specifically ignore empty display names, as otherwise
                        // they would always match.
                        false
                    }
                } else {
                    false
                }
            }
            KnownCondition::RoomMemberCount { is } => {
                if let Some(is) = is {
                    self.match_member_count(is)?
                } else {
                    false
                }
            }
            KnownCondition::SenderNotificationPermission { key } => {
                if let Some(sender_power_level) = &self.sender_power_level {
                    let required_level = self
                        .notification_power_levels
                        .get(key.as_ref())
                        .copied()
                        .unwrap_or(50);

                    *sender_power_level >= required_level
                } else {
                    false
                }
            }
        };

        Ok(result)
    }

    /// Evaluates an `event_match` condition.
    fn match_event_match(
        &self,
        event_match: &EventMatchCondition,
        user_id: Option<&str>,
    ) -> Result<bool, Error> {
        let pattern = if let Some(pattern) = &event_match.pattern {
            pattern
        } else if let Some(pattern_type) = &event_match.pattern_type {
            // The `pattern_type` can either be "user_id" or "user_localpart";
            // either way, if we don't have a `user_id` then the condition can't
            // match.
            let user_id = if let Some(user_id) = user_id {
                user_id
            } else {
                return Ok(false);
            };

            match &**pattern_type {
                "user_id" => user_id,
                "user_localpart" => get_localpart_from_id(user_id)?,
                _ => return Ok(false),
            }
        } else {
            return Ok(false);
        };

        let haystack = if let Some(haystack) = self.flattened_keys.get(&*event_match.key) {
            haystack
        } else {
            return Ok(false);
        };

        // For `content.body` we match against "words", but for everything
        // else we match against the entire value.
        let match_type = if event_match.key == "content.body" {
            GlobMatchType::Word
        } else {
            GlobMatchType::Whole
        };

        let mut compiled_pattern = get_glob_matcher(pattern, match_type)?;
        compiled_pattern.is_match(haystack)
    }

    /// Evaluates a `related_event_match` condition (MSC3664).
    fn match_related_event_match(
        &self,
        event_match: &RelatedEventMatchCondition,
        user_id: Option<&str>,
    ) -> Result<bool, Error> {
        // First check if related event matching is enabled...
        if !self.related_event_match_enabled {
            return Ok(false);
        }

        // Get the related event; fail if there is none.
        let event = if let Some(event) = self.related_events_flattened.get(&*event_match.rel_type) {
            event
        } else {
            return Ok(false);
        };

        // If we are not matching fallbacks, don't match if the event is marked
        // as a fallback relation (via our special key).
        if !event_match.include_fallbacks.unwrap_or(false)
            && event.contains_key("im.vector.is_falling_back")
        {
            return Ok(false);
        }

        // If the condition has no key, the mere existence of the related event
        // is enough: accept it as matching without checking any fields.
        let key = if let Some(key) = &event_match.key {
            key
        } else {
            return Ok(true);
        };

        let pattern = if let Some(pattern) = &event_match.pattern {
            pattern
        } else if let Some(pattern_type) = &event_match.pattern_type {
            // The `pattern_type` can either be "user_id" or "user_localpart";
            // either way, if we don't have a `user_id` then the condition can't
            // match.
            let user_id = if let Some(user_id) = user_id {
                user_id
            } else {
                return Ok(false);
            };

            match &**pattern_type {
                "user_id" => user_id,
                "user_localpart" => get_localpart_from_id(user_id)?,
                _ => return Ok(false),
            }
        } else {
            return Ok(false);
        };

        let haystack = if let Some(haystack) = event.get(&**key) {
            haystack
        } else {
            return Ok(false);
        };

        // For `content.body` we match against "words", but for everything
        // else we match against the entire value.
        let match_type = if key == "content.body" {
            GlobMatchType::Word
        } else {
            GlobMatchType::Whole
        };

        let mut compiled_pattern = get_glob_matcher(pattern, match_type)?;
        compiled_pattern.is_match(haystack)
    }

    /// Match the member count against an 'is' condition.
    /// The `is` condition can be things like '>2', '==3' or even just '4'.
    fn match_member_count(&self, is: &str) -> Result<bool, Error> {
        let captures = INEQUALITY_EXPR.captures(is).context("bad 'is' clause")?;
        let ineq = captures.get(1).map_or("==", |m| m.as_str());
        let rhs: u64 = captures
            .get(2)
            .context("missing number")?
            .as_str()
            .parse()?;

        let matches = match ineq {
            "" | "==" => self.room_member_count == rhs,
            "<" => self.room_member_count < rhs,
            ">" => self.room_member_count > rhs,
            ">=" => self.room_member_count >= rhs,
            "<=" => self.room_member_count <= rhs,
            _ => false,
        };

        Ok(matches)
    }
}

#[test]
fn push_rule_evaluator() {
    let mut flattened_keys = BTreeMap::new();
    flattened_keys.insert("content.body".to_string(), "foo bar bob hello".to_string());
    let evaluator = PushRuleEvaluator::py_new(
        flattened_keys,
        10,
        Some(0),
        BTreeMap::new(),
        BTreeMap::new(),
        true,
    )
    .unwrap();

    let result = evaluator.run(&FilteredPushRules::default(), None, Some("bob"));
    assert_eq!(result.len(), 3);
}
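The `INEQUALITY_EXPR` regex used by `match_member_count` is defined elsewhere in evaluator.rs and is not part of this excerpt. As a rough guide to the semantics, here is a minimal standalone sketch of the same 'is' clause handling; the regex below is an assumption made for the example, not the one Synapse ships:

// Illustrative sketch only: mirrors the 'is' clause logic in
// `match_member_count`. The real INEQUALITY_EXPR is defined elsewhere;
// this regex is an assumption for the example.
use regex::Regex;

fn member_count_matches(is: &str, room_member_count: u64) -> Option<bool> {
    // Capture an optional comparison operator followed by a required integer.
    let re = Regex::new(r"^([=<>]+)?(\d+)$").expect("valid regex");
    let caps = re.captures(is.trim())?;
    let ineq = caps.get(1).map_or("==", |m| m.as_str());
    let rhs: u64 = caps.get(2)?.as_str().parse().ok()?;
    Some(match ineq {
        "" | "==" => room_member_count == rhs,
        "<" => room_member_count < rhs,
        ">" => room_member_count > rhs,
        ">=" => room_member_count >= rhs,
        "<=" => room_member_count <= rhs,
        _ => false,
    })
}

// e.g. member_count_matches(">2", 10) == Some(true)
//      member_count_matches("4", 10)  == Some(false)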
522 rust/src/push/mod.rs Normal file
@@ -0,0 +1,522 @@
// Copyright 2022 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! An implementation of Matrix push rules.
//!
//! The `Cow<_>` type is used extensively within this module to allow creating
//! the base rules as constants (in Rust constants can't require explicit
//! allocation atm).
//!
//! ---
//!
//! Push rules is the system used to determine which events trigger a push (and a
//! bump in notification counts).
//!
//! This consists of a list of "push rules" for each user, where a push rule is a
//! pair of "conditions" and "actions". When a user receives an event Synapse
//! iterates over the list of push rules until it finds one where all the conditions
//! match the event, at which point "actions" describe the outcome (e.g. notify,
//! highlight, etc).
//!
//! Push rules are split up into 5 different "kinds" (aka "priority classes"), which
//! are run in order:
//!     1. Override — highest priority rules, e.g. always ignore notices
//!     2. Content — content specific rules, e.g. @ notifications
//!     3. Room — per room rules, e.g. enable/disable notifications for all messages
//!        in a room
//!     4. Sender — per sender rules, e.g. never notify for messages from a given
//!        user
//!     5. Underride — the lowest priority "default" rules, e.g. notify for every
//!        message.
//!
//! The set of "base rules" are the list of rules that every user has by default. A
//! user can modify their copy of the push rules in one of three ways:
//!     1. Adding a new push rule of a certain kind
//!     2. Changing the actions of a base rule
//!     3. Enabling/disabling a base rule.
//!
//! The base rules are split into whether they come before or after a particular
//! kind, so the order of push rule evaluation would be: base rules for before
//! "override" kind, user defined "override" rules, base rules after "override"
//! kind, etc, etc.

use std::borrow::Cow;
use std::collections::{BTreeMap, HashMap, HashSet};

use anyhow::{Context, Error};
use log::warn;
use pyo3::prelude::*;
use pythonize::{depythonize, pythonize};
use serde::de::Error as _;
use serde::{Deserialize, Serialize};
use serde_json::Value;

use self::evaluator::PushRuleEvaluator;

mod base_rules;
pub mod evaluator;
pub mod utils;

/// Called when registering modules with python.
pub fn register_module(py: Python<'_>, m: &PyModule) -> PyResult<()> {
    let child_module = PyModule::new(py, "push")?;
    child_module.add_class::<PushRule>()?;
    child_module.add_class::<PushRules>()?;
    child_module.add_class::<FilteredPushRules>()?;
    child_module.add_class::<PushRuleEvaluator>()?;
    child_module.add_function(wrap_pyfunction!(get_base_rule_ids, m)?)?;

    m.add_submodule(child_module)?;

    // We need to manually add the module to sys.modules to make `from
    // synapse.synapse_rust import push` work.
    py.import("sys")?
        .getattr("modules")?
        .set_item("synapse.synapse_rust.push", child_module)?;

    Ok(())
}

#[pyfunction]
fn get_base_rule_ids() -> HashSet<&'static str> {
    base_rules::BASE_RULES_BY_ID.keys().copied().collect()
}

/// A single push rule for a user.
#[derive(Debug, Clone)]
#[pyclass(frozen)]
pub struct PushRule {
    /// A unique ID for this rule
    pub rule_id: Cow<'static, str>,
    /// The "kind" of push rule this is (see `PRIORITY_CLASS_MAP` in Python)
    #[pyo3(get)]
    pub priority_class: i32,
    /// The conditions that must all match for actions to be applied
    pub conditions: Cow<'static, [Condition]>,
    /// The actions to apply if all conditions are met
    pub actions: Cow<'static, [Action]>,
    /// Whether this is a base rule
    #[pyo3(get)]
    pub default: bool,
    /// Whether this is enabled by default
    #[pyo3(get)]
    pub default_enabled: bool,
}

#[pymethods]
impl PushRule {
    #[staticmethod]
    pub fn from_db(
        rule_id: String,
        priority_class: i32,
        conditions: &str,
        actions: &str,
    ) -> Result<PushRule, Error> {
        let conditions = serde_json::from_str(conditions).context("parsing conditions")?;
        let actions = serde_json::from_str(actions).context("parsing actions")?;

        Ok(PushRule {
            rule_id: Cow::Owned(rule_id),
            priority_class,
            conditions,
            actions,
            default: false,
            default_enabled: true,
        })
    }

    #[getter]
    fn rule_id(&self) -> &str {
        &self.rule_id
    }

    #[getter]
    fn actions(&self) -> Vec<Action> {
        self.actions.clone().into_owned()
    }

    #[getter]
    fn conditions(&self) -> Vec<Condition> {
        self.conditions.clone().into_owned()
    }

    fn __repr__(&self) -> String {
        format!(
            "<PushRule rule_id={}, conditions={:?}, actions={:?}>",
            self.rule_id, self.conditions, self.actions
        )
    }
}

/// The "action" Synapse should perform for a matching push rule.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Action {
    DontNotify,
    Notify,
    Coalesce,
    SetTweak(SetTweak),

    // An unrecognized custom action.
    Unknown(Value),
}

impl IntoPy<PyObject> for Action {
    fn into_py(self, py: Python<'_>) -> PyObject {
        // When we pass the `Action` struct to Python we want it to be converted
        // to a dict. We use `pythonize`, which converts the struct using the
        // `serde` serialization.
        pythonize(py, &self).expect("valid action")
    }
}

/// The body of a `SetTweak` push action.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
pub struct SetTweak {
    set_tweak: Cow<'static, str>,

    #[serde(skip_serializing_if = "Option::is_none")]
    value: Option<TweakValue>,

    // This picks up any other fields that may have been added by clients.
    // These get added when we convert the `Action` to a python object.
    #[serde(flatten)]
    other_keys: Value,
}

/// The value of a `set_tweak`.
///
/// We need this (rather than using `Value` directly) so that we can use
/// `&'static str` in the value when defining the constant base rules.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
#[serde(untagged)]
pub enum TweakValue {
    String(Cow<'static, str>),
    Other(Value),
}

impl Serialize for Action {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        match self {
            Action::DontNotify => serializer.serialize_str("dont_notify"),
            Action::Notify => serializer.serialize_str("notify"),
            Action::Coalesce => serializer.serialize_str("coalesce"),
            Action::SetTweak(tweak) => tweak.serialize(serializer),
            Action::Unknown(value) => value.serialize(serializer),
        }
    }
}

/// Simple helper class for deserializing Action from JSON.
#[derive(Deserialize)]
#[serde(untagged)]
enum ActionDeserializeHelper {
    Str(String),
    SetTweak(SetTweak),
    Unknown(Value),
}

impl<'de> Deserialize<'de> for Action {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let helper: ActionDeserializeHelper = Deserialize::deserialize(deserializer)?;
        match helper {
            ActionDeserializeHelper::Str(s) => match &*s {
                "dont_notify" => Ok(Action::DontNotify),
                "notify" => Ok(Action::Notify),
                "coalesce" => Ok(Action::Coalesce),
                _ => Err(D::Error::custom("unrecognized action")),
            },
            ActionDeserializeHelper::SetTweak(set_tweak) => Ok(Action::SetTweak(set_tweak)),
            ActionDeserializeHelper::Unknown(value) => Ok(Action::Unknown(value)),
        }
    }
}

/// A condition used in push rules to match against an event.
///
/// We need this split as `serde` doesn't give us the ability to have a
/// "catchall" variant in tagged enums.
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(untagged)]
pub enum Condition {
    /// A recognized condition that we can match against
    Known(KnownCondition),
    /// An unrecognized condition that we ignore.
    Unknown(Value),
}

/// The set of "known" conditions that we can handle.
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(rename_all = "snake_case")]
#[serde(tag = "kind")]
pub enum KnownCondition {
    EventMatch(EventMatchCondition),
    #[serde(rename = "im.nheko.msc3664.related_event_match")]
    RelatedEventMatch(RelatedEventMatchCondition),
    ContainsDisplayName,
    RoomMemberCount {
        #[serde(skip_serializing_if = "Option::is_none")]
        is: Option<Cow<'static, str>>,
    },
    SenderNotificationPermission {
        key: Cow<'static, str>,
    },
}

impl IntoPy<PyObject> for Condition {
    fn into_py(self, py: Python<'_>) -> PyObject {
        pythonize(py, &self).expect("valid condition")
    }
}

impl<'source> FromPyObject<'source> for Condition {
    fn extract(ob: &'source PyAny) -> PyResult<Self> {
        Ok(depythonize(ob)?)
    }
}

/// The body of a [`KnownCondition::EventMatch`]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct EventMatchCondition {
    pub key: Cow<'static, str>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub pattern: Option<Cow<'static, str>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub pattern_type: Option<Cow<'static, str>>,
}

/// The body of a [`KnownCondition::RelatedEventMatch`]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct RelatedEventMatchCondition {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub key: Option<Cow<'static, str>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub pattern: Option<Cow<'static, str>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub pattern_type: Option<Cow<'static, str>>,
    pub rel_type: Cow<'static, str>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub include_fallbacks: Option<bool>,
}

/// The collection of push rules for a user.
#[derive(Debug, Clone, Default)]
#[pyclass(frozen)]
pub struct PushRules {
    /// Custom push rules that override a base rule.
    overridden_base_rules: HashMap<Cow<'static, str>, PushRule>,

    /// Custom rules that come between the prepend/append override base rules.
    override_rules: Vec<PushRule>,
    /// Custom rules that come before the base content rules.
    content: Vec<PushRule>,
    /// Custom rules that come before the base room rules.
    room: Vec<PushRule>,
    /// Custom rules that come before the base sender rules.
    sender: Vec<PushRule>,
    /// Custom rules that come before the base underride rules.
    underride: Vec<PushRule>,
}

#[pymethods]
impl PushRules {
    #[new]
    pub fn new(rules: Vec<PushRule>) -> PushRules {
        let mut push_rules: PushRules = Default::default();

        for rule in rules {
            if let Some(&o) = base_rules::BASE_RULES_BY_ID.get(&*rule.rule_id) {
                push_rules.overridden_base_rules.insert(
                    rule.rule_id.clone(),
                    PushRule {
                        actions: rule.actions.clone(),
                        ..o.clone()
                    },
                );

                continue;
            }

            match rule.priority_class {
                5 => push_rules.override_rules.push(rule),
                4 => push_rules.content.push(rule),
                3 => push_rules.room.push(rule),
                2 => push_rules.sender.push(rule),
                1 => push_rules.underride.push(rule),
                _ => {
                    warn!(
                        "Unrecognized priority class for rule {}: {}",
                        rule.rule_id, rule.priority_class
                    );
                }
            }
        }

        push_rules
    }

    /// Returns the list of all rules, including base rules, in the order they
    /// should be executed in.
    fn rules(&self) -> Vec<PushRule> {
        self.iter().cloned().collect()
    }
}

impl PushRules {
    /// Iterates over all the rules, including base rules, in the order they
    /// should be executed in.
    pub fn iter(&self) -> impl Iterator<Item = &PushRule> {
        base_rules::BASE_PREPEND_OVERRIDE_RULES
            .iter()
            .chain(self.override_rules.iter())
            .chain(base_rules::BASE_APPEND_OVERRIDE_RULES.iter())
            .chain(self.content.iter())
            .chain(base_rules::BASE_APPEND_CONTENT_RULES.iter())
            .chain(self.room.iter())
            .chain(self.sender.iter())
            .chain(self.underride.iter())
            .chain(base_rules::BASE_APPEND_UNDERRIDE_RULES.iter())
            .map(|rule| {
                self.overridden_base_rules
                    .get(&*rule.rule_id)
                    .unwrap_or(rule)
            })
    }
}

/// A wrapper around `PushRules` that checks the enabled state of rules and
/// filters out disabled experimental rules.
#[derive(Debug, Clone, Default)]
#[pyclass(frozen)]
pub struct FilteredPushRules {
    push_rules: PushRules,
    enabled_map: BTreeMap<String, bool>,
    msc3664_enabled: bool,
}

#[pymethods]
impl FilteredPushRules {
    #[new]
    pub fn py_new(
        push_rules: PushRules,
        enabled_map: BTreeMap<String, bool>,
        msc3664_enabled: bool,
    ) -> Self {
        Self {
            push_rules,
            enabled_map,
            msc3664_enabled,
        }
    }

    /// Returns the list of all rules and their enabled state, including base
    /// rules, in the order they should be executed in.
    fn rules(&self) -> Vec<(PushRule, bool)> {
        self.iter().map(|(r, e)| (r.clone(), e)).collect()
    }
}

impl FilteredPushRules {
    /// Iterates over all the rules and their enabled state, including base
    /// rules, in the order they should be executed in.
    fn iter(&self) -> impl Iterator<Item = (&PushRule, bool)> {
        self.push_rules
            .iter()
            .filter(|rule| {
                // Ignore disabled experimental push rules
                if !self.msc3664_enabled
                    && rule.rule_id == "global/override/.im.nheko.msc3664.reply"
                {
                    return false;
                }

                true
            })
            .map(|r| {
                let enabled = *self
                    .enabled_map
                    .get(&*r.rule_id)
                    .unwrap_or(&r.default_enabled);
                (r, enabled)
            })
    }
}

#[test]
fn test_serialize_condition() {
    let condition = Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
        key: "content.body".into(),
        pattern: Some("coffee".into()),
        pattern_type: None,
    }));

    let json = serde_json::to_string(&condition).unwrap();
    assert_eq!(
        json,
        r#"{"kind":"event_match","key":"content.body","pattern":"coffee"}"#
    )
}

#[test]
fn test_deserialize_condition() {
    let json = r#"{"kind":"event_match","key":"content.body","pattern":"coffee"}"#;

    let _: Condition = serde_json::from_str(json).unwrap();
}

#[test]
fn test_deserialize_unstable_msc3664_condition() {
    let json = r#"{"kind":"im.nheko.msc3664.related_event_match","key":"content.body","pattern":"coffee","rel_type":"m.in_reply_to"}"#;

    let condition: Condition = serde_json::from_str(json).unwrap();
    assert!(matches!(
        condition,
        Condition::Known(KnownCondition::RelatedEventMatch(_))
    ));
}

#[test]
fn test_deserialize_custom_condition() {
    let json = r#"{"kind":"custom_tag"}"#;

    let condition: Condition = serde_json::from_str(json).unwrap();
    assert!(matches!(condition, Condition::Unknown(_)));

    let new_json = serde_json::to_string(&condition).unwrap();
    assert_eq!(json, new_json);
}

#[test]
fn test_deserialize_action() {
    let _: Action = serde_json::from_str(r#""notify""#).unwrap();
    let _: Action = serde_json::from_str(r#""dont_notify""#).unwrap();
    let _: Action = serde_json::from_str(r#""coalesce""#).unwrap();
    let _: Action = serde_json::from_str(r#"{"set_tweak": "highlight"}"#).unwrap();
}

#[test]
fn test_custom_action() {
    let json = r#"{"some_custom":"action_fields"}"#;

    let action: Action = serde_json::from_str(json).unwrap();
    assert!(matches!(action, Action::Unknown(_)));

    let new_json = serde_json::to_string(&action).unwrap();
    assert_eq!(json, new_json);
}
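A detail of the `SetTweak` struct above that is easy to miss: the `#[serde(flatten)] other_keys` field means any extra keys a client attached to a tweak survive the round trip through Rust. A minimal standalone sketch, using a simplified mirror of the struct rather than the exact type above:

// Standalone sketch: a simplified mirror of `SetTweak` showing that
// `#[serde(flatten)]` preserves client-supplied extra keys.
use std::borrow::Cow;

use serde::{Deserialize, Serialize};
use serde_json::Value;

#[derive(Serialize, Deserialize, Debug)]
struct SetTweak {
    set_tweak: Cow<'static, str>,
    #[serde(skip_serializing_if = "Option::is_none")]
    value: Option<Value>,
    // Any keys not named above land here and are written back out.
    #[serde(flatten)]
    other_keys: Value,
}

fn main() -> Result<(), serde_json::Error> {
    let json = r#"{"set_tweak":"sound","value":"default","org.example.volume":11}"#;
    let tweak: SetTweak = serde_json::from_str(json)?;
    // The unrecognized "org.example.volume" key is retained in `other_keys`
    // and reappears on serialization.
    assert_eq!(serde_json::to_string(&tweak)?, json);
    Ok(())
}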
215 rust/src/push/utils.rs Normal file
@@ -0,0 +1,215 @@
// Copyright 2022 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use anyhow::bail;
use anyhow::Context;
use anyhow::Error;
use lazy_static::lazy_static;
use regex;
use regex::Regex;
use regex::RegexBuilder;

lazy_static! {
    /// Matches runs of non-wildcard characters followed by wildcard characters.
    static ref WILDCARD_RUN: Regex = Regex::new(r"([^\?\*]*)([\?\*]*)").expect("valid regex");
}

/// Extract the localpart from a Matrix style ID
pub(crate) fn get_localpart_from_id(id: &str) -> Result<&str, Error> {
    let (localpart, _) = id
        .split_once(':')
        .with_context(|| format!("ID does not contain colon: {id}"))?;

    // We need to strip off the first character, which is the ID type.
    if localpart.is_empty() {
        bail!("Invalid ID {id}");
    }

    Ok(&localpart[1..])
}

/// Used by `glob_to_regex` to specify what to match the regex against.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum GlobMatchType {
    /// The generated regex will match against the entire input.
    Whole,
    /// The generated regex will match against words.
    Word,
}

/// Convert a "glob" style expression to a regex, anchoring either to the entire
/// input or to individual words.
pub fn glob_to_regex(glob: &str, match_type: GlobMatchType) -> Result<Regex, Error> {
    let mut chunks = Vec::new();

    // Patterns with wildcards must be simplified to avoid performance cliffs
    // - The glob `?**?**?` is equivalent to the glob `???*`
    // - The glob `???*` is equivalent to the regex `.{3,}`
    for captures in WILDCARD_RUN.captures_iter(glob) {
        if let Some(chunk) = captures.get(1) {
            chunks.push(regex::escape(chunk.as_str()));
        }

        if let Some(wildcards) = captures.get(2) {
            if wildcards.as_str() == "" {
                continue;
            }

            let question_marks = wildcards.as_str().chars().filter(|c| *c == '?').count();

            if wildcards.as_str().contains('*') {
                chunks.push(format!(".{{{question_marks},}}"));
            } else {
                chunks.push(format!(".{{{question_marks}}}"));
            }
        }
    }

    let joined = chunks.join("");

    let regex_str = match match_type {
        GlobMatchType::Whole => format!(r"\A{joined}\z"),

        // The `(?:^|\b|\W)` and `(?:\b|\W|$)` anchors handle the case where the
        // pattern starts or ends with a non-word character.
        GlobMatchType::Word => format!(r"(?:^|\b|\W){joined}(?:\b|\W|$)"),
    };

    Ok(RegexBuilder::new(&regex_str)
        .case_insensitive(true)
        .build()?)
}

/// Compiles the glob into a `Matcher`.
pub fn get_glob_matcher(glob: &str, match_type: GlobMatchType) -> Result<Matcher, Error> {
    // There are a number of shortcuts we can make if the glob doesn't contain a
    // wild card.
    let matcher = if glob.contains(['*', '?']) {
        let regex = glob_to_regex(glob, match_type)?;
        Matcher::Regex(regex)
    } else if match_type == GlobMatchType::Whole {
        // If there aren't any wildcards and we're matching the whole thing,
        // then we can simply do a case-insensitive string match.
        Matcher::Whole(glob.to_lowercase())
    } else {
        // Otherwise, if we're matching against words, we can first check
        // whether the haystack contains the glob at all.
        Matcher::Word {
            word: glob.to_lowercase(),
            regex: None,
        }
    };

    Ok(matcher)
}

/// Matches against a glob
pub enum Matcher {
    /// Plain regex matching.
    Regex(Regex),

    /// Case-insensitive equality.
    Whole(String),

    /// Word matching. `regex` is a cache of calling [`glob_to_regex`] on `word`.
    Word { word: String, regex: Option<Regex> },
}

impl Matcher {
    /// Checks if the glob matches the given haystack.
    pub fn is_match(&mut self, haystack: &str) -> Result<bool, Error> {
        // We want to do case-insensitive matching, so we convert to
        // lowercase first.
        let haystack = haystack.to_lowercase();

        match self {
            Matcher::Regex(regex) => Ok(regex.is_match(&haystack)),
            Matcher::Whole(whole) => Ok(whole == &haystack),
            Matcher::Word { word, regex } => {
                // If we're looking for a literal word, then we first check if
                // the haystack contains the word as a substring.
                if !haystack.contains(&*word) {
                    return Ok(false);
                }

                // If it does contain the word as a substring, then we need to
                // check if it is an actual word by testing it against the regex.
                let regex = if let Some(regex) = regex {
                    regex
                } else {
                    let compiled_regex = glob_to_regex(word, GlobMatchType::Word)?;
                    regex.insert(compiled_regex)
                };

                Ok(regex.is_match(&haystack))
            }
        }
    }
}

#[test]
fn test_get_localpart_from_id() {
    get_localpart_from_id("").unwrap_err();
    get_localpart_from_id(":").unwrap_err();
    get_localpart_from_id(":asd").unwrap_err();
    get_localpart_from_id("::as::asad").unwrap_err();

    assert_eq!(get_localpart_from_id("@test:foo").unwrap(), "test");
    assert_eq!(get_localpart_from_id("@:").unwrap(), "");
    assert_eq!(get_localpart_from_id("@test:foo:907").unwrap(), "test");
}

#[test]
fn test_glob() -> Result<(), Error> {
    assert_eq!(
        glob_to_regex("simple", GlobMatchType::Whole)?.as_str(),
        r"\Asimple\z"
    );
    assert_eq!(
        glob_to_regex("simple*", GlobMatchType::Whole)?.as_str(),
        r"\Asimple.{0,}\z"
    );
    assert_eq!(
        glob_to_regex("simple?", GlobMatchType::Whole)?.as_str(),
        r"\Asimple.{1}\z"
    );
    assert_eq!(
        glob_to_regex("simple?*?*", GlobMatchType::Whole)?.as_str(),
        r"\Asimple.{2,}\z"
    );
    assert_eq!(
        glob_to_regex("simple???", GlobMatchType::Whole)?.as_str(),
        r"\Asimple.{3}\z"
    );

    assert_eq!(
        glob_to_regex("escape.", GlobMatchType::Whole)?.as_str(),
        r"\Aescape\.\z"
    );

    assert!(glob_to_regex("simple", GlobMatchType::Whole)?.is_match("simple"));
    assert!(!glob_to_regex("simple", GlobMatchType::Whole)?.is_match("simples"));
    assert!(glob_to_regex("simple*", GlobMatchType::Whole)?.is_match("simples"));
    assert!(glob_to_regex("simple?", GlobMatchType::Whole)?.is_match("simples"));
    assert!(glob_to_regex("simple*", GlobMatchType::Whole)?.is_match("simple"));

    assert!(glob_to_regex("simple", GlobMatchType::Word)?.is_match("some simple."));
    assert!(glob_to_regex("simple", GlobMatchType::Word)?.is_match("simple"));
    assert!(!glob_to_regex("simple", GlobMatchType::Word)?.is_match("simples"));

    assert!(glob_to_regex("@user:foo", GlobMatchType::Word)?.is_match("Some @user:foo test"));
    assert!(glob_to_regex("@user:foo", GlobMatchType::Word)?.is_match("@user:foo"));

    Ok(())
}
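The wildcard-run simplification documented in the comments inside `glob_to_regex` can be exercised directly. An illustrative extra test (not part of this diff) against the public behaviour shown in this file:

#[test]
fn glob_wildcard_run_simplification() {
    // `?**?**?` is a single run of wildcards containing three `?`s and at
    // least one `*`, so it should collapse into the single bounded repetition
    // `.{3,}` rather than a chain of nested wildcards (which risks
    // pathological matching behaviour).
    let regex = glob_to_regex("?**?**?", GlobMatchType::Whole).unwrap();
    assert_eq!(regex.as_str(), r"\A.{3,}\z");
    assert!(regex.is_match("abc"));
    assert!(!regex.is_match("ab"));
}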
@@ -88,10 +88,9 @@ def make_wrapper(factory: Callable[P, R]) -> Callable[P, R]:

    @functools.wraps(factory)
    def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
        # type-ignore: should be redundant once we can use https://github.com/python/mypy/pull/12668
        if "strict" not in kwargs:  # type: ignore[attr-defined]
        if "strict" not in kwargs:
            raise MissingStrictInConstrainedTypeException(factory.__name__)
        if not kwargs["strict"]:  # type: ignore[index]
        if not kwargs["strict"]:
            raise MissingStrictInConstrainedTypeException(factory.__name__)
        return factory(*args, **kwargs)


@@ -126,7 +126,7 @@ export COMPLEMENT_BASE_IMAGE=complement-synapse

extra_test_args=()

test_tags="synapse_blacklist,msc2716,msc3030,msc3787"
test_tags="synapse_blacklist,msc3787"

# All environment variables starting with PASS_ will be shared.
# (The prefix is stripped off before reaching the container.)
@@ -158,7 +158,10 @@ else

  # We only test faster room joins on monoliths, because they are purposefully
  # being developed without worker support to start with.
  test_tags="$test_tags,faster_joins"
  #
  # The tests for importing historical messages (MSC2716) and jump to date (MSC3030)
  # also only pass with monoliths, currently.
  test_tags="$test_tags,faster_joins,msc2716,msc3030"
fi

@@ -2,23 +2,16 @@
#
# This script generates SQL files for creating a brand new Synapse DB with the latest
# schema, on both SQLite3 and Postgres.
#
# It does so by having Synapse generate an up-to-date SQLite DB, then running
# synapse_port_db to convert it to Postgres. It then dumps the contents of both.

export PGHOST="localhost"
POSTGRES_DB_NAME="synapse_full_schema.$$"

SQLITE_SCHEMA_FILE="schema.sql.sqlite"
SQLITE_ROWS_FILE="rows.sql.sqlite"
POSTGRES_SCHEMA_FILE="full.sql.postgres"
POSTGRES_ROWS_FILE="rows.sql.postgres"

POSTGRES_MAIN_DB_NAME="synapse_full_schema_main.$$"
POSTGRES_COMMON_DB_NAME="synapse_full_schema_common.$$"
POSTGRES_STATE_DB_NAME="synapse_full_schema_state.$$"
REQUIRED_DEPS=("matrix-synapse" "psycopg2")

usage() {
  echo
  echo "Usage: $0 -p <postgres_username> -o <path> [-c] [-n] [-h]"
  echo "Usage: $0 -p <postgres_username> -o <path> [-c] [-n <schema number>] [-h]"
  echo
  echo "-p <postgres_username>"
  echo "  Username to connect to local postgres instance. The password will be requested"
@@ -27,11 +20,19 @@ usage() {
  echo "  CI mode. Prints every command that the script runs."
  echo "-o <path>"
  echo "  Directory to output full schema files to."
  echo "-n <schema number>"
  echo "  Schema number for the new snapshot. Used to set the location of files within "
  echo "  the output directory, mimicking that of synapse/storage/schemas."
  echo "  Defaults to 9999."
  echo "-h"
  echo "  Display this help text."
  echo ""
  echo "  NB: make sure to run this against the *oldest* supported version of postgres,"
  echo "  or else pg_dump might output non-backwards-compatible syntax."
}

while getopts "p:co:h" opt; do
SCHEMA_NUMBER="9999"
while getopts "p:co:hn:" opt; do
  case $opt in
    p)
      export PGUSER=$OPTARG
@@ -48,6 +49,9 @@ while getopts "p:co:h" opt; do
      usage
      exit
      ;;
    n)
      SCHEMA_NUMBER="$OPTARG"
      ;;
    \?)
      echo "ERROR: Invalid option: -$OPTARG" >&2
      usage
@@ -95,12 +99,21 @@ cd "$(dirname "$0")/.."
TMPDIR=$(mktemp -d)
KEY_FILE=$TMPDIR/test.signing.key # default Synapse signing key path
SQLITE_CONFIG=$TMPDIR/sqlite.conf
SQLITE_DB=$TMPDIR/homeserver.db
SQLITE_MAIN_DB=$TMPDIR/main.db
SQLITE_STATE_DB=$TMPDIR/state.db
SQLITE_COMMON_DB=$TMPDIR/common.db
POSTGRES_CONFIG=$TMPDIR/postgres.conf

# Ensure these files are deleted on script exit
# TODO: the trap should also drop the temp postgres DB
trap 'rm -rf $TMPDIR' EXIT
cleanup() {
  echo "Cleaning up temporary sqlite database and config files..."
  rm -r "$TMPDIR"
  echo "Cleaning up temporary Postgres database..."
  dropdb --if-exists "$POSTGRES_COMMON_DB_NAME"
  dropdb --if-exists "$POSTGRES_MAIN_DB_NAME"
  dropdb --if-exists "$POSTGRES_STATE_DB_NAME"
}
trap 'cleanup' EXIT

cat > "$SQLITE_CONFIG" <<EOF
server_name: "test"
@@ -110,10 +123,22 @@ macaroon_secret_key: "abcde"

report_stats: false

database:
  name: "sqlite3"
  args:
    database: "$SQLITE_DB"
databases:
  common:
    name: "sqlite3"
    data_stores: []
    args:
      database: "$SQLITE_COMMON_DB"
  main:
    name: "sqlite3"
    data_stores: ["main"]
    args:
      database: "$SQLITE_MAIN_DB"
  state:
    name: "sqlite3"
    data_stores: ["state"]
    args:
      database: "$SQLITE_STATE_DB"

# Suppress the key server warning.
trusted_key_servers: []
@@ -127,13 +152,32 @@ macaroon_secret_key: "abcde"

report_stats: false

database:
  name: "psycopg2"
  args:
    user: "$PGUSER"
    host: "$PGHOST"
    password: "$PGPASSWORD"
    database: "$POSTGRES_DB_NAME"
databases:
  common:
    name: "psycopg2"
    data_stores: []
    args:
      user: "$PGUSER"
      host: "$PGHOST"
      password: "$PGPASSWORD"
      database: "$POSTGRES_COMMON_DB_NAME"
  main:
    name: "psycopg2"
    data_stores: ["main"]
    args:
      user: "$PGUSER"
      host: "$PGHOST"
      password: "$PGPASSWORD"
      database: "$POSTGRES_MAIN_DB_NAME"
  state:
    name: "psycopg2"
    data_stores: ["state"]
    args:
      user: "$PGUSER"
      host: "$PGHOST"
      password: "$PGPASSWORD"
      database: "$POSTGRES_STATE_DB_NAME"

# Suppress the key server warning.
trusted_key_servers: []
@@ -148,33 +192,105 @@ echo "Running db background jobs..."
synapse/_scripts/update_synapse_database.py --database-config "$SQLITE_CONFIG" --run-background-updates

# Create the PostgreSQL database.
echo "Creating postgres database..."
createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_DB_NAME"
echo "Creating postgres databases..."
createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_COMMON_DB_NAME"
createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_MAIN_DB_NAME"
createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_STATE_DB_NAME"

echo "Running db background jobs..."
synapse/_scripts/update_synapse_database.py --database-config "$POSTGRES_CONFIG" --run-background-updates

# Delete schema_version, applied_schema_deltas and applied_module_schemas tables
# Also delete any shadow tables from fts4
echo "Dropping unwanted db tables..."
SQL="

# Some common tables are created and updated by Synapse itself and do not belong in the
# schema.
DROP_APP_MANAGED_TABLES="
DROP TABLE schema_version;
DROP TABLE schema_compat_version;
DROP TABLE applied_schema_deltas;
DROP TABLE applied_module_schemas;
"
sqlite3 "$SQLITE_DB" <<< "$SQL"
psql "$POSTGRES_DB_NAME" -w <<< "$SQL"
# Other common tables are not created by Synapse and do belong in the schema.
# TODO: we could derive DROP_COMMON_TABLES from the dump of the common-only DB. But
# since there's only one table there, I haven't bothered to do so.
DROP_COMMON_TABLES="$DROP_APP_MANAGED_TABLES
DROP TABLE background_updates;
"

echo "Dumping SQLite3 schema to '$OUTPUT_DIR/$SQLITE_SCHEMA_FILE' and '$OUTPUT_DIR/$SQLITE_ROWS_FILE'..."
sqlite3 "$SQLITE_DB" ".schema --indent" > "$OUTPUT_DIR/$SQLITE_SCHEMA_FILE"
sqlite3 "$SQLITE_DB" ".dump --data-only --nosys" > "$OUTPUT_DIR/$SQLITE_ROWS_FILE"
sqlite3 "$SQLITE_COMMON_DB" <<< "$DROP_APP_MANAGED_TABLES"
sqlite3 "$SQLITE_MAIN_DB" <<< "$DROP_COMMON_TABLES"
sqlite3 "$SQLITE_STATE_DB" <<< "$DROP_COMMON_TABLES"
psql "$POSTGRES_COMMON_DB_NAME" -w <<< "$DROP_APP_MANAGED_TABLES"
psql "$POSTGRES_MAIN_DB_NAME" -w <<< "$DROP_COMMON_TABLES"
psql "$POSTGRES_STATE_DB_NAME" -w <<< "$DROP_COMMON_TABLES"

echo "Dumping Postgres schema to '$OUTPUT_DIR/$POSTGRES_SCHEMA_FILE' and '$OUTPUT_DIR/$POSTGRES_ROWS_FILE'..."
pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner "$POSTGRES_DB_NAME" | sed -e '/^$/d' -e '/^--/d' -e 's/public\.//g' -e '/^SET /d' -e '/^SELECT /d' > "$OUTPUT_DIR/$POSTGRES_SCHEMA_FILE"
pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owner "$POSTGRES_DB_NAME" | sed -e '/^$/d' -e '/^--/d' -e 's/public\.//g' -e '/^SET /d' -e '/^SELECT /d' > "$OUTPUT_DIR/$POSTGRES_ROWS_FILE"
# For Reasons(TM), SQLite's `.schema` also dumps out "shadow tables", the implementation
# details behind full text search tables. Omit these from the dumps.

echo "Cleaning up temporary Postgres database..."
dropdb $POSTGRES_DB_NAME
sqlite3 "$SQLITE_MAIN_DB" <<< "
DROP TABLE event_search_content;
DROP TABLE event_search_segments;
DROP TABLE event_search_segdir;
DROP TABLE event_search_docsize;
DROP TABLE event_search_stat;
DROP TABLE user_directory_search_content;
DROP TABLE user_directory_search_segments;
DROP TABLE user_directory_search_segdir;
DROP TABLE user_directory_search_docsize;
DROP TABLE user_directory_search_stat;
"

echo "Dumping SQLite3 schema..."

mkdir -p "$OUTPUT_DIR/"{common,main,state}"/full_schemas/$SCHEMA_NUMBER"
sqlite3 "$SQLITE_COMMON_DB" ".schema" > "$OUTPUT_DIR/common/full_schemas/$SCHEMA_NUMBER/full.sql.sqlite"
sqlite3 "$SQLITE_COMMON_DB" ".dump --data-only --nosys" >> "$OUTPUT_DIR/common/full_schemas/$SCHEMA_NUMBER/full.sql.sqlite"
sqlite3 "$SQLITE_MAIN_DB" ".schema" > "$OUTPUT_DIR/main/full_schemas/$SCHEMA_NUMBER/full.sql.sqlite"
sqlite3 "$SQLITE_MAIN_DB" ".dump --data-only --nosys" >> "$OUTPUT_DIR/main/full_schemas/$SCHEMA_NUMBER/full.sql.sqlite"
sqlite3 "$SQLITE_STATE_DB" ".schema" > "$OUTPUT_DIR/state/full_schemas/$SCHEMA_NUMBER/full.sql.sqlite"
sqlite3 "$SQLITE_STATE_DB" ".dump --data-only --nosys" >> "$OUTPUT_DIR/state/full_schemas/$SCHEMA_NUMBER/full.sql.sqlite"

cleanup_pg_schema() {
  # Cleanup as follows:
  # - Remove empty lines. pg_dump likes to output a lot of these.
  # - Remove comment-only lines. pg_dump also likes to output a lot of these to visually
  #   separate tables etc.
  # - Remove "public." prefix --- the schema name.
  # - Remove "SET" commands. Last time I ran this, the output commands were
  #     SET statement_timeout = 0;
  #     SET lock_timeout = 0;
  #     SET idle_in_transaction_session_timeout = 0;
  #     SET client_encoding = 'UTF8';
  #     SET standard_conforming_strings = on;
  #     SET check_function_bodies = false;
  #     SET xmloption = content;
  #     SET client_min_messages = warning;
  #     SET row_security = off;
  #     SET default_table_access_method = heap;
  # - Very carefully remove specific SELECT statements. We CANNOT blanket remove all
  #   SELECT statements because some of those have side-effects which we do want in the
  #   schema. Last time I ran this, the only SELECTs were
  #     SELECT pg_catalog.set_config('search_path', '', false);
  #   and
  #     SELECT pg_catalog.setval(text, bigint, bool);
  #   We do want to remove the former, but the latter is important. If the last argument
  #   is `true` or omitted, this marks the given integer as having been consumed and
  #   will NOT appear as the nextval.
  sed -e '/^$/d' \
      -e '/^--/d' \
      -e 's/public\.//g' \
      -e '/^SET /d' \
      -e '/^SELECT pg_catalog.set_config/d'
}

echo "Dumping Postgres schema..."

pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner "$POSTGRES_COMMON_DB_NAME" | cleanup_pg_schema > "$OUTPUT_DIR/common/full_schemas/$SCHEMA_NUMBER/full.sql.postgres"
pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owner "$POSTGRES_COMMON_DB_NAME" | cleanup_pg_schema >> "$OUTPUT_DIR/common/full_schemas/$SCHEMA_NUMBER/full.sql.postgres"
pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner "$POSTGRES_MAIN_DB_NAME" | cleanup_pg_schema > "$OUTPUT_DIR/main/full_schemas/$SCHEMA_NUMBER/full.sql.postgres"
pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owner "$POSTGRES_MAIN_DB_NAME" | cleanup_pg_schema >> "$OUTPUT_DIR/main/full_schemas/$SCHEMA_NUMBER/full.sql.postgres"
pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner "$POSTGRES_STATE_DB_NAME" | cleanup_pg_schema > "$OUTPUT_DIR/state/full_schemas/$SCHEMA_NUMBER/full.sql.postgres"
pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owner "$POSTGRES_STATE_DB_NAME" | cleanup_pg_schema >> "$OUTPUT_DIR/state/full_schemas/$SCHEMA_NUMBER/full.sql.postgres"

echo "Done! Files dumped to: $OUTPUT_DIR"

@@ -29,7 +29,7 @@ class SynapsePlugin(Plugin):
        self, fullname: str
    ) -> Optional[Callable[[MethodSigContext], CallableType]]:
        if fullname.startswith(
            "synapse.util.caches.descriptors._CachedFunction.__call__"
            "synapse.util.caches.descriptors.CachedFunction.__call__"
        ) or fullname.startswith(
            "synapse.util.caches.descriptors._LruCachedFunction.__call__"
        ):
@@ -38,7 +38,7 @@ class SynapsePlugin(Plugin):


def cached_function_method_signature(ctx: MethodSigContext) -> CallableType:
    """Fixes the `_CachedFunction.__call__` signature to be correct.
    """Fixes the `CachedFunction.__call__` signature to be correct.

    It already has *almost* the correct signature, except:

50 stubs/synapse/synapse_rust/push.pyi Normal file
@@ -0,0 +1,50 @@
from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Set, Tuple, Union

from synapse.types import JsonDict

class PushRule:
    @property
    def rule_id(self) -> str: ...
    @property
    def priority_class(self) -> int: ...
    @property
    def conditions(self) -> Sequence[Mapping[str, str]]: ...
    @property
    def actions(self) -> Sequence[Union[Mapping[str, Any], str]]: ...
    @property
    def default(self) -> bool: ...
    @property
    def default_enabled(self) -> bool: ...
    @staticmethod
    def from_db(
        rule_id: str, priority_class: int, conditions: str, actions: str
    ) -> "PushRule": ...

class PushRules:
    def __init__(self, rules: Collection[PushRule]): ...
    def rules(self) -> Collection[PushRule]: ...

class FilteredPushRules:
    def __init__(
        self, push_rules: PushRules, enabled_map: Dict[str, bool], msc3664_enabled: bool
    ): ...
    def rules(self) -> Collection[Tuple[PushRule, bool]]: ...

def get_base_rule_ids() -> Collection[str]: ...

class PushRuleEvaluator:
    def __init__(
        self,
        flattened_keys: Mapping[str, str],
        room_member_count: int,
        sender_power_level: Optional[int],
        notification_power_levels: Mapping[str, int],
        related_events_flattened: Mapping[str, Mapping[str, str]],
        related_event_match_enabled: bool,
    ): ...
    def run(
        self,
        push_rules: FilteredPushRules,
        user_id: Optional[str],
        display_name: Optional[str],
    ) -> Collection[dict]: ...
@@ -21,6 +21,7 @@ import os
import sys

from synapse.util.rust import check_rust_lib_up_to_date
from synapse.util.stringutils import strtobool

# Check that we're not running on an unsupported Python version.
if sys.version_info < (3, 7):
@@ -28,25 +29,22 @@ if sys.version_info < (3, 7):
    sys.exit(1)

# Allow using the asyncio reactor via env var.
if bool(os.environ.get("SYNAPSE_ASYNC_IO_REACTOR", False)):
    try:
        from incremental import Version
if strtobool(os.environ.get("SYNAPSE_ASYNC_IO_REACTOR", "0")):
    from incremental import Version

        import twisted
    import twisted

        # We need a bugfix that is included in Twisted 21.2.0:
        # https://twistedmatrix.com/trac/ticket/9787
        if twisted.version < Version("Twisted", 21, 2, 0):
            print("Using asyncio reactor requires Twisted>=21.2.0")
            sys.exit(1)
    # We need a bugfix that is included in Twisted 21.2.0:
    # https://twistedmatrix.com/trac/ticket/9787
    if twisted.version < Version("Twisted", 21, 2, 0):
        print("Using asyncio reactor requires Twisted>=21.2.0")
        sys.exit(1)

        import asyncio
    import asyncio

        from twisted.internet import asyncioreactor
    from twisted.internet import asyncioreactor

        asyncioreactor.install(asyncio.get_event_loop())
    except ImportError:
        pass
    asyncioreactor.install(asyncio.get_event_loop())

# Twisted and canonicaljson will fail to import when this file is executed to
# get the __version__ during a fresh install. That's OK and subsequent calls to

@@ -72,6 +72,7 @@ from synapse.storage.databases.main.registration import (
    RegistrationBackgroundUpdateStore,
    find_max_generated_user_id_localpart,
)
from synapse.storage.databases.main.relations import RelationsWorkerStore
from synapse.storage.databases.main.room import RoomBackgroundUpdateStore
from synapse.storage.databases.main.roommember import RoomMemberBackgroundUpdateStore
from synapse.storage.databases.main.search import SearchBackgroundUpdateStore
@@ -107,10 +108,11 @@ BOOLEAN_COLUMNS = {
    "redactions": ["have_censored"],
    "room_stats_state": ["is_federatable"],
    "local_media_repository": ["safe_from_quarantine"],
    "users": ["shadow_banned"],
    "users": ["shadow_banned", "approved"],
    "e2e_fallback_keys_json": ["used"],
    "access_tokens": ["used"],
    "device_lists_changes_in_room": ["converted_to_destinations"],
    "pushers": ["enabled"],
}


@@ -205,6 +207,7 @@ class Store(
    PusherWorkerStore,
    PresenceBackgroundUpdateStore,
    ReceiptsBackgroundUpdateStore,
    RelationsWorkerStore,
):
    def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]:
        return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)

12 synapse/_scripts/update_synapse_database.py Executable file → Normal file
@@ -15,7 +15,6 @@

import argparse
import logging
import sys
from typing import cast

import yaml
@@ -48,10 +47,13 @@ class MockHomeserver(HomeServer):


def run_background_updates(hs: HomeServer) -> None:
    store = hs.get_datastores().main
    main = hs.get_datastores().main
    state = hs.get_datastores().state

    async def run_background_updates() -> None:
        await store.db_pool.updates.run_background_updates(sleep=False)
        await main.db_pool.updates.run_background_updates(sleep=False)
        if state:
            await state.db_pool.updates.run_background_updates(sleep=False)
        # Stop the reactor to exit the script once every background update is run.
        reactor.stop()

@@ -97,10 +99,6 @@ def main() -> None:
    # Load, process and sanity-check the config.
    hs_config = yaml.safe_load(args.database_config)

    if "database" not in hs_config:
        sys.stderr.write("The configuration file must have a 'database' section.\n")
        sys.exit(4)

    config = HomeServerConfig()
    config.parse_config_dict(hs_config, "", "")

@@ -31,6 +31,9 @@ MAX_ALIAS_LENGTH = 255
# the maximum length for a user id is 255 characters
MAX_USERID_LENGTH = 255

# Constant value used for the pseudo-thread which is the main timeline.
MAIN_TIMELINE: Final = "main"


class Membership:

@@ -269,3 +272,14 @@ class PublicRoomsFilterFields:

    GENERIC_SEARCH_TERM: Final = "generic_search_term"
    ROOM_TYPES: Final = "room_types"


class ApprovalNoticeMedium:
    """Identifier for the medium this server will use to serve notice of approval for a
    specific user's registration.

    As defined in https://github.com/matrix-org/matrix-spec-proposals/blob/babolivier/m_not_approved/proposals/3866-user-not-approved-error.md
    """

    NONE = "org.matrix.msc3866.none"
    EMAIL = "org.matrix.msc3866.email"

@@ -100,6 +100,14 @@ class Codes(str, Enum):

    UNREDACTED_CONTENT_DELETED = "FI.MAU.MSC2815_UNREDACTED_CONTENT_DELETED"

    # Returned for federation requests where we can't process a request as we
    # can't ensure the sending server is in a room which is partial-stated on
    # our side.
    # Part of MSC3895.
    UNABLE_DUE_TO_PARTIAL_STATE = "ORG.MATRIX.MSC3895_UNABLE_DUE_TO_PARTIAL_STATE"

    USER_AWAITING_APPROVAL = "ORG.MATRIX.MSC3866_USER_AWAITING_APPROVAL"


class CodeMessageException(RuntimeError):
    """An exception with integer code and message string attributes.
@@ -147,7 +155,13 @@ class RedirectException(CodeMessageException):

class SynapseError(CodeMessageException):
    """A base exception type for matrix errors which have an errcode and error
    message (as well as an HTTP status code).
    message (as well as an HTTP status code). These often bubble all the way up to the
    client API response so the error code and status often reach the client directly as
    defined here. If the error doesn't make sense to present to a client, then it
    probably shouldn't be a `SynapseError`. For example, if we contact another
    homeserver over federation, we shouldn't automatically ferry response errors back to
    the client on our end (a 500 from a remote server does not make sense to a client
    when our server did not experience a 500).

    Attributes:
        errcode: Matrix error code e.g 'M_FORBIDDEN'
@@ -560,6 +574,20 @@ class UnredactedContentDeletedError(SynapseError):
        return cs_error(self.msg, self.errcode, **extra)


class NotApprovedError(SynapseError):
    def __init__(
        self,
        msg: str,
        approval_notice_medium: str,
    ):
        super().__init__(
            code=403,
            msg=msg,
            errcode=Codes.USER_AWAITING_APPROVAL,
            additional_fields={"approval_notice_medium": approval_notice_medium},
        )


def cs_error(msg: str, code: str = Codes.UNKNOWN, **kwargs: Any) -> "JsonDict":
    """Utility method for constructing an error response for client-server
    interactions.
@@ -578,8 +606,20 @@ def cs_error(msg: str, code: str = Codes.UNKNOWN, **kwargs: Any) -> "JsonDict":


class FederationError(RuntimeError):
    """This class is used to inform remote homeservers about erroneous
    PDUs they sent us.
    """
    Raised when we process an erroneous PDU.

    There are two kinds of scenarios where this exception can be raised:

    1. We may pull an invalid PDU from a remote homeserver (e.g. during backfill). We
       raise this exception to signal an error to the rest of the application.
    2. We may be pushed an invalid PDU as part of a `/send` transaction from a remote
       homeserver. We raise so that we can respond to the transaction and include the
       error string in the "PDU Processing Result". The message will likely be ignored
       by the remote homeserver, and is not machine parse-able since it's just a
       string.

    TODO: In the future, we should split these usage scenarios into their own error types.

    FATAL: The remote server could not interpret the source event.
        (e.g., it was missing a required field)
@@ -618,6 +658,27 @@ class FederationError(RuntimeError):
    }


class FederationPullAttemptBackoffError(RuntimeError):
    """
    Raised to indicate that we are deliberately not attempting to pull the given
    event over federation because we've already done so recently and are backing off.

    Attributes:
        event_id: The event_id which we are refusing to pull
        message: A custom error message that gives more context
    """

    def __init__(self, event_ids: List[str], message: Optional[str]):
        self.event_ids = event_ids

        if message:
            error_message = message
        else:
            error_message = f"Not attempting to pull event_ids={self.event_ids} because we already tried to pull them recently (backing off)."

        super().__init__(error_message)


class HttpResponseException(CodeMessageException):
    """
    Represents an HTTP-level failure of an outbound request

@@ -36,7 +36,7 @@ from jsonschema import FormatChecker
 from synapse.api.constants import EduTypes, EventContentFields
 from synapse.api.errors import SynapseError
 from synapse.api.presence import UserPresenceState
-from synapse.events import EventBase
+from synapse.events import EventBase, relation_from_event
 from synapse.types import JsonDict, RoomID, UserID
 
 if TYPE_CHECKING:
@@ -53,6 +53,12 @@ FILTER_SCHEMA = {
         # check types are valid event types
         "types": {"type": "array", "items": {"type": "string"}},
         "not_types": {"type": "array", "items": {"type": "string"}},
+        # MSC3874, filtering /messages.
+        "org.matrix.msc3874.rel_types": {"type": "array", "items": {"type": "string"}},
+        "org.matrix.msc3874.not_rel_types": {
+            "type": "array",
+            "items": {"type": "string"},
+        },
     },
 }
 
@@ -84,6 +90,8 @@ ROOM_EVENT_FILTER_SCHEMA = {
         "contains_url": {"type": "boolean"},
         "lazy_load_members": {"type": "boolean"},
         "include_redundant_members": {"type": "boolean"},
+        "unread_thread_notifications": {"type": "boolean"},
+        "org.matrix.msc3773.unread_thread_notifications": {"type": "boolean"},
         # Include or exclude events with the provided labels.
         # cf https://github.com/matrix-org/matrix-doc/pull/2326
         "org.matrix.labels": {"type": "array", "items": {"type": "string"}},
@@ -240,6 +248,9 @@ class FilterCollection:
     def include_redundant_members(self) -> bool:
         return self._room_state_filter.include_redundant_members
 
+    def unread_thread_notifications(self) -> bool:
+        return self._room_timeline_filter.unread_thread_notifications
+
     async def filter_presence(
         self, events: Iterable[UserPresenceState]
     ) -> List[UserPresenceState]:
@@ -304,6 +315,16 @@ class Filter:
         self.include_redundant_members = filter_json.get(
             "include_redundant_members", False
         )
+        self.unread_thread_notifications: bool = filter_json.get(
+            "unread_thread_notifications", False
+        )
+        if (
+            not self.unread_thread_notifications
+            and hs.config.experimental.msc3773_enabled
+        ):
+            self.unread_thread_notifications = filter_json.get(
+                "org.matrix.msc3773.unread_thread_notifications", False
+            )
 
         self.types = filter_json.get("types", None)
         self.not_types = filter_json.get("not_types", [])
@@ -319,8 +340,15 @@ class Filter:
         self.labels = filter_json.get("org.matrix.labels", None)
         self.not_labels = filter_json.get("org.matrix.not_labels", [])
 
-        self.related_by_senders = self.filter_json.get("related_by_senders", None)
-        self.related_by_rel_types = self.filter_json.get("related_by_rel_types", None)
+        self.related_by_senders = filter_json.get("related_by_senders", None)
+        self.related_by_rel_types = filter_json.get("related_by_rel_types", None)
+
+        # For compatibility with _check_fields.
+        self.rel_types = None
+        self.not_rel_types = []
+        if hs.config.experimental.msc3874_enabled:
+            self.rel_types = filter_json.get("org.matrix.msc3874.rel_types", None)
+            self.not_rel_types = filter_json.get("org.matrix.msc3874.not_rel_types", [])
 
     def filters_all_types(self) -> bool:
         return "*" in self.not_types
@@ -371,11 +399,19 @@ class Filter:
         # check if there is a string url field in the content for filtering purposes
         labels = content.get(EventContentFields.LABELS, [])
 
+        # Check if the event has a relation.
+        rel_type = None
+        if isinstance(event, EventBase):
+            relation = relation_from_event(event)
+            if relation:
+                rel_type = relation.rel_type
+
         field_matchers = {
             "rooms": lambda v: room_id == v,
             "senders": lambda v: sender == v,
             "types": lambda v: _matches_wildcard(ev_type, v),
             "labels": lambda v: v in labels,
+            "rel_types": lambda v: rel_type == v,
         }
 
         result = self._check_fields(field_matchers)
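Tying the schema and `Filter` changes together: a client opting into both experiments would include the new fields in a room event filter. A hedged sketch of such a timeline filter (values are illustrative; placement inside the full filter body follows the Matrix filter API):

```python
# Room timeline filter exercising the new fields, assuming the homeserver has
# msc3773_enabled and msc3874_enabled turned on.
timeline_filter = {
    # Also readable via the "org.matrix.msc3773." prefixed name.
    "unread_thread_notifications": True,
    # MSC3874: keep only events that are part of a thread.
    "org.matrix.msc3874.rel_types": ["m.thread"],
    "org.matrix.msc3874.not_rel_types": [],
}
```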
@@ -343,6 +343,7 @@ class RequestRatelimiter:
         requester: Requester,
         update: bool = True,
         is_admin_redaction: bool = False,
+        n_actions: int = 1,
     ) -> None:
         """Ratelimits requests.
 
@@ -355,6 +356,8 @@ class RequestRatelimiter:
             is_admin_redaction: Whether this is a room admin/moderator
                 redacting an event. If so then we may apply different
                 ratelimits depending on config.
+            n_actions: Multiplier for the number of actions to apply to the
+                rate limiter at once.
 
         Raises:
             LimitExceededError if the request should be ratelimited
@@ -383,7 +386,9 @@ class RequestRatelimiter:
         if is_admin_redaction and self.admin_redaction_ratelimiter:
             # If we have separate config for admin redactions, use a separate
             # ratelimiter as to not have user_ids clash
-            await self.admin_redaction_ratelimiter.ratelimit(requester, update=update)
+            await self.admin_redaction_ratelimiter.ratelimit(
+                requester, update=update, n_actions=n_actions
+            )
         else:
             # Override rate and burst count per-user
             await self.request_ratelimiter.ratelimit(
@@ -391,4 +396,5 @@ class RequestRatelimiter:
                 rate_hz=messages_per_second,
                 burst_count=burst_count,
                 update=update,
+                n_actions=n_actions,
             )
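`n_actions` lets one request charge the limiter for several actions at once, e.g. an endpoint that creates a batch of events. A hedged sketch of a call site (`request_ratelimiter` is an assumed `RequestRatelimiter` instance and `events_to_send` an assumed batch):

```python
async def send_batch(request_ratelimiter, requester, events_to_send) -> None:
    # One atomic charge for the whole batch, instead of one call per event.
    await request_ratelimiter.ratelimit(
        requester,
        n_actions=len(events_to_send),
    )
    # ... proceed to create and persist the events ...
```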
@@ -28,7 +28,7 @@ FEDERATION_V1_PREFIX = FEDERATION_PREFIX + "/v1"
 FEDERATION_V2_PREFIX = FEDERATION_PREFIX + "/v2"
 FEDERATION_UNSTABLE_PREFIX = FEDERATION_PREFIX + "/unstable"
 STATIC_PREFIX = "/_matrix/static"
-SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
+SERVER_KEY_PREFIX = "/_matrix/key"
 MEDIA_R0_PREFIX = "/_matrix/media/r0"
 MEDIA_V3_PREFIX = "/_matrix/media/v3"
 LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"
 
@@ -98,9 +98,7 @@ def register_sighup(func: Callable[P, None], *args: P.args, **kwargs: P.kwargs)
         func: Function to be called when sent a SIGHUP signal.
         *args, **kwargs: args and kwargs to be passed to the target function.
     """
-    # This type-ignore should be redundant once we use a mypy release with
-    # https://github.com/python/mypy/pull/12668.
-    _sighup_callbacks.append((func, args, kwargs))  # type: ignore[arg-type]
+    _sighup_callbacks.append((func, args, kwargs))
 
 
 def start_worker_reactor(
@@ -560,7 +558,7 @@ def reload_cache_config(config: HomeServerConfig) -> None:
             logger.warning(f)
     else:
         logger.debug(
-            "New cache config. Was:\n %s\nNow:\n",
+            "New cache config. Was:\n %s\nNow:\n %s",
            previous_cache_config.__dict__,
            config.caches.__dict__,
        )
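The `reload_cache_config` fix is the classic lazy-`%`-formatting pitfall: with one `%s` and two arguments, the mismatch only surfaces when the record is actually formatted, and the logging machinery swallows it into a "--- Logging error ---" traceback on stderr. A standalone illustration:

```python
import logging

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)

# Buggy: one %s placeholder, two arguments -> logging error at format time.
log.debug("New cache config. Was:\n %s\nNow:\n", {"a": 1}, {"b": 2})
# Fixed: placeholders match the argument count.
log.debug("New cache config. Was:\n %s\nNow:\n %s", {"a": 1}, {"b": 2})
```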
@@ -53,9 +53,9 @@ logger = logging.getLogger("synapse.app.admin_cmd")
 
 class AdminCmdSlavedStore(
     SlavedFilteringStore,
+    SlavedDeviceStore,
     SlavedPushRuleStore,
     SlavedEventStore,
-    SlavedDeviceStore,
     TagsWorkerStore,
     DeviceInboxWorkerStore,
     AccountDataWorkerStore,
@@ -51,11 +51,18 @@ import argparse
 import importlib
 import itertools
 import multiprocessing
+import os
+import signal
 import sys
-from typing import Any, Callable, List
+from types import FrameType
+from typing import Any, Callable, Dict, List, Optional
 
 from twisted.internet.main import installReactor
 
+# A record of the original signal handlers, from before we installed our custom ones.
+# We restore these in our child processes.
+_original_signal_handlers: Dict[int, Any] = {}
+
 
 class ProxiedReactor:
     """
@@ -105,6 +112,11 @@ def _worker_entrypoint(
 
     sys.argv = args
 
+    # reset the custom signal handlers that we installed, so that the children start
+    # from a clean slate.
+    for sig, handler in _original_signal_handlers.items():
+        signal.signal(sig, handler)
+
     from twisted.internet.epollreactor import EPollReactor
 
     proxy_reactor._install_real_reactor(EPollReactor())
@@ -167,13 +179,29 @@ def main() -> None:
     update_proc.join()
     print("===== PREPARED DATABASE =====", file=sys.stderr)
 
+    processes: List[multiprocessing.Process] = []
+
+    # Install signal handlers to propagate signals to all our children, so that they
+    # shut down cleanly. This also inhibits our own exit, but that's good: we want to
+    # wait until the children have exited.
+    def handle_signal(signum: int, frame: Optional[FrameType]) -> None:
+        print(
+            f"complement_fork_starter: Caught signal {signum}. Stopping children.",
+            file=sys.stderr,
+        )
+        for p in processes:
+            if p.pid:
+                os.kill(p.pid, signum)
+
+    for sig in (signal.SIGINT, signal.SIGTERM):
+        _original_signal_handlers[sig] = signal.signal(sig, handle_signal)
+
     # At this point, we've imported all the main entrypoints for all the workers.
     # Now we basically just fork() out to create the workers we need.
     # Because we're using fork(), all the workers get a clone of this launcher's
     # memory space and don't need to repeat the work of loading the code!
     # Instead of using fork() directly, we use the multiprocessing library,
     # which uses fork() on Unix platforms.
-    processes = []
     for (func, worker_args) in zip(worker_functions, args_by_worker):
         process = multiprocessing.Process(
             target=_worker_entrypoint, args=(func, proxy_reactor, worker_args)
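The launcher's save-and-restore dance works because `signal.signal` returns the previously installed handler. A standalone sketch of the same pattern, independent of Synapse:

```python
import signal
from types import FrameType
from typing import Any, Dict, Optional

_saved: Dict[int, Any] = {}

def _parent_handler(signum: int, frame: Optional[FrameType]) -> None:
    print(f"parent caught signal {signum}")

def install() -> None:
    # signal.signal returns the *previous* handler, which we stash for later.
    for sig in (signal.SIGINT, signal.SIGTERM):
        _saved[sig] = signal.signal(sig, _parent_handler)

def restore() -> None:
    # In a forked child: undo the parent's handlers before doing real work.
    for sig, handler in _saved.items():
        signal.signal(sig, handler)
```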
@@ -28,7 +28,7 @@ from synapse.api.urls import (
     LEGACY_MEDIA_PREFIX,
     MEDIA_R0_PREFIX,
     MEDIA_V3_PREFIX,
-    SERVER_KEY_V2_PREFIX,
+    SERVER_KEY_PREFIX,
 )
 from synapse.app import _base
 from synapse.app._base import (
@@ -65,6 +65,7 @@ from synapse.rest.client import (
     push_rule,
     read_marker,
     receipts,
+    relations,
     room,
     room_batch,
     room_keys,
@@ -88,7 +89,7 @@ from synapse.rest.client.register import (
     RegistrationTokenValidityRestServlet,
 )
 from synapse.rest.health import HealthResource
-from synapse.rest.key.v2 import KeyApiV2Resource
+from synapse.rest.key.v2 import KeyResource
 from synapse.rest.synapse.client import build_synapse_client_resource_tree
 from synapse.rest.well_known import well_known_resource
 from synapse.server import HomeServer
@@ -177,13 +178,13 @@ class KeyUploadServlet(RestServlet):
             # Proxy headers from the original request, such as the auth headers
             # (in case the access token is there) and the original IP /
             # User-Agent of the request.
-            headers = {
-                header: request.requestHeaders.getRawHeaders(header, [])
+            headers: Dict[bytes, List[bytes]] = {
+                header: list(request.requestHeaders.getRawHeaders(header, []))
                 for header in (b"Authorization", b"User-Agent")
             }
             # Add the previous hop to the X-Forwarded-For header.
-            x_forwarded_for = request.requestHeaders.getRawHeaders(
-                b"X-Forwarded-For", []
+            x_forwarded_for = list(
+                request.requestHeaders.getRawHeaders(b"X-Forwarded-For", [])
             )
             # we use request.client here, since we want the previous hop, not the
             # original client (as returned by request.getClientAddress()).
@@ -308,6 +309,7 @@ class GenericWorkerServer(HomeServer):
                     sync.register_servlets(self, resource)
                     events.register_servlets(self, resource)
                     room.register_servlets(self, resource, is_worker=True)
+                    relations.register_servlets(self, resource)
                     room.register_deprecated_servlets(self, resource)
                     initial_sync.register_servlets(self, resource)
                     room_batch.register_servlets(self, resource)
@@ -323,13 +325,13 @@ class GenericWorkerServer(HomeServer):
 
                     presence.register_servlets(self, resource)
 
-                    resources.update({CLIENT_API_PREFIX: resource})
+                    resources[CLIENT_API_PREFIX] = resource
 
                     resources.update(build_synapse_client_resource_tree(self))
-                    resources.update({"/.well-known": well_known_resource(self)})
+                    resources["/.well-known"] = well_known_resource(self)
 
                 elif name == "federation":
-                    resources.update({FEDERATION_PREFIX: TransportLayerServer(self)})
+                    resources[FEDERATION_PREFIX] = TransportLayerServer(self)
                 elif name == "media":
                     if self.config.media.can_load_media_repo:
                         media_repo = self.get_media_repository_resource()
@@ -357,16 +359,12 @@ class GenericWorkerServer(HomeServer):
                     # Only load the openid resource separately if federation resource
                    # is not specified since federation resource includes openid
                    # resource.
-                    resources.update(
-                        {
-                            FEDERATION_PREFIX: TransportLayerServer(
-                                self, servlet_groups=["openid"]
-                            )
-                        }
-                    )
+                    resources[FEDERATION_PREFIX] = TransportLayerServer(
+                        self, servlet_groups=["openid"]
+                    )
 
             if name in ["keys", "federation"]:
-                resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
+                resources[SERVER_KEY_PREFIX] = KeyResource(self)
 
             if name == "replication":
                 resources[REPLICATION_PREFIX] = ReplicationRestResource(self)
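The `KeyUploadServlet` change is mostly about typing: `getRawHeaders` returns an optional sequence, so wrapping it in `list(...)` (with a `[]` default) both narrows the type to `List[bytes]` and yields a copy that is safe to extend. A small sketch with Twisted's `Headers`:

```python
from twisted.web.http_headers import Headers

request_headers = Headers({b"User-Agent": [b"SynapseTest/0.1"]})
headers = {
    header: list(request_headers.getRawHeaders(header, []))
    for header in (b"Authorization", b"User-Agent")
}
x_forwarded_for = list(request_headers.getRawHeaders(b"X-Forwarded-For", []))
x_forwarded_for.append(b"10.0.0.1")  # hypothetical previous hop
```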
@@ -31,7 +31,7 @@ from synapse.api.urls import (
     LEGACY_MEDIA_PREFIX,
     MEDIA_R0_PREFIX,
     MEDIA_V3_PREFIX,
-    SERVER_KEY_V2_PREFIX,
+    SERVER_KEY_PREFIX,
     STATIC_PREFIX,
 )
 from synapse.app import _base
@@ -60,7 +60,7 @@ from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
 from synapse.rest import ClientRestResource
 from synapse.rest.admin import AdminRestResource
 from synapse.rest.health import HealthResource
-from synapse.rest.key.v2 import KeyApiV2Resource
+from synapse.rest.key.v2 import KeyResource
 from synapse.rest.synapse.client import build_synapse_client_resource_tree
 from synapse.rest.well_known import well_known_resource
 from synapse.server import HomeServer
@@ -215,30 +215,22 @@ class SynapseHomeServer(HomeServer):
             consent_resource: Resource = ConsentResource(self)
             if compress:
                 consent_resource = gz_wrap(consent_resource)
-            resources.update({"/_matrix/consent": consent_resource})
+            resources["/_matrix/consent"] = consent_resource
 
         if name == "federation":
             federation_resource: Resource = TransportLayerServer(self)
             if compress:
                 federation_resource = gz_wrap(federation_resource)
-            resources.update({FEDERATION_PREFIX: federation_resource})
+            resources[FEDERATION_PREFIX] = federation_resource
 
         if name == "openid":
-            resources.update(
-                {
-                    FEDERATION_PREFIX: TransportLayerServer(
-                        self, servlet_groups=["openid"]
-                    )
-                }
-            )
+            resources[FEDERATION_PREFIX] = TransportLayerServer(
+                self, servlet_groups=["openid"]
+            )
 
         if name in ["static", "client"]:
-            resources.update(
-                {
-                    STATIC_PREFIX: StaticResource(
-                        os.path.join(os.path.dirname(synapse.__file__), "static")
-                    )
-                }
-            )
+            resources[STATIC_PREFIX] = StaticResource(
+                os.path.join(os.path.dirname(synapse.__file__), "static")
+            )
 
         if name in ["media", "federation", "client"]:
@@ -257,7 +249,7 @@ class SynapseHomeServer(HomeServer):
             )
 
         if name in ["keys", "federation"]:
-            resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
+            resources[SERVER_KEY_PREFIX] = KeyResource(self)
 
         if name == "metrics" and self.config.metrics.enable_metrics:
             metrics_resource: Resource = MetricsResource(RegistryProxy)
@@ -172,12 +172,24 @@ class ApplicationService:
         Returns:
             True if this service would like to know about this room.
         """
-        member_list = await store.get_users_in_room(
+        # We can use `get_local_users_in_room(...)` here because an application service
+        # can only be interested in local users of the server it's on (ignore any remote
+        # users that might match the user namespace regex).
+        #
+        # In the future, we can consider re-using
+        # `store.get_app_service_users_in_room`, which is very similar to this
+        # function but performs slightly worse, because here we have an early
+        # escape-hatch: we return as soon as we find a single user that the
+        # appservice is interested in. The juice would be worth the squeeze if
+        # `store.get_app_service_users_in_room` was used in more places besides
+        # an experimental MSC. But for now we can avoid doing more work and
+        # barely using it later.
+        local_user_ids = await store.get_local_users_in_room(
             room_id, on_invalidate=cache_context.invalidate
         )
 
         # check joined member events
-        for user_id in member_list:
+        for user_id in local_user_ids:
             if self.is_interested_in_user(user_id):
                 return True
         return False
@@ -120,7 +120,11 @@ class ApplicationServiceApi(SimpleHttpClient):
 
         uri = service.url + ("/users/%s" % urllib.parse.quote(user_id))
         try:
-            response = await self.get_json(uri, {"access_token": service.hs_token})
+            response = await self.get_json(
+                uri,
+                {"access_token": service.hs_token},
+                headers={"Authorization": [f"Bearer {service.hs_token}"]},
+            )
             if response is not None:  # just an empty json object
                 return True
         except CodeMessageException as e:
@@ -140,7 +144,11 @@ class ApplicationServiceApi(SimpleHttpClient):
 
         uri = service.url + ("/rooms/%s" % urllib.parse.quote(alias))
         try:
-            response = await self.get_json(uri, {"access_token": service.hs_token})
+            response = await self.get_json(
+                uri,
+                {"access_token": service.hs_token},
+                headers={"Authorization": [f"Bearer {service.hs_token}"]},
+            )
             if response is not None:  # just an empty json object
                 return True
         except CodeMessageException as e:
@@ -181,7 +189,11 @@ class ApplicationServiceApi(SimpleHttpClient):
                 **fields,
                 b"access_token": service.hs_token,
             }
-            response = await self.get_json(uri, args=args)
+            response = await self.get_json(
+                uri,
+                args=args,
+                headers={"Authorization": [f"Bearer {service.hs_token}"]},
+            )
             if not isinstance(response, list):
                 logger.warning(
                     "query_3pe to %s returned an invalid response %r", uri, response
@@ -217,7 +229,11 @@ class ApplicationServiceApi(SimpleHttpClient):
             urllib.parse.quote(protocol),
         )
         try:
-            info = await self.get_json(uri, {"access_token": service.hs_token})
+            info = await self.get_json(
+                uri,
+                {"access_token": service.hs_token},
+                headers={"Authorization": [f"Bearer {service.hs_token}"]},
+            )
 
             if not _is_valid_3pe_metadata(info):
                 logger.warning(
@@ -313,6 +329,7 @@ class ApplicationServiceApi(SimpleHttpClient):
                 uri=uri,
                 json_body=body,
                 args={"access_token": service.hs_token},
+                headers={"Authorization": [f"Bearer {service.hs_token}"]},
             )
             if logger.isEnabledFor(logging.DEBUG):
                 logger.debug(
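Sending the `hs_token` both as the legacy `access_token` query parameter and as a `Bearer` header lets application services migrate at their own pace. A hedged sketch (not Synapse code) of how an appservice could accept either form:

```python
from typing import Dict, Optional

def extract_hs_token(query_args: Dict[str, str], headers: Dict[str, str]) -> Optional[str]:
    auth = headers.get("Authorization", "")
    if auth.startswith("Bearer "):
        return auth[len("Bearer "):]
    # Fall back to the legacy query parameter.
    return query_args.get("access_token")
```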
@@ -159,7 +159,7 @@ class CacheConfig(Config):
 
         self.track_memory_usage = cache_config.get("track_memory_usage", False)
         if self.track_memory_usage:
-            check_requirements("cache_memory")
+            check_requirements("cache-memory")
 
         expire_caches = cache_config.get("expire_caches", True)
         cache_entry_ttl = cache_config.get("cache_entry_ttl", "30m")
@@ -12,12 +12,27 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Any
+from typing import Any, Optional
+
+import attr
 
 from synapse.config._base import Config
 from synapse.types import JsonDict
 
 
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class MSC3866Config:
+    """Configuration for MSC3866 (mandating approval for new users)"""
+
+    # Whether the base support for the approval process is enabled. This includes the
+    # ability for administrators to check and update the approval of users, even if no
+    # approval is currently required.
+    enabled: bool = False
+    # Whether to require that new users are approved by an admin before their account
+    # can be used. Note that this setting is ignored if 'enabled' is false.
+    require_approval_for_new_accounts: bool = False
+
+
 class ExperimentalConfig(Config):
     """Config section for enabling experimental features"""
 
@@ -63,7 +78,8 @@ class ExperimentalConfig(Config):
         # MSC3706 (server-side support for partial state in /send_join responses)
         self.msc3706_enabled: bool = experimental.get("msc3706_enabled", False)
 
-        # experimental support for faster joins over federation (msc2775, msc3706)
+        # experimental support for faster joins over federation
+        # (MSC2775, MSC3706, MSC3895)
         # requires a target server with msc3706_enabled enabled.
         self.faster_joins_enabled: bool = experimental.get("faster_joins", False)
 
@@ -79,17 +95,36 @@ class ExperimentalConfig(Config):
         # MSC2815 (allow room moderators to view redacted event content)
         self.msc2815_enabled: bool = experimental.get("msc2815_enabled", False)
 
-        # MSC3786 (Add a default push rule to ignore m.room.server_acl events)
-        self.msc3786_enabled: bool = experimental.get("msc3786_enabled", False)
+        # MSC3773: Thread notifications
+        self.msc3773_enabled: bool = experimental.get("msc3773_enabled", False)
 
         # MSC3772: A push rule for mutual relations.
         self.msc3772_enabled: bool = experimental.get("msc3772_enabled", False)
 
-        # MSC3715: dir param on /relations.
-        self.msc3715_enabled: bool = experimental.get("msc3715_enabled", False)
+        # MSC3664: Pushrules to match on related events
+        self.msc3664_enabled: bool = experimental.get("msc3664_enabled", False)
 
         # MSC3848: Introduce errcodes for specific event sending failures
         self.msc3848_enabled: bool = experimental.get("msc3848_enabled", False)
 
         # MSC3852: Expose last seen user agent field on /_matrix/client/v3/devices.
         self.msc3852_enabled: bool = experimental.get("msc3852_enabled", False)
 
+        # MSC3866: M_USER_AWAITING_APPROVAL error code
+        raw_msc3866_config = experimental.get("msc3866", {})
+        self.msc3866 = MSC3866Config(**raw_msc3866_config)
+
+        # MSC3881: Remotely toggle push notifications for another client
+        self.msc3881_enabled: bool = experimental.get("msc3881_enabled", False)
+
+        # MSC3882: Allow an existing session to sign in a new session
+        self.msc3882_enabled: bool = experimental.get("msc3882_enabled", False)
+        self.msc3882_ui_auth: bool = experimental.get("msc3882_ui_auth", True)
+        self.msc3882_token_timeout = self.parse_duration(
+            experimental.get("msc3882_token_timeout", "5m")
+        )
+
+        # MSC3874: Filtering /messages with rel_types / not_rel_types.
+        self.msc3874_enabled: bool = experimental.get("msc3874_enabled", False)
+
+        # MSC3886: Simple client rendezvous capability
+        self.msc3886_endpoint: Optional[str] = experimental.get(
+            "msc3886_endpoint", None
+        )
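Because `MSC3866Config` is a frozen `attrs` class with defaults, the raw `msc3866` mapping from the config file feeds its constructor directly, and unknown keys fail fast. A short sketch:

```python
from synapse.config.experimental import MSC3866Config

# Round trip from the experimental config section into the typed object.
raw = {"enabled": True, "require_approval_for_new_accounts": True}
msc3866 = MSC3866Config(**raw)
assert msc3866.enabled and msc3866.require_approval_for_new_accounts

# A typo in the config raises TypeError immediately:
# MSC3866Config(**{"enabeld": True})
```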
@@ -1,27 +0,0 @@
-# Copyright 2017 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from typing import Any
-
-from synapse.types import JsonDict
-
-from ._base import Config
-
-
-class GroupsConfig(Config):
-    section = "groups"
-
-    def read_config(self, config: JsonDict, **kwargs: Any) -> None:
-        self.enable_group_creation = config.get("enable_group_creation", False)
-        self.group_creation_prefix = config.get("group_creation_prefix", "")
@@ -53,7 +53,7 @@ DEFAULT_LOG_CONFIG = Template(
 # Synapse also supports structured logging for machine readable logs which can
 # be ingested by ELK stacks. See [2] for details.
 #
-# [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
+# [1]: https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema
 # [2]: https://matrix-org.github.io/synapse/latest/structured_logging.html
 
 version: 1
@@ -326,6 +326,8 @@ def setup_logging(
         logBeginner: The Twisted logBeginner to use.
 
     """
+    from twisted.internet import reactor
+
     log_config_path = (
         config.worker.worker_log_config
         if use_worker_options
@@ -348,3 +350,4 @@ def setup_logging(
     )
     logging.info("Server hostname: %s", config.server.server_name)
     logging.info("Instance name: %s", hs.get_instance_name())
+    logging.info("Twisted reactor: %s", type(reactor).__name__)
@@ -43,33 +43,7 @@ class MetricsConfig(Config):
     def read_config(self, config: JsonDict, **kwargs: Any) -> None:
         self.enable_metrics = config.get("enable_metrics", False)
 
-        """
-        ### `enable_legacy_metrics` (experimental)
-
-        **Experimental: this option may be removed or have its behaviour
-        changed at any time, with no notice.**
-
-        Set to `true` to publish both legacy and non-legacy Prometheus metric names,
-        or to `false` to only publish non-legacy Prometheus metric names.
-        Defaults to `true`. Has no effect if `enable_metrics` is `false`.
-
-        Legacy metric names include:
-        - metrics containing colons in the name, such as `synapse_util_caches_response_cache:hits`, because colons are supposed to be reserved for user-defined recording rules;
-        - counters that don't end with the `_total` suffix, such as `synapse_federation_client_sent_edus`, therefore not adhering to the OpenMetrics standard.
-
-        These legacy metric names are unconventional and not compliant with OpenMetrics standards.
-        They are included for backwards compatibility.
-
-        Example configuration:
-        ```yaml
-        enable_legacy_metrics: false
-        ```
-
-        See https://github.com/matrix-org/synapse/issues/11106 for context.
-
-        *Since v1.67.0.*
-        """
-        self.enable_legacy_metrics = config.get("enable_legacy_metrics", True)
+        self.enable_legacy_metrics = config.get("enable_legacy_metrics", False)
 
         self.report_stats = config.get("report_stats", None)
         self.report_stats_endpoint = config.get(
@@ -123,6 +123,8 @@ OIDC_PROVIDER_CONFIG_SCHEMA = {
         "userinfo_endpoint": {"type": "string"},
         "jwks_uri": {"type": "string"},
         "skip_verification": {"type": "boolean"},
+        "backchannel_logout_enabled": {"type": "boolean"},
+        "backchannel_logout_ignore_sub": {"type": "boolean"},
         "user_profile_method": {
             "type": "string",
             "enum": ["auto", "userinfo_endpoint"],
@@ -292,6 +294,10 @@ def _parse_oidc_config_dict(
         token_endpoint=oidc_config.get("token_endpoint"),
         userinfo_endpoint=oidc_config.get("userinfo_endpoint"),
         jwks_uri=oidc_config.get("jwks_uri"),
+        backchannel_logout_enabled=oidc_config.get("backchannel_logout_enabled", False),
+        backchannel_logout_ignore_sub=oidc_config.get(
+            "backchannel_logout_ignore_sub", False
+        ),
         skip_verification=oidc_config.get("skip_verification", False),
         user_profile_method=oidc_config.get("user_profile_method", "auto"),
         allow_existing_users=oidc_config.get("allow_existing_users", False),
@@ -368,6 +374,12 @@ class OidcProviderConfig:
     # "openid" scope is used.
     jwks_uri: Optional[str]
 
+    # Whether Synapse should react to backchannel logouts
+    backchannel_logout_enabled: bool
+
+    # Whether Synapse should ignore the `sub` claim in backchannel logouts or not.
+    backchannel_logout_ignore_sub: bool
+
     # Whether to skip metadata verification
     skip_verification: bool
@@ -205,7 +205,7 @@ class ContentRepositoryConfig(Config):
         )
         self.url_preview_enabled = config.get("url_preview_enabled", False)
         if self.url_preview_enabled:
-            check_requirements("url_preview")
+            check_requirements("url-preview")
 
         proxy_env = getproxies_environment()
         if "url_preview_ip_range_blacklist" not in config:
@@ -207,6 +207,9 @@ class HttpListenerConfig:
     additional_resources: Dict[str, dict] = attr.Factory(dict)
     tag: Optional[str] = None
     request_id_header: Optional[str] = None
+    # If true, the listener will return CORS response headers compatible with MSC3886:
+    # https://github.com/matrix-org/matrix-spec-proposals/pull/3886
+    experimental_cors_msc3886: bool = False
 
 
 @attr.s(slots=True, frozen=True, auto_attribs=True)
@@ -935,6 +938,7 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
         additional_resources=listener.get("additional_resources", {}),
         tag=listener.get("tag"),
         request_id_header=listener.get("request_id_header"),
+        experimental_cors_msc3886=listener.get("experimental_cors_msc3886", False),
     )
 
     return ListenerConfig(port, bind_addresses, listener_type, tls, http_config)
@@ -15,7 +15,18 @@
 
 import logging
 import typing
-from typing import Any, Collection, Dict, Iterable, List, Optional, Set, Tuple, Union
+from typing import (
+    Any,
+    Collection,
+    Dict,
+    Iterable,
+    List,
+    Mapping,
+    Optional,
+    Set,
+    Tuple,
+    Union,
+)
 
 from canonicaljson import encode_canonical_json
 from signedjson.key import decode_verify_key_bytes
@@ -134,6 +145,7 @@ def validate_event_for_room_version(event: "EventBase") -> None:
 async def check_state_independent_auth_rules(
     store: _EventSourceStore,
     event: "EventBase",
+    batched_auth_events: Optional[Mapping[str, "EventBase"]] = None,
 ) -> None:
     """Check that an event complies with auth rules that are independent of room state
 
@@ -143,6 +155,8 @@ async def check_state_independent_auth_rules(
     Args:
         store: the datastore; used to fetch the auth events for validation
         event: the event being checked.
+        batched_auth_events: if the event being authed is part of a batch, any events
+            from the same batch that may be necessary to auth the current event
 
     Raises:
         AuthError if the checks fail
@@ -162,6 +176,9 @@ async def check_state_independent_auth_rules(
         redact_behaviour=EventRedactBehaviour.as_is,
         allow_rejected=True,
     )
+    if batched_auth_events:
+        auth_events.update(batched_auth_events)
+
     room_id = event.room_id
     auth_dict: MutableStateMap[str] = {}
     expected_auth_types = auth_types_for_event(event.room_version, event)
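`batched_auth_events` matters when several events are created in one batch: a later event may list earlier, not-yet-persisted events in its `auth_events`, which the store lookup alone cannot resolve. A hedged sketch of the calling pattern (variable names are illustrative):

```python
from synapse.event_auth import check_state_independent_auth_rules

async def auth_check_batch(store, events_in_batch) -> None:
    # Auth-check each event against its not-yet-persisted predecessors.
    batched: dict = {}
    for event in events_in_batch:
        await check_state_independent_auth_rules(
            store, event, batched_auth_events=batched
        )
        batched[event.event_id] = event
```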
@@ -289,6 +289,10 @@ class _EventInternalMetadata:
         """
         return self._dict.get("historical", False)
 
+    def is_notifiable(self) -> bool:
+        """Whether this event can trigger a push notification"""
+        return not self.is_outlier() or self.is_out_of_band_membership()
+
 
 class EventBase(metaclass=abc.ABCMeta):
     @property
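The only non-notifiable case is a plain outlier; out-of-band memberships (e.g. an invite received over federation) are outliers too but should still trigger pushes. As a quick truth table:

```python
# (is_outlier, is_out_of_band_membership) -> is_notifiable
cases = [
    (False, False, True),   # ordinary event
    (True, False, False),   # plain outlier
    (True, True, True),     # out-of-band membership
]
for outlier, oob, expected in cases:
    assert (not outlier or oob) == expected
```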
Some files were not shown because too many files have changed in this diff.