Compare commits: release-v1...erikj/tree

105 commits
| SHA1 |
|---|
| 5de571987e |
| 129691f190 |
| 462db2a171 |
| 7f7b36d56d |
| 057ae8b61c |
| 7aceec3ed9 |
| cad555f07c |
| 23c2f394a5 |
| 602a81f5a2 |
| f046366d2a |
| a22716c5c5 |
| 40a8fba5f6 |
| 326a175987 |
| 0731e0829c |
| 3343035a06 |
| 7281591f4c |
| d765ada84f |
| b116d3ce00 |
| 7be954f59b |
| 512486bbeb |
| cc21a431f3 |
| 21eeacc995 |
| fe15a865a5 |
| df55b377be |
| 0ca4172b5d |
| 599c403d99 |
| 0eb7e69768 |
| cc1071598a |
| ad5761b65c |
| 2341032cf2 |
| 982fe29655 |
| 1d5c80b161 |
| 3371e1abcb |
| 4db7862e0f |
| 90e9b4fa1e |
| 0312ff44c6 |
| 1381563988 |
| a366b75b72 |
| 7218a0ca18 |
| 52a0c8f2f7 |
| fa71bb18b5 |
| 3f178332d6 |
| 6f30eb5b8e |
| b19060a29b |
| 2d82cdafd2 |
| f14c632134 |
| ac7aec0cd3 |
| 6173d585df |
| e5716b631c |
| bc8eefc1e1 |
| 92202ce867 |
| 11f811470f |
| d736d5cfad |
| f1711e1f5c |
| 5ef2f87569 |
| e610128c50 |
| a113011794 |
| 28d96cb2b4 |
| 739adf1551 |
| 757bc0caef |
| a962c5a56d |
| 0c95313a44 |
| bb20113c8f |
| 1391a76cd2 |
| 2b5ab8e367 |
| 4aaeb87dad |
| fb7d24ab6d |
| 57f6f59e3e |
| dcc7873700 |
| a0f51b059c |
| 68db233f0c |
| 6ba732fefe |
| 68695d8007 |
| 578a5e24a9 |
| 347165bc06 |
| 2c2a42cc10 |
| 65e675504f |
| e514495465 |
| d102ad67fd |
| 5b5c943e7d |
| dcc4e0621c |
| 6180e1bc4b |
| 9820665597 |
| fa10468eb4 |
| c04e25789e |
| fe910fb10e |
| 5296c09473 |
| d70ff5cc35 |
| 6da861ae69 |
| 8c2825276f |
| c0efc689cb |
| 50f0e4028b |
| b0366853ca |
| 046a6513bc |
| 8330fc9953 |
| 0ceb3af10b |
| 6ad012ef89 |
| 9667bad55d |
| 09f6e43025 |
| 80c7a06777 |
| 4d3b8fb23f |
| 13e359aec8 |
| e714b8a057 |
| 92a0c18ef0 |
| cdc0259449 |
.ci/scripts/setup_complement_prerequisites.sh (new executable file; 36 lines)

@@ -0,0 +1,36 @@
#!/bin/sh
#
# Common commands to set up Complement's prerequisites in a GitHub Actions CI run.
#
# Must be called after Synapse has been checked out to `synapse/`.
#
set -eu

alias block='{ set +x; } 2>/dev/null; func() { echo "::group::$*"; set -x; }; func'
alias endblock='{ set +x; } 2>/dev/null; func() { echo "::endgroup::"; set -x; }; func'

block Set Go Version
# The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
# See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path

# Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2
echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
# Add the Go path to the PATH: We need this so we can call gotestfmt
echo "~/go/bin" >> $GITHUB_PATH
endblock

block Install Complement Dependencies
sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev
go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest
endblock

block Install custom gotestfmt template
mkdir .gotestfmt/github -p
cp synapse/.ci/complement_package.gotpl .gotestfmt/github/package.gotpl
endblock

block Check out Complement
# Attempt to check out the same branch of Complement as the PR. If it
# doesn't exist, fall back to HEAD.
synapse/.ci/scripts/checkout_complement.sh
endblock
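A note on the `block`/`endblock` aliases above: they wrap GitHub Actions' log-grouping workflow commands (`::group::`/`::endgroup::`), so each phase of the script folds into a collapsible section in the CI log. A minimal sketch of how a pair behaves (illustrative only, not part of the diff):

```sh
block Install Complement Dependencies   # prints "::group::Install Complement Dependencies", then enables `set -x`
sudo apt-get -qq update                 # commands inside the group are echoed because of `set -x`
endblock                                # prints "::endgroup::", closing the collapsible section
```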
.editorconfig

@@ -7,3 +7,4 @@ root = true
[*.py]
indent_style = space
indent_size = 4
max_line_length = 88
.github/workflows/tests.yml (vendored; 67 lines changed)

@@ -329,84 +329,41 @@ jobs:
database: Postgres

steps:
# The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
# See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
- name: "Set Go Version"
run: |
# Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2
echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
# Add the Go path to the PATH: We need this so we can call gotestfmt
echo "~/go/bin" >> $GITHUB_PATH

- name: "Install Complement Dependencies"
run: |
sudo apt-get update && sudo apt-get install -y libolm3 libolm-dev
go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest

- name: Run actions/checkout@v2 for synapse
uses: actions/checkout@v2
with:
path: synapse

- name: "Install custom gotestfmt template"
run: |
mkdir .gotestfmt/github -p
cp synapse/.ci/complement_package.gotpl .gotestfmt/github/package.gotpl

# Attempt to check out the same branch of Complement as the PR. If it
# doesn't exist, fall back to HEAD.
- name: Checkout complement
run: synapse/.ci/scripts/checkout_complement.sh
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh

- run: |
set -o pipefail
POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
shell: bash
name: Run Complement Tests

# We only run the workers tests on `develop` for now, because they're too slow to wait for on PRs.
# Sadly, you can't have an `if` condition on the value of a matrix, so this is a temporary, separate job for now.
# GitHub Actions doesn't support YAML anchors, so it's full-on duplication for now.
complement-developonly:
if: "${{ !failure() && !cancelled() && (github.ref == 'refs/heads/develop') }}"
# XXX When complement with workers is stable, move this back into the standard
# "complement" matrix above.
#
# See https://github.com/matrix-org/synapse/issues/13161
complement-workers:
if: "${{ !failure() && !cancelled() }}"
needs: linting-done
runs-on: ubuntu-latest

name: "Complement Workers (develop only)"

steps:
# The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
# See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
- name: "Set Go Version"
run: |
# Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2
echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
# Add the Go path to the PATH: We need this so we can call gotestfmt
echo "~/go/bin" >> $GITHUB_PATH

- name: "Install Complement Dependencies"
run: |
sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev
go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest

- name: Run actions/checkout@v2 for synapse
uses: actions/checkout@v2
with:
path: synapse

- name: "Install custom gotestfmt template"
run: |
mkdir .gotestfmt/github -p
cp synapse/.ci/complement_package.gotpl .gotestfmt/github/package.gotpl

# Attempt to check out the same branch of Complement as the PR. If it
# doesn't exist, fall back to HEAD.
- name: Checkout complement
run: synapse/.ci/scripts/checkout_complement.sh
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh

- run: |
set -o pipefail
WORKERS=1 COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
POSTGRES=1 WORKERS=1 COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
shell: bash
name: Run Complement Tests
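The `${{ (matrix.database == 'Postgres') && 1 || '' }}` expressions in the run step emulate a ternary in GitHub Actions syntax: each expands to `1` or the empty string depending on the matrix entry. Sketching the effective commands for two of the combinations:

```sh
# monolith + SQLite: both expressions expand to the empty string
COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt

# workers + Postgres: both expressions expand to 1
POSTGRES=1 WORKERS=1 COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
```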
.github/workflows/twisted_trunk.yml (vendored; 46 lines changed)

@@ -96,6 +96,51 @@ jobs:
/logs/results.tap
/logs/**/*.log*

complement:
if: "${{ !failure() && !cancelled() }}"
runs-on: ubuntu-latest

strategy:
fail-fast: false
matrix:
include:
- arrangement: monolith
database: SQLite

- arrangement: monolith
database: Postgres

- arrangement: workers
database: Postgres

steps:
- name: Run actions/checkout@v2 for synapse
uses: actions/checkout@v2
with:
path: synapse

- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh

# This step is specific to the 'Twisted trunk' test run:
- name: Patch dependencies
run: |
set -x
DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx
pipx install poetry==1.1.12

poetry remove -n twisted
poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk
poetry lock --no-update
# NOT IN 1.1.12 poetry lock --check
working-directory: synapse

- run: |
set -o pipefail
TEST_ONLY_SKIP_DEP_HASH_VERIFICATION=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
shell: bash
name: Run Complement Tests

# open an issue if the build fails, so we know about it.
open-issue:
if: failure()
@@ -103,6 +148,7 @@ jobs:
- mypy
- trial
- sytest
- complement

runs-on: ubuntu-latest
CHANGES.md (81 lines changed)

@@ -1,3 +1,83 @@
Synapse vNext
=============

As of this release, Synapse no longer allows the tasks of verifying email address ownership and password reset confirmation to be delegated to an identity server. For more information, see the [upgrade notes](https://matrix-org.github.io/synapse/v1.64/upgrade.html#upgrading-to-v1640).

Synapse 1.63.0rc1 (2022-07-12)
==============================

Features
--------

- Add a rate limit for local users sending invites. ([\#13125](https://github.com/matrix-org/synapse/issues/13125))
- Implement [MSC3827](https://github.com/matrix-org/matrix-spec-proposals/pull/3827): Filtering of `/publicRooms` by room type. ([\#13031](https://github.com/matrix-org/synapse/issues/13031))
- Improve validation logic in Synapse's REST endpoints. ([\#13148](https://github.com/matrix-org/synapse/issues/13148))


Bugfixes
--------

- Fix a long-standing bug where application services were not able to join remote federated rooms without a profile. ([\#13131](https://github.com/matrix-org/synapse/issues/13131))
- Fix a long-standing bug where `_get_state_map_for_room` might raise errors when third party event rules callbacks are present. ([\#13174](https://github.com/matrix-org/synapse/issues/13174))
- Fix a long-standing bug where the `synapse_port_db` script could fail to copy rows with negative row ids. ([\#13226](https://github.com/matrix-org/synapse/issues/13226))
- Fix a bug introduced in 1.54.0 where appservices would not receive room-less EDUs, like presence, when both [MSC2409](https://github.com/matrix-org/matrix-spec-proposals/pull/2409) and [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202) are enabled. ([\#13236](https://github.com/matrix-org/synapse/issues/13236))
- Fix a bug introduced in 1.62.0 where rows were not deleted from the `event_push_actions` table on large servers. ([\#13194](https://github.com/matrix-org/synapse/issues/13194))
- Fix a bug introduced in 1.62.0 where notification counts would get stuck after a highlighted message. ([\#13223](https://github.com/matrix-org/synapse/issues/13223))
- Fix an exception when using the experimental [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to look for remote federated imported events before room creation. ([\#13197](https://github.com/matrix-org/synapse/issues/13197))
- Fix [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202)-enabled appservices not receiving to-device messages, preventing messages from being decrypted. ([\#13235](https://github.com/matrix-org/synapse/issues/13235))


Updates to the Docker image
---------------------------

- Bump the version of `lxml` in the matrix.org Docker images' Debian packages from 4.8.0 to 4.9.1. ([\#13207](https://github.com/matrix-org/synapse/issues/13207))


Improved Documentation
----------------------

- Add an explanation of the `--report-stats` argument to the docs. ([\#13029](https://github.com/matrix-org/synapse/issues/13029))
- Add a helpful example bash script to the contrib directory for creating multiple worker configuration files of the same type. Contributed by @villepeh. ([\#13032](https://github.com/matrix-org/synapse/issues/13032))
- Add missing links to config options. ([\#13166](https://github.com/matrix-org/synapse/issues/13166))
- Add documentation for anonymised homeserver statistics collection. ([\#13086](https://github.com/matrix-org/synapse/issues/13086))
- Add documentation for the existing `databases` option in the homeserver configuration manual. ([\#13212](https://github.com/matrix-org/synapse/issues/13212))
- Clean up references to sample configuration and redirect users to the configuration manual instead. ([\#13077](https://github.com/matrix-org/synapse/issues/13077), [\#13139](https://github.com/matrix-org/synapse/issues/13139))
- Document how the Synapse team does reviews. ([\#13132](https://github.com/matrix-org/synapse/issues/13132))
- Fix the wrong section header for `allow_public_rooms_over_federation` in the homeserver config documentation. ([\#13116](https://github.com/matrix-org/synapse/issues/13116))


Deprecations and Removals
-------------------------

- Remove the obsolete `RoomEventsStoreTestCase`, which had been unused for 8 years. Contributed by @arkamar. ([\#13200](https://github.com/matrix-org/synapse/issues/13200))


Internal Changes
----------------

- Add type annotations to `synapse.logging`, `tests.server` and `tests.utils`. ([\#13028](https://github.com/matrix-org/synapse/issues/13028), [\#13103](https://github.com/matrix-org/synapse/issues/13103), [\#13159](https://github.com/matrix-org/synapse/issues/13159), [\#13136](https://github.com/matrix-org/synapse/issues/13136))
- Enforce type annotations for `tests.test_server`. ([\#13135](https://github.com/matrix-org/synapse/issues/13135))
- Support temporary experimental return values for spam checker module callbacks. ([\#13044](https://github.com/matrix-org/synapse/issues/13044))
- Add support to `complement.sh` for skipping the docker build. ([\#13143](https://github.com/matrix-org/synapse/issues/13143), [\#13158](https://github.com/matrix-org/synapse/issues/13158))
- Add support to `complement.sh` for setting the log level using the `SYNAPSE_TEST_LOG_LEVEL` environment variable. ([\#13152](https://github.com/matrix-org/synapse/issues/13152))
- Enable Complement testing in the 'Twisted Trunk' CI runs. ([\#13079](https://github.com/matrix-org/synapse/issues/13079), [\#13157](https://github.com/matrix-org/synapse/issues/13157))
- Improve startup times in Complement test runs against workers, particularly in CPU-constrained environments. ([\#13127](https://github.com/matrix-org/synapse/issues/13127))
- Update the config used by Complement to allow device name lookup over federation. ([\#13167](https://github.com/matrix-org/synapse/issues/13167))
- Faster room joins: handle the race between persisting an event and un-partial stating a room. ([\#13100](https://github.com/matrix-org/synapse/issues/13100))
- Faster room joins: fix a race in the recalculation of current room state. ([\#13151](https://github.com/matrix-org/synapse/issues/13151))
- Faster room joins: skip waiting for full state when processing incoming events over federation. ([\#13144](https://github.com/matrix-org/synapse/issues/13144))
- Raise a `DependencyError` on missing dependencies instead of a `ConfigError`. ([\#13113](https://github.com/matrix-org/synapse/issues/13113))
- Avoid stripping line breaks from SQL sent to the database. ([\#13129](https://github.com/matrix-org/synapse/issues/13129))
- Apply ratelimiting earlier in the processing of `/send` requests. ([\#13134](https://github.com/matrix-org/synapse/issues/13134))
- Improve exception handling when processing events received over federation. ([\#13145](https://github.com/matrix-org/synapse/issues/13145))
- Check that `auto_vacuum` is disabled when porting a SQLite database to Postgres, as `VACUUM`s must not be performed between runs of the script. ([\#13195](https://github.com/matrix-org/synapse/issues/13195))
- Reduce DB usage of `/sync` when a large number of unread messages have recently been sent in a room. ([\#13119](https://github.com/matrix-org/synapse/issues/13119), [\#13153](https://github.com/matrix-org/synapse/issues/13153))
- Reduce memory consumption when processing incoming events in large rooms. ([\#13078](https://github.com/matrix-org/synapse/issues/13078), [\#13222](https://github.com/matrix-org/synapse/issues/13222))
- Reduce the number of queries used to get profile information. Contributed by Nick @ Beeper (@fizzadar). ([\#13209](https://github.com/matrix-org/synapse/issues/13209))
- Reduce the number of events queried during room creation. Contributed by Nick @ Beeper (@fizzadar). ([\#13210](https://github.com/matrix-org/synapse/issues/13210))
- More aggressively rotate push actions. ([\#13211](https://github.com/matrix-org/synapse/issues/13211))
- Add a `max_line_length` setting for Python files to the `.editorconfig`. Contributed by @sumnerevans @ Beeper. ([\#13228](https://github.com/matrix-org/synapse/issues/13228))


Synapse 1.62.0 (2022-07-05)
===========================

@@ -5,7 +85,6 @@ No significant changes since 1.62.0rc3.

Authors of spam-checker plugins should consult the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.62/docs/upgrade.md#upgrading-to-v1620) to learn about the enriched signatures for spam checker callbacks, which are supported with this release of Synapse.


Synapse 1.62.0rc3 (2022-07-04)
==============================
New changelog.d files (one entry each):

- changelog.d/12943.misc: Remove code which incorrectly attempted to reconcile state with remote servers when processing incoming events.
- changelog.d/12967.removal: Drop tables used for groups/communities.
- changelog.d/13094.misc: Make the AS login method call `Auth.get_user_by_req` for checking the AS token.
- changelog.d/13175.misc: Add prometheus counters for ephemeral events and to device messages pushed to app services. Contributed by Brad @ Beeper.
- changelog.d/13192.removal: Drop support for delegating email verification to an external server.
- changelog.d/13198.misc: Refactor receipts servlet logic to avoid duplicated code.
- changelog.d/13208.feature: Add a `room_type` field in the responses for the list room and room details admin API. Contributed by @andrewdoh.
- changelog.d/13215.misc: Preparation for database schema simplifications: populate `state_key` and `rejection_reason` for existing rows in the `events` table.
- changelog.d/13218.misc: Remove unused database table `event_reference_hashes`.
- changelog.d/13220.feature: Add support for room version 10.
- changelog.d/13224.misc: Further reduce queries used sending events when creating new rooms. Contributed by Nick @ Beeper (@fizzadar).
- changelog.d/13231.doc: Provide an example of using the Admin API. Contributed by @jejo86.
- changelog.d/13233.doc: Move the documentation for how URL previews work to the URL preview module.
- changelog.d/13239.removal: Drop support for calling `/_matrix/client/v3/account/3pid/bind` without an `id_access_token`, which was not permitted by the spec. Contributed by @Vetchu.
- changelog.d/13240.misc: Call the v2 identity service `/3pid/unbind` endpoint, rather than v1.
- changelog.d/13242.misc: Use an asynchronous cache wrapper for the get event cache. Contributed by Nick @ Beeper (@fizzadar).
- changelog.d/13251.misc: Optimise federation sender and appservice pusher event stream processing queries. Contributed by Nick @ Beeper (@fizzadar).
- changelog.d/13253.misc: Preparatory work for a per-room rate limiter on joins.
- changelog.d/13254.misc: Preparatory work for a per-room rate limiter on joins.
- changelog.d/13255.misc: Preparatory work for a per-room rate limiter on joins.
- changelog.d/13257.misc: Log the stack when waiting for an entire room to be un-partial stated.
- changelog.d/13260.misc: Clean-up tests for notifications.
- changelog.d/13261.doc: Move the documentation for how URL previews work to the URL preview module.
- changelog.d/13263.bugfix: Fix a bug introduced in Synapse 1.15.0 where adding a user through the Synapse Admin API with a phone number would fail if the "enable_email_notifs" and "email_notifs_for_new_users" options were enabled. Contributed by @thomasweston12.
- changelog.d/13266.misc: Do not fail build if complement with workers fails.
- changelog.d/13267.misc: Don't pull out state in `compute_event_context` for unconflicted state.
- changelog.d/13270.bugfix: Fix a bug introduced in Synapse 1.40 where a user invited to a restricted room would be briefly unable to join.
- changelog.d/13274.misc: Don't pull out state in `compute_event_context` for unconflicted state.
- changelog.d/13278.bugfix: Fix a long-standing bug where in rare instances Synapse could store the incorrect state for a room after a state resolution.
- changelog.d/13279.misc: Reduce the rebuild time for the complement-synapse docker image.
- changelog.d/13284.misc: Update locked version of `frozendict` to 2.3.2, which has a fix for a memory leak.
- changelog.d/13292.misc: Make `DictionaryCache` expire full entries if they haven't been queried in a while, even if specific keys have been queried recently.
contrib/workers-bash-scripts/create-multiple-workers.md (new file; 31 lines)

@@ -0,0 +1,31 @@
# Creating multiple workers with a bash script

Setting up multiple worker configuration files manually can be time-consuming.
You can alternatively create multiple worker configuration files with a simple `bash` script. For example:

```sh
#!/bin/bash
for i in {1..5}
do
cat << EOF >> generic_worker$i.yaml
worker_app: synapse.app.generic_worker
worker_name: generic_worker$i

# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: 808$i
    resources:
      - names: [client, federation]

worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml
EOF
done
```

This would create five generic workers, each with a unique `worker_name` and listening on ports 8081-8085.

Customise the script to your needs.
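One possible way to run the snippet above (the script file name here is hypothetical):

```sh
chmod +x create_workers.sh   # hypothetical file name for the loop above
./create_workers.sh
ls generic_worker*.yaml      # generic_worker1.yaml ... generic_worker5.yaml
```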
debian/changelog (vendored; 6 lines changed)

@@ -1,3 +1,9 @@
matrix-synapse-py3 (1.63.0~rc1) stable; urgency=medium

  * New Synapse release 1.63.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 12 Jul 2022 11:26:02 +0100

matrix-synapse-py3 (1.62.0) stable; urgency=medium

  * New Synapse release 1.62.0.
@@ -62,7 +62,13 @@ WORKDIR /synapse
# Copy just what we need to run `poetry export`...
COPY pyproject.toml poetry.lock /synapse/

RUN /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt

# If specified, we won't verify the hashes of dependencies.
# This is only needed if the hashes of dependencies cannot be checked for some
# reason, such as when a git repository is used directly as a dependency.
ARG TEST_ONLY_SKIP_DEP_HASH_VERIFICATION

RUN /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt ${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}

###
### Stage 1: builder
@@ -85,6 +91,7 @@ RUN \
    openssl \
    rustc \
    zlib1g-dev \
    git \
    && rm -rf /var/lib/apt/lists/*

# To speed up rebuilds, install all of the dependencies before we copy over
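A side note on the hunk above: `${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}` is standard shell parameter expansion that appends the flag only when the build arg is set. A quick sketch of the two cases:

```sh
# Build arg unset: the expansion is empty, so dependency hashes are verified as usual.
/root/.local/bin/poetry export --extras all -o /synapse/requirements.txt

# Build arg set (e.g. --build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION=1): the flag is appended.
/root/.local/bin/poetry export --extras all -o /synapse/requirements.txt --without-hashes
```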
@@ -67,6 +67,13 @@ The following environment variables are supported in `generate` mode:
* `UID`, `GID`: the user id and group id to use for creating the data
  directories. If unset, and no user is set via `docker run --user`, defaults
  to `991`, `991`.
* `SYNAPSE_LOG_LEVEL`: the log level to use (one of `DEBUG`, `INFO`, `WARNING` or `ERROR`).
  Defaults to `INFO`.
* `SYNAPSE_LOG_SENSITIVE`: if set and the log level is set to `DEBUG`, Synapse
  will log sensitive information such as access tokens.
  This should not be needed unless you are a developer attempting to debug something
  particularly tricky.


## Postgres
@@ -4,42 +4,58 @@
#
# Instructions for building this image from those it depends on are detailed in this guide:
# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse

ARG SYNAPSE_VERSION=latest

# first of all, we create a base image with a postgres server and database,
# which we can copy into the target image. For repeated rebuilds, this is
# much faster than apt installing postgres each time.
#
# This trick only works because (a) the Synapse image happens to have all the
# shared libraries that postgres wants, (b) we use a postgres image based on
# the same debian version as Synapse's docker image (so the versions of the
# shared libraries match).

FROM postgres:13-bullseye AS postgres_base
# initialise the database cluster in /var/lib/postgresql
RUN gosu postgres initdb --locale=C --encoding=UTF-8 --auth-host password

# Configure a password and create a database for Synapse
RUN echo "ALTER USER postgres PASSWORD 'somesecret'" | gosu postgres postgres --single
RUN echo "CREATE DATABASE synapse" | gosu postgres postgres --single

# now build the final image, based on the Synapse image.

FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION
# copy the postgres installation over from the image we built above
RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
COPY --from=postgres_base /var/lib/postgresql /var/lib/postgresql
COPY --from=postgres_base /usr/lib/postgresql /usr/lib/postgresql
COPY --from=postgres_base /usr/share/postgresql /usr/share/postgresql
RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql
ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
ENV PGDATA=/var/lib/postgresql/data

# Install postgresql
RUN apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -yqq postgresql-13
# Extend the shared homeserver config to disable rate-limiting,
# set Complement's static shared secret, enable registration, amongst other
# tweaks to get Synapse ready for testing.
# To do this, we copy the old template out of the way and then include it
# with Jinja2.
RUN mv /conf/shared.yaml.j2 /conf/shared-orig.yaml.j2
COPY conf/workers-shared-extra.yaml.j2 /conf/shared.yaml.j2

# Configure a user and create a database for Synapse
RUN pg_ctlcluster 13 main start && su postgres -c "echo \
    \"ALTER USER postgres PASSWORD 'somesecret'; \
    CREATE DATABASE synapse \
    ENCODING 'UTF8' \
    LC_COLLATE='C' \
    LC_CTYPE='C' \
    template=template0;\" | psql" && pg_ctlcluster 13 main stop
WORKDIR /data

# Extend the shared homeserver config to disable rate-limiting,
# set Complement's static shared secret, enable registration, amongst other
# tweaks to get Synapse ready for testing.
# To do this, we copy the old template out of the way and then include it
# with Jinja2.
RUN mv /conf/shared.yaml.j2 /conf/shared-orig.yaml.j2
COPY conf/workers-shared-extra.yaml.j2 /conf/shared.yaml.j2
COPY conf/postgres.supervisord.conf /etc/supervisor/conf.d/postgres.conf

WORKDIR /data
# Copy the entrypoint
COPY conf/start_for_complement.sh /

COPY conf/postgres.supervisord.conf /etc/supervisor/conf.d/postgres.conf
# Expose nginx's listener ports
EXPOSE 8008 8448

# Copy the entrypoint
COPY conf/start_for_complement.sh /
ENTRYPOINT ["/start_for_complement.sh"]

# Expose nginx's listener ports
EXPOSE 8008 8448

ENTRYPOINT ["/start_for_complement.sh"]

# Update the healthcheck to have a shorter check interval
HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
    CMD /bin/sh /healthcheck.sh
# Update the healthcheck to have a shorter check interval
HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \
    CMD /bin/sh /healthcheck.sh
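For local experimentation, the image defined above might be built along these lines. This is a sketch only: the tag, build context, and `docker/complement/` path are assumptions, and the README-testing guide linked at the top of the file is the authoritative procedure.

```sh
# Assumed layout: this Dockerfile lives at docker/complement/Dockerfile in a Synapse checkout.
docker build -t complement-synapse \
    --build-arg SYNAPSE_VERSION=latest \
    -f docker/complement/Dockerfile docker/complement
```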
@@ -1,5 +1,5 @@
[program:postgres]
command=/usr/local/bin/prefix-log /usr/bin/pg_ctlcluster 13 main start --foreground
command=/usr/local/bin/prefix-log gosu postgres postgres

# Only start if START_POSTGRES=1
autostart=%(ENV_START_POSTGRES)s
@@ -59,6 +59,9 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
    synchrotron, \
    appservice, \
    pusher"

  # Improve startup times by using a launcher based on fork()
  export SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER=1
else
  # Empty string here means 'main process only'
  export SYNAPSE_WORKER_TYPES=""
@@ -81,6 +81,8 @@ rc_invites:

federation_rr_transactions_per_room_per_second: 9999

allow_device_name_lookup_over_federation: true

## Experimental Features ##

experimental_features:
@@ -1,3 +1,24 @@
{% if use_forking_launcher %}
[program:synapse_fork]
command=/usr/local/bin/python -m synapse.app.complement_fork_starter
  {{ main_config_path }}
  synapse.app.homeserver
  --config-path="{{ main_config_path }}"
  --config-path=/conf/workers/shared.yaml
  {%- for worker in workers %}
  -- {{ worker.app }}
  --config-path="{{ main_config_path }}"
  --config-path=/conf/workers/shared.yaml
  --config-path=/conf/workers/{{ worker.name }}.yaml
  {%- endfor %}
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
autorestart=unexpected
exitcodes=0

{% else %}
[program:synapse_main]
command=/usr/local/bin/prefix-log /usr/local/bin/python -m synapse.app.homeserver
  --config-path="{{ main_config_path }}"
@@ -13,7 +34,7 @@ autorestart=unexpected
exitcodes=0

{% for worker in workers %}
{% for worker in workers %}
[program:synapse_{{ worker.name }}]
command=/usr/local/bin/prefix-log /usr/local/bin/python -m {{ worker.app }}
  --config-path="{{ main_config_path }}"
@@ -27,4 +48,5 @@ stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0

{% endfor %}
{% endfor %}
{% endif %}
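To make the forking-launcher branch of this template concrete: with a single worker (say `generic_worker1` running `synapse.app.generic_worker`), and assuming `main_config_path` renders to `/conf/homeserver.yaml`, the generated command would be roughly:

```sh
# Illustrative rendering of [program:synapse_fork]; worker name and config path are assumed.
/usr/local/bin/python -m synapse.app.complement_fork_starter \
    /conf/homeserver.yaml \
    synapse.app.homeserver \
    --config-path="/conf/homeserver.yaml" \
    --config-path=/conf/workers/shared.yaml \
    -- synapse.app.generic_worker \
    --config-path="/conf/homeserver.yaml" \
    --config-path=/conf/workers/shared.yaml \
    --config-path=/conf/workers/generic_worker1.yaml
```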
@@ -2,7 +2,11 @@ version: 1

formatters:
  precise:
{% if include_worker_name_in_log_line %}
    format: '{{ worker_name }} | %(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
{% else %}
    format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
{% endif %}

handlers:
{% if LOG_FILE_PATH %}
@@ -45,11 +49,17 @@ handlers:
    class: logging.StreamHandler
    formatter: precise

{% if not SYNAPSE_LOG_SENSITIVE %}
{#
  If SYNAPSE_LOG_SENSITIVE is unset, then override synapse.storage.SQL to INFO
  so that DEBUG entries (containing sensitive information) are not emitted.
#}
loggers:
  synapse.storage.SQL:
    # beware: increasing this to DEBUG will make synapse log sensitive
    # information such as access tokens.
    level: INFO
{% endif %}

root:
  level: {{ SYNAPSE_LOG_LEVEL or "INFO" }}
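For illustration, the `include_worker_name_in_log_line` branch prefixes each log line with the worker's name, which matters when the forking launcher multiplexes every worker onto one stdout. A hypothetical rendered line (all values invented):

```sh
# generic_worker1 | 2022-07-12 11:26:02,123 - synapse.http.server - 95 - INFO - GET-1 - Processed request
```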
@@ -26,6 +26,13 @@
# * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
# * SYNAPSE_TLS_KEY: Path to a TLS key. If this and SYNAPSE_TLS_CERT are specified,
#   Nginx will be configured to serve TLS on port 8448.
# * SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER: Whether to use the forking launcher,
#   only intended for usage in Complement at the moment.
#   No stability guarantees are provided.
# * SYNAPSE_LOG_LEVEL: Set this to DEBUG, INFO, WARNING or ERROR to change the
#   log level. INFO is the default.
# * SYNAPSE_LOG_SENSITIVE: If unset, SQL and SQL values won't be logged,
#   regardless of the SYNAPSE_LOG_LEVEL setting.
#
# NOTE: According to Complement's ENTRYPOINT expectations for a homeserver image (as defined
# in the project's README), this script may be run multiple times, and functionality should
@@ -35,7 +42,7 @@ import os
import subprocess
import sys
from pathlib import Path
from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Set
from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Optional, Set

import yaml
from jinja2 import Environment, FileSystemLoader
@@ -525,6 +532,7 @@ def generate_worker_files(
        "/etc/supervisor/conf.d/synapse.conf",
        workers=worker_descriptors,
        main_config_path=config_path,
        use_forking_launcher=environ.get("SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER"),
    )

    # healthcheck config
@@ -548,18 +556,25 @@ def generate_worker_log_config(
    Returns: the path to the generated file
    """
    # Check whether we should write worker logs to disk, in addition to the console
    extra_log_template_args = {}
    extra_log_template_args: Dict[str, Optional[str]] = {}
    if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"):
        extra_log_template_args["LOG_FILE_PATH"] = "{dir}/logs/{name}.log".format(
            dir=data_dir, name=worker_name
        )
        extra_log_template_args["LOG_FILE_PATH"] = f"{data_dir}/logs/{worker_name}.log"

    extra_log_template_args["SYNAPSE_LOG_LEVEL"] = environ.get("SYNAPSE_LOG_LEVEL")
    extra_log_template_args["SYNAPSE_LOG_SENSITIVE"] = environ.get(
        "SYNAPSE_LOG_SENSITIVE"
    )

    # Render and write the file
    log_config_filepath = "/conf/workers/{name}.log.config".format(name=worker_name)
    log_config_filepath = f"/conf/workers/{worker_name}.log.config"
    convert(
        "/conf/log.config",
        log_config_filepath,
        worker_name=worker_name,
        **extra_log_template_args,
        include_worker_name_in_log_line=environ.get(
            "SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER"
        ),
    )
    return log_config_filepath
@@ -110,7 +110,11 @@ def generate_config_from_template(

    log_config_file = environ["SYNAPSE_LOG_CONFIG"]
    log("Generating log config file " + log_config_file)
    convert("/conf/log.config", log_config_file, environ)
    convert(
        "/conf/log.config",
        log_config_file,
        {**environ, "include_worker_name_in_log_line": False},
    )

    # Hopefully we already have a signing key, but generate one if not.
    args = [
@@ -35,7 +35,6 @@
- [Application Services](application_services.md)
- [Server Notices](server_notices.md)
- [Consent Tracking](consent_tracking.md)
- [URL Previews](development/url_previews.md)
- [User Directory](user_directory.md)
- [Message Retention Policies](message_retention_policies.md)
- [Pluggable Modules](modules/index.md)
@@ -69,6 +68,7 @@
- [Federation](usage/administration/admin_api/federation.md)
- [Manhole](manhole.md)
- [Monitoring](metrics-howto.md)
- [Reporting Anonymised Statistics](usage/administration/monitoring/reporting_anonymised_statistics.md)
- [Understanding Synapse Through Grafana Graphs](usage/administration/understanding_synapse_through_grafana_graphs.md)
- [Useful SQL for Admins](usage/administration/useful_sql_for_admins.md)
- [Database Maintenance Tools](usage/administration/database_maintenance_tools.md)
@@ -80,6 +80,7 @@
# Development
- [Contributing Guide](development/contributing_guide.md)
- [Code Style](code_style.md)
- [Reviewing Code](development/reviews.md)
- [Release Cycle](development/releases.md)
- [Git Usage](development/git.md)
- [Testing]()
@@ -59,6 +59,7 @@ The following fields are possible in the JSON response body:
- `guest_access` - Whether guests can join the room. One of: ["can_join", "forbidden"].
- `history_visibility` - Who can see the room history. One of: ["invited", "joined", "shared", "world_readable"].
- `state_events` - Total number of state_events of a room. Complexity of the room.
- `room_type` - The type of the room taken from the room's creation event; for example "m.space" if the room is a space. If the room does not define a type, the value will be `null`.
* `offset` - The current pagination offset in rooms. This parameter should be
  used instead of `next_token` for room offset as `next_token` is
  not intended to be parsed.
@@ -101,7 +102,8 @@ A response body like the following is returned:
      "join_rules": "invite",
      "guest_access": null,
      "history_visibility": "shared",
      "state_events": 93534
      "state_events": 93534,
      "room_type": "m.space"
    },
    ... (8 hidden items) ...
    {
@@ -118,7 +120,8 @@ A response body like the following is returned:
      "join_rules": "invite",
      "guest_access": null,
      "history_visibility": "shared",
      "state_events": 8345
      "state_events": 8345,
      "room_type": null
    }
  ],
  "offset": 0,
@@ -151,7 +154,8 @@ A response body like the following is returned:
      "join_rules": "invite",
      "guest_access": null,
      "history_visibility": "shared",
      "state_events": 8
      "state_events": 8,
      "room_type": null
    }
  ],
  "offset": 0,
@@ -184,7 +188,8 @@ A response body like the following is returned:
      "join_rules": "invite",
      "guest_access": null,
      "history_visibility": "shared",
      "state_events": 93534
      "state_events": 93534,
      "room_type": null
    },
    ... (98 hidden items) ...
    {
@@ -201,7 +206,8 @@ A response body like the following is returned:
      "join_rules": "invite",
      "guest_access": null,
      "history_visibility": "shared",
      "state_events": 8345
      "state_events": 8345,
      "room_type": "m.space"
    }
  ],
  "offset": 0,
@@ -238,7 +244,9 @@ A response body like the following is returned:
      "join_rules": "invite",
      "guest_access": null,
      "history_visibility": "shared",
      "state_events": 93534
      "state_events": 93534,
      "room_type": "m.space"

    },
    ... (48 hidden items) ...
    {
@@ -255,7 +263,9 @@ A response body like the following is returned:
      "join_rules": "invite",
      "guest_access": null,
      "history_visibility": "shared",
      "state_events": 8345
      "state_events": 8345,
      "room_type": null

    }
  ],
  "offset": 100,
@@ -290,6 +300,8 @@ The following fields are possible in the JSON response body:
* `guest_access` - Whether guests can join the room. One of: ["can_join", "forbidden"].
* `history_visibility` - Who can see the room history. One of: ["invited", "joined", "shared", "world_readable"].
* `state_events` - Total number of state_events of a room. Complexity of the room.
* `room_type` - The type of the room taken from the room's creation event; for example "m.space" if the room is a space.
  If the room does not define a type, the value will be `null`.

The API is:

@@ -317,7 +329,8 @@ A response body like the following is returned:
      "join_rules": "invite",
      "guest_access": null,
      "history_visibility": "shared",
      "state_events": 93534
      "state_events": 93534,
      "room_type": "m.space"
    }
```
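As a usage sketch for the field added above (the server name and token are placeholders; `GET /_synapse/admin/v1/rooms` is the List Room endpoint this page documents):

```sh
curl --header "Authorization: Bearer <admin_access_token>" \
    "https://synapse.example.com/_synapse/admin/v1/rooms?limit=1"
# Each room object in the response now carries "room_type", e.g. "m.space" or null.
```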
@@ -124,9 +124,8 @@ Body parameters:
- `address` - string. Value of third-party ID.
  belonging to a user.
- `external_ids` - array, optional. Allow setting the identifier of the external identity
  provider for SSO (Single sign-on). Details in
  [Sample Configuration File](../usage/configuration/homeserver_sample_config.html)
  section `sso` and `oidc_providers`.
  provider for SSO (Single sign-on). Details in the configuration manual under the
  sections [sso](../usage/configuration/config_documentation.md#sso) and [oidc_providers](../usage/configuration/config_documentation.md#oidc_providers).
- `auth_provider` - string. ID of the external identity provider. Value of `idp_id`
  in the homeserver configuration. Note that no error is raised if the provided
  value is not in the homeserver configuration.
@@ -545,7 +544,7 @@ Gets a list of all local media that a specific `user_id` has created.
These are media that the user has uploaded themselves
([local media](../media_repository.md#local-media)), as well as
[URL preview images](../media_repository.md#url-previews) requested by the user if the
[feature is enabled](../development/url_previews.md).
[feature is enabled](../usage/configuration/config_documentation.md#url_preview_enabled).

By default, the response is ordered by descending creation date and ascending media ID.
The newest media is on top. You can change the order with parameters
@@ -70,82 +70,61 @@ on save as they take a while and can be very resource intensive.
- Avoid wildcard imports (`from synapse.types import *`) and
  relative imports (`from .types import UserID`).

## Configuration file format
## Configuration code and documentation format

The [sample configuration file](./sample_config.yaml) acts as a
When adding a configuration option to the code, if several settings are grouped into a single dict, ensure that your code
correctly handles the top-level option being set to `None` (as it will be if no sub-options are enabled).

The [configuration manual](usage/configuration/config_documentation.md) acts as a
reference to Synapse's configuration options for server administrators.
Remember that many readers will be unfamiliar with YAML and server
administration in general, so that it is important that the file be as
easy to understand as possible, which includes following a consistent
format.
administration in general, so it is important that when you add
a configuration option the documentation be as easy to understand as possible, which
includes following a consistent format.

Some guidelines follow:

- Sections should be separated with a heading consisting of a single
  line prefixed and suffixed with `##`. There should be **two** blank
  lines before the section header, and **one** after.
- Each option should be listed in the file with the following format:
  - A comment describing the setting. Each line of this comment
    should be prefixed with a hash (`#`) and a space.
- Each option should be listed in the config manual with the following format:

  - The name of the option, prefixed by `###`.

    The comment should describe the default behaviour (ie, what
  - A comment which describes the default behaviour (i.e. what
    happens if the setting is omitted), as well as what the effect
    will be if the setting is changed.

    Often, the comment end with something like "uncomment the
    following to <do action>".

  - A line consisting of only `#`.
  - A commented-out example setting, prefixed with only `#`.
  - An example setting, using backticks to define the code block

    For boolean (on/off) options, convention is that this example
    should be the *opposite* to the default (so the comment will end
    with "Uncomment the following to enable [or disable]
    <feature>." For other options, the example should give some
    non-default value which is likely to be useful to the reader.
    should be the *opposite* to the default. For other options, the example should give
    some non-default value which is likely to be useful to the reader.

- There should be a blank line between each option.
- Where several settings are grouped into a single dict, *avoid* the
  convention where the whole block is commented out, resulting in
  comment lines starting `# #`, as this is hard to read and confusing
  to edit. Instead, leave the top-level config option uncommented, and
  follow the conventions above for sub-options. Ensure that your code
  correctly handles the top-level option being set to `None` (as it
  will be if no sub-options are enabled).
- Lines should be wrapped at 80 characters.
- Use two-space indents.
- `true` and `false` are spelt thus (as opposed to `True`, etc.)
- Use single quotes (`'`) rather than double-quotes (`"`) or backticks
  (`` ` ``) to refer to configuration options.
- There should be a horizontal rule between each option, which can be achieved by adding `---` before and
  after the option.
- `true` and `false` are spelt thus (as opposed to `True`, etc.)

Example:

---
### `modules`

Use the `module` sub-option to add a module under `modules` to extend functionality.
The `module` setting then has a sub-option, `config`, which can be used to define some configuration
for the `module`.

Defaults to none.

Example configuration:
```yaml
## Frobnication ##

# The frobnicator will ensure that all requests are fully frobnicated.
# To enable it, uncomment the following.
#
#frobnicator_enabled: true

# By default, the frobnicator will frobnicate with the default frobber.
# The following will make it use an alternative frobber.
#
#frobincator_frobber: special_frobber

# Settings for the frobber
#
frobber:
  # frobbing speed. Defaults to 1.
  #
  #speed: 10

  # frobbing distance. Defaults to 1000.
  #
  #distance: 100
modules:
  - module: my_super_module.MySuperClass
    config:
      do_thing: true
  - module: my_other_super_module.SomeClass
    config: {}
```
---

Note that the sample configuration is generated from the synapse code
and is maintained by a script, `scripts-dev/generate_sample_config.sh`.
Making sure that the output from this script matches the desired format
is left as an exercise for the reader!
@@ -309,6 +309,10 @@ The above will run a monolithic (single-process) Synapse with SQLite as the data
- Passing `POSTGRES=1` as an environment variable to use the Postgres database instead.
- Passing `WORKERS=1` as an environment variable to use a workerised setup instead. This option implies the use of Postgres.

To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g.:
```sh
SYNAPSE_TEST_LOG_LEVEL=DEBUG COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages
```

### Prettier formatting with `gotestfmt`

@@ -347,7 +351,7 @@ To prepare a Pull Request, please:
3. `git push` your commit to your fork of Synapse;
4. on GitHub, [create the Pull Request](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request);
5. add a [changelog entry](#changelog) and push it to your Pull Request;
6. for most contributors, that's all - however, if you are a member of the organization `matrix-org`, on GitHub, please request a review from `matrix.org / Synapse Core`.
6. that's it for now; a non-draft pull request will automatically request review from the team;
7. if you need to update your PR, please avoid rebasing and just add new commits to your branch.

@@ -523,10 +527,13 @@ From this point, you should:
1. Look at the results of the CI pipeline.
   - If there is any error, fix the error.
2. If a developer has requested changes, make these changes and let us know if it is ready for a developer to review again.
   - A pull request is a conversation; if you disagree with the suggestions, please respond and discuss it.
3. Create a new commit with the changes.
   - Please do NOT overwrite the history. New commits make the reviewer's life easier.
   - Push these commits to your Pull Request.
4. Back to 1.
5. Once the pull request is ready for review again please re-request review from whichever developer did your initial
   review (or leave a comment in the pull request that you believe all required changes have been done).

Once both the CI and the developers are happy, the patch will be merged into Synapse and released shortly!
docs/development/reviews.md (new file; 41 lines)

@@ -0,0 +1,41 @@
Some notes on how we do reviews
===============================

The Synapse team works off a shared review queue -- any new pull request for
Synapse (or related projects) has a review requested from the entire team. Team
members should process this queue in the following priority order:

* Any high-urgency pull requests (e.g. fixes for broken continuous integration
  or fixes for release blockers);
* Follow-up reviews for pull requests which have previously received reviews;
* Any remaining pull requests.

For the latter two categories above, older pull requests should be prioritised.

It is explicit that no priority is given to pull requests from the team
(vs from the community). If a pull request requires a quick turnaround, please
explicitly communicate this via [#synapse-dev:matrix.org](https://matrix.to/#/#synapse-dev:matrix.org)
or as a comment on the pull request.

Once an initial review has been completed and the author has made additional changes,
follow-up reviews should go back to the same reviewer. This helps build a shared
context and conversation between author and reviewer.

As a team we aim to keep the number of inflight pull requests to a minimum, to ensure
that ongoing work is finished before new work is started.

Performing a review
-------------------

To communicate the status of each pull request to the rest of the team, team
members should do the following:

* Assign themselves to the pull request (they should stay assigned to the
  pull request until it is merged or closed, or they are no longer the reviewer);
* Review the pull request by leaving comments, questions, and suggestions;
* Mark the pull request appropriately (as needing changes or accepted).

If you are unsure about a particular part of the pull request (or are not confident
in your understanding of part of the code) then ask questions or request review
from the team again. When requesting review from the team, be sure to leave a comment
with the rationale on why you're putting it back in the queue.
@@ -1,61 +0,0 @@

URL Previews
============

The `GET /_matrix/media/r0/preview_url` endpoint provides a generic preview API
for URLs, which outputs [Open Graph](https://ogp.me/) responses (with some Matrix
specific additions).
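As an illustrative sketch of calling the endpoint (the host, port, and token are placeholders; like other client-server APIs, it requires an access token):

```sh
# Request a preview of https://matrix.org; the URL must be percent-encoded.
curl --header "Authorization: Bearer <access_token>" \
    "http://localhost:8008/_matrix/media/r0/preview_url?url=https%3A%2F%2Fmatrix.org"
```

The response is a JSON object of Open Graph fields such as `og:title` and `og:image`, plus Matrix-specific additions such as image size metadata.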
This does have trade-offs compared to other designs:

* Pros:
  * Simple and flexible; can be used by any client at any point.
* Cons:
  * If each homeserver provides one of these independently, all the HSes in a
    room may needlessly DoS the target URI.
  * The URL metadata must be stored somewhere, rather than just using Matrix
    itself to store the media.
  * Matrix cannot be used to distribute the metadata between homeservers.
When Synapse is asked to preview a URL it does the following:

1. Checks against a URL blacklist (defined as `url_preview_url_blacklist` in the
   config).
2. Checks the in-memory cache by URL and returns the result if it exists. (This
   is also used to de-duplicate processing of multiple in-flight requests at once.)
3. Kicks off a background process to generate a preview:
   1. Checks the database cache by URL and timestamp and returns the result if it
      has not expired and was successful (a 2xx return code).
   2. Checks if the URL matches an [oEmbed](https://oembed.com/) pattern. If it
      does, updates the URL to download.
   3. Downloads the URL and stores it into a file via the media storage provider
      and saves the local media metadata.
   4. If the media is an image:
      1. Generates thumbnails.
      2. Generates an Open Graph response based on image properties.
   5. If the media is HTML:
      1. Decodes the HTML via the stored file.
      2. Generates an Open Graph response from the HTML.
      3. If a JSON oEmbed URL was found in the HTML via autodiscovery:
         1. Downloads the URL and stores it into a file via the media storage provider
            and saves the local media metadata.
         2. Converts the oEmbed response to an Open Graph response.
         3. Overrides any Open Graph data from the HTML with data from oEmbed.
      4. If an image exists in the Open Graph response:
         1. Downloads the URL and stores it into a file via the media storage
            provider and saves the local media metadata.
         2. Generates thumbnails.
         3. Updates the Open Graph response based on image properties.
   6. If the media is JSON and an oEmbed URL was found:
      1. Converts the oEmbed response to an Open Graph response.
      2. If a thumbnail or image is in the oEmbed response:
         1. Downloads the URL and stores it into a file via the media storage
            provider and saves the local media metadata.
         2. Generates thumbnails.
         3. Updates the Open Graph response based on image properties.
   7. Stores the result in the database cache.
4. Returns the result.

The in-memory cache expires after 1 hour.

Expired entries in the database cache (and their associated media files) are
deleted every 10 seconds. The default expiration time is 1 hour from download.
@@ -49,9 +49,8 @@ as follows:

* For other installation mechanisms, see the documentation provided by the
  maintainer.

To enable the JSON web token integration, you should then add a `jwt_config` option
to your configuration file. See the [configuration manual](usage/configuration/config_documentation.md#jwt_config) for some
sample settings.
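As a minimal sketch of what such a section can look like (the secret and algorithm values are placeholders; see the configuration manual for the authoritative list of sub-options):

```yaml
jwt_config:
  enabled: true
  # Shared secret used to verify the signature on incoming tokens (placeholder).
  secret: "provided-by-your-issuer"
  algorithm: "HS256"
```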
## How to test JWT as a developer
@@ -13,8 +13,10 @@ environments where untrusted users have shell access.

## Configuring the manhole

To enable it, first add the `manhole` listener configuration in your
`homeserver.yaml`. You can find information on how to do that
in the [configuration manual](usage/configuration/config_documentation.md#manhole_settings).
The configuration is slightly different if you're using docker.
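For illustration, a manhole listener bound to localhost might look like the following (the port is a common choice, not a requirement):

```yaml
listeners:
  - port: 9000
    type: manhole
    # Only listen on loopback: the manhole gives shell-level access.
    bind_addresses: ['127.0.0.1']
```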
#### Docker config
@@ -7,8 +7,7 @@ The media repository

  users.
* caches avatars, attachments and their thumbnails for media uploaded by remote
  users.
* caches resources and thumbnails used for URL previews.

All media in Matrix can be identified by a unique
[MXC URI](https://spec.matrix.org/latest/client-server-api/#matrix-content-mxc-uris),

@@ -59,8 +58,6 @@ remote_thumbnail/matrix.org/aa/bb/cccccccccccccccccccc/128-96-image-jpeg

Note that `remote_thumbnail/` does not have an `s`.

## URL Previews

When generating previews for URLs, Synapse may download and cache various
resources, including images. These resources are assigned temporary media IDs
@@ -49,9 +49,9 @@ clients.

## Server configuration

Support for this feature can be enabled and configured by adding the
`retention` option to the Synapse configuration file (see the
[configuration manual](usage/configuration/config_documentation.md#retention)).

To enable support for message retention policies, set the `enabled`
sub-option in this section to `true`.
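For example, the minimal configuration to turn the feature on (a sketch; all other sub-options keep their defaults):

```yaml
retention:
  enabled: true
```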
@@ -65,8 +65,8 @@ message retention policy configured in its state. This allows server
admins to ensure that messages are never kept indefinitely in a server's
database.

A default policy can be defined by adding the `retention` option to
the configuration file with these sub-options:

```yaml
default_policy:
@@ -86,8 +86,8 @@ Purge jobs are the jobs that Synapse runs in the background to purge
expired events from the database. They are only run if support for
message retention policies is enabled in the server's configuration. If
no configuration for purge jobs is provided by the server admin,
Synapse will use a default configuration, which is described in the
[configuration manual](usage/configuration/config_documentation.md#retention).

Some server admins might want finer control over when events are removed,
depending on an event's room's policy. This can be done by setting the
@@ -137,8 +137,8 @@ the server's database.

### Lifetime limits

Server admins can set limits on the values of `max_lifetime` to use when
purging old events in a room. These limits can be defined under the
`retention` option in the configuration file:

```yaml
allowed_lifetime_min: 1d
@@ -45,8 +45,8 @@ as follows:

  maintainer.

To enable the OpenID integration, you should then add a section to the `oidc_providers`
setting in your configuration file.
See the [configuration manual](usage/configuration/config_documentation.md#oidc_providers) for some sample settings, as well as
the text below for example configurations for specific providers.
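As an illustrative skeleton only (every value here is a placeholder; the provider-specific sections below give real-world settings):

```yaml
oidc_providers:
  - idp_id: my_idp
    idp_name: "My OpenID provider"
    issuer: "https://accounts.example.com/"
    client_id: "provided-by-your-provider"
    client_secret: "provided-by-your-provider"
    scopes: ["openid", "profile"]
```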
## Sample configs
@@ -143,6 +143,14 @@ to do step 2.

It is safe to kill the port script at any time and restart it.

However, under no circumstances should the SQLite database be `VACUUM`ed between
multiple runs of the script. Doing so can lead to an inconsistent copy of your database
in Postgres.
To avoid accidental errors, the script will check that SQLite's `auto_vacuum` mechanism
is disabled, but the script is not able to protect against a manual `VACUUM` operation
performed either by the administrator or by any automated task that the administrator
may have configured.
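If in doubt, you can check the setting yourself before running the script; `PRAGMA auto_vacuum` is standard SQLite, though the database filename below is a placeholder:

```sh
# 0 means auto_vacuum is disabled, which is what the port script expects.
sqlite3 homeserver.db "PRAGMA auto_vacuum;"
```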
Note that the database may take up significantly more space on disk
(25% - 100% more) after porting to Postgres.
@@ -66,8 +66,8 @@ in Synapse can be deactivated.

**NOTE**: This has an impact on security and is for testing purposes only!

To deactivate the certificate validation, the following setting must be added to
your [homeserver.yaml](../usage/configuration/homeserver_sample_config.md).

```yaml
use_insecure_ssl_client_just_for_testing_do_not_use: true
@@ -232,7 +232,9 @@ python -m synapse.app.homeserver \
    --report-stats=[yes|no]
```

... substituting an appropriate value for `--server-name` and choosing whether
or not to report usage statistics (hostname, Synapse version, uptime, total
users, etc.) to the developers via the `--report-stats` argument.
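For example, a complete invocation might look like this (a sketch; `my.domain.name` is a placeholder, and the earlier flags mirror the truncated block above):

```sh
python -m synapse.app.homeserver \
    --server-name my.domain.name \
    --config-path homeserver.yaml \
    --generate-config \
    --report-stats=yes
```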
This command will generate a config file for you that you can then customise, but it will
also generate a set of keys for you. These keys will allow your homeserver to
@@ -405,11 +407,11 @@ The recommended way to do so is to set up a reverse proxy on port

Alternatively, you can configure Synapse to expose an HTTPS port. To do
so, you will need to edit `homeserver.yaml`, as follows:

- First, under the `listeners` option, add the configuration for the
  TLS-enabled listener like so:
  ```yaml
  listeners:
    - port: 8448
      type: http
      tls: true

@@ -417,9 +419,11 @@ so, you will need to edit `homeserver.yaml`, as follows:

        - names: [client, federation]
  ```
- You will also need to add the options `tls_certificate_path` and
  `tls_private_key_path` to your configuration file, for example as sketched
  below. You will need to manage provisioning of these certificates yourself.
- You can find more information about these options as well as how to configure Synapse in the
  [configuration manual](../usage/configuration/config_documentation.md).
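A sketch of those two options (the paths are placeholders; point them at your own certificate and key):

```yaml
tls_certificate_path: "/etc/synapse/tls/fullchain.pem"
tls_private_key_path: "/etc/synapse/tls/privkey.pem"
```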
If you are using your own certificate, be sure to use a `.pem` file that
includes the full certificate chain, including any intermediate certificates
@@ -89,6 +89,21 @@ process, for example:

dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```

# Upgrading to v1.64.0

## Delegation of email validation no longer supported

As of this version, Synapse no longer allows the tasks of verifying email address
ownership and password reset confirmation to be delegated to an identity server.
To continue to allow users to add email addresses to their homeserver accounts,
and perform password resets, make sure that Synapse is configured with a
working email server in the `email` configuration section (including, at a
minimum, a `notif_from` setting).
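A minimal sketch of such a section (host, port and addresses are placeholders; see the configuration manual for the full set of `email` sub-options):

```yaml
email:
  smtp_host: mail.example.com
  smtp_port: 587
  # The From address for notification and verification emails (placeholder).
  notif_from: "Your %(app)s homeserver <noreply@example.com>"
```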
Specifying an `email` setting under `account_threepid_delegates` will now cause
an error at startup.

# Upgrading to v1.62.0

## New signatures for spam checker callbacks
@@ -18,6 +18,11 @@ already on your `$PATH` depending on how Synapse was installed.

Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings.

## Making an Admin API request
For security reasons, we [recommend](reverse_proxy.md#synapse-administration-endpoints)
that the Admin API (`/_synapse/admin/...`) should be hidden from public view using a
reverse proxy. This means you should typically query the Admin API from a terminal on
the machine which runs Synapse.

Once you have your `access_token`, you will need to authenticate each request to an Admin API endpoint by
providing the token as either a query parameter or a request header. To add it as a request header in cURL:

@@ -25,5 +30,17 @@ providing the token as either a query parameter or a request header. To add it a

curl --header "Authorization: Bearer <access_token>" <the_rest_of_your_API_request>
```

For example, suppose we want to
[query the account](user_admin_api.md#query-user-account) of the user
`@foo:bar.com`. We need an admin access token (e.g.
`syt_AjfVef2_L33JNpafeif_0feKJfeaf0CQpoZk`), and we need to know which port
Synapse's [`client` listener](config_documentation.md#listeners) is listening
on (e.g. `8008`). Then we can use the following command to request the account
information from the Admin API.

```sh
curl --header "Authorization: Bearer syt_AjfVef2_L33JNpafeif_0feKJfeaf0CQpoZk" -X GET http://127.0.0.1:8008/_synapse/admin/v2/users/@foo:bar.com
```

For more details on access tokens in Matrix, please refer to the complete
[matrix spec documentation](https://matrix.org/docs/spec/client_server/r0.6.1#using-access-tokens).
@@ -0,0 +1,81 @@

# Reporting Anonymised Statistics

When generating your Synapse configuration file, you are asked whether you
would like to report anonymised statistics to Matrix.org. These statistics
provide the Matrix.org Foundation a glimpse into the number of Synapse homeservers
participating in the network, as well as statistics such as the number of
rooms being created and messages being sent. This feature is sometimes
affectionately called "phone-home" stats. Reporting
[is optional](../../configuration/config_documentation.md#report_stats)
and the reporting endpoint
[can be configured](../../configuration/config_documentation.md#report_stats_endpoint),
in case you would like to instead report statistics from a set of homeservers
to your own infrastructure.

This documentation aims to define the statistics available and the
homeserver configuration options that exist to tweak it.

## Available Statistics

The following statistics are sent to the configured reporting endpoint:
| Statistic Name | Type | Description |
|---|---|---|
| `memory_rss` | int | The memory usage of the process (in kilobytes on Unix-based systems, bytes on macOS). |
| `cpu_average` | int | CPU time in % of a single core (not % of all cores). |
| `homeserver` | string | The homeserver's server name. |
| `server_context` | string | An arbitrary string used to group statistics from a set of homeservers. |
| `timestamp` | int | The current time, represented as the number of seconds since the epoch. |
| `uptime_seconds` | int | The number of seconds since the homeserver was last started. |
| `python_version` | string | The Python version number in use (e.g. "3.7.1"). Taken from `sys.version_info`. |
| `total_users` | int | The number of registered users on the homeserver. |
| `total_nonbridged_users` | int | The number of users, excluding those created by an Application Service. |
| `daily_user_type_native` | int | The number of native users created in the last 24 hours. |
| `daily_user_type_guest` | int | The number of guest users created in the last 24 hours. |
| `daily_user_type_bridged` | int | The number of users created by Application Services in the last 24 hours. |
| `total_room_count` | int | The total number of rooms present on the homeserver. |
| `daily_active_users` | int | The number of unique users[^1] that have used the homeserver in the last 24 hours. |
| `monthly_active_users` | int | The number of unique users[^1] that have used the homeserver in the last 30 days. |
| `daily_active_rooms` | int | The number of rooms that have had a (state) event with the type `m.room.message` sent in them in the last 24 hours. |
| `daily_active_e2ee_rooms` | int | The number of rooms that have had a (state) event with the type `m.room.encrypted` sent in them in the last 24 hours. |
| `daily_messages` | int | The number of (state) events with the type `m.room.message` seen in the last 24 hours. |
| `daily_e2ee_messages` | int | The number of (state) events with the type `m.room.encrypted` seen in the last 24 hours. |
| `daily_sent_messages` | int | The number of (state) events sent by a local user with the type `m.room.message` seen in the last 24 hours. |
| `daily_sent_e2ee_messages` | int | The number of (state) events sent by a local user with the type `m.room.encrypted` seen in the last 24 hours. |
| `r30_users_all` | int | The number of 30 day retained users, defined as users who have created their accounts more than 30 days ago, where they were last seen at most 30 days ago and where those two timestamps are over 30 days apart. Includes clients that do not fit into the below r30 client types. |
| `r30_users_android` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Android" in the user agent string. |
| `r30_users_ios` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "iOS" in the user agent string. |
| `r30_users_electron` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Electron" in the user agent string. |
| `r30_users_web` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "Mozilla" or "Gecko" in the user agent string. |
| `r30v2_users_all` | int | The number of 30 day retained users, with a revised algorithm. Defined as users that appear more than once in the past 60 days, and have more than 30 days between the most and least recent appearances in the past 60 days. Includes clients that do not fit into the below r30 client types. |
| `r30v2_users_android` | int | The number of 30 day retained users, as defined above. Filtered only to clients with ("riot" or "element") and "android" (case-insensitive) in the user agent string. |
| `r30v2_users_ios` | int | The number of 30 day retained users, as defined above. Filtered only to clients with ("riot" or "element") and "ios" (case-insensitive) in the user agent string. |
| `r30v2_users_electron` | int | The number of 30 day retained users, as defined above. Filtered only to clients with ("riot" or "element") and "electron" (case-insensitive) in the user agent string. |
| `r30v2_users_web` | int | The number of 30 day retained users, as defined above. Filtered only to clients with "mozilla" or "gecko" (case-insensitive) in the user agent string. |
| `cache_factor` | int | The configured [`global factor`](../../configuration/config_documentation.md#caching) value for caching. |
| `event_cache_size` | int | The configured [`event_cache_size`](../../configuration/config_documentation.md#caching) value for caching. |
| `database_engine` | string | The database engine that is in use. Either "psycopg2" meaning PostgreSQL is in use, or "sqlite3" for SQLite3. |
| `database_server_version` | string | The version of the database server. Examples being "10.10" for PostgreSQL server version 10.10, and "3.38.5" for SQLite 3.38.5 installed on the system. |
| `log_level` | string | The log level in use. Examples are "INFO", "WARNING", "ERROR", "DEBUG", etc. |
[^1]: Native Matrix users and guests are always counted. If the
[`track_puppeted_user_ips`](../../configuration/config_documentation.md#track_puppeted_user_ips)
option is set to `true`, "puppeted" users (users that an Application Service has performed
[an action on behalf of](https://spec.matrix.org/v1.3/application-service-api/#identity-assertion))
will also be counted. Note that an Application Service can "puppet" any user in their
[user namespace](https://spec.matrix.org/v1.3/application-service-api/#registration),
not only users that the Application Service has created. If this happens, the Application Service
will additionally be counted as a user (irrespective of `track_puppeted_user_ips`).
## Using a Custom Statistics Collection Server

If statistics reporting is enabled, the endpoint that Synapse sends metrics to is configured by the
[`report_stats_endpoint`](../../configuration/config_documentation.md#report_stats_endpoint) config
option. By default, statistics are sent to Matrix.org.
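For example, pointing reporting at your own collector might look like this (the URL is a placeholder):

```yaml
report_stats: true
report_stats_endpoint: "https://stats.example.com/report-usage-stats/push"
```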
If you would like to set up your own statistics collection server and send metrics there, you may
consider using one of the following known implementations:

* [Matrix.org's Panopticon](https://github.com/matrix-org/panopticon)
* [Famedly's Barad-dûr](https://gitlab.com/famedly/company/devops/services/barad-dur)
@@ -317,7 +317,7 @@ Example configuration:
allow_public_rooms_without_auth: true
```
---
### `allow_public_rooms_over_federation`

If set to true, allows any other homeserver to fetch the server's public
rooms directory via federation. Defaults to false.

@@ -591,7 +591,7 @@ Example configuration:
dummy_events_threshold: 5
```
---
### `delete_stale_devices_after`

An optional duration. If set, Synapse will run a daily background task to log out and
delete any device that hasn't been accessed for more than the specified amount of time.
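Example configuration (a sketch; `1y` follows the duration syntax used elsewhere in this file):

```yaml
delete_stale_devices_after: 1y
```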
@@ -1257,6 +1257,98 @@ database:
  cp_max: 10
```
---
### `databases`

The `databases` option allows specifying a mapping between certain database tables and
database host details, spreading the load of a single Synapse instance across multiple
database backends. This is often referred to as "database sharding". This option is only
supported for PostgreSQL database backends.

**Important note:** This is a supported option, but is not currently used in production by the
Matrix.org Foundation. Proceed with caution and always make backups.

`databases` is a dictionary of arbitrarily-named database entries. Each entry is equivalent
to the value of the `database` homeserver config option (see above), with the addition of
a `data_stores` key. `data_stores` is an array of strings that specifies the data store(s)
(a defined label for a set of tables) that should be stored on the associated database
backend entry.

The currently defined values for `data_stores` are:

* `"state"`: Data that relates to state groups will be stored in this database.

  Specifically, that means the following tables:
  * `state_groups`
  * `state_group_edges`
  * `state_groups_state`

  And the following sequences:
  * `state_groups_seq_id`

* `"main"`: All other database tables and sequences.

All databases will end up with additional tables used for tracking database schema migrations
and any pending background updates. Synapse will create these automatically on startup when checking for
and/or performing database schema migrations.

To migrate an existing database configuration (e.g. all tables on a single database) to a different
configuration (e.g. the "main" data store on one database, and "state" on another), do the following:

1. Take a backup of your existing database. Things can and do go wrong and database corruption is no joke!
2. Ensure all pending database migrations have been applied and background updates have run. The simplest
   way to do this is to use the `update_synapse_database` script supplied with your Synapse installation.

   ```sh
   update_synapse_database --database-config homeserver.yaml --run-background-updates
   ```

3. Copy over the necessary tables and sequences from one database to the other. Tables relating to database
   migrations, schemas, schema versions and background updates should **not** be copied.

   As an example, say that you'd like to split out the "state" data store from an existing database which
   currently contains all data stores.

   Simply copy the tables and sequences defined above for the "state" data store from the existing database
   to the secondary database. As noted above, additional tables will be created in the secondary database
   when Synapse is started.

4. Modify/create the `databases` option in your `homeserver.yaml` to match the desired database configuration.
5. Start Synapse. Check that it starts up successfully and that things generally seem to be working.
6. Drop the old tables that were copied in step 3.

Only one of `database` or `databases` may be specified in your config, not both.

Example configuration:

```yaml
databases:
  basement_box:
    name: psycopg2
    txn_limit: 10000
    data_stores: ["main"]
    args:
      user: synapse_user
      password: secretpassword
      database: synapse_main
      host: localhost
      port: 5432
      cp_min: 5
      cp_max: 10

  my_other_database:
    name: psycopg2
    txn_limit: 10000
    data_stores: ["state"]
    args:
      user: synapse_user
      password: secretpassword
      database: synapse_state
      host: localhost
      port: 5432
      cp_min: 5
      cp_max: 10
```
---
## Logging ##
Config options related to logging.
@@ -1843,7 +1935,7 @@ Example configuration:
turn_shared_secret: "YOUR_SHARED_SECRET"
```
---
### `turn_username` and `turn_password`

The username and password to use if the TURN server needs them and does not use a token.
@@ -2076,30 +2168,26 @@ default_identity_server: https://matrix.org
---
### `account_threepid_delegates`

Delegate verification of phone numbers to an identity server.

When a user wishes to add a phone number to their account, we need to verify that they
actually own that phone number, which requires sending them a text message (SMS).
Currently Synapse does not support sending those texts itself and instead delegates the
task to an identity server. The base URI for the identity server to be used is
specified by the `account_threepid_delegates.msisdn` option.

If this is left unspecified, Synapse will not allow users to add phone numbers to
their account.

Servers handling these requests must answer the `/requestToken` endpoints defined
by the Matrix Identity Service API [specification](https://matrix.org/docs/spec/identity_service/latest).

*Updated in Synapse 1.64.0*: No longer accepts an `email` option.

Example configuration:
```yaml
account_threepid_delegates:
  msisdn: http://localhost:8090 # Delegate SMS sending to this local process
```
---
@@ -2999,7 +3087,7 @@ This setting has the following sub-options:
* `localdb_enabled`: Set to false to disable authentication against the local password
   database. This is ignored if `enabled` is false, and is only useful
   if you have other `password_providers`. Defaults to true.
* `pepper`: Set the value here to a secret random string for extra security.
   DO NOT CHANGE THIS AFTER INITIAL SETUP!
* `policy`: Define and enforce a password policy, such as minimum lengths for passwords, etc.
   Each parameter is optional. This is an implementation of MSC2000. Parameters are as follows:
@@ -3373,7 +3461,7 @@ alias_creation_rules:
  action: deny
```
---
### `room_list_publication_rules`

The `room_list_publication_rules` option controls who can publish and
which rooms can be published in the public room list.
@@ -9,6 +9,9 @@ a real homeserver.yaml. Instead, if you are starting from scratch, please genera
a fresh config using Synapse by following the instructions in
[Installation](../../setup/installation.md).

Documentation for all configuration options can be found in the
[Configuration Manual](./config_documentation.md).

```yaml
{{#include ../../sample_config.yaml}}
```
@@ -4,5 +4,5 @@ Synapse supports authenticating users via the [Central Authentication
Service protocol](https://en.wikipedia.org/wiki/Central_Authentication_Service)
(CAS) natively.

Please see the [cas_config](../../../configuration/config_documentation.md#cas_config) and [sso](../../../configuration/config_documentation.md#sso)
sections of the configuration manual for more details.
8
mypy.ini
@@ -56,7 +56,6 @@ exclude = (?x)
  |tests/server.py
  |tests/server_notices/test_resource_limits_server_notices.py
  |tests/test_metrics.py
  |tests/test_server.py
  |tests/test_state.py
  |tests/test_terms_auth.py
  |tests/util/caches/test_cached_call.py

@@ -74,7 +73,6 @@ exclude = (?x)
  |tests/util/test_lrucache.py
  |tests/util/test_rwlock.py
  |tests/util/test_wheel_timer.py
  |tests/utils.py
  )$

[mypy-synapse.federation.transport.client]

@@ -89,9 +87,6 @@ disallow_untyped_defs = False
[mypy-synapse.logging.opentracing]
disallow_untyped_defs = False

[mypy-synapse.logging.scopecontextmanager]
disallow_untyped_defs = False

[mypy-synapse.metrics._reactor_metrics]
disallow_untyped_defs = False
# This module imports select.epoll. That exists on Linux, but doesn't on macOS.

@@ -131,6 +126,9 @@ disallow_untyped_defs = True
[mypy-tests.federation.transport.test_client]
disallow_untyped_defs = True

[mypy-tests.utils]
disallow_untyped_defs = True

;; Dependencies without annotations
;; Before ignoring a module, check to see if type stubs are available.
169
poetry.lock
generated
@@ -290,7 +290,7 @@ importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}

[[package]]
name = "frozendict"
version = "2.3.2"
description = "A simple immutable dictionary"
category = "main"
optional = false

@@ -502,7 +502,7 @@ pyasn1 = ">=0.4.6"

[[package]]
name = "lxml"
version = "4.9.1"
description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API."
category = "main"
optional = true
@@ -1753,23 +1753,23 @@ flake8-comprehensions = [
|
||||
{file = "flake8_comprehensions-3.8.0-py3-none-any.whl", hash = "sha256:9406314803abe1193c064544ab14fdc43c58424c0882f6ff8a581eb73fc9bb58"},
|
||||
]
|
||||
frozendict = [
|
||||
{file = "frozendict-2.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e18e2abd144a9433b0a8334582843b2aa0d3b9ac8b209aaa912ad365115fe2e1"},
|
||||
{file = "frozendict-2.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96dc7a02e78da5725e5e642269bb7ae792e0c9f13f10f2e02689175ebbfedb35"},
|
||||
{file = "frozendict-2.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:752a6dcfaf9bb20a7ecab24980e4dbe041f154509c989207caf185522ef85461"},
|
||||
{file = "frozendict-2.3.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:5346d9fc1c936c76d33975a9a9f1a067342963105d9a403a99e787c939cc2bb2"},
|
||||
{file = "frozendict-2.3.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60dd2253f1bacb63a7c486ec541a968af4f985ffb06602ee8954a3d39ec6bd2e"},
|
||||
{file = "frozendict-2.3.0-cp36-cp36m-win_amd64.whl", hash = "sha256:b2e044602ce17e5cd86724add46660fb9d80169545164e763300a3b839cb1b79"},
|
||||
{file = "frozendict-2.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a27a69b1ac3591e4258325108aee62b53c0eeb6ad0a993ae68d3c7eaea980420"},
|
||||
{file = "frozendict-2.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f45ef5f6b184d84744fff97b61f6b9a855e24d36b713ea2352fc723a047afa5"},
|
||||
{file = "frozendict-2.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:2d3f5016650c0e9a192f5024e68fb4d63f670d0ee58b099ed3f5b4c62ea30ecb"},
|
||||
{file = "frozendict-2.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6cf605916f50aabaaba5624c81eb270200f6c2c466c46960237a125ec8fe3ae0"},
|
||||
{file = "frozendict-2.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6da06e44904beae4412199d7e49be4f85c6cc168ab06b77c735ea7da5ce3454"},
|
||||
{file = "frozendict-2.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:1f34793fb409c4fa70ffd25bea87b01f3bd305fb1c6b09e7dff085b126302206"},
|
||||
{file = "frozendict-2.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fd72494a559bdcd28aa71f4aa81860269cd0b7c45fff3e2614a0a053ecfd2a13"},
|
||||
{file = "frozendict-2.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00ea9166aa68cc5feed05986206fdbf35e838a09cb3feef998cf35978ff8a803"},
|
||||
{file = "frozendict-2.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:9ffaf440648b44e0bc694c1a4701801941378ba3ba6541e17750ae4b4aeeb116"},
|
||||
{file = "frozendict-2.3.0-py3-none-any.whl", hash = "sha256:8578fe06815fcdcc672bd5603eebc98361a5317c1c3a13b28c6c810f6ea3b323"},
|
||||
{file = "frozendict-2.3.0.tar.gz", hash = "sha256:da4231adefc5928e7810da2732269d3ad7b5616295b3e693746392a8205ea0b5"},
|
||||
{file = "frozendict-2.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4fb171d1e84d17335365877e19d17440373b47ca74a73c06f65ac0b16d01e87f"},
|
||||
{file = "frozendict-2.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0a3640e9d7533d164160b758351aa49d9e85bbe0bd76d219d4021e90ffa6a52"},
|
||||
{file = "frozendict-2.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:87cfd00fafbc147d8cd2590d1109b7db8ac8d7d5bdaa708ba46caee132b55d4d"},
|
||||
{file = "frozendict-2.3.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:fb09761e093cfabb2f179dbfdb2521e1ec5701df714d1eb5c51fa7849027be19"},
|
||||
{file = "frozendict-2.3.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82176dc7adf01cf8f0193e909401939415a230a1853f4a672ec1629a06ceae18"},
|
||||
{file = "frozendict-2.3.2-cp36-cp36m-win_amd64.whl", hash = "sha256:c1c70826aa4a50fa283fe161834ac4a3ac7c753902c980bb8b595b0998a38ddb"},
|
||||
{file = "frozendict-2.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:1db5035ddbed995badd1a62c4102b5e207b5aeb24472df2c60aba79639d7996b"},
|
||||
{file = "frozendict-2.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4246fc4cb1413645ba4d3513939b90d979a5bae724be605a10b2b26ee12f839c"},
|
||||
{file = "frozendict-2.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:680cd42fb0a255da1ce45678ccbd7f69da750d5243809524ebe8f45b2eda6e6b"},
|
||||
{file = "frozendict-2.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6a7f3a181d6722c92a9fab12d0c5c2b006a18ca5666098531f316d1e1c8984e3"},
|
||||
{file = "frozendict-2.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1cb866eabb3c1384a7fe88e1e1033e2b6623073589012ab637c552bf03f6364"},
|
||||
{file = "frozendict-2.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:952c5e5e664578c5c2ce8489ee0ab6a1855da02b58ef593ee728fc10d672641a"},
|
||||
{file = "frozendict-2.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:608b77904cd0117cd816df605a80d0043a5326ee62529327d2136c792165a823"},
|
||||
{file = "frozendict-2.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0eed41fd326f0bcc779837d8d9e1374da1bc9857fe3b9f2910195bbd5fff3aeb"},
|
||||
{file = "frozendict-2.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:bde28db6b5868dd3c45b3555f9d1dc5a1cca6d93591502fa5dcecce0dde6a335"},
|
||||
{file = "frozendict-2.3.2-py3-none-any.whl", hash = "sha256:6882a9bbe08ab9b5ff96ce11bdff3fe40b114b9813bc6801261e2a7b45e20012"},
|
||||
{file = "frozendict-2.3.2.tar.gz", hash = "sha256:7fac4542f0a13fbe704db4942f41ba3abffec5af8b100025973e59dff6a09d0d"},
|
||||
]
|
||||
gitdb = [
|
||||
{file = "gitdb-4.0.9-py3-none-any.whl", hash = "sha256:8033ad4e853066ba6ca92050b9df2f89301b8fc8bf7e9324d412a63f8bf1a8fd"},
|
||||
@@ -1937,67 +1937,76 @@ ldap3 = [
|
||||
{file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"},
|
||||
]
|
||||
lxml = [
|
||||
{file = "lxml-4.8.0-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:e1ab2fac607842ac36864e358c42feb0960ae62c34aa4caaf12ada0a1fb5d99b"},
|
||||
{file = "lxml-4.8.0-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28d1af847786f68bec57961f31221125c29d6f52d9187c01cd34dc14e2b29430"},
|
||||
{file = "lxml-4.8.0-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:b92d40121dcbd74831b690a75533da703750f7041b4bf951befc657c37e5695a"},
|
||||
{file = "lxml-4.8.0-cp27-cp27m-win32.whl", hash = "sha256:e01f9531ba5420838c801c21c1b0f45dbc9607cb22ea2cf132844453bec863a5"},
|
||||
{file = "lxml-4.8.0-cp27-cp27m-win_amd64.whl", hash = "sha256:6259b511b0f2527e6d55ad87acc1c07b3cbffc3d5e050d7e7bcfa151b8202df9"},
|
||||
{file = "lxml-4.8.0-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1010042bfcac2b2dc6098260a2ed022968dbdfaf285fc65a3acf8e4eb1ffd1bc"},
|
||||
{file = "lxml-4.8.0-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:fa56bb08b3dd8eac3a8c5b7d075c94e74f755fd9d8a04543ae8d37b1612dd170"},
|
||||
{file = "lxml-4.8.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:31ba2cbc64516dcdd6c24418daa7abff989ddf3ba6d3ea6f6ce6f2ed6e754ec9"},
|
||||
{file = "lxml-4.8.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:31499847fc5f73ee17dbe1b8e24c6dafc4e8d5b48803d17d22988976b0171f03"},
|
||||
{file = "lxml-4.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:5f7d7d9afc7b293147e2d506a4596641d60181a35279ef3aa5778d0d9d9123fe"},
|
||||
{file = "lxml-4.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:a3c5f1a719aa11866ffc530d54ad965063a8cbbecae6515acbd5f0fae8f48eaa"},
|
||||
{file = "lxml-4.8.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6268e27873a3d191849204d00d03f65c0e343b3bcb518a6eaae05677c95621d1"},
|
||||
{file = "lxml-4.8.0-cp310-cp310-win32.whl", hash = "sha256:330bff92c26d4aee79c5bc4d9967858bdbe73fdbdbacb5daf623a03a914fe05b"},
|
||||
{file = "lxml-4.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:b2582b238e1658c4061ebe1b4df53c435190d22457642377fd0cb30685cdfb76"},
|
||||
{file = "lxml-4.8.0-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a2bfc7e2a0601b475477c954bf167dee6d0f55cb167e3f3e7cefad906e7759f6"},
|
||||
{file = "lxml-4.8.0-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a1547ff4b8a833511eeaceacbcd17b043214fcdb385148f9c1bc5556ca9623e2"},
|
||||
{file = "lxml-4.8.0-cp35-cp35m-win32.whl", hash = "sha256:a9f1c3489736ff8e1c7652e9dc39f80cff820f23624f23d9eab6e122ac99b150"},
|
||||
{file = "lxml-4.8.0-cp35-cp35m-win_amd64.whl", hash = "sha256:530f278849031b0eb12f46cca0e5db01cfe5177ab13bd6878c6e739319bae654"},
|
||||
{file = "lxml-4.8.0-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:078306d19a33920004addeb5f4630781aaeabb6a8d01398045fcde085091a169"},
|
||||
{file = "lxml-4.8.0-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:86545e351e879d0b72b620db6a3b96346921fa87b3d366d6c074e5a9a0b8dadb"},
|
||||
{file = "lxml-4.8.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24f5c5ae618395ed871b3d8ebfcbb36e3f1091fd847bf54c4de623f9107942f3"},
|
||||
{file = "lxml-4.8.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:bbab6faf6568484707acc052f4dfc3802bdb0cafe079383fbaa23f1cdae9ecd4"},
|
||||
{file = "lxml-4.8.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7993232bd4044392c47779a3c7e8889fea6883be46281d45a81451acfd704d7e"},
|
||||
{file = "lxml-4.8.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6d6483b1229470e1d8835e52e0ff3c6973b9b97b24cd1c116dca90b57a2cc613"},
|
||||
{file = "lxml-4.8.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:ad4332a532e2d5acb231a2e5d33f943750091ee435daffca3fec0a53224e7e33"},
|
||||
{file = "lxml-4.8.0-cp36-cp36m-win32.whl", hash = "sha256:db3535733f59e5605a88a706824dfcb9bd06725e709ecb017e165fc1d6e7d429"},
|
||||
{file = "lxml-4.8.0-cp36-cp36m-win_amd64.whl", hash = "sha256:5f148b0c6133fb928503cfcdfdba395010f997aa44bcf6474fcdd0c5398d9b63"},
|
||||
{file = "lxml-4.8.0-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:8a31f24e2a0b6317f33aafbb2f0895c0bce772980ae60c2c640d82caac49628a"},
|
||||
{file = "lxml-4.8.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:719544565c2937c21a6f76d520e6e52b726d132815adb3447ccffbe9f44203c4"},
|
||||
{file = "lxml-4.8.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:c0b88ed1ae66777a798dc54f627e32d3b81c8009967c63993c450ee4cbcbec15"},
|
||||
{file = "lxml-4.8.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fa9b7c450be85bfc6cd39f6df8c5b8cbd76b5d6fc1f69efec80203f9894b885f"},
|
||||
{file = "lxml-4.8.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e9f84ed9f4d50b74fbc77298ee5c870f67cb7e91dcdc1a6915cb1ff6a317476c"},
|
||||
{file = "lxml-4.8.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1d650812b52d98679ed6c6b3b55cbb8fe5a5460a0aef29aeb08dc0b44577df85"},
|
||||
{file = "lxml-4.8.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:80bbaddf2baab7e6de4bc47405e34948e694a9efe0861c61cdc23aa774fcb141"},
|
||||
{file = "lxml-4.8.0-cp37-cp37m-win32.whl", hash = "sha256:6f7b82934c08e28a2d537d870293236b1000d94d0b4583825ab9649aef7ddf63"},
|
||||
{file = "lxml-4.8.0-cp37-cp37m-win_amd64.whl", hash = "sha256:e1fd7d2fe11f1cb63d3336d147c852f6d07de0d0020d704c6031b46a30b02ca8"},
|
||||
{file = "lxml-4.8.0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:5045ee1ccd45a89c4daec1160217d363fcd23811e26734688007c26f28c9e9e7"},
|
||||
{file = "lxml-4.8.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:0c1978ff1fd81ed9dcbba4f91cf09faf1f8082c9d72eb122e92294716c605428"},
|
||||
{file = "lxml-4.8.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:52cbf2ff155b19dc4d4100f7442f6a697938bf4493f8d3b0c51d45568d5666b5"},
|
||||
{file = "lxml-4.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:ce13d6291a5f47c1c8dbd375baa78551053bc6b5e5c0e9bb8e39c0a8359fd52f"},
|
||||
{file = "lxml-4.8.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e11527dc23d5ef44d76fef11213215c34f36af1608074561fcc561d983aeb870"},
|
||||
{file = "lxml-4.8.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:60d2f60bd5a2a979df28ab309352cdcf8181bda0cca4529769a945f09aba06f9"},
|
||||
{file = "lxml-4.8.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:62f93eac69ec0f4be98d1b96f4d6b964855b8255c345c17ff12c20b93f247b68"},
|
||||
{file = "lxml-4.8.0-cp38-cp38-win32.whl", hash = "sha256:20b8a746a026017acf07da39fdb10aa80ad9877046c9182442bf80c84a1c4696"},
|
||||
{file = "lxml-4.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:891dc8f522d7059ff0024cd3ae79fd224752676447f9c678f2a5c14b84d9a939"},
|
||||
{file = "lxml-4.8.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:b6fc2e2fb6f532cf48b5fed57567ef286addcef38c28874458a41b7837a57807"},
|
||||
{file = "lxml-4.8.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:74eb65ec61e3c7c019d7169387d1b6ffcfea1b9ec5894d116a9a903636e4a0b1"},
|
||||
{file = "lxml-4.8.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:627e79894770783c129cc5e89b947e52aa26e8e0557c7e205368a809da4b7939"},
|
||||
{file = "lxml-4.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:545bd39c9481f2e3f2727c78c169425efbfb3fbba6e7db4f46a80ebb249819ca"},
|
||||
{file = "lxml-4.8.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5a58d0b12f5053e270510bf12f753a76aaf3d74c453c00942ed7d2c804ca845c"},
|
||||
{file = "lxml-4.8.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:ec4b4e75fc68da9dc0ed73dcdb431c25c57775383fec325d23a770a64e7ebc87"},
|
||||
{file = "lxml-4.8.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5804e04feb4e61babf3911c2a974a5b86f66ee227cc5006230b00ac6d285b3a9"},
|
||||
{file = "lxml-4.8.0-cp39-cp39-win32.whl", hash = "sha256:aa0cf4922da7a3c905d000b35065df6184c0dc1d866dd3b86fd961905bbad2ea"},
|
||||
{file = "lxml-4.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:dd10383f1d6b7edf247d0960a3db274c07e96cf3a3fc7c41c8448f93eac3fb1c"},
|
||||
{file = "lxml-4.8.0-pp37-pypy37_pp73-macosx_10_14_x86_64.whl", hash = "sha256:2403a6d6fb61c285969b71f4a3527873fe93fd0abe0832d858a17fe68c8fa507"},
|
||||
{file = "lxml-4.8.0-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:986b7a96228c9b4942ec420eff37556c5777bfba6758edcb95421e4a614b57f9"},
|
||||
{file = "lxml-4.8.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:6fe4ef4402df0250b75ba876c3795510d782def5c1e63890bde02d622570d39e"},
|
||||
{file = "lxml-4.8.0-pp38-pypy38_pp73-macosx_10_14_x86_64.whl", hash = "sha256:f10ce66fcdeb3543df51d423ede7e238be98412232fca5daec3e54bcd16b8da0"},
|
||||
{file = "lxml-4.8.0-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:730766072fd5dcb219dd2b95c4c49752a54f00157f322bc6d71f7d2a31fecd79"},
|
||||
{file = "lxml-4.8.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:8b99ec73073b37f9ebe8caf399001848fced9c08064effdbfc4da2b5a8d07b93"},
|
||||
{file = "lxml-4.8.0.tar.gz", hash = "sha256:f63f62fc60e6228a4ca9abae28228f35e1bd3ce675013d1dfb828688d50c6e23"},
|
||||
{file = "lxml-4.9.1-cp27-cp27m-macosx_10_15_x86_64.whl", hash = "sha256:98cafc618614d72b02185ac583c6f7796202062c41d2eeecdf07820bad3295ed"},
|
||||
{file = "lxml-4.9.1-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c62e8dd9754b7debda0c5ba59d34509c4688f853588d75b53c3791983faa96fc"},
|
||||
{file = "lxml-4.9.1-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:21fb3d24ab430fc538a96e9fbb9b150029914805d551deeac7d7822f64631dfc"},
|
||||
{file = "lxml-4.9.1-cp27-cp27m-win32.whl", hash = "sha256:86e92728ef3fc842c50a5cb1d5ba2bc66db7da08a7af53fb3da79e202d1b2cd3"},
|
||||
{file = "lxml-4.9.1-cp27-cp27m-win_amd64.whl", hash = "sha256:4cfbe42c686f33944e12f45a27d25a492cc0e43e1dc1da5d6a87cbcaf2e95627"},
|
||||
{file = "lxml-4.9.1-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dad7b164905d3e534883281c050180afcf1e230c3d4a54e8038aa5cfcf312b84"},
|
||||
{file = "lxml-4.9.1-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a614e4afed58c14254e67862456d212c4dcceebab2eaa44d627c2ca04bf86837"},
|
||||
{file = "lxml-4.9.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:f9ced82717c7ec65a67667bb05865ffe38af0e835cdd78728f1209c8fffe0cad"},
|
||||
{file = "lxml-4.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:d9fc0bf3ff86c17348dfc5d322f627d78273eba545db865c3cd14b3f19e57fa5"},
|
||||
{file = "lxml-4.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e5f66bdf0976ec667fc4594d2812a00b07ed14d1b44259d19a41ae3fff99f2b8"},
|
||||
{file = "lxml-4.9.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:fe17d10b97fdf58155f858606bddb4e037b805a60ae023c009f760d8361a4eb8"},
|
||||
{file = "lxml-4.9.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8caf4d16b31961e964c62194ea3e26a0e9561cdf72eecb1781458b67ec83423d"},
|
||||
{file = "lxml-4.9.1-cp310-cp310-win32.whl", hash = "sha256:4780677767dd52b99f0af1f123bc2c22873d30b474aa0e2fc3fe5e02217687c7"},
|
||||
{file = "lxml-4.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:b122a188cd292c4d2fcd78d04f863b789ef43aa129b233d7c9004de08693728b"},
|
||||
{file = "lxml-4.9.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:be9eb06489bc975c38706902cbc6888f39e946b81383abc2838d186f0e8b6a9d"},
|
||||
{file = "lxml-4.9.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:f1be258c4d3dc609e654a1dc59d37b17d7fef05df912c01fc2e15eb43a9735f3"},
|
||||
{file = "lxml-4.9.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:927a9dd016d6033bc12e0bf5dee1dde140235fc8d0d51099353c76081c03dc29"},
|
||||
{file = "lxml-4.9.1-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9232b09f5efee6a495a99ae6824881940d6447debe272ea400c02e3b68aad85d"},
|
||||
{file = "lxml-4.9.1-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:04da965dfebb5dac2619cb90fcf93efdb35b3c6994fea58a157a834f2f94b318"},
|
||||
{file = "lxml-4.9.1-cp35-cp35m-win32.whl", hash = "sha256:4d5bae0a37af799207140652a700f21a85946f107a199bcb06720b13a4f1f0b7"},
|
||||
{file = "lxml-4.9.1-cp35-cp35m-win_amd64.whl", hash = "sha256:4878e667ebabe9b65e785ac8da4d48886fe81193a84bbe49f12acff8f7a383a4"},
|
||||
{file = "lxml-4.9.1-cp36-cp36m-macosx_10_15_x86_64.whl", hash = "sha256:1355755b62c28950f9ce123c7a41460ed9743c699905cbe664a5bcc5c9c7c7fb"},
|
||||
{file = "lxml-4.9.1-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:bcaa1c495ce623966d9fc8a187da80082334236a2a1c7e141763ffaf7a405067"},
|
||||
{file = "lxml-4.9.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6eafc048ea3f1b3c136c71a86db393be36b5b3d9c87b1c25204e7d397cee9536"},
|
||||
{file = "lxml-4.9.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:13c90064b224e10c14dcdf8086688d3f0e612db53766e7478d7754703295c7c8"},
|
||||
{file = "lxml-4.9.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:206a51077773c6c5d2ce1991327cda719063a47adc02bd703c56a662cdb6c58b"},
|
||||
{file = "lxml-4.9.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:e8f0c9d65da595cfe91713bc1222af9ecabd37971762cb830dea2fc3b3bb2acf"},
|
||||
{file = "lxml-4.9.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:8f0a4d179c9a941eb80c3a63cdb495e539e064f8054230844dcf2fcb812b71d3"},
|
||||
{file = "lxml-4.9.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:830c88747dce8a3e7525defa68afd742b4580df6aa2fdd6f0855481e3994d391"},
|
||||
{file = "lxml-4.9.1-cp36-cp36m-win32.whl", hash = "sha256:1e1cf47774373777936c5aabad489fef7b1c087dcd1f426b621fda9dcc12994e"},
|
||||
{file = "lxml-4.9.1-cp36-cp36m-win_amd64.whl", hash = "sha256:5974895115737a74a00b321e339b9c3f45c20275d226398ae79ac008d908bff7"},
|
||||
{file = "lxml-4.9.1-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:1423631e3d51008871299525b541413c9b6c6423593e89f9c4cfbe8460afc0a2"},
|
||||
{file = "lxml-4.9.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:2aaf6a0a6465d39b5ca69688fce82d20088c1838534982996ec46633dc7ad6cc"},
|
||||
{file = "lxml-4.9.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:9f36de4cd0c262dd9927886cc2305aa3f2210db437aa4fed3fb4940b8bf4592c"},
|
||||
{file = "lxml-4.9.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:ae06c1e4bc60ee076292e582a7512f304abdf6c70db59b56745cca1684f875a4"},
|
||||
{file = "lxml-4.9.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:57e4d637258703d14171b54203fd6822fda218c6c2658a7d30816b10995f29f3"},
|
||||
{file = "lxml-4.9.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6d279033bf614953c3fc4a0aa9ac33a21e8044ca72d4fa8b9273fe75359d5cca"},
|
||||
{file = "lxml-4.9.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:a60f90bba4c37962cbf210f0188ecca87daafdf60271f4c6948606e4dabf8785"},
|
||||
{file = "lxml-4.9.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:6ca2264f341dd81e41f3fffecec6e446aa2121e0b8d026fb5130e02de1402785"},
|
||||
{file = "lxml-4.9.1-cp37-cp37m-win32.whl", hash = "sha256:27e590352c76156f50f538dbcebd1925317a0f70540f7dc8c97d2931c595783a"},
|
||||
{file = "lxml-4.9.1-cp37-cp37m-win_amd64.whl", hash = "sha256:eea5d6443b093e1545ad0210e6cf27f920482bfcf5c77cdc8596aec73523bb7e"},
|
||||
{file = "lxml-4.9.1-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:f05251bbc2145349b8d0b77c0d4e5f3b228418807b1ee27cefb11f69ed3d233b"},
|
||||
{file = "lxml-4.9.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:487c8e61d7acc50b8be82bda8c8d21d20e133c3cbf41bd8ad7eb1aaeb3f07c97"},
|
||||
{file = "lxml-4.9.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:8d1a92d8e90b286d491e5626af53afef2ba04da33e82e30744795c71880eaa21"},
|
||||
{file = "lxml-4.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:b570da8cd0012f4af9fa76a5635cd31f707473e65a5a335b186069d5c7121ff2"},
|
||||
{file = "lxml-4.9.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ef87fca280fb15342726bd5f980f6faf8b84a5287fcc2d4962ea8af88b35130"},
|
||||
{file = "lxml-4.9.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:93e414e3206779ef41e5ff2448067213febf260ba747fc65389a3ddaa3fb8715"},
|
||||
{file = "lxml-4.9.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6653071f4f9bac46fbc30f3c7838b0e9063ee335908c5d61fb7a4a86c8fd2036"},
|
||||
{file = "lxml-4.9.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:32a73c53783becdb7eaf75a2a1525ea8e49379fb7248c3eeefb9412123536387"},
|
||||
{file = "lxml-4.9.1-cp38-cp38-win32.whl", hash = "sha256:1a7c59c6ffd6ef5db362b798f350e24ab2cfa5700d53ac6681918f314a4d3b94"},
|
||||
{file = "lxml-4.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:1436cf0063bba7888e43f1ba8d58824f085410ea2025befe81150aceb123e345"},
|
||||
{file = "lxml-4.9.1-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:4beea0f31491bc086991b97517b9683e5cfb369205dac0148ef685ac12a20a67"},
|
||||
{file = "lxml-4.9.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:41fb58868b816c202e8881fd0f179a4644ce6e7cbbb248ef0283a34b73ec73bb"},
|
||||
{file = "lxml-4.9.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:bd34f6d1810d9354dc7e35158aa6cc33456be7706df4420819af6ed966e85448"},
|
||||
{file = "lxml-4.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:edffbe3c510d8f4bf8640e02ca019e48a9b72357318383ca60e3330c23aaffc7"},
|
||||
{file = "lxml-4.9.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6d949f53ad4fc7cf02c44d6678e7ff05ec5f5552b235b9e136bd52e9bf730b91"},
|
||||
{file = "lxml-4.9.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:079b68f197c796e42aa80b1f739f058dcee796dc725cc9a1be0cdb08fc45b000"},
|
||||
{file = "lxml-4.9.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9c3a88d20e4fe4a2a4a84bf439a5ac9c9aba400b85244c63a1ab7088f85d9d25"},
|
||||
{file = "lxml-4.9.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4e285b5f2bf321fc0857b491b5028c5f276ec0c873b985d58d7748ece1d770dd"},
|
||||
{file = "lxml-4.9.1-cp39-cp39-win32.whl", hash = "sha256:ef72013e20dd5ba86a8ae1aed7f56f31d3374189aa8b433e7b12ad182c0d2dfb"},
|
||||
{file = "lxml-4.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:10d2017f9150248563bb579cd0d07c61c58da85c922b780060dcc9a3aa9f432d"},
|
||||
{file = "lxml-4.9.1-pp37-pypy37_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0538747a9d7827ce3e16a8fdd201a99e661c7dee3c96c885d8ecba3c35d1032c"},
|
||||
{file = "lxml-4.9.1-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:0645e934e940107e2fdbe7c5b6fb8ec6232444260752598bc4d09511bd056c0b"},
|
||||
{file = "lxml-4.9.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:6daa662aba22ef3258934105be2dd9afa5bb45748f4f702a3b39a5bf53a1f4dc"},
|
||||
{file = "lxml-4.9.1-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:603a464c2e67d8a546ddaa206d98e3246e5db05594b97db844c2f0a1af37cf5b"},
|
||||
{file = "lxml-4.9.1-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:c4b2e0559b68455c085fb0f6178e9752c4be3bba104d6e881eb5573b399d1eb2"},
|
||||
{file = "lxml-4.9.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0f3f0059891d3254c7b5fb935330d6db38d6519ecd238ca4fce93c234b4a0f73"},
|
||||
{file = "lxml-4.9.1-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:c852b1530083a620cb0de5f3cd6826f19862bafeaf77586f1aef326e49d95f0c"},
|
||||
{file = "lxml-4.9.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:287605bede6bd36e930577c5925fcea17cb30453d96a7b4c63c14a257118dbb9"},
|
||||
{file = "lxml-4.9.1.tar.gz", hash = "sha256:fe749b052bb7233fe5d072fcb549221a8cb1a16725c47c37e42b0b9cb3ff2c3f"},
|
||||
]
|
||||
markupsafe = [
|
||||
{file = "MarkupSafe-2.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3028252424c72b2602a323f70fbf50aa80a5d3aa616ea6add4ba21ae9cc9da4c"},
|
||||
|
||||
@@ -54,7 +54,7 @@ skip_gitignore = true

 [tool.poetry]
 name = "matrix-synapse"
-version = "1.62.0"
+version = "1.63.0rc1"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "Apache-2.0"
@@ -14,12 +14,18 @@
 # By default Synapse is run in monolith mode. This can be overridden by
 # setting the WORKERS environment variable.
 #
-# A regular expression of test method names can be supplied as the first
-# argument to the script. Complement will then only run those tests. If
-# no regex is supplied, all tests are run. For example;
+# You can optionally give a "-f" argument (for "fast") before any to skip
+# rebuilding the docker images, if you just want to rerun the tests.
 #
+# Remaining commandline arguments are passed through to `go test`. For example,
+# you can supply a regular expression of test method names via the "-run"
+# argument:
 #
 # ./complement.sh -run "TestOutboundFederation(Profile|Send)"
 #
+# Specifying TEST_ONLY_SKIP_DEP_HASH_VERIFICATION=1 will cause `poetry export`
+# to not emit any hashes when building the Docker image. This then means that
+# you can use 'unverifiable' sources such as git repositories as dependencies.

 # Exit if a line returns a non-zero exit code
 set -e

@@ -32,6 +38,47 @@ echo_if_github() {
   fi
 }

+# Helper to print out the usage instructions
+usage() {
+  cat >&2 <<EOF
+Usage: $0 [-f] <go test arguments>...
+Run the complement test suite on Synapse.
+
+  -f, --fast
+    Skip rebuilding the docker images, and just use the most recent
+    'complement-synapse:latest' image.
+    Conflicts with --build-only.
+
+  --build-only
+    Only build the Docker images. Don't actually run Complement.
+    Conflicts with -f/--fast.
+
+For help on arguments to 'go test', run 'go help testflag'.
+EOF
+}
+
+# parse our arguments
+skip_docker_build=""
+skip_complement_run=""
+while [ $# -ge 1 ]; do
+  arg=$1
+  case "$arg" in
+    "-h")
+      usage
+      exit 1
+      ;;
+    "-f"|"--fast")
+      skip_docker_build=1
+      ;;
+    "--build-only")
+      skip_complement_run=1
+      ;;
+    *)
+      # unknown arg: presumably an argument to gotest. break the loop.
+      break
+  esac
+  shift
+done

 # enable buildkit for the docker builds
 export DOCKER_BUILDKIT=1
@@ -49,21 +96,30 @@ if [[ -z "$COMPLEMENT_DIR" ]]; then
   echo "Checkout available at 'complement-${COMPLEMENT_REF}'"
 fi

-# Build the base Synapse image from the local checkout
-echo_if_github "::group::Build Docker image: matrixdotorg/synapse"
-docker build -t matrixdotorg/synapse -f "docker/Dockerfile" .
-echo_if_github "::endgroup::"
+if [ -z "$skip_docker_build" ]; then
+  # Build the base Synapse image from the local checkout
+  echo_if_github "::group::Build Docker image: matrixdotorg/synapse"
+  docker build -t matrixdotorg/synapse \
+    --build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \
+    -f "docker/Dockerfile" .
+  echo_if_github "::endgroup::"

-# Build the workers docker image (from the base Synapse image we just built).
-echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers"
-docker build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" .
-echo_if_github "::endgroup::"
+  # Build the workers docker image (from the base Synapse image we just built).
+  echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers"
+  docker build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" .
+  echo_if_github "::endgroup::"

-# Build the unified Complement image (from the worker Synapse image we just built).
-echo_if_github "::group::Build Docker image: complement/Dockerfile"
-docker build -t complement-synapse \
-  -f "docker/complement/Dockerfile" "docker/complement"
-echo_if_github "::endgroup::"
+  # Build the unified Complement image (from the worker Synapse image we just built).
+  echo_if_github "::group::Build Docker image: complement/Dockerfile"
+  docker build -t complement-synapse \
+    -f "docker/complement/Dockerfile" "docker/complement"
+  echo_if_github "::endgroup::"
+fi

+if [ -n "$skip_complement_run" ]; then
+  echo "Skipping Complement run as requested."
+  exit
+fi

 export COMPLEMENT_BASE_IMAGE=complement-synapse
@@ -104,6 +160,18 @@ else
   test_tags="$test_tags,faster_joins"
 fi

+if [[ -n "$SYNAPSE_TEST_LOG_LEVEL" ]]; then
+  # Set the log level to what is desired
+  export PASS_SYNAPSE_LOG_LEVEL="$SYNAPSE_TEST_LOG_LEVEL"
+
+  # Allow logging sensitive things (currently SQL queries & parameters).
+  # (This won't have any effect if we're not logging at DEBUG level overall.)
+  # Since this is just a test suite, this is fine and won't reveal anyone's
+  # personal information
+  export PASS_SYNAPSE_LOG_SENSITIVE=1
+fi

 # Run the tests!
 echo "Images built; running complement"
 cd "$COMPLEMENT_DIR"
@@ -166,22 +166,6 @@ IGNORED_TABLES = {
     "ui_auth_sessions",
     "ui_auth_sessions_credentials",
     "ui_auth_sessions_ips",
-    # Groups/communities is no longer supported.
-    "group_attestations_remote",
-    "group_attestations_renewals",
-    "group_invites",
-    "group_roles",
-    "group_room_categories",
-    "group_rooms",
-    "group_summary_roles",
-    "group_summary_room_categories",
-    "group_summary_rooms",
-    "group_summary_users",
-    "group_users",
-    "groups",
-    "local_group_membership",
-    "local_group_updates",
     "remote_profile_cache",
 }
@@ -268,6 +252,9 @@ class MockHomeserver:
     def get_instance_name(self) -> str:
         return "master"

+    def should_send_federation(self) -> bool:
+        return False
+

 class Porter:
     def __init__(
@@ -415,12 +402,15 @@ class Porter:
             self.progress.update(table, table_size)  # Mark table as done
             return

+        # We sweep over rowids in two directions: one forwards (rowids 1, 2, 3, ...)
+        # and another backwards (rowids 0, -1, -2, ...).
         forward_select = (
             "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?" % (table,)
         )

         backward_select = (
-            "SELECT rowid, * FROM %s WHERE rowid <= ? ORDER BY rowid LIMIT ?" % (table,)
+            "SELECT rowid, * FROM %s WHERE rowid <= ? ORDER BY rowid DESC LIMIT ?"
+            % (table,)
         )

         do_forward = [True]
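For illustration, here is a hedged standalone sketch of the same two-direction sweep using the standard-library `sqlite3` module (the table name and batch size are invented); without the `DESC` restored above, the backward query would keep returning the same lowest-rowid batch instead of walking downwards:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE events (id INTEGER PRIMARY KEY, body TEXT)")
conn.executemany(
    "INSERT INTO events VALUES (?, ?)", [(i, f"e{i}") for i in range(-3, 4)]
)

batch = 2
# Forwards: rowids 1, 2, 3, ... (cursor moves upwards)
rows = conn.execute(
    "SELECT rowid, body FROM events WHERE rowid >= ? ORDER BY rowid LIMIT ?",
    (1, batch),
).fetchall()
next_forward = rows[-1][0] + 1  # -> 3

# Backwards: rowids 0, -1, -2, ... (the DESC is what moves the cursor downwards)
rows = conn.execute(
    "SELECT rowid, body FROM events WHERE rowid <= ? ORDER BY rowid DESC LIMIT ?",
    (0, batch),
).fetchall()
next_backward = rows[-1][0] - 1  # -> -2
```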
@@ -618,6 +608,25 @@ class Porter:
             self.postgres_store.db_pool.updates.has_completed_background_updates()
         )

+    @staticmethod
+    def _is_sqlite_autovacuum_enabled(txn: LoggingTransaction) -> bool:
+        """
+        Returns true if auto_vacuum is enabled in SQLite.
+        https://www.sqlite.org/pragma.html#pragma_auto_vacuum
+
+        Vacuuming changes the rowids on rows in the database.
+        Auto-vacuuming is therefore dangerous when used in conjunction with this script.
+
+        Note that the auto_vacuum setting can't be changed without performing
+        a VACUUM after trying to change the pragma.
+        """
+        txn.execute("PRAGMA auto_vacuum")
+        row = txn.fetchone()
+        assert row is not None, "`PRAGMA auto_vacuum` did not give a row."
+        (autovacuum_setting,) = row
+        # 0 means off. 1 means full. 2 means incremental.
+        return autovacuum_setting != 0
+
     async def run(self) -> None:
         """Ports the SQLite database to a PostgreSQL database.
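The same pragma check can be reproduced outside the porter with the standard-library `sqlite3` module (a hedged sketch; the database path is hypothetical):

```python
import sqlite3

def is_autovacuum_enabled(path: str) -> bool:
    # PRAGMA auto_vacuum returns a single row: (0,) off, (1,) full, (2,) incremental.
    conn = sqlite3.connect(path)
    try:
        (setting,) = conn.execute("PRAGMA auto_vacuum").fetchone()
    finally:
        conn.close()
    return setting != 0

if is_autovacuum_enabled("homeserver.db"):
    raise SystemExit("auto_vacuum is on; run `PRAGMA auto_vacuum=off; VACUUM;` first")
```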
@@ -634,6 +643,21 @@ class Porter:
             allow_outdated_version=True,
         )

+        # For safety, ensure auto_vacuums are disabled.
+        if await self.sqlite_store.db_pool.runInteraction(
+            "is_sqlite_autovacuum_enabled", self._is_sqlite_autovacuum_enabled
+        ):
+            end_error = (
+                "auto_vacuum is enabled in the SQLite database."
+                " (This is not the default configuration.)\n"
+                " This script relies on rowids being consistent and must not"
+                " be used if the database could be vacuumed between re-runs.\n"
+                " To disable auto_vacuum, you need to stop Synapse and run the following SQL:\n"
+                " PRAGMA auto_vacuum=off;\n"
+                " VACUUM;"
+            )
+            return
+
         # Check if all background updates are done, abort if not.
         updates_complete = (
             await self.sqlite_store.db_pool.updates.has_completed_background_updates()
@@ -259,3 +259,13 @@ class ReceiptTypes:
     READ: Final = "m.read"
     READ_PRIVATE: Final = "org.matrix.msc2285.read.private"
     FULLY_READ: Final = "m.fully_read"
+
+
+class PublicRoomsFilterFields:
+    """Fields in the search filter for `/publicRooms` that we understand.
+
+    As defined in https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3publicrooms
+    """
+
+    GENERIC_SEARCH_TERM: Final = "generic_search_term"
+    ROOM_TYPES: Final = "org.matrix.msc3827.room_types"
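For context, a client exercising these fields would POST a body like the following to `/_matrix/client/v3/publicRooms` (a hedged sketch; the search term and room types are invented, and the `org.matrix.msc3827.*` name is the unstable identifier used while the MSC is experimental):

```python
# Hypothetical request body for POST /_matrix/client/v3/publicRooms.
public_rooms_request = {
    "limit": 10,
    "filter": {
        # PublicRoomsFilterFields.GENERIC_SEARCH_TERM
        "generic_search_term": "synapse",
        # PublicRoomsFilterFields.ROOM_TYPES; None is expected to match
        # ordinary rooms that have no room type set.
        "org.matrix.msc3827.room_types": ["m.space", None],
    },
}
```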
@@ -297,8 +297,14 @@ class AuthError(SynapseError):
     other poorly-defined times.
     """

-    def __init__(self, code: int, msg: str, errcode: str = Codes.FORBIDDEN):
-        super().__init__(code, msg, errcode)
+    def __init__(
+        self,
+        code: int,
+        msg: str,
+        errcode: str = Codes.FORBIDDEN,
+        additional_fields: Optional[dict] = None,
+    ):
+        super().__init__(code, msg, errcode, additional_fields)


 class InvalidClientCredentialsError(SynapseError):
@@ -27,6 +27,33 @@ class Ratelimiter:
     """
     Ratelimit actions marked by arbitrary keys.

+    (Note that the source code speaks of "actions" and "burst_count" rather than
+    "tokens" and a "bucket_size".)
+
+    This is a "leaky bucket as a meter". For each key to be tracked there is a bucket
+    containing some number 0 <= T <= `burst_count` of tokens corresponding to previously
+    permitted requests for that key. Each bucket starts empty, and gradually leaks
+    tokens at a rate of `rate_hz`.
+
+    Upon an incoming request, we must determine:
+    - the key that this request falls under (which bucket to inspect), and
+    - the cost C of this request in tokens.
+    Then, if there is room in the bucket for C tokens (T + C <= `burst_count`),
+    the request is permitted and `cost` tokens are added to the bucket.
+    Otherwise the request is denied, and the bucket continues to hold T tokens.
+
+    This means that the limiter enforces an average request frequency of `rate_hz`,
+    while accumulating a buffer of up to `burst_count` requests which can be consumed
+    instantaneously.
+
+    The tricky bit is the leaking. We do not want to have a periodic process which
+    leaks every bucket! Instead, we track
+    - the time point when the bucket was last completely empty, and
+    - how many tokens have been added to the bucket (permitted requests) since then.
+    Then for each incoming request, we can calculate how many tokens have leaked
+    since this time point, and use that to decide if we should accept or reject the
+    request.
+
     Args:
         clock: A homeserver clock, for retrieving the current time
         rate_hz: The long term number of actions that can be performed in a second.
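A minimal self-contained sketch of the "leaky bucket as a meter" bookkeeping the docstring describes (names and structure are illustrative, not Synapse's actual API):

```python
import time

class LeakyBucketMeter:
    """Per key, track (tokens added since last empty, time last completely empty)."""

    def __init__(self, rate_hz: float, burst_count: int) -> None:
        self.rate_hz = rate_hz          # leak rate: long-term average requests/sec
        self.burst_count = burst_count  # bucket capacity: instantaneous buffer
        self.buckets: dict = {}

    def allow(self, key, cost: float = 1.0) -> bool:
        now = time.monotonic()
        added, since = self.buckets.get(key, (0.0, now))
        # Tokens still in the bucket once leakage since `since` is accounted for.
        tokens = max(0.0, added - (now - since) * self.rate_hz)
        if tokens == 0.0:
            # Bucket fully drained: reset the "last completely empty" time point.
            added, since = 0.0, now
        if tokens + cost > self.burst_count:
            return False  # denied; the bucket is left untouched
        self.buckets[key] = (added + cost, since)
        return True

meter = LeakyBucketMeter(rate_hz=1.0, burst_count=3)
print([meter.allow("@alice:example.org") for _ in range(5)])
# [True, True, True, False, False] when called in quick succession
```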
@@ -41,14 +68,30 @@ class Ratelimiter:
         self.burst_count = burst_count
         self.store = store

-        # A ordered dictionary keeping track of actions, when they were last
-        # performed and how often. Each entry is a mapping from a key of arbitrary type
-        # to a tuple representing:
-        #   * How many times an action has occurred since a point in time
-        #   * The point in time
-        #   * The rate_hz of this particular entry. This can vary per request
+        # An ordered dictionary representing the token buckets tracked by this rate
+        # limiter. Each entry maps a key of arbitrary type to a tuple representing:
+        #   * The number of tokens currently in the bucket,
+        #   * The time point when the bucket was last completely empty, and
+        #   * The rate_hz (leak rate) of this particular bucket.
         self.actions: OrderedDict[Hashable, Tuple[float, float, float]] = OrderedDict()

+    def _get_key(
+        self, requester: Optional[Requester], key: Optional[Hashable]
+    ) -> Hashable:
+        """Use the requester's MXID as a fallback key if no key is provided."""
+        if key is None:
+            if not requester:
+                raise ValueError("Must supply at least one of `requester` or `key`")
+
+            key = requester.user.to_string()
+        return key
+
+    def _get_action_counts(
+        self, key: Hashable, time_now_s: float
+    ) -> Tuple[float, float, float]:
+        """Retrieve the action counts, with a fallback representing an empty bucket."""
+        return self.actions.get(key, (0.0, time_now_s, 0.0))
+
     async def can_do_action(
         self,
         requester: Optional[Requester],
@@ -88,11 +131,7 @@ class Ratelimiter:
               * The reactor timestamp for when the action can be performed next.
                 -1 if rate_hz is less than or equal to zero
         """
-        if key is None:
-            if not requester:
-                raise ValueError("Must supply at least one of `requester` or `key`")
-
-            key = requester.user.to_string()
+        key = self._get_key(requester, key)

         if requester:
             # Disable rate limiting of users belonging to any AS that is configured
@@ -121,7 +160,7 @@ class Ratelimiter:
         self._prune_message_counts(time_now_s)

         # Check if there is an existing count entry for this key
-        action_count, time_start, _ = self.actions.get(key, (0.0, time_now_s, 0.0))
+        action_count, time_start, _ = self._get_action_counts(key, time_now_s)

         # Check whether performing another action is allowed
         time_delta = time_now_s - time_start
@@ -164,6 +203,37 @@ class Ratelimiter:

         return allowed, time_allowed

+    def record_action(
+        self,
+        requester: Optional[Requester],
+        key: Optional[Hashable] = None,
+        n_actions: int = 1,
+        _time_now_s: Optional[float] = None,
+    ) -> None:
+        """Record that an action(s) took place, even if they violate the rate limit.
+
+        This is useful for tracking the frequency of events that happen across
+        federation which we still want to impose local rate limits on. For instance, if
+        we are alice.com monitoring a particular room, we cannot prevent bob.com
+        from joining users to that room. However, we can track the number of recent
+        joins in the room and refuse to serve new joins ourselves if there have been too
+        many in the room across both homeservers.
+
+        Args:
+            requester: The requester that is doing the action, if any.
+            key: An arbitrary key used to classify an action. Defaults to the
+                requester's user ID.
+            n_actions: The number of times the user wants to do this action. If the user
+                cannot do all of the actions, the user's action count is not incremented
+                at all.
+            _time_now_s: The current time. Optional, defaults to the current time according
+                to self.clock. Only used by tests.
+        """
+        key = self._get_key(requester, key)
+        time_now_s = _time_now_s if _time_now_s is not None else self.clock.time()
+        action_count, time_start, rate_hz = self._get_action_counts(key, time_now_s)
+        self.actions[key] = (action_count + n_actions, time_start, rate_hz)
+
     def _prune_message_counts(self, time_now_s: float) -> None:
         """Remove message count entries that have not exceeded their defined
         rate_hz limit
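A hedged sketch of how the two methods combine for the cross-federation case the docstring mentions (the handler function and the exception are invented, not real Synapse code):

```python
async def on_join(ratelimiter, requester, room_id: str, is_remote: bool) -> None:
    if is_remote:
        # We cannot reject bob.com's join, but we record it so that it still
        # counts against the per-room bucket.
        ratelimiter.record_action(requester=None, key=room_id)
    else:
        # Local joins are actually gated by the limiter.
        allowed, _retry_at = await ratelimiter.can_do_action(requester, key=room_id)
        if not allowed:
            raise RuntimeError("Too many recent joins in this room")
```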
@@ -84,6 +84,8 @@ class RoomVersion:
     # MSC3787: Adds support for a `knock_restricted` join rule, mixing concepts of
     # knocks and restricted join rules into the same join condition.
     msc3787_knock_restricted_join_rule: bool
+    # MSC3667: Enforce integer power levels
+    msc3667_int_only_power_levels: bool


 class RoomVersions:
@@ -103,6 +105,7 @@ class RoomVersions:
         msc2716_historical=False,
         msc2716_redactions=False,
         msc3787_knock_restricted_join_rule=False,
+        msc3667_int_only_power_levels=False,
     )
     V2 = RoomVersion(
         "2",
@@ -120,6 +123,7 @@ class RoomVersions:
         msc2716_historical=False,
         msc2716_redactions=False,
         msc3787_knock_restricted_join_rule=False,
+        msc3667_int_only_power_levels=False,
     )
     V3 = RoomVersion(
         "3",
@@ -137,6 +141,7 @@ class RoomVersions:
         msc2716_historical=False,
         msc2716_redactions=False,
         msc3787_knock_restricted_join_rule=False,
+        msc3667_int_only_power_levels=False,
     )
     V4 = RoomVersion(
         "4",
@@ -154,6 +159,7 @@ class RoomVersions:
         msc2716_historical=False,
         msc2716_redactions=False,
         msc3787_knock_restricted_join_rule=False,
+        msc3667_int_only_power_levels=False,
     )
     V5 = RoomVersion(
         "5",
@@ -171,6 +177,7 @@ class RoomVersions:
         msc2716_historical=False,
         msc2716_redactions=False,
         msc3787_knock_restricted_join_rule=False,
+        msc3667_int_only_power_levels=False,
     )
     V6 = RoomVersion(
         "6",
@@ -188,6 +195,7 @@ class RoomVersions:
         msc2716_historical=False,
         msc2716_redactions=False,
         msc3787_knock_restricted_join_rule=False,
+        msc3667_int_only_power_levels=False,
     )
     MSC2176 = RoomVersion(
         "org.matrix.msc2176",
@@ -205,6 +213,7 @@ class RoomVersions:
         msc2716_historical=False,
         msc2716_redactions=False,
         msc3787_knock_restricted_join_rule=False,
+        msc3667_int_only_power_levels=False,
     )
     V7 = RoomVersion(
         "7",
@@ -222,6 +231,7 @@ class RoomVersions:
         msc2716_historical=False,
         msc2716_redactions=False,
         msc3787_knock_restricted_join_rule=False,
+        msc3667_int_only_power_levels=False,
     )
     V8 = RoomVersion(
         "8",
@@ -239,6 +249,7 @@ class RoomVersions:
         msc2716_historical=False,
         msc2716_redactions=False,
         msc3787_knock_restricted_join_rule=False,
+        msc3667_int_only_power_levels=False,
     )
     V9 = RoomVersion(
         "9",
@@ -256,6 +267,7 @@ class RoomVersions:
         msc2716_historical=False,
         msc2716_redactions=False,
         msc3787_knock_restricted_join_rule=False,
+        msc3667_int_only_power_levels=False,
     )
     MSC2716v3 = RoomVersion(
         "org.matrix.msc2716v3",
@@ -273,6 +285,7 @@ class RoomVersions:
         msc2716_historical=True,
         msc2716_redactions=True,
         msc3787_knock_restricted_join_rule=False,
+        msc3667_int_only_power_levels=False,
     )
     MSC3787 = RoomVersion(
         "org.matrix.msc3787",
@@ -290,6 +303,25 @@ class RoomVersions:
         msc2716_historical=False,
         msc2716_redactions=False,
         msc3787_knock_restricted_join_rule=True,
+        msc3667_int_only_power_levels=False,
+    )
+    V10 = RoomVersion(
+        "10",
+        RoomDisposition.STABLE,
+        EventFormatVersions.V3,
+        StateResolutionVersions.V2,
+        enforce_key_validity=True,
+        special_case_aliases_auth=False,
+        strict_canonicaljson=True,
+        limit_notifications_power_levels=True,
+        msc2176_redaction_rules=False,
+        msc3083_join_rules=True,
+        msc3375_redaction_rules=True,
+        msc2403_knocking=True,
+        msc2716_historical=False,
+        msc2716_redactions=False,
+        msc3787_knock_restricted_join_rule=True,
+        msc3667_int_only_power_levels=True,
     )


@@ -308,6 +340,7 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
         RoomVersions.V9,
         RoomVersions.MSC2716v3,
         RoomVersions.MSC3787,
+        RoomVersions.V10,
     )
 }
@@ -106,7 +106,9 @@ def register_sighup(func: Callable[P, None], *args: P.args, **kwargs: P.kwargs)
 def start_worker_reactor(
     appname: str,
     config: HomeServerConfig,
-    run_command: Callable[[], None] = reactor.run,
+    # Use a lambda to avoid binding to a given reactor at import time.
+    # (needed when synapse.app.complement_fork_starter is being used)
+    run_command: Callable[[], None] = lambda: reactor.run(),
 ) -> None:
     """Run the reactor in the main process

@@ -141,7 +143,9 @@ def start_reactor(
     daemonize: bool,
     print_pidfile: bool,
     logger: logging.Logger,
-    run_command: Callable[[], None] = reactor.run,
+    # Use a lambda to avoid binding to a given reactor at import time.
+    # (needed when synapse.app.complement_fork_starter is being used)
+    run_command: Callable[[], None] = lambda: reactor.run(),
 ) -> None:
     """Run the reactor in the main process
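The lambda matters because a plain `reactor.run` default is evaluated once, when the module is imported, while `lambda: reactor.run()` defers the attribute lookup to call time. A hedged toy demonstration of the difference (not Synapse code):

```python
class Reactor:
    def run(self) -> None:
        print("reactor installed at import time")

reactor = Reactor()

def eager(run_command=reactor.run):            # binds the method at definition time
    run_command()

def lazy(run_command=lambda: reactor.run()):   # looks `reactor.run` up when called
    run_command()

# Later, something swaps in a different reactor (as the fork starter does).
reactor.run = lambda: print("reactor installed after fork")  # type: ignore[assignment]

eager()  # -> "reactor installed at import time"
lazy()   # -> "reactor installed after fork"
```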
@@ -39,6 +39,7 @@ from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
 from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
 from synapse.replication.slave.storage.registration import SlavedRegistrationStore
 from synapse.server import HomeServer
+from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
 from synapse.storage.databases.main.room import RoomWorkerStore
 from synapse.types import StateMap
 from synapse.util import SYNAPSE_VERSION
@@ -60,7 +61,17 @@ class AdminCmdSlavedStore(
     BaseSlavedStore,
     RoomWorkerStore,
 ):
-    pass
+    def __init__(
+        self,
+        database: DatabasePool,
+        db_conn: LoggingDatabaseConnection,
+        hs: "HomeServer",
+    ):
+        super().__init__(database, db_conn, hs)
+
+        # Annoyingly `filter_events_for_client` assumes that this exists. We
+        # should refactor it to take a `Clock` directly.
+        self.clock = hs.get_clock()


 class AdminCmdServer(HomeServer):
190
synapse/app/complement_fork_starter.py
Normal file
@@ -0,0 +1,190 @@
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ## What this script does
#
# This script spawns multiple workers, whilst only going through the code loading
# process once. The net effect is that start-up time for a swarm of workers is
# reduced, particularly in CPU-constrained environments.
#
# Before the workers are spawned, the database is prepared in order to avoid the
# workers racing.
#
# ## Stability
#
# This script is only intended for use within the Synapse images for the
# Complement test suite.
# There are currently no stability guarantees whatsoever; especially not about:
# - whether it will continue to exist in future versions;
# - the format of its command-line arguments; or
# - any details about its behaviour or principles of operation.
#
# ## Usage
#
# The first argument should be the path to the database configuration, used to
# set up the database. The rest of the arguments are used as follows:
# Each worker is specified as an argument group (each argument group is
# separated by '--').
# The first argument in each argument group is the Python module name of the application
# to start. Further arguments are then passed to that module as-is.
#
# ## Example
#
#   python -m synapse.app.complement_fork_starter path_to_db_config.yaml \
#     synapse.app.homeserver [args..] -- \
#     synapse.app.generic_worker [args..] -- \
#     ...
#     synapse.app.generic_worker [args..]
#
import argparse
import importlib
import itertools
import multiprocessing
import sys
from typing import Any, Callable, List

from twisted.internet.main import installReactor


class ProxiedReactor:
    """
    Twisted tracks the 'installed' reactor as a global variable.
    (Actually, it does some module trickery, but the effect is similar.)

    The default EpollReactor is buggy if it's created before a process is
    forked, then used in the child.
    See https://twistedmatrix.com/trac/ticket/4759#comment:17.

    However, importing certain Twisted modules will automatically create and
    install a reactor if one hasn't already been installed.
    It's not normally possible to re-install a reactor.

    Given the goal of launching workers with fork() to only import the code once,
    this presents a conflict.
    Our workaround is to 'install' this ProxiedReactor which prevents Twisted
    from creating and installing one, but which lets us replace the actual reactor
    in use later on.
    """

    def __init__(self) -> None:
        self.___reactor_target: Any = None

    def _install_real_reactor(self, new_reactor: Any) -> None:
        """
        Install a real reactor for this ProxiedReactor to forward lookups onto.

        This method is specific to our ProxiedReactor and should not clash with
        any names used on an actual Twisted reactor.
        """
        self.___reactor_target = new_reactor

    def __getattr__(self, attr_name: str) -> Any:
        return getattr(self.___reactor_target, attr_name)


def _worker_entrypoint(
    func: Callable[[], None], proxy_reactor: ProxiedReactor, args: List[str]
) -> None:
    """
    Entrypoint for a forked worker process.

    We just need to set up the command-line arguments, create our real reactor
    and then kick off the worker's main() function.
    """

    sys.argv = args

    from twisted.internet.epollreactor import EPollReactor

    proxy_reactor._install_real_reactor(EPollReactor())
    func()


def main() -> None:
    """
    Entrypoint for the forking launcher.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("db_config", help="Path to database config file")
    parser.add_argument(
        "args",
        nargs="...",
        help="Argument groups separated by `--`. "
        "The first argument of each group is a Synapse app name. "
        "Subsequent arguments are passed through.",
    )
    ns = parser.parse_args()

    # Split up the subsequent arguments into each workers' arguments;
    # `--` is our delimiter of choice.
    args_by_worker: List[List[str]] = [
        list(args)
        for cond, args in itertools.groupby(ns.args, lambda ele: ele != "--")
        if cond and args
    ]

    # Prevent Twisted from installing a shared reactor that all the workers will
    # inherit when we fork(), by installing our own beforehand.
    proxy_reactor = ProxiedReactor()
    installReactor(proxy_reactor)

    # Import the entrypoints for all the workers.
    worker_functions = []
    for worker_args in args_by_worker:
        worker_module = importlib.import_module(worker_args[0])
        worker_functions.append(worker_module.main)

    # We need to prepare the database first as otherwise all the workers will
    # try to create a schema version table and some will crash out.
    from synapse._scripts import update_synapse_database

    update_proc = multiprocessing.Process(
        target=_worker_entrypoint,
        args=(
            update_synapse_database.main,
            proxy_reactor,
            [
                "update_synapse_database",
                "--database-config",
                ns.db_config,
                "--run-background-updates",
            ],
        ),
    )
    print("===== PREPARING DATABASE =====", file=sys.stderr)
    update_proc.start()
    update_proc.join()
    print("===== PREPARED DATABASE =====", file=sys.stderr)

    # At this point, we've imported all the main entrypoints for all the workers.
    # Now we basically just fork() out to create the workers we need.
    # Because we're using fork(), all the workers get a clone of this launcher's
    # memory space and don't need to repeat the work of loading the code!
    # Instead of using fork() directly, we use the multiprocessing library,
    # which uses fork() on Unix platforms.
    processes = []
    for (func, worker_args) in zip(worker_functions, args_by_worker):
        process = multiprocessing.Process(
            target=_worker_entrypoint, args=(func, proxy_reactor, worker_args)
        )
        process.start()
        processes.append(process)

    # Be a good parent and wait for our children to die before exiting.
    for process in processes:
        process.join()


if __name__ == "__main__":
    main()
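The `itertools.groupby` expression above is the entire argument-group parser; a hedged standalone demo of how it splits on `--` (the module names and flags are invented):

```python
import itertools
from typing import List

argv = [
    "synapse.app.homeserver", "--config-path", "hs.yaml",
    "--",
    "synapse.app.generic_worker", "--config-path", "worker1.yaml",
]

# Consecutive non-"--" elements form one group; the "--" groups are dropped.
args_by_worker: List[List[str]] = [
    list(args)
    for cond, args in itertools.groupby(argv, lambda ele: ele != "--")
    if cond and args
]
print(args_by_worker)
# [['synapse.app.homeserver', '--config-path', 'hs.yaml'],
#  ['synapse.app.generic_worker', '--config-path', 'worker1.yaml']]
```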
@@ -44,7 +44,6 @@ from synapse.app._base import (
     register_start,
 )
 from synapse.config._base import ConfigError, format_config_error
-from synapse.config.emailconfig import ThreepidBehaviour
 from synapse.config.homeserver import HomeServerConfig
 from synapse.config.server import ListenerConfig
 from synapse.federation.transport.server import TransportLayerServer
@@ -202,7 +201,7 @@ class SynapseHomeServer(HomeServer):
             }
         )

-        if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+        if self.config.email.can_verify_email:
             from synapse.rest.synapse.client.password_reset import (
                 PasswordResetSubmitTokenResource,
             )
@@ -53,6 +53,18 @@ sent_events_counter = Counter(
     "synapse_appservice_api_sent_events", "Number of events sent to the AS", ["service"]
 )

+sent_ephemeral_counter = Counter(
+    "synapse_appservice_api_sent_ephemeral",
+    "Number of ephemeral events sent to the AS",
+    ["service"],
+)
+
+sent_todevice_counter = Counter(
+    "synapse_appservice_api_sent_todevice",
+    "Number of todevice messages sent to the AS",
+    ["service"],
+)
+
 HOUR_IN_MS = 60 * 60 * 1000


@@ -310,6 +322,8 @@ class ApplicationServiceApi(SimpleHttpClient):
             )
             sent_transactions_counter.labels(service.id).inc()
             sent_events_counter.labels(service.id).inc(len(serialized_events))
+            sent_ephemeral_counter.labels(service.id).inc(len(ephemeral))
+            sent_todevice_counter.labels(service.id).inc(len(to_device_messages))
             return True
         except CodeMessageException as e:
             logger.warning(
@@ -319,7 +319,9 @@ class _ServiceQueuer:
         rooms_of_interesting_users.update(event.room_id for event in events)
         # EDUs
         rooms_of_interesting_users.update(
-            ephemeral["room_id"] for ephemeral in ephemerals
+            ephemeral["room_id"]
+            for ephemeral in ephemerals
+            if ephemeral.get("room_id") is not None
         )

         # Look up the AS users in those rooms
@@ -329,8 +331,9 @@ class _ServiceQueuer:
         )

         # Add recipients of to-device messages.
-        # device_message["user_id"] is the ID of the recipient.
-        users.update(device_message["user_id"] for device_message in to_device_messages)
+        users.update(
+            device_message["to_user_id"] for device_message in to_device_messages
+        )

         # Compute and return the counts / fallback key usage states
         otk_counts = await self._store.count_bulk_e2e_one_time_keys_for_as(users)
@@ -21,7 +21,7 @@ from typing import Any, Callable, Dict, Optional
 import attr

 from synapse.types import JsonDict
-from synapse.util.check_dependencies import DependencyException, check_requirements
+from synapse.util.check_dependencies import check_requirements

 from ._base import Config, ConfigError

@@ -159,12 +159,7 @@ class CacheConfig(Config):

         self.track_memory_usage = cache_config.get("track_memory_usage", False)
         if self.track_memory_usage:
-            try:
-                check_requirements("cache_memory")
-            except DependencyException as e:
-                raise ConfigError(
-                    e.message  # noqa: B306, DependencyException.message is a property
-                )
+            check_requirements("cache_memory")

         expire_caches = cache_config.get("expire_caches", True)
         cache_entry_ttl = cache_config.get("cache_entry_ttl", "30m")
@@ -18,7 +18,6 @@
 import email.utils
 import logging
 import os
-from enum import Enum
 from typing import Any

 import attr
@@ -131,41 +130,22 @@ class EmailConfig(Config):

         self.email_enable_notifs = email_config.get("enable_notifs", False)

-        self.threepid_behaviour_email = (
-            # Have Synapse handle the email sending if account_threepid_delegates.email
-            # is not defined
-            # msisdn is currently always remote while Synapse does not support any method of
-            # sending SMS messages
-            ThreepidBehaviour.REMOTE
-            if self.root.registration.account_threepid_delegate_email
-            else ThreepidBehaviour.LOCAL
-        )
-
         if config.get("trust_identity_server_for_password_resets"):
             raise ConfigError(
                 'The config option "trust_identity_server_for_password_resets" '
-                'has been replaced by "account_threepid_delegate". '
-                "Please consult the sample config at docs/sample_config.yaml for "
-                "details and update your config file."
+                "is no longer supported. Please remove it from the config file."
             )

-        self.local_threepid_handling_disabled_due_to_email_config = False
-        if (
-            self.threepid_behaviour_email == ThreepidBehaviour.LOCAL
-            and email_config == {}
-        ):
-            # We cannot warn the user this has happened here
-            # Instead do so when a user attempts to reset their password
-            self.local_threepid_handling_disabled_due_to_email_config = True
-
-            self.threepid_behaviour_email = ThreepidBehaviour.OFF
+        # If we have email config settings, assume that we can verify ownership of
+        # email addresses.
+        self.can_verify_email = email_config != {}

         # Get lifetime of a validation token in milliseconds
         self.email_validation_token_lifetime = self.parse_duration(
             email_config.get("validation_token_lifetime", "1h")
         )

-        if self.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+        if self.can_verify_email:
             missing = []
             if not self.email_notif_from:
                 missing.append("email.notif_from")
@@ -356,18 +336,3 @@ class EmailConfig(Config):
                 "Config option email.invite_client_location must be a http or https URL",
                 path=("email", "invite_client_location"),
             )
-
-
-class ThreepidBehaviour(Enum):
-    """
-    Enum to define the behaviour of Synapse with regards to when it contacts an identity
-    server for 3pid registration and password resets
-
-    REMOTE = use an external server to send tokens
-    LOCAL = send tokens ourselves
-    OFF = disable registration via 3pid and password resets
-    """
-
-    REMOTE = "remote"
-    LOCAL = "local"
-    OFF = "off"
@@ -87,3 +87,6 @@ class ExperimentalConfig(Config):

         # MSC3715: dir param on /relations.
         self.msc3715_enabled: bool = experimental.get("msc3715_enabled", False)
+
+        # MSC3827: Filtering of /publicRooms by room type
+        self.msc3827_enabled: bool = experimental.get("msc3827_enabled", False)
@@ -15,14 +15,9 @@
 from typing import Any

 from synapse.types import JsonDict
+from synapse.util.check_dependencies import check_requirements

-from ._base import Config, ConfigError
-
-MISSING_AUTHLIB = """Missing authlib library. This is required for jwt login.
-
-    Install by running:
-        pip install synapse[jwt]
-    """
+from ._base import Config


 class JWTConfig(Config):
@@ -41,13 +36,7 @@ class JWTConfig(Config):
             # that the claims exist on the JWT.
             self.jwt_issuer = jwt_config.get("issuer")
             self.jwt_audiences = jwt_config.get("audiences")

-            try:
-                from authlib.jose import JsonWebToken
-
-                JsonWebToken  # To stop unused lint.
-            except ImportError:
-                raise ConfigError(MISSING_AUTHLIB)
+            check_requirements("jwt")
         else:
             self.jwt_enabled = False
             self.jwt_secret = None
@@ -18,7 +18,7 @@ from typing import Any, Optional
 import attr

 from synapse.types import JsonDict
-from synapse.util.check_dependencies import DependencyException, check_requirements
+from synapse.util.check_dependencies import check_requirements

 from ._base import Config, ConfigError

@@ -57,12 +57,7 @@ class MetricsConfig(Config):

         self.sentry_enabled = "sentry" in config
         if self.sentry_enabled:
-            try:
-                check_requirements("sentry")
-            except DependencyException as e:
-                raise ConfigError(
-                    e.message  # noqa: B306, DependencyException.message is a property
-                )
+            check_requirements("sentry")

         self.sentry_dsn = config["sentry"].get("dsn")
         if not self.sentry_dsn:
@@ -24,7 +24,7 @@ from synapse.types import JsonDict
 from synapse.util.module_loader import load_module
 from synapse.util.stringutils import parse_and_validate_mxc_uri

-from ..util.check_dependencies import DependencyException, check_requirements
+from ..util.check_dependencies import check_requirements
 from ._base import Config, ConfigError, read_file

 DEFAULT_USER_MAPPING_PROVIDER = "synapse.handlers.oidc.JinjaOidcMappingProvider"
@@ -41,12 +41,7 @@ class OIDCConfig(Config):
         if not self.oidc_providers:
             return

-        try:
-            check_requirements("oidc")
-        except DependencyException as e:
-            raise ConfigError(
-                e.message  # noqa: B306, DependencyException.message is a property
-            ) from e
+        check_requirements("oidc")

         # check we don't have any duplicate idp_ids now. (The SSO handler will also
         # check for duplicates when the REST listeners get registered, but that happens
@@ -146,7 +141,6 @@ OIDC_PROVIDER_CONFIG_WITH_ID_SCHEMA = {
     "allOf": [OIDC_PROVIDER_CONFIG_SCHEMA, {"required": ["idp_id", "idp_name"]}]
 }

-
 # the `oidc_providers` list can either be None (as it is in the default config), or
 # a list of provider configs, each of which requires an explicit ID and name.
 OIDC_PROVIDER_LIST_SCHEMA = {
@@ -136,6 +136,11 @@ class RatelimitConfig(Config):
             defaults={"per_second": 0.003, "burst_count": 5},
         )

+        self.rc_invites_per_issuer = RateLimitConfig(
+            config.get("rc_invites", {}).get("per_issuer", {}),
+            defaults={"per_second": 0.3, "burst_count": 10},
+        )
+
         self.rc_third_party_invite = RateLimitConfig(
             config.get("rc_third_party_invite", {}),
             defaults={
@@ -20,6 +20,13 @@ from synapse.config._base import Config, ConfigError
 from synapse.types import JsonDict, RoomAlias, UserID
 from synapse.util.stringutils import random_string_with_symbols, strtobool

+NO_EMAIL_DELEGATE_ERROR = """\
+Delegation of email verification to an identity server is no longer supported. To
+continue to allow users to add email addresses to their accounts, and use them for
+password resets, configure Synapse with an SMTP server via the `email` setting, and
+remove `account_threepid_delegates.email`.
+"""
+

 class RegistrationConfig(Config):
     section = "registration"
@@ -51,7 +58,9 @@ class RegistrationConfig(Config):
         self.bcrypt_rounds = config.get("bcrypt_rounds", 12)

         account_threepid_delegates = config.get("account_threepid_delegates") or {}
-        self.account_threepid_delegate_email = account_threepid_delegates.get("email")
+        if "email" in account_threepid_delegates:
+            raise ConfigError(NO_EMAIL_DELEGATE_ERROR)
+        # self.account_threepid_delegate_email = account_threepid_delegates.get("email")
         self.account_threepid_delegate_msisdn = account_threepid_delegates.get("msisdn")
         self.default_identity_server = config.get("default_identity_server")
         self.allow_guest_access = config.get("allow_guest_access", False)
@@ -21,7 +21,7 @@ import attr

 from synapse.config.server import generate_ip_set
 from synapse.types import JsonDict
-from synapse.util.check_dependencies import DependencyException, check_requirements
+from synapse.util.check_dependencies import check_requirements
 from synapse.util.module_loader import load_module

 from ._base import Config, ConfigError
@@ -184,13 +184,7 @@ class ContentRepositoryConfig(Config):
         )
         self.url_preview_enabled = config.get("url_preview_enabled", False)
         if self.url_preview_enabled:
-            try:
-                check_requirements("url_preview")
-
-            except DependencyException as e:
-                raise ConfigError(
-                    e.message  # noqa: B306, DependencyException.message is a property
-                )
+            check_requirements("url_preview")

             proxy_env = getproxies_environment()
             if "url_preview_ip_range_blacklist" not in config:
@@ -18,7 +18,7 @@ from typing import Any, List, Set

 from synapse.config.sso import SsoAttributeRequirement
 from synapse.types import JsonDict
-from synapse.util.check_dependencies import DependencyException, check_requirements
+from synapse.util.check_dependencies import check_requirements
 from synapse.util.module_loader import load_module, load_python_module

 from ._base import Config, ConfigError
@@ -76,12 +76,7 @@ class SAML2Config(Config):
         if not saml2_config.get("sp_config") and not saml2_config.get("config_path"):
             return

-        try:
-            check_requirements("saml2")
-        except DependencyException as e:
-            raise ConfigError(
-                e.message  # noqa: B306, DependencyException.message is a property
-            )
+        check_requirements("saml2")

         self.saml2_enabled = True
@@ -15,7 +15,7 @@
 from typing import Any, List, Set

 from synapse.types import JsonDict
-from synapse.util.check_dependencies import DependencyException, check_requirements
+from synapse.util.check_dependencies import check_requirements

 from ._base import Config, ConfigError

@@ -40,12 +40,7 @@ class TracerConfig(Config):
         if not self.opentracer_enabled:
             return

-        try:
-            check_requirements("opentracing")
-        except DependencyException as e:
-            raise ConfigError(
-                e.message  # noqa: B306, DependencyException.message is a property
-            )
+        check_requirements("opentracing")

         # The tracer is enabled so sanitize the config
@@ -740,6 +740,32 @@ def _check_power_levels(
         except Exception:
             raise SynapseError(400, "Not a valid power level: %s" % (v,))

+    # Reject events with stringy power levels if required by room version
+    if (
+        event.type == EventTypes.PowerLevels
+        and room_version_obj.msc3667_int_only_power_levels
+    ):
+        for k, v in event.content.items():
+            if k in {
+                "users_default",
+                "events_default",
+                "state_default",
+                "ban",
+                "redact",
+                "kick",
+                "invite",
+            }:
+                if not isinstance(v, int):
+                    raise SynapseError(400, f"{v!r} must be an integer.")
+            if k in {"events", "notifications", "users"}:
+                if not isinstance(v, dict) or not all(
+                    isinstance(v, int) for v in v.values()
+                ):
+                    raise SynapseError(
+                        400,
+                        f"{v!r} must be a dict wherein all the values are integers.",
+                    )
+
     key = (event.type, event.state_key)
     current_state = auth_events.get(key)
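Concretely, under a room version with `msc3667_int_only_power_levels` the first `m.room.power_levels` content below passes the check above and the second is rejected with a 400 (a hedged illustration, not real events):

```python
ok_content = {
    "ban": 50,  # plain integers are accepted
    "users": {"@alice:example.org": 100},
}

bad_content = {
    "ban": "50",  # stringy level -> SynapseError(400, ...)
    "users": {"@alice:example.org": "100"},  # non-integer dict value -> 400 too
}
```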
@@ -120,7 +120,7 @@ class EventBuilder:
             The signed and hashed event.
         """
         if auth_event_ids is None:
-            state_ids = await self._state.get_current_state_ids(
+            state_ids = await self._state.compute_state_after_events(
                 self.room_id, prev_event_ids
             )
             auth_event_ids = self._event_auth_handler.compute_auth_events(
@@ -21,7 +21,6 @@ from typing import (
     Awaitable,
     Callable,
     Collection,
-    Dict,
     List,
     Optional,
     Tuple,
@@ -32,10 +31,11 @@ from typing import (
 from typing_extensions import Literal

 import synapse
+from synapse.api.errors import Codes
 from synapse.rest.media.v1._base import FileInfo
 from synapse.rest.media.v1.media_storage import ReadableFileWrapper
 from synapse.spam_checker_api import RegistrationBehaviour
-from synapse.types import RoomAlias, UserProfile
+from synapse.types import JsonDict, RoomAlias, UserProfile
 from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
 from synapse.util.metrics import Measure
@@ -50,12 +50,12 @@ CHECK_EVENT_FOR_SPAM_CALLBACK = Callable[
|
||||
Awaitable[
|
||||
Union[
|
||||
str,
|
||||
"synapse.api.errors.Codes",
|
||||
Codes,
|
||||
# Highly experimental, not officially part of the spamchecker API, may
|
||||
# disappear without warning depending on the results of ongoing
|
||||
# experiments.
|
||||
# Use this to return additional information as part of an error.
|
||||
Tuple["synapse.api.errors.Codes", Dict],
|
||||
Tuple[Codes, JsonDict],
|
||||
# Deprecated
|
||||
bool,
|
||||
]
|
||||
@@ -70,7 +70,12 @@ USER_MAY_JOIN_ROOM_CALLBACK = Callable[
    Awaitable[
        Union[
            Literal["NOT_SPAM"],
-            "synapse.api.errors.Codes",
+            Codes,
+            # Highly experimental, not officially part of the spamchecker API, may
+            # disappear without warning depending on the results of ongoing
+            # experiments.
+            # Use this to return additional information as part of an error.
+            Tuple[Codes, JsonDict],
            # Deprecated
            bool,
        ]
@@ -81,7 +86,12 @@ USER_MAY_INVITE_CALLBACK = Callable[
    Awaitable[
        Union[
            Literal["NOT_SPAM"],
-            "synapse.api.errors.Codes",
+            Codes,
+            # Highly experimental, not officially part of the spamchecker API, may
+            # disappear without warning depending on the results of ongoing
+            # experiments.
+            # Use this to return additional information as part of an error.
+            Tuple[Codes, JsonDict],
            # Deprecated
            bool,
        ]
@@ -92,7 +102,12 @@ USER_MAY_SEND_3PID_INVITE_CALLBACK = Callable[
    Awaitable[
        Union[
            Literal["NOT_SPAM"],
-            "synapse.api.errors.Codes",
+            Codes,
+            # Highly experimental, not officially part of the spamchecker API, may
+            # disappear without warning depending on the results of ongoing
+            # experiments.
+            # Use this to return additional information as part of an error.
+            Tuple[Codes, JsonDict],
            # Deprecated
            bool,
        ]
@@ -103,7 +118,12 @@ USER_MAY_CREATE_ROOM_CALLBACK = Callable[
    Awaitable[
        Union[
            Literal["NOT_SPAM"],
-            "synapse.api.errors.Codes",
+            Codes,
+            # Highly experimental, not officially part of the spamchecker API, may
+            # disappear without warning depending on the results of ongoing
+            # experiments.
+            # Use this to return additional information as part of an error.
+            Tuple[Codes, JsonDict],
            # Deprecated
            bool,
        ]
@@ -114,7 +134,12 @@ USER_MAY_CREATE_ROOM_ALIAS_CALLBACK = Callable[
    Awaitable[
        Union[
            Literal["NOT_SPAM"],
-            "synapse.api.errors.Codes",
+            Codes,
+            # Highly experimental, not officially part of the spamchecker API, may
+            # disappear without warning depending on the results of ongoing
+            # experiments.
+            # Use this to return additional information as part of an error.
+            Tuple[Codes, JsonDict],
            # Deprecated
            bool,
        ]
@@ -125,7 +150,12 @@ USER_MAY_PUBLISH_ROOM_CALLBACK = Callable[
    Awaitable[
        Union[
            Literal["NOT_SPAM"],
-            "synapse.api.errors.Codes",
+            Codes,
+            # Highly experimental, not officially part of the spamchecker API, may
+            # disappear without warning depending on the results of ongoing
+            # experiments.
+            # Use this to return additional information as part of an error.
+            Tuple[Codes, JsonDict],
            # Deprecated
            bool,
        ]
@@ -154,7 +184,12 @@ CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK = Callable[
    Awaitable[
        Union[
            Literal["NOT_SPAM"],
-            "synapse.api.errors.Codes",
+            Codes,
+            # Highly experimental, not officially part of the spamchecker API, may
+            # disappear without warning depending on the results of ongoing
+            # experiments.
+            # Use this to return additional information as part of an error.
+            Tuple[Codes, JsonDict],
            # Deprecated
            bool,
        ]
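
Taken together, these hunks widen every callback alias in the same way: alongside "NOT_SPAM", a bare Codes value, and the deprecated bool, a module may now return a (Codes, JsonDict) tuple carrying extra error information. As a hedged sketch only (the module class, its config key, and its blocklist are hypothetical and not part of this diff; register_spam_checker_callbacks and ModuleApi are the existing module-API surface), a third-party module written against the widened types could look like this:

# Hypothetical spam-checker module targeting the widened callback types.
from typing import Literal, Tuple, Union

from synapse.api.errors import Codes
from synapse.module_api import ModuleApi
from synapse.types import JsonDict


class ExampleSpamChecker:
    def __init__(self, config: dict, api: ModuleApi):
        # "blocked_rooms" is an invented config key for this sketch.
        self._blocked_rooms = set(config.get("blocked_rooms", []))
        api.register_spam_checker_callbacks(user_may_join_room=self.user_may_join_room)

    async def user_may_join_room(
        self, user_id: str, room_id: str, is_invited: bool
    ) -> Union[Literal["NOT_SPAM"], Codes, Tuple[Codes, JsonDict]]:
        if room_id in self._blocked_rooms:
            # New: attach extra (experimental) error details to the rejection.
            return Codes.FORBIDDEN, {"reason": "room is blocklisted"}
        return "NOT_SPAM"
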
@@ -345,7 +380,7 @@ class SpamChecker:

    async def check_event_for_spam(
        self, event: "synapse.events.EventBase"
-    ) -> Union[Tuple["synapse.api.errors.Codes", Dict], str]:
+    ) -> Union[Tuple[Codes, JsonDict], str]:
        """Checks if a given event is considered "spammy" by this server.

        If the server considers an event spammy, then it will be rejected if
@@ -376,7 +411,16 @@ class SpamChecker:
                elif res is True:
                    # This spam-checker rejects the event with deprecated
                    # return value `True`
-                    return (synapse.api.errors.Codes.FORBIDDEN, {})
+                    return synapse.api.errors.Codes.FORBIDDEN, {}
+                elif (
+                    isinstance(res, tuple)
+                    and len(res) == 2
+                    and isinstance(res[0], synapse.api.errors.Codes)
+                    and isinstance(res[1], dict)
+                ):
+                    return res
+                elif isinstance(res, synapse.api.errors.Codes):
+                    return res, {}
                elif not isinstance(res, str):
                    # mypy complains that we can't reach this code because of the
                    # return type in CHECK_EVENT_FOR_SPAM_CALLBACK, but we don't know
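
The ladder above checks the richest shape first: an explicit (Codes, details) tuple passes through unchanged, a bare Codes gains empty details, and the deprecated bool and string forms are normalized. A standalone sketch of the same precedence, using a stand-in Codes enum rather than Synapse's (illustrative only, not code from this diff):

# Standalone sketch of the normalization precedence implemented above.
from enum import Enum
from typing import Any, Optional, Tuple, Union


class Codes(str, Enum):  # stand-in for synapse.api.errors.Codes
    FORBIDDEN = "M_FORBIDDEN"


def normalize(res: Any) -> Optional[Union[Tuple[Codes, dict], str]]:
    if res is False or res == "NOT_SPAM":
        return None  # not spam; the caller moves on to the next callback
    if res is True:
        return Codes.FORBIDDEN, {}  # deprecated bool rejection
    if (
        isinstance(res, tuple)
        and len(res) == 2
        and isinstance(res[0], Codes)
        and isinstance(res[1], dict)
    ):
        return res  # already the new (Codes, details) shape
    if isinstance(res, Codes):
        return res, {}  # bare error code, empty details
    if isinstance(res, str):
        return res  # deprecated string form, passed through unchanged
    return Codes.FORBIDDEN, {}  # unrecognized value: reject defensively


assert normalize(True) == (Codes.FORBIDDEN, {})
assert normalize(Codes.FORBIDDEN) == (Codes.FORBIDDEN, {})
assert normalize("NOT_SPAM") is None
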
@@ -422,7 +466,7 @@ class SpamChecker:

    async def user_may_join_room(
        self, user_id: str, room_id: str, is_invited: bool
-    ) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]:
+    ) -> Union[Tuple[Codes, JsonDict], Literal["NOT_SPAM"]]:
        """Checks if a given user is allowed to join a room.
        Not called when a user creates a room.

@@ -432,7 +476,7 @@ class SpamChecker:
            is_invited: Whether the user is invited into the room

        Returns:
-            NOT_SPAM if the operation is permitted, Codes otherwise.
+            NOT_SPAM if the operation is permitted, [Codes, Dict] otherwise.
        """
        for callback in self._user_may_join_room_callbacks:
            with Measure(
@@ -443,21 +487,28 @@ class SpamChecker:
                if res is True or res is self.NOT_SPAM:
                    continue
                elif res is False:
-                    return synapse.api.errors.Codes.FORBIDDEN
+                    return synapse.api.errors.Codes.FORBIDDEN, {}
                elif isinstance(res, synapse.api.errors.Codes):
-                    return res
+                    return res, {}
+                elif (
+                    isinstance(res, tuple)
+                    and len(res) == 2
+                    and isinstance(res[0], synapse.api.errors.Codes)
+                    and isinstance(res[1], dict)
+                ):
+                    return res
                else:
                    logger.warning(
                        "Module returned invalid value, rejecting join as spam"
                    )
-                    return synapse.api.errors.Codes.FORBIDDEN
+                    return synapse.api.errors.Codes.FORBIDDEN, {}

        # No spam-checker has rejected the request, let it pass.
        return self.NOT_SPAM

    async def user_may_invite(
        self, inviter_userid: str, invitee_userid: str, room_id: str
-    ) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]:
+    ) -> Union[Tuple[Codes, dict], Literal["NOT_SPAM"]]:
        """Checks if a given user may send an invite

        Args:
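
On the calling side, the uniform rejection shape means handlers can destructure the result without type-sniffing. A hedged sketch of a call site (the helper function is hypothetical; additional_fields on SynapseError is assumed from the accompanying handler changes in this series):

from synapse.api.errors import SynapseError


async def enforce_join_check(spam_checker, user_id: str, room_id: str) -> None:
    # Hypothetical helper mirroring how handlers consume the new return shape.
    res = await spam_checker.user_may_join_room(user_id, room_id, is_invited=False)
    if res != spam_checker.NOT_SPAM:
        errcode, details = res  # always a (Codes, JsonDict) pair on rejection
        raise SynapseError(
            403,
            "Not allowed to join this room",
            errcode=errcode,
            additional_fields=details,
        )
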
@@ -479,21 +530,28 @@ class SpamChecker:
                if res is True or res is self.NOT_SPAM:
                    continue
                elif res is False:
-                    return synapse.api.errors.Codes.FORBIDDEN
+                    return synapse.api.errors.Codes.FORBIDDEN, {}
                elif isinstance(res, synapse.api.errors.Codes):
-                    return res
+                    return res, {}
+                elif (
+                    isinstance(res, tuple)
+                    and len(res) == 2
+                    and isinstance(res[0], synapse.api.errors.Codes)
+                    and isinstance(res[1], dict)
+                ):
+                    return res
                else:
                    logger.warning(
                        "Module returned invalid value, rejecting invite as spam"
                    )
-                    return synapse.api.errors.Codes.FORBIDDEN
+                    return synapse.api.errors.Codes.FORBIDDEN, {}

        # No spam-checker has rejected the request, let it pass.
        return self.NOT_SPAM

    async def user_may_send_3pid_invite(
        self, inviter_userid: str, medium: str, address: str, room_id: str
-    ) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]:
+    ) -> Union[Tuple[Codes, dict], Literal["NOT_SPAM"]]:
        """Checks if a given user may invite a given threepid into the room

        Note that if the threepid is already associated with a Matrix user ID, Synapse
@@ -519,20 +577,27 @@ class SpamChecker:
                if res is True or res is self.NOT_SPAM:
                    continue
                elif res is False:
-                    return synapse.api.errors.Codes.FORBIDDEN
+                    return synapse.api.errors.Codes.FORBIDDEN, {}
                elif isinstance(res, synapse.api.errors.Codes):
-                    return res
+                    return res, {}
+                elif (
+                    isinstance(res, tuple)
+                    and len(res) == 2
+                    and isinstance(res[0], synapse.api.errors.Codes)
+                    and isinstance(res[1], dict)
+                ):
+                    return res
                else:
                    logger.warning(
                        "Module returned invalid value, rejecting 3pid invite as spam"
                    )
-                    return synapse.api.errors.Codes.FORBIDDEN
+                    return synapse.api.errors.Codes.FORBIDDEN, {}

        return self.NOT_SPAM

    async def user_may_create_room(
        self, userid: str
-    ) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]:
+    ) -> Union[Tuple[Codes, dict], Literal["NOT_SPAM"]]:
        """Checks if a given user may create a room

        Args:
@@ -546,20 +611,27 @@ class SpamChecker:
                if res is True or res is self.NOT_SPAM:
                    continue
                elif res is False:
-                    return synapse.api.errors.Codes.FORBIDDEN
+                    return synapse.api.errors.Codes.FORBIDDEN, {}
                elif isinstance(res, synapse.api.errors.Codes):
-                    return res
+                    return res, {}
+                elif (
+                    isinstance(res, tuple)
+                    and len(res) == 2
+                    and isinstance(res[0], synapse.api.errors.Codes)
+                    and isinstance(res[1], dict)
+                ):
+                    return res
                else:
                    logger.warning(
                        "Module returned invalid value, rejecting room creation as spam"
                    )
-                    return synapse.api.errors.Codes.FORBIDDEN
+                    return synapse.api.errors.Codes.FORBIDDEN, {}

        return self.NOT_SPAM

    async def user_may_create_room_alias(
        self, userid: str, room_alias: RoomAlias
-    ) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]:
+    ) -> Union[Tuple[Codes, dict], Literal["NOT_SPAM"]]:
        """Checks if a given user may create a room alias

        Args:
@@ -575,20 +647,27 @@ class SpamChecker:
                if res is True or res is self.NOT_SPAM:
                    continue
                elif res is False:
-                    return synapse.api.errors.Codes.FORBIDDEN
+                    return synapse.api.errors.Codes.FORBIDDEN, {}
                elif isinstance(res, synapse.api.errors.Codes):
-                    return res
+                    return res, {}
+                elif (
+                    isinstance(res, tuple)
+                    and len(res) == 2
+                    and isinstance(res[0], synapse.api.errors.Codes)
+                    and isinstance(res[1], dict)
+                ):
+                    return res
                else:
                    logger.warning(
                        "Module returned invalid value, rejecting room create as spam"
                    )
-                    return synapse.api.errors.Codes.FORBIDDEN
+                    return synapse.api.errors.Codes.FORBIDDEN, {}

        return self.NOT_SPAM

    async def user_may_publish_room(
        self, userid: str, room_id: str
-    ) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]:
+    ) -> Union[Tuple[Codes, dict], Literal["NOT_SPAM"]]:
        """Checks if a given user may publish a room to the directory

        Args:
@@ -603,14 +682,21 @@ class SpamChecker:
                if res is True or res is self.NOT_SPAM:
                    continue
                elif res is False:
-                    return synapse.api.errors.Codes.FORBIDDEN
+                    return synapse.api.errors.Codes.FORBIDDEN, {}
                elif isinstance(res, synapse.api.errors.Codes):
-                    return res
+                    return res, {}
+                elif (
+                    isinstance(res, tuple)
+                    and len(res) == 2
+                    and isinstance(res[0], synapse.api.errors.Codes)
+                    and isinstance(res[1], dict)
+                ):
+                    return res
                else:
                    logger.warning(
                        "Module returned invalid value, rejecting room publication as spam"
                    )
-                    return synapse.api.errors.Codes.FORBIDDEN
+                    return synapse.api.errors.Codes.FORBIDDEN, {}

        return self.NOT_SPAM

@@ -678,7 +764,7 @@ class SpamChecker:

    async def check_media_file_for_spam(
        self, file_wrapper: ReadableFileWrapper, file_info: FileInfo
-    ) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]:
+    ) -> Union[Tuple[Codes, dict], Literal["NOT_SPAM"]]:
        """Checks if a piece of newly uploaded media should be blocked.

        This will be called for local uploads, downloads of remote media, each
@@ -715,13 +801,20 @@ class SpamChecker:
                if res is False or res is self.NOT_SPAM:
                    continue
                elif res is True:
-                    return synapse.api.errors.Codes.FORBIDDEN
+                    return synapse.api.errors.Codes.FORBIDDEN, {}
                elif isinstance(res, synapse.api.errors.Codes):
-                    return res
+                    return res, {}
+                elif (
+                    isinstance(res, tuple)
+                    and len(res) == 2
+                    and isinstance(res[0], synapse.api.errors.Codes)
+                    and isinstance(res[1], dict)
+                ):
+                    return res
                else:
                    logger.warning(
                        "Module returned invalid value, rejecting media file as spam"
                    )
-                    return synapse.api.errors.Codes.FORBIDDEN
+                    return synapse.api.errors.Codes.FORBIDDEN, {}

        return self.NOT_SPAM

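
Note the inverted legacy convention for media: False historically meant "allow", so it is grouped with NOT_SPAM, while True rejects. A hedged module-side sketch (the hash blocklist is hypothetical; write_chunks_to is the documented ReadableFileWrapper helper):

import hashlib
import io
from typing import Literal, Tuple, Union

from synapse.api.errors import Codes
from synapse.types import JsonDict

KNOWN_BAD_HASHES: set = set()  # hypothetical blocklist of SHA-256 hex digests


async def check_media_file_for_spam(
    file_wrapper, file_info
) -> Union[Literal["NOT_SPAM"], Tuple[Codes, JsonDict]]:
    # Stream the uploaded file into memory chunk by chunk, then hash it.
    buffer = io.BytesIO()
    await file_wrapper.write_chunks_to(buffer.write)
    digest = hashlib.sha256(buffer.getvalue()).hexdigest()
    if digest in KNOWN_BAD_HASHES:
        return Codes.FORBIDDEN, {"reason": "matched a known-bad file hash"}
    return "NOT_SPAM"
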
@@ -464,14 +464,7 @@ class ThirdPartyEventRules:
        Returns:
            A dict mapping (event type, state key) to state event.
        """
-        state_ids = await self._storage_controllers.state.get_current_state_ids(room_id)
-        room_state_events = await self.store.get_events(state_ids.values())
-
-        state_events = {}
-        for key, event_id in state_ids.items():
-            state_events[key] = room_state_events[event_id]
-
-        return state_events
+        return await self._storage_controllers.state.get_current_state(room_id)

    async def on_profile_update(
        self, user_id: str, new_profile: ProfileInfo, by_admin: bool, deactivation: bool
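
The replacement is behavior-preserving: get_current_state performs the same id-to-event resolution inside the state storage controller that the deleted lines did by hand. For reference, a sketch of the removed computation (illustrative only, assuming the controller resolves event IDs identically):

from typing import Any, Dict, Tuple


async def current_state_by_key(storage, store, room_id: str) -> Dict[Tuple[str, str], Any]:
    # The hand-rolled version: fetch the (type, state_key) -> event_id map,
    # bulk-load the events, then re-key the events by (type, state_key).
    state_ids = await storage.state.get_current_state_ids(room_id)
    events = await store.get_events(state_ids.values())
    return {key: events[event_id] for key, event_id in state_ids.items()}
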