Compare commits
81 commits: dmr/return ... azren/comp
| SHA1 |
|---|
| a037c2ed43 |
| fd1d3e1fb3 |
| 1a50b18994 |
| 584c670802 |
| 61c5650058 |
| db6cc8f35b |
| 8aaa4b7b5d |
| 2622b28c5c |
| eb2c7e51c4 |
| d6b511e669 |
| c3ccad7785 |
| a8bbf08576 |
| 707d5e4e48 |
| 596e13ce74 |
| efbc338043 |
| d37841787a |
| 71aace8a0d |
| a5819f7da9 |
| 7d49d86b60 |
| f7768f62cb |
| 6c83c27107 |
| d138187045 |
| b10257e879 |
| ea01d4c2de |
| f1c149cb18 |
| 3e5dda1a47 |
| 0420d4e6a5 |
| bb7fdd821b |
| 85551b7a85 |
| 261c9763c4 |
| 50022cff96 |
| fa74536384 |
| 7f3352743e |
| e704cc2a48 |
| 90d9fc7505 |
| a7304adc7d |
| 47854c71e9 |
| a10988983a |
| dcfd864970 |
| e584534403 |
| aa2c027792 |
| 26f2bfedbf |
| f78b68a96b |
| 03db6701d5 |
| 8f2a52766b |
| 6fc8be9a1b |
| 9391de3f37 |
| 52913d56a5 |
| 724aef9a87 |
| 80828eda06 |
| 4ecf51812e |
| a2d7195e01 |
| 51e2db3598 |
| 4054dfa409 |
| b25a494779 |
| ebd8baf61f |
| ba7a91aea5 |
| 2843058a8b |
| 5fca3c8ae6 |
| ee557b5375 |
| 706b0e41a1 |
| 8c0fe97edf |
| 60453315bd |
| 9ffa787eb2 |
| 9b5782d51d |
| c17e698e1b |
| 6c92ba3eac |
| c4ef61136f |
| 6a751ff5e0 |
| f455b0e420 |
| da1f804aa0 |
| ffb96458d3 |
| 2e3d7f5e15 |
| ede5974f3d |
| b88026654f |
| f84cb2c79d |
| 5e32e2b12a |
| 1b76638c2a |
| f122710716 |
| c0915ee998 |
| b3590614da |
.github/workflows/docs.yaml (vendored, 1 change)
@@ -61,6 +61,5 @@ jobs:
         uses: peaceiris/actions-gh-pages@068dc23d9710f1ba62e86896f84735d869951305 # v3.8.0
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
-          keep_files: true
           publish_dir: ./book
           destination_dir: ./${{ steps.vars.outputs.branch-version }}
.github/workflows/tests.yml (vendored, 1 change)
@@ -192,6 +192,7 @@ jobs:
       volumes:
         - ${{ github.workspace }}:/src
       env:
         SYTEST_BRANCH: ${{ github.head_ref }}
         POSTGRES: ${{ matrix.postgres && 1}}
+        MULTI_POSTGRES: ${{ (matrix.postgres == 'multi-postgres') && 1}}
         WORKERS: ${{ matrix.workers && 1 }}
.gitignore (vendored, 1 change)
@@ -40,6 +40,7 @@ __pycache__/
 /.coverage*
 /.mypy_cache/
 /.tox
+/.tox-pg-container
 /build/
 /coverage.*
 /dist/
CHANGES.md (11 changes)
@@ -1,3 +1,12 @@
+Synapse 1.43.0 (2021-09-21)
+===========================
+
+This release drops support for the deprecated, unstable API for [MSC2858 (Multiple SSO Identity Providers)](https://github.com/matrix-org/matrix-doc/blob/master/proposals/2858-Multiple-SSO-Identity-Providers.md#unstable-prefix), as well as the undocumented `experimental.msc2858_enabled` config option. Client authors should update their clients to use the stable API, available since Synapse 1.30.
+
+The documentation has been updated with configuration for routing `/spaces`, `/hierarchy` and `/summary` to workers. See [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.43/docs/upgrade.md#upgrading-to-v1430) for more details.
+
+No significant changes since 1.43.0rc2.
+
 Synapse 1.43.0rc2 (2021-09-17)
 ==============================

@@ -10,8 +19,6 @@ Bugfixes
 Synapse 1.43.0rc1 (2021-09-14)
 ==============================

 This release drops support for the deprecated, unstable API for [MSC2858](https://github.com/matrix-org/matrix-doc/blob/master/proposals/2858-Multiple-SSO-Identity-Providers.md#unstable-prefix), as well as the undocumented `experimental.msc2858_enabled` config option. Client authors should update their clients to use the stable API, available since Synapse 1.30.

 Features
 --------

changelog.d/10659.misc (new file)
@@ -0,0 +1 @@
+Fix GitHub Actions config so we can run sytest on synapse from parallel branches.

changelog.d/10690.bugfix (new file)
@@ -0,0 +1 @@
+Fix a long-standing bug that caused an `AssertionError` when purging history in certain rooms. Contributed by @Kokokokoka.

changelog.d/10782.bugfix (new file)
@@ -0,0 +1 @@
+Fix a long-standing bug which caused deactivated users that were later reactivated to be missing from the user directory.

changelog.d/10796.misc (new file)
@@ -0,0 +1 @@
+Simplify the internal logic which maintains the user directory database tables.

changelog.d/10807.bugfix (new file)
@@ -0,0 +1 @@
+Allow sending a membership event to unban a user. Contributed by @aaronraimist.

changelog.d/10814.feature (new file)
@@ -0,0 +1 @@
+Improve oEmbed previews by processing the author name, photo, and video information.

changelog.d/10819.feature (new file)
@@ -0,0 +1 @@
+Improve oEmbed previews by processing the author name, photo, and video information.

changelog.d/10820.misc (new file)
@@ -0,0 +1 @@
+Fix a long-standing bug where an `m.room.message` event containing a null byte would cause an internal server error.

changelog.d/10826.misc (new file)
@@ -0,0 +1,2 @@
+Opt out of cache expiry for `get_users_who_share_room_with_user`, to hopefully improve `/sync` performance when you
+haven't synced recently.

changelog.d/10827.bugfix (new file)
@@ -0,0 +1 @@
+Fix error in deprecated `/initialSync` endpoint when using the undocumented `from` and `to` parameters.

changelog.d/10829.misc (new file)
@@ -0,0 +1 @@
+Track cache eviction rates more finely in Prometheus' monitoring.

changelog.d/10831.misc (new file)
@@ -0,0 +1 @@
+Add missing type hints to handlers.

changelog.d/10833.misc (new file)
@@ -0,0 +1 @@
+Extend the ModuleApi to let plug-ins check whether an ID is local and to access IP + User Agent data.

changelog.d/10835.misc (new file)
@@ -0,0 +1 @@
+Add a test to ensure state events sent by modules get persisted correctly.

changelog.d/10838.misc (new file)
@@ -0,0 +1 @@
+Rename [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) fields and event types from `chunk` to `batch` to match the `/batch_send` endpoint.

changelog.d/10839.misc (new file)
@@ -0,0 +1 @@
+Rename [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` query parameter from `?prev_event` to more obvious usage with `?prev_event_id`.

changelog.d/10856.misc (new file)
@@ -0,0 +1 @@
+Add missing type hints to handlers.

changelog.d/10859.bugfix (new file)
@@ -0,0 +1 @@
+Fix a bug in Unicode support of the room search admin API. It is now possible to search for rooms with non-ASCII characters.

changelog.d/10865.doc (new file)
@@ -0,0 +1 @@
+Add developer documentation about experimental configuration flags.

changelog.d/10867.misc (new file)
@@ -0,0 +1 @@
+Add type hints to `synapse.http.site`.

changelog.d/10868.feature (new file)
@@ -0,0 +1 @@
+Speed up responding with large JSON objects to requests.

changelog.d/10869.doc (new file)
@@ -0,0 +1 @@
+Properly remove deleted files from GitHub pages when generating the documentation.

changelog.d/10873.bugfix (new file)
@@ -0,0 +1 @@
+Fix a bug introduced in Synapse 1.37.0 which caused `knock` events which we sent to remote servers to be incorrectly stored in the local database.

changelog.d/10875.bugfix (new file)
@@ -0,0 +1 @@
+Fix invalidating one-time key count cache after claiming keys. Contributed by Tulir at Beeper.

changelog.d/10879.misc (new file)
@@ -0,0 +1 @@
+Include outlier status when we log V2 or V3 events.

changelog.d/10880.misc (new file)
@@ -0,0 +1 @@
+Break down Grafana's cache expiry time series based on reason for eviction; see #10829.

changelog.d/10881.bugfix (new file)
@@ -0,0 +1 @@
+Fix application service users being subject to MAU blocking if MAU had been reached, even if configured not to be blocked.

changelog.d/10883.misc (new file)
@@ -0,0 +1 @@
+Clean up some of the federation event authentication code for clarity.

changelog.d/10884.misc (new file)
@@ -0,0 +1 @@
+Clean up some of the federation event authentication code for clarity.

changelog.d/10885.misc (new file)
@@ -0,0 +1 @@
+Use direct references to config flags.

changelog.d/10887.bugfix (new file)
@@ -0,0 +1 @@
+Allow the `.` and `~` characters when creating registration tokens as per the change to [MSC3231](https://github.com/matrix-org/matrix-doc/pull/3231).

changelog.d/10889.misc (new file)
@@ -0,0 +1 @@
+Clean up some unnecessary parentheses in places around the codebase.

changelog.d/10891.misc (new file)
@@ -0,0 +1 @@
+Improve type hinting in the user directory code.

changelog.d/10893.misc (new file)
@@ -0,0 +1 @@
+Use direct references to config flags.

changelog.d/10896.misc (new file)
@@ -0,0 +1 @@
+Clean up some of the federation event authentication code for clarity.

changelog.d/10897.misc (new file)
@@ -0,0 +1 @@
+Use direct references to config flags.

changelog.d/10898.feature (new file)
@@ -0,0 +1 @@
+Add a `user_may_create_room_with_invites` spam checker callback to allow modules to allow or deny a room creation request based on the invites and/or 3PID invites it includes.

changelog.d/10901.misc (new file)
@@ -0,0 +1 @@
+Clean up some of the federation event authentication code for clarity.

changelog.d/10903.misc (new file)
@@ -0,0 +1 @@
+Drop old functionality which maintained database compatibility with Synapse versions before 1.31.

changelog.d/10905.feature (new file)
@@ -0,0 +1 @@
+Speed up responding with large JSON objects to requests.

changelog.d/10906.misc (new file)
@@ -0,0 +1 @@
+Update development testing script `test_postgresql.sh` to use a supported Python version and make re-runs quicker.

changelog.d/10907.bugfix (new file)
@@ -0,0 +1 @@
+Fix a long-standing bug which could cause events pulled over federation to be incorrectly rejected.

changelog.d/10911.bugfix (new file)
@@ -0,0 +1 @@
+Avoid storing URL cache files in storage providers. Server admins may safely delete the `url_cache/` and `url_cache_thumbnails/` directories from any configured storage providers to reclaim space.

changelog.d/10913.bugfix (new file)
@@ -0,0 +1 @@
+Fix race conditions when creating media store and config directories.

changelog.d/10915.misc (new file)
@@ -0,0 +1 @@
+Clean-up configuration helper classes for the `ServerConfig` class.

changelog.d/10917.misc (new file)
@@ -0,0 +1 @@
+Document and summarize changes in schema version `61` - `64`.

changelog.d/10925.misc (new file)
@@ -0,0 +1 @@
+Update release script to sign the newly created git tags.

changelog.d/10926.misc (new file)
@@ -0,0 +1 @@
+Clean up some of the federation event authentication code for clarity.

changelog.d/10927.bugfix (new file)
@@ -0,0 +1 @@
+Fix a bug introduced in Synapse v1.40.0 where the signature checks for room version 8/9 could be applied to earlier room versions in some situations.

changelog.d/10931.bugfix (new file)
@@ -0,0 +1 @@
+Fix debian builds due to dh-virtualenv no longer being able to build their docs.
contrib/grafana/synapse.json
@@ -6785,7 +6785,7 @@
             "expr": "rate(synapse_util_caches_cache:evicted_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
             "format": "time_series",
             "intervalFactor": 1,
-            "legendFormat": "{{name}} {{job}}-{{index}}",
+            "legendFormat": "{{name}} ({{reason}}) {{job}}-{{index}}",
             "refId": "A"
           }
         ],

@@ -10888,5 +10888,5 @@
   "timezone": "",
   "title": "Synapse",
   "uid": "000000012",
-  "version": 99
+  "version": 100
 }
debian/changelog (vendored, 6 changes)
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.43.0) stable; urgency=medium
+
+  * New synapse release 1.43.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 21 Sep 2021 11:49:05 +0100
+
 matrix-synapse-py3 (1.43.0~rc2) stable; urgency=medium

   * New synapse release 1.43.0~rc2.
docker/Dockerfile-dhvirtualenv
@@ -47,8 +47,9 @@ RUN apt-get update -qq -o Acquire::Languages=none \
     && cd /dh-virtualenv \
     && env DEBIAN_FRONTEND=noninteractive mk-build-deps -ri -t "apt-get -y --no-install-recommends"

-# build it
-RUN cd /dh-virtualenv && dpkg-buildpackage -us -uc -b
+# Build it. Note that building the docs doesn't work due to differences in
+# Sphinx APIs across versions/distros.
+RUN cd /dh-virtualenv && DEB_BUILD_OPTIONS=nodoc dpkg-buildpackage -us -uc -b

 ###
 ### Stage 1
docker/Dockerfile-pgtests
@@ -1,6 +1,6 @@
 # Use the Sytest image that comes with a lot of the build dependencies
 # pre-installed
-FROM matrixdotorg/sytest:latest
+FROM matrixdotorg/sytest:bionic

 # The Sytest image doesn't come with python, so install that
 RUN apt-get update && apt-get -qq install -y python3 python3-dev python3-pip

@@ -8,5 +8,23 @@ RUN apt-get update && apt-get -qq install -y python3 python3-dev python3-pip
 # We need tox to run the tests in run_pg_tests.sh
 RUN python3 -m pip install tox

-ADD run_pg_tests.sh /pg_tests.sh
-ENTRYPOINT /pg_tests.sh
+# Initialise the db
+RUN su -c '/usr/lib/postgresql/10/bin/initdb -D /var/lib/postgresql/data -E "UTF-8" --lc-collate="C.UTF-8" --lc-ctype="C.UTF-8" --username=postgres' postgres
+
+# Add a user with our UID and GID so that files get created on the host owned
+# by us, not root.
+ARG UID
+ARG GID
+RUN groupadd --gid $GID user
+RUN useradd --uid $UID --gid $GID --groups sudo --no-create-home user
+
+# Ensure we can start postgres by sudo-ing as the postgres user.
+RUN apt-get update && apt-get -qq install -y sudo
+RUN echo "user ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+
+ADD run_pg_tests.sh /run_pg_tests.sh
+# Use the "exec form" of ENTRYPOINT (https://docs.docker.com/engine/reference/builder/#entrypoint)
+# so that we can `docker run` this container and pass arguments to pg_tests.sh
+ENTRYPOINT ["/run_pg_tests.sh"]
+
+USER user
docker/run_pg_tests.sh
@@ -10,11 +10,10 @@ set -e
 # Set PGUSER so Synapse's tests know what user to connect to the database with
 export PGUSER=postgres

-# Initialise & start the database
-su -c '/usr/lib/postgresql/9.6/bin/initdb -D /var/lib/postgresql/data -E "UTF-8" --lc-collate="en_US.UTF-8" --lc-ctype="en_US.UTF-8" --username=postgres' postgres
-su -c '/usr/lib/postgresql/9.6/bin/pg_ctl -w -D /var/lib/postgresql/data start' postgres
+# Start the database
+sudo -u postgres /usr/lib/postgresql/10/bin/pg_ctl -w -D /var/lib/postgresql/data start

 # Run the tests
 cd /src
 export TRIAL_FLAGS="-j 4"
-tox --workdir=/tmp -e py35-postgres
+tox --workdir=./.tox-pg-container -e py36-postgres "$@"
docs/SUMMARY.md
@@ -47,6 +47,7 @@
 - [Workers](workers.md)
   - [Using `synctl` with Workers](synctl_workers.md)
   - [Systemd](systemd-with-workers/README.md)
+- [State Compressor](state_compressor.md)
 - [Administration](usage/administration/README.md)
   - [Admin API](usage/administration/admin_api/README.md)
   - [Account Validity](admin_api/account_validity.md)

@@ -74,6 +75,7 @@
 - [Testing]()
   - [OpenTracing](opentracing.md)
   - [Database Schemas](development/database_schema.md)
+  - [Experimental features](development/experimental_features.md)
 - [Synapse Architecture]()
   - [Log Contexts](log_contexts.md)
   - [Replication](replication.md)

docs/development/contributing_guide.md
@@ -170,6 +170,53 @@ To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`:
 SYNAPSE_TEST_LOG_LEVEL=DEBUG trial tests
 ```

+### Running tests under PostgreSQL
+
+Invoking `trial` as above will use an in-memory SQLite database. This is great for
+quick development and testing. However, we recommend using a PostgreSQL database
+in production (and indeed, we have some code paths specific to each database).
+This means that we need to run our unit tests against PostgreSQL too. Our CI does
+this automatically for pull requests and release candidates, but it's sometimes
+useful to reproduce this locally.
+
+To do so, [configure Postgres](../postgres.md) and run `trial` with the
+following environment variables matching your configuration:
+
+- `SYNAPSE_POSTGRES` to anything nonempty
+- `SYNAPSE_POSTGRES_HOST`
+- `SYNAPSE_POSTGRES_USER`
+- `SYNAPSE_POSTGRES_PASSWORD`
+
+For example:
+
+```shell
+export SYNAPSE_POSTGRES=1
+export SYNAPSE_POSTGRES_HOST=localhost
+export SYNAPSE_POSTGRES_USER=postgres
+export SYNAPSE_POSTGRES_PASSWORD=mydevenvpassword
+trial
+```
+
+#### Prebuilt container
+
+Since configuring PostgreSQL can be fiddly, we can make use of a pre-made
+Docker container to set up PostgreSQL and run our tests for us. To do so, run
+
+```shell
+scripts-dev/test_postgresql.sh
+```
+
+Any extra arguments to the script will be passed to `tox` and then to `trial`,
+so we can run a specific test in this container with e.g.
+
+```shell
+scripts-dev/test_postgresql.sh tests.replication.test_sharded_event_persister.EventPersisterShardTestCase
+```
+
+The container creates a folder in your Synapse checkout called
+`.tox-pg-container` and uses this as a tox environment. The output of any
+`trial` runs goes into `_trial_temp` in your synapse source directory, the same
+as running `trial` directly on your host machine.

 ## Run the integration tests ([Sytest](https://github.com/matrix-org/sytest)).

docs/development/experimental_features.md (new file, 37 lines)
@@ -0,0 +1,37 @@
+# Implementing experimental features in Synapse
+
+It can be desirable to implement "experimental" features which are disabled by
+default and must be explicitly enabled via the Synapse configuration. This is
+applicable for features which:
+
+* Are unstable in the Matrix spec (e.g. those defined by an MSC that has not yet been merged).
+* Developers are not confident in their use by general Synapse administrators/users
+  (e.g. a feature is incomplete, buggy, performs poorly, or needs further testing).
+
+Note that this only really applies to features which are expected to be desirable
+to a broad audience. The [module infrastructure](../modules/index.md) should
+instead be investigated for non-standard features.
+
+Guarding experimental features behind configuration flags should help with some
+of the following scenarios:
+
+* Ensure that clients do not assume that unstable features exist (failing
+  gracefully if they do not).
+* Unstable features do not become de-facto standards and can be removed
+  aggressively (since only those who have opted-in will be affected).
+* Ease finding the implementation of unstable features in Synapse (for future
+  removal or stabilization).
+* Ease testing a feature (or removal of feature) due to enabling/disabling without
+  code changes. It also becomes possible to ask for wider testing, if desired.
+
+Experimental configuration flags should be disabled by default (requiring Synapse
+administrators to explicitly opt-in), although there are situations where it makes
+sense (from a product point-of-view) to enable features by default. This is
+expected and not an issue.
+
+It is not a requirement for experimental features to be behind a configuration flag,
+but one should be used if unsure.
+
+New experimental configuration flags should be added under the `experimental`
+configuration key (see the `synapse.config.experimental` file) and either explain
+(briefly) what is being enabled, or include the MSC number.
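To make the guidance above concrete, here is a minimal sketch of such a flag, modeled on the shape of `synapse.config.experimental`. The `msc9999_enabled` option is hypothetical, not a real Synapse setting; the corresponding `homeserver.yaml` key is `experimental_features`.

```python
# Minimal sketch of an experimental configuration flag (illustrative only).
from synapse.config._base import Config


class ExperimentalConfig(Config):
    """Config section for enabling experimental features."""

    section = "experimental"

    def read_config(self, config, **kwargs):
        experimental = config.get("experimental_features") or {}

        # Hypothetical MSC flag: disabled unless the administrator opts in.
        self.msc9999_enabled: bool = experimental.get("msc9999_enabled", False)
```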
docs/development/url_previews.md
@@ -25,16 +25,14 @@ When Synapse is asked to preview a URL it does the following:
 3. Kicks off a background process to generate a preview:
    1. Checks the database cache by URL and timestamp and returns the result if it
       has not expired and was successful (a 2xx return code).
-   2. Checks if the URL matches an oEmbed pattern. If it does, fetch the oEmbed
-      response. If this is an image, replace the URL to fetch and continue. If
-      it is HTML content, use the HTML as the document and continue.
-   3. If it doesn't match an oEmbed pattern, downloads the URL and stores it
-      into a file via the media storage provider and saves the local media
-      metadata.
-   5. If the media is an image:
+   2. Checks if the URL matches an [oEmbed](https://oembed.com/) pattern. If it
+      does, update the URL to download.
+   3. Downloads the URL and stores it into a file via the media storage provider
+      and saves the local media metadata.
+   4. If the media is an image:
       1. Generates thumbnails.
       2. Generates an Open Graph response based on image properties.
-   6. If the media is HTML:
+   5. If the media is HTML:
       1. Decodes the HTML via the stored file.
       2. Generates an Open Graph response from the HTML.
      3. If an image exists in the Open Graph response:

@@ -42,6 +40,13 @@ When Synapse is asked to preview a URL it does the following:
          provider and saves the local media metadata.
       2. Generates thumbnails.
       3. Updates the Open Graph response based on image properties.
+   6. If the media is JSON and an oEmbed URL was found:
+      1. Convert the oEmbed response to an Open Graph response.
+      2. If a thumbnail or image is in the oEmbed response:
+         1. Downloads the URL and stores it into a file via the media storage
+            provider and saves the local media metadata.
+         2. Generates thumbnails.
+         3. Updates the Open Graph response based on image properties.
    7. Stores the result in the database cache.
 4. Returns the result.
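A self-contained sketch of the media-type dispatch in steps 4 to 6 of the revised list follows; it is an illustration only, with invented names, and is not Synapse's actual media repository code.

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class StoredMedia:
    content_type: str


def preview_branch(media: StoredMedia, oembed_url: Optional[str]) -> str:
    """Pick which branch of the preview flow applies to downloaded media."""
    if media.content_type.startswith("image/"):
        return "image"   # step 4: thumbnails + OG response from image properties
    if media.content_type.startswith("text/html"):
        return "html"    # step 5: decode the HTML, build an OG response from it
    if media.content_type == "application/json" and oembed_url is not None:
        return "oembed"  # step 6: convert the oEmbed response to an OG response
    return "none"        # no preview can be generated


assert preview_branch(StoredMedia("image/png"), None) == "image"
assert preview_branch(StoredMedia("application/json"), "https://example.com/oembed") == "oembed"
```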
docs/modules.md
@@ -38,6 +38,35 @@ async def user_may_create_room(user: str) -> bool
 Called when processing a room creation request. The module must return a `bool` indicating
 whether the given user (represented by their Matrix user ID) is allowed to create a room.

+### `user_may_create_room_with_invites`
+
+```python
+async def user_may_create_room_with_invites(
+    user: str,
+    invites: List[str],
+    threepid_invites: List[Dict[str, str]],
+) -> bool
+```
+
+Called when processing a room creation request (right after `user_may_create_room`).
+The module is given the Matrix user ID of the user trying to create a room, as well as a
+list of Matrix users to invite and a list of third-party identifiers (3PID, e.g. email
+addresses) to invite.
+
+An invited Matrix user is represented by their Matrix user ID, and an invited 3PID is
+represented by a dict that includes the 3PID medium (e.g. "email") through its
+`medium` key and its address (e.g. "alice@example.com") through its `address` key.
+
+See [the Matrix specification](https://matrix.org/docs/spec/appendices#pid-types) for more
+information regarding third-party identifiers.
+
+If no invite and/or 3PID invite were specified in the room creation request, the
+corresponding list(s) will be empty.
+
+**Note**: This callback is not called when a room is cloned (e.g. during a room upgrade)
+since no invites are sent when cloning a room. To cover this case, modules also need to
+implement `user_may_create_room`.
+
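As an illustration, a minimal module using this callback might look as follows. The class name and the deny-3PID-invites policy are invented for the example; `register_spam_checker_callbacks` is the module API's registration hook for spam checker callbacks.

```python
# Example module (illustrative only): refuse room creation requests that
# include third-party identifier invites, while allowing everything else.
from typing import Dict, List


class DenyThreepidInvitesExample:
    def __init__(self, config: dict, api):
        # Register the callback with Synapse's module API.
        api.register_spam_checker_callbacks(
            user_may_create_room_with_invites=self.user_may_create_room_with_invites,
        )

    async def user_may_create_room_with_invites(
        self,
        user: str,
        invites: List[str],
        threepid_invites: List[Dict[str, str]],
    ) -> bool:
        # Allow creation only when no 3PID invites are attached; plain
        # Matrix user invites pass through unchecked here.
        return len(threepid_invites) == 0
```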
### `user_may_create_room_alias`

```python
docs/sample_config.yaml
@@ -2362,12 +2362,16 @@ user_directory:
   #enabled: false

   # Defines whether to search all users visible to your HS when searching
-  # the user directory, rather than limiting to users visible in public
-  # rooms. Defaults to false.
+  # the user directory. If false, search results will only contain users
+  # visible in public rooms and users sharing a room with the requester.
+  # Defaults to false.
   #
-  # If you set it true, you'll have to rebuild the user_directory search
-  # indexes, see:
-  # https://matrix-org.github.io/synapse/latest/user_directory.html
+  # NB. If you set this to true, and the last time the user_directory search
+  # indexes were (re)built was before Synapse 1.44, you'll have to
+  # rebuild the indexes in order to search through all known users.
+  # These indexes are built the first time Synapse starts; admins can
+  # manually trigger a rebuild following the instructions at
+  # https://matrix-org.github.io/synapse/latest/user_directory.html
   #
   # Uncomment to return search results containing all known users, even if that
   # user does not share a room with the requester.
@@ -2644,3 +2648,38 @@ redis:
   # Optional password if configured on the Redis instance
   #
   #password: <secret_password>
+
+
+## State compressor ##
+
+# The state compressor is an experimental tool which attempts to
+# reduce the number of rows in the state_groups_state table
+# of postgres databases.
+#
+# For more information please see
+# https://matrix-org.github.io/synapse/latest/state_compressor.html
+#
+state_compressor:
+  # Whether the state compressor should run (defaults to false)
+  # Uncomment to enable it - Note, this requires the 'auto-compressor'
+  # library to be installed
+  #
+  #enabled: true
+
+  # The (rough) number of state groups to load at one time. Defaults
+  # to 500.
+  #
+  #chunk_size: 1000
+
+  # The number of chunks to compress on each run. Defaults to 100.
+  #
+  #number_of_chunks: 1
+
+  # The default level sizes for the compressor to use. Defaults to
+  # 100,50,25.
+  #
+  #default_levels: 128,64,32
+
+  # How frequently to run the state compressor. Defaults to 1d
+  #
+  #time_between_runs: 1w
docs/state_compressor.md (new file, 47 lines)
@@ -0,0 +1,47 @@
+# State compressor
+
+The state compressor is an **experimental** tool that attempts to reduce the number of rows
+in the `state_groups_state` table inside of a postgres database. Documentation on how it works
+can be found on [its github repository](https://github.com/matrix-org/rust-synapse-compress-state).
+
+## Enabling the state compressor
+
+The state compressor requires the python library for the `synapse_auto_compressor` tool to be
+installed. This can be done with pip, or by following the instructions in [the `python.md` file in the source
+repo](https://github.com/matrix-org/rust-synapse-compress-state/blob/main/docs/python.md).
+
+The following configuration options are provided:
+
+- `chunk_size`
+  The number of state groups to work on at once. All of the entries from
+  `state_groups_state` are requested from the database for state groups that are
+  worked on. Therefore small chunk sizes may be needed on machines with low memory.
+  Note: if the compressor fails to find space savings on the chunk as a whole
+  (which may well happen in rooms with lots of backfill in) then the entire chunk
+  is skipped. This defaults to 500.
+
+- `number_of_chunks`
+  The compressor will stop once it has finished compressing this many chunks. Defaults to 100.
+
+- `default_levels`
+  Sizes of each new level in the compression algorithm, as a comma-separated list.
+  The first entry in the list is for the lowest, most granular level, with each
+  subsequent entry being for the next highest level. The number of entries in the
+  list determines the number of levels that will be used. The sum of the sizes of
+  the levels affects the performance of fetching the state from the database, as the
+  sum of the sizes is the upper bound on the number of iterations needed to fetch a
+  given set of state (e.g. with the default "100,50,25", at most 100 + 50 + 25 = 175
+  iterations). This defaults to "100,50,25".
+
+- `time_between_runs`
+  This controls how often the state compressor is run. This defaults to once every
+  day.
+
+An example configuration:
+```yaml
+state_compressor:
+  enabled: true
+  chunk_size: 500
+  number_of_chunks: 50
+  default_levels: 100,50,25
+  time_between_runs: 1d
+```
docs/upgrade.md
@@ -85,6 +85,13 @@ process, for example:
     dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 ```

+# Upgrading to v1.44.0
+
+## The URL preview cache is no longer mirrored to storage providers
+The `url_cache/` and `url_cache_thumbnails/` directories in the media store are
+no longer mirrored to storage providers. These two directories can be safely
+deleted from any configured storage providers to reclaim space.
+
 # Upgrading to v1.43.0

 ## The spaces summary APIs can now be handled by workers

mypy.ini (9 changes)
@@ -85,12 +85,17 @@ files =
   tests/handlers/test_room_summary.py,
   tests/handlers/test_send_email.py,
   tests/handlers/test_sync.py,
+  tests/handlers/test_user_directory.py,
   tests/rest/client/test_login.py,
+  tests/rest/client/test_auth.py,
   tests/storage/test_state.py,
+  tests/storage/test_user_directory.py,
   tests/util/test_itertools.py,
   tests/util/test_stream_change_cache.py

 [mypy-synapse.handlers.*]
 disallow_untyped_defs = True

+[mypy-synapse.rest.*]
+disallow_untyped_defs = True

@@ -252,3 +257,7 @@ ignore_missing_imports = True

 [mypy-ijson.*]
 ignore_missing_imports = True
+
+[mypy-psycopg2.*]
+ignore_missing_imports = True
scripts-dev/release.py
@@ -276,7 +276,7 @@ def tag(gh_token: Optional[str]):
     if click.confirm("Edit text?", default=False):
         changes = click.edit(changes, require_save=False)

-    repo.create_tag(tag_name, message=changes)
+    repo.create_tag(tag_name, message=changes, sign=True)

     if not click.confirm("Push tag to GitHub?", default=True):
         print("")
scripts-dev/test_postgresql.sh (new executable file, 19 lines)
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+
+# This script builds the Docker image to run the PostgreSQL tests, and then runs
+# the tests. It uses a dedicated tox environment so that we don't have to
+# rebuild it each time.
+
+# Command line arguments to this script are forwarded to "tox" and then to "trial".
+
+set -e
+
+# Build, and tag
+docker build docker/ \
+  --build-arg "UID=$(id -u)" \
+  --build-arg "GID=$(id -g)" \
+  -f docker/Dockerfile-pgtests \
+  -t synapsepgtests
+
+# Run, mounting the current directory into /src
+docker run --rm -it -v "$(pwd):/src" -v synapse-pg-test-tox:/tox synapsepgtests "$@"
synapse/__init__.py
@@ -47,7 +47,7 @@ try:
 except ImportError:
     pass

-__version__ = "1.43.0rc2"
+__version__ = "1.43.0"

 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
synapse/api/auth_blocking.py
@@ -81,7 +81,7 @@ class AuthBlocking:
             # We never block the server from doing actions on behalf of
             # users.
             return
-        elif requester.app_service and not self._track_appservice_user_ips:
+        if requester.app_service and not self._track_appservice_user_ips:
             # If we're authenticated as an appservice then we only block
             # auth if `track_appservice_user_ips` is set, as that option
             # implicitly means that application services are part of MAU
synapse/api/constants.py
@@ -121,7 +121,7 @@ class EventTypes:
     SpaceParent = "m.space.parent"

     MSC2716_INSERTION = "org.matrix.msc2716.insertion"
-    MSC2716_CHUNK = "org.matrix.msc2716.chunk"
+    MSC2716_BATCH = "org.matrix.msc2716.batch"
     MSC2716_MARKER = "org.matrix.msc2716.marker"

@@ -209,11 +209,11 @@ class EventContentFields:

     # Used on normal messages to indicate they were historically imported after the fact
     MSC2716_HISTORICAL = "org.matrix.msc2716.historical"
-    # For "insertion" events to indicate what the next chunk ID should be in
+    # For "insertion" events to indicate what the next batch ID should be in
     # order to connect to it
-    MSC2716_NEXT_CHUNK_ID = "org.matrix.msc2716.next_chunk_id"
-    # Used on "chunk" events to indicate which insertion event it connects to
-    MSC2716_CHUNK_ID = "org.matrix.msc2716.chunk_id"
+    MSC2716_NEXT_BATCH_ID = "org.matrix.msc2716.next_batch_id"
+    # Used on "batch" events to indicate which insertion event it connects to
+    MSC2716_BATCH_ID = "org.matrix.msc2716.batch_id"
     # For "marker" events
     MSC2716_MARKER_INSERTION = "org.matrix.msc2716.marker.insertion"

synapse/api/room_versions.py
@@ -244,24 +244,8 @@ class RoomVersions:
         msc2716_historical=False,
         msc2716_redactions=False,
     )
-    MSC2716 = RoomVersion(
-        "org.matrix.msc2716",
-        RoomDisposition.UNSTABLE,
-        EventFormatVersions.V3,
-        StateResolutionVersions.V2,
-        enforce_key_validity=True,
-        special_case_aliases_auth=False,
-        strict_canonicaljson=True,
-        limit_notifications_power_levels=True,
-        msc2176_redaction_rules=False,
-        msc3083_join_rules=False,
-        msc3375_redaction_rules=False,
-        msc2403_knocking=True,
-        msc2716_historical=True,
-        msc2716_redactions=False,
-    )
-    MSC2716v2 = RoomVersion(
-        "org.matrix.msc2716v2",
+    MSC2716v3 = RoomVersion(
+        "org.matrix.msc2716v3",
         RoomDisposition.UNSTABLE,
         EventFormatVersions.V3,
         StateResolutionVersions.V2,

@@ -289,9 +273,9 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
         RoomVersions.V6,
         RoomVersions.MSC2176,
         RoomVersions.V7,
-        RoomVersions.MSC2716,
         RoomVersions.V8,
         RoomVersions.V9,
+        RoomVersions.MSC2716v3,
     )
 }

synapse/api/urls.py
@@ -39,12 +39,12 @@ class ConsentURIBuilder:
         Args:
             hs_config (synapse.config.homeserver.HomeServerConfig):
         """
-        if hs_config.form_secret is None:
+        if hs_config.key.form_secret is None:
             raise ConfigError("form_secret not set in config")
         if hs_config.server.public_baseurl is None:
             raise ConfigError("public_baseurl not set in config")

-        self._hmac_secret = hs_config.form_secret.encode("utf-8")
+        self._hmac_secret = hs_config.key.form_secret.encode("utf-8")
         self._public_baseurl = hs_config.server.public_baseurl

     def build_user_consent_uri(self, user_id):
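This hunk is the first of many in this compare that follow the pattern the changelog entries describe as "Use direct references to config flags": an attribute that used to be looked up on the root config object is read from its owning config section instead. A schematic illustration, with invented stand-in classes rather than Synapse's real code:

```python
# Schematic of the refactor; these classes stand in for Synapse's real
# config sections and are not its actual code.
class MetricsConfig:
    def __init__(self) -> None:
        self.enable_metrics = False


class RootConfig:
    def __init__(self) -> None:
        self.metrics = MetricsConfig()


config = RootConfig()
# Old style (being removed): config.enable_metrics
# New style (being added):   config.metrics.enable_metrics
assert config.metrics.enable_metrics is False
```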
synapse/app/_base.py
@@ -48,6 +48,7 @@ from synapse.metrics.jemalloc import setup_jemalloc_stats
 from synapse.util.caches.lrucache import setup_expire_lru_cache_entries
 from synapse.util.daemonize import daemonize_process
 from synapse.util.rlimit import change_resource_limit
+from synapse.util.state_compressor import setup_state_compressor
 from synapse.util.versionstring import get_version_string

 if TYPE_CHECKING:
@@ -88,8 +89,8 @@ def start_worker_reactor(appname, config, run_command=reactor.run):
         appname,
         soft_file_limit=config.soft_file_limit,
         gc_thresholds=config.gc_thresholds,
-        pid_file=config.worker_pid_file,
-        daemonize=config.worker_daemonize,
+        pid_file=config.worker.worker_pid_file,
+        daemonize=config.worker.worker_daemonize,
         print_pidfile=config.print_pidfile,
         logger=logger,
         run_command=run_command,
@@ -383,6 +384,9 @@ async def start(hs: "HomeServer"):
     # If we've configured an expiry time for caches, start the background job now.
     setup_expire_lru_cache_entries(hs)

+    # Schedule the state compressor to run
+    setup_state_compressor(hs)
+
     # It is now safe to start your Synapse.
     hs.start_listening()
     hs.get_datastore().db_pool.start_profiling()
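The branch's `synapse/util/state_compressor.py` itself is not included in this view. A plausible sketch of what `setup_state_compressor` does, based on the config options added later in this diff (`compressor_enabled`, `time_between_compressor_runs`, section name `statecompressor`) and Synapse's usual `looping_call` scheduling pattern, might be:

```python
# Hedged sketch only: the real module is not shown in this diff.
def run_compressor(hs) -> None:
    """Hypothetical worker that invokes the synapse_auto_compressor library."""
    ...


def setup_state_compressor(hs) -> None:
    # Option names come from StateCompressorConfig, added in this diff.
    config = hs.config.statecompressor
    if not config.compressor_enabled:
        return
    # Clock.looping_call takes the interval in milliseconds.
    hs.get_clock().looping_call(
        run_compressor, config.time_between_compressor_runs, hs
    )
```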
@@ -424,12 +428,14 @@ def setup_sentry(hs):
         hs (synapse.server.HomeServer)
     """

-    if not hs.config.sentry_enabled:
+    if not hs.config.metrics.sentry_enabled:
         return

     import sentry_sdk

-    sentry_sdk.init(dsn=hs.config.sentry_dsn, release=get_version_string(synapse))
+    sentry_sdk.init(
+        dsn=hs.config.metrics.sentry_dsn, release=get_version_string(synapse)
+    )

     # We set some default tags that give some context to this instance
     with sentry_sdk.configure_scope() as scope:
synapse/app/admin_cmd.py
@@ -186,13 +186,13 @@ def start(config_options):
     config.worker.worker_app = "synapse.app.admin_cmd"

     if (
-        not config.worker_daemonize
-        and not config.worker_log_file
-        and not config.worker_log_config
+        not config.worker.worker_daemonize
+        and not config.worker.worker_log_file
+        and not config.worker.worker_log_config
     ):
         # Since we're meant to be run as a "command" let's not redirect stdio
         # unless we've actually set log config.
-        config.no_redirect_stdio = True
+        config.logging.no_redirect_stdio = True

     # Explicitly disable background processes
     config.update_user_directory = False
synapse/app/generic_worker.py
@@ -140,7 +140,7 @@ class KeyUploadServlet(RestServlet):
         self.auth = hs.get_auth()
         self.store = hs.get_datastore()
         self.http_client = hs.get_simple_http_client()
-        self.main_uri = hs.config.worker_main_http_uri
+        self.main_uri = hs.config.worker.worker_main_http_uri

     async def on_POST(self, request: Request, device_id: Optional[str]):
         requester = await self.auth.get_user_by_req(request, allow_guest=True)
@@ -321,7 +321,7 @@ class GenericWorkerServer(HomeServer):
             elif name == "federation":
                 resources.update({FEDERATION_PREFIX: TransportLayerServer(self)})
             elif name == "media":
-                if self.config.can_load_media_repo:
+                if self.config.media.can_load_media_repo:
                     media_repo = self.get_media_repository_resource()

                     # We need to serve the admin servlets for media on the
@@ -384,7 +384,7 @@ class GenericWorkerServer(HomeServer):
         logger.info("Synapse worker now listening on port %d", port)

     def start_listening(self):
-        for listener in self.config.worker_listeners:
+        for listener in self.config.worker.worker_listeners:
             if listener.type == "http":
                 self._listen_http(listener)
             elif listener.type == "manhole":
@@ -395,7 +395,7 @@ class GenericWorkerServer(HomeServer):
                     manhole_globals={"hs": self},
                 )
             elif listener.type == "metrics":
-                if not self.config.enable_metrics:
+                if not self.config.metrics.enable_metrics:
                     logger.warning(
                         "Metrics listener configured, but "
                         "enable_metrics is not True!"
@@ -488,7 +488,7 @@ def start(config_options):
     register_start(_base.start, hs)

     # redirect stdio to the logs, if configured.
-    if not hs.config.no_redirect_stdio:
+    if not hs.config.logging.no_redirect_stdio:
         redirect_stdio_to_logs()

     _base.start_worker_reactor("synapse-generic-worker", config)

synapse/app/homeserver.py
@@ -195,7 +195,7 @@ class SynapseHomeServer(HomeServer):
                 }
             )

-        if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
+        if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
             from synapse.rest.synapse.client.password_reset import (
                 PasswordResetSubmitTokenResource,
             )
@@ -234,7 +234,7 @@ class SynapseHomeServer(HomeServer):
             )

         if name in ["media", "federation", "client"]:
-            if self.config.enable_media_repo:
+            if self.config.media.enable_media_repo:
                 media_repo = self.get_media_repository_resource()
                 resources.update(
                     {MEDIA_PREFIX: media_repo, LEGACY_MEDIA_PREFIX: media_repo}
@@ -269,7 +269,7 @@ class SynapseHomeServer(HomeServer):
             # https://twistedmatrix.com/trac/ticket/7678
             resources[WEB_CLIENT_PREFIX] = File(webclient_loc)

-        if name == "metrics" and self.config.enable_metrics:
+        if name == "metrics" and self.config.metrics.enable_metrics:
             resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)

         if name == "replication":
@@ -278,7 +278,7 @@ class SynapseHomeServer(HomeServer):
         return resources

     def start_listening(self):
-        if self.config.redis_enabled:
+        if self.config.redis.redis_enabled:
             # If redis is enabled we connect via the replication command handler
             # in the same way as the workers (since we're effectively a client
             # rather than a server).
@@ -305,7 +305,7 @@ class SynapseHomeServer(HomeServer):
                 for s in services:
                     reactor.addSystemEventTrigger("before", "shutdown", s.stopListening)
             elif listener.type == "metrics":
-                if not self.config.enable_metrics:
+                if not self.config.metrics.enable_metrics:
                     logger.warning(
                         "Metrics listener configured, but "
                         "enable_metrics is not True!"
@@ -366,7 +366,7 @@ def setup(config_options):

     async def start():
         # Load the OIDC provider metadatas, if OIDC is enabled.
-        if hs.config.oidc_enabled:
+        if hs.config.oidc.oidc_enabled:
             oidc = hs.get_oidc_handler()
             # Loading the provider metadata also ensures the provider config is valid.
             await oidc.load_metadata()
@@ -455,7 +455,7 @@ def main():
         hs = setup(sys.argv[1:])

         # redirect stdio to the logs, if configured.
-        if not hs.config.no_redirect_stdio:
+        if not hs.config.logging.no_redirect_stdio:
             redirect_stdio_to_logs()

         run(hs)

synapse/app/phone_stats_home.py
@@ -131,10 +131,12 @@ async def phone_stats_home(hs, stats, stats_process=_stats_process):
     log_level = synapse_logger.getEffectiveLevel()
     stats["log_level"] = logging.getLevelName(log_level)

-    logger.info("Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats))
+    logger.info(
+        "Reporting stats to %s: %s" % (hs.config.metrics.report_stats_endpoint, stats)
+    )
     try:
         await hs.get_proxied_http_client().put_json(
-            hs.config.report_stats_endpoint, stats
+            hs.config.metrics.report_stats_endpoint, stats
         )
     except Exception as e:
         logger.warning("Error reporting stats: %s", e)

@@ -188,7 +190,7 @@ def start_phone_stats_home(hs):
     clock.looping_call(generate_monthly_active_users, 5 * 60 * 1000)
     # End of monthly active user settings

-    if hs.config.report_stats:
+    if hs.config.metrics.report_stats:
         logger.info("Scheduling stats reporting for 3 hour intervals")
         clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000, hs, stats)

synapse/config/_base.py
@@ -200,11 +200,7 @@ class Config:
     @classmethod
     def ensure_directory(cls, dir_path):
         dir_path = cls.abspath(dir_path)
-        try:
-            os.makedirs(dir_path)
-        except OSError as e:
-            if e.errno != errno.EEXIST:
-                raise
+        os.makedirs(dir_path, exist_ok=True)
         if not os.path.isdir(dir_path):
             raise ConfigError("%s is not a directory" % (dir_path,))
         return dir_path

@@ -693,8 +689,7 @@ class RootConfig:
             open_private_ports=config_args.open_private_ports,
         )

-        if not path_exists(config_dir_path):
-            os.makedirs(config_dir_path)
+        os.makedirs(config_dir_path, exist_ok=True)
         with open(config_path, "w") as config_file:
             config_file.write(config_str)
             config_file.write("\n\n# vim:ft=yaml")

synapse/config/_base.pyi
@@ -32,6 +32,7 @@ from synapse.config import (
     server_notices,
     spam_checker,
     sso,
+    state_compressor,
    stats,
    third_party_event_rules,
    tls,

@@ -91,6 +92,7 @@ class RootConfig:
    modules: modules.ModulesConfig
    caches: cache.CacheConfig
    federation: federation.FederationConfig
+    statecompressor: state_compressor.StateCompressorConfig

    config_classes: List = ...
    def __init__(self) -> None: ...

synapse/config/consent_config.py
@@ -13,6 +13,7 @@
 # limitations under the License.

 from os import path
+from typing import Optional

 from synapse.config import ConfigError

@@ -78,8 +79,8 @@ class ConsentConfig(Config):
     def __init__(self, *args):
         super().__init__(*args)

-        self.user_consent_version = None
-        self.user_consent_template_dir = None
+        self.user_consent_version: Optional[str] = None
+        self.user_consent_template_dir: Optional[str] = None
         self.user_consent_server_notice_content = None
         self.user_consent_server_notice_to_guests = False
         self.block_events_without_consent_error = None

@@ -94,7 +95,9 @@ class ConsentConfig(Config):
             return
         self.user_consent_version = str(consent_config["version"])
         self.user_consent_template_dir = self.abspath(consent_config["template_dir"])
-        if not path.isdir(self.user_consent_template_dir):
+        if not isinstance(self.user_consent_template_dir, str) or not path.isdir(
+            self.user_consent_template_dir
+        ):
             raise ConfigError(
                 "Could not find template directory '%s'"
                 % (self.user_consent_template_dir,)

synapse/config/homeserver.py
@@ -45,6 +45,7 @@ from .server import ServerConfig
 from .server_notices import ServerNoticesConfig
 from .spam_checker import SpamCheckerConfig
 from .sso import SSOConfig
+from .state_compressor import StateCompressorConfig
 from .stats import StatsConfig
 from .third_party_event_rules import ThirdPartyRulesConfig
 from .tls import TlsConfig

@@ -97,4 +98,5 @@ class HomeServerConfig(RootConfig):
         WorkerConfig,
         RedisConfig,
         ExperimentalConfig,
+        StateCompressorConfig,
     ]

synapse/config/logger.py
@@ -322,7 +322,9 @@ def setup_logging(

     """
     log_config_path = (
-        config.worker_log_config if use_worker_options else config.log_config
+        config.worker.worker_log_config
+        if use_worker_options
+        else config.logging.log_config
     )

     # Perform one-time logging configuration.

synapse/config/password_auth_providers.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from typing import Any, List
+from typing import Any, List, Tuple, Type

 from synapse.util.module_loader import load_module

@@ -25,7 +25,7 @@ class PasswordAuthProviderConfig(Config):
     section = "authproviders"

     def read_config(self, config, **kwargs):
-        self.password_providers: List[Any] = []
+        self.password_providers: List[Tuple[Type, Any]] = []
         providers = []

         # We want to be backwards compatible with the old `ldap_config`

@@ -19,7 +19,7 @@ import logging
|
||||
import os.path
|
||||
import re
|
||||
from textwrap import indent
|
||||
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple
|
||||
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
|
||||
|
||||
import attr
|
||||
import yaml
|
||||
@@ -184,49 +184,74 @@ KNOWN_RESOURCES = {
|
||||
|
||||
@attr.s(frozen=True)
|
||||
class HttpResourceConfig:
|
||||
names = attr.ib(
|
||||
type=List[str],
|
||||
names: List[str] = attr.ib(
|
||||
factory=list,
|
||||
validator=attr.validators.deep_iterable(attr.validators.in_(KNOWN_RESOURCES)), # type: ignore
|
||||
)
|
||||
compress = attr.ib(
|
||||
type=bool,
|
||||
compress: bool = attr.ib(
|
||||
default=False,
|
||||
validator=attr.validators.optional(attr.validators.instance_of(bool)), # type: ignore[arg-type]
|
||||
)
|
||||
|
||||
|
||||
@attr.s(frozen=True)
|
||||
@attr.s(slots=True, frozen=True, auto_attribs=True)
|
||||
class HttpListenerConfig:
|
||||
"""Object describing the http-specific parts of the config of a listener"""
|
||||
|
||||
x_forwarded = attr.ib(type=bool, default=False)
|
||||
resources = attr.ib(type=List[HttpResourceConfig], factory=list)
|
||||
additional_resources = attr.ib(type=Dict[str, dict], factory=dict)
|
||||
tag = attr.ib(type=str, default=None)
|
||||
x_forwarded: bool = False
|
||||
resources: List[HttpResourceConfig] = attr.ib(factory=list)
|
||||
additional_resources: Dict[str, dict] = attr.ib(factory=dict)
|
||||
tag: Optional[str] = None
|
||||
|
||||
|
||||
@attr.s(frozen=True)
|
||||
@attr.s(slots=True, frozen=True, auto_attribs=True)
|
||||
class ListenerConfig:
|
||||
"""Object describing the configuration of a single listener."""
|
||||
|
||||
port = attr.ib(type=int, validator=attr.validators.instance_of(int))
|
||||
bind_addresses = attr.ib(type=List[str])
|
||||
type = attr.ib(type=str, validator=attr.validators.in_(KNOWN_LISTENER_TYPES))
|
||||
tls = attr.ib(type=bool, default=False)
|
||||
port: int = attr.ib(validator=attr.validators.instance_of(int))
|
||||
bind_addresses: List[str]
|
||||
type: str = attr.ib(validator=attr.validators.in_(KNOWN_LISTENER_TYPES))
|
||||
tls: bool = False
|
||||
|
||||
# http_options is only populated if type=http
|
||||
http_options = attr.ib(type=Optional[HttpListenerConfig], default=None)
|
||||
http_options: Optional[HttpListenerConfig] = None
|
||||
|
||||
|
||||
@attr.s(frozen=True)
|
||||
@attr.s(slots=True, frozen=True, auto_attribs=True)
|
||||
class ManholeConfig:
|
||||
"""Object describing the configuration of the manhole"""
|
||||
|
||||
username = attr.ib(type=str, validator=attr.validators.instance_of(str))
|
||||
password = attr.ib(type=str, validator=attr.validators.instance_of(str))
|
||||
priv_key = attr.ib(type=Optional[Key])
|
||||
pub_key = attr.ib(type=Optional[Key])
|
||||
username: str = attr.ib(validator=attr.validators.instance_of(str))
|
||||
password: str = attr.ib(validator=attr.validators.instance_of(str))
|
||||
priv_key: Optional[Key]
|
||||
pub_key: Optional[Key]
|
||||
|
||||
|
||||
@attr.s(slots=True, frozen=True, auto_attribs=True)
|
||||
class RetentionConfig:
|
||||
"""Object describing the configuration of the manhole"""
|
||||
|
||||
interval: int
|
||||
shortest_max_lifetime: Optional[int]
|
||||
longest_max_lifetime: Optional[int]
|
||||
|
||||
|
||||
@attr.s(frozen=True)
|
||||
class LimitRemoteRoomsConfig:
|
||||
enabled: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
|
||||
complexity: Union[float, int] = attr.ib(
|
||||
validator=attr.validators.instance_of(
|
||||
(float, int) # type: ignore[arg-type] # noqa
|
||||
),
|
||||
default=1.0,
|
||||
)
|
||||
complexity_error: str = attr.ib(
|
||||
validator=attr.validators.instance_of(str),
|
||||
default=ROOM_COMPLEXITY_TOO_GREAT,
|
||||
)
|
||||
admins_can_join: bool = attr.ib(
|
||||
validator=attr.validators.instance_of(bool), default=False
|
||||
)
|
||||
|
||||
|
||||
class ServerConfig(Config):
|
||||
@@ -519,7 +544,7 @@ class ServerConfig(Config):
|
||||
" greater than 'allowed_lifetime_max'"
|
||||
)
|
||||
|
||||
self.retention_purge_jobs: List[Dict[str, Optional[int]]] = []
|
||||
self.retention_purge_jobs: List[RetentionConfig] = []
|
||||
for purge_job_config in retention_config.get("purge_jobs", []):
|
||||
interval_config = purge_job_config.get("interval")
|
||||
|
||||
@@ -553,20 +578,12 @@ class ServerConfig(Config):
|
||||
)
|
||||
|
||||
self.retention_purge_jobs.append(
|
||||
{
|
||||
"interval": interval,
|
||||
"shortest_max_lifetime": shortest_max_lifetime,
|
||||
"longest_max_lifetime": longest_max_lifetime,
|
||||
}
|
||||
RetentionConfig(interval, shortest_max_lifetime, longest_max_lifetime)
|
||||
)
|
||||
|
||||
if not self.retention_purge_jobs:
|
||||
self.retention_purge_jobs = [
|
||||
{
|
||||
"interval": self.parse_duration("1d"),
|
||||
"shortest_max_lifetime": None,
|
||||
"longest_max_lifetime": None,
|
||||
}
|
||||
RetentionConfig(self.parse_duration("1d"), None, None)
|
||||
]
|
||||
|
||||
self.listeners = [parse_listener_def(x) for x in config.get("listeners", [])]
|
||||
@@ -591,25 +608,6 @@ class ServerConfig(Config):
|
||||
self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))
|
||||
self.gc_seconds = self.read_gc_intervals(config.get("gc_min_interval", None))
|
||||
|
||||
        @attr.s
        class LimitRemoteRoomsConfig:
            enabled = attr.ib(
                validator=attr.validators.instance_of(bool), default=False
            )
            complexity = attr.ib(
                validator=attr.validators.instance_of(
                    (float, int)  # type: ignore[arg-type] # noqa
                ),
                default=1.0,
            )
            complexity_error = attr.ib(
                validator=attr.validators.instance_of(str),
                default=ROOM_COMPLEXITY_TOO_GREAT,
            )
            admins_can_join = attr.ib(
                validator=attr.validators.instance_of(bool), default=False
            )

        self.limit_remote_rooms = LimitRemoteRoomsConfig(
            **(config.get("limit_remote_rooms") or {})
        )
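The removed nested class above is replaced by the module-level frozen version near the top of the file; the construction pattern is unchanged. A small sketch of that `**`-unpacking pattern, assuming the LimitRemoteRoomsConfig class as declared in this diff:

limit = LimitRemoteRoomsConfig(**{"enabled": True, "complexity": 3.0})
defaults = LimitRemoteRoomsConfig(**{})  # enabled=False, complexity=1.0
# An unknown key, e.g. {"compelxity": 3.0}, raises TypeError at construction.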
@@ -1447,7 +1445,7 @@ def read_gc_thresholds(thresholds):
        return None
    try:
        assert len(thresholds) == 3
        return (int(thresholds[0]), int(thresholds[1]), int(thresholds[2]))
        return int(thresholds[0]), int(thresholds[1]), int(thresholds[2])
    except Exception:
        raise ConfigError(
            "Value of `gc_threshold` must be a list of three integers if set"

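For context, a self-contained sketch of `read_gc_thresholds` reconstructed from the hunk above (the docstring and the ConfigError stand-in are assumptions; only the redundant parentheses around the returned tuple changed in this commit):

from typing import Optional, Tuple

class ConfigError(Exception):
    """Stand-in for synapse.config._base.ConfigError."""

def read_gc_thresholds(thresholds) -> Optional[Tuple[int, int, int]]:
    """Reads the three integer GC thresholds, if configured."""
    if thresholds is None:
        return None
    try:
        assert len(thresholds) == 3
        return int(thresholds[0]), int(thresholds[1]), int(thresholds[2])
    except Exception:
        raise ConfigError(
            "Value of `gc_threshold` must be a list of three integers if set"
        )

print(read_gc_thresholds([700, 10, 10]))  # (700, 10, 10)
print(read_gc_thresholds(None))           # None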
96
synapse/config/state_compressor.py
Normal file
@@ -0,0 +1,96 @@
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from synapse.config._base import Config, ConfigError
from synapse.config._util import validate_config
from synapse.python_dependencies import DependencyException, check_requirements


class StateCompressorConfig(Config):
    section = "statecompressor"

    def read_config(self, config, **kwargs):
        compressor_config = config.get("state_compressor") or {}
        validate_config(
            _STATE_COMPRESSOR_SCHEMA, compressor_config, ("state_compressor",)
        )
        self.compressor_enabled = compressor_config.get("enabled") or False

        if not self.compressor_enabled:
            return

        try:
            check_requirements("synapse_auto_compressor")
        except DependencyException as e:
            raise ConfigError(e.message) from e

        self.compressor_chunk_size = compressor_config.get("chunk_size") or 500
        self.compressor_number_of_chunks = (
            compressor_config.get("number_of_chunks") or 100
        )
        self.compressor_default_levels = (
            compressor_config.get("default_levels") or "100,50,25"
        )
        self.time_between_compressor_runs = self.parse_duration(
            compressor_config.get("time_between_runs") or "1d"
        )

    def generate_config_section(self, **kwargs):
        return """\
        ## State compressor ##

        # The state compressor is an experimental tool which attempts to
        # reduce the number of rows in the state_groups_state table
        # of postgres databases.
        #
        # For more information please see
        # https://matrix-org.github.io/synapse/latest/state_compressor.html
        #
        state_compressor:
          # Whether the state compressor should run (defaults to false).
          # Uncomment to enable it - note that this requires the
          # 'synapse_auto_compressor' library to be installed.
          #
          #enabled: true

          # The (rough) number of state groups to load at one time. Defaults
          # to 500.
          #
          #chunk_size: 1000

          # The number of chunks to compress on each run. Defaults to 100.
          #
          #number_of_chunks: 1

          # The default level sizes for the compressor to use. Defaults to
          # 100,50,25.
          #
          #default_levels: 128,64,32

          # How frequently to run the state compressor. Defaults to 1d.
          #
          #time_between_runs: 1w
        """


_STATE_COMPRESSOR_SCHEMA = {
    "type": "object",
    "properties": {
        "enabled": {"type": "boolean"},
        "chunk_size": {"type": "number"},
        "number_of_chunks": {"type": "number"},
        "default_levels": {"type": "string"},
        "time_between_runs": {"type": "string"},
    },
}
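The `_STATE_COMPRESSOR_SCHEMA` above is a plain JSON Schema; Synapse's `validate_config` helper runs the section through jsonschema and converts failures into a ConfigError carrying the config path. A sketch of that validation behaviour using jsonschema directly:

import jsonschema

schema = {
    "type": "object",
    "properties": {
        "enabled": {"type": "boolean"},
        "chunk_size": {"type": "number"},
        "default_levels": {"type": "string"},
    },
}

# A well-formed section passes silently...
jsonschema.validate({"enabled": True, "chunk_size": 1000}, schema)

# ...while a mistyped value raises ValidationError.
try:
    jsonschema.validate({"enabled": "yes"}, schema)
except jsonschema.ValidationError as e:
    print(e.message)  # 'yes' is not of type 'boolean'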
@@ -45,12 +45,16 @@ class UserDirectoryConfig(Config):
        #enabled: false

        # Defines whether to search all users visible to your HS when searching
        # the user directory, rather than limiting to users visible in public
        # rooms. Defaults to false.
        # the user directory. If false, search results will only contain users
        # visible in public rooms and users sharing a room with the requester.
        # Defaults to false.
        #
        # If you set it true, you'll have to rebuild the user_directory search
        # indexes, see:
        # https://matrix-org.github.io/synapse/latest/user_directory.html
        # NB. If you set this to true, and the last time the user_directory search
        # indexes were (re)built was before Synapse 1.44, you'll have to
        # rebuild the indexes in order to search through all known users.
        # These indexes are built the first time Synapse starts; admins can
        # manually trigger a rebuild following the instructions at
        # https://matrix-org.github.io/synapse/latest/user_directory.html
        #
        # Uncomment to return search results containing all known users, even if that
        # user does not share a room with the requester.

@@ -74,8 +74,8 @@ class ServerContextFactory(ContextFactory):
        context.set_options(
            SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3 | SSL.OP_NO_TLSv1 | SSL.OP_NO_TLSv1_1
        )
        context.use_certificate_chain_file(config.tls_certificate_file)
        context.use_privatekey(config.tls_private_key)
        context.use_certificate_chain_file(config.tls.tls_certificate_file)
        context.use_privatekey(config.tls.tls_private_key)

        # https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
        context.set_cipher_list(

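The TLS options themselves are unchanged here; only the config attributes moved under `config.tls`. For reference, a standalone sketch of the same pyOpenSSL hardening pattern (the certificate paths are placeholders):

from OpenSSL import SSL

context = SSL.Context(SSL.TLSv1_2_METHOD)
# Refuse the legacy protocol versions, as in the hunk above.
context.set_options(
    SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3 | SSL.OP_NO_TLSv1 | SSL.OP_NO_TLSv1_1
)
context.use_certificate_chain_file("/path/to/fullchain.pem")  # placeholder path
context.use_privatekey_file("/path/to/privkey.pem")           # placeholder path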
@@ -113,7 +113,8 @@ def check(
        raise AuthError(403, "Event not signed by sending server")

    is_invite_via_allow_rule = (
        event.type == EventTypes.Member
        room_version_obj.msc3083_join_rules
        and event.type == EventTypes.Member
        and event.membership == Membership.JOIN
        and "join_authorised_via_users_server" in event.content
    )
@@ -213,7 +214,7 @@ def check(

    if (
        event.type == EventTypes.MSC2716_INSERTION
        or event.type == EventTypes.MSC2716_CHUNK
        or event.type == EventTypes.MSC2716_BATCH
        or event.type == EventTypes.MSC2716_MARKER
    ):
        check_historical(room_version_obj, event, auth_events)
@@ -552,14 +553,14 @@ def check_historical(
    auth_events: StateMap[EventBase],
) -> None:
    """Check whether the event sender is allowed to send historical related
    events like "insertion", "chunk", and "marker".
    events like "insertion", "batch", and "marker".

    Returns:
        None

    Raises:
        AuthError if the event sender is not allowed to send historical related events
        ("insertion", "chunk", and "marker").
        ("insertion", "batch", and "marker").
    """
    # Ignore the auth checks in room versions that do not support historical
    # events
@@ -573,7 +574,7 @@ def check_historical(
    if user_level < historical_level:
        raise AuthError(
            403,
            'You don\'t have permission to send historical related events ("insertion", "chunk", and "marker")',
            'You don\'t have permission to send historical related events ("insertion", "batch", and "marker")',
        )

@@ -344,6 +344,18 @@ class EventBase(metaclass=abc.ABCMeta):
        # this will be a no-op if the event dict is already frozen.
        self._dict = freeze(self._dict)

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return "<%s event_id=%r, type=%r, state_key=%r, outlier=%s>" % (
            self.__class__.__name__,
            self.event_id,
            self.get("type", None),
            self.get("state_key", None),
            self.internal_metadata.is_outlier(),
        )

class FrozenEvent(EventBase):
    format_version = EventFormatVersions.V1  # All events of this type are V1
@@ -392,17 +404,6 @@ class FrozenEvent(EventBase):
    def event_id(self) -> str:
        return self._event_id

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return "<FrozenEvent event_id=%r, type=%r, state_key=%r, outlier=%s>" % (
            self.get("event_id", None),
            self.get("type", None),
            self.get("state_key", None),
            self.internal_metadata.is_outlier(),
        )

class FrozenEventV2(EventBase):
    format_version = EventFormatVersions.V2  # All events of this type are V2
@@ -478,17 +479,6 @@ class FrozenEventV2(EventBase):
        """
        return self.auth_events

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return "<%s event_id=%r, type=%r, state_key=%r>" % (
            self.__class__.__name__,
            self.event_id,
            self.get("type", None),
            self.get("state_key", None),
        )


class FrozenEventV3(FrozenEventV2):
    """FrozenEventV3, which differs from FrozenEventV2 only in the event_id format"""

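These hunks hoist the near-duplicate `__str__`/`__repr__` pairs out of each FrozenEvent subclass into `EventBase`, where interpolating `self.__class__.__name__` recovers the subclass name. A toy sketch of the pattern (stand-in classes, not the real event types):

import abc

class EventBase(metaclass=abc.ABCMeta):
    def __init__(self, event_id: str):
        self.event_id = event_id

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # One implementation now serves every subclass.
        return "<%s event_id=%r>" % (self.__class__.__name__, self.event_id)

class FrozenEvent(EventBase): ...
class FrozenEventV2(EventBase): ...

print(FrozenEvent("$abc"))                # <FrozenEvent event_id='$abc'>
print(FrozenEventV2("$xyz:example.com"))  # <FrozenEventV2 event_id='$xyz:example.com'>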
@@ -80,9 +80,7 @@ class EventContext:

        (type, state_key) -> event_id

        FIXME: what is this for an outlier? it seems ill-defined. It seems like
        it could be either {}, or the state we were given by the remote
        server, depending on $THINGS
        For an outlier, this is {}

        Note that this is a private attribute: it should be accessed via
        ``get_current_state_ids``. _AsyncEventContext impl calculates this
@@ -96,7 +94,7 @@ class EventContext:

        (type, state_key) -> event_id

        FIXME: again, what is this for an outlier?
        For an outlier, this is {}

        As with _current_state_ids, this is a private attribute. It should be
        accessed via get_prev_state_ids.
@@ -130,6 +128,14 @@ class EventContext:
            delta_ids=delta_ids,
        )

    @staticmethod
    def for_outlier():
        """Return an EventContext instance suitable for persisting an outlier event"""
        return EventContext(
            current_state_ids={},
            prev_state_ids={},
        )

    async def serialize(self, event: EventBase, store: "DataStore") -> dict:
        """Converts self to a type that can be serialized as JSON, and then
        deserialized by `deserialize`

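A sketch of the new factory at a call site (assuming the module path `synapse.events.snapshot`; outliers are persisted with empty state maps):

from synapse.events.snapshot import EventContext

# Call sites persisting an outlier can now write:
context = EventContext.for_outlier()
# ...instead of spelling out the empty state by hand:
context = EventContext(current_state_ids={}, prev_state_ids={})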
@@ -46,6 +46,9 @@ CHECK_EVENT_FOR_SPAM_CALLBACK = Callable[
]
USER_MAY_INVITE_CALLBACK = Callable[[str, str, str], Awaitable[bool]]
USER_MAY_CREATE_ROOM_CALLBACK = Callable[[str], Awaitable[bool]]
USER_MAY_CREATE_ROOM_WITH_INVITES_CALLBACK = Callable[
    [str, List[str], List[Dict[str, str]]], Awaitable[bool]
]
USER_MAY_CREATE_ROOM_ALIAS_CALLBACK = Callable[[str, RoomAlias], Awaitable[bool]]
USER_MAY_PUBLISH_ROOM_CALLBACK = Callable[[str, str], Awaitable[bool]]
CHECK_USERNAME_FOR_SPAM_CALLBACK = Callable[[Dict[str, str]], Awaitable[bool]]
@@ -78,7 +81,7 @@ def load_legacy_spam_checkers(hs: "synapse.server.HomeServer"):
    """
    spam_checkers: List[Any] = []
    api = hs.get_module_api()
    for module, config in hs.config.spam_checkers:
    for module, config in hs.config.spamchecker.spam_checkers:
        # Older spam checkers don't accept the `api` argument, so we
        # try and detect support.
        spam_args = inspect.getfullargspec(module)
@@ -164,6 +167,9 @@ class SpamChecker:
        self._check_event_for_spam_callbacks: List[CHECK_EVENT_FOR_SPAM_CALLBACK] = []
        self._user_may_invite_callbacks: List[USER_MAY_INVITE_CALLBACK] = []
        self._user_may_create_room_callbacks: List[USER_MAY_CREATE_ROOM_CALLBACK] = []
        self._user_may_create_room_with_invites_callbacks: List[
            USER_MAY_CREATE_ROOM_WITH_INVITES_CALLBACK
        ] = []
        self._user_may_create_room_alias_callbacks: List[
            USER_MAY_CREATE_ROOM_ALIAS_CALLBACK
        ] = []
@@ -183,6 +189,9 @@ class SpamChecker:
        check_event_for_spam: Optional[CHECK_EVENT_FOR_SPAM_CALLBACK] = None,
        user_may_invite: Optional[USER_MAY_INVITE_CALLBACK] = None,
        user_may_create_room: Optional[USER_MAY_CREATE_ROOM_CALLBACK] = None,
        user_may_create_room_with_invites: Optional[
            USER_MAY_CREATE_ROOM_WITH_INVITES_CALLBACK
        ] = None,
        user_may_create_room_alias: Optional[
            USER_MAY_CREATE_ROOM_ALIAS_CALLBACK
        ] = None,
@@ -203,6 +212,11 @@ class SpamChecker:
        if user_may_create_room is not None:
            self._user_may_create_room_callbacks.append(user_may_create_room)

        if user_may_create_room_with_invites is not None:
            self._user_may_create_room_with_invites_callbacks.append(
                user_may_create_room_with_invites,
            )

        if user_may_create_room_alias is not None:
            self._user_may_create_room_alias_callbacks.append(
                user_may_create_room_alias,
@@ -283,6 +297,34 @@ class SpamChecker:

        return True

    async def user_may_create_room_with_invites(
        self,
        userid: str,
        invites: List[str],
        threepid_invites: List[Dict[str, str]],
    ) -> bool:
        """Checks if a given user may create a room with invites

        If this method returns false, the creation request will be rejected.

        Args:
            userid: The ID of the user attempting to create a room
            invites: The IDs of the Matrix users to be invited if the room creation is
                allowed.
            threepid_invites: The threepids to be invited if the room creation is allowed,
                as a dict including a "medium" key indicating the threepid's medium (e.g.
                "email") and an "address" key indicating the threepid's address (e.g.
                "alice@example.com")

        Returns:
            True if the user may create the room, otherwise False
        """
        for callback in self._user_may_create_room_with_invites_callbacks:
            if await callback(userid, invites, threepid_invites) is False:
                return False

        return True

    async def user_may_create_room_alias(
        self, userid: str, room_alias: RoomAlias
    ) -> bool:

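A sketch of a module wiring up the new hook (the module class, its config option, and the threshold are hypothetical; `register_spam_checker_callbacks` is the module-API registration path these callbacks are designed for):

from typing import Dict, List

class ExampleSpamModule:
    def __init__(self, config: dict, api):
        self._api = api
        # Reject room creations that try to invite too many users up front.
        self._max_invites = config.get("max_invites", 10)  # hypothetical option
        api.register_spam_checker_callbacks(
            user_may_create_room_with_invites=self.user_may_create_room_with_invites,
        )

    async def user_may_create_room_with_invites(
        self,
        userid: str,
        invites: List[str],
        threepid_invites: List[Dict[str, str]],
    ) -> bool:
        return len(invites) + len(threepid_invites) <= self._max_invites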
@@ -42,10 +42,10 @@ def load_legacy_third_party_event_rules(hs: "HomeServer"):
    """Wrapper that loads a third party event rules module configured using the old
    configuration, and registers the hooks they implement.
    """
    if hs.config.third_party_event_rules is None:
    if hs.config.thirdpartyrules.third_party_event_rules is None:
        return

    module, config = hs.config.third_party_event_rules
    module, config = hs.config.thirdpartyrules.third_party_event_rules

    api = hs.get_module_api()
    third_party_rules = module(config=config, module_api=api)

@@ -141,9 +141,9 @@ def prune_event_dict(room_version: RoomVersion, event_dict: dict) -> dict:
    elif event_type == EventTypes.Redaction and room_version.msc2176_redaction_rules:
        add_fields("redacts")
    elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_INSERTION:
        add_fields(EventContentFields.MSC2716_NEXT_CHUNK_ID)
    elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_CHUNK:
        add_fields(EventContentFields.MSC2716_CHUNK_ID)
        add_fields(EventContentFields.MSC2716_NEXT_BATCH_ID)
    elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_BATCH:
        add_fields(EventContentFields.MSC2716_BATCH_ID)
    elif room_version.msc2716_redactions and event_type == EventTypes.MSC2716_MARKER:
        add_fields(EventContentFields.MSC2716_MARKER_INSERTION)

@@ -501,8 +501,6 @@ class FederationClient(FederationBase):
            destination, auth_chain, outlier=True, room_version=room_version
        )

        signed_auth.sort(key=lambda e: e.depth)

        return signed_auth

    def _is_unknown_endpoint(

@@ -560,7 +560,7 @@ class PerDestinationQueue:

        assert len(edus) <= limit, "get_device_updates_by_remote returned too many EDUs"

        return (edus, now_stream_id)
        return edus, now_stream_id

    async def _get_to_device_message_edus(self, limit: int) -> Tuple[List[Edu], int]:
        last_device_stream_id = self._last_device_stream_id
@@ -593,7 +593,7 @@ class PerDestinationQueue:
            stream_id,
        )

        return (edus, stream_id)
        return edus, stream_id

    def _start_catching_up(self) -> None:
        """

@@ -49,7 +49,9 @@ class Authenticator:
        self.keyring = hs.get_keyring()
        self.server_name = hs.hostname
        self.store = hs.get_datastore()
        self.federation_domain_whitelist = hs.config.federation_domain_whitelist
        self.federation_domain_whitelist = (
            hs.config.federation.federation_domain_whitelist
        )
        self.notifier = hs.get_notifier()

        self.replication_client = None

@@ -847,16 +847,16 @@ class GroupsServerHandler(GroupsServerWorkerHandler):
            UserID.from_string(requester_user_id)
        )
        if not is_admin:
            if not self.hs.config.enable_group_creation:
            if not self.hs.config.groups.enable_group_creation:
                raise SynapseError(
                    403, "Only a server admin can create groups on this server"
                )
            localpart = group_id_obj.localpart
            if not localpart.startswith(self.hs.config.group_creation_prefix):
            if not localpart.startswith(self.hs.config.groups.group_creation_prefix):
                raise SynapseError(
                    400,
                    "Can only create groups with prefix %r on this server"
                    % (self.hs.config.group_creation_prefix,),
                    % (self.hs.config.groups.group_creation_prefix,),
                )

        profile = content.get("profile", {})

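Several hunks in this diff (spamchecker, thirdpartyrules, federation, groups) follow the same migration: options move from the flat root config onto per-section attributes. A stand-in sketch of the access change (SimpleNamespace objects, not Synapse classes):

from types import SimpleNamespace

old_config = SimpleNamespace(enable_group_creation=False)
new_config = SimpleNamespace(groups=SimpleNamespace(enable_group_creation=False))

# Same value, new path: the option now lives under its config section.
assert old_config.enable_group_creation == new_config.groups.enable_group_creation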
Some files were not shown because too many files have changed in this diff.