Compare commits: hs/use-mal...v1.36.0 (115 commits)
| SHA1 |
|---|
| 1c8045f674 |
| cbf350db63 |
| fb10a73e85 |
| cdd985c64f |
| 5e0b4719ea |
| c955f22e2c |
| e0ddd82f2c |
| 684df9b21d |
| 8df9941cc2 |
| 1092718cac |
| 9e4610cc27 |
| 7dc14730d9 |
| c842c581ed |
| a0101fc021 |
| 0acb5010ec |
| b2557cbf42 |
| beb251e3ee |
| 543e423fce |
| 8942e23a69 |
| d558292548 |
| fa1db8f156 |
| a0cd8ae8cb |
| c96ab31dff |
| d8be7d493d |
| fd9856e4a9 |
| 9eea4646be |
| 1d143074c5 |
| 73636cab69 |
| 5325f0308c |
| d7a646abca |
| 5666773341 |
| 57c01dca29 |
| 36a7ff0c86 |
| 0284d2a297 |
| bf6fd9f4fd |
| fc3d2dc269 |
| 3cf6b34b4e |
| 4deaebfe00 |
| 3ff6fe2851 |
| 3fdaf4df55 |
| 08e54345b1 |
| a8372ad591 |
| 408ecf8ece |
| b4b2fd2ece |
| 10e6d2abce |
| 4f41b711d8 |
| 258a9a9e8b |
| 6b6c6a02db |
| 9408b86f5c |
| 1641c5c707 |
| 84cf3e47a0 |
| ed53bf314f |
| 3f96dbbda7 |
| ac3e02d089 |
| 5eed6348ce |
| 8fb9af570f |
| f828a70be3 |
| 8e132fe64e |
| b1bc26a909 |
| 78b5102ae7 |
| 8e15c92c2f |
| d9f44fd0b9 |
| dcbfec919b |
| 5447a76332 |
| fe5dad46b0 |
| 224f2f949b |
| f42e4c4eb9 |
| 49df2c28e3 |
| f95e7a03fa |
| 913a761a53 |
| 65e6c64d83 |
| 3e1beb75e6 |
| 557635f69a |
| 7d90d6ce9b |
| 7adcb20fc0 |
| 22a8838f62 |
| 057ce7b754 |
| 82eacb0e07 |
| daca7b2794 |
| c0df6bae06 |
| 316f89e87f |
| 387c297489 |
| 5f1198a67e |
| 3e831f24ff |
| e8ac9ac8ca |
| 21bd230831 |
| c5413d0e9e |
| 6a8643ff3d |
| 7958eadcd1 |
| 1c6a19002c |
| 64887f06fc |
| 551d2c3f4b |
| d983ced596 |
| 141b073c7b |
| 9c76d0561b |
| 5bba1b4905 |
| ac6bfcd52f |
| 4d6e5a5e99 |
| 206a7b5f12 |
| 9752849e2b |
| 653fe2f3cd |
| 13b0673b5a |
| 8dde0bf8b3 |
| afb6dcf806 |
| 41ac128fd3 |
| 6660912226 |
| 6482075c95 |
| 5090f26b63 |
| 52ed9655ed |
| ebdef256b3 |
| bd918d874f |
| 498084228b |
| c14f99be46 |
| 976216959b |
| d19bccdbec |
.buildkite/postgres-config.yaml

````diff
@@ -3,7 +3,7 @@
 # CI's Docker setup at the point where this file is considered.
 server_name: "localhost:8800"
 
-signing_key_path: "/src/.buildkite/test.signing.key"
+signing_key_path: ".buildkite/test.signing.key"
 
 report_stats: false
 
@@ -16,6 +16,4 @@ database:
     database: synapse
 
 # Suppress the key server warning.
-trusted_key_servers:
-  - server_name: "matrix.org"
-    suppress_key_server_warning: true
+trusted_key_servers: []
````
.buildkite/scripts/test_synapse_port_db.sh

````diff
@@ -33,6 +33,10 @@ scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml
 echo "+++ Run synapse_port_db against test database"
 coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
 
+# We should be able to run twice against the same database.
+echo "+++ Run synapse_port_db a second time"
+coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
+
 #####
 
 # Now do the same again, on an empty database.
````
.buildkite/sqlite-config.yaml

````diff
@@ -3,7 +3,7 @@
 # schema and run background updates on it.
 server_name: "localhost:8800"
 
-signing_key_path: "/src/.buildkite/test.signing.key"
+signing_key_path: ".buildkite/test.signing.key"
 
 report_stats: false
 
@@ -13,6 +13,4 @@ database:
   database: ".buildkite/test_db.db"
 
 # Suppress the key server warning.
-trusted_key_servers:
-  - server_name: "matrix.org"
-    suppress_key_server_warning: true
+trusted_key_servers: []
````
.circleci/config.yml

````diff
@@ -41,7 +41,7 @@ workflows:
       - dockerhubuploadlatest:
           filters:
             branches:
-              only: master
+              only: [ master, main ]
 
 commands:
   docker_prepare:
````
.github/workflows/docs.yaml (vendored, new file, 31 lines)

````diff
@@ -0,0 +1,31 @@
+name: Deploy the documentation
+
+on:
+  push:
+    branches:
+      - develop
+
+  workflow_dispatch:
+
+jobs:
+  pages:
+    name: GitHub Pages
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Setup mdbook
+        uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14
+        with:
+          mdbook-version: '0.4.9'
+
+      - name: Build the documentation
+        run: mdbook build
+
+      - name: Deploy latest documentation
+        uses: peaceiris/actions-gh-pages@068dc23d9710f1ba62e86896f84735d869951305 # v3.8.0
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          keep_files: true
+          publish_dir: ./book
+          destination_dir: ./develop
````
.github/workflows/tests.yml (vendored, 10 lines changed)

````diff
@@ -34,7 +34,13 @@ jobs:
     if: ${{ github.base_ref == 'develop' || contains(github.base_ref, 'release-') }}
     runs-on: ubuntu-latest
     steps:
+      # Note: This and the script can be simplified once we drop Buildkite. See:
+      #   https://github.com/actions/checkout/issues/266#issuecomment-638346893
+      #   https://github.com/actions/checkout/issues/416
       - uses: actions/checkout@v2
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
+          fetch-depth: 0
       - uses: actions/setup-python@v2
       - run: pip install tox
       - name: Patch Buildkite-specific test script
@@ -226,9 +232,9 @@
       - name: Run SyTest
         run: /bootstrap.sh synapse
         working-directory: /src
-      - name: Dump results.tap
+      - name: Summarise results.tap
        if: ${{ always() }}
-        run: cat /logs/results.tap
+        run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
       - name: Upload SyTest logs
         uses: actions/upload-artifact@v2
         if: ${{ always() }}
````
.gitignore (vendored, 3 lines added)

````diff
@@ -46,3 +46,6 @@ __pycache__/
 /docs/build/
 /htmlcov
 /pip-wheel-metadata/
+
+# docs
+book/
````
CHANGES.md (209 lines changed)

````diff
@@ -1,10 +1,207 @@
-Synapse 1.34.0rc1 (2021-05-12)
-==============================
+Synapse 1.36.0 (2021-06-15)
+===========================
+
+No significant changes.
+
+
+Synapse 1.36.0rc2 (2021-06-11)
+==============================
+
+Bugfixes
+--------
+
+- Fix a bug which caused presence updates to stop working some time after a restart, when using a presence writer worker. Broke in v1.33.0. ([\#10149](https://github.com/matrix-org/synapse/issues/10149))
+- Fix a bug when using federation sender worker where it would send out more presence updates than necessary, leading to high resource usage. Broke in v1.33.0. ([\#10163](https://github.com/matrix-org/synapse/issues/10163))
+- Fix a bug where Synapse could send the same presence update to a remote twice. ([\#10165](https://github.com/matrix-org/synapse/issues/10165))
+
+
+Synapse 1.36.0rc1 (2021-06-08)
+==============================
+
+Features
+--------
+
+- Add new endpoint `/_matrix/client/r0/rooms/{roomId}/aliases` from Client-Server API r0.6.1 (previously [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432)). ([\#9224](https://github.com/matrix-org/synapse/issues/9224))
+- Improve performance of incoming federation transactions in large rooms. ([\#9953](https://github.com/matrix-org/synapse/issues/9953), [\#9973](https://github.com/matrix-org/synapse/issues/9973))
+- Rewrite logic around verifying JSON object and fetching server keys to be more performant and use less memory. ([\#10035](https://github.com/matrix-org/synapse/issues/10035))
+- Add new admin APIs for unprotecting local media from quarantine. Contributed by @dklimpel. ([\#10040](https://github.com/matrix-org/synapse/issues/10040))
+- Add new admin APIs to remove media by media ID from quarantine. Contributed by @dklimpel. ([\#10044](https://github.com/matrix-org/synapse/issues/10044))
+- Make reason and score parameters optional for reporting content. Implements [MSC2414](https://github.com/matrix-org/matrix-doc/pull/2414). Contributed by Callum Brown. ([\#10077](https://github.com/matrix-org/synapse/issues/10077))
+- Add support for routing more requests to workers. ([\#10084](https://github.com/matrix-org/synapse/issues/10084))
+- Report OpenTracing spans for database activity. ([\#10113](https://github.com/matrix-org/synapse/issues/10113), [\#10136](https://github.com/matrix-org/synapse/issues/10136), [\#10141](https://github.com/matrix-org/synapse/issues/10141))
+- Significantly reduce memory usage of joining large remote rooms. ([\#10117](https://github.com/matrix-org/synapse/issues/10117))
+
+
+Bugfixes
+--------
+
+- Fixed a bug causing replication requests to fail when receiving a lot of events via federation. ([\#10082](https://github.com/matrix-org/synapse/issues/10082))
+- Fix a bug in the `force_tracing_for_users` option introduced in Synapse v1.35 which meant that the OpenTracing spans produced were missing most tags. ([\#10092](https://github.com/matrix-org/synapse/issues/10092))
+- Fixed a bug that could cause Synapse to stop notifying application services. Contributed by Willem Mulder. ([\#10107](https://github.com/matrix-org/synapse/issues/10107))
+- Fix bug where the server would attempt to fetch the same history in the room from a remote server multiple times in parallel. ([\#10116](https://github.com/matrix-org/synapse/issues/10116))
+- Fix a bug introduced in Synapse 1.33.0 which caused replication requests to fail when receiving a lot of very large events via federation. ([\#10118](https://github.com/matrix-org/synapse/issues/10118))
+- Fix bug when using workers where pagination requests failed if a remote server returned zero events from `/backfill`. Introduced in 1.35.0. ([\#10133](https://github.com/matrix-org/synapse/issues/10133))
+
+
+Improved Documentation
+----------------------
+
+- Clarify security note regarding hosting Synapse on the same domain as other web applications. ([\#9221](https://github.com/matrix-org/synapse/issues/9221))
+- Update CAPTCHA documentation to mention turning off the verify origin feature. Contributed by @aaronraimist. ([\#10046](https://github.com/matrix-org/synapse/issues/10046))
+- Tweak wording of database recommendation in `INSTALL.md`. Contributed by @aaronraimist. ([\#10057](https://github.com/matrix-org/synapse/issues/10057))
+- Add initial infrastructure for rendering Synapse documentation with mdbook. ([\#10086](https://github.com/matrix-org/synapse/issues/10086))
+- Convert the remaining Admin API documentation files to markdown. ([\#10089](https://github.com/matrix-org/synapse/issues/10089))
+- Make a link in docs use HTTPS. Contributed by @RhnSharma. ([\#10130](https://github.com/matrix-org/synapse/issues/10130))
+- Fix broken link in Docker docs. ([\#10132](https://github.com/matrix-org/synapse/issues/10132))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the experimental `spaces_enabled` flag. The spaces features are always available now. ([\#10063](https://github.com/matrix-org/synapse/issues/10063))
+
+
+Internal Changes
+----------------
+
+- Tell CircleCI to build Docker images from `main` branch. ([\#9906](https://github.com/matrix-org/synapse/issues/9906))
+- Simplify naming convention for release branches to only include the major and minor version numbers. ([\#10013](https://github.com/matrix-org/synapse/issues/10013))
+- Add `parse_strings_from_args` for parsing an array from query parameters. ([\#10048](https://github.com/matrix-org/synapse/issues/10048), [\#10137](https://github.com/matrix-org/synapse/issues/10137))
+- Remove some dead code regarding TLS certificate handling. ([\#10054](https://github.com/matrix-org/synapse/issues/10054))
+- Remove redundant, unmaintained `convert_server_keys` script. ([\#10055](https://github.com/matrix-org/synapse/issues/10055))
+- Improve the error message printed by synctl when synapse fails to start. ([\#10059](https://github.com/matrix-org/synapse/issues/10059))
+- Fix GitHub Actions lint for newsfragments. ([\#10069](https://github.com/matrix-org/synapse/issues/10069))
+- Update opentracing to inject the right context into the carrier. ([\#10074](https://github.com/matrix-org/synapse/issues/10074))
+- Fix up `BatchingQueue` implementation. ([\#10078](https://github.com/matrix-org/synapse/issues/10078))
+- Log method and path when dropping request due to size limit. ([\#10091](https://github.com/matrix-org/synapse/issues/10091))
+- In Github Actions workflows, summarize the Sytest results in an easy-to-read format. ([\#10094](https://github.com/matrix-org/synapse/issues/10094))
+- Make `/sync` do fewer state resolutions. ([\#10102](https://github.com/matrix-org/synapse/issues/10102))
+- Add missing type hints to the admin API servlets. ([\#10105](https://github.com/matrix-org/synapse/issues/10105))
+- Improve opentracing annotations for `Notifier`. ([\#10111](https://github.com/matrix-org/synapse/issues/10111))
+- Enable Prometheus metrics for the jaeger client library. ([\#10112](https://github.com/matrix-org/synapse/issues/10112))
+- Work to improve the responsiveness of `/sync` requests. ([\#10124](https://github.com/matrix-org/synapse/issues/10124))
+- OpenTracing: use a consistent name for background processes. ([\#10135](https://github.com/matrix-org/synapse/issues/10135))
+
+
+Synapse 1.35.1 (2021-06-03)
+===========================
+
+Bugfixes
+--------
+
+- Fix a bug introduced in v1.35.0 where invite-only rooms would be shown to all users in a space, regardless of if the user had access to it. ([\#10109](https://github.com/matrix-org/synapse/issues/10109))
+
+
+Synapse 1.35.0 (2021-06-01)
+===========================
+
+Note that [the tag](https://github.com/matrix-org/synapse/releases/tag/v1.35.0rc3) and [docker images](https://hub.docker.com/layers/matrixdotorg/synapse/v1.35.0rc3/images/sha256-34ccc87bd99a17e2cbc0902e678b5937d16bdc1991ead097eee6096481ecf2c4?context=explore) for `v1.35.0rc3` were incorrectly built. If you are experiencing issues with either, it is recommended to upgrade to the equivalent tag or docker image for the `v1.35.0` release.
+
+Deprecations and Removals
+-------------------------
+
+- The core Synapse development team plan to drop support for the [unstable API of MSC2858](https://github.com/matrix-org/matrix-doc/blob/master/proposals/2858-Multiple-SSO-Identity-Providers.md#unstable-prefix), including the undocumented `experimental.msc2858_enabled` config option, in August 2021. Client authors should ensure that their clients are updated to use the stable API (which has been supported since Synapse 1.30) well before that time, to give their users time to upgrade. ([\#10101](https://github.com/matrix-org/synapse/issues/10101))
+
+Bugfixes
+--------
+
+- Fixed a bug causing replication requests to fail when receiving a lot of events via federation. Introduced in v1.33.0. ([\#10082](https://github.com/matrix-org/synapse/issues/10082))
+- Fix HTTP response size limit to allow joining very large rooms over federation. Introduced in v1.33.0. ([\#10093](https://github.com/matrix-org/synapse/issues/10093))
+
+
+Internal Changes
+----------------
+
+- Log method and path when dropping request due to size limit. ([\#10091](https://github.com/matrix-org/synapse/issues/10091))
+
+
+Synapse 1.35.0rc2 (2021-05-27)
+==============================
+
+Bugfixes
+--------
+
+- Fix a bug introduced in v1.35.0rc1 when calling the spaces summary API via a GET request. ([\#10079](https://github.com/matrix-org/synapse/issues/10079))
+
+
+Synapse 1.35.0rc1 (2021-05-25)
+==============================
+
+Features
+--------
+
+- Add experimental support to allow a user who could join a restricted room to view it in the spaces summary. ([\#9922](https://github.com/matrix-org/synapse/issues/9922), [\#10007](https://github.com/matrix-org/synapse/issues/10007), [\#10038](https://github.com/matrix-org/synapse/issues/10038))
+- Reduce memory usage when joining very large rooms over federation. ([\#9958](https://github.com/matrix-org/synapse/issues/9958))
+- Add a configuration option which allows enabling opentracing by user id. ([\#9978](https://github.com/matrix-org/synapse/issues/9978))
+- Enable experimental support for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946) (spaces summary API) and [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083) (restricted join rules) by default. ([\#10011](https://github.com/matrix-org/synapse/issues/10011))
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in v1.26.0 which meant that `synapse_port_db` would not correctly initialise some postgres sequences, requiring manual updates afterwards. ([\#9991](https://github.com/matrix-org/synapse/issues/9991))
+- Fix `synctl`'s `--no-daemonize` parameter to work correctly with worker processes. ([\#9995](https://github.com/matrix-org/synapse/issues/9995))
+- Fix a validation bug introduced in v1.34.0 in the ordering of spaces in the space summary API. ([\#10002](https://github.com/matrix-org/synapse/issues/10002))
+- Fixed deletion of new presence stream states from database. ([\#10014](https://github.com/matrix-org/synapse/issues/10014), [\#10033](https://github.com/matrix-org/synapse/issues/10033))
+- Fixed a bug with very high resolution image uploads throwing internal server errors. ([\#10029](https://github.com/matrix-org/synapse/issues/10029))
+
+
+Updates to the Docker image
+---------------------------
+
+- Fix bug introduced in Synapse 1.33.0 which caused a `Permission denied: '/homeserver.log'` error when starting Synapse with the generated log configuration. Contributed by Sergio Miguéns Iglesias. ([\#10045](https://github.com/matrix-org/synapse/issues/10045))
+
+
+Improved Documentation
+----------------------
+
+- Add hardened systemd files as proposed in [#9760](https://github.com/matrix-org/synapse/issues/9760) and added them to `contrib/`. Change the docs to reflect the presence of these files. ([\#9803](https://github.com/matrix-org/synapse/issues/9803))
+- Clarify documentation around SSO mapping providers generating unique IDs and localparts. ([\#9980](https://github.com/matrix-org/synapse/issues/9980))
+- Updates to the PostgreSQL documentation (`postgres.md`). ([\#9988](https://github.com/matrix-org/synapse/issues/9988), [\#9989](https://github.com/matrix-org/synapse/issues/9989))
+- Fix broken link in user directory documentation. Contributed by @junquera. ([\#10016](https://github.com/matrix-org/synapse/issues/10016))
+- Add missing room state entry to the table of contents of room admin API. ([\#10043](https://github.com/matrix-org/synapse/issues/10043))
+
+
+Deprecations and Removals
+-------------------------
+
+- Removed support for the deprecated `tls_fingerprints` configuration setting. Contributed by Jerin J Titus. ([\#9280](https://github.com/matrix-org/synapse/issues/9280))
+
+
+Internal Changes
+----------------
+
+- Allow sending full presence to users via workers other than the one that called `ModuleApi.send_local_online_presence_to`. ([\#9823](https://github.com/matrix-org/synapse/issues/9823))
+- Update comments in the space summary handler. ([\#9974](https://github.com/matrix-org/synapse/issues/9974))
+- Minor enhancements to the `@cachedList` descriptor. ([\#9975](https://github.com/matrix-org/synapse/issues/9975))
+- Split multipart email sending into a dedicated handler. ([\#9977](https://github.com/matrix-org/synapse/issues/9977))
+- Run `black` on files in the `scripts` directory. ([\#9981](https://github.com/matrix-org/synapse/issues/9981))
+- Add missing type hints to `synapse.util` module. ([\#9982](https://github.com/matrix-org/synapse/issues/9982))
+- Simplify a few helper functions. ([\#9984](https://github.com/matrix-org/synapse/issues/9984), [\#9985](https://github.com/matrix-org/synapse/issues/9985), [\#9986](https://github.com/matrix-org/synapse/issues/9986))
+- Remove unnecessary property from SQLBaseStore. ([\#9987](https://github.com/matrix-org/synapse/issues/9987))
+- Remove `keylen` param on `LruCache`. ([\#9993](https://github.com/matrix-org/synapse/issues/9993))
+- Update the Grafana dashboard in `contrib/`. ([\#10001](https://github.com/matrix-org/synapse/issues/10001))
+- Add a batching queue implementation. ([\#10017](https://github.com/matrix-org/synapse/issues/10017))
+- Reduce memory usage when verifying signatures on large numbers of events at once. ([\#10018](https://github.com/matrix-org/synapse/issues/10018))
+- Properly invalidate caches for destination retry timings every (instead of expiring entries every 5 minutes). ([\#10036](https://github.com/matrix-org/synapse/issues/10036))
+- Fix running complement tests with Synapse workers. ([\#10039](https://github.com/matrix-org/synapse/issues/10039))
+- Fix typo in `get_state_ids_for_event` docstring where the return type was incorrect. ([\#10050](https://github.com/matrix-org/synapse/issues/10050))
+
+
+Synapse 1.34.0 (2021-05-17)
+===========================
+
+This release deprecates the `room_invite_state_types` configuration setting. See the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.34.0/UPGRADE.rst#upgrading-to-v1340) for instructions on updating your configuration file to use the new `room_prejoin_state` setting.
+
+This release also deprecates the `POST /_synapse/admin/v1/rooms/<room_id>/delete` admin API route. Server administrators are encouraged to update their scripts to use the new `DELETE /_synapse/admin/v1/rooms/<room_id>` route instead.
+
+
+No significant changes since v1.34.0rc1.
+
+
+Synapse 1.34.0rc1 (2021-05-12)
+==============================
 
 Features
 --------
 
@@ -174,7 +371,7 @@ Synapse 1.32.1 (2021-04-21)
 ===========================
 
 This release fixes [a regression](https://github.com/matrix-org/synapse/issues/9853)
-in Synapse 1.32.0 that caused connected Prometheus instances to become unstable. 
+in Synapse 1.32.0 that caused connected Prometheus instances to become unstable.
 
 However, as this release is still subject to the `LoggingContext` change in 1.32.0,
 it is recommended to remain on or downgrade to 1.31.0.
@@ -190,11 +387,11 @@ Synapse 1.32.0 (2021-04-20)
 
 **Note:** This release introduces [a regression](https://github.com/matrix-org/synapse/issues/9853)
 that can overwhelm connected Prometheus instances. This issue was not present in
-1.32.0rc1. If affected, it is recommended to downgrade to 1.31.0 in the meantime, and 
+1.32.0rc1. If affected, it is recommended to downgrade to 1.31.0 in the meantime, and
 follow [these instructions](https://github.com/matrix-org/synapse/pull/9854#issuecomment-823472183)
 to clean up any excess writeahead logs.
 
-**Note:** This release also mistakenly included a change that may affected Synapse 
+**Note:** This release also mistakenly included a change that may affected Synapse
 modules that import `synapse.logging.context.LoggingContext`, such as
 [synapse-s3-storage-provider](https://github.com/matrix-org/synapse-s3-storage-provider).
 This will be fixed in a later Synapse version.
@@ -205,8 +402,8 @@ This release removes the deprecated `GET /_synapse/admin/v1/users/<user_id>` adm
 
 This release requires Application Services to use type `m.login.application_service` when registering users via the `/_matrix/client/r0/register` endpoint to comply with the spec. Please ensure your Application Services are up to date.
 
-If you are using the `packages.matrix.org` Debian repository for Synapse packages, 
-note that we have recently updated the expiry date on the gpg signing key. If you see an 
+If you are using the `packages.matrix.org` Debian repository for Synapse packages,
+note that we have recently updated the expiry date on the gpg signing key. If you see an
 error similar to `The following signatures were invalid: EXPKEYSIG F473DD4473365DE1`, you
 will need to get a fresh copy of the keys. You can do so with:
````
INSTALL.md (12 lines changed)

````diff
@@ -399,11 +399,9 @@ Once you have installed synapse as above, you will need to configure it.
 
 ### Using PostgreSQL
 
-By default Synapse uses [SQLite](https://sqlite.org/) and in doing so trades performance for convenience.
-SQLite is only recommended in Synapse for testing purposes or for servers with
-very light workloads.
-
-Almost all installations should opt to use [PostgreSQL](https://www.postgresql.org). Advantages include:
+By default Synapse uses an [SQLite](https://sqlite.org/) database and in doing so trades
+performance for convenience. Almost all installations should opt to use [PostgreSQL](https://www.postgresql.org)
+instead. Advantages include:
 
 - significant performance improvements due to the superior threading and
   caching model, smarter query optimiser
@@ -412,6 +410,10 @@ Almost all installations should opt to use [PostgreSQL](https://www.postgresql.o
 For information on how to install and use PostgreSQL in Synapse, please see
 [docs/postgres.md](docs/postgres.md)
 
+SQLite is only acceptable for testing purposes. SQLite should not be used in
+a production server. Synapse will perform poorly when using
+SQLite, especially when participating in large rooms.
+
 ### TLS certificates
 
 The default configuration exposes a single HTTP port on the local
````
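For reference, the PostgreSQL setup that INSTALL.md points to boils down to a `database` block in `homeserver.yaml`. A minimal sketch based on docs/postgres.md; the credentials, database name, and host below are placeholders:

```yaml
database:
  name: psycopg2  # use the PostgreSQL driver instead of the sqlite3 default
  args:
    user: synapse_user
    password: secretpassword
    database: synapse
    host: localhost
    cp_min: 5   # connection pool bounds
    cp_max: 10
```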
MANIFEST.in

````diff
@@ -40,6 +40,7 @@ exclude mypy.ini
 exclude sytest-blacklist
 exclude test_postgresql.sh
 
+include book.toml
 include pyproject.toml
 recursive-include changelog.d *
````
README.rst (46 lines changed)

````diff
@@ -149,21 +149,45 @@ For details on having Synapse manage your federation TLS certificates
 automatically, please see `<docs/ACME.md>`_.
 
 
-Security Note
+Security note
 =============
 
-Matrix serves raw user generated data in some APIs - specifically the `content
-repository endpoints <https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid>`_.
+Matrix serves raw, user-supplied data in some APIs -- specifically the `content
+repository endpoints`_.
 
-Whilst we have tried to mitigate against possible XSS attacks (e.g.
-https://github.com/matrix-org/synapse/pull/1021) we recommend running
-matrix homeservers on a dedicated domain name, to limit any malicious user generated
-content served to web browsers a matrix API from being able to attack webapps hosted
-on the same domain. This is particularly true of sharing a matrix webclient and
-server on the same domain.
+.. _content repository endpoints: https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid
 
-See https://github.com/vector-im/riot-web/issues/1977 and
-https://developer.github.com/changes/2014-04-25-user-content-security for more details.
+Whilst we make a reasonable effort to mitigate against XSS attacks (for
+instance, by using `CSP`_), a Matrix homeserver should not be hosted on a
+domain hosting other web applications. This especially applies to sharing
+the domain with Matrix web clients and other sensitive applications like
+webmail. See
+https://developer.github.com/changes/2014-04-25-user-content-security for more
+information.
+
+.. _CSP: https://github.com/matrix-org/synapse/pull/1021
+
+Ideally, the homeserver should not simply be on a different subdomain, but on
+a completely different `registered domain`_ (also known as top-level site or
+eTLD+1). This is because `some attacks`_ are still possible as long as the two
+applications share the same registered domain.
+
+.. _registered domain: https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-03#section-2.3
+
+.. _some attacks: https://en.wikipedia.org/wiki/Session_fixation#Attacks_using_cross-subdomain_cookie
+
+To illustrate this with an example, if your Element Web or other sensitive web
+application is hosted on ``A.example1.com``, you should ideally host Synapse on
+``example2.com``. Some amount of protection is offered by hosting on
+``B.example1.com`` instead, so this is also acceptable in some scenarios.
+However, you should *not* host your Synapse on ``A.example1.com``.
+
+Note that all of the above refers exclusively to the domain used in Synapse's
+``public_baseurl`` setting. In particular, it has no bearing on the domain
+mentioned in MXIDs hosted on that server.
+
+Following this advice ensures that even if an XSS is found in Synapse, the
+impact to other applications will be minimal.
 
 
 Upgrading an existing Synapse
````
UPGRADE.rst (11 lines changed)

````diff
@@ -88,7 +88,7 @@ for example:
 Upgrading to v1.34.0
 ====================
 
-`room_invite_state_types` configuration setting
+``room_invite_state_types`` configuration setting
 -----------------------------------------------
 
 The ``room_invite_state_types`` configuration setting has been deprecated and
@@ -106,13 +106,10 @@ remove it from your configuration file. The default value used to be:
 - "m.room.encryption"
 - "m.room.name"
 
-If you have customised this value by adding addition state types, you should
-remove ``room_invite_state_types`` and configure ``additional_event_types`` with
-your customisations.
+If you have customised this value, you should remove ``room_invite_state_types`` and
+configure ``room_prejoin_state`` instead.
 
-If you have customised this value by removing state types, you should rename
-``room_invite_state_types`` to ``additional_event_types``, and set
-``disable_default_event_types`` to ``true``.
 
 Upgrading to v1.33.0
 ====================
````
book.toml (new file, 39 lines)

````diff
@@ -0,0 +1,39 @@
+# Documentation for possible options in this file is at
+# https://rust-lang.github.io/mdBook/format/config.html
+[book]
+title = "Synapse"
+authors = ["The Matrix.org Foundation C.I.C."]
+language = "en"
+multilingual = false
+
+# The directory that documentation files are stored in
+src = "docs"
+
+[build]
+# Prevent markdown pages from being automatically generated when they're
+# linked to in SUMMARY.md
+create-missing = false
+
+[output.html]
+# The URL visitors will be directed to when they try to edit a page
+edit-url-template = "https://github.com/matrix-org/synapse/edit/develop/{path}"
+
+# Remove the numbers that appear before each item in the sidebar, as they can
+# get quite messy as we nest deeper
+no-section-label = true
+
+# The source code URL of the repository
+git-repository-url = "https://github.com/matrix-org/synapse"
+
+# The path that the docs are hosted on
+site-url = "/synapse/"
+
+# Additional HTML, JS, CSS that's injected into each page of the book.
+# More information available in docs/website_files/README.md
+additional-css = [
+    "docs/website_files/table-of-contents.css",
+    "docs/website_files/remove-nav-buttons.css",
+    "docs/website_files/indent-section-headers.css",
+]
+additional-js = ["docs/website_files/table-of-contents.js"]
+theme = "docs/website_files/theme"
````
(One file's diff suppressed because it is too large.)
contrib/systemd/override-hardened.conf (new file, 71 lines)

````diff
@@ -0,0 +1,71 @@
+[Service]
+# The following directives give the synapse service R/W access to:
+# - /run/matrix-synapse
+# - /var/lib/matrix-synapse
+# - /var/log/matrix-synapse
+
+RuntimeDirectory=matrix-synapse
+StateDirectory=matrix-synapse
+LogsDirectory=matrix-synapse
+
+######################
+## Security Sandbox ##
+######################
+
+# Make sure that the service has its own unshared tmpfs at /tmp and that it
+# cannot see or change any real devices
+PrivateTmp=true
+PrivateDevices=true
+
+# We give no capabilities to a service by default
+CapabilityBoundingSet=
+AmbientCapabilities=
+
+# Protect the following from modification:
+# - The entire filesystem
+# - sysctl settings and loaded kernel modules
+# - No modifications allowed to Control Groups
+# - Hostname
+# - System Clock
+ProtectSystem=strict
+ProtectKernelTunables=true
+ProtectKernelModules=true
+ProtectControlGroups=true
+ProtectClock=true
+ProtectHostname=true
+
+# Prevent access to the following:
+# - /home directory
+# - Kernel logs
+ProtectHome=tmpfs
+ProtectKernelLogs=true
+
+# Make sure that the process can only see PIDs and process details of itself,
+# and the second option disables seeing details of things like system load and
+# I/O etc
+ProtectProc=invisible
+ProcSubset=pid
+
+# While not needed, we set these options explicitly
+# - This process has been given access to the host network
+# - It can also communicate with any IP Address
+PrivateNetwork=false
+RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX
+IPAddressAllow=any
+
+# Restrict system calls to a sane bunch
+SystemCallArchitectures=native
+SystemCallFilter=@system-service
+SystemCallFilter=~@privileged @resources @obsolete
+
+# Misc restrictions
+# - Since the process is a python process it needs to be able to write and
+#   execute memory regions, so we set MemoryDenyWriteExecute to false
+RestrictSUIDSGID=true
+RemoveIPC=true
+NoNewPrivileges=true
+RestrictRealtime=true
+RestrictNamespaces=true
+LockPersonality=true
+PrivateUsers=true
+MemoryDenyWriteExecute=false
````
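One plausible way to apply this hardened unit file as a systemd drop-in (a sketch; the `matrix-synapse.service` unit name and paths are assumptions that may differ on your distribution):

```sh
# Install the override as a drop-in, then reload units and restart.
sudo mkdir -p /etc/systemd/system/matrix-synapse.service.d
sudo cp contrib/systemd/override-hardened.conf \
    /etc/systemd/system/matrix-synapse.service.d/override.conf
sudo systemctl daemon-reload
sudo systemctl restart matrix-synapse.service

# Optionally inspect the resulting exposure score of the sandboxed unit.
systemd-analyze security matrix-synapse.service
```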
debian/changelog (vendored, 24 lines added)

````diff
@@ -1,3 +1,27 @@
+matrix-synapse-py3 (1.36.0) stable; urgency=medium
+
+  * New synapse release 1.36.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 15 Jun 2021 15:41:53 +0100
+
+matrix-synapse-py3 (1.35.1) stable; urgency=medium
+
+  * New synapse release 1.35.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Thu, 03 Jun 2021 08:11:29 -0400
+
+matrix-synapse-py3 (1.35.0) stable; urgency=medium
+
+  * New synapse release 1.35.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 01 Jun 2021 13:23:35 +0100
+
+matrix-synapse-py3 (1.34.0) stable; urgency=medium
+
+  * New synapse release 1.34.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Mon, 17 May 2021 11:34:18 +0100
+
 matrix-synapse-py3 (1.33.2) stable; urgency=medium
 
   * New synapse release 1.33.2.
````
docker/README.md

````diff
@@ -226,4 +226,4 @@ healthcheck:
 ## Using jemalloc
 
 Jemalloc is embedded in the image and will be used instead of the default allocator.
-You can read about jemalloc by reading the Synapse [README](../README.md).
+You can read about jemalloc by reading the Synapse [README](../README.rst).
````
docker/conf/log.config

````diff
@@ -9,10 +9,11 @@ formatters:
 {% endif %}
 
 handlers:
+{% if LOG_FILE_PATH %}
     file:
         class: logging.handlers.TimedRotatingFileHandler
         formatter: precise
-        filename: {{ LOG_FILE_PATH or "homeserver.log" }}
+        filename: {{ LOG_FILE_PATH }}
        when: "midnight"
         backupCount: 6  # Does not include the current log file.
         encoding: utf8
@@ -29,6 +30,7 @@ handlers:
         # be written to disk.
         capacity: 10
         flushLevel: 30  # Flush for WARNING logs as well
+{% endif %}
 
     console:
         class: logging.StreamHandler
````
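For context, the file handler this template configures maps onto Python's standard `TimedRotatingFileHandler`; a rough equivalent of the rendered YAML when `LOG_FILE_PATH` is set (the path here is illustrative):

```python
import logging
from logging.handlers import TimedRotatingFileHandler

# Rotate at midnight and keep six old log files, matching the
# when/backupCount options in the template above.
handler = TimedRotatingFileHandler(
    "/data/homeserver.log", when="midnight", backupCount=6, encoding="utf8"
)
logging.getLogger("synapse").addHandler(handler)
```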
docker/configure_workers_and_start.py

````diff
@@ -184,18 +184,18 @@ stderr_logfile_maxbytes=0
 """
 
 NGINX_LOCATION_CONFIG_BLOCK = """
-location ~* {endpoint} {
+location ~* {endpoint} {{
     proxy_pass {upstream};
     proxy_set_header X-Forwarded-For $remote_addr;
     proxy_set_header X-Forwarded-Proto $scheme;
     proxy_set_header Host $host;
-}
+}}
 """
 
 NGINX_UPSTREAM_CONFIG_BLOCK = """
-upstream {upstream_worker_type} {
+upstream {upstream_worker_type} {{
     {body}
-}
+}}
 """
````
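The doubled braces in this fix matter because these nginx templates are rendered with Python's `str.format()`, which treats single braces as substitution fields; literal braces must be escaped as `{{` and `}}`. A minimal sketch (the worker name and body below are illustrative):

```python
# Without the doubling, str.format() would treat the literal nginx braces as
# malformed substitution fields and raise an error.
template = """
upstream {upstream_worker_type} {{
    {body}
}}
"""

print(template.format(upstream_worker_type="synchrotron", body="server localhost:8008;"))
# upstream synchrotron {
#     server localhost:8008;
# }
```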
docs/CAPTCHA_SETUP.md

````diff
@@ -1,31 +1,37 @@
 # Overview
-Captcha can be enabled for this home server. This file explains how to do that.
-The captcha mechanism used is Google's ReCaptcha. This requires API keys from Google.
+A captcha can be enabled on your homeserver to help prevent bots from registering
+accounts. Synapse currently uses Google's reCAPTCHA service which requires API keys
+from Google.
 
-## Getting keys
-
-Requires a site/secret key pair from:
-
-<https://developers.google.com/recaptcha/>
-
-Must be a reCAPTCHA v2 key using the "I'm not a robot" Checkbox option
-
-## Setting ReCaptcha Keys
-
-The keys are a config option on the home server config. If they are not
-visible, you can generate them via `--generate-config`. Set the following value:
-
-    recaptcha_public_key: YOUR_SITE_KEY
-    recaptcha_private_key: YOUR_SECRET_KEY
-
-In addition, you MUST enable captchas via:
-
-    enable_registration_captcha: true
+## Getting API keys
+
+1. Create a new site at <https://www.google.com/recaptcha/admin/create>
+1. Set the label to anything you want
+1. Set the type to reCAPTCHA v2 using the "I'm not a robot" Checkbox option.
+   This is the only type of captcha that works with Synapse.
+1. Add the public hostname for your server, as set in `public_baseurl`
+   in `homeserver.yaml`, to the list of authorized domains. If you have not set
+   `public_baseurl`, use `server_name`.
+1. Agree to the terms of service and submit.
+1. Copy your site key and secret key and add them to your `homeserver.yaml`
+   configuration file
+    ```
+    recaptcha_public_key: YOUR_SITE_KEY
+    recaptcha_private_key: YOUR_SECRET_KEY
+    ```
+1. Enable the CAPTCHA for new registrations
+    ```
+    enable_registration_captcha: true
+    ```
+1. Go to the settings page for the CAPTCHA you just created
+1. Uncheck the "Verify the origin of reCAPTCHA solutions" checkbox so that the
+   captcha can be displayed in any client. If you do not disable this option then you
+   must specify the domains of every client that is allowed to display the CAPTCHA.
 
 ## Configuring IP used for auth
 
-The ReCaptcha API requires that the IP address of the user who solved the
-captcha is sent. If the client is connecting through a proxy or load balancer,
+The reCAPTCHA API requires that the IP address of the user who solved the
+CAPTCHA is sent. If the client is connecting through a proxy or load balancer,
 it may be required to use the `X-Forwarded-For` (XFF) header instead of the origin
 IP address. This can be configured using the `x_forwarded` directive in the
-listeners section of the homeserver.yaml configuration file.
+listeners section of the `homeserver.yaml` configuration file.
````
docs/README.md

````diff
@@ -1,7 +1,72 @@
 # Synapse Documentation
 
-This directory contains documentation specific to the `synapse` homeserver.
-
-All matrix-generic documentation now lives in its own project, located at [matrix-org/matrix-doc](https://github.com/matrix-org/matrix-doc)
-
-(Note: some items here may be moved to [matrix-org/matrix-doc](https://github.com/matrix-org/matrix-doc) at some point in the future.)
+**The documentation is currently hosted [here](https://matrix-org.github.io/synapse).**
+Please update any links to point to the new website instead.
+
+## About
+
+This directory currently holds a series of markdown files documenting how to install, use
+and develop Synapse, the reference Matrix homeserver. The documentation is readable directly
+from this repository, but it is recommended to instead browse through the
+[website](https://matrix-org.github.io/synapse) for easier discoverability.
+
+## Adding to the documentation
+
+Most of the documentation currently exists as top-level files, as when organising them into
+a structured website, these files were kept in place so that existing links would not break.
+The rest of the documentation is stored in folders, such as `setup`, `usage`, and `development`
+etc. **All new documentation files should be placed in structured folders.** For example:
+
+To create a new user-facing documentation page about a new Single Sign-On protocol named
+"MyCoolProtocol", one should create a new file with a relevant name, such as "my_cool_protocol.md".
+This file might fit into the documentation structure at:
+
+- Usage
+  - Configuration
+    - User Authentication
+      - Single Sign-On
+        - **My Cool Protocol**
+
+Given that, one would place the new file under
+`usage/configuration/user_authentication/single_sign_on/my_cool_protocol.md`.
+
+Note that the structure of the documentation (and thus the left sidebar on the website) is determined
+by the list in [SUMMARY.md](SUMMARY.md). The final thing to do when adding a new page is to add a new
+line linking to the new documentation file:
+
+```markdown
+- [My Cool Protocol](usage/configuration/user_authentication/single_sign_on/my_cool_protocol.md)
+```
+
+## Building the documentation
+
+The documentation is built with [mdbook](https://rust-lang.github.io/mdBook/), and the outline of the
+documentation is determined by the structure of [SUMMARY.md](SUMMARY.md).
+
+First, [get mdbook](https://github.com/rust-lang/mdBook#installation). Then, **from the root of the repository**,
+build the documentation with:
+
+```sh
+mdbook build
+```
+
+The rendered contents will be outputted to a new `book/` directory at the root of the repository. You can
+browse the book by opening `book/index.html` in a web browser.
+
+You can also have mdbook host the docs on a local webserver with hot-reload functionality via:
+
+```sh
+mdbook serve
+```
+
+The URL at which the docs can be viewed at will be logged.
+
+## Configuration and theming
+
+The look and behaviour of the website is configured by the [book.toml](../book.toml) file
+at the root of the repository. See
+[mdbook's documentation on configuration](https://rust-lang.github.io/mdBook/format/config.html)
+for available options.
+
+The site can be themed and additionally extended with extra UI and features. See
+[website_files/README.md](website_files/README.md) for details.
````
docs/SUMMARY.md (new file, 87 lines)

````diff
@@ -0,0 +1,87 @@
+# Summary
+
+# Introduction
+- [Welcome and Overview](welcome_and_overview.md)
+
+# Setup
+  - [Installation](setup/installation.md)
+  - [Using Postgres](postgres.md)
+  - [Configuring a Reverse Proxy](reverse_proxy.md)
+  - [Configuring a Turn Server](turn-howto.md)
+  - [Delegation](delegate.md)
+
+# Upgrading
+  - [Upgrading between Synapse Versions](upgrading/README.md)
+  - [Upgrading from pre-Synapse 1.0](MSC1711_certificates_FAQ.md)
+
+# Usage
+  - [Federation](federate.md)
+  - [Configuration](usage/configuration/README.md)
+    - [Homeserver Sample Config File](usage/configuration/homeserver_sample_config.md)
+    - [Logging Sample Config File](usage/configuration/logging_sample_config.md)
+    - [Structured Logging](structured_logging.md)
+    - [User Authentication](usage/configuration/user_authentication/README.md)
+      - [Single-Sign On]()
+        - [OpenID Connect](openid.md)
+        - [SAML]()
+        - [CAS]()
+        - [SSO Mapping Providers](sso_mapping_providers.md)
+      - [Password Auth Providers](password_auth_providers.md)
+      - [JSON Web Tokens](jwt.md)
+    - [Registration Captcha](CAPTCHA_SETUP.md)
+    - [Application Services](application_services.md)
+    - [Server Notices](server_notices.md)
+    - [Consent Tracking](consent_tracking.md)
+    - [URL Previews](url_previews.md)
+    - [User Directory](user_directory.md)
+    - [Message Retention Policies](message_retention_policies.md)
+    - [Pluggable Modules]()
+      - [Third Party Rules]()
+      - [Spam Checker](spam_checker.md)
+      - [Presence Router](presence_router_module.md)
+      - [Media Storage Providers]()
+    - [Workers](workers.md)
+      - [Using `synctl` with Workers](synctl_workers.md)
+      - [Systemd](systemd-with-workers/README.md)
+  - [Administration](usage/administration/README.md)
+    - [Admin API](usage/administration/admin_api/README.md)
+      - [Account Validity](admin_api/account_validity.md)
+      - [Delete Group](admin_api/delete_group.md)
+      - [Event Reports](admin_api/event_reports.md)
+      - [Media](admin_api/media_admin_api.md)
+      - [Purge History](admin_api/purge_history_api.md)
+      - [Purge Rooms](admin_api/purge_room.md)
+      - [Register Users](admin_api/register_api.md)
+      - [Manipulate Room Membership](admin_api/room_membership.md)
+      - [Rooms](admin_api/rooms.md)
+      - [Server Notices](admin_api/server_notices.md)
+      - [Shutdown Room](admin_api/shutdown_room.md)
+      - [Statistics](admin_api/statistics.md)
+      - [Users](admin_api/user_admin_api.md)
+      - [Server Version](admin_api/version_api.md)
+    - [Manhole](manhole.md)
+    - [Monitoring](metrics-howto.md)
+    - [Scripts]()
+
+# Development
+  - [Contributing Guide](development/contributing_guide.md)
+  - [Code Style](code_style.md)
+  - [Git Usage](dev/git.md)
+  - [Testing]()
+  - [OpenTracing](opentracing.md)
+  - [Synapse Architecture]()
+    - [Log Contexts](log_contexts.md)
+    - [Replication](replication.md)
+    - [TCP Replication](tcp_replication.md)
+  - [Internal Documentation](development/internal_documentation/README.md)
+    - [Single Sign-On]()
+      - [SAML](dev/saml.md)
+      - [CAS](dev/cas.md)
+    - [State Resolution]()
+      - [The Auth Chain Difference Algorithm](auth_chain_difference_algorithm.md)
+    - [Media Repository](media_repository.md)
+    - [Room and User Statistics](room_and_user_statistics.md)
+    - [Scripts]()
+
+# Other
+  - [Dependency Deprecation Policy](deprecation_policy.md)
````
docs/admin_api/README.rst

````diff
@@ -1,28 +1,14 @@
 Admin APIs
 ==========
 
+**Note**: The latest documentation can be viewed `here <https://matrix-org.github.io/synapse>`_.
+See `docs/README.md <../docs/README.md>`_ for more information.
+
+**Please update links to point to the website instead.** Existing files in this directory
+are preserved to maintain historical links, but may be moved in the future.
+
 This directory includes documentation for the various synapse specific admin
-APIs available.
-
-Authenticating as a server admin
---------------------------------
-
-Many of the API calls in the admin api will require an `access_token` for a
-server admin. (Note that a server admin is distinct from a room admin.)
-
-A user can be marked as a server admin by updating the database directly, e.g.:
-
-.. code-block:: sql
-
-    UPDATE users SET admin = 1 WHERE name = '@foo:bar.com';
-
-A new server admin user can also be created using the
-``register_new_matrix_user`` script.
-
-Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings.
-
-Once you have your `access_token`, to include it in a request, the best option is to add the token to a request header:
-
-``curl --header "Authorization: Bearer <access_token>" <the_rest_of_your_API_request>``
-
-Fore more details, please refer to the complete `matrix spec documentation <https://matrix.org/docs/spec/client_server/r0.5.0#using-access-tokens>`_.
+APIs available. Updates to the existing Admin API documentation should still
+be made to these files, but any new documentation files should instead be placed under
+`docs/usage/administration/admin_api <../docs/usage/administration/admin_api>`_.
````
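Combining the authentication header from the removed text with one of the endpoints documented below gives a complete admin request; a sketch with placeholder hostname and token:

```sh
curl --header "Authorization: Bearer <access_token>" \
    'https://matrix.example.com/_synapse/admin/v1/event_reports?from=0&limit=10'
```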
docs/admin_api/account_validity.md (new file, 42 lines)

````diff
@@ -0,0 +1,42 @@
+# Account validity API
+
+This API allows a server administrator to manage the validity of an account. To
+use it, you must enable the account validity feature (under
+`account_validity`) in Synapse's configuration.
+
+## Renew account
+
+This API extends the validity of an account by as much time as configured in the
+`period` parameter from the `account_validity` configuration.
+
+The API is:
+
+```
+POST /_synapse/admin/v1/account_validity/validity
+```
+
+with the following body:
+
+```json
+{
+    "user_id": "<user ID for the account to renew>",
+    "expiration_ts": 0,
+    "enable_renewal_emails": true
+}
+```
+
+
+`expiration_ts` is an optional parameter and overrides the expiration date,
+which otherwise defaults to now + validity period.
+
+`enable_renewal_emails` is also an optional parameter and enables/disables
+sending renewal emails to the user. Defaults to true.
+
+The API returns with the new expiration date for this account, as a timestamp in
+milliseconds since epoch:
+
+```json
+{
+    "expiration_ts": 0
+}
+```
````
docs/admin_api/account_validity.rst (removed; converted to Markdown above)

````diff
@@ -1,42 +0,0 @@
-Account validity API
-====================
-
-This API allows a server administrator to manage the validity of an account. To
-use it, you must enable the account validity feature (under
-``account_validity``) in Synapse's configuration.
-
-Renew account
--------------
-
-This API extends the validity of an account by as much time as configured in the
-``period`` parameter from the ``account_validity`` configuration.
-
-The API is::
-
-    POST /_synapse/admin/v1/account_validity/validity
-
-with the following body:
-
-.. code:: json
-
-    {
-        "user_id": "<user ID for the account to renew>",
-        "expiration_ts": 0,
-        "enable_renewal_emails": true
-    }
-
-
-``expiration_ts`` is an optional parameter and overrides the expiration date,
-which otherwise defaults to now + validity period.
-
-``enable_renewal_emails`` is also an optional parameter and enables/disables
-sending renewal emails to the user. Defaults to true.
-
-The API returns with the new expiration date for this account, as a timestamp in
-milliseconds since epoch:
-
-.. code:: json
-
-    {
-        "expiration_ts": 0
-    }
````
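A sketch of calling the renew endpoint above (hostname, token, and user ID are placeholders; `--data` makes curl issue a POST):

```sh
curl --header "Authorization: Bearer <access_token>" \
    --data '{"user_id": "@alice:example.com", "enable_renewal_emails": true}' \
    'https://matrix.example.com/_synapse/admin/v1/account_validity/validity'
```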
docs/admin_api/delete_group.md

````diff
@@ -11,4 +11,4 @@ POST /_synapse/admin/v1/delete_group/<group_id>
 ```
 
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: see [README.rst](README.rst).
+server admin: see [Admin API](../../usage/administration/admin_api).
````
docs/admin_api/event_reports.md

````diff
@@ -7,7 +7,7 @@ The api is:
 GET /_synapse/admin/v1/event_reports?from=0&limit=10
 ```
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: see [README.rst](README.rst).
+server admin: see [Admin API](../../usage/administration/admin_api).
 
 It returns a JSON body like the following:
 
@@ -75,9 +75,9 @@ The following fields are returned in the JSON response body:
 * `name`: string - The name of the room.
 * `event_id`: string - The ID of the reported event.
 * `user_id`: string - This is the user who reported the event and wrote the reason.
-* `reason`: string - Comment made by the `user_id` in this report. May be blank.
+* `reason`: string - Comment made by the `user_id` in this report. May be blank or `null`.
 * `score`: integer - Content is reported based upon a negative score, where -100 is
-  "most offensive" and 0 is "inoffensive".
+  "most offensive" and 0 is "inoffensive". May be `null`.
 * `sender`: string - This is the ID of the user who sent the original message/event that
   was reported.
 * `canonical_alias`: string - The canonical alias of the room. `null` if the room does not
@@ -95,7 +95,7 @@ The api is:
 GET /_synapse/admin/v1/event_reports/<report_id>
 ```
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: see [README.rst](README.rst).
+server admin: see [Admin API](../../usage/administration/admin_api).
 
 It returns a JSON body like the following:
````
@@ -4,9 +4,11 @@
  * [List all media uploaded by a user](#list-all-media-uploaded-by-a-user)
- [Quarantine media](#quarantine-media)
  * [Quarantining media by ID](#quarantining-media-by-id)
  * [Remove media from quarantine by ID](#remove-media-from-quarantine-by-id)
  * [Quarantining media in a room](#quarantining-media-in-a-room)
  * [Quarantining all media of a user](#quarantining-all-media-of-a-user)
  * [Protecting media from being quarantined](#protecting-media-from-being-quarantined)
  * [Unprotecting media from being quarantined](#unprotecting-media-from-being-quarantined)
- [Delete local media](#delete-local-media)
  * [Delete a specific local media](#delete-a-specific-local-media)
  * [Delete local media by date or size](#delete-local-media-by-date-or-size)
@@ -26,7 +28,7 @@ The API is:
GET /_synapse/admin/v1/room/<room_id>/media
```
To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [README.rst](README.rst).
server admin: see [Admin API](../../usage/administration/admin_api).

The API returns a JSON body like the following:
```json
@@ -76,6 +78,27 @@ Response:
{}
```

## Remove media from quarantine by ID

This API removes a single piece of local or remote media from quarantine.

Request:

```
POST /_synapse/admin/v1/media/unquarantine/<server_name>/<media_id>

{}
```

Where `server_name` is in the form of `example.org`, and `media_id` is in the
form of `abcdefg12345...`.

Response:

```json
{}
```
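
For operators scripting this moderation endpoint, a minimal sketch follows. It is not part of the upstream docs; the homeserver URL and admin token are placeholders, and the third-party `requests` library is assumed:

```python
import requests  # third-party: pip install requests

HOMESERVER = "https://matrix.example.org"  # placeholder base URL
ADMIN_TOKEN = "<admin_access_token>"       # placeholder admin access_token

def unquarantine(server_name: str, media_id: str) -> None:
    # POST /_synapse/admin/v1/media/unquarantine/<server_name>/<media_id>
    resp = requests.post(
        f"{HOMESERVER}/_synapse/admin/v1/media/unquarantine/{server_name}/{media_id}",
        headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
        json={},
    )
    resp.raise_for_status()  # the endpoint returns an empty JSON dict on success

unquarantine("example.org", "abcdefg12345")
```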

## Quarantining media in a room

This API quarantines all local and remote media in a room.

@@ -159,6 +182,26 @@ Response:
{}
```

## Unprotecting media from being quarantined

This API reverts the protection of a piece of media.

Request:

```
POST /_synapse/admin/v1/media/unprotect/<media_id>

{}
```

Where `media_id` is in the form of `abcdefg12345...`.

Response:

```json
{}
```

# Delete local media
This API deletes the *local* media from the disk of your own server.
This includes any local thumbnails and copies of media downloaded from
@@ -268,7 +311,7 @@ The following fields are returned in the JSON response body:
* `deleted`: integer - The number of media items successfully deleted

To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [README.rst](README.rst).
server admin: see [Admin API](../../usage/administration/admin_api).

If the user re-requests purged remote media, Synapse will re-request the media
from the originating server.

@@ -1,5 +1,4 @@
Purge History API
=================
# Purge History API

The purge history API allows server admins to purge historic events from their
database, reclaiming disk space.
@@ -13,10 +12,12 @@ delete the last message in a room.

The API is:

``POST /_synapse/admin/v1/purge_history/<room_id>[/<event_id>]``
```
POST /_synapse/admin/v1/purge_history/<room_id>[/<event_id>]
```

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.
To use it, you will need to authenticate by providing an `access_token` for a
server admin: [Admin API](../../usage/administration/admin_api)

By default, events sent by local users are not deleted, as they may represent
the only copies of this content in existence. (Events sent by remote users are
@@ -24,54 +25,54 @@ deleted.)

Room state data (such as joins, leaves, topic) is always preserved.

To delete local message events as well, set ``delete_local_events`` in the body:
To delete local message events as well, set `delete_local_events` in the body:

.. code:: json

    {
        "delete_local_events": true
    }
```
{
    "delete_local_events": true
}
```

The caller must specify the point in the room to purge up to. This can be
specified by including an event_id in the URI, or by setting a
``purge_up_to_event_id`` or ``purge_up_to_ts`` in the request body. If an event
`purge_up_to_event_id` or `purge_up_to_ts` in the request body. If an event
id is given, that event (and others at the same graph depth) will be retained.
If ``purge_up_to_ts`` is given, it should be a timestamp since the unix epoch,
If `purge_up_to_ts` is given, it should be a timestamp since the unix epoch,
in milliseconds.

The API starts the purge running, and returns immediately with a JSON body with
a purge id:

.. code:: json
```json
{
    "purge_id": "<opaque id>"
}
```

    {
        "purge_id": "<opaque id>"
    }

Purge status query
------------------
## Purge status query

It is possible to poll for updates on recent purges with a second API:

``GET /_synapse/admin/v1/purge_history_status/<purge_id>``
```
GET /_synapse/admin/v1/purge_history_status/<purge_id>
```

Again, you will need to authenticate by providing an ``access_token`` for a
Again, you will need to authenticate by providing an `access_token` for a
server admin.

This API returns a JSON body like the following:

.. code:: json
```json
{
    "status": "active"
}
```

    {
        "status": "active"
    }
The status will be one of `active`, `complete`, or `failed`.

The status will be one of ``active``, ``complete``, or ``failed``.

Reclaim disk space (Postgres)
-----------------------------
## Reclaim disk space (Postgres)

To reclaim the disk space and return it to the operating system, you need to run
`VACUUM FULL;` on the database.

https://www.postgresql.org/docs/current/sql-vacuum.html
<https://www.postgresql.org/docs/current/sql-vacuum.html>
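
Since the purge runs asynchronously, a caller typically starts it and then polls the status endpoint until it leaves `active`. A sketch under the same assumptions as above (hypothetical homeserver URL and admin token; `requests` assumed):

```python
import time
import requests

HOMESERVER = "https://matrix.example.org"                    # placeholder
HEADERS = {"Authorization": "Bearer <admin_access_token>"}   # placeholder token

def purge_history(room_id: str, purge_up_to_ts: int) -> str:
    # Start the purge; the API returns immediately with an opaque purge_id.
    resp = requests.post(
        f"{HOMESERVER}/_synapse/admin/v1/purge_history/{room_id}",
        headers=HEADERS,
        json={"purge_up_to_ts": purge_up_to_ts, "delete_local_events": False},
    )
    resp.raise_for_status()
    return resp.json()["purge_id"]

def wait_for_purge(purge_id: str) -> str:
    # Poll until the status is no longer "active".
    while True:
        resp = requests.get(
            f"{HOMESERVER}/_synapse/admin/v1/purge_history_status/{purge_id}",
            headers=HEADERS,
        )
        resp.raise_for_status()
        status = resp.json()["status"]  # one of: active, complete, failed
        if status != "active":
            return status
        time.sleep(5)
```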
73  docs/admin_api/register_api.md  Normal file
@@ -0,0 +1,73 @@
# Shared-Secret Registration

This API allows for the creation of users in an administrative and
non-interactive way. This is generally used for bootstrapping a Synapse
instance with administrator accounts.

To authenticate yourself to the server, you will need both the shared secret
(`registration_shared_secret` in the homeserver configuration), and a
one-time nonce. If the registration shared secret is not configured, this API
is not enabled.

To fetch the nonce, you need to request one from the API:

```
> GET /_synapse/admin/v1/register

< {"nonce": "thisisanonce"}
```

Once you have the nonce, you can make a `POST` to the same URL with a JSON
body containing the nonce, username, password, whether they are an admin
(optional, False by default), and an HMAC digest of the content. You can also
set the displayname (optional, `username` by default).

As an example:

```
> POST /_synapse/admin/v1/register
> {
   "nonce": "thisisanonce",
   "username": "pepper_roni",
   "displayname": "Pepper Roni",
   "password": "pizza",
   "admin": true,
   "mac": "mac_digest_here"
  }

< {
   "access_token": "token_here",
   "user_id": "@pepper_roni:localhost",
   "home_server": "test",
   "device_id": "device_id_here"
  }
```

The MAC is the hex digest output of the HMAC-SHA1 algorithm, with the key being
the shared secret and the content being the nonce, user, password, either the
string "admin" or "notadmin", and optionally the user_type,
each separated by NULs. For an example of generation in Python:

```python
import hmac, hashlib

def generate_mac(nonce, user, password, admin=False, user_type=None):

    mac = hmac.new(
      key=shared_secret,
      digestmod=hashlib.sha1,
    )

    mac.update(nonce.encode('utf8'))
    mac.update(b"\x00")
    mac.update(user.encode('utf8'))
    mac.update(b"\x00")
    mac.update(password.encode('utf8'))
    mac.update(b"\x00")
    mac.update(b"admin" if admin else b"notadmin")
    if user_type:
        mac.update(b"\x00")
        mac.update(user_type.encode('utf8'))

    return mac.hexdigest()
```
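
Putting the two requests together, a sketch of the full nonce/MAC round trip. This is not part of the upstream file; the homeserver URL is a placeholder, `generate_mac` is the function shown above, and its free variable `shared_secret` is assumed to be bound to the homeserver's `registration_shared_secret`, as bytes:

```python
import requests

HOMESERVER = "https://matrix.example.org"  # placeholder

# Step 1: fetch a one-time nonce.
nonce = requests.get(f"{HOMESERVER}/_synapse/admin/v1/register").json()["nonce"]

# Step 2: register, passing the HMAC computed over the nonce and credentials.
body = {
    "nonce": nonce,
    "username": "pepper_roni",
    "password": "pizza",
    "admin": True,
    "mac": generate_mac(nonce, "pepper_roni", "pizza", admin=True),
}
resp = requests.post(f"{HOMESERVER}/_synapse/admin/v1/register", json=body)
resp.raise_for_status()
print(resp.json()["user_id"])  # e.g. @pepper_roni:example.org
```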
@@ -1,68 +0,0 @@
Shared-Secret Registration
==========================

This API allows for the creation of users in an administrative and
non-interactive way. This is generally used for bootstrapping a Synapse
instance with administrator accounts.

To authenticate yourself to the server, you will need both the shared secret
(``registration_shared_secret`` in the homeserver configuration), and a
one-time nonce. If the registration shared secret is not configured, this API
is not enabled.

To fetch the nonce, you need to request one from the API::

    > GET /_synapse/admin/v1/register

    < {"nonce": "thisisanonce"}

Once you have the nonce, you can make a ``POST`` to the same URL with a JSON
body containing the nonce, username, password, whether they are an admin
(optional, False by default), and a HMAC digest of the content. Also you can
set the displayname (optional, ``username`` by default).

As an example::

    > POST /_synapse/admin/v1/register
    > {
       "nonce": "thisisanonce",
       "username": "pepper_roni",
       "displayname": "Pepper Roni",
       "password": "pizza",
       "admin": true,
       "mac": "mac_digest_here"
      }

    < {
       "access_token": "token_here",
       "user_id": "@pepper_roni:localhost",
       "home_server": "test",
       "device_id": "device_id_here"
      }

The MAC is the hex digest output of the HMAC-SHA1 algorithm, with the key being
the shared secret and the content being the nonce, user, password, either the
string "admin" or "notadmin", and optionally the user_type
each separated by NULs. For an example of generation in Python::

    import hmac, hashlib

    def generate_mac(nonce, user, password, admin=False, user_type=None):

        mac = hmac.new(
          key=shared_secret,
          digestmod=hashlib.sha1,
        )

        mac.update(nonce.encode('utf8'))
        mac.update(b"\x00")
        mac.update(user.encode('utf8'))
        mac.update(b"\x00")
        mac.update(password.encode('utf8'))
        mac.update(b"\x00")
        mac.update(b"admin" if admin else b"notadmin")
        if user_type:
            mac.update(b"\x00")
            mac.update(user_type.encode('utf8'))

        return mac.hexdigest()
@@ -24,7 +24,7 @@ POST /_synapse/admin/v1/join/<room_id_or_alias>
```

To use it, you will need to authenticate by providing an `access_token` for a
server admin: see [README.rst](README.rst).
server admin: see [Admin API](../../usage/administration/admin_api).

Response:

@@ -4,6 +4,7 @@
* [Usage](#usage)
- [Room Details API](#room-details-api)
- [Room Members API](#room-members-api)
- [Room State API](#room-state-api)
- [Delete Room API](#delete-room-api)
  * [Parameters](#parameters-1)
  * [Response](#response)
@@ -442,7 +443,7 @@ with a body of:
```

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see [README.rst](README.rst).
server admin: see [Admin API](../../usage/administration/admin_api).

A response body like the following is returned:

@@ -10,7 +10,7 @@ GET /_synapse/admin/v1/statistics/users/media
```

To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [README.rst](README.rst).
for a server admin: see [Admin API](../../usage/administration/admin_api).

A response body like the following is returned:

1001  docs/admin_api/user_admin_api.md  Normal file
File diff suppressed because it is too large
@@ -1,981 +0,0 @@
.. contents::

Query User Account
==================

This API returns information about a specific user account.

The api is::

    GET /_synapse/admin/v2/users/<user_id>

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

It returns a JSON body like the following:

.. code:: json

    {
        "displayname": "User",
        "threepids": [
            {
                "medium": "email",
                "address": "<user_mail_1>"
            },
            {
                "medium": "email",
                "address": "<user_mail_2>"
            }
        ],
        "avatar_url": "<avatar_url>",
        "admin": 0,
        "deactivated": 0,
        "shadow_banned": 0,
        "password_hash": "$2b$12$p9B4GkqYdRTPGD",
        "creation_ts": 1560432506,
        "appservice_id": null,
        "consent_server_notice_sent": null,
        "consent_version": null
    }

URL parameters:

- ``user_id``: fully-qualified user id: for example, ``@user:server.com``.

Create or modify Account
========================

This API allows an administrator to create or modify a user account with a
specific ``user_id``.

This api is::

    PUT /_synapse/admin/v2/users/<user_id>

with a body of:

.. code:: json

    {
        "password": "user_password",
        "displayname": "User",
        "threepids": [
            {
                "medium": "email",
                "address": "<user_mail_1>"
            },
            {
                "medium": "email",
                "address": "<user_mail_2>"
            }
        ],
        "avatar_url": "<avatar_url>",
        "admin": false,
        "deactivated": false
    }

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

URL parameters:

- ``user_id``: fully-qualified user id: for example, ``@user:server.com``.

Body parameters:

- ``password``, optional. If provided, the user's password is updated and all
  devices are logged out.

- ``displayname``, optional, defaults to the value of ``user_id``.

- ``threepids``, optional, allows setting the third-party IDs (email, msisdn)
  belonging to a user.

- ``avatar_url``, optional, must be a
  `MXC URI <https://matrix.org/docs/spec/client_server/r0.6.0#matrix-content-mxc-uris>`_.

- ``admin``, optional, defaults to ``false``.

- ``deactivated``, optional. If unspecified, deactivation state will be left
  unchanged on existing accounts and set to ``false`` for new accounts.
  A user cannot be erased by deactivating with this API. For details on deactivating users see
  `Deactivate Account <#deactivate-account>`_.

If the user already exists then optional parameters default to the current value.

In order to re-activate an account ``deactivated`` must be set to ``false``. If
users do not log in via single-sign-on, a new ``password`` must be provided.

List Accounts
=============

This API returns all local user accounts.
By default, the response is ordered by ascending user ID.

The API is::

    GET /_synapse/admin/v2/users?from=0&limit=10&guests=false

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

A response body like the following is returned:

.. code:: json

    {
        "users": [
            {
                "name": "<user_id1>",
                "is_guest": 0,
                "admin": 0,
                "user_type": null,
                "deactivated": 0,
                "shadow_banned": 0,
                "displayname": "<User One>",
                "avatar_url": null
            }, {
                "name": "<user_id2>",
                "is_guest": 0,
                "admin": 1,
                "user_type": null,
                "deactivated": 0,
                "shadow_banned": 0,
                "displayname": "<User Two>",
                "avatar_url": "<avatar_url>"
            }
        ],
        "next_token": "100",
        "total": 200
    }

To paginate, check for ``next_token`` and if present, call the endpoint again
with ``from`` set to the value of ``next_token``. This will return a new page.

If the endpoint does not return a ``next_token`` then there are no more users
to paginate through.
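
The ``next_token``/``from`` contract above lends itself to a simple loop. A sketch (not part of the upstream file; URL and token are placeholders, `requests` assumed):

```python
import requests

HOMESERVER = "https://matrix.example.org"                    # placeholder
HEADERS = {"Authorization": "Bearer <admin_access_token>"}   # placeholder

def all_users():
    params = {"from": "0", "limit": "100", "guests": "false"}
    while True:
        resp = requests.get(
            f"{HOMESERVER}/_synapse/admin/v2/users", headers=HEADERS, params=params
        )
        resp.raise_for_status()
        page = resp.json()
        yield from page["users"]
        if "next_token" not in page:
            return  # no more users to paginate through
        params["from"] = page["next_token"]
```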

**Parameters**

The following parameters should be set in the URL:

- ``user_id`` - Is optional and filters to only return users with user IDs
  that contain this value. This parameter is ignored when using the ``name`` parameter.
- ``name`` - Is optional and filters to only return users with user ID localparts
  **or** displaynames that contain this value.
- ``guests`` - string representing a bool - Is optional and if ``false`` will **exclude** guest users.
  Defaults to ``true`` to include guest users.
- ``deactivated`` - string representing a bool - Is optional and if ``true`` will **include** deactivated users.
  Defaults to ``false`` to exclude deactivated users.
- ``limit`` - string representing a positive integer - Is optional but is used for pagination,
  denoting the maximum number of items to return in this call. Defaults to ``100``.
- ``from`` - string representing a positive integer - Is optional but used for pagination,
  denoting the offset in the returned results. This should be treated as an opaque value and
  not explicitly set to anything other than the return value of ``next_token`` from a previous call.
  Defaults to ``0``.
- ``order_by`` - The method by which to sort the returned list of users.
  If the ordered field has duplicates, the second order is always by ascending ``name``,
  which guarantees a stable ordering. Valid values are:

  - ``name`` - Users are ordered alphabetically by ``name``. This is the default.
  - ``is_guest`` - Users are ordered by ``is_guest`` status.
  - ``admin`` - Users are ordered by ``admin`` status.
  - ``user_type`` - Users are ordered alphabetically by ``user_type``.
  - ``deactivated`` - Users are ordered by ``deactivated`` status.
  - ``shadow_banned`` - Users are ordered by ``shadow_banned`` status.
  - ``displayname`` - Users are ordered alphabetically by ``displayname``.
  - ``avatar_url`` - Users are ordered alphabetically by avatar URL.

- ``dir`` - Direction of sort order. Either ``f`` for forwards or ``b`` for backwards.
  Setting this value to ``b`` will reverse the above sort order. Defaults to ``f``.

Caution. The database only has indexes on the columns ``name`` and ``created_ts``.
This means that if a different sort order is used (``is_guest``, ``admin``,
``user_type``, ``deactivated``, ``shadow_banned``, ``avatar_url`` or ``displayname``),
this can cause a large load on the database, especially for large environments.

**Response**

The following fields are returned in the JSON response body:

- ``users`` - An array of objects, each containing information about a user.
  User objects contain the following fields:

  - ``name`` - string - Fully-qualified user ID (ex. ``@user:server.com``).
  - ``is_guest`` - bool - Status if that user is a guest account.
  - ``admin`` - bool - Status if that user is a server administrator.
  - ``user_type`` - string - Type of the user. Normal users are type ``None``.
    This allows user type specific behaviour. There are also types ``support`` and ``bot``.
  - ``deactivated`` - bool - Status if that user has been marked as deactivated.
  - ``shadow_banned`` - bool - Status if that user has been marked as shadow banned.
  - ``displayname`` - string - The user's display name if they have set one.
  - ``avatar_url`` - string - The user's avatar URL if they have set one.

- ``next_token``: string representing a positive integer - Indication for pagination. See above.
- ``total`` - integer - Total number of users.


Query current sessions for a user
=================================

This API returns information about the active sessions for a specific user.

The api is::

    GET /_synapse/admin/v1/whois/<user_id>

and::

    GET /_matrix/client/r0/admin/whois/<userId>

See also: `Client Server API Whois
<https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid>`_

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

It returns a JSON body like the following:

.. code:: json

    {
        "user_id": "<user_id>",
        "devices": {
            "": {
                "sessions": [
                    {
                        "connections": [
                            {
                                "ip": "1.2.3.4",
                                "last_seen": 1417222374433,
                                "user_agent": "Mozilla/5.0 ..."
                            },
                            {
                                "ip": "1.2.3.10",
                                "last_seen": 1417222374500,
                                "user_agent": "Dalvik/2.1.0 ..."
                            }
                        ]
                    }
                ]
            }
        }
    }

``last_seen`` is measured in milliseconds since the Unix epoch.

Deactivate Account
==================

This API deactivates an account. It removes active access tokens, resets the
password, and deletes third-party IDs (to prevent the user requesting a
password reset).

It can also mark the user as GDPR-erased. This means messages sent by the
user will still be visible by anyone that was in the room when these messages
were sent, but hidden from users joining the room afterwards.

The api is::

    POST /_synapse/admin/v1/deactivate/<user_id>

with a body of:

.. code:: json

    {
        "erase": true
    }

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

The erase parameter is optional and defaults to ``false``.
An empty body may be passed for backwards compatibility.

The following actions are performed when deactivating a user:

- Try to unbind 3PIDs from the identity server
- Remove all 3PIDs from the homeserver
- Delete all devices and E2EE keys
- Delete all access tokens
- Delete the password hash
- Removal from all rooms the user is a member of
- Remove the user from the user directory
- Reject all pending invites
- Remove all account validity information related to the user

The following additional actions are performed during deactivation if ``erase``
is set to ``true``:

- Remove the user's display name
- Remove the user's avatar URL
- Mark the user as erased
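
A sketch of calling this endpoint with erasure enabled (not part of the upstream file; URL and token are placeholders, `requests` assumed):

```python
import requests

resp = requests.post(
    "https://matrix.example.org/_synapse/admin/v1/deactivate/@spammer:example.org",
    headers={"Authorization": "Bearer <admin_access_token>"},
    json={"erase": True},  # omit or set False to deactivate without GDPR erasure
)
resp.raise_for_status()
```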


Reset password
==============

Changes the password of another user. This will automatically log the user out of all their devices.

The api is::

    POST /_synapse/admin/v1/reset_password/<user_id>

with a body of:

.. code:: json

    {
        "new_password": "<secret>",
        "logout_devices": true
    }

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

The parameter ``new_password`` is required.
The parameter ``logout_devices`` is optional and defaults to ``true``.

Get whether a user is a server administrator or not
===================================================

The api is::

    GET /_synapse/admin/v1/users/<user_id>/admin

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

A response body like the following is returned:

.. code:: json

    {
        "admin": true
    }


Change whether a user is a server administrator or not
======================================================

Note that you cannot demote yourself.

The api is::

    PUT /_synapse/admin/v1/users/<user_id>/admin

with a body of:

.. code:: json

    {
        "admin": true
    }

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.


List room memberships of a user
===============================
Gets a list of all ``room_id`` that a specific ``user_id`` is a member of.

The API is::

    GET /_synapse/admin/v1/users/<user_id>/joined_rooms

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

A response body like the following is returned:

.. code:: json

    {
        "joined_rooms": [
            "!DuGcnbhHGaSZQoNQR:matrix.org",
            "!ZtSaPCawyWtxfWiIy:matrix.org"
        ],
        "total": 2
    }

The server returns the list of rooms of which the user and the server
are both members. If the user is local, all the rooms of which the user is
a member are returned.

**Parameters**

The following parameters should be set in the URL:

- ``user_id`` - fully qualified: for example, ``@user:server.com``.

**Response**

The following fields are returned in the JSON response body:

- ``joined_rooms`` - An array of ``room_id``.
- ``total`` - Number of rooms.


List media of a user
====================
Gets a list of all local media that a specific ``user_id`` has created.
By default, the response is ordered by descending creation date and ascending media ID.
The newest media is on top. You can change the order with parameters
``order_by`` and ``dir``.

The API is::

    GET /_synapse/admin/v1/users/<user_id>/media

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

A response body like the following is returned:

.. code:: json

    {
        "media": [
            {
                "created_ts": 100400,
                "last_access_ts": null,
                "media_id": "qXhyRzulkwLsNHTbpHreuEgo",
                "media_length": 67,
                "media_type": "image/png",
                "quarantined_by": null,
                "safe_from_quarantine": false,
                "upload_name": "test1.png"
            },
            {
                "created_ts": 200400,
                "last_access_ts": null,
                "media_id": "FHfiSnzoINDatrXHQIXBtahw",
                "media_length": 67,
                "media_type": "image/png",
                "quarantined_by": null,
                "safe_from_quarantine": false,
                "upload_name": "test2.png"
            }
        ],
        "next_token": 3,
        "total": 2
    }

To paginate, check for ``next_token`` and if present, call the endpoint again
with ``from`` set to the value of ``next_token``. This will return a new page.

If the endpoint does not return a ``next_token`` then there are no more
media to paginate through.

**Parameters**

The following parameters should be set in the URL:

- ``user_id`` - string - fully qualified: for example, ``@user:server.com``.
- ``limit``: string representing a positive integer - Is optional but is used for pagination,
  denoting the maximum number of items to return in this call. Defaults to ``100``.
- ``from``: string representing a positive integer - Is optional but used for pagination,
  denoting the offset in the returned results. This should be treated as an opaque value and
  not explicitly set to anything other than the return value of ``next_token`` from a previous call.
  Defaults to ``0``.
- ``order_by`` - The method by which to sort the returned list of media.
  If the ordered field has duplicates, the second order is always by ascending ``media_id``,
  which guarantees a stable ordering. Valid values are:

  - ``media_id`` - Media are ordered alphabetically by ``media_id``.
  - ``upload_name`` - Media are ordered alphabetically by the name the media was uploaded with.
  - ``created_ts`` - Media are ordered by when the content was uploaded in ms.
    Smallest to largest. This is the default.
  - ``last_access_ts`` - Media are ordered by when the content was last accessed in ms.
    Smallest to largest.
  - ``media_length`` - Media are ordered by length of the media in bytes.
    Smallest to largest.
  - ``media_type`` - Media are ordered alphabetically by MIME-type.
  - ``quarantined_by`` - Media are ordered alphabetically by the user ID that
    initiated the quarantine request for this media.
  - ``safe_from_quarantine`` - Media are ordered by the status if this media is safe
    from quarantining.

- ``dir`` - Direction of media order. Either ``f`` for forwards or ``b`` for backwards.
  Setting this value to ``b`` will reverse the above sort order. Defaults to ``f``.

If neither ``order_by`` nor ``dir`` is set, the default order is newest media on top
(corresponds to ``order_by`` = ``created_ts`` and ``dir`` = ``b``).

Caution. The database only has indexes on the columns ``media_id``,
``user_id`` and ``created_ts``. This means that if a different sort order is used
(``upload_name``, ``last_access_ts``, ``media_length``, ``media_type``,
``quarantined_by`` or ``safe_from_quarantine``), this can cause a large load on the
database, especially for large environments.

**Response**

The following fields are returned in the JSON response body:

- ``media`` - An array of objects, each containing information about a media item.
  Media objects contain the following fields:

  - ``created_ts`` - integer - Timestamp when the content was uploaded in ms.
  - ``last_access_ts`` - integer - Timestamp when the content was last accessed in ms.
  - ``media_id`` - string - The id used to refer to the media.
  - ``media_length`` - integer - Length of the media in bytes.
  - ``media_type`` - string - The MIME-type of the media.
  - ``quarantined_by`` - string - The user ID that initiated the quarantine request
    for this media.

  - ``safe_from_quarantine`` - bool - Status if this media is safe from quarantining.
  - ``upload_name`` - string - The name the media was uploaded with.

- ``next_token``: integer - Indication for pagination. See above.
- ``total`` - integer - Total number of media.

Login as a user
===============

Get an access token that can be used to authenticate as that user. Useful for
when admins wish to do actions on behalf of a user.

The API is::

    POST /_synapse/admin/v1/users/<user_id>/login
    {}

An optional ``valid_until_ms`` field can be specified in the request body as an
integer timestamp that specifies when the token should expire. By default tokens
do not expire.

A response body like the following is returned:

.. code:: json

    {
        "access_token": "<opaque_access_token_string>"
    }


This API does *not* generate a new device for the user, and so will not appear
in their ``/devices`` list, and in general the target user should not be able to
tell they have been logged in as.

To expire the token call the standard ``/logout`` API with the token.

Note: The token will expire if the *admin* user calls ``/logout/all`` from any
of their devices, but the token will *not* expire if the target user does the
same.
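
A sketch of minting and then expiring such a token (not part of the upstream file; URL and admin token are placeholders, `requests` assumed):

```python
import requests

HOMESERVER = "https://matrix.example.org"                          # placeholder
ADMIN_HEADERS = {"Authorization": "Bearer <admin_access_token>"}   # placeholder

resp = requests.post(
    f"{HOMESERVER}/_synapse/admin/v1/users/@alice:example.org/login",
    headers=ADMIN_HEADERS,
    json={"valid_until_ms": 1_700_000_000_000},  # optional expiry; omit so the token never expires
)
resp.raise_for_status()
user_token = resp.json()["access_token"]

# ... act on behalf of the user with user_token ...

# Expire the token via the standard /logout API, authenticated as that token.
requests.post(
    f"{HOMESERVER}/_matrix/client/r0/logout",
    headers={"Authorization": f"Bearer {user_token}"},
).raise_for_status()
```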


User devices
============

List all devices
----------------
Gets information about all devices for a specific ``user_id``.

The API is::

    GET /_synapse/admin/v2/users/<user_id>/devices

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

A response body like the following is returned:

.. code:: json

    {
        "devices": [
            {
                "device_id": "QBUAZIFURK",
                "display_name": "android",
                "last_seen_ip": "1.2.3.4",
                "last_seen_ts": 1474491775024,
                "user_id": "<user_id>"
            },
            {
                "device_id": "AUIECTSRND",
                "display_name": "ios",
                "last_seen_ip": "1.2.3.5",
                "last_seen_ts": 1474491775025,
                "user_id": "<user_id>"
            }
        ],
        "total": 2
    }

**Parameters**

The following parameters should be set in the URL:

- ``user_id`` - fully qualified: for example, ``@user:server.com``.

**Response**

The following fields are returned in the JSON response body:

- ``devices`` - An array of objects, each containing information about a device.
  Device objects contain the following fields:

  - ``device_id`` - Identifier of device.
  - ``display_name`` - Display name set by the user for this device.
    Absent if no name has been set.
  - ``last_seen_ip`` - The IP address where this device was last seen.
    (May be a few minutes out of date, for efficiency reasons).
  - ``last_seen_ts`` - The timestamp (in milliseconds since the unix epoch) when this
    device was last seen. (May be a few minutes out of date, for efficiency reasons).
  - ``user_id`` - Owner of device.

- ``total`` - Total number of user's devices.

Delete multiple devices
-----------------------
Deletes the given devices for a specific ``user_id``, and invalidates
any access token associated with them.

The API is::

    POST /_synapse/admin/v2/users/<user_id>/delete_devices

    {
        "devices": [
            "QBUAZIFURK",
            "AUIECTSRND"
        ]
    }

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

An empty JSON dict is returned.

**Parameters**

The following parameters should be set in the URL:

- ``user_id`` - fully qualified: for example, ``@user:server.com``.

The following fields are required in the JSON request body:

- ``devices`` - The list of device IDs to delete.

Show a device
-------------
Gets information on a single device, by ``device_id`` for a specific ``user_id``.

The API is::

    GET /_synapse/admin/v2/users/<user_id>/devices/<device_id>

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

A response body like the following is returned:

.. code:: json

    {
        "device_id": "<device_id>",
        "display_name": "android",
        "last_seen_ip": "1.2.3.4",
        "last_seen_ts": 1474491775024,
        "user_id": "<user_id>"
    }

**Parameters**

The following parameters should be set in the URL:

- ``user_id`` - fully qualified: for example, ``@user:server.com``.
- ``device_id`` - The device to retrieve.

**Response**

The following fields are returned in the JSON response body:

- ``device_id`` - Identifier of device.
- ``display_name`` - Display name set by the user for this device.
  Absent if no name has been set.
- ``last_seen_ip`` - The IP address where this device was last seen.
  (May be a few minutes out of date, for efficiency reasons).
- ``last_seen_ts`` - The timestamp (in milliseconds since the unix epoch) when this
  device was last seen. (May be a few minutes out of date, for efficiency reasons).
- ``user_id`` - Owner of device.

Update a device
---------------
Updates the metadata on the given ``device_id`` for a specific ``user_id``.

The API is::

    PUT /_synapse/admin/v2/users/<user_id>/devices/<device_id>

    {
        "display_name": "My other phone"
    }

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

An empty JSON dict is returned.

**Parameters**

The following parameters should be set in the URL:

- ``user_id`` - fully qualified: for example, ``@user:server.com``.
- ``device_id`` - The device to update.

The following fields are required in the JSON request body:

- ``display_name`` - The new display name for this device. If not given,
  the display name is unchanged.

Delete a device
---------------
Deletes the given ``device_id`` for a specific ``user_id``,
and invalidates any access token associated with it.

The API is::

    DELETE /_synapse/admin/v2/users/<user_id>/devices/<device_id>

    {}

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

An empty JSON dict is returned.

**Parameters**

The following parameters should be set in the URL:

- ``user_id`` - fully qualified: for example, ``@user:server.com``.
- ``device_id`` - The device to delete.

List all pushers
================
Gets information about all pushers for a specific ``user_id``.

The API is::

    GET /_synapse/admin/v1/users/<user_id>/pushers

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

A response body like the following is returned:

.. code:: json

    {
        "pushers": [
            {
                "app_display_name": "HTTP Push Notifications",
                "app_id": "m.http",
                "data": {
                    "url": "example.com"
                },
                "device_display_name": "pushy push",
                "kind": "http",
                "lang": "None",
                "profile_tag": "",
                "pushkey": "a@example.com"
            }
        ],
        "total": 1
    }

**Parameters**

The following parameters should be set in the URL:

- ``user_id`` - fully qualified: for example, ``@user:server.com``.

**Response**

The following fields are returned in the JSON response body:

- ``pushers`` - An array containing the current pushers for the user

  - ``app_display_name`` - string - A string that will allow the user to identify
    what application owns this pusher.

  - ``app_id`` - string - This is a reverse-DNS style identifier for the application.
    Max length, 64 chars.

  - ``data`` - A dictionary of information for the pusher implementation itself.

    - ``url`` - string - Required if ``kind`` is ``http``. The URL to use to send
      notifications to.

    - ``format`` - string - The format to use when sending notifications to the
      Push Gateway.

  - ``device_display_name`` - string - A string that will allow the user to identify
    what device owns this pusher.

  - ``kind`` - string - The kind of pusher. "http" is a pusher that sends HTTP pokes.

  - ``lang`` - string - The preferred language for receiving notifications
    (e.g. 'en' or 'en-US')

  - ``profile_tag`` - string - This string determines which set of device specific rules
    this pusher executes.

  - ``pushkey`` - string - This is a unique identifier for this pusher.
    Max length, 512 bytes.

- ``total`` - integer - Number of pushers.

See also `Client-Server API Spec <https://matrix.org/docs/spec/client_server/latest#get-matrix-client-r0-pushers>`_

Shadow-banning users
====================

Shadow-banning is a useful tool for moderating malicious or egregiously abusive users.
A shadow-banned user receives successful responses to their client-server API requests,
but the events are not propagated into rooms. This can be an effective tool as it
(hopefully) takes longer for the user to realise they are being moderated before
pivoting to another account.

Shadow-banning a user should be used as a tool of last resort and may lead to confusing
or broken behaviour for the client. A shadow-banned user will not receive any
notification and it is generally more appropriate to ban or kick abusive users.
A shadow-banned user will be unable to contact anyone on the server.

The API is::

    POST /_synapse/admin/v1/users/<user_id>/shadow_ban

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

An empty JSON dict is returned.

**Parameters**

The following parameters should be set in the URL:

- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
  be local.

Override ratelimiting for users
===============================

This API allows overriding or disabling ratelimiting for a specific user.
There are specific APIs to set, get and delete a ratelimit.

Get status of ratelimit
-----------------------

The API is::

    GET /_synapse/admin/v1/users/<user_id>/override_ratelimit

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

A response body like the following is returned:

.. code:: json

    {
        "messages_per_second": 0,
        "burst_count": 0
    }

**Parameters**

The following parameters should be set in the URL:

- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
  be local.

**Response**

The following fields are returned in the JSON response body:

- ``messages_per_second`` - integer - The number of actions that can
  be performed in a second. ``0`` means that ratelimiting is disabled for this user.
- ``burst_count`` - integer - How many actions can be performed before
  being limited.

If **no** custom ratelimit is set, an empty JSON dict is returned.

.. code:: json

    {}

Set ratelimit
-------------

The API is::

    POST /_synapse/admin/v1/users/<user_id>/override_ratelimit

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

A response body like the following is returned:

.. code:: json

    {
        "messages_per_second": 0,
        "burst_count": 0
    }

**Parameters**

The following parameters should be set in the URL:

- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
  be local.

Body parameters:

- ``messages_per_second`` - positive integer, optional. The number of actions that can
  be performed in a second. Defaults to ``0``.
- ``burst_count`` - positive integer, optional. How many actions can be performed
  before being limited. Defaults to ``0``.

To disable ratelimiting for a user, set both values to ``0``.

**Response**

The following fields are returned in the JSON response body:

- ``messages_per_second`` - integer - The number of actions that can
  be performed in a second.
- ``burst_count`` - integer - How many actions can be performed before
  being limited.

Delete ratelimit
----------------

The API is::

    DELETE /_synapse/admin/v1/users/<user_id>/override_ratelimit

To use it, you will need to authenticate by providing an ``access_token`` for a
server admin: see `README.rst <README.rst>`_.

An empty JSON dict is returned.

.. code:: json

    {}

**Parameters**

The following parameters should be set in the URL:

- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
  be local.
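
Taken together, the three ratelimit endpoints form a small set/get/delete lifecycle. A sketch of driving them (not part of the upstream file; URL and token are placeholders, `requests` assumed):

```python
import requests

HOMESERVER = "https://matrix.example.org"                    # placeholder
HEADERS = {"Authorization": "Bearer <admin_access_token>"}   # placeholder
URL = f"{HOMESERVER}/_synapse/admin/v1/users/@bot:example.org/override_ratelimit"

# Exempt the user from ratelimiting entirely (0/0 disables it).
requests.post(
    URL, headers=HEADERS, json={"messages_per_second": 0, "burst_count": 0}
).raise_for_status()

# Inspect the override; an empty dict means no custom ratelimit is set.
print(requests.get(URL, headers=HEADERS).json())

# Remove the override again, restoring the server defaults.
requests.delete(URL, headers=HEADERS).raise_for_status()
```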
@@ -1,20 +1,21 @@
Version API
===========
# Version API

This API returns the running Synapse version and the Python version
on which Synapse is being run. This is useful when a Synapse instance
is behind a proxy that does not forward the 'Server' header (which also
contains Synapse version information).

The api is::
The api is:

    GET /_synapse/admin/v1/server_version
```
GET /_synapse/admin/v1/server_version
```

It returns a JSON body like the following:

.. code:: json

    {
        "server_version": "0.99.2rc1 (b=develop, abcdef123)",
        "python_version": "3.6.8"
    }
```json
{
    "server_version": "0.99.2rc1 (b=develop, abcdef123)",
    "python_version": "3.6.8"
}
```
@@ -122,15 +122,15 @@ So, what counts as a more- or less-stable branch? A little reflection will show
that our active branches are ordered thus, from more-stable to less-stable:

* `master` (tracks our last release).
* `release-vX.Y.Z` (the branch where we prepare the next release)<sup
* `release-vX.Y` (the branch where we prepare the next release)<sup
  id="a3">[3](#f3)</sup>.
* PR branches which are targeting the release.
* `develop` (our "mainline" branch containing our bleeding-edge).
* regular PR branches.

The corollary is: if you have a bugfix that needs to land in both
`release-vX.Y.Z` *and* `develop`, then you should base your PR on
`release-vX.Y.Z`, get it merged there, and then merge from `release-vX.Y.Z` to
`release-vX.Y` *and* `develop`, then you should base your PR on
`release-vX.Y`, get it merged there, and then merge from `release-vX.Y` to
`develop`. (If a fix lands in `develop` and we later need it in a
release-branch, we can of course cherry-pick it, but landing it in the release
branch first helps reduce the chance of annoying conflicts.)
@@ -145,4 +145,4 @@ most intuitive name. [^](#a1)

<b id="f3">[3]</b>: Very, very occasionally (I think this has happened once in
the history of Synapse), we've had two releases in flight at once. Obviously,
`release-v1.2.3` is more-stable than `release-v1.3.0`. [^](#a3)
`release-v1.2` is more-stable than `release-v1.3`. [^](#a3)

7  docs/development/contributing_guide.md  Normal file
@@ -0,0 +1,7 @@
<!--
Include the contents of CONTRIBUTING.md from the project root (where GitHub likes it
to be)
-->
# Contributing

{{#include ../../CONTRIBUTING.md}}
12  docs/development/internal_documentation/README.md  Normal file
@@ -0,0 +1,12 @@
# Internal Documentation

This section covers implementation documentation for various parts of Synapse.

If a developer is planning to make a change to a feature of Synapse, it can be useful for
general documentation of how that feature is implemented to be available. This saves the
developer time that would otherwise be spent working out how the feature works by reading
the code.

Documentation that would be more useful from the perspective of a system administrator,
rather than a developer who's intending to change the code, should instead be placed
under the Usage section of the documentation.
BIN  docs/favicon.png  Normal file
Binary file not shown. (After: 7.7 KiB)
58  docs/favicon.svg  Normal file
@@ -0,0 +1,58 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
   xmlns:dc="http://purl.org/dc/elements/1.1/"
   xmlns:cc="http://creativecommons.org/ns#"
   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
   xmlns:svg="http://www.w3.org/2000/svg"
   xmlns="http://www.w3.org/2000/svg"
   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
   viewBox="0 0 199.7 184.2"
   version="1.1"
   id="svg62"
   sodipodi:docname="mdbook-favicon.svg"
   inkscape:version="1.0.2 (e86c870879, 2021-01-15, custom)">
  <metadata
     id="metadata68">
    <rdf:RDF>
      <cc:Work
         rdf:about="">
        <dc:format>image/svg+xml</dc:format>
        <dc:type
           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
      </cc:Work>
    </rdf:RDF>
  </metadata>
  <defs
     id="defs66" />
  <sodipodi:namedview
     pagecolor="#ffffff"
     bordercolor="#666666"
     borderopacity="1"
     objecttolerance="10"
     gridtolerance="10"
     guidetolerance="10"
     inkscape:pageopacity="0"
     inkscape:pageshadow="2"
     inkscape:window-width="1920"
     inkscape:window-height="1026"
     id="namedview64"
     showgrid="false"
     inkscape:zoom="3.2245912"
     inkscape:cx="84.790185"
     inkscape:cy="117.96478"
     inkscape:window-x="0"
     inkscape:window-y="0"
     inkscape:window-maximized="1"
     inkscape:current-layer="svg62" />
  <style
     id="style58">
    @media (prefers-color-scheme: dark) {
      svg { fill: white; }
    }
  </style>
  <path
     d="m 189.5,36.8 c 0.2,2.8 0,5.1 -0.6,6.8 L 153,162 c -0.6,2.1 -2,3.7 -4.2,5 -2.2,1.2 -4.4,1.9 -6.7,1.9 H 31.4 c -9.6,0 -15.3,-2.8 -17.3,-8.4 -0.8,-2.2 -0.8,-3.9 0.1,-5.2 0.9,-1.2 2.4,-1.8 4.6,-1.8 H 123 c 7.4,0 12.6,-1.4 15.4,-4.1 2.8,-2.7 5.7,-8.9 8.6,-18.4 L 179.9,22.4 c 1.8,-5.9 1,-11.1 -2.2,-15.6 C 174.5,2.3 169.9,0 164,0 H 72.7 c -1,0 -3.1,0.4 -6.1,1.1 L 66.7,0.7 C 64.5,0.2 62.6,0 61,0.1 c -1.6,0.1 -3,0.5 -4.3,1.4 -1.3,0.9 -2.4,1.8 -3.2,2.8 -0.8,1 -1.5,2.2 -2.3,3.8 -0.8,1.6 -1.4,3 -1.9,4.3 -0.5,1.3 -1.1,2.7 -1.8,4.2 -0.7,1.5 -1.3,2.7 -2,3.7 -0.5,0.6 -1.2,1.5 -2,2.5 -0.8,1 -1.6,2 -2.2,2.8 -0.6,0.8 -0.9,1.5 -1.1,2.2 -0.2,0.7 -0.1,1.8 0.2,3.2 0.3,1.4 0.4,2.4 0.4,3.1 -0.3,3 -1.4,6.9 -3.3,11.6 -1.9,4.7 -3.6,8.1 -5.1,10.1 -0.3,0.4 -1.2,1.3 -2.6,2.7 -1.4,1.4 -2.3,2.6 -2.6,3.7 -0.3,0.4 -0.3,1.5 -0.1,3.4 0.3,1.8 0.4,3.1 0.3,3.8 -0.3,2.7 -1.3,6.3 -3,10.8 -2.406801,6.370944 -3.4,8.2 -5,11 -0.2,0.5 -0.9,1.4 -2,2.8 -1.1,1.4 -1.8,2.5 -2,3.4 -0.2,0.6 -0.1,1.8 0.1,3.4 0.2,1.6 0.2,2.8 -0.1,3.6 -0.6,3 -1.8,6.7 -3.6,11 -1.8,4.3 -3.6,7.9 -5.4,11 -0.5,0.8 -1.1,1.7 -2,2.8 -0.8,1.1 -1.5,2 -2,2.8 -0.5,0.8 -0.8,1.6 -1,2.5 -0.1,0.5 0,1.3 0.4,2.3 0.3,1.1 0.4,1.9 0.4,2.6 -0.1,1.1 -0.2,2.6 -0.5,4.4 -0.2,1.8 -0.4,2.9 -0.4,3.2 -1.8,4.8 -1.7,9.9 0.2,15.2 2.2,6.2 6.2,11.5 11.9,15.8 5.7,4.3 11.7,6.4 17.8,6.4 h 110.7 c 5.2,0 10.1,-1.7 14.7,-5.2 4.6,-3.5 7.7,-7.8 9.2,-12.9 l 33,-108.6 c 1.8,-5.8 1,-10.9 -2.2,-15.5 -1.7,-2.5 -4,-4.2 -7.1,-5.4 z M 38.14858,105.59813 60.882735,41.992545 h 10.8 c 6.340631,0 33.351895,0.778957 70.804135,0.970479 -18.18245,63.254766 0,0 -18.18245,63.254766 -23.00947,-0.10382 -63.362955,-0.6218 -72.55584,-0.51966 -18,0.2 -13.6,-0.1 -13.6,-0.1 z m 80.621,-5.891206 c 15.19043,-50.034423 0,1e-5 15.19043,-50.034423 l -11.90624,-0.13228 2.73304,-9.302941 -44.32863,0.07339 -2.532953,8.036036 -11.321128,-0.18864 -17.955519,51.440073 c 0.02698,0.027 4.954586,0.0514 12.187488,0.0717 l -2.997994,9.804886 c 11.36463,0.0271 1.219679,-0.0736 46.117666,-0.31499 l 2.65246,-9.571696 c 7.08021,0.14819 11.59705,0.13117 12.16138,0.1189 z m -56.149615,-3.855606 13.7,-42.5 h 9.8 l 1.194896,32.99936 23.205109,-32.99936 h 9.9 l -13.6,42.5 h -7.099996 l 12.499996,-35.4 -24.50001,35.4 h -6.799995 l -0.8,-35 -10.8,35 z"
     id="path60"
     sodipodi:nodetypes="ccccssccsssccsssccsssssscsssscssscccscscscsccsccccccssssccccccsccsccccccccccccccccccccccccccccc" />
</svg>

(After: 3.9 KiB)
@@ -42,17 +42,17 @@ To receive OpenTracing spans, start up a Jaeger server. This can be done
using docker like so:

```sh
docker run -d --name jaeger
docker run -d --name jaeger \
  -p 6831:6831/udp \
  -p 6832:6832/udp \
  -p 5778:5778 \
  -p 16686:16686 \
  -p 14268:14268 \
  jaegertracing/all-in-one:1.13
  jaegertracing/all-in-one:1
```

Latest documentation is probably at
<https://www.jaegertracing.io/docs/1.13/getting-started/>
https://www.jaegertracing.io/docs/latest/getting-started.

## Enable OpenTracing in Synapse

@@ -62,7 +62,7 @@ as shown in the [sample config](./sample_config.yaml). For example:

```yaml
opentracing:
    tracer_enabled: true
    enabled: true
    homeserver_whitelist:
      - "mytrustedhomeserver.org"
      - "*.myotherhomeservers.com"
@@ -90,4 +90,4 @@ to two problems, namely:
## Configuring Jaeger

Sampling strategies can be set as in this document:
<https://www.jaegertracing.io/docs/1.13/sampling/>
<https://www.jaegertracing.io/docs/latest/sampling/>.

200  docs/postgres.md
@@ -1,6 +1,6 @@
# Using Postgres

Postgres version 9.5 or later is known to work.
Synapse supports PostgreSQL versions 9.6 or later.

## Install postgres client libraries

@@ -33,28 +33,15 @@ Assuming your PostgreSQL database user is called `postgres`, first authenticate
    # Or, if your system uses sudo to get administrative rights
    sudo -u postgres bash

Then, create a user ``synapse_user`` with:
Then, create a postgres user and a database with:

    # this will prompt for a password for the new user
    createuser --pwprompt synapse_user

Before you can authenticate with the `synapse_user`, you must create a
database that it can access. To create a database, first connect to the
database with your database user:
    createdb --encoding=UTF8 --locale=C --template=template0 --owner=synapse_user synapse

    su - postgres # Or: sudo -u postgres bash
    psql

and then run:

    CREATE DATABASE synapse
     ENCODING 'UTF8'
     LC_COLLATE='C'
     LC_CTYPE='C'
     template=template0
     OWNER synapse_user;

This would create an appropriate database named `synapse` owned by the
`synapse_user` user (which must already have been created as above).
The above will create a user called `synapse_user`, and a database called
`synapse`.
|
||||
Note that the PostgreSQL database *must* have the correct encoding set
|
||||
(as shown above), otherwise it will not be able to store UTF8 strings.
|
||||
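A quick way to confirm the encoding and locale afterwards (a sketch, not part
of the original docs; assumes the `postgres` superuser and a database named
`synapse`):

```sh
# Expect UTF8 / C / C in the output
sudo -u postgres psql -c \
  "SELECT datname, pg_encoding_to_char(encoding), datcollate, datctype
     FROM pg_database WHERE datname = 'synapse';"
```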
@@ -63,79 +50,6 @@ You may need to enable password authentication so `synapse_user` can
 connect to the database. See
 <https://www.postgresql.org/docs/current/auth-pg-hba-conf.html>.

-If you get an error along the lines of `FATAL: Ident authentication failed for
-user "synapse_user"`, you may need to use an authentication method other than
-`ident`:
-
-* If the `synapse_user` user has a password, add the password to the `database:`
-  section of `homeserver.yaml`. Then add the following to `pg_hba.conf`:
-
-  ```
-  host synapse synapse_user ::1/128 md5  # or `scram-sha-256` instead of `md5` if you use that
-  ```
-
-* If the `synapse_user` user does not have a password, then a password doesn't
-  have to be added to `homeserver.yaml`. But the following does need to be added
-  to `pg_hba.conf`:
-
-  ```
-  host synapse synapse_user ::1/128 trust
-  ```
-
-Note that line order matters in `pg_hba.conf`, so make sure that if you do add a
-new line, it is inserted before:
-
-```
-host all all ::1/128 ident
-```
-
-### Fixing incorrect `COLLATE` or `CTYPE`
-
-Synapse will refuse to set up a new database if it has the wrong values of
-`COLLATE` and `CTYPE` set, and will log warnings on existing databases. Using
-different locales can cause issues if the locale library is updated from
-underneath the database, or if a different version of the locale is used on any
-replicas.
-
-The safest way to fix the issue is to take a dump and recreate the database with
-the correct `COLLATE` and `CTYPE` parameters (as shown above). It is also possible to change the
-parameters on a live database and run a `REINDEX` on the entire database,
-however extreme care must be taken to avoid database corruption.
-
-Note that the above may fail with an error about duplicate rows if corruption
-has already occurred, and such duplicate rows will need to be manually removed.
-
-## Fixing inconsistent sequences error
-
-Synapse uses Postgres sequences to generate IDs for various tables. A sequence
-and associated table can get out of sync if, for example, Synapse has been
-downgraded and then upgraded again.
-
-To fix the issue shut down Synapse (including any and all workers) and run the
-SQL command included in the error message. Once done Synapse should start
-successfully.
-
-## Tuning Postgres
-
-The default settings should be fine for most deployments. For larger
-scale deployments tuning some of the settings is recommended, details of
-which can be found at
-<https://wiki.postgresql.org/wiki/Tuning_Your_PostgreSQL_Server>.
-
-In particular, we've found tuning the following values helpful for
-performance:
-
-- `shared_buffers`
-- `effective_cache_size`
-- `work_mem`
-- `maintenance_work_mem`
-- `autovacuum_work_mem`
-
-Note that the appropriate values for those fields depend on the amount
-of free memory the database host has available.

 ## Synapse config

 When you are ready to start using PostgreSQL, edit the `database`
@@ -165,18 +79,42 @@ may block for an extended period while it waits for a response from the
 database server. Example values might be:

 ```yaml
-# seconds of inactivity after which TCP should send a keepalive message to the server
-keepalives_idle: 10
+database:
+  args:
+    # ... as above
+
+    # seconds of inactivity after which TCP should send a keepalive message to the server
+    keepalives_idle: 10

-# the number of seconds after which a TCP keepalive message that is not
-# acknowledged by the server should be retransmitted
-keepalives_interval: 10
+    # the number of seconds after which a TCP keepalive message that is not
+    # acknowledged by the server should be retransmitted
+    keepalives_interval: 10

-# the number of TCP keepalives that can be lost before the client's connection
-# to the server is considered dead
-keepalives_count: 3
+    # the number of TCP keepalives that can be lost before the client's connection
+    # to the server is considered dead
+    keepalives_count: 3
 ```

+## Tuning Postgres
+
+The default settings should be fine for most deployments. For larger
+scale deployments tuning some of the settings is recommended, details of
+which can be found at
+<https://wiki.postgresql.org/wiki/Tuning_Your_PostgreSQL_Server>.
+
+In particular, we've found tuning the following values helpful for
+performance:
+
+- `shared_buffers`
+- `effective_cache_size`
+- `work_mem`
+- `maintenance_work_mem`
+- `autovacuum_work_mem`
+
+Note that the appropriate values for those fields depend on the amount
+of free memory the database host has available.

## Porting from SQLite

### Overview

@@ -185,9 +123,8 @@ The script `synapse_port_db` allows porting an existing synapse server
 backed by SQLite to using PostgreSQL. This is done as a two phase
 process:

-1. Copy the existing SQLite database to a separate location (while the
-   server is down) and running the port script against that offline
-   database.
+1. Copy the existing SQLite database to a separate location and run
+   the port script against that offline database.
 2. Shut down the server. Rerun the port script to port any data that
    has come in since taking the first snapshot. Restart server against
    the PostgreSQL database.
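In practice, the first phase looks something like the following sketch (the
file names and paths here are illustrative assumptions, not taken from the
page above):

```sh
# Phase 1: snapshot the SQLite database and port the snapshot offline
cp homeserver.db homeserver.db.snapshot
synapse_port_db --sqlite-database homeserver.db.snapshot \
    --postgres-config homeserver-postgres.yaml
```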
@@ -245,3 +182,60 @@ PostgreSQL database configuration file `homeserver-postgres.yaml`:
     ./synctl start

 Synapse should now be running against PostgreSQL.

+## Troubleshooting
+
+### Alternative auth methods
+
+If you get an error along the lines of `FATAL: Ident authentication failed for
+user "synapse_user"`, you may need to use an authentication method other than
+`ident`:
+
+* If the `synapse_user` user has a password, add the password to the `database:`
+  section of `homeserver.yaml`. Then add the following to `pg_hba.conf`:
+
+  ```
+  host synapse synapse_user ::1/128 md5  # or `scram-sha-256` instead of `md5` if you use that
+  ```
+
+* If the `synapse_user` user does not have a password, then a password doesn't
+  have to be added to `homeserver.yaml`. But the following does need to be added
+  to `pg_hba.conf`:
+
+  ```
+  host synapse synapse_user ::1/128 trust
+  ```
+
+Note that line order matters in `pg_hba.conf`, so make sure that if you do add a
+new line, it is inserted before:
+
+```
+host all all ::1/128 ident
+```
+
+### Fixing incorrect `COLLATE` or `CTYPE`
+
+Synapse will refuse to set up a new database if it has the wrong values of
+`COLLATE` and `CTYPE` set, and will log warnings on existing databases. Using
+different locales can cause issues if the locale library is updated from
+underneath the database, or if a different version of the locale is used on any
+replicas.
+
+The safest way to fix the issue is to dump the database and recreate it with
+the correct locale parameter (as shown above). It is also possible to change the
+parameters on a live database and run a `REINDEX` on the entire database,
+however extreme care must be taken to avoid database corruption.
+
+Note that the above may fail with an error about duplicate rows if corruption
+has already occurred, and such duplicate rows will need to be manually removed.
+
+### Fixing inconsistent sequences error
+
+Synapse uses Postgres sequences to generate IDs for various tables. A sequence
+and associated table can get out of sync if, for example, Synapse has been
+downgraded and then upgraded again.
+
+To fix the issue shut down Synapse (including any and all workers) and run the
+SQL command included in the error message. Once done Synapse should start
+successfully.
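As an illustration only (the exact statement is printed in Synapse's error
message and varies by sequence), the fix typically resets the sequence from
the associated table's maximum ID:

```sh
# Hypothetical example for one particular sequence/table pair
sudo -u postgres psql synapse -c \
  "SELECT setval('state_group_id_seq', (SELECT COALESCE(max(id), 1) FROM state_groups));"
```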
@@ -28,7 +28,11 @@ async def ModuleApi.send_local_online_presence_to(users: Iterable[str]) -> None
 which can be given a list of local or remote MXIDs to broadcast known, online user
 presence to (for those users that the receiving user is considered interested in).
 It does not include state for users who are currently offline, and it can only be
-called on workers that support sending federation.
+called on workers that support sending federation. Additionally, this method must
+only be called from the process that has been configured to write to the
+[presence stream](https://github.com/matrix-org/synapse/blob/master/docs/workers.md#stream-writers).
+By default, this is the main process, but another worker can be configured to do
+so.
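A minimal sketch of calling this from a module (assumptions: the module keeps
the `ModuleApi` instance it was constructed with, and the MXIDs are
placeholders):

```python
class ExampleModule:
    def __init__(self, config, api):
        # `api` is the ModuleApi instance handed to every module
        self._api = api

    async def notify_presence(self) -> None:
        # Broadcast known online presence for these users to everyone
        # interested in them; must run on the presence writer process.
        await self._api.send_local_online_presence_to(
            ["@alice:example.org", "@bob:other-server.example"]
        )
```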

 ### Module structure

@@ -683,33 +683,6 @@ acme:
     #
     account_key_file: DATADIR/acme_account.key

-# List of allowed TLS fingerprints for this server to publish along
-# with the signing keys for this server. Other matrix servers that
-# make HTTPS requests to this server will check that the TLS
-# certificates returned by this server match one of the fingerprints.
-#
-# Synapse automatically adds the fingerprint of its own certificate
-# to the list. So if federation traffic is handled directly by synapse
-# then no modification to the list is required.
-#
-# If synapse is run behind a load balancer that handles the TLS then it
-# will be necessary to add the fingerprints of the certificates used by
-# the loadbalancers to this list if they are different to the one
-# synapse is using.
-#
-# Homeservers are permitted to cache the list of TLS fingerprints
-# returned in the key responses up to the "valid_until_ts" returned in
-# key. It may be necessary to publish the fingerprints of a new
-# certificate and wait until the "valid_until_ts" of the previous key
-# responses have passed before deploying it.
-#
-# You can calculate a fingerprint from a given TLS listener via:
-# openssl s_client -connect $host:$port < /dev/null 2> /dev/null |
-# openssl x509 -outform DER | openssl sha256 -binary | base64 | tr -d '='
-# or by checking matrix.org/federationtester/api/report?server_name=$host
-#
-#tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]

 ## Federation ##

@@ -2845,7 +2818,8 @@ opentracing:
     #enabled: true

     # The list of homeservers we wish to send and receive span contexts and span baggage.
-    # See docs/opentracing.rst
+    # See docs/opentracing.rst.
     #
     # This is a list of regexes which are matched against the server_name of the
     # homeserver.
     #
@@ -2854,19 +2828,26 @@ opentracing:
     #homeserver_whitelist:
     #  - ".*"

+    # A list of the matrix IDs of users whose requests will always be traced,
+    # even if the tracing system would otherwise drop the traces due to
+    # probabilistic sampling.
+    #
+    # By default, the list is empty.
+    #
+    #force_tracing_for_users:
+    #  - "@user1:server_name"
+    #  - "@user2:server_name"

     # Jaeger can be configured to sample traces at different rates.
     # All configuration options provided by Jaeger can be set here.
-    # Jaeger's configuration mostly related to trace sampling which
+    # Jaeger's configuration is mostly related to trace sampling which
     # is documented here:
-    # https://www.jaegertracing.io/docs/1.13/sampling/.
+    # https://www.jaegertracing.io/docs/latest/sampling/.
     #
     #jaeger_config:
     #  sampler:
     #    type: const
     #    param: 1

     # Logging whether spans were started and reported
     #
     #logging:
     #  false

7  docs/setup/installation.md  Normal file
@@ -0,0 +1,7 @@
+<!--
+Include the contents of INSTALL.md from the project root without moving it, which may
+break links around the internet. Additionally, note that SUMMARY.md is unable to
+directly link to content outside of the docs/ directory. So we use this file as a
+redirection.
+-->
+{{#include ../../INSTALL.md}}
@@ -67,8 +67,8 @@ A custom mapping provider must specify the following methods:
   - Arguments:
     - `userinfo` - A `authlib.oidc.core.claims.UserInfo` object to extract user
       information from.
-  - This method must return a string, which is the unique identifier for the
-    user. Commonly the ``sub`` claim of the response.
+  - This method must return a string, which is the unique, immutable identifier
+    for the user. Commonly the `sub` claim of the response.
 * `map_user_attributes(self, userinfo, token, failures)`
   - This method must be async.
   - Arguments:
@@ -87,7 +87,9 @@ A custom mapping provider must specify the following methods:
     `localpart` value, such as `john.doe1`.
   - Returns a dictionary with two keys:
     - `localpart`: A string, used to generate the Matrix ID. If this is
-      `None`, the user is prompted to pick their own username.
+      `None`, the user is prompted to pick their own username. This is only used
+      during a user's first login. Once a localpart has been associated with a
+      remote user ID (see `get_remote_user_id`) it cannot be updated.
     - `displayname`: An optional string, the display name for the user.
 * `get_extra_attributes(self, userinfo, token)`
   - This method must be async.
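Taken together, a skeleton of the two methods discussed above might look like
this (a sketch, not a complete provider; `preferred_username` and `name` are
assumed claims in the OIDC response):

```python
class ExampleOidcMappingProvider:
    def get_remote_user_id(self, userinfo):
        # The unique, immutable identifier for the user.
        return userinfo["sub"]

    async def map_user_attributes(self, userinfo, token, failures):
        localpart = userinfo.get("preferred_username")
        if localpart and failures:
            # A previous attempt collided; disambiguate as described above.
            localpart = f"{localpart}{failures}"
        return {"localpart": localpart, "displayname": userinfo.get("name")}
```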
@@ -153,8 +155,8 @@ A custom mapping provider must specify the following methods:
       information from.
     - `client_redirect_url` - A string, the URL that the client will be
       redirected to.
-  - This method must return a string, which is the unique identifier for the
-    user. Commonly the ``uid`` claim of the response.
+  - This method must return a string, which is the unique, immutable identifier
+    for the user. Commonly the `uid` claim of the response.
 * `saml_response_to_user_attributes(self, saml_response, failures, client_redirect_url)`
   - Arguments:
     - `saml_response` - A `saml2.response.AuthnResponse` object to extract user
@@ -172,8 +174,10 @@ A custom mapping provider must specify the following methods:
       redirected to.
   - This method must return a dictionary, which will then be used by Synapse
     to build a new user. The following keys are allowed:
-    * `mxid_localpart` - The mxid localpart of the new user. If this is
-      `None`, the user is prompted to pick their own username.
+    * `mxid_localpart` - A string, the mxid localpart of the new user. If this is
+      `None`, the user is prompted to pick their own username. This is only used
+      during a user's first login. Once a localpart has been associated with a
+      remote user ID (see `get_remote_user_id`) it cannot be updated.
     * `displayname` - The displayname of the new user. If not provided, will default to
      the value of `mxid_localpart`.
     * `emails` - A list of emails for the new user. If not provided, will

@@ -65,3 +65,33 @@ systemctl restart matrix-synapse-worker@federation_reader.service
 systemctl enable matrix-synapse-worker@federation_writer.service
 systemctl restart matrix-synapse.target
 ```

+## Hardening
+
+**Optional:** If further hardening is desired, the file
+`override-hardened.conf` may be copied from
+`contrib/systemd/override-hardened.conf` in this repository to the location
+`/etc/systemd/system/matrix-synapse.service.d/override-hardened.conf` (the
+directory may have to be created). It enables certain sandboxing features in
+systemd to further secure the synapse service. You may read the comments to
+understand what the override file is doing. The same file will need to be copied
+to
+`/etc/systemd/system/matrix-synapse-worker@.service.d/override-hardened-worker.conf`
+(this directory may also have to be created) in order to apply the same
+hardening options to any worker processes.
+
+Once these files have been copied to their appropriate locations, simply reload
+systemd's manager config files and restart all Synapse services to apply the
+hardening options. They will automatically be applied at every restart as long
+as the override files are present at the specified locations.
+
+```sh
+systemctl daemon-reload
+
+# Restart services
+systemctl restart matrix-synapse.target
+```
+
+In order to see their effect, you may run `systemd-analyze security
+matrix-synapse.service` before and after applying the hardening options to see
+the changes being applied at a glance.

@@ -4,7 +4,7 @@ This document explains how to enable VoIP relaying on your Home Server with
 TURN.

 The synapse Matrix Home Server supports integration with TURN server via the
-[TURN server REST API](<http://tools.ietf.org/html/draft-uberti-behave-turn-rest-00>). This
+[TURN server REST API](<https://tools.ietf.org/html/draft-uberti-behave-turn-rest-00>). This
 allows the Home Server to generate credentials that are valid for use on the
 TURN server through the use of a secret shared between the Home Server and the
 TURN server.

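For reference, the REST API draft linked above derives short-lived credentials
from the shared secret roughly as follows (a sketch of the draft's scheme, not
a command from this document):

```sh
SECRET="your_shared_secret"           # must match the TURN server's secret
USERNAME="$(( $(date +%s) + 3600 ))"  # credential expiry, one hour from now
PASSWORD="$(printf '%s' "$USERNAME" | openssl dgst -sha1 -hmac "$SECRET" -binary | base64)"
echo "username=$USERNAME password=$PASSWORD"
```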
7  docs/upgrading/README.md  Normal file
@@ -0,0 +1,7 @@
+<!--
+Include the contents of UPGRADE.rst from the project root without moving it, which may
+break links around the internet. Additionally, note that SUMMARY.md is unable to
+directly link to content outside of the docs/ directory. So we use this file as a
+redirection.
+-->
+{{#include ../../UPGRADE.rst}}
7  docs/usage/administration/README.md  Normal file
@@ -0,0 +1,7 @@
+# Administration
+
+This section contains information on managing your Synapse homeserver. This includes:
+
+* Managing users, rooms and media via the Admin API.
+* Setting up metrics and monitoring to give you insight into your homeserver's health.
+* Configuring structured logging.
29  docs/usage/administration/admin_api/README.md  Normal file
@@ -0,0 +1,29 @@
+# The Admin API
+
+## Authenticate as a server admin
+
+Many of the API calls in the admin api will require an `access_token` for a
+server admin. (Note that a server admin is distinct from a room admin.)
+
+A user can be marked as a server admin by updating the database directly, e.g.:
+
+```sql
+UPDATE users SET admin = 1 WHERE name = '@foo:bar.com';
+```
+
+A new server admin user can also be created using the `register_new_matrix_user`
+command. This is a script that is located in the `scripts/` directory, or possibly
+already on your `$PATH` depending on how Synapse was installed.
+
+Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings.
+
+## Making an Admin API request
+
+Once you have your `access_token`, you will need to authenticate each request to an Admin API endpoint by
+providing the token as either a query parameter or a request header. To add it as a request header in cURL:
+
+```sh
+curl --header "Authorization: Bearer <access_token>" <the_rest_of_your_API_request>
+```
+
+For more details on access tokens in Matrix, please refer to the complete
+[matrix spec documentation](https://matrix.org/docs/spec/client_server/r0.6.1#using-access-tokens).
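For example, the server version endpoint can be queried with the token passed
as a query parameter instead (the endpoint is real, but the local URL and port
are assumptions about a typical deployment):

```sh
curl "http://localhost:8008/_synapse/admin/v1/server_version?access_token=<access_token>"
```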
4  docs/usage/configuration/README.md  Normal file
@@ -0,0 +1,4 @@
+# Configuration
+
+This section contains information on tweaking Synapse via the various options in the configuration file. A configuration
+file should have been generated when you [installed Synapse](../../setup/installation.html).
14  docs/usage/configuration/homeserver_sample_config.md  Normal file
@@ -0,0 +1,14 @@
+# Homeserver Sample Configuration File
+
+Below is a sample homeserver configuration file. The homeserver configuration file
+can be tweaked to change the behaviour of your homeserver. A restart of the server is
+generally required to apply any changes made to this file.
+
+Note that the contents below are *not* intended to be copied and used as the basis for
+a real homeserver.yaml. Instead, if you are starting from scratch, please generate
+a fresh config using Synapse by following the instructions in
+[Installation](../../setup/installation.md).
+
+```yaml
+{{#include ../../sample_config.yaml}}
+```
14  docs/usage/configuration/logging_sample_config.md  Normal file
@@ -0,0 +1,14 @@
+# Logging Sample Configuration File
+
+Below is a sample logging configuration file. This file can be tweaked to control how your
+homeserver will output logs. A restart of the server is generally required to apply any
+changes made to this file.
+
+Note that the contents below are *not* intended to be copied and used as the basis for
+a real logging configuration. Instead, if you are starting from scratch, please generate
+a fresh config using Synapse by following the instructions in
+[Installation](../../setup/installation.md).
+
+```yaml
+{{#include ../../sample_log_config.yaml}}
+```
15  docs/usage/configuration/user_authentication/README.md  Normal file
@@ -0,0 +1,15 @@
+# User Authentication
+
+Synapse supports multiple methods of authenticating users, either out-of-the-box or through custom pluggable
+authentication modules.
+
+Included in Synapse is support for authenticating users via:
+
+* A username and password.
+* An email address and password.
+* Single Sign-On through the SAML, OpenID Connect or CAS protocols.
+* JSON Web Tokens.
+* An administrator's shared secret.
+
+Synapse can additionally be extended to support custom authentication schemes through optional "password auth provider"
+modules.
@@ -7,6 +7,6 @@ who are present in a publicly viewable room present on the server.

 The directory info is stored in various tables, which can (typically after
 DB corruption) get stale or out of sync. If this happens, for now the
-solution to fix it is to execute the SQL [here](../synapse/storage/databases/main/schema/delta/53/user_dir_populate.sql)
+solution to fix it is to execute the SQL [here](https://github.com/matrix-org/synapse/blob/master/synapse/storage/schema/main/delta/53/user_dir_populate.sql)
 and then restart synapse. This should then start a background task to
 flush the current tables and regenerate the directory.

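One way to apply that SQL file on a PostgreSQL-backed deployment (a sketch; the
raw URL mirrors the link above and `synapse` is the assumed database name):

```sh
curl -O "https://raw.githubusercontent.com/matrix-org/synapse/master/synapse/storage/schema/main/delta/53/user_dir_populate.sql"
sudo -u postgres psql synapse -f user_dir_populate.sql
```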
30  docs/website_files/README.md  Normal file
@@ -0,0 +1,30 @@
+# Documentation Website Files and Assets
+
+This directory contains extra files for modifying the look and functionality of
+[mdbook](https://github.com/rust-lang/mdBook), the documentation software that's
+used to generate Synapse's documentation website.
+
+The configuration options in the `output.html` section of [book.toml](../../book.toml)
+point to additional JS/CSS in this directory that are added on each page load. In
+addition, the `theme` directory contains files that overwrite their counterparts in
+each of the default themes included with mdbook.
+
+Currently we use these files to generate a floating Table of Contents panel, the code
+for which was partially taken from
+[JorelAli/mdBook-pagetoc](https://github.com/JorelAli/mdBook-pagetoc/)
+before being modified such that it scrolls with the content of the page. This is handled
+by the `table-of-contents.js/css` files. The table of contents panel only appears on pages
+that have more than one header, as well as only appearing on desktop-sized monitors.
+
+We remove the navigation arrows which typically appear on the left and right side of the
+screen on desktop as they interfere with the table of contents. This is handled by
+the `remove-nav-buttons.css` file.
+
+Finally, we also stylise the chapter titles in the left sidebar by indenting them
+slightly so that they are more visually distinguishable from the section headers
+(the bold titles). This is done through the `indent-section-headers.css` file.
+
+More information can be found in mdbook's official documentation for
+[injecting page JS/CSS](https://rust-lang.github.io/mdBook/format/config.html)
+and
+[customising the default themes](https://rust-lang.github.io/mdBook/format/theme/index.html).
7  docs/website_files/indent-section-headers.css  Normal file
@@ -0,0 +1,7 @@
+/*
+ * Indents each chapter title in the left sidebar so that they aren't
+ * at the same level as the section headers.
+ */
+.chapter-item {
+    margin-left: 1em;
+}
8  docs/website_files/remove-nav-buttons.css  Normal file
@@ -0,0 +1,8 @@
+/* Remove the prev, next chapter buttons as they interfere with the
+ * table of contents.
+ * Note that the table of contents only appears on desktop, thus we
+ * only remove the desktop (wide) chapter buttons.
+ */
+.nav-wide-wrapper {
+    display: none
+}
42  docs/website_files/table-of-contents.css  Normal file
@@ -0,0 +1,42 @@
+@media only screen and (max-width:1439px) {
+    .sidetoc {
+        display: none;
+    }
+}
+
+@media only screen and (min-width:1440px) {
+    main {
+        position: relative;
+        margin-left: 100px !important;
+    }
+    .sidetoc {
+        margin-left: auto;
+        margin-right: auto;
+        left: calc(100% + (var(--content-max-width))/4 - 140px);
+        position: absolute;
+        text-align: right;
+    }
+    .pagetoc {
+        position: fixed;
+        width: 250px;
+        overflow: auto;
+        right: 20px;
+        height: calc(100% - var(--menu-bar-height));
+    }
+    .pagetoc a {
+        color: var(--fg) !important;
+        display: block;
+        padding: 5px 15px 5px 10px;
+        text-align: left;
+        text-decoration: none;
+    }
+    .pagetoc a:hover,
+    .pagetoc a.active {
+        background: var(--sidebar-bg) !important;
+        color: var(--sidebar-fg) !important;
+    }
+    .pagetoc .active {
+        background: var(--sidebar-bg);
+        color: var(--sidebar-fg);
+    }
+}
134  docs/website_files/table-of-contents.js  Normal file
@@ -0,0 +1,134 @@
|
||||
const getPageToc = () => document.getElementsByClassName('pagetoc')[0];
|
||||
|
||||
const pageToc = getPageToc();
|
||||
const pageTocChildren = [...pageToc.children];
|
||||
const headers = [...document.getElementsByClassName('header')];
|
||||
|
||||
|
||||
// Select highlighted item in ToC when clicking an item
|
||||
pageTocChildren.forEach(child => {
|
||||
child.addEventListener('click', () => {
|
||||
pageTocChildren.forEach(child => {
|
||||
child.classList.remove('active');
|
||||
});
|
||||
child.classList.add('active');
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
/**
|
||||
* Test whether a node is in the viewport
|
||||
*/
|
||||
function isInViewport(node) {
|
||||
const rect = node.getBoundingClientRect();
|
||||
return rect.top >= 0 && rect.left >= 0 && rect.bottom <= (window.innerHeight || document.documentElement.clientHeight) && rect.right <= (window.innerWidth || document.documentElement.clientWidth);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Set a new ToC entry.
|
||||
* Clear any previously highlighted ToC items, set the new one,
|
||||
* and adjust the ToC scroll position.
|
||||
*/
|
||||
function setTocEntry() {
|
||||
let activeEntry;
|
||||
const pageTocChildren = [...getPageToc().children];
|
||||
|
||||
// Calculate which header is the current one at the top of screen
|
||||
headers.forEach(header => {
|
||||
if (window.pageYOffset >= header.offsetTop) {
|
||||
activeEntry = header;
|
||||
}
|
||||
});
|
||||
|
||||
// Update selected item in ToC when scrolling
|
||||
pageTocChildren.forEach(child => {
|
||||
if (activeEntry.href.localeCompare(child.href) === 0) {
|
||||
child.classList.add('active');
|
||||
} else {
|
||||
child.classList.remove('active');
|
||||
}
|
||||
});
|
||||
|
||||
let tocEntryForLocation = document.querySelector(`nav a[href="${activeEntry.href}"]`);
|
||||
if (tocEntryForLocation) {
|
||||
const headingForLocation = document.querySelector(activeEntry.hash);
|
||||
if (headingForLocation && isInViewport(headingForLocation)) {
|
||||
// Update ToC scroll
|
||||
const nav = getPageToc();
|
||||
const content = document.querySelector('html');
|
||||
if (content.scrollTop !== 0) {
|
||||
nav.scrollTo({
|
||||
top: tocEntryForLocation.offsetTop - 100,
|
||||
left: 0,
|
||||
behavior: 'smooth',
|
||||
});
|
||||
} else {
|
||||
nav.scrollTop = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Populate sidebar on load
|
||||
*/
|
||||
window.addEventListener('load', () => {
|
||||
// Only create table of contents if there is more than one header on the page
|
||||
if (headers.length <= 1) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Create an entry in the page table of contents for each header in the document
|
||||
headers.forEach((header, index) => {
|
||||
const link = document.createElement('a');
|
||||
|
||||
// Indent shows hierarchy
|
||||
let indent = '0px';
|
||||
switch (header.parentElement.tagName) {
|
||||
case 'H1':
|
||||
indent = '5px';
|
||||
break;
|
||||
case 'H2':
|
||||
indent = '20px';
|
||||
break;
|
||||
case 'H3':
|
||||
indent = '30px';
|
||||
break;
|
||||
case 'H4':
|
||||
indent = '40px';
|
||||
break;
|
||||
case 'H5':
|
||||
indent = '50px';
|
||||
break;
|
||||
case 'H6':
|
||||
indent = '60px';
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
let tocEntry;
|
||||
if (index == 0) {
|
||||
// Create a bolded title for the first element
|
||||
tocEntry = document.createElement("strong");
|
||||
tocEntry.innerHTML = header.text;
|
||||
} else {
|
||||
// All other elements are non-bold
|
||||
tocEntry = document.createTextNode(header.text);
|
||||
}
|
||||
link.appendChild(tocEntry);
|
||||
|
||||
link.style.paddingLeft = indent;
|
||||
link.href = header.href;
|
||||
pageToc.appendChild(link);
|
||||
});
|
||||
setTocEntry.call();
|
||||
});
|
||||
|
||||
|
||||
// Handle active headers on scroll, if there is more than one header on the page
|
||||
if (headers.length > 1) {
|
||||
window.addEventListener('scroll', setTocEntry);
|
||||
}
|
||||
312  docs/website_files/theme/index.hbs  Normal file
@@ -0,0 +1,312 @@
|
||||
<!DOCTYPE HTML>
|
||||
<html lang="{{ language }}" class="sidebar-visible no-js {{ default_theme }}">
|
||||
<head>
|
||||
<!-- Book generated using mdBook -->
|
||||
<meta charset="UTF-8">
|
||||
<title>{{ title }}</title>
|
||||
{{#if is_print }}
|
||||
<meta name="robots" content="noindex" />
|
||||
{{/if}}
|
||||
{{#if base_url}}
|
||||
<base href="{{ base_url }}">
|
||||
{{/if}}
|
||||
|
||||
|
||||
<!-- Custom HTML head -->
|
||||
{{> head}}
|
||||
|
||||
<meta content="text/html; charset=utf-8" http-equiv="Content-Type">
|
||||
<meta name="description" content="{{ description }}">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
<meta name="theme-color" content="#ffffff" />
|
||||
|
||||
{{#if favicon_svg}}
|
||||
<link rel="icon" href="{{ path_to_root }}favicon.svg">
|
||||
{{/if}}
|
||||
{{#if favicon_png}}
|
||||
<link rel="shortcut icon" href="{{ path_to_root }}favicon.png">
|
||||
{{/if}}
|
||||
<link rel="stylesheet" href="{{ path_to_root }}css/variables.css">
|
||||
<link rel="stylesheet" href="{{ path_to_root }}css/general.css">
|
||||
<link rel="stylesheet" href="{{ path_to_root }}css/chrome.css">
|
||||
{{#if print_enable}}
|
||||
<link rel="stylesheet" href="{{ path_to_root }}css/print.css" media="print">
|
||||
{{/if}}
|
||||
|
||||
<!-- Fonts -->
|
||||
<link rel="stylesheet" href="{{ path_to_root }}FontAwesome/css/font-awesome.css">
|
||||
{{#if copy_fonts}}
|
||||
<link rel="stylesheet" href="{{ path_to_root }}fonts/fonts.css">
|
||||
{{/if}}
|
||||
|
||||
<!-- Highlight.js Stylesheets -->
|
||||
<link rel="stylesheet" href="{{ path_to_root }}highlight.css">
|
||||
<link rel="stylesheet" href="{{ path_to_root }}tomorrow-night.css">
|
||||
<link rel="stylesheet" href="{{ path_to_root }}ayu-highlight.css">
|
||||
|
||||
<!-- Custom theme stylesheets -->
|
||||
{{#each additional_css}}
|
||||
<link rel="stylesheet" href="{{ ../path_to_root }}{{ this }}">
|
||||
{{/each}}
|
||||
|
||||
{{#if mathjax_support}}
|
||||
<!-- MathJax -->
|
||||
<script async type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js?config=TeX-AMS-MML_HTMLorMML"></script>
|
||||
{{/if}}
|
||||
</head>
|
||||
<body>
|
||||
<!-- Provide site root to javascript -->
|
||||
<script type="text/javascript">
|
||||
var path_to_root = "{{ path_to_root }}";
|
||||
var default_theme = window.matchMedia("(prefers-color-scheme: dark)").matches ? "{{ preferred_dark_theme }}" : "{{ default_theme }}";
|
||||
</script>
|
||||
|
||||
<!-- Work around some values being stored in localStorage wrapped in quotes -->
|
||||
<script type="text/javascript">
|
||||
try {
|
||||
var theme = localStorage.getItem('mdbook-theme');
|
||||
var sidebar = localStorage.getItem('mdbook-sidebar');
|
||||
if (theme.startsWith('"') && theme.endsWith('"')) {
|
||||
localStorage.setItem('mdbook-theme', theme.slice(1, theme.length - 1));
|
||||
}
|
||||
if (sidebar.startsWith('"') && sidebar.endsWith('"')) {
|
||||
localStorage.setItem('mdbook-sidebar', sidebar.slice(1, sidebar.length - 1));
|
||||
}
|
||||
} catch (e) { }
|
||||
</script>
|
||||
|
||||
<!-- Set the theme before any content is loaded, prevents flash -->
|
||||
<script type="text/javascript">
|
||||
var theme;
|
||||
try { theme = localStorage.getItem('mdbook-theme'); } catch(e) { }
|
||||
if (theme === null || theme === undefined) { theme = default_theme; }
|
||||
var html = document.querySelector('html');
|
||||
html.classList.remove('no-js')
|
||||
html.classList.remove('{{ default_theme }}')
|
||||
html.classList.add(theme);
|
||||
html.classList.add('js');
|
||||
</script>
|
||||
|
||||
<!-- Hide / unhide sidebar before it is displayed -->
|
||||
<script type="text/javascript">
|
||||
var html = document.querySelector('html');
|
||||
var sidebar = 'hidden';
|
||||
if (document.body.clientWidth >= 1080) {
|
||||
try { sidebar = localStorage.getItem('mdbook-sidebar'); } catch(e) { }
|
||||
sidebar = sidebar || 'visible';
|
||||
}
|
||||
html.classList.remove('sidebar-visible');
|
||||
html.classList.add("sidebar-" + sidebar);
|
||||
</script>
|
||||
|
||||
<nav id="sidebar" class="sidebar" aria-label="Table of contents">
|
||||
<div class="sidebar-scrollbox">
|
||||
{{#toc}}{{/toc}}
|
||||
</div>
|
||||
<div id="sidebar-resize-handle" class="sidebar-resize-handle"></div>
|
||||
</nav>
|
||||
|
||||
<div id="page-wrapper" class="page-wrapper">
|
||||
|
||||
<div class="page">
|
||||
{{> header}}
|
||||
<div id="menu-bar-hover-placeholder"></div>
|
||||
<div id="menu-bar" class="menu-bar sticky bordered">
|
||||
<div class="left-buttons">
|
||||
<button id="sidebar-toggle" class="icon-button" type="button" title="Toggle Table of Contents" aria-label="Toggle Table of Contents" aria-controls="sidebar">
|
||||
<i class="fa fa-bars"></i>
|
||||
</button>
|
||||
<button id="theme-toggle" class="icon-button" type="button" title="Change theme" aria-label="Change theme" aria-haspopup="true" aria-expanded="false" aria-controls="theme-list">
|
||||
<i class="fa fa-paint-brush"></i>
|
||||
</button>
|
||||
<ul id="theme-list" class="theme-popup" aria-label="Themes" role="menu">
|
||||
<li role="none"><button role="menuitem" class="theme" id="light">{{ theme_option "Light" }}</button></li>
|
||||
<li role="none"><button role="menuitem" class="theme" id="rust">{{ theme_option "Rust" }}</button></li>
|
||||
<li role="none"><button role="menuitem" class="theme" id="coal">{{ theme_option "Coal" }}</button></li>
|
||||
<li role="none"><button role="menuitem" class="theme" id="navy">{{ theme_option "Navy" }}</button></li>
|
||||
<li role="none"><button role="menuitem" class="theme" id="ayu">{{ theme_option "Ayu" }}</button></li>
|
||||
</ul>
|
||||
{{#if search_enabled}}
|
||||
<button id="search-toggle" class="icon-button" type="button" title="Search. (Shortkey: s)" aria-label="Toggle Searchbar" aria-expanded="false" aria-keyshortcuts="S" aria-controls="searchbar">
|
||||
<i class="fa fa-search"></i>
|
||||
</button>
|
||||
{{/if}}
|
||||
</div>
|
||||
|
||||
<h1 class="menu-title">{{ book_title }}</h1>
|
||||
|
||||
<div class="right-buttons">
|
||||
{{#if print_enable}}
|
||||
<a href="{{ path_to_root }}print.html" title="Print this book" aria-label="Print this book">
|
||||
<i id="print-button" class="fa fa-print"></i>
|
||||
</a>
|
||||
{{/if}}
|
||||
{{#if git_repository_url}}
|
||||
<a href="{{git_repository_url}}" title="Git repository" aria-label="Git repository">
|
||||
<i id="git-repository-button" class="fa {{git_repository_icon}}"></i>
|
||||
</a>
|
||||
{{/if}}
|
||||
{{#if git_repository_edit_url}}
|
||||
<a href="{{git_repository_edit_url}}" title="Suggest an edit" aria-label="Suggest an edit">
|
||||
<i id="git-edit-button" class="fa fa-edit"></i>
|
||||
</a>
|
||||
{{/if}}
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{{#if search_enabled}}
|
||||
<div id="search-wrapper" class="hidden">
|
||||
<form id="searchbar-outer" class="searchbar-outer">
|
||||
<input type="search" id="searchbar" name="searchbar" placeholder="Search this book ..." aria-controls="searchresults-outer" aria-describedby="searchresults-header">
|
||||
</form>
|
||||
<div id="searchresults-outer" class="searchresults-outer hidden">
|
||||
<div id="searchresults-header" class="searchresults-header"></div>
|
||||
<ul id="searchresults">
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
{{/if}}
|
||||
|
||||
<!-- Apply ARIA attributes after the sidebar and the sidebar toggle button are added to the DOM -->
|
||||
<script type="text/javascript">
|
||||
document.getElementById('sidebar-toggle').setAttribute('aria-expanded', sidebar === 'visible');
|
||||
document.getElementById('sidebar').setAttribute('aria-hidden', sidebar !== 'visible');
|
||||
Array.from(document.querySelectorAll('#sidebar a')).forEach(function(link) {
|
||||
link.setAttribute('tabIndex', sidebar === 'visible' ? 0 : -1);
|
||||
});
|
||||
</script>
|
||||
|
||||
<div id="content" class="content">
|
||||
<main>
|
||||
<!-- Page table of contents -->
|
||||
<div class="sidetoc">
|
||||
<nav class="pagetoc"></nav>
|
||||
</div>
|
||||
|
||||
{{{ content }}}
|
||||
</main>
|
||||
|
||||
<nav class="nav-wrapper" aria-label="Page navigation">
|
||||
<!-- Mobile navigation buttons -->
|
||||
{{#previous}}
|
||||
<a rel="prev" href="{{ path_to_root }}{{link}}" class="mobile-nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
|
||||
<i class="fa fa-angle-left"></i>
|
||||
</a>
|
||||
{{/previous}}
|
||||
|
||||
{{#next}}
|
||||
<a rel="next" href="{{ path_to_root }}{{link}}" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
|
||||
<i class="fa fa-angle-right"></i>
|
||||
</a>
|
||||
{{/next}}
|
||||
|
||||
<div style="clear: both"></div>
|
||||
</nav>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<nav class="nav-wide-wrapper" aria-label="Page navigation">
|
||||
{{#previous}}
|
||||
<a rel="prev" href="{{ path_to_root }}{{link}}" class="nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
|
||||
<i class="fa fa-angle-left"></i>
|
||||
</a>
|
||||
{{/previous}}
|
||||
|
||||
{{#next}}
|
||||
<a rel="next" href="{{ path_to_root }}{{link}}" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
|
||||
<i class="fa fa-angle-right"></i>
|
||||
</a>
|
||||
{{/next}}
|
||||
</nav>
|
||||
|
||||
</div>
|
||||
|
||||
{{#if livereload}}
|
||||
<!-- Livereload script (if served using the cli tool) -->
|
||||
<script type="text/javascript">
|
||||
var socket = new WebSocket("{{{livereload}}}");
|
||||
socket.onmessage = function (event) {
|
||||
if (event.data === "reload") {
|
||||
socket.close();
|
||||
location.reload();
|
||||
}
|
||||
};
|
||||
window.onbeforeunload = function() {
|
||||
socket.close();
|
||||
}
|
||||
</script>
|
||||
{{/if}}
|
||||
|
||||
{{#if google_analytics}}
|
||||
<!-- Google Analytics Tag -->
|
||||
<script type="text/javascript">
|
||||
var localAddrs = ["localhost", "127.0.0.1", ""];
|
||||
// make sure we don't activate google analytics if the developer is
|
||||
// inspecting the book locally...
|
||||
if (localAddrs.indexOf(document.location.hostname) === -1) {
|
||||
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
|
||||
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
|
||||
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
|
||||
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
|
||||
ga('create', '{{google_analytics}}', 'auto');
|
||||
ga('send', 'pageview');
|
||||
}
|
||||
</script>
|
||||
{{/if}}
|
||||
|
||||
{{#if playground_line_numbers}}
|
||||
<script type="text/javascript">
|
||||
window.playground_line_numbers = true;
|
||||
</script>
|
||||
{{/if}}
|
||||
|
||||
{{#if playground_copyable}}
|
||||
<script type="text/javascript">
|
||||
window.playground_copyable = true;
|
||||
</script>
|
||||
{{/if}}
|
||||
|
||||
{{#if playground_js}}
|
||||
<script src="{{ path_to_root }}ace.js" type="text/javascript" charset="utf-8"></script>
|
||||
<script src="{{ path_to_root }}editor.js" type="text/javascript" charset="utf-8"></script>
|
||||
<script src="{{ path_to_root }}mode-rust.js" type="text/javascript" charset="utf-8"></script>
|
||||
<script src="{{ path_to_root }}theme-dawn.js" type="text/javascript" charset="utf-8"></script>
|
||||
<script src="{{ path_to_root }}theme-tomorrow_night.js" type="text/javascript" charset="utf-8"></script>
|
||||
{{/if}}
|
||||
|
||||
{{#if search_js}}
|
||||
<script src="{{ path_to_root }}elasticlunr.min.js" type="text/javascript" charset="utf-8"></script>
|
||||
<script src="{{ path_to_root }}mark.min.js" type="text/javascript" charset="utf-8"></script>
|
||||
<script src="{{ path_to_root }}searcher.js" type="text/javascript" charset="utf-8"></script>
|
||||
{{/if}}
|
||||
|
||||
<script src="{{ path_to_root }}clipboard.min.js" type="text/javascript" charset="utf-8"></script>
|
||||
<script src="{{ path_to_root }}highlight.js" type="text/javascript" charset="utf-8"></script>
|
||||
<script src="{{ path_to_root }}book.js" type="text/javascript" charset="utf-8"></script>
|
||||
|
||||
<!-- Custom JS scripts -->
|
||||
{{#each additional_js}}
|
||||
<script type="text/javascript" src="{{ ../path_to_root }}{{this}}"></script>
|
||||
{{/each}}
|
||||
|
||||
{{#if is_print}}
|
||||
{{#if mathjax_support}}
|
||||
<script type="text/javascript">
|
||||
window.addEventListener('load', function() {
|
||||
MathJax.Hub.Register.StartupHook('End', function() {
|
||||
window.setTimeout(window.print, 100);
|
||||
});
|
||||
});
|
||||
</script>
|
||||
{{else}}
|
||||
<script type="text/javascript">
|
||||
window.addEventListener('load', function() {
|
||||
window.setTimeout(window.print, 100);
|
||||
});
|
||||
</script>
|
||||
{{/if}}
|
||||
{{/if}}
|
||||
|
||||
</body>
|
||||
</html>
|
||||
4  docs/welcome_and_overview.md  Normal file
@@ -0,0 +1,4 @@
+# Introduction
+
+Welcome to the documentation repository for Synapse, the reference
+[Matrix](https://matrix.org) homeserver implementation.
@@ -228,6 +228,9 @@ expressions:
     ^/_matrix/client/(api/v1|r0|unstable)/joined_groups$
     ^/_matrix/client/(api/v1|r0|unstable)/publicised_groups$
     ^/_matrix/client/(api/v1|r0|unstable)/publicised_groups/
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/event/
+    ^/_matrix/client/(api/v1|r0|unstable)/joined_rooms$
+    ^/_matrix/client/(api/v1|r0|unstable)/search$

     # Registration/login requests
     ^/_matrix/client/(api/v1|r0|unstable)/login$

15  mypy.ini
@@ -32,6 +32,7 @@ files =
   synapse/http/federation/matrix_federation_agent.py,
   synapse/http/federation/well_known_resolver.py,
   synapse/http/matrixfederationclient.py,
+  synapse/http/servlet.py,
   synapse/http/server.py,
   synapse/http/site.py,
   synapse/logging,
@@ -71,8 +72,13 @@ files =
   synapse/types.py,
   synapse/util/async_helpers.py,
   synapse/util/caches,
   synapse/util/daemonize.py,
   synapse/util/hash.py,
   synapse/util/iterutils.py,
   synapse/util/metrics.py,
   synapse/util/macaroons.py,
   synapse/util/module_loader.py,
   synapse/util/msisdn.py,
   synapse/util/stringutils.py,
   synapse/visibility.py,
   tests/replication,
@@ -80,6 +86,7 @@ files =
   tests/handlers/test_password_providers.py,
   tests/rest/client/v1/test_login.py,
   tests/rest/client/v2_alpha/test_auth.py,
+  tests/util/test_itertools.py,
   tests/util/test_stream_change_cache.py

 [mypy-pymacaroons.*]
@@ -124,7 +131,7 @@ ignore_missing_imports = True
 [mypy-canonicaljson]
 ignore_missing_imports = True

-[mypy-jaeger_client]
+[mypy-jaeger_client.*]
 ignore_missing_imports = True

 [mypy-jsonschema]
@@ -174,3 +181,9 @@ ignore_missing_imports = True

 [mypy-pympler.*]
 ignore_missing_imports = True
+
+[mypy-phonenumbers.*]
+ignore_missing_imports = True
+
+[mypy-ijson.*]
+ignore_missing_imports = True

@@ -21,18 +21,18 @@ DISTS = (
|
||||
"debian:buster",
|
||||
"debian:bullseye",
|
||||
"debian:sid",
|
||||
"ubuntu:bionic", # 18.04 LTS (our EOL forced by Py36 on 2021-12-23)
|
||||
"ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
|
||||
"ubuntu:groovy", # 20.10 (EOL 2021-07-07)
|
||||
"ubuntu:bionic", # 18.04 LTS (our EOL forced by Py36 on 2021-12-23)
|
||||
"ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
|
||||
"ubuntu:groovy", # 20.10 (EOL 2021-07-07)
|
||||
"ubuntu:hirsute", # 21.04 (EOL 2022-01-05)
|
||||
)
|
||||
|
||||
DESC = '''\
|
||||
DESC = """\
|
||||
Builds .debs for synapse, using a Docker image for the build environment.
|
||||
|
||||
By default, builds for all known distributions, but a list of distributions
|
||||
can be passed on the commandline for debugging.
|
||||
'''
|
||||
"""
|
||||
|
||||
|
||||
class Builder(object):
|
||||
@@ -46,7 +46,7 @@ class Builder(object):
|
||||
"""Build deb for a single distribution"""
|
||||
|
||||
if self._failed:
|
||||
print("not building %s due to earlier failure" % (dist, ))
|
||||
print("not building %s due to earlier failure" % (dist,))
|
||||
raise Exception("failed")
|
||||
|
||||
try:
|
||||
@@ -68,48 +68,65 @@ class Builder(object):
|
||||
# we tend to get source packages which are full of debs. (We could hack
|
||||
# around that with more magic in the build_debian.sh script, but that
|
||||
# doesn't solve the problem for natively-run dpkg-buildpackage).
|
||||
debsdir = os.path.join(projdir, '../debs')
|
||||
debsdir = os.path.join(projdir, "../debs")
|
||||
os.makedirs(debsdir, exist_ok=True)
|
||||
|
||||
if self.redirect_stdout:
|
||||
logfile = os.path.join(debsdir, "%s.buildlog" % (tag, ))
|
||||
logfile = os.path.join(debsdir, "%s.buildlog" % (tag,))
|
||||
print("building %s: directing output to %s" % (dist, logfile))
|
||||
stdout = open(logfile, "w")
|
||||
else:
|
||||
stdout = None
|
||||
|
||||
# first build a docker image for the build environment
|
||||
subprocess.check_call([
|
||||
"docker", "build",
|
||||
"--tag", "dh-venv-builder:" + tag,
|
||||
"--build-arg", "distro=" + dist,
|
||||
"-f", "docker/Dockerfile-dhvirtualenv",
|
||||
"docker",
|
||||
], stdout=stdout, stderr=subprocess.STDOUT)
|
||||
subprocess.check_call(
|
||||
[
|
||||
"docker",
|
||||
"build",
|
||||
"--tag",
|
||||
"dh-venv-builder:" + tag,
|
||||
"--build-arg",
|
||||
"distro=" + dist,
|
||||
"-f",
|
||||
"docker/Dockerfile-dhvirtualenv",
|
||||
"docker",
|
||||
],
|
||||
stdout=stdout,
|
||||
stderr=subprocess.STDOUT,
|
||||
)
|
||||
|
||||
container_name = "synapse_build_" + tag
|
||||
with self._lock:
|
||||
self.active_containers.add(container_name)
|
||||
|
||||
# then run the build itself
|
||||
subprocess.check_call([
|
||||
"docker", "run",
|
||||
"--rm",
|
||||
"--name", container_name,
|
||||
"--volume=" + projdir + ":/synapse/source:ro",
|
||||
"--volume=" + debsdir + ":/debs",
|
||||
"-e", "TARGET_USERID=%i" % (os.getuid(), ),
|
||||
"-e", "TARGET_GROUPID=%i" % (os.getgid(), ),
|
||||
"-e", "DEB_BUILD_OPTIONS=%s" % ("nocheck" if skip_tests else ""),
|
||||
"dh-venv-builder:" + tag,
|
||||
], stdout=stdout, stderr=subprocess.STDOUT)
|
||||
subprocess.check_call(
|
||||
[
|
||||
"docker",
|
||||
"run",
|
||||
"--rm",
|
||||
"--name",
|
||||
container_name,
|
||||
"--volume=" + projdir + ":/synapse/source:ro",
|
||||
"--volume=" + debsdir + ":/debs",
|
||||
"-e",
|
||||
"TARGET_USERID=%i" % (os.getuid(),),
|
||||
"-e",
|
||||
"TARGET_GROUPID=%i" % (os.getgid(),),
|
||||
"-e",
|
||||
"DEB_BUILD_OPTIONS=%s" % ("nocheck" if skip_tests else ""),
|
||||
"dh-venv-builder:" + tag,
|
||||
],
|
||||
stdout=stdout,
|
||||
stderr=subprocess.STDOUT,
|
||||
)
|
||||
|
||||
with self._lock:
|
||||
self.active_containers.remove(container_name)
|
||||
|
||||
if stdout is not None:
|
||||
stdout.close()
|
||||
print("Completed build of %s" % (dist, ))
|
||||
print("Completed build of %s" % (dist,))
|
||||
|
||||
def kill_containers(self):
|
||||
with self._lock:
|
||||
@@ -117,9 +134,14 @@ class Builder(object):
|
||||
|
||||
for c in active:
|
||||
print("killing container %s" % (c,))
|
||||
subprocess.run([
|
||||
"docker", "kill", c,
|
||||
], stdout=subprocess.DEVNULL)
|
||||
subprocess.run(
|
||||
[
|
||||
"docker",
|
||||
"kill",
|
||||
c,
|
||||
],
|
||||
stdout=subprocess.DEVNULL,
|
||||
)
|
||||
with self._lock:
|
||||
self.active_containers.remove(c)
|
||||
|
||||
@@ -130,31 +152,38 @@ def run_builds(dists, jobs=1, skip_tests=False):
|
||||
def sig(signum, _frame):
|
||||
print("Caught SIGINT")
|
||||
builder.kill_containers()
|
||||
|
||||
signal.signal(signal.SIGINT, sig)
|
||||
|
||||
with ThreadPoolExecutor(max_workers=jobs) as e:
|
||||
res = e.map(lambda dist: builder.run_build(dist, skip_tests), dists)
|
||||
|
||||
# make sure we consume the iterable so that exceptions are raised.
|
||||
for r in res:
|
||||
for _ in res:
|
||||
pass
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(
|
||||
description=DESC,
|
||||
)
|
||||
parser.add_argument(
|
||||
'-j', '--jobs', type=int, default=1,
|
||||
help='specify the number of builds to run in parallel',
|
||||
"-j",
|
||||
"--jobs",
|
||||
type=int,
|
||||
default=1,
|
||||
help="specify the number of builds to run in parallel",
|
||||
)
|
||||
parser.add_argument(
|
||||
'--no-check', action='store_true',
|
||||
help='skip running tests after building',
|
||||
"--no-check",
|
||||
action="store_true",
|
||||
help="skip running tests after building",
|
||||
)
|
||||
parser.add_argument(
|
||||
'dist', nargs='*', default=DISTS,
|
||||
help='a list of distributions to build for. Default: %(default)s',
|
||||
"dist",
|
||||
nargs="*",
|
||||
default=DISTS,
|
||||
help="a list of distributions to build for. Default: %(default)s",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
run_builds(dists=args.dist, jobs=args.jobs, skip_tests=args.no_check)
|
||||
|
||||
@@ -10,6 +10,9 @@
 # checkout by setting the COMPLEMENT_DIR environment variable to the
 # filepath of a local Complement checkout.
 #
+# By default Synapse is run in monolith mode. This can be overridden by
+# setting the WORKERS environment variable.
+#
 # A regular expression of test method names can be supplied as the first
 # argument to the script. Complement will then only run those tests. If
 # no regex is supplied, all tests are run. For example;
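For instance (illustrative invocations; the test name regex is an assumption,
and the script is assumed to live at `scripts-dev/complement.sh`):

```sh
# Run the full suite against a monolith deployment
./scripts-dev/complement.sh

# Run only matching tests against a worker-mode deployment
WORKERS=1 ./scripts-dev/complement.sh TestOutboundFederation
```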
@@ -32,10 +35,26 @@ if [[ -z "$COMPLEMENT_DIR" ]]; then
   echo "Checkout available at 'complement-master'"
 fi

+# If we're using workers, modify the docker files slightly.
+if [[ -n "$WORKERS" ]]; then
+  BASE_IMAGE=matrixdotorg/synapse-workers
+  BASE_DOCKERFILE=docker/Dockerfile-workers
+  export COMPLEMENT_BASE_IMAGE=complement-synapse-workers
+  COMPLEMENT_DOCKERFILE=SynapseWorkers.Dockerfile
+  # And provide some more configuration to complement.
+  export COMPLEMENT_CA=true
+  export COMPLEMENT_VERSION_CHECK_ITERATIONS=500
+else
+  BASE_IMAGE=matrixdotorg/synapse
+  BASE_DOCKERFILE=docker/Dockerfile
+  export COMPLEMENT_BASE_IMAGE=complement-synapse
+  COMPLEMENT_DOCKERFILE=Synapse.Dockerfile
+fi

 # Build the base Synapse image from the local checkout
-docker build -t matrixdotorg/synapse -f docker/Dockerfile .
+docker build -t $BASE_IMAGE -f "$BASE_DOCKERFILE" .
 # Build the Synapse monolith image from Complement, based on the above image we just built
-docker build -t complement-synapse -f "$COMPLEMENT_DIR/dockerfiles/Synapse.Dockerfile" "$COMPLEMENT_DIR/dockerfiles"
+docker build -t $COMPLEMENT_BASE_IMAGE -f "$COMPLEMENT_DIR/dockerfiles/$COMPLEMENT_DOCKERFILE" "$COMPLEMENT_DIR/dockerfiles"

 cd "$COMPLEMENT_DIR"

@@ -46,4 +65,4 @@ if [[ -n "$1" ]]; then
 fi

 # Run the tests!
-COMPLEMENT_BASE_IMAGE=complement-synapse go test -v -tags synapse_blacklist,msc2946,msc3083 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests
+go test -v -tags synapse_blacklist,msc2946,msc3083 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests

@@ -1,115 +0,0 @@
|
||||
import hashlib
|
||||
import json
|
||||
import sys
|
||||
import time
|
||||
|
||||
import psycopg2
|
||||
import yaml
|
||||
from canonicaljson import encode_canonical_json
|
||||
from signedjson.key import read_signing_keys
|
||||
from signedjson.sign import sign_json
|
||||
from unpaddedbase64 import encode_base64
|
||||
|
||||
db_binary_type = memoryview
|
||||
|
||||
|
||||
def select_v1_keys(connection):
|
||||
cursor = connection.cursor()
|
||||
cursor.execute("SELECT server_name, key_id, verify_key FROM server_signature_keys")
|
||||
rows = cursor.fetchall()
|
||||
cursor.close()
|
||||
results = {}
|
||||
for server_name, key_id, verify_key in rows:
|
||||
results.setdefault(server_name, {})[key_id] = encode_base64(verify_key)
|
||||
return results
|
||||
|
||||
|
||||
def select_v1_certs(connection):
|
||||
cursor = connection.cursor()
|
||||
cursor.execute("SELECT server_name, tls_certificate FROM server_tls_certificates")
|
||||
rows = cursor.fetchall()
|
||||
cursor.close()
|
||||
results = {}
|
||||
for server_name, tls_certificate in rows:
|
||||
results[server_name] = tls_certificate
|
||||
return results
|
||||
|
||||
|
||||
def select_v2_json(connection):
|
||||
cursor = connection.cursor()
|
||||
cursor.execute("SELECT server_name, key_id, key_json FROM server_keys_json")
|
||||
rows = cursor.fetchall()
|
||||
cursor.close()
|
||||
results = {}
|
||||
for server_name, key_id, key_json in rows:
|
||||
results.setdefault(server_name, {})[key_id] = json.loads(
|
||||
str(key_json).decode("utf-8")
|
||||
)
|
||||
return results
|
||||
|
||||
|
||||
def convert_v1_to_v2(server_name, valid_until, keys, certificate):
|
||||
return {
|
||||
"old_verify_keys": {},
|
||||
"server_name": server_name,
|
||||
"verify_keys": {key_id: {"key": key} for key_id, key in keys.items()},
|
||||
"valid_until_ts": valid_until,
|
||||
"tls_fingerprints": [fingerprint(certificate)],
|
||||
}
|
||||
|
||||
|
||||
def fingerprint(certificate):
|
||||
finger = hashlib.sha256(certificate)
|
||||
return {"sha256": encode_base64(finger.digest())}
|
||||
|
||||
|
||||
def rows_v2(server, json):
|
||||
valid_until = json["valid_until_ts"]
|
||||
key_json = encode_canonical_json(json)
|
||||
for key_id in json["verify_keys"]:
|
||||
yield (server, key_id, "-", valid_until, valid_until, db_binary_type(key_json))
|
||||
|
||||
|
||||
def main():
|
||||
config = yaml.safe_load(open(sys.argv[1]))
|
||||
valid_until = int(time.time() / (3600 * 24)) * 1000 * 3600 * 24
|
||||
|
||||
server_name = config["server_name"]
|
||||
signing_key = read_signing_keys(open(config["signing_key_path"]))[0]
|
||||
|
||||
database = config["database"]
|
||||
assert database["name"] == "psycopg2", "Can only convert for postgresql"
|
||||
args = database["args"]
|
||||
args.pop("cp_max")
|
||||
args.pop("cp_min")
|
||||
connection = psycopg2.connect(**args)
|
||||
keys = select_v1_keys(connection)
|
||||
certificates = select_v1_certs(connection)
|
||||
json = select_v2_json(connection)
|
||||
|
||||
result = {}
|
||||
for server in keys:
|
||||
if server not in json:
|
||||
v2_json = convert_v1_to_v2(
|
||||
server, valid_until, keys[server], certificates[server]
|
||||
)
|
||||
v2_json = sign_json(v2_json, server_name, signing_key)
|
||||
result[server] = v2_json
|
||||
|
||||
yaml.safe_dump(result, sys.stdout, default_flow_style=False)
|
||||
|
||||
rows = [row for server, json in result.items() for row in rows_v2(server, json)]
|
||||
|
||||
cursor = connection.cursor()
|
||||
cursor.executemany(
|
||||
"INSERT INTO server_keys_json ("
|
||||
" server_name, key_id, from_server,"
|
||||
" ts_added_ms, ts_valid_until_ms, key_json"
|
||||
") VALUES (%s, %s, %s, %s, %s, %s)",
|
||||
rows,
|
||||
)
|
||||
connection.commit()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
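
The valid_until computation in the removed script above rounds the current time down to a whole day and then expresses it in milliseconds. A minimal standalone sketch of that arithmetic, outside the script:

import time

# Sketch of the removed script's valid_until arithmetic: round the current
# time down to a whole day, then express it in milliseconds.
now_s = time.time()  # seconds since the epoch
whole_days = int(now_s / (3600 * 24))  # complete days since the epoch
valid_until_ms = whole_days * 1000 * 3600 * 24  # start of that day, in ms

assert valid_until_ms % (24 * 3600 * 1000) == 0
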
@@ -80,8 +80,22 @@ else
# then lint everything!
if [[ -z ${files+x} ]]; then
# Lint all source code files and directories
# Note: this list aims the mirror the one in tox.ini
files=("synapse" "docker" "tests" "scripts-dev" "scripts" "contrib" "synctl" "setup.py" "synmark" "stubs" ".buildkite")
# Note: this list aims to mirror the one in tox.ini
files=(
"synapse" "docker" "tests"
# annoyingly, black doesn't find these so we have to list them
"scripts/export_signing_key"
"scripts/generate_config"
"scripts/generate_log_config"
"scripts/hash_password"
"scripts/register_new_matrix_user"
"scripts/synapse_port_db"
"scripts-dev"
"scripts-dev/build_debian_packages"
"scripts-dev/sign_json"
"scripts-dev/update_database"
"contrib" "synctl" "setup.py" "synmark" "stubs" ".buildkite"
)
fi
fi


@@ -139,7 +139,7 @@ def run():
click.get_current_context().abort()

# Switch to the release branch.
release_branch_name = f"release-v{base_version}"
release_branch_name = f"release-v{current_version.major}.{current_version.minor}"
release_branch = find_ref(repo, release_branch_name)
if release_branch:
if release_branch.is_remote():

@@ -30,7 +30,11 @@ def exit(status: int = 0, message: Optional[str] = None):
def format_plain(public_key: nacl.signing.VerifyKey):
print(
"%s:%s %s"
% (public_key.alg, public_key.version, encode_verify_key_base64(public_key),)
% (
public_key.alg,
public_key.version,
encode_verify_key_base64(public_key),
)
)


@@ -50,7 +54,10 @@ if __name__ == "__main__":
parser = argparse.ArgumentParser()

parser.add_argument(
"key_file", nargs="+", type=argparse.FileType("r"), help="The key file to read",
"key_file",
nargs="+",
type=argparse.FileType("r"),
help="The key file to read",
)

parser.add_argument(
@@ -63,7 +70,7 @@ if __name__ == "__main__":
parser.add_argument(
"--expiry-ts",
type=int,
default=int(time.time() * 1000) + 6*3600000,
default=int(time.time() * 1000) + 6 * 3600000,
help=(
"The expiry time to use for -x, in milliseconds since 1970. The default "
"is (now+6h)."

@@ -11,23 +11,22 @@ if __name__ == "__main__":
parser.add_argument(
"--config-dir",
default="CONFDIR",

help="The path where the config files are kept. Used to create filenames for "
"things like the log config and the signing key. Default: %(default)s",
"things like the log config and the signing key. Default: %(default)s",
)

parser.add_argument(
"--data-dir",
default="DATADIR",
help="The path where the data files are kept. Used to create filenames for "
"things like the database and media store. Default: %(default)s",
"things like the database and media store. Default: %(default)s",
)

parser.add_argument(
"--server-name",
default="SERVERNAME",
help="The server name. Used to initialise the server_name config param, but also "
"used in the names of some of the config files. Default: %(default)s",
"used in the names of some of the config files. Default: %(default)s",
)

parser.add_argument(
@@ -41,21 +40,22 @@ if __name__ == "__main__":
"--generate-secrets",
action="store_true",
help="Enable generation of new secrets for things like the macaroon_secret_key."
"By default, these parameters will be left unset."
"By default, these parameters will be left unset.",
)

parser.add_argument(
"-o", "--output-file",
type=argparse.FileType('w'),
"-o",
"--output-file",
type=argparse.FileType("w"),
default=sys.stdout,
help="File to write the configuration to. Default: stdout",
)

parser.add_argument(
"--header-file",
type=argparse.FileType('r'),
type=argparse.FileType("r"),
help="File from which to read a header, which will be printed before the "
"generated config.",
"generated config.",
)

args = parser.parse_args()

@@ -41,7 +41,7 @@ if __name__ == "__main__":
parser.add_argument(
"-c",
"--config",
type=argparse.FileType('r'),
type=argparse.FileType("r"),
help=(
"Path to server config file. "
"Used to read in bcrypt_rounds and password_pepper."
@@ -72,8 +72,8 @@ if __name__ == "__main__":
pw = unicodedata.normalize("NFKC", password)

hashed = bcrypt.hashpw(
pw.encode('utf8') + password_pepper.encode("utf8"),
pw.encode("utf8") + password_pepper.encode("utf8"),
bcrypt.gensalt(bcrypt_rounds),
).decode('ascii')
).decode("ascii")

print(hashed)
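
For context, here is a self-contained sketch of the hashing path shown in the hunk above; the helper name and the default work factor are assumptions for illustration, not part of the script:

import unicodedata

import bcrypt


def hash_password(password, password_pepper="", bcrypt_rounds=12):
    # Hypothetical helper mirroring the hunk above: normalise to NFKC so
    # visually-equivalent passwords hash identically, append the pepper,
    # then bcrypt with the configured work factor.
    pw = unicodedata.normalize("NFKC", password)
    return bcrypt.hashpw(
        pw.encode("utf8") + password_pepper.encode("utf8"),
        bcrypt.gensalt(bcrypt_rounds),
    ).decode("ascii")


print(hash_password("correct horse battery staple"))
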

@@ -294,8 +294,7 @@ class Porter(object):
return table, already_ported, total_to_port, forward_chunk, backward_chunk

async def get_table_constraints(self) -> Dict[str, Set[str]]:
"""Returns a map of tables that have foreign key constraints to tables they depend on.
"""
"""Returns a map of tables that have foreign key constraints to tables they depend on."""

def _get_constraints(txn):
# We can pull the information about foreign key constraints out from
@@ -504,7 +503,9 @@ class Porter(object):
return

def build_db_store(
self, db_config: DatabaseConnectionConfig, allow_outdated_version: bool = False,
self,
db_config: DatabaseConnectionConfig,
allow_outdated_version: bool = False,
):
"""Builds and returns a database store using the provided configuration.

@@ -740,7 +741,7 @@ class Porter(object):
return col

outrows = []
for i, row in enumerate(rows):
for row in rows:
try:
outrows.append(
tuple(conv(j, col) for j, col in enumerate(row) if j > 0)
@@ -890,8 +891,7 @@ class Porter(object):
await self.postgres_store.db_pool.runInteraction("setup_user_id_seq", r)

async def _setup_events_stream_seqs(self) -> None:
"""Set the event stream sequences to the correct values.
"""
"""Set the event stream sequences to the correct values."""

# We get called before we've ported the events table, so we need to
# fetch the current positions from the SQLite store.
@@ -920,12 +920,14 @@ class Porter(object):
)

await self.postgres_store.db_pool.runInteraction(
"_setup_events_stream_seqs", _setup_events_stream_seqs_set_pos,
"_setup_events_stream_seqs",
_setup_events_stream_seqs_set_pos,
)

async def _setup_sequence(self, sequence_name: str, stream_id_tables: Iterable[str]) -> None:
"""Set a sequence to the correct value.
"""
async def _setup_sequence(
self, sequence_name: str, stream_id_tables: Iterable[str]
) -> None:
"""Set a sequence to the correct value."""
current_stream_ids = []
for stream_id_table in stream_id_tables:
max_stream_id = await self.sqlite_store.db_pool.simple_select_one_onecol(
@@ -939,20 +941,25 @@ class Porter(object):
next_id = max(current_stream_ids) + 1

def r(txn):
sql = "ALTER SEQUENCE %s RESTART WITH" % (sequence_name, )
txn.execute(sql + " %s", (next_id, ))
sql = "ALTER SEQUENCE %s RESTART WITH" % (sequence_name,)
txn.execute(sql + " %s", (next_id,))

await self.postgres_store.db_pool.runInteraction("_setup_%s" % (sequence_name,), r)
await self.postgres_store.db_pool.runInteraction(
"_setup_%s" % (sequence_name,), r
)

async def _setup_auth_chain_sequence(self) -> None:
curr_chain_id = await self.sqlite_store.db_pool.simple_select_one_onecol(
table="event_auth_chains", keyvalues={}, retcol="MAX(chain_id)", allow_none=True
table="event_auth_chains",
keyvalues={},
retcol="MAX(chain_id)",
allow_none=True,
)

def r(txn):
txn.execute(
"ALTER SEQUENCE event_auth_chain_id RESTART WITH %s",
(curr_chain_id,),
(curr_chain_id + 1,),
)

if curr_chain_id is not None:
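
The change from (curr_chain_id,) to (curr_chain_id + 1,) matters because PostgreSQL's ALTER SEQUENCE ... RESTART WITH n makes n the next value the sequence hands out; restarting at the current maximum would reissue an ID that is already taken. A small illustration with invented numbers:

# Invented values: if MAX(chain_id) in the SQLite database is 41, the
# Postgres sequence must restart at 42, since RESTART WITH n means the
# next nextval() call returns n itself.
curr_chain_id = 41
restart_with = curr_chain_id + 1  # first value handed out after porting
assert restart_with == 42
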
@@ -968,8 +975,7 @@ class Porter(object):


class Progress(object):
"""Used to report progress of the port
"""
"""Used to report progress of the port"""

def __init__(self):
self.tables = {}
@@ -994,8 +1000,7 @@ class Progress(object):


class CursesProgress(Progress):
"""Reports progress to a curses window
"""
"""Reports progress to a curses window"""

def __init__(self, stdscr):
self.stdscr = stdscr
@@ -1020,7 +1025,7 @@ class CursesProgress(Progress):

self.total_processed = 0
self.total_remaining = 0
for table, data in self.tables.items():
for data in self.tables.values():
self.total_processed += data["num_done"] - data["start"]
self.total_remaining += data["total"] - data["num_done"]

@@ -1111,8 +1116,7 @@ class CursesProgress(Progress):


class TerminalProgress(Progress):
"""Just prints progress to the terminal
"""
"""Just prints progress to the terminal"""

def update(self, table, num_done):
super(TerminalProgress, self).update(table, num_done)

@@ -47,7 +47,7 @@ try:
except ImportError:
pass

__version__ = "1.34.0rc1"
__version__ = "1.36.0"

if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
# We import here so that we don't have to install a bunch of deps when

@@ -87,6 +87,7 @@ class Auth:
)
self._track_appservice_user_ips = hs.config.track_appservice_user_ips
self._macaroon_secret_key = hs.config.macaroon_secret_key
self._force_tracing_for_users = hs.config.tracing.force_tracing_for_users

async def check_from_context(
self, room_version: str, event, context, do_sig_check=True
@@ -205,6 +206,8 @@ class Auth:
requester = create_requester(user_id, app_service=app_service)

request.requester = user_id
if user_id in self._force_tracing_for_users:
opentracing.set_tag(opentracing.tags.SAMPLING_PRIORITY, 1)
opentracing.set_tag("authenticated_entity", user_id)
opentracing.set_tag("user_id", user_id)
opentracing.set_tag("appservice_id", app_service.id)
@@ -256,6 +259,8 @@ class Auth:
)

request.requester = requester
if user_info.token_owner in self._force_tracing_for_users:
opentracing.set_tag(opentracing.tags.SAMPLING_PRIORITY, 1)
opentracing.set_tag("authenticated_entity", user_info.token_owner)
opentracing.set_tag("user_id", user_info.user_id)
if device_id:

@@ -181,6 +181,6 @@ KNOWN_ROOM_VERSIONS = {
RoomVersions.V5,
RoomVersions.V6,
RoomVersions.MSC2176,
RoomVersions.MSC3083,
)
# Note that we do not include MSC3083 here unless it is enabled in the config.
} # type: Dict[str, RoomVersion]

@@ -261,13 +261,10 @@ def refresh_certificate(hs):
Refresh the TLS certificates that Synapse is using by re-reading them from
disk and updating the TLS context factories to use them.
"""

if not hs.config.has_tls_listener():
# attempt to reload the certs for the good of the tls_fingerprints
hs.config.read_certificate_from_disk(require_cert_and_key=False)
return

hs.config.read_certificate_from_disk(require_cert_and_key=True)
hs.config.read_certificate_from_disk()
hs.tls_server_context_factory = context_factory.ServerContextFactory(hs.config)

if hs._listening_services:

@@ -61,7 +61,6 @@ from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.rest.admin import register_servlets_for_media_repo
from synapse.rest.client.v1 import events, login, presence, room
from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
@@ -110,7 +109,7 @@ from synapse.storage.databases.main.monthly_active_users import (
MonthlyActiveUsersWorkerStore,
)
from synapse.storage.databases.main.presence import PresenceStore
from synapse.storage.databases.main.search import SearchWorkerStore
from synapse.storage.databases.main.search import SearchStore
from synapse.storage.databases.main.stats import StatsStore
from synapse.storage.databases.main.transactions import TransactionWorkerStore
from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore
@@ -237,14 +236,13 @@ class GenericWorkerSlavedStore(
DirectoryStore,
SlavedApplicationServiceStore,
SlavedRegistrationStore,
SlavedTransactionStore,
SlavedProfileStore,
SlavedClientIpStore,
SlavedFilteringStore,
MonthlyActiveUsersWorkerStore,
MediaRepositoryStore,
ServerMetricsStore,
SearchWorkerStore,
SearchStore,
TransactionWorkerStore,
BaseSlavedStore,
):

@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
from synapse.config._base import Config
from synapse.types import JsonDict

@@ -28,10 +27,5 @@ class ExperimentalConfig(Config):
# MSC2858 (multiple SSO identity providers)
self.msc2858_enabled = experimental.get("msc2858_enabled", False)  # type: bool

# Spaces (MSC1772, MSC2946, MSC3083, etc)
self.spaces_enabled = experimental.get("spaces_enabled", False)  # type: bool
if self.spaces_enabled:
KNOWN_ROOM_VERSIONS[RoomVersions.MSC3083.identifier] = RoomVersions.MSC3083

# MSC3026 (busy presence state)
self.msc3026_enabled = experimental.get("msc3026_enabled", False)  # type: bool

@@ -57,7 +57,6 @@ class HomeServerConfig(RootConfig):

config_classes = [
ServerConfig,
ExperimentalConfig,
TlsConfig,
FederationConfig,
CacheConfig,
@@ -94,4 +93,5 @@ class HomeServerConfig(RootConfig):
TracerConfig,
WorkerConfig,
RedisConfig,
ExperimentalConfig,
]

@@ -349,4 +349,4 @@ class RegistrationConfig(Config):

def read_arguments(self, args):
if args.enable_registration is not None:
self.enable_registration = bool(strtobool(str(args.enable_registration)))
self.enable_registration = strtobool(str(args.enable_registration))
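
Both variants of the line rely on distutils.util.strtobool, which returns the ints 1 and 0 rather than True and False; that is the reason one of the variants wraps it in bool(...). A quick sketch of its behaviour:

from distutils.util import strtobool

# strtobool returns ints, not booleans: 1 for truthy strings ("y", "yes",
# "true", "1", ...), 0 for falsy ones, and raises ValueError otherwise.
assert strtobool("yes") == 1
assert strtobool("false") == 0
assert bool(strtobool("true")) is True
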

@@ -164,7 +164,13 @@ class SAML2Config(Config):
config_path = saml2_config.get("config_path", None)
if config_path is not None:
mod = load_python_module(config_path)
_dict_merge(merge_dict=mod.CONFIG, into_dict=saml2_config_dict)
config = getattr(mod, "CONFIG", None)
if config is None:
raise ConfigError(
"Config path specified by saml2_config.config_path does not "
"have a CONFIG property."
)
_dict_merge(merge_dict=config, into_dict=saml2_config_dict)

import saml2.config


@@ -16,11 +16,8 @@ import logging
import os
import warnings
from datetime import datetime
from hashlib import sha256
from typing import List, Optional, Pattern

from unpaddedbase64 import encode_base64

from OpenSSL import SSL, crypto
from twisted.internet._sslverify import Certificate, trustRootFromCertificates

@@ -83,13 +80,6 @@ class TlsConfig(Config):
"configured."
)

self._original_tls_fingerprints = config.get("tls_fingerprints", [])

if self._original_tls_fingerprints is None:
self._original_tls_fingerprints = []

self.tls_fingerprints = list(self._original_tls_fingerprints)

# Whether to verify certificates on outbound federation traffic
self.federation_verify_certificates = config.get(
"federation_verify_certificates", True
@@ -225,41 +215,12 @@ class TlsConfig(Config):
days_remaining = (expires_on - now).days
return days_remaining

def read_certificate_from_disk(self, require_cert_and_key: bool):
def read_certificate_from_disk(self):
"""
Read the certificates and private key from disk.

Args:
require_cert_and_key: set to True to throw an error if the certificate
and key file are not given
"""
if require_cert_and_key:
self.tls_private_key = self.read_tls_private_key()
self.tls_certificate = self.read_tls_certificate()
elif self.tls_certificate_file:
# we only need the certificate for the tls_fingerprints. Reload it if we
# can, but it's not a fatal error if we can't.
try:
self.tls_certificate = self.read_tls_certificate()
except Exception as e:
logger.info(
"Unable to read TLS certificate (%s). Ignoring as no "
"tls listeners enabled.",
e,
)

self.tls_fingerprints = list(self._original_tls_fingerprints)

if self.tls_certificate:
# Check that our own certificate is included in the list of fingerprints
# and include it if it is not.
x509_certificate_bytes = crypto.dump_certificate(
crypto.FILETYPE_ASN1, self.tls_certificate
)
sha256_fingerprint = encode_base64(sha256(x509_certificate_bytes).digest())
sha256_fingerprints = {f["sha256"] for f in self.tls_fingerprints}
if sha256_fingerprint not in sha256_fingerprints:
self.tls_fingerprints.append({"sha256": sha256_fingerprint})
self.tls_private_key = self.read_tls_private_key()
self.tls_certificate = self.read_tls_certificate()

def generate_config_section(
self,
@@ -443,33 +404,6 @@ class TlsConfig(Config):
# If unspecified, we will use CONFDIR/client.key.
#
account_key_file: %(default_acme_account_file)s

# List of allowed TLS fingerprints for this server to publish along
# with the signing keys for this server. Other matrix servers that
# make HTTPS requests to this server will check that the TLS
# certificates returned by this server match one of the fingerprints.
#
# Synapse automatically adds the fingerprint of its own certificate
# to the list. So if federation traffic is handled directly by synapse
# then no modification to the list is required.
#
# If synapse is run behind a load balancer that handles the TLS then it
# will be necessary to add the fingerprints of the certificates used by
# the loadbalancers to this list if they are different to the one
# synapse is using.
#
# Homeservers are permitted to cache the list of TLS fingerprints
# returned in the key responses up to the "valid_until_ts" returned in
# key. It may be necessary to publish the fingerprints of a new
# certificate and wait until the "valid_until_ts" of the previous key
# responses have passed before deploying it.
#
# You can calculate a fingerprint from a given TLS listener via:
# openssl s_client -connect $host:$port < /dev/null 2> /dev/null |
# openssl x509 -outform DER | openssl sha256 -binary | base64 | tr -d '='
# or by checking matrix.org/federationtester/api/report?server_name=$host
#
#tls_fingerprints: [{"sha256": "<base64_encoded_sha256_fingerprint>"}]
"""
# Lowercase the string representation of boolean values
% {

@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Set

from synapse.python_dependencies import DependencyException, check_requirements

from ._base import Config, ConfigError
@@ -32,6 +34,8 @@ class TracerConfig(Config):
{"sampler": {"type": "const", "param": 1}, "logging": False},
)

self.force_tracing_for_users: Set[str] = set()

if not self.opentracer_enabled:
return

@@ -48,6 +52,19 @@ class TracerConfig(Config):
if not isinstance(self.opentracer_whitelist, list):
raise ConfigError("Tracer homeserver_whitelist config is malformed")

force_tracing_for_users = opentracing_config.get("force_tracing_for_users", [])
if not isinstance(force_tracing_for_users, list):
raise ConfigError(
"Expected a list", ("opentracing", "force_tracing_for_users")
)
for i, u in enumerate(force_tracing_for_users):
if not isinstance(u, str):
raise ConfigError(
"Expected a string",
("opentracing", "force_tracing_for_users", f"index {i}"),
)
self.force_tracing_for_users.add(u)

def generate_config_section(cls, **kwargs):
return """\
## Opentracing ##
@@ -64,7 +81,8 @@ class TracerConfig(Config):
#enabled: true

# The list of homeservers we wish to send and receive span contexts and span baggage.
# See docs/opentracing.rst
# See docs/opentracing.rst.
#
# This is a list of regexes which are matched against the server_name of the
# homeserver.
#
@@ -73,19 +91,26 @@ class TracerConfig(Config):
#homeserver_whitelist:
#  - ".*"

# A list of the matrix IDs of users whose requests will always be traced,
# even if the tracing system would otherwise drop the traces due to
# probabilistic sampling.
#
# By default, the list is empty.
#
#force_tracing_for_users:
#  - "@user1:server_name"
#  - "@user2:server_name"

# Jaeger can be configured to sample traces at different rates.
# All configuration options provided by Jaeger can be set here.
# Jaeger's configuration mostly related to trace sampling which
# Jaeger's configuration is mostly related to trace sampling which
# is documented here:
# https://www.jaegertracing.io/docs/1.13/sampling/.
# https://www.jaegertracing.io/docs/latest/sampling/.
#
#jaeger_config:
#  sampler:
#    type: const
#    param: 1

# Logging whether spans were started and reported
#
# logging:
#   false
"""

@@ -16,8 +16,7 @@
import abc
import logging
import urllib
from collections import defaultdict
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple
from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Tuple

import attr
from signedjson.key import (
@@ -42,17 +41,14 @@ from synapse.api.errors import (
SynapseError,
)
from synapse.config.key import TrustedKeyServer
from synapse.logging.context import (
PreserveLoggingContext,
make_deferred_yieldable,
preserve_fn,
run_in_background,
)
from synapse.events import EventBase
from synapse.events.utils import prune_event_dict
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.storage.keys import FetchKeyResult
from synapse.types import JsonDict
from synapse.util import unwrapFirstError
from synapse.util.async_helpers import yieldable_gather_results
from synapse.util.metrics import Measure
from synapse.util.batching_queue import BatchingQueue
from synapse.util.retryutils import NotRetryingDestination

if TYPE_CHECKING:
@@ -69,40 +65,98 @@ class VerifyJsonRequest:
Attributes:
server_name: The name of the server to verify against.

json_object: The JSON object to verify.
get_json_object: A callback to fetch the JSON object to verify.
A callback is used to allow deferring the creation of the JSON
object to verify until needed, e.g. for events we can defer
creating the redacted copy. This reduces the memory usage when
there are large numbers of in flight requests.

minimum_valid_until_ts: time at which we require the signing key to
be valid. (0 implies we don't care)

request_name: The name of the request.

key_ids: The set of key_ids that could be used to verify the JSON object

key_ready (Deferred[str, str, nacl.signing.VerifyKey]):
A deferred (server_name, key_id, verify_key) tuple that resolves when
a verify key has been fetched. The deferreds' callbacks are run with no
logcontext.

If we are unable to find a key which satisfies the request, the deferred
errbacks with an M_UNAUTHORIZED SynapseError.
"""

server_name = attr.ib(type=str)
json_object = attr.ib(type=JsonDict)
get_json_object = attr.ib(type=Callable[[], JsonDict])
minimum_valid_until_ts = attr.ib(type=int)
request_name = attr.ib(type=str)
key_ids = attr.ib(init=False, type=List[str])
key_ready = attr.ib(default=attr.Factory(defer.Deferred), type=defer.Deferred)
key_ids = attr.ib(type=List[str])

def __attrs_post_init__(self):
self.key_ids = signature_ids(self.json_object, self.server_name)
@staticmethod
def from_json_object(
server_name: str,
json_object: JsonDict,
minimum_valid_until_ms: int,
):
"""Create a VerifyJsonRequest to verify all signatures on a signed JSON
object for the given server.
"""
key_ids = signature_ids(json_object, server_name)
return VerifyJsonRequest(
server_name,
lambda: json_object,
minimum_valid_until_ms,
key_ids=key_ids,
)

@staticmethod
def from_event(
server_name: str,
event: EventBase,
minimum_valid_until_ms: int,
):
"""Create a VerifyJsonRequest to verify all signatures on an event
object for the given server.
"""
key_ids = list(event.signatures.get(server_name, []))
return VerifyJsonRequest(
server_name,
# We defer creating the redacted json object, as it uses a lot more
# memory than the Event object itself.
lambda: prune_event_dict(event.room_version, event.get_pdu_json()),
minimum_valid_until_ms,
key_ids=key_ids,
)

def to_fetch_key_request(self) -> "_FetchKeyRequest":
"""Create a key fetch request for all keys needed to satisfy the
verification request.
"""
return _FetchKeyRequest(
server_name=self.server_name,
minimum_valid_until_ts=self.minimum_valid_until_ts,
key_ids=self.key_ids,
)
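
The get_json_object callback is the heart of the memory saving described in the docstring above: the (potentially large) redacted copy of an event is only materialised once verification actually runs. A hedged sketch of constructing such a request by hand (from_event does exactly this for you):

# Sketch: building a VerifyJsonRequest whose JSON payload is produced
# lazily. The lambda is not evaluated until get_json_object() is called
# during verification, so queued requests stay cheap to hold in memory.
def make_lazy_request(server_name, event, minimum_valid_until_ms):
    return VerifyJsonRequest(
        server_name,
        # deferred: the redacted dict is only built when it is needed
        lambda: prune_event_dict(event.room_version, event.get_pdu_json()),
        minimum_valid_until_ms,
        key_ids=list(event.signatures.get(server_name, [])),
    )
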


class KeyLookupError(ValueError):
pass


@attr.s(slots=True)
class _FetchKeyRequest:
"""A request for keys for a given server.

We will continue to try and fetch until we have all the keys listed under
`key_ids` (with an appropriate `valid_until_ts` property) or we run out of
places to fetch keys from.

Attributes:
server_name: The name of the server that owns the keys.
minimum_valid_until_ts: The timestamp which the keys must be valid until.
key_ids: The IDs of the keys to attempt to fetch
"""

server_name = attr.ib(type=str)
minimum_valid_until_ts = attr.ib(type=int)
key_ids = attr.ib(type=List[str])


class Keyring:
"""Handles verifying signed JSON objects and fetching the keys needed to do
so.
"""

def __init__(
self, hs: "HomeServer", key_fetchers: "Optional[Iterable[KeyFetcher]]" = None
):
@@ -116,22 +170,22 @@ class Keyring:
)
self._key_fetchers = key_fetchers

# map from server name to Deferred. Has an entry for each server with
# an ongoing key download; the Deferred completes once the download
# completes.
#
# These are regular, logcontext-agnostic Deferreds.
self.key_downloads = {}  # type: Dict[str, defer.Deferred]
self._server_queue = BatchingQueue(
"keyring_server",
clock=hs.get_clock(),
process_batch_callback=self._inner_fetch_key_requests,
)  # type: BatchingQueue[_FetchKeyRequest, Dict[str, Dict[str, FetchKeyResult]]]

def verify_json_for_server(
async def verify_json_for_server(
self,
server_name: str,
json_object: JsonDict,
validity_time: int,
request_name: str,
) -> defer.Deferred:
) -> None:
"""Verify that a JSON object has been signed by a given server

Completes if the object was correctly signed, otherwise raises.

Args:
server_name: name of the server which must have signed this object

@@ -139,356 +193,265 @@ class Keyring:

validity_time: timestamp at which we require the signing key to
be valid. (0 implies we don't care)

request_name: an identifier for this json object (eg, an event id)
for logging.

Returns:
Deferred[None]: completes if the the object was correctly signed, otherwise
errbacks with an error
"""
req = VerifyJsonRequest(server_name, json_object, validity_time, request_name)
requests = (req,)
return make_deferred_yieldable(self._verify_objects(requests)[0])
request = VerifyJsonRequest.from_json_object(
server_name,
json_object,
validity_time,
)
return await self.process_request(request)

def verify_json_objects_for_server(
self, server_and_json: Iterable[Tuple[str, dict, int, str]]
self, server_and_json: Iterable[Tuple[str, dict, int]]
) -> List[defer.Deferred]:
"""Bulk verifies signatures of json objects, bulk fetching keys as
necessary.

Args:
server_and_json:
Iterable of (server_name, json_object, validity_time, request_name)
Iterable of (server_name, json_object, validity_time)
tuples.

validity_time is a timestamp at which the signing key must be
valid.

request_name is an identifier for this json object (eg, an event id)
for logging.

Returns:
List<Deferred[None]>: for each input triplet, a deferred indicating success
or failure to verify each json object's signature for the given
server_name. The deferreds run their callbacks in the sentinel
logcontext.
"""
return self._verify_objects(
VerifyJsonRequest(server_name, json_object, validity_time, request_name)
for server_name, json_object, validity_time, request_name in server_and_json
return [
run_in_background(
self.process_request,
VerifyJsonRequest.from_json_object(
server_name,
json_object,
validity_time,
),
)
for server_name, json_object, validity_time in server_and_json
]

async def verify_event_for_server(
self,
server_name: str,
event: EventBase,
validity_time: int,
) -> None:
await self.process_request(
VerifyJsonRequest.from_event(
server_name,
event,
validity_time,
)
)

def _verify_objects(
self, verify_requests: Iterable[VerifyJsonRequest]
) -> List[defer.Deferred]:
"""Does the work of verify_json_[objects_]for_server


Args:
verify_requests: Iterable of verification requests.

Returns:
List<Deferred[None]>: for each input item, a deferred indicating success
or failure to verify each json object's signature for the given
server_name. The deferreds run their callbacks in the sentinel
logcontext.
async def process_request(self, verify_request: VerifyJsonRequest) -> None:
"""Processes the `VerifyJsonRequest`. Raises if the object is not signed
by the server, the signatures don't match or we failed to fetch the
necessary keys.
"""
# a list of VerifyJsonRequests which are awaiting a key lookup
key_lookups = []
handle = preserve_fn(_handle_key_deferred)

def process(verify_request: VerifyJsonRequest) -> defer.Deferred:
"""Process an entry in the request list
if not verify_request.key_ids:
raise SynapseError(
400,
f"Not signed by {verify_request.server_name}",
Codes.UNAUTHORIZED,
)

Adds a key request to key_lookups, and returns a deferred which
will complete or fail (in the sentinel context) when verification completes.
"""
if not verify_request.key_ids:
return defer.fail(
SynapseError(
400,
"Not signed by %s" % (verify_request.server_name,),
Codes.UNAUTHORIZED,
)
# Add the keys we need to verify to the queue for retrieval. We queue
# up requests for the same server so we don't end up with many in flight
# requests for the same keys.
key_request = verify_request.to_fetch_key_request()
found_keys_by_server = await self._server_queue.add_to_queue(
key_request, key=verify_request.server_name
)

# Since we batch up requests the returned set of keys may contain keys
# from other servers, so we pull out only the ones we care about.
found_keys = found_keys_by_server.get(verify_request.server_name, {})

# Verify each signature we got valid keys for, raising if we can't
# verify any of them.
verified = False
for key_id in verify_request.key_ids:
key_result = found_keys.get(key_id)
if not key_result:
continue

if key_result.valid_until_ts < verify_request.minimum_valid_until_ts:
continue

verify_key = key_result.verify_key
json_object = verify_request.get_json_object()
try:
verify_signed_json(
json_object,
verify_request.server_name,
verify_key,
)
verified = True
except SignatureVerifyException as e:
logger.debug(
"Error verifying signature for %s:%s:%s with key %s: %s",
verify_request.server_name,
verify_key.alg,
verify_key.version,
encode_verify_key_base64(verify_key),
str(e),
)
raise SynapseError(
401,
"Invalid signature for server %s with key %s:%s: %s"
% (
verify_request.server_name,
verify_key.alg,
verify_key.version,
str(e),
),
Codes.UNAUTHORIZED,
)

logger.debug(
"Verifying %s for %s with key_ids %s, min_validity %i",
verify_request.request_name,
if not verified:
raise SynapseError(
401,
f"Failed to find any key to satisfy: {key_request}",
Codes.UNAUTHORIZED,
)
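
A hedged usage sketch of the reworked API: callers now simply await the coroutine and treat a raised SynapseError as verification failure. The hs and signed_json names here are assumptions for illustration:

async def check_signature(hs, signed_json):
    # The keyring raises SynapseError (400/401) if the object is unsigned,
    # the signature is bad, or no suitable key can be fetched.
    keyring = hs.get_keyring()
    await keyring.verify_json_for_server(
        "remote.example.org",  # server which must have signed the object
        signed_json,
        0,  # validity_time: 0 means we don't care when the key expires
        "example-request",  # request_name: identifier used for logging
    )
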

async def _inner_fetch_key_requests(
self, requests: List[_FetchKeyRequest]
) -> Dict[str, Dict[str, FetchKeyResult]]:
"""Processing function for the queue of `_FetchKeyRequest`."""

logger.debug("Starting fetch for %s", requests)

# First we need to deduplicate requests for the same key. We do this by
# taking the *maximum* requested `minimum_valid_until_ts` for each pair
# of server name/key ID.
server_to_key_to_ts = {}  # type: Dict[str, Dict[str, int]]
for request in requests:
by_server = server_to_key_to_ts.setdefault(request.server_name, {})
for key_id in request.key_ids:
existing_ts = by_server.get(key_id, 0)
by_server[key_id] = max(request.minimum_valid_until_ts, existing_ts)

deduped_requests = [
_FetchKeyRequest(server_name, minimum_valid_ts, [key_id])
for server_name, by_server in server_to_key_to_ts.items()
for key_id, minimum_valid_ts in by_server.items()
]

logger.debug("Deduplicated key requests to %s", deduped_requests)

# For each key we call `_inner_verify_request` which will handle
# fetching each key. Note these shouldn't throw if we fail to contact
# other servers etc.
results_per_request = await yieldable_gather_results(
self._inner_fetch_key_request,
deduped_requests,
)

# We now convert the returned list of results into a map from server
# name to key ID to FetchKeyResult, to return.
to_return = {}  # type: Dict[str, Dict[str, FetchKeyResult]]
for (request, results) in zip(deduped_requests, results_per_request):
to_return_by_server = to_return.setdefault(request.server_name, {})
for key_id, key_result in results.items():
existing = to_return_by_server.get(key_id)
if not existing or existing.valid_until_ts < key_result.valid_until_ts:
to_return_by_server[key_id] = key_result

return to_return
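
A worked example of the deduplication step with invented values: two queued requests for the same server and key collapse to a single fetch carrying the stricter (larger) minimum_valid_until_ts:

requests = [
    _FetchKeyRequest("example.org", 1000, ["ed25519:a"]),
    _FetchKeyRequest("example.org", 5000, ["ed25519:a"]),
]
server_to_key_to_ts = {}
for request in requests:
    by_server = server_to_key_to_ts.setdefault(request.server_name, {})
    for key_id in request.key_ids:
        existing_ts = by_server.get(key_id, 0)
        by_server[key_id] = max(request.minimum_valid_until_ts, existing_ts)

# Only one fetch remains, and it demands validity until ts=5000.
assert server_to_key_to_ts == {"example.org": {"ed25519:a": 5000}}
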

async def _inner_fetch_key_request(
self, verify_request: _FetchKeyRequest
) -> Dict[str, FetchKeyResult]:
"""Attempt to fetch the given key by calling each key fetcher one by
one.
"""
logger.debug("Starting fetch for %s", verify_request)

found_keys: Dict[str, FetchKeyResult] = {}
missing_key_ids = set(verify_request.key_ids)

for fetcher in self._key_fetchers:
if not missing_key_ids:
break

logger.debug("Getting keys from %s for %s", fetcher, verify_request)
keys = await fetcher.get_keys(
verify_request.server_name,
verify_request.key_ids,
list(missing_key_ids),
verify_request.minimum_valid_until_ts,
)

# add the key request to the queue, but don't start it off yet.
key_lookups.append(verify_request)

# now run _handle_key_deferred, which will wait for the key request
# to complete and then do the verification.
#
# We want _handle_key_request to log to the right context, so we
# wrap it with preserve_fn (aka run_in_background)
return handle(verify_request)

results = [process(r) for r in verify_requests]

if key_lookups:
run_in_background(self._start_key_lookups, key_lookups)

return results

async def _start_key_lookups(
self, verify_requests: List[VerifyJsonRequest]
) -> None:
"""Sets off the key fetches for each verify request

Once each fetch completes, verify_request.key_ready will be resolved.

Args:
verify_requests:
"""

try:
# map from server name to a set of outstanding request ids
server_to_request_ids = {}  # type: Dict[str, Set[int]]

for verify_request in verify_requests:
server_name = verify_request.server_name
request_id = id(verify_request)
server_to_request_ids.setdefault(server_name, set()).add(request_id)

# Wait for any previous lookups to complete before proceeding.
await self.wait_for_previous_lookups(server_to_request_ids.keys())

# take out a lock on each of the servers by sticking a Deferred in
# key_downloads
for server_name in server_to_request_ids.keys():
self.key_downloads[server_name] = defer.Deferred()
logger.debug("Got key lookup lock on %s", server_name)

# When we've finished fetching all the keys for a given server_name,
# drop the lock by resolving the deferred in key_downloads.
def drop_server_lock(server_name):
d = self.key_downloads.pop(server_name)
d.callback(None)

def lookup_done(res, verify_request):
server_name = verify_request.server_name
server_requests = server_to_request_ids[server_name]
server_requests.remove(id(verify_request))

# if there are no more requests for this server, we can drop the lock.
if not server_requests:
logger.debug("Releasing key lookup lock on %s", server_name)
drop_server_lock(server_name)

return res

for verify_request in verify_requests:
verify_request.key_ready.addBoth(lookup_done, verify_request)

# Actually start fetching keys.
self._get_server_verify_keys(verify_requests)
except Exception:
logger.exception("Error starting key lookups")

async def wait_for_previous_lookups(self, server_names: Iterable[str]) -> None:
"""Waits for any previous key lookups for the given servers to finish.

Args:
server_names: list of servers which we want to look up

Returns:
Resolves once all key lookups for the given servers have
completed. Follows the synapse rules of logcontext preservation.
"""
loop_count = 1
while True:
wait_on = [
(server_name, self.key_downloads[server_name])
for server_name in server_names
if server_name in self.key_downloads
]
if not wait_on:
break
logger.info(
"Waiting for existing lookups for %s to complete [loop %i]",
[w[0] for w in wait_on],
loop_count,
)
with PreserveLoggingContext():
await defer.DeferredList((w[1] for w in wait_on))

loop_count += 1

def _get_server_verify_keys(self, verify_requests: List[VerifyJsonRequest]) -> None:
"""Tries to find at least one key for each verify request

For each verify_request, verify_request.key_ready is called back with
params (server_name, key_id, VerifyKey) if a key is found, or errbacked
with a SynapseError if none of the keys are found.

Args:
verify_requests: list of verify requests
"""

remaining_requests = {rq for rq in verify_requests if not rq.key_ready.called}

async def do_iterations():
try:
with Measure(self.clock, "get_server_verify_keys"):
for f in self._key_fetchers:
if not remaining_requests:
return
await self._attempt_key_fetches_with_fetcher(
f, remaining_requests
)

# look for any requests which weren't satisfied
while remaining_requests:
verify_request = remaining_requests.pop()
rq_str = (
"VerifyJsonRequest(server=%s, key_ids=%s, min_valid=%i)"
% (
verify_request.server_name,
verify_request.key_ids,
verify_request.minimum_valid_until_ts,
)
)

# If we run the errback immediately, it may cancel our
# loggingcontext while we are still in it, so instead we
# schedule it for the next time round the reactor.
#
# (this also ensures that we don't get a stack overflow if we
# has a massive queue of lookups waiting for this server).
self.clock.call_later(
0,
verify_request.key_ready.errback,
SynapseError(
401,
"Failed to find any key to satisfy %s" % (rq_str,),
Codes.UNAUTHORIZED,
),
)
except Exception as err:
# we don't really expect to get here, because any errors should already
# have been caught and logged. But if we do, let's log the error and make
# sure that all of the deferreds are resolved.
logger.error("Unexpected error in _get_server_verify_keys: %s", err)
with PreserveLoggingContext():
for verify_request in remaining_requests:
if not verify_request.key_ready.called:
verify_request.key_ready.errback(err)

run_in_background(do_iterations)

async def _attempt_key_fetches_with_fetcher(
self, fetcher: "KeyFetcher", remaining_requests: Set[VerifyJsonRequest]
):
"""Use a key fetcher to attempt to satisfy some key requests

Args:
fetcher: fetcher to use to fetch the keys
remaining_requests: outstanding key requests.
Any successfully-completed requests will be removed from the list.
"""
# The keys to fetch.
# server_name -> key_id -> min_valid_ts
missing_keys = defaultdict(dict)  # type: Dict[str, Dict[str, int]]

for verify_request in remaining_requests:
# any completed requests should already have been removed
assert not verify_request.key_ready.called
keys_for_server = missing_keys[verify_request.server_name]

for key_id in verify_request.key_ids:
# If we have several requests for the same key, then we only need to
# request that key once, but we should do so with the greatest
# min_valid_until_ts of the requests, so that we can satisfy all of
# the requests.
keys_for_server[key_id] = max(
keys_for_server.get(key_id, -1),
verify_request.minimum_valid_until_ts,
)

results = await fetcher.get_keys(missing_keys)

completed = []
for verify_request in remaining_requests:
server_name = verify_request.server_name

# see if any of the keys we got this time are sufficient to
# complete this VerifyJsonRequest.
result_keys = results.get(server_name, {})
for key_id in verify_request.key_ids:
fetch_key_result = result_keys.get(key_id)
if not fetch_key_result:
# we didn't get a result for this key
for key_id, key in keys.items():
if not key:
continue

if (
fetch_key_result.valid_until_ts
< verify_request.minimum_valid_until_ts
):
# key was not valid at this point
continue
# If we already have a result for the given key ID we keep the
# one with the highest `valid_until_ts`.
existing_key = found_keys.get(key_id)
if existing_key:
if key.valid_until_ts <= existing_key.valid_until_ts:
continue

# we have a valid key for this request. If we run the callback
# immediately, it may cancel our loggingcontext while we are still in
# it, so instead we schedule it for the next time round the reactor.
# We always store the returned key even if it doesn't meet the
# `minimum_valid_until_ts` requirement, as some verification
# requests may still be able to be satisfied by it.
#
# (this also ensures that we don't get a stack overflow if we had
# a massive queue of lookups waiting for this server).
logger.debug(
"Found key %s:%s for %s",
server_name,
key_id,
verify_request.request_name,
)
self.clock.call_later(
0,
verify_request.key_ready.callback,
(server_name, key_id, fetch_key_result.verify_key),
)
completed.append(verify_request)
break
# We still keep looking for the key from other fetchers in that
# case though.
found_keys[key_id] = key

remaining_requests.difference_update(completed)
if key.valid_until_ts < verify_request.minimum_valid_until_ts:
continue

missing_key_ids.discard(key_id)

return found_keys


class KeyFetcher(metaclass=abc.ABCMeta):
@abc.abstractmethod
async def get_keys(
self, keys_to_fetch: Dict[str, Dict[str, int]]
) -> Dict[str, Dict[str, FetchKeyResult]]:
"""
Args:
keys_to_fetch:
the keys to be fetched. server_name -> key_id -> min_valid_ts
def __init__(self, hs: "HomeServer"):
self._queue = BatchingQueue(
self.__class__.__name__, hs.get_clock(), self._fetch_keys
)

Returns:
Map from server_name -> key_id -> FetchKeyResult
"""
raise NotImplementedError
async def get_keys(
self, server_name: str, key_ids: List[str], minimum_valid_until_ts: int
) -> Dict[str, FetchKeyResult]:
results = await self._queue.add_to_queue(
_FetchKeyRequest(
server_name=server_name,
key_ids=key_ids,
minimum_valid_until_ts=minimum_valid_until_ts,
)
)
return results.get(server_name, {})

@abc.abstractmethod
async def _fetch_keys(
self, keys_to_fetch: List[_FetchKeyRequest]
) -> Dict[str, Dict[str, FetchKeyResult]]:
pass
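
Under the new interface a fetcher only implements _fetch_keys; the per-class batching through BatchingQueue comes for free from the base class. A hedged sketch of a custom fetcher (the class and its in-memory key source are invented for illustration):

class StaticKeyFetcher(KeyFetcher):
    """Hypothetical fetcher serving keys from an in-memory map."""

    def __init__(self, hs, static_keys):
        super().__init__(hs)
        # server_name -> key_id -> FetchKeyResult
        self._static_keys = static_keys

    async def _fetch_keys(self, keys_to_fetch):
        # Satisfy each queued _FetchKeyRequest from the static map,
        # returning the server_name -> key_id -> FetchKeyResult shape
        # that the base class's get_keys expects.
        results = {}
        for request in keys_to_fetch:
            known = self._static_keys.get(request.server_name, {})
            found = {k: known[k] for k in request.key_ids if k in known}
            if found:
                results.setdefault(request.server_name, {}).update(found)
        return results
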
|
||||
|
||||
|
||||
class StoreKeyFetcher(KeyFetcher):
|
||||
"""KeyFetcher impl which fetches keys from our data store"""
|
||||
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
super().__init__(hs)
|
||||
|
||||
self.store = hs.get_datastore()
|
||||
|
||||
async def get_keys(
|
||||
self, keys_to_fetch: Dict[str, Dict[str, int]]
|
||||
) -> Dict[str, Dict[str, FetchKeyResult]]:
|
||||
"""see KeyFetcher.get_keys"""
|
||||
|
||||
async def _fetch_keys(self, keys_to_fetch: List[_FetchKeyRequest]):
|
||||
key_ids_to_fetch = (
|
||||
(server_name, key_id)
|
||||
for server_name, keys_for_server in keys_to_fetch.items()
|
||||
for key_id in keys_for_server.keys()
|
||||
(queue_value.server_name, key_id)
|
||||
for queue_value in keys_to_fetch
|
||||
for key_id in queue_value.key_ids
|
||||
)
|
||||
|
||||
res = await self.store.get_server_verify_keys(key_ids_to_fetch)
|
||||
@@ -500,6 +463,8 @@ class StoreKeyFetcher(KeyFetcher):
|
||||
|
||||
class BaseV2KeyFetcher(KeyFetcher):
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
super().__init__(hs)
|
||||
|
||||
self.store = hs.get_datastore()
|
||||
self.config = hs.config
|
||||
|
||||
@@ -607,10 +572,10 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
|
||||
self.client = hs.get_federation_http_client()
|
||||
self.key_servers = self.config.key_servers
|
||||
|
||||
async def get_keys(
|
||||
self, keys_to_fetch: Dict[str, Dict[str, int]]
|
||||
async def _fetch_keys(
|
||||
self, keys_to_fetch: List[_FetchKeyRequest]
|
||||
) -> Dict[str, Dict[str, FetchKeyResult]]:
|
||||
"""see KeyFetcher.get_keys"""
|
||||
"""see KeyFetcher._fetch_keys"""
|
||||
|
||||
async def get_key(key_server: TrustedKeyServer) -> Dict:
|
||||
try:
|
||||
@@ -646,12 +611,12 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
|
||||
return union_of_keys
|
||||
|
||||
async def get_server_verify_key_v2_indirect(
|
||||
self, keys_to_fetch: Dict[str, Dict[str, int]], key_server: TrustedKeyServer
|
||||
self, keys_to_fetch: List[_FetchKeyRequest], key_server: TrustedKeyServer
|
||||
) -> Dict[str, Dict[str, FetchKeyResult]]:
|
||||
"""
|
||||
Args:
|
||||
keys_to_fetch:
|
||||
the keys to be fetched. server_name -> key_id -> min_valid_ts
|
||||
the keys to be fetched.
|
||||
|
||||
key_server: notary server to query for the keys
|
||||
|
||||
@@ -665,7 +630,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
|
||||
perspective_name = key_server.server_name
|
||||
logger.info(
|
||||
"Requesting keys %s from notary server %s",
|
||||
keys_to_fetch.items(),
|
||||
keys_to_fetch,
|
||||
perspective_name,
|
||||
)
|
||||
|
||||
@@ -675,11 +640,13 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
|
||||
path="/_matrix/key/v2/query",
|
||||
data={
|
||||
"server_keys": {
|
||||
server_name: {
|
||||
key_id: {"minimum_valid_until_ts": min_valid_ts}
|
||||
for key_id, min_valid_ts in server_keys.items()
|
||||
queue_value.server_name: {
|
||||
key_id: {
|
||||
"minimum_valid_until_ts": queue_value.minimum_valid_until_ts,
|
||||
}
|
||||
for key_id in queue_value.key_ids
|
||||
}
|
||||
for server_name, server_keys in keys_to_fetch.items()
|
||||
for queue_value in keys_to_fetch
|
||||
}
|
||||
},
|
||||
)
|
||||
@@ -780,7 +747,20 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
|
||||
self.client = hs.get_federation_http_client()
|
||||
|
||||
async def get_keys(
|
||||
self, keys_to_fetch: Dict[str, Dict[str, int]]
|
||||
self, server_name: str, key_ids: List[str], minimum_valid_until_ts: int
|
||||
) -> Dict[str, FetchKeyResult]:
|
||||
results = await self._queue.add_to_queue(
|
||||
_FetchKeyRequest(
|
||||
server_name=server_name,
|
||||
key_ids=key_ids,
|
||||
minimum_valid_until_ts=minimum_valid_until_ts,
|
||||
),
|
||||
key=server_name,
|
||||
)
|
||||
return results.get(server_name, {})
|
||||
|
||||
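The `get_keys` path above funnels each per-server request through `self._queue.add_to_queue(..., key=server_name)`, so concurrent requests for the same server are coalesced into a single `_fetch_keys` batch. As a rough illustration of that pattern — a minimal asyncio sketch, not Synapse's Twisted-based `BatchingQueue`; the class and method bodies below are stand-ins:

```python
import asyncio
from collections import defaultdict
from typing import Any, Awaitable, Callable, Dict, List


class MiniBatchingQueue:
    """Coalesce concurrent requests sharing a key into one batched call."""

    def __init__(self, process_batch: Callable[[List[Any]], Awaitable[Any]]):
        self._process_batch = process_batch
        self._pending: Dict[str, List[Any]] = defaultdict(list)
        self._waiters: Dict[str, List[asyncio.Future]] = defaultdict(list)
        self._running: Dict[str, bool] = defaultdict(bool)

    async def add_to_queue(self, value: Any, key: str) -> Any:
        fut: asyncio.Future = asyncio.get_running_loop().create_future()
        self._pending[key].append(value)
        self._waiters[key].append(fut)
        if not self._running[key]:
            self._running[key] = True
            asyncio.ensure_future(self._drain(key))
        return await fut

    async def _drain(self, key: str) -> None:
        try:
            while self._pending[key]:
                # Grab everything queued so far and process it as one batch.
                batch, waiters = self._pending[key], self._waiters[key]
                self._pending[key], self._waiters[key] = [], []
                try:
                    result = await self._process_batch(batch)
                except Exception as e:
                    for w in waiters:
                        w.set_exception(e)
                else:
                    for w in waiters:
                        w.set_result(result)
        finally:
            self._running[key] = False
```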
async def _fetch_keys(
self, keys_to_fetch: List[_FetchKeyRequest]
) -> Dict[str, Dict[str, FetchKeyResult]]:
"""
Args:
@@ -793,8 +773,10 @@ class ServerKeyFetcher(BaseV2KeyFetcher):

results = {}

async def get_key(key_to_fetch_item: Tuple[str, Dict[str, int]]) -> None:
server_name, key_ids = key_to_fetch_item
async def get_key(key_to_fetch_item: _FetchKeyRequest) -> None:
server_name = key_to_fetch_item.server_name
key_ids = key_to_fetch_item.key_ids

try:
keys = await self.get_server_verify_key_v2_direct(server_name, key_ids)
results[server_name] = keys
@@ -805,7 +787,7 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
except Exception:
logger.exception("Error getting keys %s from %s", key_ids, server_name)

await yieldable_gather_results(get_key, keys_to_fetch.items())
await yieldable_gather_results(get_key, keys_to_fetch)
return results

async def get_server_verify_key_v2_direct(
@@ -877,37 +859,3 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
keys.update(response_keys)

return keys


async def _handle_key_deferred(verify_request: VerifyJsonRequest) -> None:
"""Waits for the key to become available, and then performs a verification

Args:
verify_request:

Raises:
SynapseError if there was a problem performing the verification
"""
server_name = verify_request.server_name
with PreserveLoggingContext():
_, key_id, verify_key = await verify_request.key_ready

json_object = verify_request.json_object

try:
verify_signed_json(json_object, server_name, verify_key)
except SignatureVerifyException as e:
logger.debug(
"Error verifying signature for %s:%s:%s with key %s: %s",
server_name,
verify_key.alg,
verify_key.version,
encode_verify_key_base64(verify_key),
str(e),
)
raise SynapseError(
401,
"Invalid signature for server %s with key %s:%s: %s"
% (server_name, verify_key.alg, verify_key.version, str(e)),
Codes.UNAUTHORIZED,
)
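The `verify_signed_json` being removed above comes from the signedjson library. For orientation, a self-contained round trip with a throwaway key — Synapse itself loads signing keys from its config rather than generating them like this:

```python
from signedjson.key import generate_signing_key, get_verify_key
from signedjson.sign import SignatureVerifyException, sign_json, verify_signed_json

# Sign a JSON object as "example.org", then verify it with the public half.
signing_key = generate_signing_key("v1")
signed = sign_json({"foo": "bar"}, "example.org", signing_key)

try:
    verify_signed_json(signed, "example.org", get_verify_key(signing_key))
    print("signature ok")
except SignatureVerifyException as e:
    print("verification failed:", e)
```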
@@ -14,11 +14,6 @@
# limitations under the License.
import logging
from collections import namedtuple
from typing import Iterable, List

from twisted.internet import defer
from twisted.internet.defer import Deferred, DeferredList
from twisted.python.failure import Failure

from synapse.api.constants import MAX_DEPTH, EventTypes, Membership
from synapse.api.errors import Codes, SynapseError
@@ -28,11 +23,6 @@ from synapse.crypto.keyring import Keyring
from synapse.events import EventBase, make_event_from_dict
from synapse.events.utils import prune_event, validate_canonicaljson
from synapse.http.servlet import assert_params_in_dict
from synapse.logging.context import (
PreserveLoggingContext,
current_context,
make_deferred_yieldable,
)
from synapse.types import JsonDict, get_domain_from_id

logger = logging.getLogger(__name__)
@@ -48,116 +38,82 @@ class FederationBase:
self.store = hs.get_datastore()
self._clock = hs.get_clock()

def _check_sigs_and_hash(
async def _check_sigs_and_hash(
self, room_version: RoomVersion, pdu: EventBase
) -> Deferred:
return make_deferred_yieldable(
self._check_sigs_and_hashes(room_version, [pdu])[0]
)

def _check_sigs_and_hashes(
self, room_version: RoomVersion, pdus: List[EventBase]
) -> List[Deferred]:
"""Checks that each of the received events is correctly signed by the
sending server.
) -> EventBase:
"""Checks that event is correctly signed by the sending server.

Args:
room_version: The room version of the PDUs
pdus: the events to be checked
room_version: The room version of the PDU
pdu: the event to be checked

Returns:
For each input event, a deferred which:
* returns the original event if the checks pass
* returns a redacted version of the event (if the signature
* the original event if the checks pass
* a redacted version of the event (if the signature
matched but the hash did not)
* throws a SynapseError if the signature check failed.
The deferreds run their callbacks in the sentinel
"""
deferreds = _check_sigs_on_pdus(self.keyring, room_version, pdus)

ctx = current_context()

@defer.inlineCallbacks
def callback(_, pdu: EventBase):
with PreserveLoggingContext(ctx):
if not check_event_content_hash(pdu):
# let's try to distinguish between failures because the event was
# redacted (which are somewhat expected) vs actual ball-tampering
# incidents.
#
# This is just a heuristic, so we just assume that if the keys are
# about the same between the redacted and received events, then the
# received event was probably a redacted copy (but we then use our
# *actual* redacted copy to be on the safe side.)
redacted_event = prune_event(pdu)
if set(redacted_event.keys()) == set(pdu.keys()) and set(
redacted_event.content.keys()
) == set(pdu.content.keys()):
logger.info(
"Event %s seems to have been redacted; using our redacted "
"copy",
pdu.event_id,
)
else:
logger.warning(
"Event %s content has been tampered, redacting",
pdu.event_id,
)
return redacted_event

result = yield defer.ensureDeferred(
self.spam_checker.check_event_for_spam(pdu)
)

if result:
logger.warning(
"Event contains spam, redacting %s: %s",
pdu.event_id,
pdu.get_pdu_json(),
)
return prune_event(pdu)

return pdu

def errback(failure: Failure, pdu: EventBase):
failure.trap(SynapseError)
with PreserveLoggingContext(ctx):
logger.warning(
"Signature check failed for %s: %s",
pdu.event_id,
failure.getErrorMessage(),
)
return failure

for deferred, pdu in zip(deferreds, pdus):
deferred.addCallbacks(
callback, errback, callbackArgs=[pdu], errbackArgs=[pdu]
* throws a SynapseError if the signature check failed."""
try:
await _check_sigs_on_pdu(self.keyring, room_version, pdu)
except SynapseError as e:
logger.warning(
"Signature check failed for %s: %s",
pdu.event_id,
e,
)
raise

return deferreds
if not check_event_content_hash(pdu):
# let's try to distinguish between failures because the event was
# redacted (which are somewhat expected) vs actual ball-tampering
# incidents.
#
# This is just a heuristic, so we just assume that if the keys are
# about the same between the redacted and received events, then the
# received event was probably a redacted copy (but we then use our
# *actual* redacted copy to be on the safe side.)
redacted_event = prune_event(pdu)
if set(redacted_event.keys()) == set(pdu.keys()) and set(
redacted_event.content.keys()
) == set(pdu.content.keys()):
logger.info(
"Event %s seems to have been redacted; using our redacted copy",
pdu.event_id,
)
else:
logger.warning(
"Event %s content has been tampered, redacting",
pdu.event_id,
)
return redacted_event

result = await self.spam_checker.check_event_for_spam(pdu)

if result:
logger.warning(
"Event contains spam, redacting %s: %s",
pdu.event_id,
pdu.get_pdu_json(),
)
return prune_event(pdu)

return pdu


class PduToCheckSig(
namedtuple(
"PduToCheckSig", ["pdu", "redacted_pdu_json", "sender_domain", "deferreds"]
)
):
class PduToCheckSig(namedtuple("PduToCheckSig", ["pdu", "sender_domain", "deferreds"])):
pass


def _check_sigs_on_pdus(
keyring: Keyring, room_version: RoomVersion, pdus: Iterable[EventBase]
) -> List[Deferred]:
async def _check_sigs_on_pdu(
keyring: Keyring, room_version: RoomVersion, pdu: EventBase
) -> None:
"""Check that the given events are correctly signed

Raise a SynapseError if the event wasn't correctly signed.

Args:
keyring: keyring object to do the checks
room_version: the room version of the PDUs
pdus: the events to be checked

Returns:
A Deferred for each event in pdus, which will either succeed if
the signatures are valid, or fail (with a SynapseError) if not.
"""

# we want to check that the event is signed by:
@@ -181,93 +137,47 @@ def _check_sigs_on_pdus(
# let's start by getting the domain for each pdu, and flattening the event back
# to JSON.

pdus_to_check = [
PduToCheckSig(
pdu=p,
redacted_pdu_json=prune_event(p).get_pdu_json(),
sender_domain=get_domain_from_id(p.sender),
deferreds=[],
)
for p in pdus
]

# First we check that the sender event is signed by the sender's domain
# (except if it's a 3pid invite, in which case it may be sent by any server)
pdus_to_check_sender = [p for p in pdus_to_check if not _is_invite_via_3pid(p.pdu)]

more_deferreds = keyring.verify_json_objects_for_server(
[
(
p.sender_domain,
p.redacted_pdu_json,
p.pdu.origin_server_ts if room_version.enforce_key_validity else 0,
p.pdu.event_id,
if not _is_invite_via_3pid(pdu):
try:
await keyring.verify_event_for_server(
get_domain_from_id(pdu.sender),
pdu,
pdu.origin_server_ts if room_version.enforce_key_validity else 0,
)
for p in pdus_to_check_sender
]
)

def sender_err(e, pdu_to_check):
errmsg = "event id %s: unable to verify signature for sender %s: %s" % (
pdu_to_check.pdu.event_id,
pdu_to_check.sender_domain,
e.getErrorMessage(),
)
raise SynapseError(403, errmsg, Codes.FORBIDDEN)

for p, d in zip(pdus_to_check_sender, more_deferreds):
d.addErrback(sender_err, p)
p.deferreds.append(d)
except Exception as e:
errmsg = "event id %s: unable to verify signature for sender %s: %s" % (
pdu.event_id,
get_domain_from_id(pdu.sender),
e,
)
raise SynapseError(403, errmsg, Codes.FORBIDDEN)

# now let's look for events where the sender's domain is different to the
# event id's domain (normally only the case for joins/leaves), and add additional
# checks. Only do this if the room version has a concept of event ID domain
# (ie, the room version uses old-style non-hash event IDs).
if room_version.event_format == EventFormatVersions.V1:
pdus_to_check_event_id = [
p
for p in pdus_to_check
if p.sender_domain != get_domain_from_id(p.pdu.event_id)
]

more_deferreds = keyring.verify_json_objects_for_server(
[
(
get_domain_from_id(p.pdu.event_id),
p.redacted_pdu_json,
p.pdu.origin_server_ts if room_version.enforce_key_validity else 0,
p.pdu.event_id,
)
for p in pdus_to_check_event_id
]
)

def event_err(e, pdu_to_check):
if room_version.event_format == EventFormatVersions.V1 and get_domain_from_id(
pdu.event_id
) != get_domain_from_id(pdu.sender):
try:
await keyring.verify_event_for_server(
get_domain_from_id(pdu.event_id),
pdu,
pdu.origin_server_ts if room_version.enforce_key_validity else 0,
)
except Exception as e:
errmsg = (
"event id %s: unable to verify signature for event id domain: %s"
% (pdu_to_check.pdu.event_id, e.getErrorMessage())
"event id %s: unable to verify signature for event id domain %s: %s"
% (
pdu.event_id,
get_domain_from_id(pdu.event_id),
e,
)
)
raise SynapseError(403, errmsg, Codes.FORBIDDEN)

for p, d in zip(pdus_to_check_event_id, more_deferreds):
d.addErrback(event_err, p)
p.deferreds.append(d)

# replace lists of deferreds with single Deferreds
return [_flatten_deferred_list(p.deferreds) for p in pdus_to_check]


def _flatten_deferred_list(deferreds: List[Deferred]) -> Deferred:
"""Given a list of deferreds, either return the single deferred,
combine into a DeferredList, or return an already resolved deferred.
"""
if len(deferreds) > 1:
return DeferredList(deferreds, fireOnOneErrback=True, consumeErrors=True)
elif len(deferreds) == 1:
return deferreds[0]
else:
return defer.succeed(None)
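`get_domain_from_id` is used throughout these checks to map a Matrix ID to the server that owns it; conceptually it is just the part after the first colon. A simplified sketch — Synapse's real helper raises a `SynapseError` rather than a `ValueError`:

```python
def get_domain_from_id(string: str) -> str:
    # "@alice:example.org" -> "example.org"; also works for event IDs in
    # room versions where they carry a domain, e.g. "$abc123:example.org".
    idx = string.find(":")
    if idx == -1:
        raise ValueError("Invalid ID: %r" % (string,))
    return string[idx + 1 :]
```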
def _is_invite_via_3pid(event: EventBase) -> bool:
return (
@@ -21,6 +21,7 @@ from typing import (
Any,
Awaitable,
Callable,
Collection,
Dict,
Iterable,
List,
@@ -35,9 +36,6 @@ from typing import (
import attr
from prometheus_client import Counter

from twisted.internet import defer
from twisted.internet.defer import Deferred

from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import (
CodeMessageException,
@@ -55,10 +53,10 @@ from synapse.api.room_versions import (
)
from synapse.events import EventBase, builder
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.logging.context import make_deferred_yieldable, preserve_fn
from synapse.federation.transport.client import SendJoinResponse
from synapse.logging.utils import log_function
from synapse.types import JsonDict, get_domain_from_id
from synapse.util import unwrapFirstError
from synapse.util.async_helpers import concurrently_execute
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.retryutils import NotRetryingDestination

@@ -359,10 +357,9 @@ class FederationClient(FederationBase):
async def _check_sigs_and_hash_and_fetch(
self,
origin: str,
pdus: List[EventBase],
pdus: Collection[EventBase],
room_version: RoomVersion,
outlier: bool = False,
include_none: bool = False,
) -> List[EventBase]:
"""Takes a list of PDUs and checks the signatures and hashes of each
one. If a PDU fails its signature check then we check if we have it in
@@ -379,57 +376,87 @@ class FederationClient(FederationBase):
pdu
room_version
outlier: Whether the events are outliers or not
include_none: Whether to include None in the returned list
for events that have failed their checks

Returns:
A list of PDUs that have valid signatures and hashes.
"""
deferreds = self._check_sigs_and_hashes(room_version, pdus)

async def handle_check_result(pdu: EventBase, deferred: Deferred):
# We limit how many PDUs we check at once, as if we try to do hundreds
# of thousands of PDUs at once we see large memory spikes.

valid_pdus = []

async def _execute(pdu: EventBase) -> None:
valid_pdu = await self._check_sigs_and_hash_and_fetch_one(
pdu=pdu,
origin=origin,
outlier=outlier,
room_version=room_version,
)

if valid_pdu:
valid_pdus.append(valid_pdu)

await concurrently_execute(_execute, pdus, 10000)

return valid_pdus
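`concurrently_execute(_execute, pdus, 10000)` caps how many signature checks run at once, which is what keeps the memory spikes mentioned in the comment in check. The helper's core idea is a fixed pool of workers draining a shared iterator; a plain-asyncio sketch of the same pattern (Synapse's version is Twisted-based):

```python
import asyncio
from typing import Awaitable, Callable, Iterable, TypeVar

T = TypeVar("T")


async def concurrently_execute(
    func: Callable[[T], Awaitable[None]], args: Iterable[T], limit: int
) -> None:
    it = iter(args)

    async def worker() -> None:
        # Each worker pulls the next item between awaits; since pulling from
        # the iterator is synchronous, no two workers ever see the same item.
        for arg in it:
            await func(arg)

    await asyncio.gather(*(worker() for _ in range(limit)))
```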
async def _check_sigs_and_hash_and_fetch_one(
self,
pdu: EventBase,
origin: str,
room_version: RoomVersion,
outlier: bool = False,
) -> Optional[EventBase]:
"""Takes a PDU and checks its signatures and hashes. If the PDU fails
its signature check then we check if we have it in the database and if
not then request it from the originating server of that PDU.

If the PDU fails its content hash check then it is redacted.

Args:
origin
pdu
room_version
outlier: Whether the events are outliers or not
include_none: Whether to include None in the returned list
for events that have failed their checks

Returns:
The PDU (possibly redacted) if it has valid signatures and hashes.
"""

res = None
try:
res = await self._check_sigs_and_hash(room_version, pdu)
except SynapseError:
pass

if not res:
# Check local db.
res = await self.store.get_event(
pdu.event_id, allow_rejected=True, allow_none=True
)

pdu_origin = get_domain_from_id(pdu.sender)
if not res and pdu_origin != origin:
try:
res = await make_deferred_yieldable(deferred)
res = await self.get_pdu(
destinations=[pdu_origin],
event_id=pdu.event_id,
room_version=room_version,
outlier=outlier,
timeout=10000,
)
except SynapseError:
res = None
pass

if not res:
# Check local db.
res = await self.store.get_event(
pdu.event_id, allow_rejected=True, allow_none=True
)
if not res:
logger.warning(
"Failed to find copy of %s with valid signature", pdu.event_id
)

pdu_origin = get_domain_from_id(pdu.sender)
if not res and pdu_origin != origin:
try:
res = await self.get_pdu(
destinations=[pdu_origin],
event_id=pdu.event_id,
room_version=room_version,
outlier=outlier,
timeout=10000,
)
except SynapseError:
pass

if not res:
logger.warning(
"Failed to find copy of %s with valid signature", pdu.event_id
)

return res

handle = preserve_fn(handle_check_result)
deferreds2 = [handle(pdu, deferred) for pdu, deferred in zip(pdus, deferreds)]

valid_pdus = await make_deferred_yieldable(
defer.gatherResults(deferreds2, consumeErrors=True)
).addErrback(unwrapFirstError)

if include_none:
return valid_pdus
else:
return [p for p in valid_pdus if p]
return res

async def get_event_auth(
self, destination: str, room_id: str, event_id: str
@@ -665,21 +692,10 @@ class FederationClient(FederationBase):
"""

async def send_request(destination) -> Dict[str, Any]:
content = await self._do_send_join(destination, pdu)
response = await self._do_send_join(room_version, destination, pdu)

logger.debug("Got content: %s", content)

state = [
event_from_pdu_json(p, room_version, outlier=True)
for p in content.get("state", [])
]

auth_chain = [
event_from_pdu_json(p, room_version, outlier=True)
for p in content.get("auth_chain", [])
]

pdus = {p.event_id: p for p in itertools.chain(state, auth_chain)}
state = response.state
auth_chain = response.auth_events

create_event = None
for e in state:
@@ -704,14 +720,29 @@ class FederationClient(FederationBase):
% (create_room_version,)
)

valid_pdus = await self._check_sigs_and_hash_and_fetch(
destination,
list(pdus.values()),
outlier=True,
room_version=room_version,
logger.info(
"Processing from send_join %d events", len(state) + len(auth_chain)
)

valid_pdus_map = {p.event_id: p for p in valid_pdus}
# We now go and check the signatures and hashes for the event. Note
# that we limit how many events we process at a time to keep the
# memory overhead from exploding.
valid_pdus_map: Dict[str, EventBase] = {}

async def _execute(pdu: EventBase) -> None:
valid_pdu = await self._check_sigs_and_hash_and_fetch_one(
pdu=pdu,
origin=destination,
outlier=True,
room_version=room_version,
)

if valid_pdu:
valid_pdus_map[valid_pdu.event_id] = valid_pdu

await concurrently_execute(
_execute, itertools.chain(state, auth_chain), 10000
)

# NB: We *need* to copy to ensure that we don't have multiple
# references being passed on, as that causes... issues.
@@ -752,11 +783,14 @@ class FederationClient(FederationBase):

return await self._try_destination_list("send_join", destinations, send_request)

async def _do_send_join(self, destination: str, pdu: EventBase) -> JsonDict:
async def _do_send_join(
self, room_version: RoomVersion, destination: str, pdu: EventBase
) -> SendJoinResponse:
time_now = self._clock.time_msec()

try:
return await self.transport_layer.send_join_v2(
room_version=room_version,
destination=destination,
room_id=pdu.room_id,
event_id=pdu.event_id,
@@ -771,17 +805,14 @@ class FederationClient(FederationBase):

logger.debug("Couldn't send_join with the v2 API, falling back to the v1 API")

resp = await self.transport_layer.send_join_v1(
return await self.transport_layer.send_join_v1(
room_version=room_version,
destination=destination,
room_id=pdu.room_id,
event_id=pdu.event_id,
content=pdu.get_pdu_json(time_now),
)

# We expect the v1 API to respond with [200, content], so we only return the
# content.
return resp[1]

async def send_invite(
self,
destination: str,
@@ -17,18 +17,29 @@ import logging
import urllib
from typing import Any, Dict, List, Optional

import attr
import ijson

from synapse.api.constants import Membership
from synapse.api.errors import Codes, HttpResponseException, SynapseError
from synapse.api.room_versions import RoomVersion
from synapse.api.urls import (
FEDERATION_UNSTABLE_PREFIX,
FEDERATION_V1_PREFIX,
FEDERATION_V2_PREFIX,
)
from synapse.events import EventBase, make_event_from_dict
from synapse.http.matrixfederationclient import ByteParser
from synapse.logging.utils import log_function
from synapse.types import JsonDict

logger = logging.getLogger(__name__)

# Send join responses can be huge, so we set a separate limit here. The response
# is parsed in a streaming manner, which helps alleviate the issue of memory
# usage a bit.
MAX_RESPONSE_SIZE_SEND_JOIN = 500 * 1024 * 1024


class TransportLayerClient:
"""Sends federation HTTP requests to other servers"""
@@ -240,21 +251,38 @@ class TransportLayerClient:
return content

@log_function
async def send_join_v1(self, destination, room_id, event_id, content):
async def send_join_v1(
self,
room_version,
destination,
room_id,
event_id,
content,
) -> "SendJoinResponse":
path = _create_v1_path("/send_join/%s/%s", room_id, event_id)

response = await self.client.put_json(
destination=destination, path=path, data=content
destination=destination,
path=path,
data=content,
parser=SendJoinParser(room_version, v1_api=True),
max_response_size=MAX_RESPONSE_SIZE_SEND_JOIN,
)

return response

@log_function
async def send_join_v2(self, destination, room_id, event_id, content):
async def send_join_v2(
self, room_version, destination, room_id, event_id, content
) -> "SendJoinResponse":
path = _create_v2_path("/send_join/%s/%s", room_id, event_id)

response = await self.client.put_json(
destination=destination, path=path, data=content
destination=destination,
path=path,
data=content,
parser=SendJoinParser(room_version, v1_api=False),
max_response_size=MAX_RESPONSE_SIZE_SEND_JOIN,
)

return response
@@ -1053,3 +1081,59 @@ def _create_v2_path(path, *args):
str
"""
return _create_path(FEDERATION_V2_PREFIX, path, *args)


@attr.s(slots=True, auto_attribs=True)
class SendJoinResponse:
"""The parsed response of a `/send_join` request."""

auth_events: List[EventBase]
state: List[EventBase]


@ijson.coroutine
def _event_list_parser(room_version: RoomVersion, events: List[EventBase]):
"""Helper function for use with `ijson.items_coro` to parse an array of
events and add them to the given list.
"""

while True:
obj = yield
event = make_event_from_dict(obj, room_version)
events.append(event)


class SendJoinParser(ByteParser[SendJoinResponse]):
"""A parser for the response to `/send_join` requests.

Args:
room_version: The version of the room.
v1_api: Whether the response is in the v1 format.
"""

CONTENT_TYPE = "application/json"

def __init__(self, room_version: RoomVersion, v1_api: bool):
self._response = SendJoinResponse([], [])

# The V1 API has the shape of `[200, {...}]`, which we handle by
# prefixing with `item.*`.
prefix = "item." if v1_api else ""

self._coro_state = ijson.items_coro(
_event_list_parser(room_version, self._response.state),
prefix + "state.item",
)
self._coro_auth = ijson.items_coro(
_event_list_parser(room_version, self._response.auth_events),
prefix + "auth_chain.item",
)

def write(self, data: bytes) -> int:
self._coro_state.send(data)
self._coro_auth.send(data)

return len(data)

def finish(self) -> SendJoinResponse:
return self._response
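`SendJoinParser` feeds each chunk of the HTTP body into `ijson` push coroutines, so the potentially enormous response is never held in memory as one JSON document. The same mechanism in miniature, using only the `ijson` calls already shown above:

```python
import ijson

items = []


@ijson.coroutine
def collect(out):
    # Receives each fully-parsed element of the target array.
    while True:
        obj = yield
        out.append(obj)


# "state.item" selects the elements of the top-level "state" array.
coro = ijson.items_coro(collect(items), "state.item")

# Chunks can split the JSON at any byte boundary, as an HTTP body would.
for chunk in (b'{"state": [{"a": 1}, ', b'{"b": 2}], "auth_chain": []}'):
    coro.send(chunk)
coro.close()

print(items)  # [{'a': 1}, {'b': 2}]
```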
@@ -37,6 +37,7 @@ from synapse.http.servlet import (
)
from synapse.logging.context import run_in_background
from synapse.logging.opentracing import (
SynapseTags,
start_active_span,
start_active_span_from_request,
tags,
@@ -151,7 +152,9 @@ class Authenticator:
)

await self.keyring.verify_json_for_server(
origin, json_request, now, "Incoming request"
origin,
json_request,
now,
)

logger.debug("Request from %s", origin)
@@ -160,7 +163,7 @@ class Authenticator:
# If we get a valid signed request from the other side, it's probably
# alive
retry_timings = await self.store.get_destination_retry_timings(origin)
if retry_timings and retry_timings["retry_last_ts"]:
if retry_timings and retry_timings.retry_last_ts:
run_in_background(self._reset_retry_timings, origin)

return origin
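The `retry_timings["retry_last_ts"]` to `retry_timings.retry_last_ts` change reflects the store now returning an attrs object rather than a raw row dict. For illustration only — the actual class name and its full field set aren't shown in this excerpt, so the sketch below is an assumption:

```python
import attr


@attr.s(slots=True, frozen=True, auto_attribs=True)
class DestinationRetryTimings:
    """Hypothetical shape of the value returned by
    get_destination_retry_timings, enabling the attribute access used above."""

    retry_last_ts: int
    retry_interval: int
```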
@@ -314,7 +317,7 @@ class BaseFederationServlet:
raise

request_tags = {
"request_id": request.get_request_id(),
SynapseTags.REQUEST_ID: request.get_request_id(),
tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
tags.HTTP_METHOD: request.get_method(),
tags.HTTP_URL: request.get_redacted_uri(),
@@ -1398,7 +1401,7 @@ class FederationSpaceSummaryServlet(BaseFederationServlet):
)

return 200, await self.handler.federation_space_summary(
room_id, suggested_only, max_rooms_per_space, exclude_rooms
origin, room_id, suggested_only, max_rooms_per_space, exclude_rooms
)

# TODO When switching to the stable endpoint, remove the POST handler.
@@ -1428,7 +1431,7 @@ class FederationSpaceSummaryServlet(BaseFederationServlet):
)

return 200, await self.handler.federation_space_summary(
room_id, suggested_only, max_rooms_per_space, exclude_rooms
origin, room_id, suggested_only, max_rooms_per_space, exclude_rooms
)


@@ -1562,13 +1565,12 @@ def register_servlets(
server_name=hs.hostname,
).register(resource)

if hs.config.experimental.spaces_enabled:
FederationSpaceSummaryServlet(
handler=hs.get_space_summary_handler(),
authenticator=authenticator,
ratelimiter=ratelimiter,
server_name=hs.hostname,
).register(resource)
FederationSpaceSummaryServlet(
handler=hs.get_space_summary_handler(),
authenticator=authenticator,
ratelimiter=ratelimiter,
server_name=hs.hostname,
).register(resource)

if "openid" in servlet_groups:
for servletclass in OPENID_SERVLET_CLASSES:
@@ -108,7 +108,9 @@ class GroupAttestationSigning:

assert server_name is not None
await self.keyring.verify_json_for_server(
server_name, attestation, now, "Group attestation"
server_name,
attestation,
now,
)

def create_attestation(self, group_id: str, user_id: str) -> JsonDict:
@@ -15,12 +15,9 @@
import email.mime.multipart
import email.utils
import logging
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from typing import TYPE_CHECKING, List, Optional, Tuple

from synapse.api.errors import StoreError, SynapseError
from synapse.logging.context import make_deferred_yieldable
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.types import UserID
from synapse.util import stringutils
@@ -36,9 +33,11 @@ class AccountValidityHandler:
self.hs = hs
self.config = hs.config
self.store = self.hs.get_datastore()
self.sendmail = self.hs.get_sendmail()
self.send_email_handler = self.hs.get_send_email_handler()
self.clock = self.hs.get_clock()

self._app_name = self.hs.config.email_app_name

self._account_validity_enabled = (
hs.config.account_validity.account_validity_enabled
)
@@ -63,23 +62,10 @@ class AccountValidityHandler:
self._template_text = (
hs.config.account_validity.account_validity_template_text
)
account_validity_renew_email_subject = (
self._renew_email_subject = (
hs.config.account_validity.account_validity_renew_email_subject
)

try:
app_name = hs.config.email_app_name

self._subject = account_validity_renew_email_subject % {"app": app_name}

self._from_string = hs.config.email_notif_from % {"app": app_name}
except Exception:
# If substitution failed, fall back to the bare strings.
self._subject = account_validity_renew_email_subject
self._from_string = hs.config.email_notif_from

self._raw_from = email.utils.parseaddr(self._from_string)[1]

# Check the renewal emails to send and send them every 30min.
if hs.config.run_background_tasks:
self.clock.looping_call(self._send_renewal_emails, 30 * 60 * 1000)
@@ -159,38 +145,17 @@ class AccountValidityHandler:
}

html_text = self._template_html.render(**template_vars)
html_part = MIMEText(html_text, "html", "utf8")

plain_text = self._template_text.render(**template_vars)
text_part = MIMEText(plain_text, "plain", "utf8")

for address in addresses:
raw_to = email.utils.parseaddr(address)[1]

multipart_msg = MIMEMultipart("alternative")
multipart_msg["Subject"] = self._subject
multipart_msg["From"] = self._from_string
multipart_msg["To"] = address
multipart_msg["Date"] = email.utils.formatdate()
multipart_msg["Message-ID"] = email.utils.make_msgid()
multipart_msg.attach(text_part)
multipart_msg.attach(html_part)

logger.info("Sending renewal email to %s", address)

await make_deferred_yieldable(
self.sendmail(
self.hs.config.email_smtp_host,
self._raw_from,
raw_to,
multipart_msg.as_string().encode("utf8"),
reactor=self.hs.get_reactor(),
port=self.hs.config.email_smtp_port,
requireAuthentication=self.hs.config.email_smtp_user is not None,
username=self.hs.config.email_smtp_user,
password=self.hs.config.email_smtp_pass,
requireTransportSecurity=self.hs.config.require_transport_security,
)
await self.send_email_handler.send_email(
email_address=raw_to,
subject=self._renew_email_subject,
app_name=self._app_name,
html=html_text,
text=plain_text,
)

await self.store.set_renewal_mail_status(user_id=user_id, email_sent=True)
@@ -87,7 +87,8 @@ class ApplicationServicesHandler:
self.is_processing = True
try:
limit = 100
while True:
upper_bound = -1
while upper_bound < self.current_max:
(
upper_bound,
events,
@@ -95,9 +96,6 @@ class ApplicationServicesHandler:
self.current_max, limit
)

if not events:
break

events_by_room = {}  # type: Dict[str, List[EventBase]]
for event in events:
events_by_room.setdefault(event.room_id, []).append(event)
@@ -153,9 +151,6 @@ class ApplicationServicesHandler:

await self.store.set_appservice_last_pos(upper_bound)

now = self.clock.time_msec()
ts = await self.store.get_received_ts(events[-1].event_id)

synapse.metrics.event_processing_positions.labels(
"appservice_sender"
).set(upper_bound)
@@ -168,12 +163,16 @@ class ApplicationServicesHandler:

event_processing_loop_counter.labels("appservice_sender").inc()

synapse.metrics.event_processing_lag.labels(
"appservice_sender"
).set(now - ts)
synapse.metrics.event_processing_last_ts.labels(
"appservice_sender"
).set(ts)
if events:
now = self.clock.time_msec()
ts = await self.store.get_received_ts(events[-1].event_id)

synapse.metrics.event_processing_lag.labels(
"appservice_sender"
).set(now - ts)
synapse.metrics.event_processing_last_ts.labels(
"appservice_sender"
).set(ts)
finally:
self.is_processing = False
@@ -11,10 +11,12 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from typing import TYPE_CHECKING, Collection, Optional

from synapse.api.constants import EventTypes, JoinRules
from synapse.api.constants import EventTypes, JoinRules, Membership
from synapse.api.errors import AuthError
from synapse.api.room_versions import RoomVersion
from synapse.events import EventBase
from synapse.types import StateMap

if TYPE_CHECKING:
@@ -29,46 +31,104 @@ class EventAuthHandler:
def __init__(self, hs: "HomeServer"):
self._store = hs.get_datastore()

async def can_join_without_invite(
self, state_ids: StateMap[str], room_version: RoomVersion, user_id: str
) -> bool:
async def check_restricted_join_rules(
self,
state_ids: StateMap[str],
room_version: RoomVersion,
user_id: str,
prev_member_event: Optional[EventBase],
) -> None:
"""
Check whether a user can join a room without an invite.
Check whether a user can join a room without an invite due to restricted join rules.

When joining a room with restricted join rules (as defined in MSC3083),
the membership of spaces must be checked during join.
the membership of spaces must be checked during a room join.

Args:
state_ids: The state of the room as it currently is.
room_version: The room version of the room being joined.
user_id: The user joining the room.
prev_member_event: The current membership event for this user.

Raises:
AuthError if the user cannot join the room.
"""
# If the member is invited or currently joined, then nothing to do.
if prev_member_event and (
prev_member_event.membership in (Membership.JOIN, Membership.INVITE)
):
return

# This is not a room with a restricted join rule, so we don't need to do the
# restricted room specific checks.
#
# Note: We'll be applying the standard join rule checks later, which will
# catch the cases of e.g. trying to join private rooms without an invite.
if not await self.has_restricted_join_rules(state_ids, room_version):
return

# Get the spaces which allow access to this room and check if the user is
# in any of them.
allowed_spaces = await self.get_spaces_that_allow_join(state_ids)
if not await self.is_user_in_rooms(allowed_spaces, user_id):
raise AuthError(
403,
"You do not belong to any of the required spaces to join this room.",
)

async def has_restricted_join_rules(
self, state_ids: StateMap[str], room_version: RoomVersion
) -> bool:
"""
Return whether the room has the proper join rules set for access via spaces.

Args:
state_ids: The state of the room as it currently is.
room_version: The room version of the room to query.

Returns:
True if the user can join the room, false otherwise.
True if the proper room version and join rules are set for restricted access.
"""
# This only applies to room versions which support the new join rule.
if not room_version.msc3083_join_rules:
return True
return False

# If there's no join rule, then it defaults to invite (so this doesn't apply).
join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""), None)
if not join_rules_event_id:
return True
return False

# If the join rule is not restricted, this doesn't apply.
join_rules_event = await self._store.get_event(join_rules_event_id)
return join_rules_event.content.get("join_rule") == JoinRules.MSC3083_RESTRICTED

async def get_spaces_that_allow_join(
self, state_ids: StateMap[str]
) -> Collection[str]:
"""
Generate a list of spaces which allow access to a room.

Args:
state_ids: The state of the room as it currently is.

Returns:
A collection of spaces which provide membership to the room.
"""
# If there's no join rule, then it defaults to invite (so this doesn't apply).
join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""), None)
if not join_rules_event_id:
return ()

# If the join rule is not restricted, this doesn't apply.
join_rules_event = await self._store.get_event(join_rules_event_id)
if join_rules_event.content.get("join_rule") != JoinRules.MSC3083_RESTRICTED:
return True

# If allowed is of the wrong form, then only allow invited users.
allowed_spaces = join_rules_event.content.get("allow", [])
if not isinstance(allowed_spaces, list):
return False

# Get the list of joined rooms and see if there's an overlap.
joined_rooms = await self._store.get_rooms_for_user(user_id)
return ()

# Pull out the other room IDs, invalid data gets filtered.
result = []
for space in allowed_spaces:
if not isinstance(space, dict):
continue
@@ -77,10 +137,31 @@ class EventAuthHandler:
if not isinstance(space_id, str):
continue

# The user was joined to one of the spaces specified, they can join
# this room!
if space_id in joined_rooms:
result.append(space_id)

return result

async def is_user_in_rooms(self, room_ids: Collection[str], user_id: str) -> bool:
"""
Check whether a user is a member of any of the provided rooms.

Args:
room_ids: The rooms to check for membership.
user_id: The user to check.

Returns:
True if the user is in any of the rooms, false otherwise.
"""
if not room_ids:
return False

# Get the list of joined rooms and see if there's an overlap.
joined_rooms = await self._store.get_rooms_for_user(user_id)

# Check each room and see if the user is in it.
for room_id in room_ids:
if room_id in joined_rooms:
return True

# The user was not in any of the required spaces.
# The user was not in any of the rooms.
return False
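`is_user_in_rooms` is a plain membership-overlap test; the loop above is equivalent to asking whether the two collections intersect. A compact sketch of the same check, with the joined-room set passed in directly:

```python
from typing import AbstractSet, Collection


def is_user_in_rooms(room_ids: Collection[str], joined_rooms: AbstractSet[str]) -> bool:
    # Same result as the loop above: any overlap means the user qualifies.
    return any(room_id in joined_rooms for room_id in room_ids)
```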
@@ -22,6 +22,7 @@ from collections.abc import Container
from http import HTTPStatus
from typing import (
TYPE_CHECKING,
Collection,
Dict,
Iterable,
List,
@@ -91,6 +92,7 @@ from synapse.types import (
get_domain_from_id,
)
from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.iterutils import batch_iter
from synapse.util.retryutils import NotRetryingDestination
from synapse.util.stringutils import shortstr
from synapse.visibility import filter_events_for_server
@@ -177,6 +179,8 @@ class FederationHandler(BaseHandler):
self.room_queues = {}  # type: Dict[str, List[Tuple[EventBase, str]]]
self._room_pdu_linearizer = Linearizer("fed_room_pdu")

self._room_backfill = Linearizer("room_backfill")

self.third_party_event_rules = hs.get_third_party_event_rules()

self._ephemeral_messages_enabled = hs.config.enable_ephemeral_messages
@@ -576,7 +580,9 @@ class FederationHandler(BaseHandler):

# Fetch the state events from the DB, and check we have the auth events.
event_map = await self.store.get_events(state_event_ids, allow_rejected=True)
auth_events_in_store = await self.store.have_seen_events(auth_event_ids)
auth_events_in_store = await self.store.have_seen_events(
room_id, auth_event_ids
)

# Check for missing events. We handle state and auth event separately,
# as we want to pull the state from the DB, but we don't for the auth
@@ -609,7 +615,7 @@ class FederationHandler(BaseHandler):

if missing_auth_events:
auth_events_in_store = await self.store.have_seen_events(
missing_auth_events
room_id, missing_auth_events
)
missing_auth_events.difference_update(auth_events_in_store)

@@ -709,7 +715,7 @@ class FederationHandler(BaseHandler):

missing_auth_events = set(auth_event_ids) - fetched_events.keys()
missing_auth_events.difference_update(
await self.store.have_seen_events(missing_auth_events)
await self.store.have_seen_events(room_id, missing_auth_events)
)
logger.debug("We are also missing %i auth events", len(missing_auth_events))

@@ -1038,6 +1044,12 @@ class FederationHandler(BaseHandler):
return. This is used as part of the heuristic to decide if we
should back paginate.
"""
with (await self._room_backfill.queue(room_id)):
return await self._maybe_backfill_inner(room_id, current_depth, limit)

async def _maybe_backfill_inner(
self, room_id: str, current_depth: int, limit: int
) -> bool:
extremities = await self.store.get_oldest_events_with_depth_in_room(room_id)

if not extremities:
@@ -1353,11 +1365,12 @@ class FederationHandler(BaseHandler):

event_infos.append(_NewEventInfo(event, None, auth))

await self._auth_and_persist_events(
destination,
room_id,
event_infos,
)
if event_infos:
await self._auth_and_persist_events(
destination,
room_id,
event_infos,
)

def _sanity_check_event(self, ev: EventBase) -> None:
"""
@@ -1668,28 +1681,17 @@ class FederationHandler(BaseHandler):
# Check if the user is already in the room or invited to the room.
user_id = event.state_key
prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None)
newly_joined = True
user_is_invited = False
prev_member_event = None
if prev_member_event_id:
prev_member_event = await self.store.get_event(prev_member_event_id)
newly_joined = prev_member_event.membership != Membership.JOIN
user_is_invited = prev_member_event.membership == Membership.INVITE

# If the member is not already in the room, and not invited, check if
# they should be allowed access via membership in a space.
if (
newly_joined
and not user_is_invited
and not await self._event_auth_handler.can_join_without_invite(
prev_state_ids,
event.room_version,
user_id,
)
):
raise AuthError(
403,
"You do not belong to any of the required spaces to join this room.",
)
# Check if the member should be allowed access via membership in a space.
await self._event_auth_handler.check_restricted_join_rules(
prev_state_ids,
event.room_version,
user_id,
prev_member_event,
)

# Persist the event.
await self._auth_and_persist_event(origin, event, context)
@@ -2077,7 +2079,7 @@ class FederationHandler(BaseHandler):
self,
origin: str,
room_id: str,
event_infos: Iterable[_NewEventInfo],
event_infos: Collection[_NewEventInfo],
backfilled: bool = False,
) -> None:
"""Creates the appropriate contexts and persists events. The events
@@ -2088,6 +2090,9 @@ class FederationHandler(BaseHandler):
Notifies about the events where appropriate.
"""

if not event_infos:
return

async def prep(ev_info: _NewEventInfo):
event = ev_info.event
with nested_logging_context(suffix=event.event_id):
@@ -2216,13 +2221,14 @@ class FederationHandler(BaseHandler):
raise
events_to_context[e.event_id].rejected = RejectedReason.AUTH_ERROR

await self.persist_events_and_notify(
room_id,
[
(e, events_to_context[e.event_id])
for e in itertools.chain(auth_events, state)
],
)
if auth_events or state:
await self.persist_events_and_notify(
room_id,
[
(e, events_to_context[e.event_id])
for e in itertools.chain(auth_events, state)
],
)

new_event_context = await self.state_handler.compute_event_context(
event, old_state=state
@@ -2485,7 +2491,7 @@ class FederationHandler(BaseHandler):
#
# we start by checking if they are in the store, and then try calling /event_auth/.
if missing_auth:
have_events = await self.store.have_seen_events(missing_auth)
have_events = await self.store.have_seen_events(event.room_id, missing_auth)
logger.debug("Events %s are in the store", have_events)
missing_auth.difference_update(have_events)

@@ -2504,7 +2510,7 @@ class FederationHandler(BaseHandler):
return context

seen_remotes = await self.store.have_seen_events(
[e.event_id for e in remote_auth_chain]
event.room_id, [e.event_id for e in remote_auth_chain]
)

for e in remote_auth_chain:
@@ -3061,16 +3067,25 @@ class FederationHandler(BaseHandler):
the same room.
backfilled: Whether these events are a result of
backfilling or not

Returns:
The stream ID after which all events have been persisted.
"""
if not event_and_contexts:
return self.store.get_current_events_token()

instance = self.config.worker.events_shard_config.get_instance(room_id)
if instance != self._instance_name:
result = await self._send_events(
instance_name=instance,
store=self.store,
room_id=room_id,
event_and_contexts=event_and_contexts,
backfilled=backfilled,
)
# Limit the number of events sent over replication. We choose 200
# here as that is what we default to in `max_request_body_size(..)`
for batch in batch_iter(event_and_contexts, 200):
result = await self._send_events(
instance_name=instance,
store=self.store,
room_id=room_id,
event_and_contexts=batch,
backfilled=backfilled,
)
return result["max_stream_id"]
else:
assert self.storage.persistence
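`batch_iter(event_and_contexts, 200)` slices the events into replication-sized chunks before each `_send_events` call. A sketch of such a helper, with behaviour assumed from its use here:

```python
from itertools import islice
from typing import Iterable, Iterator, Tuple, TypeVar

T = TypeVar("T")


def batch_iter(iterable: Iterable[T], size: int) -> Iterator[Tuple[T, ...]]:
    """Yield tuples of up to `size` items until the input is exhausted."""
    sourceiter = iter(iterable)
    # iter(callable, sentinel) stops when the callable returns the sentinel,
    # i.e. when islice comes back empty.
    return iter(lambda: tuple(islice(sourceiter, size)), ())
```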
@@ -222,9 +222,21 @@ class BasePresenceHandler(abc.ABC):
|
||||
|
||||
@abc.abstractmethod
|
||||
async def set_state(
|
||||
self, target_user: UserID, state: JsonDict, ignore_status_msg: bool = False
|
||||
self,
|
||||
target_user: UserID,
|
||||
state: JsonDict,
|
||||
ignore_status_msg: bool = False,
|
||||
force_notify: bool = False,
|
||||
) -> None:
|
||||
"""Set the presence state of the user. """
|
||||
"""Set the presence state of the user.
|
||||
|
||||
Args:
|
||||
target_user: The ID of the user to set the presence state of.
|
||||
state: The presence state as a JSON dictionary.
|
||||
ignore_status_msg: True to ignore the "status_msg" field of the `state` dict.
|
||||
If False, the user's current status will be updated.
|
||||
force_notify: Whether to force notification of the update to clients.
|
||||
"""
|
||||
|
||||
@abc.abstractmethod
|
||||
async def bump_presence_active_time(self, user: UserID):
|
||||
@@ -287,14 +299,59 @@ class BasePresenceHandler(abc.ABC):
|
||||
if not states:
|
||||
return
|
||||
|
||||
hosts_and_states = await get_interested_remotes(
|
||||
hosts_to_states = await get_interested_remotes(
|
||||
self.store,
|
||||
self.presence_router,
|
||||
states,
|
||||
)
|
||||
|
||||
for destinations, states in hosts_and_states:
|
||||
self._federation.send_presence_to_destinations(states, destinations)
|
||||
for destination, host_states in hosts_to_states.items():
|
||||
self._federation.send_presence_to_destinations(host_states, [destination])
|
||||
|
||||
async def send_full_presence_to_users(self, user_ids: Collection[str]):
|
||||
"""
|
||||
Adds to the list of users who should receive a full snapshot of presence
|
||||
upon their next sync. Note that this only works for local users.
|
||||
|
||||
Then, grabs the current presence state for a given set of users and adds it
|
||||
to the top of the presence stream.
|
||||
|
||||
Args:
|
||||
user_ids: The IDs of the local users to send full presence to.
|
||||
"""
|
||||
# Retrieve one of the users from the given set
|
||||
if not user_ids:
|
||||
raise Exception(
|
||||
"send_full_presence_to_users must be called with at least one user"
|
||||
)
|
||||
user_id = next(iter(user_ids))
|
||||
|
||||
# Mark all users as receiving full presence on their next sync
|
||||
await self.store.add_users_to_send_full_presence_to(user_ids)
|
||||
|
||||
# Add a new entry to the presence stream. Since we use stream tokens to determine whether a
|
||||
# local user should receive a full snapshot of presence when they sync, we need to bump the
|
||||
# presence stream so that subsequent syncs with no presence activity in between won't result
|
||||
# in the client receiving multiple full snapshots of presence.
|
||||
#
|
||||
# If we bump the stream ID, then the user will get a higher stream token next sync, and thus
|
||||
# correctly won't receive a second snapshot.
|
||||
|
||||
# Get the current presence state for one of the users (defaults to offline if not found)
|
||||
current_presence_state = await self.get_state(UserID.from_string(user_id))
|
||||
|
||||
# Convert the UserPresenceState object into a serializable dict
|
||||
state = {
|
||||
"presence": current_presence_state.state,
|
||||
"status_message": current_presence_state.status_msg,
|
||||
}
|
||||
|
||||
# Copy the presence state to the tip of the presence stream.
|
||||
|
||||
# We set force_notify=True here so that this presence update is guaranteed to
|
||||
# increment the presence stream ID (which resending the current user's presence
|
||||
# otherwise would not do).
|
||||
await self.set_state(UserID.from_string(user_id), state, force_notify=True)
|
||||
|
||||
|
||||
class _NullContextManager(ContextManager[None]):
|
||||
@@ -438,9 +495,6 @@ class WorkerPresenceHandler(BasePresenceHandler):
|
||||
users=users_to_states.keys(),
|
||||
)
|
||||
|
||||
# If this is a federation sender, notify about presence updates.
|
||||
await self.maybe_send_presence_to_interested_destinations(states)
|
||||
|
||||
async def process_replication_rows(
|
||||
self, stream_name: str, instance_name: str, token: int, rows: list
|
||||
):
|
||||
@@ -462,11 +516,27 @@ class WorkerPresenceHandler(BasePresenceHandler):
|
||||
for row in rows
|
||||
]
|
||||
|
||||
for state in states:
|
||||
self.user_to_current_state[state.user_id] = state
|
||||
# The list of states to notify sync streams and remote servers about.
|
||||
# This is calculated by comparing the old and new states for each user
|
||||
# using `should_notify(..)`.
|
||||
#
|
||||
# Note that this is necessary as the presence writer will periodically
|
||||
# flush presence state changes that should not be notified about to the
|
||||
# DB, and so will be sent over the replication stream.
|
||||
state_to_notify = []
|
||||
|
||||
for new_state in states:
|
||||
old_state = self.user_to_current_state.get(new_state.user_id)
|
||||
self.user_to_current_state[new_state.user_id] = new_state
|
||||
|
||||
if not old_state or should_notify(old_state, new_state):
|
||||
state_to_notify.append(new_state)
|
||||
|
||||
stream_id = token
|
||||
await self.notify_from_replication(states, stream_id)
|
||||
await self.notify_from_replication(state_to_notify, stream_id)
|
||||
|
||||
# If this is a federation sender, notify about presence updates.
|
||||
await self.maybe_send_presence_to_interested_destinations(state_to_notify)
|
||||
|
||||
    def get_currently_syncing_users_for_replication(self) -> Iterable[str]:
        return [
@@ -480,8 +550,17 @@ class WorkerPresenceHandler(BasePresenceHandler):
        target_user: UserID,
        state: JsonDict,
        ignore_status_msg: bool = False,
        force_notify: bool = False,
    ) -> None:
        """Set the presence state of the user."""
        """Set the presence state of the user.

        Args:
            target_user: The ID of the user to set the presence state of.
            state: The presence state as a JSON dictionary.
            ignore_status_msg: True to ignore the "status_msg" field of the `state` dict.
                If False, the user's current status will be updated.
            force_notify: Whether to force notification of the update to clients.
        """
        presence = state["presence"]

        valid_presence = (
@@ -508,6 +587,7 @@ class WorkerPresenceHandler(BasePresenceHandler):
            user_id=user_id,
            state=state,
            ignore_status_msg=ignore_status_msg,
            force_notify=force_notify,
        )
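On a worker, `set_state` does not mutate presence state locally; it validates the input and forwards the call, including the new `force_notify` flag, to the presence writer over replication. A hedged sketch of that shape; `replication_client` and `set_presence_state` are illustrative names, not Synapse's exact replication API:

```python
# Illustrative only: worker-side validation plus forwarding to the writer.
VALID_PRESENCE = ("online", "unavailable", "offline")

async def set_state_on_worker(
    replication_client,
    user_id: str,
    state: dict,
    ignore_status_msg: bool = False,
    force_notify: bool = False,
) -> None:
    if state["presence"] not in VALID_PRESENCE:
        raise ValueError("Invalid presence state")
    # The writer applies the update; with force_notify=True it bumps the
    # presence stream even when nothing about the user's presence changed.
    await replication_client.set_presence_state(
        user_id=user_id,
        state=state,
        ignore_status_msg=ignore_status_msg,
        force_notify=force_notify,
    )
```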
    async def bump_presence_active_time(self, user: UserID) -> None:
@@ -677,13 +757,19 @@ class PresenceHandler(BasePresenceHandler):
            [self.user_to_current_state[user_id] for user_id in unpersisted]
        )

    async def _update_states(self, new_states: Iterable[UserPresenceState]) -> None:
    async def _update_states(
        self, new_states: Iterable[UserPresenceState], force_notify: bool = False
    ) -> None:
        """Updates presence of users. Sets the appropriate timeouts. Pokes
        the notifier and federation if and only if the changed presence state
        should be sent to clients/servers.

        Args:
            new_states: The new user presence state updates to process.
            force_notify: Whether to force notifying clients of this presence state update,
                even if it doesn't change the state of a user's presence (e.g. online -> online).
                This is currently used to bump the max presence stream ID without changing any
                user's presence (see PresenceHandler.add_users_to_send_full_presence_to).
        """
        now = self.clock.time_msec()

@@ -720,6 +806,9 @@ class PresenceHandler(BasePresenceHandler):
                now=now,
            )

            if force_notify:
                should_notify = True

            self.user_to_current_state[user_id] = new_state

            if should_notify:
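The effect of the new flag is easy to state: whatever the per-user diff computed, `force_notify=True` forces the notify path, which is what allocates a new stream ID. A condensed illustration; `decide_notify` is a stand-in, not Synapse's richer `handle_update`:

```python
# Condensed sketch of the control flow added above.
def decide_notify(old_state: str, new_state: str, force_notify: bool) -> bool:
    should_notify = old_state != new_state  # stand-in for handle_update()'s checks
    if force_notify:
        should_notify = True
    return should_notify

assert decide_notify("online", "online", force_notify=False) is False  # normally suppressed
assert decide_notify("online", "online", force_notify=True) is True    # stream ID still bumped
```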
@@ -753,15 +842,15 @@ class PresenceHandler(BasePresenceHandler):
        if to_federation_ping:
            federation_presence_out_counter.inc(len(to_federation_ping))

            hosts_and_states = await get_interested_remotes(
            hosts_to_states = await get_interested_remotes(
                self.store,
                self.presence_router,
                list(to_federation_ping.values()),
            )

            for destinations, states in hosts_and_states:
            for destination, states in hosts_to_states.items():
                self._federation_queue.send_presence_to_destinations(
                    states, destinations
                    states, [destination]
                )

    async def _handle_timeouts(self) -> None:
@@ -1058,9 +1147,21 @@ class PresenceHandler(BasePresenceHandler):
        await self._update_states(updates)

    async def set_state(
        self, target_user: UserID, state: JsonDict, ignore_status_msg: bool = False
        self,
        target_user: UserID,
        state: JsonDict,
        ignore_status_msg: bool = False,
        force_notify: bool = False,
    ) -> None:
        """Set the presence state of the user."""
        """Set the presence state of the user.

        Args:
            target_user: The ID of the user to set the presence state of.
            state: The presence state as a JSON dictionary.
            ignore_status_msg: True to ignore the "status_msg" field of the `state` dict.
                If False, the user's current status will be updated.
            force_notify: Whether to force notification of the update to clients.
        """
        status_msg = state.get("status_msg", None)
        presence = state["presence"]

@@ -1091,7 +1192,9 @@ class PresenceHandler(BasePresenceHandler):
        ):
            new_fields["last_active_ts"] = self.clock.time_msec()

        await self._update_states([prev_state.copy_and_replace(**new_fields)])
        await self._update_states(
            [prev_state.copy_and_replace(**new_fields)], force_notify=force_notify
        )

    async def is_visible(self, observed_user: UserID, observer_user: UserID) -> bool:
        """Returns whether a user can see another user's presence."""
@@ -1389,11 +1492,10 @@ class PresenceEventSource:
        #
        # Presence -> Notifier -> PresenceEventSource -> Presence
        #
        # Same with get_module_api, get_presence_router
        # Same with get_presence_router:
        #
        # AuthHandler -> Notifier -> PresenceEventSource -> ModuleApi -> AuthHandler
        self.get_presence_handler = hs.get_presence_handler
        self.get_module_api = hs.get_module_api
        self.get_presence_router = hs.get_presence_router
        self.clock = hs.get_clock()
        self.store = hs.get_datastore()

@@ -1424,16 +1526,21 @@ class PresenceEventSource:
        stream_change_cache = self.store.presence_stream_cache

        with Measure(self.clock, "presence.get_new_events"):
            if user_id in self.get_module_api()._send_full_presence_to_local_users:
                # This user has been specified by a module to receive all current, online
                # user presence. Removing from_key and setting include_offline to false
                # will effectively do this.
                from_key = None
                include_offline = False

            if from_key is not None:
                from_key = int(from_key)

                # Check if this user should receive all current, online user presence. We only
                # bother to do this if from_key is set, as otherwise the user will receive all
                # user presence anyways.
                if await self.store.should_user_receive_full_presence_with_token(
                    user_id, from_key
                ):
                    # This user has been specified by a module to receive all current, online
                    # user presence. Removing from_key and setting include_offline to false
                    # will effectively do this.
                    from_key = None
                    include_offline = False

            max_token = self.store.get_current_presence_token()
            if from_key == max_token:
                # This is necessary as due to the way stream ID generators work
@@ -1467,12 +1574,6 @@ class PresenceEventSource:
                    user_id, include_offline, from_key
                )

                # Remove the user from the list of users to receive all presence
                if user_id in self.get_module_api()._send_full_presence_to_local_users:
                    self.get_module_api()._send_full_presence_to_local_users.remove(
                        user_id
                    )

                return presence_updates, max_token

            # Make mypy happy. users_interested_in should now be a set
@@ -1522,10 +1623,6 @@ class PresenceEventSource:
            )
            presence_updates = list(users_to_state.values())

        # Remove the user from the list of users to receive all presence
        if user_id in self.get_module_api()._send_full_presence_to_local_users:
            self.get_module_api()._send_full_presence_to_local_users.remove(user_id)

        if not include_offline:
            # Filter out offline presence states
            presence_updates = self._filter_offline_presence_state(presence_updates)
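The replacement check consults the database rather than an in-memory set on the module API, so it works across multiple workers. A heavily hedged stand-in for the semantics of `should_user_receive_full_presence_with_token` (the real implementation lives in Synapse's datastore; this only captures the token comparison):

```python
# Stand-in illustration: a user queued for full presence is recorded with the
# presence stream token at queue time; they receive full presence on any sync
# whose from_token has not yet advanced past that recorded token.
queued_full_presence: dict[str, int] = {}  # user_id -> stream token when queued

def should_receive_full_presence(user_id: str, from_token: int) -> bool:
    queued_at = queued_full_presence.get(user_id)
    return queued_at is not None and from_token < queued_at

queued_full_presence["@alice:example.org"] = 42
assert should_receive_full_presence("@alice:example.org", from_token=40)
assert not should_receive_full_presence("@alice:example.org", from_token=42)
```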
@@ -1878,7 +1975,7 @@ async def get_interested_remotes(
    store: DataStore,
    presence_router: PresenceRouter,
    states: List[UserPresenceState],
) -> List[Tuple[Collection[str], List[UserPresenceState]]]:
) -> Dict[str, Set[UserPresenceState]]:
    """Given a list of presence states figure out which remote servers
    should be sent which.

@@ -1890,11 +1987,9 @@ async def get_interested_remotes(
        states: A list of incoming user presence updates.

    Returns:
        A list of 2-tuples of destinations and states, where for
        each tuple the list of UserPresenceState should be sent to each
        destination
        A map from destinations to presence states to send to that destination.
    """
    hosts_and_states = []  # type: List[Tuple[Collection[str], List[UserPresenceState]]]
    hosts_and_states: Dict[str, Set[UserPresenceState]] = {}

    # First we look up the rooms each user is in (as well as any explicit
    # subscriptions), then for each distinct room we look up the remote
@@ -1906,11 +2001,12 @@ async def get_interested_remotes(
    for room_id, states in room_ids_to_states.items():
        user_ids = await store.get_users_in_room(room_id)
        hosts = {get_domain_from_id(user_id) for user_id in user_ids}
        hosts_and_states.append((hosts, states))
        for host in hosts:
            hosts_and_states.setdefault(host, set()).update(states)

    for user_id, states in users_to_states.items():
        host = get_domain_from_id(user_id)
        hosts_and_states.append(([host], states))
        hosts_and_states.setdefault(host, set()).update(states)

    return hosts_and_states
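Switching from a list of `(hosts, states)` tuples to a per-destination dict of sets means each remote server appears under exactly one key and duplicate states collapse. A self-contained sketch of the accumulation pattern, with plain strings standing in for `UserPresenceState`:

```python
from collections import defaultdict

# Two rooms share the remote server "remote.example"; under the old
# list-of-tuples shape it would appear twice, here its updates merge.
rooms_to_hosts = {
    "!room1:hs": {"remote.example", "other.example"},
    "!room2:hs": {"remote.example"},
}
rooms_to_states = {
    "!room1:hs": {"@a:hs online"},
    "!room2:hs": {"@a:hs online", "@b:hs offline"},
}

hosts_to_states: dict[str, set[str]] = defaultdict(set)
for room_id, hosts in rooms_to_hosts.items():
    for host in hosts:
        hosts_to_states[host].update(rooms_to_states[room_id])

assert hosts_to_states["remote.example"] == {"@a:hs online", "@b:hs offline"}
```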
@@ -260,25 +260,15 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):

        if event.membership == Membership.JOIN:
            newly_joined = True
            user_is_invited = False
            prev_member_event = None
            if prev_member_event_id:
                prev_member_event = await self.store.get_event(prev_member_event_id)
                newly_joined = prev_member_event.membership != Membership.JOIN
                user_is_invited = prev_member_event.membership == Membership.INVITE

            # If the member is not already in the room and is not accepting an invite,
            # check if they should be allowed access via membership in a space.
            if (
                newly_joined
                and not user_is_invited
                and not await self.event_auth_handler.can_join_without_invite(
                    prev_state_ids, event.room_version, user_id
                )
            ):
                raise AuthError(
                    403,
                    "You do not belong to any of the required spaces to join this room.",
                )
            # Check if the member should be allowed access via membership in a space.
            await self.event_auth_handler.check_restricted_join_rules(
                prev_state_ids, event.room_version, user_id, prev_member_event
            )

            # Only rate-limit if the user actually joined the room, otherwise we'll end
            # up blocking profile updates.
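The refactor moves the restricted-join policy into a single helper that raises on failure, instead of having the call site compute flags and raise itself. A hedged sketch of that shape (simplified; the real `check_restricted_join_rules` also inspects room-version support and space membership from state):

```python
# Illustrative stand-in: the helper derives newly_joined / user_is_invited
# from the previous membership and raises AuthError(403) when the join is barred.
class AuthError(Exception):
    def __init__(self, code: int, msg: str):
        super().__init__(msg)
        self.code = code

def check_restricted_join_rules_sketch(
    prev_membership: str | None, allowed_via_space: bool
) -> None:
    newly_joined = prev_membership != "join"
    user_is_invited = prev_membership == "invite"
    if newly_joined and not user_is_invited and not allowed_via_space:
        raise AuthError(
            403, "You do not belong to any of the required spaces to join this room."
        )

check_restricted_join_rules_sketch("invite", allowed_via_space=False)  # invited: passes
```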
98	synapse/handlers/send_email.py	Normal file
@@ -0,0 +1,98 @@
# Copyright 2021 The Matrix.org C.I.C. Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import email.utils
import logging
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from typing import TYPE_CHECKING

from synapse.logging.context import make_deferred_yieldable

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)


class SendEmailHandler:
    def __init__(self, hs: "HomeServer"):
        self.hs = hs

        self._sendmail = hs.get_sendmail()
        self._reactor = hs.get_reactor()

        self._from = hs.config.email.email_notif_from
        self._smtp_host = hs.config.email.email_smtp_host
        self._smtp_port = hs.config.email.email_smtp_port
        self._smtp_user = hs.config.email.email_smtp_user
        self._smtp_pass = hs.config.email.email_smtp_pass
        self._require_transport_security = hs.config.email.require_transport_security

    async def send_email(
        self,
        email_address: str,
        subject: str,
        app_name: str,
        html: str,
        text: str,
    ) -> None:
        """Send a multipart email with the given information.

        Args:
            email_address: The address to send the email to.
            subject: The email's subject.
            app_name: The app name to include in the From header.
            html: The HTML content to include in the email.
            text: The plain text content to include in the email.
        """
        try:
            from_string = self._from % {"app": app_name}
        except (KeyError, TypeError):
            from_string = self._from

        raw_from = email.utils.parseaddr(from_string)[1]
        raw_to = email.utils.parseaddr(email_address)[1]

        if raw_to == "":
            raise RuntimeError("Invalid 'to' address")

        html_part = MIMEText(html, "html", "utf8")
        text_part = MIMEText(text, "plain", "utf8")

        multipart_msg = MIMEMultipart("alternative")
        multipart_msg["Subject"] = subject
        multipart_msg["From"] = from_string
        multipart_msg["To"] = email_address
        multipart_msg["Date"] = email.utils.formatdate()
        multipart_msg["Message-ID"] = email.utils.make_msgid()
        multipart_msg.attach(text_part)
        multipart_msg.attach(html_part)

        logger.info("Sending email to %s", email_address)

        await make_deferred_yieldable(
            self._sendmail(
                self._smtp_host,
                raw_from,
                raw_to,
                multipart_msg.as_string().encode("utf8"),
                reactor=self._reactor,
                port=self._smtp_port,
                requireAuthentication=self._smtp_user is not None,
                username=self._smtp_user,
                password=self._smtp_pass,
                requireTransportSecurity=self._require_transport_security,
            )
        )
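A hedged usage sketch: with this new file, other handlers can send mail through the shared handler instead of duplicating SMTP plumbing. The `hs.get_send_email_handler()` accessor is assumed here to be what this change wires up on the HomeServer; treat the caller below as illustrative:

```python
# Illustrative caller (e.g. a password-reset flow).
async def send_password_reset(hs, user_email: str, link: str) -> None:
    send_email_handler = hs.get_send_email_handler()
    await send_email_handler.send_email(
        email_address=user_email,
        subject="Password reset requested",
        app_name="Matrix",
        html=f"<p>Reset your password <a href='{link}'>here</a>.</p>",
        text=f"Reset your password: {link}",
    )
```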
Some files were not shown because too many files have changed in this diff.