Compare commits: v1.59.0...release-v1 (196 commits)
Commit SHA1s in this comparison:

fdfcfaa042 09d89ddc1f ea10cdbea7 fa13080618 21e6c0ed64 b8bf61230c e87355f201
d580014e22 7b54badd31 8a499d7a60 495fbf5d63 a68dfef1f7 c51f5b9592 a47636c570
f7baffd8ec 44de53bb79 148fe58a24 1acc897c31 fcd8703508 e3163e2e11 6b46c3eb3d
01df5bacac 888a29f412 782cb7420a 7bc08f3201 88193f2125 79dadf7216 5949ab86f8
2e8763ec96 f0aec0abef cf05258f76 2fc787c341 641908f72f 2fba1076c5 3594f6c1f3
b2b5279a3f bf01e51554 c8684e6792 5e17922ef7 5984ada6bb 1e453053cb e541bb9eed
8fd87739bf d0e40dfe29 bcfdfeb65d e0fae823e9 c4f548e05d cd9fc058de af7db19e1e
1fd1856afc 7f92ac4c1c b10211871f 2480461879 119938792b 80bd614dac 563ef172ae
72df42078b 796a0312e1 6be4953b99 bda4600399 28989cb301 888eb736a1 724e11d620
c52abc1cfd d9f092285b 053ca5f3ca a7da00d4f7 3503f42741 e409ab8e92 bb7a637765
7b88f5a107 f1605b7447 bc1beebc27 317248d42c 49f06866e4 1cba285a79 e768644368
1885ee0113 b5707ceaba b83bc5fab5 1b338476af 4660d9fdcf a8db8c6eba 759f9c09e1
4cbcd4a999 6aeee9a19d 1f9013ce60 33e2916858 2e5f88b5e6 b4fab0b14f 774ac4930d
298911555c e7c77a8750 81d9f2a8e9 042e47970b 6855024e0a 5d9f886aab 88ce3080d4
9385cd0633 a670b5cda2 0b3423fd51 f5b1c09909 7c2a78bb3b 28199e9357 4cc4229cd7
a608ac847b 7a68203cde 67aae05ece 444588c5fc 438925c422 a6ab3f5619 4fef76ca34
fbf904bd54 39dee30f01 10280fc943 71e8afe34d 2be5a2b07b 96df31239c 177b884ad7
eb4aaa1b4b ab2a615cfb 684feeaf2f 66a5f6c400 f16ec055cc b935c9529c d25935cd3d
47619017f9 5675cebfaa 6ff99e3bea a1cb05b3e8 d38c73e9ab 0fce474a40 19d79b6ebe
3d8839c30c 50ae4eafe1 682431efbe 635f0d916b df4963548b a167304c8b deca250e3f
d24a1486e5 1aa30f7b3e c22314c4e8 d4713d3e33 8afb7b55d0 37935b5183 0d17357fcd
182ca78a12 5331fb5b47 6edefef602 942c30b16b 24b590de32 a34a41f135 1402159bb8
32ef24fbd7 fcf951d5dc 1fe202a1a3 6d8d1218dd 3eafee629d e24c11afd6 83be72d76c
3ce15cc7be b4eb163434 8060034612 a5c26750b5 86a515ccbf cde8af9a49 e8ae472d3b
9013104429 aec69d2481 39bed28b28 c9fc2c0d22 57f6c496d0 17e1eb7749 de1e599b9d
409573f6d0 bf7ce92bf7 db10f2c037 6ee61b9052 d38d242411 a559c8b0d9 9d8e380d2e
dffecade7d a4c75918b3 84facf769e c72d26c1e1 c997bfb926 29f06704b8 989fa33096
147f098fb4 dbb12a0b54 5cfb004595 5c00151c28 2aad0ae57f b44fbdffa4 02cdace707
.ci/scripts/checkout_complement.sh (new executable file, 25 lines)
@@ -0,0 +1,25 @@
#!/bin/bash
#
# Fetches a version of complement which best matches the current build.
#
# The tarball is unpacked into `./complement`.

set -e
mkdir -p complement

# Pick an appropriate version of complement. Depending on whether this is a PR or release,
# etc. we need to use different fallbacks:
#
# 1. First check if there's a similarly named branch (GITHUB_HEAD_REF
#    for pull requests, otherwise GITHUB_REF).
# 2. Attempt to use the base branch, e.g. when merging into release-vX.Y
#    (GITHUB_BASE_REF for pull requests).
# 3. Use the default complement branch ("HEAD").
for BRANCH_NAME in "$GITHUB_HEAD_REF" "$GITHUB_BASE_REF" "${GITHUB_REF#refs/heads/}" "HEAD"; do
  # Skip empty branch names and merge commits.
  if [[ -z "$BRANCH_NAME" || $BRANCH_NAME =~ ^refs/pull/.* ]]; then
    continue
  fi

  (wget -O - "https://github.com/matrix-org/complement/archive/$BRANCH_NAME.tar.gz" | tar -xz --strip-components=1 -C complement) && break
done
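For local debugging, the same fallback chain can be exercised by exporting the variables GitHub Actions would set; the branch names below are purely illustrative:

```sh
# Hypothetical local invocation of the new script (not part of this diff):
# simulate a PR build whose source branch has a matching Complement branch.
GITHUB_HEAD_REF="some-feature-branch" \
GITHUB_BASE_REF="develop" \
./.ci/scripts/checkout_complement.sh
# On success, ./complement contains the unpacked Complement tree.
```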
@@ -6,3 +6,6 @@ aff1eb7c671b0a3813407321d2702ec46c71fa56

# Update black to 20.8b1 (#9381).
0a00b7ff14890987f09112a2ae696c61001e6cf1
+
+# Convert tests/rest/admin/test_room.py to unix file endings (#7953).
+c4268e3da64f1abb5b31deaeb5769adb6510c0a7
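The hunk above appends entries to what appears to be a blame-ignore list (the file header was not captured in this view). If so, such a list is consumed by standard git tooling, for example:

```sh
# Skip the listed revisions when assigning blame (path is illustrative):
git blame --ignore-revs-file .git-blame-ignore-revs synapse/handlers/room.py

# Or configure it once per clone so plain `git blame` applies it:
git config blame.ignoreRevsFile .git-blame-ignore-revs
```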
.github/workflows/tests.yml (vendored, 62 lines changed)
@@ -306,7 +306,7 @@ jobs:
      - run: .ci/scripts/test_synapse_port_db.sh

  complement:
-    if: ${{ !failure() && !cancelled() }}
+    if: "${{ !failure() && !cancelled() }}"
    needs: linting-done
    runs-on: ubuntu-latest

@@ -333,26 +333,7 @@ jobs:
      # Attempt to check out the same branch of Complement as the PR. If it
      # doesn't exist, fallback to HEAD.
      - name: Checkout complement
-        shell: bash
-        run: |
-          mkdir -p complement
-          # Attempt to use the version of complement which best matches the current
-          # build. Depending on whether this is a PR or release, etc. we need to
-          # use different fallbacks.
-          #
-          # 1. First check if there's a similarly named branch (GITHUB_HEAD_REF
-          #    for pull requests, otherwise GITHUB_REF).
-          # 2. Attempt to use the base branch, e.g. when merging into release-vX.Y
-          #    (GITHUB_BASE_REF for pull requests).
-          # 3. Use the default complement branch ("HEAD").
-          for BRANCH_NAME in "$GITHUB_HEAD_REF" "$GITHUB_BASE_REF" "${GITHUB_REF#refs/heads/}" "HEAD"; do
-            # Skip empty branch names and merge commits.
-            if [[ -z "$BRANCH_NAME" || $BRANCH_NAME =~ ^refs/pull/.* ]]; then
-              continue
-            fi
-
-            (wget -O - "https://github.com/matrix-org/complement/archive/$BRANCH_NAME.tar.gz" | tar -xz --strip-components=1 -C complement) && break
-          done
+        run: synapse/.ci/scripts/checkout_complement.sh

      - run: |
          set -o pipefail
@@ -360,6 +341,45 @@ jobs:
        shell: bash
        name: Run Complement Tests

+  # We only run the workers tests on `develop` for now, because they're too slow to wait for on PRs.
+  # Sadly, you can't have an `if` condition on the value of a matrix, so this is a temporary, separate job for now.
+  # GitHub Actions doesn't support YAML anchors, so it's full-on duplication for now.
+  complement-developonly:
+    if: "${{ !failure() && !cancelled() && (github.ref == 'refs/heads/develop') }}"
+    needs: linting-done
+    runs-on: ubuntu-latest
+
+    steps:
+      # The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
+      # See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
+      - name: "Set Go Version"
+        run: |
+          # Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2
+          echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
+          # Add the Go path to the PATH: We need this so we can call gotestfmt
+          echo "~/go/bin" >> $GITHUB_PATH
+
+      - name: "Install Complement Dependencies"
+        run: |
+          sudo apt-get update && sudo apt-get install -y libolm3 libolm-dev
+          go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest
+
+      - name: Run actions/checkout@v2 for synapse
+        uses: actions/checkout@v2
+        with:
+          path: synapse
+
+      # Attempt to check out the same branch of Complement as the PR. If it
+      # doesn't exist, fallback to HEAD.
+      - name: Checkout complement
+        run: synapse/.ci/scripts/checkout_complement.sh
+
+      - run: |
+          set -o pipefail
+          WORKERS=1 COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
+        shell: bash
+        name: Run Complement Tests
+
  # a job which marks all the other jobs as complete, thus allowing PRs to be merged.
  tests-done:
    if: ${{ always() }}
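A note on the `if:` quoting change in the first hunk: a leading `!` introduces a tag in YAML, so an unquoted expression beginning with `!` can be rejected or misread by YAML parsers; GitHub's documentation recommends quoting expressions that start with `!`, which is what the diff does:

```yaml
# Unquoted: the leading "!" may be parsed as a YAML tag by some tooling.
if: ${{ !failure() && !cancelled() }}
# Quoted: a plain string to YAML; Actions still evaluates the ${{ ... }}.
if: "${{ !failure() && !cancelled() }}"
```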
CHANGES.md (276 lines changed)
@@ -1,3 +1,277 @@
Synapse 1.61.1 (2022-06-28)
===========================

This patch release fixes a security issue regarding URL previews, affecting all prior versions of Synapse. Server administrators are encouraged to update Synapse as soon as possible. We are not aware of these vulnerabilities being exploited in the wild.

Server administrators who are unable to update Synapse may use the workarounds described in the linked GitHub Security Advisory below.

## Security advisory

The following issue is fixed in 1.61.1.

* [GHSA-22p3-qrh9-cx32](https://github.com/matrix-org/synapse/security/advisories/GHSA-22p3-qrh9-cx32) / [CVE-2022-31052](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-31052)

  Synapse instances with the [`url_preview_enabled`](https://matrix-org.github.io/synapse/v1.61/usage/configuration/config_documentation.html#media-store) homeserver config option set to `true` are affected. URL previews of some web pages can lead to unbounded recursion, causing the request to either fail, or in some cases crash the running Synapse process.

  Requesting URL previews requires authentication. Nevertheless, it is possible to exploit this maliciously, either by malicious users on the homeserver, or by remote users sending URLs that a local user's client may automatically request a URL preview for.

  Homeservers with the `url_preview_enabled` configuration option set to `false` (the default) are unaffected. Instances with the `enable_media_repo` configuration option set to `false` are also unaffected, as this also disables URL preview functionality.

  Fixed by [fa1308061802ac7b7d20e954ba7372c5ac292333](https://github.com/matrix-org/synapse/commit/fa1308061802ac7b7d20e954ba7372c5ac292333).


Synapse 1.61.0 (2022-06-14)
===========================

This release removes support for the non-standard feature known both as 'groups' and as 'communities', which have been superseded by *Spaces*.

See [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1610)
for more details.

Improved Documentation
----------------------

- Mention removed community/group worker endpoints in [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1610). Contributed by @olmari. ([\#13023](https://github.com/matrix-org/synapse/issues/13023))


Synapse 1.61.0rc1 (2022-06-07)
==============================

Features
--------

- Add new `media_retention` options to the homeserver config for routinely cleaning up non-recently accessed media. ([\#12732](https://github.com/matrix-org/synapse/issues/12732), [\#12972](https://github.com/matrix-org/synapse/issues/12972), [\#12977](https://github.com/matrix-org/synapse/issues/12977))
- Experimental support for [MSC3772](https://github.com/matrix-org/matrix-spec-proposals/pull/3772): Push rule for mutually related events. ([\#12740](https://github.com/matrix-org/synapse/issues/12740), [\#12859](https://github.com/matrix-org/synapse/issues/12859))
- Update to the `check_event_for_spam` module callback: Deprecate the current callback signature, replace it with a new signature that is both less ambiguous (replacing booleans with explicit allow/block) and more powerful (ability to return explicit error codes). ([\#12808](https://github.com/matrix-org/synapse/issues/12808))
- Add storage and module API methods to get monthly active users (and their corresponding appservices) within an optionally specified time range. ([\#12838](https://github.com/matrix-org/synapse/issues/12838), [\#12917](https://github.com/matrix-org/synapse/issues/12917))
- Support the new error code `ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED` from [MSC3823](https://github.com/matrix-org/matrix-spec-proposals/pull/3823). ([\#12845](https://github.com/matrix-org/synapse/issues/12845), [\#12923](https://github.com/matrix-org/synapse/issues/12923))
- Add a configurable background job to delete stale devices. ([\#12855](https://github.com/matrix-org/synapse/issues/12855))
- Improve URL previews for pages with empty elements. ([\#12951](https://github.com/matrix-org/synapse/issues/12951))
- Allow updating a user's password using the admin API without logging out their devices. Contributed by @jcgruenhage. ([\#12952](https://github.com/matrix-org/synapse/issues/12952))


Bugfixes
--------

- Always send an `access_token` in `/thirdparty/` requests to appservices, as required by the [Application Service API specification](https://spec.matrix.org/v1.1/application-service-api/#third-party-networks). ([\#12746](https://github.com/matrix-org/synapse/issues/12746))
- Implement [MSC3816](https://github.com/matrix-org/matrix-spec-proposals/pull/3816): sending the root event in a thread should count as having 'participated' in it. ([\#12766](https://github.com/matrix-org/synapse/issues/12766))
- Delete events from the `federation_inbound_events_staging` table when a room is purged through the admin API. ([\#12784](https://github.com/matrix-org/synapse/issues/12784))
- Fix a bug where we did not correctly handle invalid device list updates over federation. Contributed by Carl Bordum Hansen. ([\#12829](https://github.com/matrix-org/synapse/issues/12829))
- Fix a bug which allowed multiple async operations to access database locks concurrently. Contributed by @sumnerevans @ Beeper. ([\#12832](https://github.com/matrix-org/synapse/issues/12832))
- Fix an issue introduced in Synapse 0.34 where the `/notifications` endpoint would only return notifications if a user registered at least one pusher. Contributed by Famedly. ([\#12840](https://github.com/matrix-org/synapse/issues/12840))
- Fix a bug where servers using a Postgres database would fail to backfill from an insertion event when MSC2716 is enabled (`experimental_features.msc2716_enabled`). ([\#12843](https://github.com/matrix-org/synapse/issues/12843))
- Fix [MSC3787](https://github.com/matrix-org/matrix-spec-proposals/pull/3787) rooms being omitted from room directory, room summary and space hierarchy responses. ([\#12858](https://github.com/matrix-org/synapse/issues/12858))
- Fix a bug introduced in Synapse 1.54.0 which could sometimes cause exceptions when handling federated traffic. ([\#12877](https://github.com/matrix-org/synapse/issues/12877))
- Fix a bug introduced in Synapse 1.59.0 which caused room deletion to fail with a foreign key violation error. ([\#12889](https://github.com/matrix-org/synapse/issues/12889))
- Fix a long-standing bug which caused the `/messages` endpoint to return an incorrect `end` attribute when there were no more events. Contributed by @Vetchu. ([\#12903](https://github.com/matrix-org/synapse/issues/12903))
- Fix a bug introduced in Synapse 1.58.0 where `/sync` would fail if the most recent event in a room was a redaction of an event that has since been purged. ([\#12905](https://github.com/matrix-org/synapse/issues/12905))
- Fix a potential memory leak when generating thumbnails. ([\#12932](https://github.com/matrix-org/synapse/issues/12932))
- Fix a long-standing bug where a URL preview would break if the image failed to download. ([\#12950](https://github.com/matrix-org/synapse/issues/12950))


Improved Documentation
----------------------

- Fix typographical errors in documentation. ([\#12863](https://github.com/matrix-org/synapse/issues/12863))
- Fix documentation incorrectly stating the `sendToDevice` endpoint can be directed at generic workers. Contributed by Nick @ Beeper. ([\#12867](https://github.com/matrix-org/synapse/issues/12867))


Deprecations and Removals
-------------------------

- Remove support for the non-standard groups/communities feature from Synapse. ([\#12553](https://github.com/matrix-org/synapse/issues/12553), [\#12558](https://github.com/matrix-org/synapse/issues/12558), [\#12563](https://github.com/matrix-org/synapse/issues/12563), [\#12895](https://github.com/matrix-org/synapse/issues/12895), [\#12897](https://github.com/matrix-org/synapse/issues/12897), [\#12899](https://github.com/matrix-org/synapse/issues/12899), [\#12900](https://github.com/matrix-org/synapse/issues/12900), [\#12936](https://github.com/matrix-org/synapse/issues/12936), [\#12966](https://github.com/matrix-org/synapse/issues/12966))
- Remove contributed `kick_users.py` script. This is broken under Python 3, and is not added to the environment when `pip install`ing Synapse. ([\#12908](https://github.com/matrix-org/synapse/issues/12908))
- Remove `contrib/jitsimeetbridge`. This was an unused experiment that hasn't been meaningfully changed since 2014. ([\#12909](https://github.com/matrix-org/synapse/issues/12909))
- Remove unused `contrib/experiements/cursesio.py` script, which fails to run under Python 3. ([\#12910](https://github.com/matrix-org/synapse/issues/12910))
- Remove unused `contrib/experiements/test_messaging.py` script. This fails to run on Python 3. ([\#12911](https://github.com/matrix-org/synapse/issues/12911))


Internal Changes
----------------

- Test Synapse against Complement with workers. ([\#12810](https://github.com/matrix-org/synapse/issues/12810), [\#12933](https://github.com/matrix-org/synapse/issues/12933))
- Reduce the amount of state we pull from the DB. ([\#12811](https://github.com/matrix-org/synapse/issues/12811), [\#12964](https://github.com/matrix-org/synapse/issues/12964))
- Try other homeservers when re-syncing state for rooms with partial state. ([\#12812](https://github.com/matrix-org/synapse/issues/12812))
- Resume state re-syncing for rooms with partial state after a Synapse restart. ([\#12813](https://github.com/matrix-org/synapse/issues/12813))
- Remove Mutual Rooms' ([MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666)) endpoint dependency on the User Directory. ([\#12836](https://github.com/matrix-org/synapse/issues/12836))
- Experimental: expand `check_event_for_spam` with ability to return additional fields. This enables spam-checker implementations to experiment with mechanisms to give users more information about why they are blocked and whether any action is needed from them to be unblocked. ([\#12846](https://github.com/matrix-org/synapse/issues/12846))
- Remove `dont_notify` from the `.m.rule.room.server_acl` rule. ([\#12849](https://github.com/matrix-org/synapse/issues/12849))
- Remove the unstable `/hierarchy` endpoint from [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946). ([\#12851](https://github.com/matrix-org/synapse/issues/12851))
- Pull out less state when handling gaps in room DAG. ([\#12852](https://github.com/matrix-org/synapse/issues/12852), [\#12904](https://github.com/matrix-org/synapse/issues/12904))
- Clean-up the push rules datastore. ([\#12856](https://github.com/matrix-org/synapse/issues/12856))
- Correct a type annotation in the URL preview source code. ([\#12860](https://github.com/matrix-org/synapse/issues/12860))
- Update `pyjwt` dependency to [2.4.0](https://github.com/jpadilla/pyjwt/releases/tag/2.4.0). ([\#12865](https://github.com/matrix-org/synapse/issues/12865))
- Enable the `/account/whoami` endpoint on synapse worker processes. Contributed by Nick @ Beeper. ([\#12866](https://github.com/matrix-org/synapse/issues/12866))
- Enable the `batch_send` endpoint on synapse worker processes. Contributed by Nick @ Beeper. ([\#12868](https://github.com/matrix-org/synapse/issues/12868))
- Don't generate empty AS transactions when the AS is flagged as down. Contributed by Nick @ Beeper. ([\#12869](https://github.com/matrix-org/synapse/issues/12869))
- Fix up the variable `state_store` naming. ([\#12871](https://github.com/matrix-org/synapse/issues/12871))
- Faster room joins: when querying the current state of the room, wait for state to be populated. ([\#12872](https://github.com/matrix-org/synapse/issues/12872))
- Avoid running queries which will never result in deletions. ([\#12879](https://github.com/matrix-org/synapse/issues/12879))
- Use constants for EDU types. ([\#12884](https://github.com/matrix-org/synapse/issues/12884))
- Reduce database load of `/sync` when presence is enabled. ([\#12885](https://github.com/matrix-org/synapse/issues/12885))
- Refactor `have_seen_events` to reduce memory consumed when processing federation traffic. ([\#12886](https://github.com/matrix-org/synapse/issues/12886))
- Refactor receipt linearization code. ([\#12888](https://github.com/matrix-org/synapse/issues/12888))
- Add type annotations to `synapse.logging.opentracing`. ([\#12894](https://github.com/matrix-org/synapse/issues/12894))
- Remove PyNaCl occurrences directly used in Synapse code. ([\#12902](https://github.com/matrix-org/synapse/issues/12902))
- Bump types-jsonschema from 4.4.1 to 4.4.6. ([\#12912](https://github.com/matrix-org/synapse/issues/12912))
- Rename storage classes. ([\#12913](https://github.com/matrix-org/synapse/issues/12913))
- Preparation for database schema simplifications: stop reading from `event_edges.room_id`. ([\#12914](https://github.com/matrix-org/synapse/issues/12914))
- Check if we are in a virtual environment before overriding the `PYTHONPATH` environment variable in the demo script. ([\#12916](https://github.com/matrix-org/synapse/issues/12916))
- Improve the logging when signature checks on events fail. ([\#12925](https://github.com/matrix-org/synapse/issues/12925))


Synapse 1.60.0 (2022-05-31)
===========================

This release of Synapse adds a unique index to the `state_group_edges` table, in
order to prevent accidentally introducing duplicate information (for example,
because a database backup was restored multiple times). If your Synapse database
already has duplicate rows in this table, this could fail with an error and
require manual remediation.

Additionally, the signature of the `check_event_for_spam` module callback has changed.
The previous signature has been deprecated and remains working for now. Module authors
should update their modules to use the new signature where possible.

See [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1600)
for more details.

Bugfixes
--------

- Fix a bug introduced in Synapse 1.60.0rc1 that would break some imports from `synapse.module_api`. ([\#12918](https://github.com/matrix-org/synapse/issues/12918))


Synapse 1.60.0rc2 (2022-05-27)
==============================

Features
--------

- Add an option allowing users to use their password to reauthenticate for privileged actions even though password login is disabled. ([\#12883](https://github.com/matrix-org/synapse/issues/12883))


Bugfixes
--------

- Explicitly close `ijson` coroutines once we are done with them, instead of leaving the garbage collector to close them. ([\#12875](https://github.com/matrix-org/synapse/issues/12875))


Internal Changes
----------------

- Improve URL previews by not including the content of media tags in the generated description. ([\#12887](https://github.com/matrix-org/synapse/issues/12887))


Synapse 1.60.0rc1 (2022-05-24)
==============================

Features
--------

- Measure the time taken in spam-checking callbacks and expose those measurements as metrics. ([\#12513](https://github.com/matrix-org/synapse/issues/12513))
- Add a `default_power_level_content_override` config option to set default room power levels per room preset. ([\#12618](https://github.com/matrix-org/synapse/issues/12618))
- Add support for [MSC3787: Allowing knocks to restricted rooms](https://github.com/matrix-org/matrix-spec-proposals/pull/3787). ([\#12623](https://github.com/matrix-org/synapse/issues/12623))
- Send `USER_IP` commands on a different Redis channel, in order to reduce traffic to workers that do not process these commands. ([\#12672](https://github.com/matrix-org/synapse/issues/12672), [\#12809](https://github.com/matrix-org/synapse/issues/12809))
- Synapse will now reload [cache config](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#caching) when it receives a [SIGHUP](https://en.wikipedia.org/wiki/SIGHUP) signal. ([\#12673](https://github.com/matrix-org/synapse/issues/12673))
- Add a config option to allow for auto-tuning of caches. ([\#12701](https://github.com/matrix-org/synapse/issues/12701))
- Update [MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716) implementation to process marker events from the current state to avoid markers being lost in timeline gaps for federated servers which would cause the imported history to be undiscovered. ([\#12718](https://github.com/matrix-org/synapse/issues/12718))
- Add a `drop_federated_event` callback to `SpamChecker` to disregard inbound federated events before they take up much processing power, in an emergency. ([\#12744](https://github.com/matrix-org/synapse/issues/12744))
- Implement [MSC3818: Copy room type on upgrade](https://github.com/matrix-org/matrix-spec-proposals/pull/3818). ([\#12786](https://github.com/matrix-org/synapse/issues/12786), [\#12792](https://github.com/matrix-org/synapse/issues/12792))
- Update to the `check_event_for_spam` module callback. Deprecate the current callback signature, replace it with a new signature that is both less ambiguous (replacing booleans with explicit allow/block) and more powerful (ability to return explicit error codes). ([\#12808](https://github.com/matrix-org/synapse/issues/12808))


Bugfixes
--------

- Fix a bug introduced in Synapse 1.7.0 that would prevent events from being sent to clients if there's a retention policy in the room when the support for retention policies is disabled. ([\#12611](https://github.com/matrix-org/synapse/issues/12611))
- Fix a bug introduced in Synapse 1.57.0 where `/messages` would throw a 500 error when querying for a non-existent room. ([\#12683](https://github.com/matrix-org/synapse/issues/12683))
- Add a unique index to `state_group_edges` to prevent duplicates being accidentally introduced and the consequential impact to performance. ([\#12687](https://github.com/matrix-org/synapse/issues/12687))
- Fix a long-standing bug where an empty room would be created when a user with an insufficient power level tried to upgrade a room. ([\#12696](https://github.com/matrix-org/synapse/issues/12696))
- Fix a bug introduced in Synapse 1.30.0 where empty rooms could be automatically created if a monthly active users limit is set. ([\#12713](https://github.com/matrix-org/synapse/issues/12713))
- Fix push to dismiss notifications when read on another client. Contributed by @SpiritCroc @ Beeper. ([\#12721](https://github.com/matrix-org/synapse/issues/12721))
- Fix poor database performance when reading the cache invalidation stream for large servers with lots of workers. ([\#12747](https://github.com/matrix-org/synapse/issues/12747))
- Fix a long-standing bug where the user directory background process would fail to make forward progress if a user included a null codepoint in their display name or avatar. ([\#12762](https://github.com/matrix-org/synapse/issues/12762))
- Delete events from the `federation_inbound_events_staging` table when a room is purged through the admin API. ([\#12770](https://github.com/matrix-org/synapse/issues/12770))
- Give a meaningful error message when a client tries to create a room with an invalid alias localpart. ([\#12779](https://github.com/matrix-org/synapse/issues/12779))
- Fix a bug introduced in 1.43.0 where a file (`providers.json`) was never closed. Contributed by @arkamar. ([\#12794](https://github.com/matrix-org/synapse/issues/12794))
- Fix a long-standing bug where finished log contexts would be re-started when failing to contact remote homeservers. ([\#12803](https://github.com/matrix-org/synapse/issues/12803))
- Fix a bug, introduced in Synapse 1.21.0, that led to media thumbnails being unusable before the index has been added in the background. ([\#12823](https://github.com/matrix-org/synapse/issues/12823))


Updates to the Docker image
---------------------------

- Fix the docker file after a dependency update. ([\#12853](https://github.com/matrix-org/synapse/issues/12853))


Improved Documentation
----------------------

- Fix a typo in the Media Admin API documentation. ([\#12715](https://github.com/matrix-org/synapse/issues/12715))
- Update the OpenID Connect example for Keycloak to be compatible with newer versions of Keycloak. Contributed by @nhh. ([\#12727](https://github.com/matrix-org/synapse/issues/12727))
- Fix typo in server listener documentation. ([\#12742](https://github.com/matrix-org/synapse/issues/12742))
- Link to the configuration manual from the welcome page of the documentation. ([\#12748](https://github.com/matrix-org/synapse/issues/12748))
- Fix typo in `run_background_tasks_on` option name in configuration manual documentation. ([\#12749](https://github.com/matrix-org/synapse/issues/12749))
- Add information regarding the `rc_invites` ratelimiting option to the configuration docs. ([\#12759](https://github.com/matrix-org/synapse/issues/12759))
- Add documentation for cancellation of request processing. ([\#12761](https://github.com/matrix-org/synapse/issues/12761))
- Recommend using docker to run tests against postgres. ([\#12765](https://github.com/matrix-org/synapse/issues/12765))
- Add missing user directory endpoint from the generic worker documentation. Contributed by @olmari. ([\#12773](https://github.com/matrix-org/synapse/issues/12773))
- Add additional info to documentation of config option `cache_autotuning`. ([\#12776](https://github.com/matrix-org/synapse/issues/12776))
- Update configuration manual documentation to document size-related suffixes. ([\#12777](https://github.com/matrix-org/synapse/issues/12777))
- Fix invalid YAML syntax in the example documentation for the `url_preview_accept_language` config option. ([\#12785](https://github.com/matrix-org/synapse/issues/12785))


Deprecations and Removals
-------------------------

- Require a body in POST requests to `/rooms/{roomId}/receipt/{receiptType}/{eventId}`, as required by the [Matrix specification](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3roomsroomidreceiptreceipttypeeventid). This breaks compatibility with Element Android 1.2.0 and earlier: users of those clients will be unable to send read receipts. ([\#12709](https://github.com/matrix-org/synapse/issues/12709))


Internal Changes
----------------

- Improve event caching mechanism to avoid having multiple copies of an event in memory at a time. ([\#10533](https://github.com/matrix-org/synapse/issues/10533))
- Preparation for faster-room-join work: return subsets of room state which we already have, immediately. ([\#12498](https://github.com/matrix-org/synapse/issues/12498))
- Add `@cancellable` decorator, for use on endpoint methods that can be cancelled when clients disconnect. ([\#12586](https://github.com/matrix-org/synapse/issues/12586), [\#12588](https://github.com/matrix-org/synapse/issues/12588), [\#12630](https://github.com/matrix-org/synapse/issues/12630), [\#12694](https://github.com/matrix-org/synapse/issues/12694), [\#12698](https://github.com/matrix-org/synapse/issues/12698), [\#12699](https://github.com/matrix-org/synapse/issues/12699), [\#12700](https://github.com/matrix-org/synapse/issues/12700), [\#12705](https://github.com/matrix-org/synapse/issues/12705))
- Enable cancellation of `GET /rooms/$room_id/members`, `GET /rooms/$room_id/state` and `GET /rooms/$room_id/state/$event_type/*` requests. ([\#12708](https://github.com/matrix-org/synapse/issues/12708))
- Improve documentation of the `synapse.push` module. ([\#12676](https://github.com/matrix-org/synapse/issues/12676))
- Refactor functions to on `PushRuleEvaluatorForEvent`. ([\#12677](https://github.com/matrix-org/synapse/issues/12677))
- Preparation for database schema simplifications: stop writing to `event_reference_hashes`. ([\#12679](https://github.com/matrix-org/synapse/issues/12679))
- Remove code which updates unused database column `application_services_state.last_txn`. ([\#12680](https://github.com/matrix-org/synapse/issues/12680))
- Refactor `EventContext` class. ([\#12689](https://github.com/matrix-org/synapse/issues/12689))
- Remove an unneeded class in the push code. ([\#12691](https://github.com/matrix-org/synapse/issues/12691))
- Consolidate parsing of relation information from events. ([\#12693](https://github.com/matrix-org/synapse/issues/12693))
- Convert namespace class `Codes` into a string enum. ([\#12703](https://github.com/matrix-org/synapse/issues/12703))
- Optimize private read receipt filtering. ([\#12711](https://github.com/matrix-org/synapse/issues/12711))
- Drop the logging level of status messages for the URL preview cache expiry job from INFO to DEBUG. ([\#12720](https://github.com/matrix-org/synapse/issues/12720))
- Downgrade some OIDC errors to warnings in the logs, to reduce the noise of Sentry reports. ([\#12723](https://github.com/matrix-org/synapse/issues/12723))
- Update configs used by Complement to allow more invites/3PID validations during tests. ([\#12731](https://github.com/matrix-org/synapse/issues/12731))
- Tweak the mypy plugin so that `@cached` can accept `on_invalidate=None`. ([\#12769](https://github.com/matrix-org/synapse/issues/12769))
- Move methods that call `add_push_rule` to the `PushRuleStore` class. ([\#12772](https://github.com/matrix-org/synapse/issues/12772))
- Make handling of federation Authorization header (more) compliant with RFC7230. ([\#12774](https://github.com/matrix-org/synapse/issues/12774))
- Refactor `resolve_state_groups_for_events` to not pull out full state when no state resolution happens. ([\#12775](https://github.com/matrix-org/synapse/issues/12775))
- Do not keep going if there are 5 back-to-back background update failures. ([\#12781](https://github.com/matrix-org/synapse/issues/12781))
- Fix federation when using the demo scripts. ([\#12783](https://github.com/matrix-org/synapse/issues/12783))
- The `hash_password` script now fails when it is called without specifying a config file. Contributed by @jae1911. ([\#12789](https://github.com/matrix-org/synapse/issues/12789))
- Improve and fix type hints. ([\#12567](https://github.com/matrix-org/synapse/issues/12567), [\#12477](https://github.com/matrix-org/synapse/issues/12477), [\#12717](https://github.com/matrix-org/synapse/issues/12717), [\#12753](https://github.com/matrix-org/synapse/issues/12753), [\#12695](https://github.com/matrix-org/synapse/issues/12695), [\#12734](https://github.com/matrix-org/synapse/issues/12734), [\#12716](https://github.com/matrix-org/synapse/issues/12716), [\#12726](https://github.com/matrix-org/synapse/issues/12726), [\#12790](https://github.com/matrix-org/synapse/issues/12790), [\#12833](https://github.com/matrix-org/synapse/issues/12833))
- Update EventContext `get_current_event_ids` and `get_prev_event_ids` to accept state filters and update calls where possible. ([\#12791](https://github.com/matrix-org/synapse/issues/12791))
- Remove Caddy from the Synapse workers image used in Complement. ([\#12818](https://github.com/matrix-org/synapse/issues/12818))
- Add Complement's shared registration secret to the Complement worker image. This fixes tests that depend on it. ([\#12819](https://github.com/matrix-org/synapse/issues/12819))
- Support registering Application Services when running with workers under Complement. ([\#12826](https://github.com/matrix-org/synapse/issues/12826))
- Disable 'faster room join' Complement tests when testing against Synapse with workers. ([\#12842](https://github.com/matrix-org/synapse/issues/12842))


Synapse 1.59.1 (2022-05-18)
===========================

This release fixes a long-standing issue which could prevent Synapse's user directory from updating properly.

Bugfixes
----------------

- Fix a long-standing bug where the user directory background process would fail to make forward progress if a user included a null codepoint in their display name or avatar. Contributed by Nick @ Beeper. ([\#12762](https://github.com/matrix-org/synapse/issues/12762))


Synapse 1.59.0 (2022-05-17)
===========================

@@ -78,7 +352,7 @@ Deprecations and Removals
-------------------------

- Remove unstable identifiers from [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069). ([\#12596](https://github.com/matrix-org/synapse/issues/12596))
- Remove the unspecified `m.login.jwt` login type and the unstable `uk.half-shot.msc2778.login.application_service` from
  [MSC2778](https://github.com/matrix-org/matrix-doc/pull/2778). ([\#12597](https://github.com/matrix-org/synapse/issues/12597))
- Synapse now requires at least Python 3.7.1 (up from 3.7.0), for compatibility with the latest Twisted trunk. ([\#12613](https://github.com/matrix-org/synapse/issues/12613))
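Several entries above concern the reworked `check_event_for_spam` callback (explicit allow/block plus error codes, instead of booleans). A minimal sketch of a module using the new-style callback; the exact import paths and return values are assumptions based on the changelog entries, not a definitive implementation:

```python
# Hedged sketch of a new-style spam checker module for Synapse >= 1.60.
import synapse.module_api
from synapse.module_api import NOT_SPAM
from synapse.module_api.errors import Codes


class ExampleSpamChecker:
    def __init__(self, config, api: synapse.module_api.ModuleApi):
        api.register_spam_checker_callbacks(
            check_event_for_spam=self.check_event_for_spam
        )

    async def check_event_for_spam(self, event):
        # Block events containing a marker string; allow everything else.
        if "spam" in event.content.get("body", ""):
            return Codes.FORBIDDEN  # explicit error code instead of True
        return NOT_SPAM  # explicit allow instead of False
```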
book.toml (12 lines changed)
@@ -34,6 +34,14 @@ additional-css = [
     "docs/website_files/table-of-contents.css",
     "docs/website_files/remove-nav-buttons.css",
     "docs/website_files/indent-section-headers.css",
+    "docs/website_files/version-picker.css",
 ]
-additional-js = ["docs/website_files/table-of-contents.js"]
-theme = "docs/website_files/theme"
+additional-js = [
+    "docs/website_files/table-of-contents.js",
+    "docs/website_files/version-picker.js",
+    "docs/website_files/version.js",
+]
+theme = "docs/website_files/theme"
+
+[preprocessor.schema_versions]
+command = "./scripts-dev/schema_versions.py"
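The `[preprocessor.schema_versions]` stanza added above registers an mdBook preprocessor. For background, an mdBook preprocessor command speaks a simple protocol: mdBook first invokes it as `cmd supports <renderer>` (exit 0 to accept), then pipes `[context, book]` JSON to stdin and reads the transformed book back from stdout. A generic sketch of that contract (not the actual Synapse script):

```python
#!/usr/bin/env python3
# Generic mdBook preprocessor skeleton, shown for context only.
import json
import sys

if __name__ == "__main__":
    if len(sys.argv) > 1 and sys.argv[1] == "supports":
        sys.exit(0)  # we support whatever renderer mdBook names in argv[2]

    context, book = json.load(sys.stdin)  # mdBook sends [context, book]
    # A real preprocessor would rewrite book["sections"] here.
    print(json.dumps(book))  # emit the (possibly modified) book
```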
@@ -16,6 +16,7 @@

""" Starts a synapse client console. """
import argparse
+import binascii
import cmd
import getpass
import json
@@ -26,9 +27,8 @@ import urllib
from http import TwistedHttpClient
from typing import Optional

-import nacl.encoding
-import nacl.signing
import urlparse
+from signedjson.key import NACL_ED25519, decode_verify_key_bytes
from signedjson.sign import SignatureVerifyException, verify_signed_json

from twisted.internet import defer, reactor, threads
@@ -41,7 +41,6 @@ TRUSTED_ID_SERVERS = ["localhost:8001"]


class SynapseCmd(cmd.Cmd):
-
    """Basic synapse command-line processor.

    This processes commands from the user and calls the relevant HTTP methods.
@@ -420,8 +419,8 @@ class SynapseCmd(cmd.Cmd):
        pubKey = None
        pubKeyObj = yield self.http_client.do_request("GET", url)
        if "public_key" in pubKeyObj:
-            pubKey = nacl.signing.VerifyKey(
-                pubKeyObj["public_key"], encoder=nacl.encoding.HexEncoder
+            pubKey = decode_verify_key_bytes(
+                NACL_ED25519, binascii.unhexlify(pubKeyObj["public_key"])
            )
        else:
            print("No public key found in pubkey response!")
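For context on the import swap above (direct PyNaCl usage replaced by `signedjson`, matching the "Remove PyNaCl occurrences directly used in Synapse code" changelog entry): `signedjson` bundles key handling and JSON signature checks in one package. A self-contained round-trip sketch, with illustrative names such as "server1":

```python
# Sign and verify a JSON object with signedjson (a sketch, not Synapse code).
from signedjson.key import generate_signing_key, get_verify_key
from signedjson.sign import (
    SignatureVerifyException,
    sign_json,
    verify_signed_json,
)

signing_key = generate_signing_key("0")  # ed25519 signing key, version "0"
signed = sign_json({"foo": "bar"}, "server1", signing_key)

try:
    verify_signed_json(signed, "server1", get_verify_key(signing_key))
    print("signature valid")
except SignatureVerifyException:
    print("signature invalid")
```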
contrib/experiements/cursesio.py (deleted)

@@ -1,165 +0,0 @@
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import curses
import curses.wrapper
from curses.ascii import isprint

from twisted.internet import reactor


class CursesStdIO:
    def __init__(self, stdscr, callback=None):
        self.statusText = "Synapse test app -"
        self.searchText = ""
        self.stdscr = stdscr

        self.logLine = ""

        self.callback = callback

        self._setup()

    def _setup(self):
        self.stdscr.nodelay(1)  # Make non blocking

        self.rows, self.cols = self.stdscr.getmaxyx()
        self.lines = []

        curses.use_default_colors()

        self.paintStatus(self.statusText)
        self.stdscr.refresh()

    def set_callback(self, callback):
        self.callback = callback

    def fileno(self):
        """We want to select on FD 0"""
        return 0

    def connectionLost(self, reason):
        self.close()

    def print_line(self, text):
        """add a line to the internal list of lines"""

        self.lines.append(text)
        self.redraw()

    def print_log(self, text):
        self.logLine = text
        self.redraw()

    def redraw(self):
        """method for redisplaying lines based on internal list of lines"""

        self.stdscr.clear()
        self.paintStatus(self.statusText)
        i = 0
        index = len(self.lines) - 1
        while i < (self.rows - 3) and index >= 0:
            self.stdscr.addstr(self.rows - 3 - i, 0, self.lines[index], curses.A_NORMAL)
            i = i + 1
            index = index - 1

        self.printLogLine(self.logLine)

        self.stdscr.refresh()

    def paintStatus(self, text):
        if len(text) > self.cols:
            raise RuntimeError("TextTooLongError")

        self.stdscr.addstr(
            self.rows - 2, 0, text + " " * (self.cols - len(text)), curses.A_STANDOUT
        )

    def printLogLine(self, text):
        self.stdscr.addstr(
            0, 0, text + " " * (self.cols - len(text)), curses.A_STANDOUT
        )

    def doRead(self):
        """Input is ready!"""
        curses.noecho()
        c = self.stdscr.getch()  # read a character

        if c == curses.KEY_BACKSPACE:
            self.searchText = self.searchText[:-1]

        elif c == curses.KEY_ENTER or c == 10:
            text = self.searchText
            self.searchText = ""

            self.print_line(">> %s" % text)

            try:
                if self.callback:
                    self.callback.on_line(text)
            except Exception as e:
                self.print_line(str(e))

            self.stdscr.refresh()

        elif isprint(c):
            if len(self.searchText) == self.cols - 2:
                return
            self.searchText = self.searchText + chr(c)

        self.stdscr.addstr(
            self.rows - 1,
            0,
            self.searchText + (" " * (self.cols - len(self.searchText) - 2)),
        )

        self.paintStatus(self.statusText + " %d" % len(self.searchText))
        self.stdscr.move(self.rows - 1, len(self.searchText))
        self.stdscr.refresh()

    def logPrefix(self):
        return "CursesStdIO"

    def close(self):
        """clean up"""

        curses.nocbreak()
        self.stdscr.keypad(0)
        curses.echo()
        curses.endwin()


class Callback:
    def __init__(self, stdio):
        self.stdio = stdio

    def on_line(self, text):
        self.stdio.print_line(text)


def main(stdscr):
    screen = CursesStdIO(stdscr)  # create Screen object

    callback = Callback(screen)

    screen.set_callback(callback)

    stdscr.refresh()
    reactor.addReader(screen)
    reactor.run()
    screen.close()


if __name__ == "__main__":
    curses.wrapper(main)
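The deleted `CursesStdIO` class above doubles as a Twisted file-descriptor reader: the reactor polls any object providing `fileno()`, `doRead()`, `connectionLost()` and `logPrefix()`, registered via `reactor.addReader()`. A stripped-down sketch of that same contract:

```python
# Minimal Twisted reader, mirroring the pattern in the removed file.
import os
import sys

from twisted.internet import reactor


class StdinEcho:
    def fileno(self):
        return 0  # ask the reactor to watch stdin

    def doRead(self):
        # Called by the reactor whenever stdin becomes readable.
        data = os.read(0, 1024)
        if not data:
            reactor.stop()
        else:
            sys.stdout.write("read: %r\n" % data)

    def connectionLost(self, reason):
        pass

    def logPrefix(self):
        return "StdinEcho"


if __name__ == "__main__":
    reactor.addReader(StdinEcho())
    reactor.run()
```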
contrib/experiements/test_messaging.py (deleted)

@@ -1,367 +0,0 @@
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


""" This is an example of using the server to server implementation to do a
basic chat style thing. It accepts commands from stdin and outputs to stdout.

It assumes that ucids are of the form <user>@<domain>, and uses <domain> as
the address of the remote home server to hit.

Usage:
    python test_messaging.py <port>

Currently assumes the local address is localhost:<port>

"""


import argparse
import curses.wrapper
import json
import logging
import os
import re

import cursesio

from twisted.internet import defer, reactor
from twisted.python import log

from synapse.app.homeserver import SynapseHomeServer
from synapse.federation import ReplicationHandler
from synapse.federation.units import Pdu
from synapse.util import origin_from_ucid

# from synapse.logging.utils import log_function


logger = logging.getLogger("example")


def excpetion_errback(failure):
    logging.exception(failure)


class InputOutput:
    """This is responsible for basic I/O so that a user can interact with
    the example app.
    """

    def __init__(self, screen, user):
        self.screen = screen
        self.user = user

    def set_home_server(self, server):
        self.server = server

    def on_line(self, line):
        """This is where we process commands."""

        try:
            m = re.match(r"^join (\S+)$", line)
            if m:
                # The `sender` wants to join a room.
                (room_name,) = m.groups()
                self.print_line("%s joining %s" % (self.user, room_name))
                self.server.join_room(room_name, self.user, self.user)
                # self.print_line("OK.")
                return

            m = re.match(r"^invite (\S+) (\S+)$", line)
            if m:
                # `sender` wants to invite someone to a room
                room_name, invitee = m.groups()
                self.print_line("%s invited to %s" % (invitee, room_name))
                self.server.invite_to_room(room_name, self.user, invitee)
                # self.print_line("OK.")
                return

            m = re.match(r"^send (\S+) (.*)$", line)
            if m:
                # `sender` wants to message a room
                room_name, body = m.groups()
                self.print_line("%s send to %s" % (self.user, room_name))
                self.server.send_message(room_name, self.user, body)
                # self.print_line("OK.")
                return

            m = re.match(r"^backfill (\S+)$", line)
            if m:
                # we want to backfill a room
                (room_name,) = m.groups()
                self.print_line("backfill %s" % room_name)
                self.server.backfill(room_name)
                return

            self.print_line("Unrecognized command")

        except Exception as e:
            logger.exception(e)

    def print_line(self, text):
        self.screen.print_line(text)

    def print_log(self, text):
        self.screen.print_log(text)


class IOLoggerHandler(logging.Handler):
    def __init__(self, io):
        logging.Handler.__init__(self)
        self.io = io

    def emit(self, record):
        if record.levelno < logging.WARN:
            return

        msg = self.format(record)
        self.io.print_log(msg)


class Room:
    """Used to store (in memory) the current membership state of a room, and
    which home servers we should send PDUs associated with the room to.
    """

    def __init__(self, room_name):
        self.room_name = room_name
        self.invited = set()
        self.participants = set()
        self.servers = set()

        self.oldest_server = None

        self.have_got_metadata = False

    def add_participant(self, participant):
        """Someone has joined the room"""
        self.participants.add(participant)
        self.invited.discard(participant)

        server = origin_from_ucid(participant)
        self.servers.add(server)

        if not self.oldest_server:
            self.oldest_server = server

    def add_invited(self, invitee):
        """Someone has been invited to the room"""
        self.invited.add(invitee)
        self.servers.add(origin_from_ucid(invitee))


class HomeServer(ReplicationHandler):
    """A very basic home server implentation that allows people to join a
    room and then invite other people.
    """

    def __init__(self, server_name, replication_layer, output):
        self.server_name = server_name
        self.replication_layer = replication_layer
        self.replication_layer.set_handler(self)

        self.joined_rooms = {}

        self.output = output

    def on_receive_pdu(self, pdu):
        """We just received a PDU"""
        pdu_type = pdu.pdu_type

        if pdu_type == "sy.room.message":
            self._on_message(pdu)
        elif pdu_type == "sy.room.member" and "membership" in pdu.content:
            if pdu.content["membership"] == "join":
                self._on_join(pdu.context, pdu.state_key)
            elif pdu.content["membership"] == "invite":
                self._on_invite(pdu.origin, pdu.context, pdu.state_key)
        else:
            self.output.print_line(
                "#%s (unrec) %s = %s"
                % (pdu.context, pdu.pdu_type, json.dumps(pdu.content))
            )

    def _on_message(self, pdu):
        """We received a message"""
        self.output.print_line(
            "#%s %s %s" % (pdu.context, pdu.content["sender"], pdu.content["body"])
        )

    def _on_join(self, context, joinee):
        """Someone has joined a room, either a remote user or a local user"""
        room = self._get_or_create_room(context)
        room.add_participant(joinee)

        self.output.print_line("#%s %s %s" % (context, joinee, "*** JOINED"))

    def _on_invite(self, origin, context, invitee):
        """Someone has been invited"""
        room = self._get_or_create_room(context)
        room.add_invited(invitee)

        self.output.print_line("#%s %s %s" % (context, invitee, "*** INVITED"))

        if not room.have_got_metadata and origin is not self.server_name:
            logger.debug("Get room state")
            self.replication_layer.get_state_for_context(origin, context)
            room.have_got_metadata = True

    @defer.inlineCallbacks
    def send_message(self, room_name, sender, body):
        """Send a message to a room!"""
        destinations = yield self.get_servers_for_context(room_name)

        try:
            yield self.replication_layer.send_pdu(
                Pdu.create_new(
                    context=room_name,
                    pdu_type="sy.room.message",
                    content={"sender": sender, "body": body},
                    origin=self.server_name,
                    destinations=destinations,
                )
            )
        except Exception as e:
            logger.exception(e)

    @defer.inlineCallbacks
    def join_room(self, room_name, sender, joinee):
        """Join a room!"""
        self._on_join(room_name, joinee)

        destinations = yield self.get_servers_for_context(room_name)

        try:
            pdu = Pdu.create_new(
                context=room_name,
                pdu_type="sy.room.member",
                is_state=True,
                state_key=joinee,
                content={"membership": "join"},
                origin=self.server_name,
                destinations=destinations,
            )
            yield self.replication_layer.send_pdu(pdu)
        except Exception as e:
            logger.exception(e)

    @defer.inlineCallbacks
    def invite_to_room(self, room_name, sender, invitee):
        """Invite someone to a room!"""
        self._on_invite(self.server_name, room_name, invitee)

        destinations = yield self.get_servers_for_context(room_name)

        try:
            yield self.replication_layer.send_pdu(
                Pdu.create_new(
                    context=room_name,
                    is_state=True,
                    pdu_type="sy.room.member",
                    state_key=invitee,
                    content={"membership": "invite"},
                    origin=self.server_name,
                    destinations=destinations,
                )
            )
        except Exception as e:
            logger.exception(e)

    def backfill(self, room_name, limit=5):
        room = self.joined_rooms.get(room_name)

        if not room:
            return

        dest = room.oldest_server

        return self.replication_layer.backfill(dest, room_name, limit)

    def _get_room_remote_servers(self, room_name):
        return list(self.joined_rooms.setdefault(room_name).servers)

    def _get_or_create_room(self, room_name):
        return self.joined_rooms.setdefault(room_name, Room(room_name))

    def get_servers_for_context(self, context):
        return defer.succeed(
            self.joined_rooms.setdefault(context, Room(context)).servers
        )


def main(stdscr):
    parser = argparse.ArgumentParser()
    parser.add_argument("user", type=str)
    parser.add_argument("-v", "--verbose", action="count")
    args = parser.parse_args()

    user = args.user
    server_name = origin_from_ucid(user)

    # Set up logging

    root_logger = logging.getLogger()

    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s"
    )
    if not os.path.exists("logs"):
        os.makedirs("logs")
    fh = logging.FileHandler("logs/%s" % user)
    fh.setFormatter(formatter)

    root_logger.addHandler(fh)
    root_logger.setLevel(logging.DEBUG)

    # Hack: The only way to get it to stop logging to sys.stderr :(
    log.theLogPublisher.observers = []
    observer = log.PythonLoggingObserver()
    observer.start()

    # Set up synapse server

    curses_stdio = cursesio.CursesStdIO(stdscr)
    input_output = InputOutput(curses_stdio, user)

    curses_stdio.set_callback(input_output)

    app_hs = SynapseHomeServer(server_name, db_name="dbs/%s" % user)
    replication = app_hs.get_replication_layer()

    hs = HomeServer(server_name, replication, curses_stdio)

    input_output.set_home_server(hs)

    # Add input_output logger
    io_logger = IOLoggerHandler(input_output)
    io_logger.setFormatter(formatter)
    root_logger.addHandler(io_logger)

    # Start!

    try:
        port = int(server_name.split(":")[1])
    except Exception:
        port = 12345

    app_hs.get_http_server().start_listening(port)

    reactor.addReader(curses_stdio)

    reactor.run()


if __name__ == "__main__":
    curses.wrapper(main)
@@ -1,298 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
"""
|
||||
This is an attempt at bridging matrix clients into a Jitis meet room via Matrix
|
||||
video call. It uses hard-coded xml strings overg XMPP BOSH. It can display one
|
||||
of the streams from the Jitsi bridge until the second lot of SDP comes down and
|
||||
we set the remote SDP at which point the stream ends. Our video never gets to
|
||||
the bridge.
|
||||
|
||||
Requires:
|
||||
npm install jquery jsdom
|
||||
"""
|
||||
import json
|
||||
import subprocess
|
||||
import time
|
||||
|
||||
import gevent
|
||||
import grequests
|
||||
from BeautifulSoup import BeautifulSoup
|
||||
|
||||
ACCESS_TOKEN = ""
|
||||
|
||||
MATRIXBASE = "https://matrix.org/_matrix/client/api/v1/"
|
||||
MYUSERNAME = "@davetest:matrix.org"
|
||||
|
||||
HTTPBIND = "https://meet.jit.si/http-bind"
|
||||
# HTTPBIND = 'https://jitsi.vuc.me/http-bind'
|
||||
# ROOMNAME = "matrix"
|
||||
ROOMNAME = "pibble"
|
||||
|
||||
HOST = "guest.jit.si"
|
||||
# HOST="jitsi.vuc.me"
|
||||
|
||||
TURNSERVER = "turn.guest.jit.si"
|
||||
# TURNSERVER="turn.jitsi.vuc.me"
|
||||
|
||||
ROOMDOMAIN = "meet.jit.si"
|
||||
# ROOMDOMAIN="conference.jitsi.vuc.me"
|
||||
|
||||
|
||||
class TrivialMatrixClient:
|
||||
def __init__(self, access_token):
|
||||
self.token = None
|
||||
self.access_token = access_token
|
||||
|
||||
def getEvent(self):
|
||||
while True:
|
||||
url = (
|
||||
MATRIXBASE
|
||||
+ "events?access_token="
|
||||
+ self.access_token
|
||||
+ "&timeout=60000"
|
||||
)
|
||||
if self.token:
|
||||
url += "&from=" + self.token
|
||||
req = grequests.get(url)
|
||||
resps = grequests.map([req])
|
||||
obj = json.loads(resps[0].content)
|
||||
print("incoming from matrix", obj)
|
||||
if "end" not in obj:
|
||||
continue
|
||||
self.token = obj["end"]
|
||||
if len(obj["chunk"]):
|
||||
return obj["chunk"][0]
|
||||
|
||||
def joinRoom(self, roomId):
|
||||
url = MATRIXBASE + "rooms/" + roomId + "/join?access_token=" + self.access_token
|
||||
print(url)
|
||||
headers = {"Content-Type": "application/json"}
|
||||
req = grequests.post(url, headers=headers, data="{}")
|
||||
resps = grequests.map([req])
|
||||
obj = json.loads(resps[0].content)
|
||||
print("response: ", obj)
|
||||
|
||||
def sendEvent(self, roomId, evType, event):
|
||||
url = (
|
||||
MATRIXBASE
|
||||
+ "rooms/"
|
||||
+ roomId
|
||||
+ "/send/"
|
||||
+ evType
|
||||
+ "?access_token="
|
||||
+ self.access_token
|
||||
)
|
||||
print(url)
|
||||
print(json.dumps(event))
|
||||
headers = {"Content-Type": "application/json"}
|
||||
req = grequests.post(url, headers=headers, data=json.dumps(event))
|
||||
resps = grequests.map([req])
|
||||
obj = json.loads(resps[0].content)
|
||||
print("response: ", obj)
|
||||
|
||||
|
||||
xmppClients = {}
|
||||
|
||||
|
||||
def matrixLoop():
|
||||
while True:
|
||||
ev = matrixCli.getEvent()
|
||||
print(ev)
|
||||
if ev["type"] == "m.room.member":
|
||||
print("membership event")
|
||||
if ev["membership"] == "invite" and ev["state_key"] == MYUSERNAME:
|
||||
roomId = ev["room_id"]
|
||||
print("joining room %s" % (roomId))
|
||||
matrixCli.joinRoom(roomId)
|
||||
elif ev["type"] == "m.room.message":
|
||||
if ev["room_id"] in xmppClients:
|
||||
print("already have a bridge for that user, ignoring")
|
||||
continue
|
||||
print("got message, connecting")
|
||||
xmppClients[ev["room_id"]] = TrivialXmppClient(ev["room_id"], ev["user_id"])
|
||||
gevent.spawn(xmppClients[ev["room_id"]].xmppLoop)
|
||||
elif ev["type"] == "m.call.invite":
|
||||
print("Incoming call")
|
||||
# sdp = ev['content']['offer']['sdp']
|
||||
# print "sdp: %s" % (sdp)
|
||||
# xmppClients[ev['room_id']] = TrivialXmppClient(ev['room_id'], ev['user_id'])
|
||||
# gevent.spawn(xmppClients[ev['room_id']].xmppLoop)
|
||||
elif ev["type"] == "m.call.answer":
|
||||
print("Call answered")
|
||||
sdp = ev["content"]["answer"]["sdp"]
|
||||
if ev["room_id"] not in xmppClients:
|
||||
print("We didn't have a call for that room")
|
||||
continue
|
||||
# should probably check call ID too
|
||||
xmppCli = xmppClients[ev["room_id"]]
|
||||
xmppCli.sendAnswer(sdp)
|
||||
elif ev["type"] == "m.call.hangup":
|
||||
if ev["room_id"] in xmppClients:
|
||||
xmppClients[ev["room_id"]].stop()
|
||||
del xmppClients[ev["room_id"]]
|
||||
|
||||
|
||||
class TrivialXmppClient:
|
||||
def __init__(self, matrixRoom, userId):
|
||||
self.rid = 0
|
||||
self.matrixRoom = matrixRoom
|
||||
self.userId = userId
|
||||
self.running = True
|
||||
|
||||
def stop(self):
|
||||
self.running = False
|
||||
|
||||
def nextRid(self):
|
||||
self.rid += 1
|
||||
return "%d" % (self.rid)
|
||||
|
||||
def sendIq(self, xml):
|
||||
fullXml = (
|
||||
"<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' sid='%s'>%s</body>"
|
||||
% (self.nextRid(), self.sid, xml)
|
||||
)
|
||||
# print "\t>>>%s" % (fullXml)
|
||||
return self.xmppPoke(fullXml)
|
||||
|
||||
def xmppPoke(self, xml):
|
||||
headers = {"Content-Type": "application/xml"}
|
||||
req = grequests.post(HTTPBIND, verify=False, headers=headers, data=xml)
|
||||
resps = grequests.map([req])
|
||||
obj = BeautifulSoup(resps[0].content)
|
||||
return obj
|
||||
|
||||
def sendAnswer(self, answer):
|
||||
print("sdp from matrix client", answer)
|
||||
p = subprocess.Popen(
|
||||
["node", "unjingle/unjingle.js", "--sdp"],
|
||||
stdin=subprocess.PIPE,
|
||||
stdout=subprocess.PIPE,
|
||||
)
|
||||
jingle, out_err = p.communicate(answer)
|
||||
jingle = jingle % {
|
||||
"tojid": self.callfrom,
|
||||
"action": "session-accept",
|
||||
"initiator": self.callfrom,
|
||||
"responder": self.jid,
|
||||
"sid": self.callsid,
|
||||
}
|
||||
print("answer jingle from sdp", jingle)
|
||||
res = self.sendIq(jingle)
|
||||
print("reply from answer: ", res)
|
||||
|
||||
self.ssrcs = {}
|
||||
jingleSoup = BeautifulSoup(jingle)
|
||||
for cont in jingleSoup.iq.jingle.findAll("content"):
|
||||
if cont.description:
|
||||
self.ssrcs[cont["name"]] = cont.description["ssrc"]
|
||||
print("my ssrcs:", self.ssrcs)
|
||||
|
||||
gevent.joinall([gevent.spawn(self.advertiseSsrcs)])
|
||||
|
||||
def advertiseSsrcs(self):
|
||||
time.sleep(7)
|
||||
print("SSRC spammer started")
|
||||
while self.running:
|
||||
ssrcMsg = (
|
||||
"<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>"
|
||||
% {
|
||||
"tojid": "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid),
|
||||
"nick": self.userId,
|
||||
"assrc": self.ssrcs["audio"],
|
||||
"vssrc": self.ssrcs["video"],
|
||||
}
|
||||
)
|
||||
res = self.sendIq(ssrcMsg)
|
||||
print("reply from ssrc announce: ", res)
|
||||
time.sleep(10)
|
||||
|
||||
def xmppLoop(self):
|
||||
self.matrixCallId = time.time()
|
||||
res = self.xmppPoke(
|
||||
"<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' to='%s' xml:lang='en' wait='60' hold='1' content='text/xml; charset=utf-8' ver='1.6' xmpp:version='1.0' xmlns:xmpp='urn:xmpp:xbosh'/>"
|
||||
% (self.nextRid(), HOST)
|
||||
)
|
||||
|
||||
print(res)
|
||||
self.sid = res.body["sid"]
|
||||
print("sid %s" % (self.sid))
|
||||
|
||||
res = self.sendIq(
|
||||
"<auth xmlns='urn:ietf:params:xml:ns:xmpp-sasl' mechanism='ANONYMOUS'/>"
|
||||
)
|
||||
|
||||
res = self.xmppPoke(
|
||||
"<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' sid='%s' to='%s' xml:lang='en' xmpp:restart='true' xmlns:xmpp='urn:xmpp:xbosh'/>"
|
||||
% (self.nextRid(), self.sid, HOST)
|
||||
)
|
||||
|
||||
res = self.sendIq(
|
||||
"<iq type='set' id='_bind_auth_2' xmlns='jabber:client'><bind xmlns='urn:ietf:params:xml:ns:xmpp-bind'/></iq>"
|
||||
)
|
||||
print(res)
|
||||
|
||||
self.jid = res.body.iq.bind.jid.string
|
||||
print("jid: %s" % (self.jid))
|
||||
self.shortJid = self.jid.split("-")[0]
|
||||
|
||||
res = self.sendIq(
|
||||
"<iq type='set' id='_session_auth_2' xmlns='jabber:client'><session xmlns='urn:ietf:params:xml:ns:xmpp-session'/></iq>"
|
||||
)
|
||||
|
||||
# randomthing = res.body.iq['to']
|
||||
# whatsitpart = randomthing.split('-')[0]
|
||||
|
||||
# print "other random bind thing: %s" % (randomthing)
|
||||
|
||||
# advertise preence to the jitsi room, with our nick
|
||||
res = self.sendIq(
|
||||
"<iq type='get' to='%s' xmlns='jabber:client' id='1:sendIQ'><services xmlns='urn:xmpp:extdisco:1'><service host='%s'/></services></iq><presence to='%s@%s/d98f6c40' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%s</nick></presence>"
|
||||
% (HOST, TURNSERVER, ROOMNAME, ROOMDOMAIN, self.userId)
|
||||
)
|
||||
self.muc = {"users": []}
|
||||
for p in res.body.findAll("presence"):
|
||||
u = {}
|
||||
u["shortJid"] = p["from"].split("/")[1]
|
||||
if p.c and p.c.nick:
|
||||
u["nick"] = p.c.nick.string
|
||||
self.muc["users"].append(u)
|
||||
print("muc: ", self.muc)
|
||||
|
||||
# wait for stuff
|
||||
while True:
|
||||
print("waiting...")
|
||||
res = self.sendIq("")
|
||||
print("got from stream: ", res)
|
||||
if res.body.iq:
|
||||
jingles = res.body.iq.findAll("jingle")
|
||||
if len(jingles):
|
||||
self.callfrom = res.body.iq["from"]
|
||||
self.handleInvite(jingles[0])
|
||||
elif "type" in res.body and res.body["type"] == "terminate":
|
||||
self.running = False
|
||||
del xmppClients[self.matrixRoom]
|
||||
return
|
||||
|
||||
def handleInvite(self, jingle):
|
||||
self.initiator = jingle["initiator"]
|
||||
self.callsid = jingle["sid"]
|
||||
p = subprocess.Popen(
|
||||
["node", "unjingle/unjingle.js", "--jingle"],
|
||||
stdin=subprocess.PIPE,
|
||||
stdout=subprocess.PIPE,
|
||||
)
|
||||
print("raw jingle invite", str(jingle))
|
||||
sdp, out_err = p.communicate(str(jingle))
|
||||
print("transformed remote offer sdp", sdp)
|
||||
inviteEvent = {
|
||||
"offer": {"type": "offer", "sdp": sdp},
|
||||
"call_id": self.matrixCallId,
|
||||
"version": 0,
|
||||
"lifetime": 30000,
|
||||
}
|
||||
matrixCli.sendEvent(self.matrixRoom, "m.call.invite", inviteEvent)
|
||||
|
||||
|
||||
matrixCli = TrivialMatrixClient(ACCESS_TOKEN) # Undefined name
|
||||
|
||||
gevent.joinall([gevent.spawn(matrixLoop)])
|
||||
@@ -1,188 +0,0 @@
|
||||
diff --git a/syweb/webclient/app/components/matrix/matrix-call.js b/syweb/webclient/app/components/matrix/matrix-call.js
|
||||
index 9fbfff0..dc68077 100644
|
||||
--- a/syweb/webclient/app/components/matrix/matrix-call.js
|
||||
+++ b/syweb/webclient/app/components/matrix/matrix-call.js
|
||||
@@ -16,6 +16,45 @@ limitations under the License.
|
||||
|
||||
'use strict';
|
||||
|
||||
+
|
||||
+function sendKeyframe(pc) {
|
||||
+ console.log('sendkeyframe', pc.iceConnectionState);
|
||||
+ if (pc.iceConnectionState !== 'connected') return; // safe...
|
||||
+ pc.setRemoteDescription(
|
||||
+ pc.remoteDescription,
|
||||
+ function () {
|
||||
+ pc.createAnswer(
|
||||
+ function (modifiedAnswer) {
|
||||
+ pc.setLocalDescription(
|
||||
+ modifiedAnswer,
|
||||
+ function () {
|
||||
+ // noop
|
||||
+ },
|
||||
+ function (error) {
|
||||
+ console.log('triggerKeyframe setLocalDescription failed', error);
|
||||
+ messageHandler.showError();
|
||||
+ }
|
||||
+ );
|
||||
+ },
|
||||
+ function (error) {
|
||||
+ console.log('triggerKeyframe createAnswer failed', error);
|
||||
+ messageHandler.showError();
|
||||
+ }
|
||||
+ );
|
||||
+ },
|
||||
+ function (error) {
|
||||
+ console.log('triggerKeyframe setRemoteDescription failed', error);
|
||||
+ messageHandler.showError();
|
||||
+ }
|
||||
+ );
|
||||
+}
|
||||
+
|
||||
+
|
||||
+
|
||||
+
|
||||
+
|
||||
+
|
||||
+
|
||||
var forAllVideoTracksOnStream = function(s, f) {
|
||||
var tracks = s.getVideoTracks();
|
||||
for (var i = 0; i < tracks.length; i++) {
|
||||
@@ -83,7 +122,7 @@ angular.module('MatrixCall', [])
|
||||
}
|
||||
|
||||
// FIXME: we should prevent any calls from being placed or accepted before this has finished
|
||||
- MatrixCall.getTurnServer();
|
||||
+ //MatrixCall.getTurnServer();
|
||||
|
||||
MatrixCall.CALL_TIMEOUT = 60000;
|
||||
MatrixCall.FALLBACK_STUN_SERVER = 'stun:stun.l.google.com:19302';
|
||||
@@ -132,6 +171,22 @@ angular.module('MatrixCall', [])
|
||||
pc.onsignalingstatechange = function() { self.onSignallingStateChanged(); };
|
||||
pc.onicecandidate = function(c) { self.gotLocalIceCandidate(c); };
|
||||
pc.onaddstream = function(s) { self.onAddStream(s); };
|
||||
+
|
||||
+ var datachan = pc.createDataChannel('RTCDataChannel', {
|
||||
+ reliable: false
|
||||
+ });
|
||||
+ console.log("data chan: "+datachan);
|
||||
+ datachan.onopen = function() {
|
||||
+ console.log("data channel open");
|
||||
+ };
|
||||
+ datachan.onmessage = function() {
|
||||
+ console.log("data channel message");
|
||||
+ };
|
||||
+ pc.ondatachannel = function(event) {
|
||||
+ console.log("have data channel");
|
||||
+ event.channel.binaryType = 'blob';
|
||||
+ };
|
||||
+
|
||||
return pc;
|
||||
}
|
||||
|
||||
@@ -200,6 +255,12 @@ angular.module('MatrixCall', [])
|
||||
}, this.msg.lifetime - event.age);
|
||||
};
|
||||
|
||||
+ MatrixCall.prototype.receivedInvite = function(event) {
|
||||
+ console.log("Got second invite for call "+this.call_id);
|
||||
+ this.peerConn.setRemoteDescription(new RTCSessionDescription(this.msg.offer), this.onSetRemoteDescriptionSuccess, this.onSetRemoteDescriptionError);
|
||||
+ };
|
||||
+
|
||||
+
|
||||
// perverse as it may seem, sometimes we want to instantiate a call with a hangup message
|
||||
// (because when getting the state of the room on load, events come in reverse order and
|
||||
// we want to remember that a call has been hung up)
|
||||
@@ -349,7 +410,7 @@ angular.module('MatrixCall', [])
|
||||
'mandatory': {
|
||||
'OfferToReceiveAudio': true,
|
||||
'OfferToReceiveVideo': this.type == 'video'
|
||||
- },
|
||||
+ }
|
||||
};
|
||||
this.peerConn.createAnswer(function(d) { self.createdAnswer(d); }, function(e) {}, constraints);
|
||||
// This can't be in an apply() because it's called by a predecessor call under glare conditions :(
|
||||
@@ -359,8 +420,20 @@ angular.module('MatrixCall', [])
|
||||
MatrixCall.prototype.gotLocalIceCandidate = function(event) {
|
||||
if (event.candidate) {
|
||||
console.log("Got local ICE "+event.candidate.sdpMid+" candidate: "+event.candidate.candidate);
|
||||
- this.sendCandidate(event.candidate);
|
||||
- }
|
||||
+ //this.sendCandidate(event.candidate);
|
||||
+ } else {
|
||||
+ console.log("have all candidates, sending answer");
|
||||
+ var content = {
|
||||
+ version: 0,
|
||||
+ call_id: this.call_id,
|
||||
+ answer: this.peerConn.localDescription
|
||||
+ };
|
||||
+ this.sendEventWithRetry('m.call.answer', content);
|
||||
+ var self = this;
|
||||
+ $rootScope.$apply(function() {
|
||||
+ self.state = 'connecting';
|
||||
+ });
|
||||
+ }
|
||||
}
|
||||
|
||||
MatrixCall.prototype.gotRemoteIceCandidate = function(cand) {
|
||||
@@ -418,15 +491,6 @@ angular.module('MatrixCall', [])
|
||||
console.log("Created answer: "+description);
|
||||
var self = this;
|
||||
this.peerConn.setLocalDescription(description, function() {
|
||||
- var content = {
|
||||
- version: 0,
|
||||
- call_id: self.call_id,
|
||||
- answer: self.peerConn.localDescription
|
||||
- };
|
||||
- self.sendEventWithRetry('m.call.answer', content);
|
||||
- $rootScope.$apply(function() {
|
||||
- self.state = 'connecting';
|
||||
- });
|
||||
}, function() { console.log("Error setting local description!"); } );
|
||||
};
|
||||
|
||||
@@ -448,6 +512,9 @@ angular.module('MatrixCall', [])
|
||||
$rootScope.$apply(function() {
|
||||
self.state = 'connected';
|
||||
self.didConnect = true;
|
||||
+ /*$timeout(function() {
|
||||
+ sendKeyframe(self.peerConn);
|
||||
+ }, 1000);*/
|
||||
});
|
||||
} else if (this.peerConn.iceConnectionState == 'failed') {
|
||||
this.hangup('ice_failed');
|
||||
@@ -518,6 +585,7 @@ angular.module('MatrixCall', [])
|
||||
|
||||
MatrixCall.prototype.onRemoteStreamEnded = function(event) {
|
||||
console.log("Remote stream ended");
|
||||
+ return;
|
||||
var self = this;
|
||||
$rootScope.$apply(function() {
|
||||
self.state = 'ended';
|
||||
diff --git a/syweb/webclient/app/components/matrix/matrix-phone-service.js b/syweb/webclient/app/components/matrix/matrix-phone-service.js
|
||||
index 55dbbf5..272fa27 100644
|
||||
--- a/syweb/webclient/app/components/matrix/matrix-phone-service.js
|
||||
+++ b/syweb/webclient/app/components/matrix/matrix-phone-service.js
|
||||
@@ -48,6 +48,13 @@ angular.module('matrixPhoneService', [])
|
||||
return;
|
||||
}
|
||||
|
||||
+ // do we already have an entry for this call ID?
|
||||
+ var existingEntry = matrixPhoneService.allCalls[msg.call_id];
|
||||
+ if (existingEntry) {
|
||||
+ existingEntry.receivedInvite(msg);
|
||||
+ return;
|
||||
+ }
|
||||
+
|
||||
var call = undefined;
|
||||
if (!isLive) {
|
||||
// if this event wasn't live then this call may already be over
|
||||
@@ -108,7 +115,7 @@ angular.module('matrixPhoneService', [])
|
||||
call.hangup();
|
||||
}
|
||||
} else {
|
||||
- $rootScope.$broadcast(matrixPhoneService.INCOMING_CALL_EVENT, call);
|
||||
+ $rootScope.$broadcast(matrixPhoneService.INCOMING_CALL_EVENT, call);
|
||||
}
|
||||
} else if (event.type == 'm.call.answer') {
|
||||
var call = matrixPhoneService.allCalls[msg.call_id];
|
||||
@@ -1,712 +0,0 @@
|
||||
/* jshint -W117 */
|
||||
// SDP STUFF
|
||||
function SDP(sdp) {
|
||||
this.media = sdp.split('\r\nm=');
|
||||
for (var i = 1; i < this.media.length; i++) {
|
||||
this.media[i] = 'm=' + this.media[i];
|
||||
if (i != this.media.length - 1) {
|
||||
this.media[i] += '\r\n';
|
||||
}
|
||||
}
|
||||
this.session = this.media.shift() + '\r\n';
|
||||
this.raw = this.session + this.media.join('');
|
||||
}
|
||||
|
||||
exports.SDP = SDP;
|
||||
|
||||
var jsdom = require("jsdom");
|
||||
var window = jsdom.jsdom().parentWindow;
|
||||
var $ = require('jquery')(window);
|
||||
|
||||
var SDPUtil = require('./strophe.jingle.sdp.util.js').SDPUtil;
|
||||
|
||||
/**
|
||||
* Returns map of MediaChannel mapped per channel idx.
|
||||
*/
|
||||
SDP.prototype.getMediaSsrcMap = function() {
|
||||
var self = this;
|
||||
var media_ssrcs = {};
|
||||
for (channelNum = 0; channelNum < self.media.length; channelNum++) {
|
||||
modified = true;
|
||||
tmp = SDPUtil.find_lines(self.media[channelNum], 'a=ssrc:');
|
||||
var type = SDPUtil.parse_mid(SDPUtil.find_line(self.media[channelNum], 'a=mid:'));
|
||||
var channel = new MediaChannel(channelNum, type);
|
||||
media_ssrcs[channelNum] = channel;
|
||||
tmp.forEach(function (line) {
|
||||
var linessrc = line.substring(7).split(' ')[0];
|
||||
// allocate new ChannelSsrc
|
||||
if(!channel.ssrcs[linessrc]) {
|
||||
channel.ssrcs[linessrc] = new ChannelSsrc(linessrc, type);
|
||||
}
|
||||
channel.ssrcs[linessrc].lines.push(line);
|
||||
});
|
||||
tmp = SDPUtil.find_lines(self.media[channelNum], 'a=ssrc-group:');
|
||||
tmp.forEach(function(line){
|
||||
var semantics = line.substr(0, idx).substr(13);
|
||||
var ssrcs = line.substr(14 + semantics.length).split(' ');
|
||||
if (ssrcs.length != 0) {
|
||||
var ssrcGroup = new ChannelSsrcGroup(semantics, ssrcs);
|
||||
channel.ssrcGroups.push(ssrcGroup);
|
||||
}
|
||||
});
|
||||
}
|
||||
return media_ssrcs;
|
||||
};
|
||||
/**
|
||||
* Returns <tt>true</tt> if this SDP contains given SSRC.
|
||||
* @param ssrc the ssrc to check.
|
||||
* @returns {boolean} <tt>true</tt> if this SDP contains given SSRC.
|
||||
*/
|
||||
SDP.prototype.containsSSRC = function(ssrc) {
|
||||
var channels = this.getMediaSsrcMap();
|
||||
var contains = false;
|
||||
Object.keys(channels).forEach(function(chNumber){
|
||||
var channel = channels[chNumber];
|
||||
//console.log("Check", channel, ssrc);
|
||||
if(Object.keys(channel.ssrcs).indexOf(ssrc) != -1){
|
||||
contains = true;
|
||||
}
|
||||
});
|
||||
return contains;
|
||||
};
|
||||
|
||||
/**
|
||||
* Returns map of MediaChannel that contains only media not contained in <tt>otherSdp</tt>. Mapped by channel idx.
|
||||
* @param otherSdp the other SDP to check ssrc with.
|
||||
*/
|
||||
SDP.prototype.getNewMedia = function(otherSdp) {
|
||||
|
||||
// this could be useful in Array.prototype.
|
||||
function arrayEquals(array) {
|
||||
// if the other array is a falsy value, return
|
||||
if (!array)
|
||||
return false;
|
||||
|
||||
// compare lengths - can save a lot of time
|
||||
if (this.length != array.length)
|
||||
return false;
|
||||
|
||||
for (var i = 0, l=this.length; i < l; i++) {
|
||||
// Check if we have nested arrays
|
||||
if (this[i] instanceof Array && array[i] instanceof Array) {
|
||||
// recurse into the nested arrays
|
||||
if (!this[i].equals(array[i]))
|
||||
return false;
|
||||
}
|
||||
else if (this[i] != array[i]) {
|
||||
// Warning - two different object instances will never be equal: {x:20} != {x:20}
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
var myMedia = this.getMediaSsrcMap();
|
||||
var othersMedia = otherSdp.getMediaSsrcMap();
|
||||
var newMedia = {};
|
||||
Object.keys(othersMedia).forEach(function(channelNum) {
|
||||
var myChannel = myMedia[channelNum];
|
||||
var othersChannel = othersMedia[channelNum];
|
||||
if(!myChannel && othersChannel) {
|
||||
// Add whole channel
|
||||
newMedia[channelNum] = othersChannel;
|
||||
return;
|
||||
}
|
||||
// Look for new ssrcs accross the channel
|
||||
Object.keys(othersChannel.ssrcs).forEach(function(ssrc) {
|
||||
if(Object.keys(myChannel.ssrcs).indexOf(ssrc) === -1) {
|
||||
// Allocate channel if we've found ssrc that doesn't exist in our channel
|
||||
if(!newMedia[channelNum]){
|
||||
newMedia[channelNum] = new MediaChannel(othersChannel.chNumber, othersChannel.mediaType);
|
||||
}
|
||||
newMedia[channelNum].ssrcs[ssrc] = othersChannel.ssrcs[ssrc];
|
||||
}
|
||||
});
|
||||
|
||||
// Look for new ssrc groups across the channels
|
||||
othersChannel.ssrcGroups.forEach(function(otherSsrcGroup){
|
||||
|
||||
// try to match the other ssrc-group with an ssrc-group of ours
|
||||
var matched = false;
|
||||
for (var i = 0; i < myChannel.ssrcGroups.length; i++) {
|
||||
var mySsrcGroup = myChannel.ssrcGroups[i];
|
||||
if (otherSsrcGroup.semantics == mySsrcGroup.semantics
|
||||
&& arrayEquals.apply(otherSsrcGroup.ssrcs, [mySsrcGroup.ssrcs])) {
|
||||
|
||||
matched = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!matched) {
|
||||
// Allocate channel if we've found an ssrc-group that doesn't
|
||||
// exist in our channel
|
||||
|
||||
if(!newMedia[channelNum]){
|
||||
newMedia[channelNum] = new MediaChannel(othersChannel.chNumber, othersChannel.mediaType);
|
||||
}
|
||||
newMedia[channelNum].ssrcGroups.push(otherSsrcGroup);
|
||||
}
|
||||
});
|
||||
});
|
||||
return newMedia;
|
||||
};
|
||||
|
||||
// remove iSAC and CN from SDP
|
||||
SDP.prototype.mangle = function () {
|
||||
var i, j, mline, lines, rtpmap, newdesc;
|
||||
for (i = 0; i < this.media.length; i++) {
|
||||
lines = this.media[i].split('\r\n');
|
||||
lines.pop(); // remove empty last element
|
||||
mline = SDPUtil.parse_mline(lines.shift());
|
||||
if (mline.media != 'audio')
|
||||
continue;
|
||||
newdesc = '';
|
||||
mline.fmt.length = 0;
|
||||
for (j = 0; j < lines.length; j++) {
|
||||
if (lines[j].substr(0, 9) == 'a=rtpmap:') {
|
||||
rtpmap = SDPUtil.parse_rtpmap(lines[j]);
|
||||
if (rtpmap.name == 'CN' || rtpmap.name == 'ISAC')
|
||||
continue;
|
||||
mline.fmt.push(rtpmap.id);
|
||||
newdesc += lines[j] + '\r\n';
|
||||
} else {
|
||||
newdesc += lines[j] + '\r\n';
|
||||
}
|
||||
}
|
||||
this.media[i] = SDPUtil.build_mline(mline) + '\r\n';
|
||||
this.media[i] += newdesc;
|
||||
}
|
||||
this.raw = this.session + this.media.join('');
|
||||
};
|
||||
|
||||
// remove lines matching prefix from session section
|
||||
SDP.prototype.removeSessionLines = function(prefix) {
|
||||
var self = this;
|
||||
var lines = SDPUtil.find_lines(this.session, prefix);
|
||||
lines.forEach(function(line) {
|
||||
self.session = self.session.replace(line + '\r\n', '');
|
||||
});
|
||||
this.raw = this.session + this.media.join('');
|
||||
return lines;
|
||||
}
|
||||
// remove lines matching prefix from a media section specified by mediaindex
|
||||
// TODO: non-numeric mediaindex could match mid
|
||||
SDP.prototype.removeMediaLines = function(mediaindex, prefix) {
|
||||
var self = this;
|
||||
var lines = SDPUtil.find_lines(this.media[mediaindex], prefix);
|
||||
lines.forEach(function(line) {
|
||||
self.media[mediaindex] = self.media[mediaindex].replace(line + '\r\n', '');
|
||||
});
|
||||
this.raw = this.session + this.media.join('');
|
||||
return lines;
|
||||
}
|
||||
|
||||
// add content's to a jingle element
|
||||
SDP.prototype.toJingle = function (elem, thecreator) {
|
||||
var i, j, k, mline, ssrc, rtpmap, tmp, line, lines;
|
||||
var self = this;
|
||||
// new bundle plan
|
||||
if (SDPUtil.find_line(this.session, 'a=group:')) {
|
||||
lines = SDPUtil.find_lines(this.session, 'a=group:');
|
||||
for (i = 0; i < lines.length; i++) {
|
||||
tmp = lines[i].split(' ');
|
||||
var semantics = tmp.shift().substr(8);
|
||||
elem.c('group', {xmlns: 'urn:xmpp:jingle:apps:grouping:0', semantics:semantics});
|
||||
for (j = 0; j < tmp.length; j++) {
|
||||
elem.c('content', {name: tmp[j]}).up();
|
||||
}
|
||||
elem.up();
|
||||
}
|
||||
}
|
||||
// old bundle plan, to be removed
|
||||
var bundle = [];
|
||||
if (SDPUtil.find_line(this.session, 'a=group:BUNDLE')) {
|
||||
bundle = SDPUtil.find_line(this.session, 'a=group:BUNDLE ').split(' ');
|
||||
bundle.shift();
|
||||
}
|
||||
for (i = 0; i < this.media.length; i++) {
|
||||
mline = SDPUtil.parse_mline(this.media[i].split('\r\n')[0]);
|
||||
if (!(mline.media === 'audio' ||
|
||||
mline.media === 'video' ||
|
||||
mline.media === 'application'))
|
||||
{
|
||||
continue;
|
||||
}
|
||||
if (SDPUtil.find_line(this.media[i], 'a=ssrc:')) {
|
||||
ssrc = SDPUtil.find_line(this.media[i], 'a=ssrc:').substring(7).split(' ')[0]; // take the first
|
||||
} else {
|
||||
ssrc = false;
|
||||
}
|
||||
|
||||
elem.c('content', {creator: thecreator, name: mline.media});
|
||||
if (SDPUtil.find_line(this.media[i], 'a=mid:')) {
|
||||
// prefer identifier from a=mid if present
|
||||
var mid = SDPUtil.parse_mid(SDPUtil.find_line(this.media[i], 'a=mid:'));
|
||||
elem.attrs({ name: mid });
|
||||
|
||||
// old BUNDLE plan, to be removed
|
||||
if (bundle.indexOf(mid) !== -1) {
|
||||
elem.c('bundle', {xmlns: 'http://estos.de/ns/bundle'}).up();
|
||||
bundle.splice(bundle.indexOf(mid), 1);
|
||||
}
|
||||
}
|
||||
|
||||
if (SDPUtil.find_line(this.media[i], 'a=rtpmap:').length)
|
||||
{
|
||||
elem.c('description',
|
||||
{xmlns: 'urn:xmpp:jingle:apps:rtp:1',
|
||||
media: mline.media });
|
||||
if (ssrc) {
|
||||
elem.attrs({ssrc: ssrc});
|
||||
}
|
||||
for (j = 0; j < mline.fmt.length; j++) {
|
||||
rtpmap = SDPUtil.find_line(this.media[i], 'a=rtpmap:' + mline.fmt[j]);
|
||||
elem.c('payload-type', SDPUtil.parse_rtpmap(rtpmap));
|
||||
// put any 'a=fmtp:' + mline.fmt[j] lines into <param name=foo value=bar/>
|
||||
if (SDPUtil.find_line(this.media[i], 'a=fmtp:' + mline.fmt[j])) {
|
||||
tmp = SDPUtil.parse_fmtp(SDPUtil.find_line(this.media[i], 'a=fmtp:' + mline.fmt[j]));
|
||||
for (k = 0; k < tmp.length; k++) {
|
||||
elem.c('parameter', tmp[k]).up();
|
||||
}
|
||||
}
|
||||
this.RtcpFbToJingle(i, elem, mline.fmt[j]); // XEP-0293 -- map a=rtcp-fb
|
||||
|
||||
elem.up();
|
||||
}
|
||||
if (SDPUtil.find_line(this.media[i], 'a=crypto:', this.session)) {
|
||||
elem.c('encryption', {required: 1});
|
||||
var crypto = SDPUtil.find_lines(this.media[i], 'a=crypto:', this.session);
|
||||
crypto.forEach(function(line) {
|
||||
elem.c('crypto', SDPUtil.parse_crypto(line)).up();
|
||||
});
|
||||
elem.up(); // end of encryption
|
||||
}
|
||||
|
||||
if (ssrc) {
|
||||
// new style mapping
|
||||
elem.c('source', { ssrc: ssrc, xmlns: 'urn:xmpp:jingle:apps:rtp:ssma:0' });
|
||||
// FIXME: group by ssrc and support multiple different ssrcs
|
||||
var ssrclines = SDPUtil.find_lines(this.media[i], 'a=ssrc:');
|
||||
ssrclines.forEach(function(line) {
|
||||
idx = line.indexOf(' ');
|
||||
var linessrc = line.substr(0, idx).substr(7);
|
||||
if (linessrc != ssrc) {
|
||||
elem.up();
|
||||
ssrc = linessrc;
|
||||
elem.c('source', { ssrc: ssrc, xmlns: 'urn:xmpp:jingle:apps:rtp:ssma:0' });
|
||||
}
|
||||
var kv = line.substr(idx + 1);
|
||||
elem.c('parameter');
|
||||
if (kv.indexOf(':') == -1) {
|
||||
elem.attrs({ name: kv });
|
||||
} else {
|
||||
elem.attrs({ name: kv.split(':', 2)[0] });
|
||||
elem.attrs({ value: kv.split(':', 2)[1] });
|
||||
}
|
||||
elem.up();
|
||||
});
|
||||
elem.up();
|
||||
|
||||
// old proprietary mapping, to be removed at some point
|
||||
tmp = SDPUtil.parse_ssrc(this.media[i]);
|
||||
tmp.xmlns = 'http://estos.de/ns/ssrc';
|
||||
tmp.ssrc = ssrc;
|
||||
elem.c('ssrc', tmp).up(); // ssrc is part of description
|
||||
|
||||
// XEP-0339 handle ssrc-group attributes
|
||||
var ssrc_group_lines = SDPUtil.find_lines(this.media[i], 'a=ssrc-group:');
|
||||
ssrc_group_lines.forEach(function(line) {
|
||||
idx = line.indexOf(' ');
|
||||
var semantics = line.substr(0, idx).substr(13);
|
||||
var ssrcs = line.substr(14 + semantics.length).split(' ');
|
||||
if (ssrcs.length != 0) {
|
||||
elem.c('ssrc-group', { semantics: semantics, xmlns: 'urn:xmpp:jingle:apps:rtp:ssma:0' });
|
||||
ssrcs.forEach(function(ssrc) {
|
||||
elem.c('source', { ssrc: ssrc })
|
||||
.up();
|
||||
});
|
||||
elem.up();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
if (SDPUtil.find_line(this.media[i], 'a=rtcp-mux')) {
|
||||
elem.c('rtcp-mux').up();
|
||||
}
|
||||
|
||||
// XEP-0293 -- map a=rtcp-fb:*
|
||||
this.RtcpFbToJingle(i, elem, '*');
|
||||
|
||||
// XEP-0294
|
||||
if (SDPUtil.find_line(this.media[i], 'a=extmap:')) {
|
||||
lines = SDPUtil.find_lines(this.media[i], 'a=extmap:');
|
||||
for (j = 0; j < lines.length; j++) {
|
||||
tmp = SDPUtil.parse_extmap(lines[j]);
|
||||
elem.c('rtp-hdrext', { xmlns: 'urn:xmpp:jingle:apps:rtp:rtp-hdrext:0',
|
||||
uri: tmp.uri,
|
||||
id: tmp.value });
|
||||
if (tmp.hasOwnProperty('direction')) {
|
||||
switch (tmp.direction) {
|
||||
case 'sendonly':
|
||||
elem.attrs({senders: 'responder'});
|
||||
break;
|
||||
case 'recvonly':
|
||||
elem.attrs({senders: 'initiator'});
|
||||
break;
|
||||
case 'sendrecv':
|
||||
elem.attrs({senders: 'both'});
|
||||
break;
|
||||
case 'inactive':
|
||||
elem.attrs({senders: 'none'});
|
||||
break;
|
||||
}
|
||||
}
|
||||
// TODO: handle params
|
||||
elem.up();
|
||||
}
|
||||
}
|
||||
elem.up(); // end of description
|
||||
}
|
||||
|
||||
// map ice-ufrag/pwd, dtls fingerprint, candidates
|
||||
this.TransportToJingle(i, elem);
|
||||
|
||||
if (SDPUtil.find_line(this.media[i], 'a=sendrecv', this.session)) {
|
||||
elem.attrs({senders: 'both'});
|
||||
} else if (SDPUtil.find_line(this.media[i], 'a=sendonly', this.session)) {
|
||||
elem.attrs({senders: 'initiator'});
|
||||
} else if (SDPUtil.find_line(this.media[i], 'a=recvonly', this.session)) {
|
||||
elem.attrs({senders: 'responder'});
|
||||
} else if (SDPUtil.find_line(this.media[i], 'a=inactive', this.session)) {
|
||||
elem.attrs({senders: 'none'});
|
||||
}
|
||||
if (mline.port == '0') {
|
||||
// estos hack to reject an m-line
|
||||
elem.attrs({senders: 'rejected'});
|
||||
}
|
||||
elem.up(); // end of content
|
||||
}
|
||||
elem.up();
|
||||
return elem;
|
||||
};
|
||||
|
||||
SDP.prototype.TransportToJingle = function (mediaindex, elem) {
|
||||
var i = mediaindex;
|
||||
var tmp;
|
||||
var self = this;
|
||||
elem.c('transport');
|
||||
|
||||
// XEP-0343 DTLS/SCTP
|
||||
if (SDPUtil.find_line(this.media[mediaindex], 'a=sctpmap:').length)
|
||||
{
|
||||
var sctpmap = SDPUtil.find_line(
|
||||
this.media[i], 'a=sctpmap:', self.session);
|
||||
if (sctpmap)
|
||||
{
|
||||
var sctpAttrs = SDPUtil.parse_sctpmap(sctpmap);
|
||||
elem.c('sctpmap',
|
||||
{
|
||||
xmlns: 'urn:xmpp:jingle:transports:dtls-sctp:1',
|
||||
number: sctpAttrs[0], /* SCTP port */
|
||||
protocol: sctpAttrs[1], /* protocol */
|
||||
});
|
||||
// Optional stream count attribute
|
||||
if (sctpAttrs.length > 2)
|
||||
elem.attrs({ streams: sctpAttrs[2]});
|
||||
elem.up();
|
||||
}
|
||||
}
|
||||
// XEP-0320
|
||||
var fingerprints = SDPUtil.find_lines(this.media[mediaindex], 'a=fingerprint:', this.session);
|
||||
fingerprints.forEach(function(line) {
|
||||
tmp = SDPUtil.parse_fingerprint(line);
|
||||
tmp.xmlns = 'urn:xmpp:jingle:apps:dtls:0';
|
||||
elem.c('fingerprint').t(tmp.fingerprint);
|
||||
delete tmp.fingerprint;
|
||||
line = SDPUtil.find_line(self.media[mediaindex], 'a=setup:', self.session);
|
||||
if (line) {
|
||||
tmp.setup = line.substr(8);
|
||||
}
|
||||
elem.attrs(tmp);
|
||||
elem.up(); // end of fingerprint
|
||||
});
|
||||
tmp = SDPUtil.iceparams(this.media[mediaindex], this.session);
|
||||
if (tmp) {
|
||||
tmp.xmlns = 'urn:xmpp:jingle:transports:ice-udp:1';
|
||||
elem.attrs(tmp);
|
||||
// XEP-0176
|
||||
if (SDPUtil.find_line(this.media[mediaindex], 'a=candidate:', this.session)) { // add any a=candidate lines
|
||||
var lines = SDPUtil.find_lines(this.media[mediaindex], 'a=candidate:', this.session);
|
||||
lines.forEach(function (line) {
|
||||
elem.c('candidate', SDPUtil.candidateToJingle(line)).up();
|
||||
});
|
||||
}
|
||||
}
|
||||
elem.up(); // end of transport
|
||||
}
|
||||
|
||||
SDP.prototype.RtcpFbToJingle = function (mediaindex, elem, payloadtype) { // XEP-0293
|
||||
var lines = SDPUtil.find_lines(this.media[mediaindex], 'a=rtcp-fb:' + payloadtype);
|
||||
lines.forEach(function (line) {
|
||||
var tmp = SDPUtil.parse_rtcpfb(line);
|
||||
if (tmp.type == 'trr-int') {
|
||||
elem.c('rtcp-fb-trr-int', {xmlns: 'urn:xmpp:jingle:apps:rtp:rtcp-fb:0', value: tmp.params[0]});
|
||||
elem.up();
|
||||
} else {
|
||||
elem.c('rtcp-fb', {xmlns: 'urn:xmpp:jingle:apps:rtp:rtcp-fb:0', type: tmp.type});
|
||||
if (tmp.params.length > 0) {
|
||||
elem.attrs({'subtype': tmp.params[0]});
|
||||
}
|
||||
elem.up();
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
SDP.prototype.RtcpFbFromJingle = function (elem, payloadtype) { // XEP-0293
|
||||
var media = '';
|
||||
var tmp = elem.find('>rtcp-fb-trr-int[xmlns="urn:xmpp:jingle:apps:rtp:rtcp-fb:0"]');
|
||||
if (tmp.length) {
|
||||
media += 'a=rtcp-fb:' + '*' + ' ' + 'trr-int' + ' ';
|
||||
if (tmp.attr('value')) {
|
||||
media += tmp.attr('value');
|
||||
} else {
|
||||
media += '0';
|
||||
}
|
||||
media += '\r\n';
|
||||
}
|
||||
tmp = elem.find('>rtcp-fb[xmlns="urn:xmpp:jingle:apps:rtp:rtcp-fb:0"]');
|
||||
tmp.each(function () {
|
||||
media += 'a=rtcp-fb:' + payloadtype + ' ' + $(this).attr('type');
|
||||
if ($(this).attr('subtype')) {
|
||||
media += ' ' + $(this).attr('subtype');
|
||||
}
|
||||
media += '\r\n';
|
||||
});
|
||||
return media;
|
||||
};
|
||||
|
||||
// construct an SDP from a jingle stanza
|
||||
SDP.prototype.fromJingle = function (jingle) {
|
||||
var self = this;
|
||||
this.raw = 'v=0\r\n' +
|
||||
'o=- ' + '1923518516' + ' 2 IN IP4 0.0.0.0\r\n' +// FIXME
|
||||
's=-\r\n' +
|
||||
't=0 0\r\n';
|
||||
// http://tools.ietf.org/html/draft-ietf-mmusic-sdp-bundle-negotiation-04#section-8
|
||||
if ($(jingle).find('>group[xmlns="urn:xmpp:jingle:apps:grouping:0"]').length) {
|
||||
$(jingle).find('>group[xmlns="urn:xmpp:jingle:apps:grouping:0"]').each(function (idx, group) {
|
||||
var contents = $(group).find('>content').map(function (idx, content) {
|
||||
return content.getAttribute('name');
|
||||
}).get();
|
||||
if (contents.length > 0) {
|
||||
self.raw += 'a=group:' + (group.getAttribute('semantics') || group.getAttribute('type')) + ' ' + contents.join(' ') + '\r\n';
|
||||
}
|
||||
});
|
||||
} else if ($(jingle).find('>group[xmlns="urn:ietf:rfc:5888"]').length) {
|
||||
// temporary namespace, not to be used. to be removed soon.
|
||||
$(jingle).find('>group[xmlns="urn:ietf:rfc:5888"]').each(function (idx, group) {
|
||||
var contents = $(group).find('>content').map(function (idx, content) {
|
||||
return content.getAttribute('name');
|
||||
}).get();
|
||||
if (group.getAttribute('type') !== null && contents.length > 0) {
|
||||
self.raw += 'a=group:' + group.getAttribute('type') + ' ' + contents.join(' ') + '\r\n';
|
||||
}
|
||||
});
|
||||
} else {
|
||||
// for backward compability, to be removed soon
|
||||
// assume all contents are in the same bundle group, can be improved upon later
|
||||
var bundle = $(jingle).find('>content').filter(function (idx, content) {
|
||||
//elem.c('bundle', {xmlns:'http://estos.de/ns/bundle'});
|
||||
return $(content).find('>bundle').length > 0;
|
||||
}).map(function (idx, content) {
|
||||
return content.getAttribute('name');
|
||||
}).get();
|
||||
if (bundle.length) {
|
||||
this.raw += 'a=group:BUNDLE ' + bundle.join(' ') + '\r\n';
|
||||
}
|
||||
}
|
||||
|
||||
this.session = this.raw;
|
||||
jingle.find('>content').each(function () {
|
||||
var m = self.jingle2media($(this));
|
||||
self.media.push(m);
|
||||
});
|
||||
|
||||
// reconstruct msid-semantic -- apparently not necessary
|
||||
/*
|
||||
var msid = SDPUtil.parse_ssrc(this.raw);
|
||||
if (msid.hasOwnProperty('mslabel')) {
|
||||
this.session += "a=msid-semantic: WMS " + msid.mslabel + "\r\n";
|
||||
}
|
||||
*/
|
||||
|
||||
this.raw = this.session + this.media.join('');
|
||||
};
|
||||
|
||||
// translate a jingle content element into an an SDP media part
|
||||
SDP.prototype.jingle2media = function (content) {
|
||||
var media = '',
|
||||
desc = content.find('description'),
|
||||
ssrc = desc.attr('ssrc'),
|
||||
self = this,
|
||||
tmp;
|
||||
var sctp = content.find(
|
||||
'>transport>sctpmap[xmlns="urn:xmpp:jingle:transports:dtls-sctp:1"]');
|
||||
|
||||
tmp = { media: desc.attr('media') };
|
||||
tmp.port = '1';
|
||||
if (content.attr('senders') == 'rejected') {
|
||||
// estos hack to reject an m-line.
|
||||
tmp.port = '0';
|
||||
}
|
||||
if (content.find('>transport>fingerprint').length || desc.find('encryption').length) {
|
||||
if (sctp.length)
|
||||
tmp.proto = 'DTLS/SCTP';
|
||||
else
|
||||
tmp.proto = 'RTP/SAVPF';
|
||||
} else {
|
||||
tmp.proto = 'RTP/AVPF';
|
||||
}
|
||||
if (!sctp.length)
|
||||
{
|
||||
tmp.fmt = desc.find('payload-type').map(
|
||||
function () { return this.getAttribute('id'); }).get();
|
||||
media += SDPUtil.build_mline(tmp) + '\r\n';
|
||||
}
|
||||
else
|
||||
{
|
||||
media += 'm=application 1 DTLS/SCTP ' + sctp.attr('number') + '\r\n';
|
||||
media += 'a=sctpmap:' + sctp.attr('number') +
|
||||
' ' + sctp.attr('protocol');
|
||||
|
||||
var streamCount = sctp.attr('streams');
|
||||
if (streamCount)
|
||||
media += ' ' + streamCount + '\r\n';
|
||||
else
|
||||
media += '\r\n';
|
||||
}
|
||||
|
||||
media += 'c=IN IP4 0.0.0.0\r\n';
|
||||
if (!sctp.length)
|
||||
media += 'a=rtcp:1 IN IP4 0.0.0.0\r\n';
|
||||
//tmp = content.find('>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]');
|
||||
tmp = content.find('>bundle>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]');
|
||||
//console.log('transports: '+content.find('>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]').length);
|
||||
//console.log('bundle.transports: '+content.find('>bundle>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]').length);
|
||||
//console.log("tmp fingerprint: "+tmp.find('>fingerprint').innerHTML);
|
||||
if (tmp.length) {
|
||||
if (tmp.attr('ufrag')) {
|
||||
media += SDPUtil.build_iceufrag(tmp.attr('ufrag')) + '\r\n';
|
||||
}
|
||||
if (tmp.attr('pwd')) {
|
||||
media += SDPUtil.build_icepwd(tmp.attr('pwd')) + '\r\n';
|
||||
}
|
||||
tmp.find('>fingerprint').each(function () {
|
||||
// FIXME: check namespace at some point
|
||||
media += 'a=fingerprint:' + this.getAttribute('hash');
|
||||
media += ' ' + $(this).text();
|
||||
media += '\r\n';
|
||||
//console.log("mline "+media);
|
||||
if (this.getAttribute('setup')) {
|
||||
media += 'a=setup:' + this.getAttribute('setup') + '\r\n';
|
||||
}
|
||||
});
|
||||
}
|
||||
switch (content.attr('senders')) {
|
||||
case 'initiator':
|
||||
media += 'a=sendonly\r\n';
|
||||
break;
|
||||
case 'responder':
|
||||
media += 'a=recvonly\r\n';
|
||||
break;
|
||||
case 'none':
|
||||
media += 'a=inactive\r\n';
|
||||
break;
|
||||
case 'both':
|
||||
media += 'a=sendrecv\r\n';
|
||||
break;
|
||||
}
|
||||
media += 'a=mid:' + content.attr('name') + '\r\n';
|
||||
/*if (content.attr('name') == 'video') {
|
||||
media += 'a=x-google-flag:conference' + '\r\n';
|
||||
}*/
|
||||
|
||||
// <description><rtcp-mux/></description>
|
||||
// see http://code.google.com/p/libjingle/issues/detail?id=309 -- no spec though
|
||||
// and http://mail.jabber.org/pipermail/jingle/2011-December/001761.html
|
||||
if (desc.find('rtcp-mux').length) {
|
||||
media += 'a=rtcp-mux\r\n';
|
||||
}
|
||||
|
||||
if (desc.find('encryption').length) {
|
||||
desc.find('encryption>crypto').each(function () {
|
||||
media += 'a=crypto:' + this.getAttribute('tag');
|
||||
media += ' ' + this.getAttribute('crypto-suite');
|
||||
media += ' ' + this.getAttribute('key-params');
|
||||
if (this.getAttribute('session-params')) {
|
||||
media += ' ' + this.getAttribute('session-params');
|
||||
}
|
||||
media += '\r\n';
|
||||
});
|
||||
}
|
||||
desc.find('payload-type').each(function () {
|
||||
media += SDPUtil.build_rtpmap(this) + '\r\n';
|
||||
if ($(this).find('>parameter').length) {
|
||||
media += 'a=fmtp:' + this.getAttribute('id') + ' ';
|
||||
media += $(this).find('parameter').map(function () { return (this.getAttribute('name') ? (this.getAttribute('name') + '=') : '') + this.getAttribute('value'); }).get().join('; ');
|
||||
media += '\r\n';
|
||||
}
|
||||
// xep-0293
|
||||
media += self.RtcpFbFromJingle($(this), this.getAttribute('id'));
|
||||
});
|
||||
|
||||
// xep-0293
|
||||
media += self.RtcpFbFromJingle(desc, '*');
|
||||
|
||||
// xep-0294
|
||||
tmp = desc.find('>rtp-hdrext[xmlns="urn:xmpp:jingle:apps:rtp:rtp-hdrext:0"]');
|
||||
tmp.each(function () {
|
||||
media += 'a=extmap:' + this.getAttribute('id') + ' ' + this.getAttribute('uri') + '\r\n';
|
||||
});
|
||||
|
||||
content.find('>bundle>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]>candidate').each(function () {
|
||||
media += SDPUtil.candidateFromJingle(this);
|
||||
});
|
||||
|
||||
// XEP-0339 handle ssrc-group attributes
|
||||
tmp = content.find('description>ssrc-group[xmlns="urn:xmpp:jingle:apps:rtp:ssma:0"]').each(function() {
|
||||
var semantics = this.getAttribute('semantics');
|
||||
var ssrcs = $(this).find('>source').map(function() {
|
||||
return this.getAttribute('ssrc');
|
||||
}).get();
|
||||
|
||||
if (ssrcs.length != 0) {
|
||||
media += 'a=ssrc-group:' + semantics + ' ' + ssrcs.join(' ') + '\r\n';
|
||||
}
|
||||
});
|
||||
|
||||
tmp = content.find('description>source[xmlns="urn:xmpp:jingle:apps:rtp:ssma:0"]');
|
||||
tmp.each(function () {
|
||||
var ssrc = this.getAttribute('ssrc');
|
||||
$(this).find('>parameter').each(function () {
|
||||
media += 'a=ssrc:' + ssrc + ' ' + this.getAttribute('name');
|
||||
if (this.getAttribute('value') && this.getAttribute('value').length)
|
||||
media += ':' + this.getAttribute('value');
|
||||
media += '\r\n';
|
||||
});
|
||||
});
|
||||
|
||||
if (tmp.length === 0) {
|
||||
// fallback to proprietary mapping of a=ssrc lines
|
||||
tmp = content.find('description>ssrc[xmlns="http://estos.de/ns/ssrc"]');
|
||||
if (tmp.length) {
|
||||
media += 'a=ssrc:' + ssrc + ' cname:' + tmp.attr('cname') + '\r\n';
|
||||
media += 'a=ssrc:' + ssrc + ' msid:' + tmp.attr('msid') + '\r\n';
|
||||
media += 'a=ssrc:' + ssrc + ' mslabel:' + tmp.attr('mslabel') + '\r\n';
|
||||
media += 'a=ssrc:' + ssrc + ' label:' + tmp.attr('label') + '\r\n';
|
||||
}
|
||||
}
|
||||
return media;
|
||||
};
|
||||
|
||||
@@ -1,408 +0,0 @@
|
||||
/**
|
||||
* Contains utility classes used in SDP class.
|
||||
*
|
||||
*/
|
||||
|
||||
/**
|
||||
* Class holds a=ssrc lines and media type a=mid
|
||||
* @param ssrc synchronization source identifier number(a=ssrc lines from SDP)
|
||||
* @param type media type eg. "audio" or "video"(a=mid frm SDP)
|
||||
* @constructor
|
||||
*/
|
||||
function ChannelSsrc(ssrc, type) {
|
||||
this.ssrc = ssrc;
|
||||
this.type = type;
|
||||
this.lines = [];
|
||||
}
|
||||
|
||||
/**
|
||||
* Class holds a=ssrc-group: lines
|
||||
* @param semantics
|
||||
* @param ssrcs
|
||||
* @constructor
|
||||
*/
|
||||
function ChannelSsrcGroup(semantics, ssrcs, line) {
|
||||
this.semantics = semantics;
|
||||
this.ssrcs = ssrcs;
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper class represents media channel. Is a container for ChannelSsrc, holds channel idx and media type.
|
||||
* @param channelNumber channel idx in SDP media array.
|
||||
* @param mediaType media type(a=mid)
|
||||
* @constructor
|
||||
*/
|
||||
function MediaChannel(channelNumber, mediaType) {
|
||||
/**
|
||||
* SDP channel number
|
||||
* @type {*}
|
||||
*/
|
||||
this.chNumber = channelNumber;
|
||||
/**
|
||||
* Channel media type(a=mid)
|
||||
* @type {*}
|
||||
*/
|
||||
this.mediaType = mediaType;
|
||||
/**
|
||||
* The maps of ssrc numbers to ChannelSsrc objects.
|
||||
*/
|
||||
this.ssrcs = {};
|
||||
|
||||
/**
|
||||
* The array of ChannelSsrcGroup objects.
|
||||
* @type {Array}
|
||||
*/
|
||||
this.ssrcGroups = [];
|
||||
}
|
||||
|
||||
SDPUtil = {
|
||||
iceparams: function (mediadesc, sessiondesc) {
|
||||
var data = null;
|
||||
if (SDPUtil.find_line(mediadesc, 'a=ice-ufrag:', sessiondesc) &&
|
||||
SDPUtil.find_line(mediadesc, 'a=ice-pwd:', sessiondesc)) {
|
||||
data = {
|
||||
ufrag: SDPUtil.parse_iceufrag(SDPUtil.find_line(mediadesc, 'a=ice-ufrag:', sessiondesc)),
|
||||
pwd: SDPUtil.parse_icepwd(SDPUtil.find_line(mediadesc, 'a=ice-pwd:', sessiondesc))
|
||||
};
|
||||
}
|
||||
return data;
|
||||
},
|
||||
parse_iceufrag: function (line) {
|
||||
return line.substring(12);
|
||||
},
|
||||
build_iceufrag: function (frag) {
|
||||
return 'a=ice-ufrag:' + frag;
|
||||
},
|
||||
parse_icepwd: function (line) {
|
||||
return line.substring(10);
|
||||
},
|
||||
build_icepwd: function (pwd) {
|
||||
return 'a=ice-pwd:' + pwd;
|
||||
},
|
||||
parse_mid: function (line) {
|
||||
return line.substring(6);
|
||||
},
|
||||
parse_mline: function (line) {
|
||||
var parts = line.substring(2).split(' '),
|
||||
data = {};
|
||||
data.media = parts.shift();
|
||||
data.port = parts.shift();
|
||||
data.proto = parts.shift();
|
||||
if (parts[parts.length - 1] === '') { // trailing whitespace
|
||||
parts.pop();
|
||||
}
|
||||
data.fmt = parts;
|
||||
return data;
|
||||
},
|
||||
build_mline: function (mline) {
|
||||
return 'm=' + mline.media + ' ' + mline.port + ' ' + mline.proto + ' ' + mline.fmt.join(' ');
|
||||
},
|
||||
parse_rtpmap: function (line) {
|
||||
var parts = line.substring(9).split(' '),
|
||||
data = {};
|
||||
data.id = parts.shift();
|
||||
parts = parts[0].split('/');
|
||||
data.name = parts.shift();
|
||||
data.clockrate = parts.shift();
|
||||
data.channels = parts.length ? parts.shift() : '1';
|
||||
return data;
|
||||
},
|
||||
/**
|
||||
* Parses SDP line "a=sctpmap:..." and extracts SCTP port from it.
|
||||
* @param line eg. "a=sctpmap:5000 webrtc-datachannel"
|
||||
* @returns [SCTP port number, protocol, streams]
|
||||
*/
|
||||
parse_sctpmap: function (line)
|
||||
{
|
||||
var parts = line.substring(10).split(' ');
|
||||
var sctpPort = parts[0];
|
||||
var protocol = parts[1];
|
||||
// Stream count is optional
|
||||
var streamCount = parts.length > 2 ? parts[2] : null;
|
||||
return [sctpPort, protocol, streamCount];// SCTP port
|
||||
},
|
||||
build_rtpmap: function (el) {
|
||||
var line = 'a=rtpmap:' + el.getAttribute('id') + ' ' + el.getAttribute('name') + '/' + el.getAttribute('clockrate');
|
||||
if (el.getAttribute('channels') && el.getAttribute('channels') != '1') {
|
||||
line += '/' + el.getAttribute('channels');
|
||||
}
|
||||
return line;
|
||||
},
|
||||
parse_crypto: function (line) {
|
||||
var parts = line.substring(9).split(' '),
|
||||
data = {};
|
||||
data.tag = parts.shift();
|
||||
data['crypto-suite'] = parts.shift();
|
||||
data['key-params'] = parts.shift();
|
||||
if (parts.length) {
|
||||
data['session-params'] = parts.join(' ');
|
||||
}
|
||||
return data;
|
||||
},
|
||||
parse_fingerprint: function (line) { // RFC 4572
|
||||
var parts = line.substring(14).split(' '),
|
||||
data = {};
|
||||
data.hash = parts.shift();
|
||||
data.fingerprint = parts.shift();
|
||||
// TODO assert that fingerprint satisfies 2UHEX *(":" 2UHEX) ?
|
||||
return data;
|
||||
},
|
||||
parse_fmtp: function (line) {
|
||||
var parts = line.split(' '),
|
||||
i, key, value,
|
||||
data = [];
|
||||
parts.shift();
|
||||
parts = parts.join(' ').split(';');
|
||||
for (i = 0; i < parts.length; i++) {
|
||||
key = parts[i].split('=')[0];
|
||||
while (key.length && key[0] == ' ') {
|
||||
key = key.substring(1);
|
||||
}
|
||||
value = parts[i].split('=')[1];
|
||||
if (key && value) {
|
||||
data.push({name: key, value: value});
|
||||
} else if (key) {
|
||||
// rfc 4733 (DTMF) style stuff
|
||||
data.push({name: '', value: key});
|
||||
}
|
||||
}
|
||||
return data;
|
||||
},
|
||||
parse_icecandidate: function (line) {
|
||||
var candidate = {},
|
||||
elems = line.split(' ');
|
||||
candidate.foundation = elems[0].substring(12);
|
||||
candidate.component = elems[1];
|
||||
candidate.protocol = elems[2].toLowerCase();
|
||||
candidate.priority = elems[3];
|
||||
candidate.ip = elems[4];
|
||||
candidate.port = elems[5];
|
||||
// elems[6] => "typ"
|
||||
candidate.type = elems[7];
|
||||
candidate.generation = 0; // default value, may be overwritten below
|
||||
for (var i = 8; i < elems.length; i += 2) {
|
||||
switch (elems[i]) {
|
||||
case 'raddr':
|
||||
candidate['rel-addr'] = elems[i + 1];
|
||||
break;
|
||||
case 'rport':
|
||||
candidate['rel-port'] = elems[i + 1];
|
||||
break;
|
||||
case 'generation':
|
||||
candidate.generation = elems[i + 1];
|
||||
break;
|
||||
case 'tcptype':
|
||||
candidate.tcptype = elems[i + 1];
|
||||
break;
|
||||
default: // TODO
|
||||
console.log('parse_icecandidate not translating "' + elems[i] + '" = "' + elems[i + 1] + '"');
|
||||
}
|
||||
}
|
||||
candidate.network = '1';
|
||||
candidate.id = Math.random().toString(36).substr(2, 10); // not applicable to SDP -- FIXME: should be unique, not just random
|
||||
return candidate;
|
||||
},
|
||||
build_icecandidate: function (cand) {
|
||||
var line = ['a=candidate:' + cand.foundation, cand.component, cand.protocol, cand.priority, cand.ip, cand.port, 'typ', cand.type].join(' ');
|
||||
line += ' ';
|
||||
switch (cand.type) {
|
||||
case 'srflx':
|
||||
case 'prflx':
|
||||
case 'relay':
|
||||
if (cand.hasOwnAttribute('rel-addr') && cand.hasOwnAttribute('rel-port')) {
|
||||
line += 'raddr';
|
||||
line += ' ';
|
||||
line += cand['rel-addr'];
|
||||
line += ' ';
|
||||
line += 'rport';
|
||||
line += ' ';
|
||||
line += cand['rel-port'];
|
||||
line += ' ';
|
||||
}
|
||||
break;
|
||||
}
|
||||
if (cand.hasOwnAttribute('tcptype')) {
|
||||
line += 'tcptype';
|
||||
line += ' ';
|
||||
line += cand.tcptype;
|
||||
line += ' ';
|
||||
}
|
||||
        line += 'generation';
        line += ' ';
        line += cand.hasOwnProperty('generation') ? cand.generation : '0';
        return line;
    },
    parse_ssrc: function (desc) {
        // proprietary mapping of a=ssrc lines
        // TODO: see "Jingle RTP Source Description" by Juberti and P. Thatcher on google docs
        // and parse according to that
        var lines = desc.split('\r\n'),
            data = {};
        for (var i = 0; i < lines.length; i++) {
            if (lines[i].substring(0, 7) == 'a=ssrc:') {
                var idx = lines[i].indexOf(' ');
                data[lines[i].substr(idx + 1).split(':', 2)[0]] = lines[i].substr(idx + 1).split(':', 2)[1];
            }
        }
        return data;
    },
    parse_rtcpfb: function (line) {
        var parts = line.substr(10).split(' ');
        var data = {};
        data.pt = parts.shift();
        data.type = parts.shift();
        data.params = parts;
        return data;
    },
    parse_extmap: function (line) {
        var parts = line.substr(9).split(' ');
        var data = {};
        data.value = parts.shift();
        if (data.value.indexOf('/') != -1) {
            data.direction = data.value.substr(data.value.indexOf('/') + 1);
            data.value = data.value.substr(0, data.value.indexOf('/'));
        } else {
            data.direction = 'both';
        }
        data.uri = parts.shift();
        data.params = parts;
        return data;
    },
    find_line: function (haystack, needle, sessionpart) {
        var lines = haystack.split('\r\n');
        for (var i = 0; i < lines.length; i++) {
            if (lines[i].substring(0, needle.length) == needle) {
                return lines[i];
            }
        }
        if (!sessionpart) {
            return false;
        }
        // search session part
        lines = sessionpart.split('\r\n');
        for (var j = 0; j < lines.length; j++) {
            if (lines[j].substring(0, needle.length) == needle) {
                return lines[j];
            }
        }
        return false;
    },
    find_lines: function (haystack, needle, sessionpart) {
        var lines = haystack.split('\r\n'),
            needles = [];
        for (var i = 0; i < lines.length; i++) {
            if (lines[i].substring(0, needle.length) == needle)
                needles.push(lines[i]);
        }
        if (needles.length || !sessionpart) {
            return needles;
        }
        // search session part
        lines = sessionpart.split('\r\n');
        for (var j = 0; j < lines.length; j++) {
            if (lines[j].substring(0, needle.length) == needle) {
                needles.push(lines[j]);
            }
        }
        return needles;
    },
    candidateToJingle: function (line) {
        // a=candidate:2979166662 1 udp 2113937151 192.168.2.100 57698 typ host generation 0
        // <candidate component=... foundation=... generation=... id=... ip=... network=... port=... priority=... protocol=... type=.../>
        if (line.indexOf('candidate:') === 0) {
            line = 'a=' + line;
        } else if (line.substring(0, 12) != 'a=candidate:') {
            console.log('parseCandidate called with a line that is not a candidate line');
            console.log(line);
            return null;
        }
        if (line.substring(line.length - 2) == '\r\n') // chomp it
            line = line.substring(0, line.length - 2);
        var candidate = {},
            elems = line.split(' '),
            i;
        if (elems[6] != 'typ') {
            console.log('did not find typ in the right place');
            console.log(line);
            return null;
        }
        candidate.foundation = elems[0].substring(12);
        candidate.component = elems[1];
        candidate.protocol = elems[2].toLowerCase();
        candidate.priority = elems[3];
        candidate.ip = elems[4];
        candidate.port = elems[5];
        // elems[6] => "typ"
        candidate.type = elems[7];

        candidate.generation = '0'; // default, may be overwritten below
        for (i = 8; i < elems.length; i += 2) {
            switch (elems[i]) {
            case 'raddr':
                candidate['rel-addr'] = elems[i + 1];
                break;
            case 'rport':
                candidate['rel-port'] = elems[i + 1];
                break;
            case 'generation':
                candidate.generation = elems[i + 1];
                break;
            case 'tcptype':
                candidate.tcptype = elems[i + 1];
                break;
            default: // TODO
                console.log('not translating "' + elems[i] + '" = "' + elems[i + 1] + '"');
            }
        }
        candidate.network = '1';
        candidate.id = Math.random().toString(36).substr(2, 10); // not applicable to SDP -- FIXME: should be unique, not just random
        return candidate;
    },
    candidateFromJingle: function (cand) {
        var line = 'a=candidate:';
        line += cand.getAttribute('foundation');
        line += ' ';
        line += cand.getAttribute('component');
        line += ' ';
        line += cand.getAttribute('protocol'); //.toUpperCase(); // chrome M23 doesn't like this
        line += ' ';
        line += cand.getAttribute('priority');
        line += ' ';
        line += cand.getAttribute('ip');
        line += ' ';
        line += cand.getAttribute('port');
        line += ' ';
        line += 'typ';
        line += ' ' + cand.getAttribute('type');
        line += ' ';
        switch (cand.getAttribute('type')) {
        case 'srflx':
        case 'prflx':
        case 'relay':
            if (cand.getAttribute('rel-addr') && cand.getAttribute('rel-port')) {
                line += 'raddr';
                line += ' ';
                line += cand.getAttribute('rel-addr');
                line += ' ';
                line += 'rport';
                line += ' ';
                line += cand.getAttribute('rel-port');
                line += ' ';
            }
            break;
        }
        if (cand.getAttribute('protocol').toLowerCase() == 'tcp') {
            line += 'tcptype';
            line += ' ';
            line += cand.getAttribute('tcptype');
            line += ' ';
        }
        line += 'generation';
        line += ' ';
        line += cand.getAttribute('generation') || '0';
        return line + '\r\n';
    }
};

exports.SDPUtil = SDPUtil;
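For orientation, a minimal sketch of how the removed helpers above combine; the require path is hypothetical, and the candidate line is the sample quoted in the `candidateToJingle` comment:

```javascript
// Hedged usage sketch for the removed SDPUtil helpers (path is illustrative only).
var SDPUtil = require('./strophe.jingle.sdp.util.js').SDPUtil;

var sdp = 'v=0\r\n' +
    'a=candidate:2979166662 1 udp 2113937151 192.168.2.100 57698 typ host generation 0\r\n';

// find_line returns the first line starting with the needle, or false.
var line = SDPUtil.find_line(sdp, 'a=candidate:');

// candidateToJingle parses it into the attribute map used for the Jingle <candidate/> element.
var cand = SDPUtil.candidateToJingle(line);
console.log(cand.ip, cand.port, cand.type); // 192.168.2.100 57698 host
```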
@@ -1,254 +0,0 @@
/**
 * Wrapper for built-in http.js to emulate the browser XMLHttpRequest object.
 *
 * This can be used with JS designed for browsers to improve reuse of code and
 * allow the use of existing libraries.
 *
 * Usage: include("XMLHttpRequest.js") and use XMLHttpRequest per W3C specs.
 *
 * @todo SSL Support
 * @author Dan DeFelippi <dan@driverdan.com>
 * @license MIT
 */

var Url = require("url")
    , sys = require("util");

exports.XMLHttpRequest = function() {
    /**
     * Private variables
     */
    var self = this;
    var http = require('http');
    var https = require('https');

    // Holds http.js objects
    var client;
    var request;
    var response;

    // Request settings
    var settings = {};

    // Set some default headers
    var defaultHeaders = {
        "User-Agent": "node.js",
        "Accept": "*/*"
    };

    var headers = defaultHeaders;

    /**
     * Constants
     */
    this.UNSENT = 0;
    this.OPENED = 1;
    this.HEADERS_RECEIVED = 2;
    this.LOADING = 3;
    this.DONE = 4;

    /**
     * Public vars
     */
    // Current state
    this.readyState = this.UNSENT;

    // default ready state change handler in case one is not set or is set late
    this.onreadystatechange = function() {};

    // Result & response
    this.responseText = "";
    this.responseXML = "";
    this.status = null;
    this.statusText = null;

    /**
     * Open the connection. Currently supports local server requests.
     *
     * @param string method Connection method (eg GET, POST)
     * @param string url URL for the connection.
     * @param boolean async Asynchronous connection. Default is true.
     * @param string user Username for basic authentication (optional)
     * @param string password Password for basic authentication (optional)
     */
    this.open = function(method, url, async, user, password) {
        settings = {
            "method": method,
            "url": url,
            "async": async || null,
            "user": user || null,
            "password": password || null
        };

        this.abort();

        setState(this.OPENED);
    };

    /**
     * Sets a header for the request.
     *
     * @param string header Header name
     * @param string value Header value
     */
    this.setRequestHeader = function(header, value) {
        headers[header] = value;
    };

    /**
     * Gets a header from the server response.
     *
     * @param string header Name of header to get.
     * @return string Text of the header or null if it doesn't exist.
     */
    this.getResponseHeader = function(header) {
        if (this.readyState > this.OPENED && response.headers[header]) {
            return header + ": " + response.headers[header];
        }

        return null;
    };

    /**
     * Gets all the response headers.
     *
     * @return string
     */
    this.getAllResponseHeaders = function() {
        if (this.readyState < this.HEADERS_RECEIVED) {
            throw "INVALID_STATE_ERR: Headers have not been received.";
        }
        var result = "";

        for (var i in response.headers) {
            result += i + ": " + response.headers[i] + "\r\n";
        }
        return result.substr(0, result.length - 2);
    };

    /**
     * Sends the request to the server.
     *
     * @param string data Optional data to send as request body.
     */
    this.send = function(data) {
        if (this.readyState != this.OPENED) {
            throw "INVALID_STATE_ERR: connection must be opened before send() is called";
        }

        var ssl = false;
        var url = Url.parse(settings.url);

        // Determine the server
        switch (url.protocol) {
        case 'https:':
            ssl = true;
            // SSL & non-SSL both need host, no break here.
        case 'http:':
            var host = url.hostname;
            break;

        case undefined:
        case '':
            var host = "localhost";
            break;

        default:
            throw "Protocol not supported.";
        }

        // Default to port 80. If accessing localhost on another port be sure
        // to use http://localhost:port/path
        var port = url.port || (ssl ? 443 : 80);
        // Add query string if one is used
        var uri = url.pathname + (url.search ? url.search : '');

        // Set the Host header or the server may reject the request
        this.setRequestHeader("Host", host);

        // Set content length header
        if (settings.method == "GET" || settings.method == "HEAD") {
            data = null;
        } else if (data) {
            this.setRequestHeader("Content-Length", Buffer.byteLength(data));

            if (!headers["Content-Type"]) {
                this.setRequestHeader("Content-Type", "text/plain;charset=UTF-8");
            }
        }

        // Use the proper protocol
        var doRequest = ssl ? https.request : http.request;

        var options = {
            host: host,
            port: port,
            path: uri,
            method: settings.method,
            headers: headers,
            agent: false
        };

        var req = doRequest(options, function(res) {
            response = res;
            response.setEncoding("utf8");

            setState(self.HEADERS_RECEIVED);
            self.status = response.statusCode;

            response.on('data', function(chunk) {
                // Make sure there's some data
                if (chunk) {
                    self.responseText += chunk;
                }
                setState(self.LOADING);
            });

            response.on('end', function() {
                setState(self.DONE);
            });

            response.on('error', function(error) {
                self.handleError(error);
            });
        }).on('error', function(error) {
            self.handleError(error);
        });

        req.setHeader("Connection", "Close");

        // Node 0.4 and later won't accept empty data. Make sure it's needed.
        if (data) {
            req.write(data);
        }

        req.end();
    };

    this.handleError = function(error) {
        this.status = 503;
        this.statusText = error;
        this.responseText = error.stack;
        setState(this.DONE);
    };

    /**
     * Aborts a request.
     */
    this.abort = function() {
        headers = defaultHeaders;
        this.readyState = this.UNSENT;
        this.responseText = "";
        this.responseXML = "";
    };

    /**
     * Changes readyState and calls onreadystatechange.
     *
     * @param int state New state
     */
    var setState = function(state) {
        self.readyState = state;
        self.onreadystatechange();
    };
};
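For reference, a minimal sketch of driving this removed shim the way browser code would; the require path and URL are illustrative assumptions, the API is the W3C XMLHttpRequest surface the header comment names:

```javascript
// Hedged usage sketch for the removed Node XMLHttpRequest shim.
var XMLHttpRequest = require("./XMLHttpRequest.js").XMLHttpRequest;

var xhr = new XMLHttpRequest();
xhr.onreadystatechange = function() {
    // DONE (4) fires once the whole response body has arrived.
    if (xhr.readyState === xhr.DONE) {
        console.log(xhr.status, xhr.responseText);
    }
};
xhr.open("GET", "http://localhost:8008/_matrix/client/versions");
xhr.send();
```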
@@ -1,83 +0,0 @@
// This code was written by Tyler Akins and has been placed in the
// public domain. It would be nice if you left this header intact.
// Base64 code from Tyler Akins -- http://rumkin.com

var Base64 = (function () {
    var keyStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";

    var obj = {
        /**
         * Encodes a string in base64
         * @param {String} input The string to encode in base64.
         */
        encode: function (input) {
            var output = "";
            var chr1, chr2, chr3;
            var enc1, enc2, enc3, enc4;
            var i = 0;

            do {
                chr1 = input.charCodeAt(i++);
                chr2 = input.charCodeAt(i++);
                chr3 = input.charCodeAt(i++);

                enc1 = chr1 >> 2;
                enc2 = ((chr1 & 3) << 4) | (chr2 >> 4);
                enc3 = ((chr2 & 15) << 2) | (chr3 >> 6);
                enc4 = chr3 & 63;

                if (isNaN(chr2)) {
                    enc3 = enc4 = 64;
                } else if (isNaN(chr3)) {
                    enc4 = 64;
                }

                output = output + keyStr.charAt(enc1) + keyStr.charAt(enc2) +
                    keyStr.charAt(enc3) + keyStr.charAt(enc4);
            } while (i < input.length);

            return output;
        },

        /**
         * Decodes a base64 string.
         * @param {String} input The string to decode.
         */
        decode: function (input) {
            var output = "";
            var chr1, chr2, chr3;
            var enc1, enc2, enc3, enc4;
            var i = 0;

            // remove all characters that are not A-Z, a-z, 0-9, +, /, or =
            input = input.replace(/[^A-Za-z0-9\+\/\=]/g, '');

            do {
                enc1 = keyStr.indexOf(input.charAt(i++));
                enc2 = keyStr.indexOf(input.charAt(i++));
                enc3 = keyStr.indexOf(input.charAt(i++));
                enc4 = keyStr.indexOf(input.charAt(i++));

                chr1 = (enc1 << 2) | (enc2 >> 4);
                chr2 = ((enc2 & 15) << 4) | (enc3 >> 2);
                chr3 = ((enc3 & 3) << 6) | enc4;

                output = output + String.fromCharCode(chr1);

                if (enc3 != 64) {
                    output = output + String.fromCharCode(chr2);
                }
                if (enc4 != 64) {
                    output = output + String.fromCharCode(chr3);
                }
            } while (i < input.length);

            return output;
        }
    };

    return obj;
})();

// Nodify
exports.Base64 = Base64;
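A round-trip sketch of the removed helper; per the original comments it only handles byte-range characters, and the require path is illustrative:

```javascript
// Hedged usage sketch for the removed Base64 helper.
var Base64 = require("./base64.js").Base64;

var encoded = Base64.encode("jingle");   // "amluZ2xl"
var decoded = Base64.decode(encoded);    // "jingle"
console.log(encoded, decoded);
```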
@@ -1,279 +0,0 @@
/*
 * A JavaScript implementation of the RSA Data Security, Inc. MD5 Message
 * Digest Algorithm, as defined in RFC 1321.
 * Version 2.1 Copyright (C) Paul Johnston 1999 - 2002.
 * Other contributors: Greg Holt, Andrew Kepert, Ydnar, Lostinet
 * Distributed under the BSD License
 * See http://pajhome.org.uk/crypt/md5 for more info.
 */

var MD5 = (function () {
    /*
     * Configurable variables. You may need to tweak these to be compatible with
     * the server-side, but the defaults work in most cases.
     */
    var hexcase = 0; /* hex output format. 0 - lowercase; 1 - uppercase */
    var b64pad = ""; /* base-64 pad character. "=" for strict RFC compliance */
    var chrsz = 8;   /* bits per input character. 8 - ASCII; 16 - Unicode */

    /*
     * Add integers, wrapping at 2^32. This uses 16-bit operations internally
     * to work around bugs in some JS interpreters.
     */
    var safe_add = function (x, y) {
        var lsw = (x & 0xFFFF) + (y & 0xFFFF);
        var msw = (x >> 16) + (y >> 16) + (lsw >> 16);
        return (msw << 16) | (lsw & 0xFFFF);
    };

    /*
     * Bitwise rotate a 32-bit number to the left.
     */
    var bit_rol = function (num, cnt) {
        return (num << cnt) | (num >>> (32 - cnt));
    };

    /*
     * Convert a string to an array of little-endian words
     * If chrsz is ASCII, characters >255 have their hi-byte silently ignored.
     */
    var str2binl = function (str) {
        var bin = [];
        var mask = (1 << chrsz) - 1;
        for (var i = 0; i < str.length * chrsz; i += chrsz) {
            bin[i >> 5] |= (str.charCodeAt(i / chrsz) & mask) << (i % 32);
        }
        return bin;
    };

    /*
     * Convert an array of little-endian words to a string
     */
    var binl2str = function (bin) {
        var str = "";
        var mask = (1 << chrsz) - 1;
        for (var i = 0; i < bin.length * 32; i += chrsz) {
            str += String.fromCharCode((bin[i >> 5] >>> (i % 32)) & mask);
        }
        return str;
    };

    /*
     * Convert an array of little-endian words to a hex string.
     */
    var binl2hex = function (binarray) {
        var hex_tab = hexcase ? "0123456789ABCDEF" : "0123456789abcdef";
        var str = "";
        for (var i = 0; i < binarray.length * 4; i++) {
            str += hex_tab.charAt((binarray[i >> 2] >> ((i % 4) * 8 + 4)) & 0xF) +
                hex_tab.charAt((binarray[i >> 2] >> ((i % 4) * 8)) & 0xF);
        }
        return str;
    };

    /*
     * Convert an array of little-endian words to a base-64 string
     */
    var binl2b64 = function (binarray) {
        var tab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
        var str = "";
        var triplet, j;
        for (var i = 0; i < binarray.length * 4; i += 3) {
            triplet = (((binarray[i >> 2] >> 8 * (i % 4)) & 0xFF) << 16) |
                (((binarray[i + 1 >> 2] >> 8 * ((i + 1) % 4)) & 0xFF) << 8) |
                ((binarray[i + 2 >> 2] >> 8 * ((i + 2) % 4)) & 0xFF);
            for (j = 0; j < 4; j++) {
                if (i * 8 + j * 6 > binarray.length * 32) { str += b64pad; }
                else { str += tab.charAt((triplet >> 6 * (3 - j)) & 0x3F); }
            }
        }
        return str;
    };

    /*
     * These functions implement the four basic operations the algorithm uses.
     */
    var md5_cmn = function (q, a, b, x, s, t) {
        return safe_add(bit_rol(safe_add(safe_add(a, q), safe_add(x, t)), s), b);
    };

    var md5_ff = function (a, b, c, d, x, s, t) {
        return md5_cmn((b & c) | ((~b) & d), a, b, x, s, t);
    };

    var md5_gg = function (a, b, c, d, x, s, t) {
        return md5_cmn((b & d) | (c & (~d)), a, b, x, s, t);
    };

    var md5_hh = function (a, b, c, d, x, s, t) {
        return md5_cmn(b ^ c ^ d, a, b, x, s, t);
    };

    var md5_ii = function (a, b, c, d, x, s, t) {
        return md5_cmn(c ^ (b | (~d)), a, b, x, s, t);
    };

    /*
     * Calculate the MD5 of an array of little-endian words, and a bit length
     */
    var core_md5 = function (x, len) {
        /* append padding */
        x[len >> 5] |= 0x80 << ((len) % 32);
        x[(((len + 64) >>> 9) << 4) + 14] = len;

        var a = 1732584193;
        var b = -271733879;
        var c = -1732584194;
        var d = 271733878;

        var olda, oldb, oldc, oldd;
        for (var i = 0; i < x.length; i += 16) {
            olda = a;
            oldb = b;
            oldc = c;
            oldd = d;

            a = md5_ff(a, b, c, d, x[i + 0], 7, -680876936);
            d = md5_ff(d, a, b, c, x[i + 1], 12, -389564586);
            c = md5_ff(c, d, a, b, x[i + 2], 17, 606105819);
            b = md5_ff(b, c, d, a, x[i + 3], 22, -1044525330);
            a = md5_ff(a, b, c, d, x[i + 4], 7, -176418897);
            d = md5_ff(d, a, b, c, x[i + 5], 12, 1200080426);
            c = md5_ff(c, d, a, b, x[i + 6], 17, -1473231341);
            b = md5_ff(b, c, d, a, x[i + 7], 22, -45705983);
            a = md5_ff(a, b, c, d, x[i + 8], 7, 1770035416);
            d = md5_ff(d, a, b, c, x[i + 9], 12, -1958414417);
            c = md5_ff(c, d, a, b, x[i + 10], 17, -42063);
            b = md5_ff(b, c, d, a, x[i + 11], 22, -1990404162);
            a = md5_ff(a, b, c, d, x[i + 12], 7, 1804603682);
            d = md5_ff(d, a, b, c, x[i + 13], 12, -40341101);
            c = md5_ff(c, d, a, b, x[i + 14], 17, -1502002290);
            b = md5_ff(b, c, d, a, x[i + 15], 22, 1236535329);

            a = md5_gg(a, b, c, d, x[i + 1], 5, -165796510);
            d = md5_gg(d, a, b, c, x[i + 6], 9, -1069501632);
            c = md5_gg(c, d, a, b, x[i + 11], 14, 643717713);
            b = md5_gg(b, c, d, a, x[i + 0], 20, -373897302);
            a = md5_gg(a, b, c, d, x[i + 5], 5, -701558691);
            d = md5_gg(d, a, b, c, x[i + 10], 9, 38016083);
            c = md5_gg(c, d, a, b, x[i + 15], 14, -660478335);
            b = md5_gg(b, c, d, a, x[i + 4], 20, -405537848);
            a = md5_gg(a, b, c, d, x[i + 9], 5, 568446438);
            d = md5_gg(d, a, b, c, x[i + 14], 9, -1019803690);
            c = md5_gg(c, d, a, b, x[i + 3], 14, -187363961);
            b = md5_gg(b, c, d, a, x[i + 8], 20, 1163531501);
            a = md5_gg(a, b, c, d, x[i + 13], 5, -1444681467);
            d = md5_gg(d, a, b, c, x[i + 2], 9, -51403784);
            c = md5_gg(c, d, a, b, x[i + 7], 14, 1735328473);
            b = md5_gg(b, c, d, a, x[i + 12], 20, -1926607734);

            a = md5_hh(a, b, c, d, x[i + 5], 4, -378558);
            d = md5_hh(d, a, b, c, x[i + 8], 11, -2022574463);
            c = md5_hh(c, d, a, b, x[i + 11], 16, 1839030562);
            b = md5_hh(b, c, d, a, x[i + 14], 23, -35309556);
            a = md5_hh(a, b, c, d, x[i + 1], 4, -1530992060);
            d = md5_hh(d, a, b, c, x[i + 4], 11, 1272893353);
            c = md5_hh(c, d, a, b, x[i + 7], 16, -155497632);
            b = md5_hh(b, c, d, a, x[i + 10], 23, -1094730640);
            a = md5_hh(a, b, c, d, x[i + 13], 4, 681279174);
            d = md5_hh(d, a, b, c, x[i + 0], 11, -358537222);
            c = md5_hh(c, d, a, b, x[i + 3], 16, -722521979);
            b = md5_hh(b, c, d, a, x[i + 6], 23, 76029189);
            a = md5_hh(a, b, c, d, x[i + 9], 4, -640364487);
            d = md5_hh(d, a, b, c, x[i + 12], 11, -421815835);
            c = md5_hh(c, d, a, b, x[i + 15], 16, 530742520);
            b = md5_hh(b, c, d, a, x[i + 2], 23, -995338651);

            a = md5_ii(a, b, c, d, x[i + 0], 6, -198630844);
            d = md5_ii(d, a, b, c, x[i + 7], 10, 1126891415);
            c = md5_ii(c, d, a, b, x[i + 14], 15, -1416354905);
            b = md5_ii(b, c, d, a, x[i + 5], 21, -57434055);
            a = md5_ii(a, b, c, d, x[i + 12], 6, 1700485571);
            d = md5_ii(d, a, b, c, x[i + 3], 10, -1894986606);
            c = md5_ii(c, d, a, b, x[i + 10], 15, -1051523);
            b = md5_ii(b, c, d, a, x[i + 1], 21, -2054922799);
            a = md5_ii(a, b, c, d, x[i + 8], 6, 1873313359);
            d = md5_ii(d, a, b, c, x[i + 15], 10, -30611744);
            c = md5_ii(c, d, a, b, x[i + 6], 15, -1560198380);
            b = md5_ii(b, c, d, a, x[i + 13], 21, 1309151649);
            a = md5_ii(a, b, c, d, x[i + 4], 6, -145523070);
            d = md5_ii(d, a, b, c, x[i + 11], 10, -1120210379);
            c = md5_ii(c, d, a, b, x[i + 2], 15, 718787259);
            b = md5_ii(b, c, d, a, x[i + 9], 21, -343485551);

            a = safe_add(a, olda);
            b = safe_add(b, oldb);
            c = safe_add(c, oldc);
            d = safe_add(d, oldd);
        }
        return [a, b, c, d];
    };


    /*
     * Calculate the HMAC-MD5, of a key and some data
     */
    var core_hmac_md5 = function (key, data) {
        var bkey = str2binl(key);
        if (bkey.length > 16) { bkey = core_md5(bkey, key.length * chrsz); }

        var ipad = new Array(16), opad = new Array(16);
        for (var i = 0; i < 16; i++) {
            ipad[i] = bkey[i] ^ 0x36363636;
            opad[i] = bkey[i] ^ 0x5C5C5C5C;
        }

        var hash = core_md5(ipad.concat(str2binl(data)), 512 + data.length * chrsz);
        return core_md5(opad.concat(hash), 512 + 128);
    };

    var obj = {
        /*
         * These are the functions you'll usually want to call.
         * They take string arguments and return either hex or base-64 encoded
         * strings.
         */
        hexdigest: function (s) {
            return binl2hex(core_md5(str2binl(s), s.length * chrsz));
        },

        b64digest: function (s) {
            return binl2b64(core_md5(str2binl(s), s.length * chrsz));
        },

        hash: function (s) {
            return binl2str(core_md5(str2binl(s), s.length * chrsz));
        },

        hmac_hexdigest: function (key, data) {
            return binl2hex(core_hmac_md5(key, data));
        },

        hmac_b64digest: function (key, data) {
            return binl2b64(core_hmac_md5(key, data));
        },

        hmac_hash: function (key, data) {
            return binl2str(core_hmac_md5(key, data));
        },

        /*
         * Perform a simple self-test to see if the VM is working
         */
        test: function () {
            return MD5.hexdigest("abc") === "900150983cd24fb0d6963f7d28e17f72";
        }
    };

    return obj;
})();

// Nodify
exports.MD5 = MD5;
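The header comments above name the entry points; a quick sketch (require path illustrative; the "abc" digest is the RFC 1321 test vector the file's own `test()` checks against):

```javascript
// Hedged usage sketch for the removed MD5 helper.
var MD5 = require("./md5.js").MD5;

console.log(MD5.hexdigest("abc"));
// "900150983cd24fb0d6963f7d28e17f72"
console.log(MD5.hmac_hexdigest("key", "data")); // HMAC-MD5 of the data, as hex
console.log(MD5.test());                        // true if the VM computes MD5 correctly
```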
File diff suppressed because it is too large
@@ -1,48 +0,0 @@
var strophe = require("./strophe/strophe.js").Strophe;

var Strophe = strophe.Strophe;
var $iq = strophe.$iq;
var $msg = strophe.$msg;
var $build = strophe.$build;
var $pres = strophe.$pres;

var jsdom = require("jsdom");
var window = jsdom.jsdom().parentWindow;
var $ = require('jquery')(window);

var stropheJingle = require("./strophe.jingle.sdp.js");


var input = '';

process.stdin.on('readable', function() {
    var chunk = process.stdin.read();
    if (chunk !== null) {
        input += chunk;
    }
});

process.stdin.on('end', function() {
    if (process.argv[2] == '--jingle') {
        var elem = $(input);
        // app does:
        // sess.setRemoteDescription($(iq).find('>jingle'), 'offer');
        //console.log(elem.find('>content'));
        var sdp = new stropheJingle.SDP('');
        sdp.fromJingle(elem);
        console.log(sdp.raw);
    } else if (process.argv[2] == '--sdp') {
        var sdp = new stropheJingle.SDP(input);
        var accept = $iq({to: '%(tojid)s',
                          type: 'set'})
            .c('jingle', {xmlns: 'urn:xmpp:jingle:1',
                          //action: 'session-accept',
                          action: '%(action)s',
                          initiator: '%(initiator)s',
                          responder: '%(responder)s',
                          sid: '%(sid)s' });
        sdp.toJingle(accept, 'responder');
        console.log(Strophe.serialize(accept));
    }
});
@@ -1,88 +0,0 @@
#!/usr/bin/env python

import json
import sys
from argparse import ArgumentParser
from urllib.parse import quote

import requests


def _mkurl(template, kws):
    for key in kws:
        template = template.replace(key, kws[key])
    return template


def main(hs, room_id, access_token, user_id_prefix, why):
    if not why:
        why = "Automated kick."
    print(
        "Kicking members on %s in room %s matching %s" % (hs, room_id, user_id_prefix)
    )
    room_state_url = _mkurl(
        "$HS/_matrix/client/api/v1/rooms/$ROOM/state?access_token=$TOKEN",
        {"$HS": hs, "$ROOM": room_id, "$TOKEN": access_token},
    )
    print("Getting room state => %s" % room_state_url)
    res = requests.get(room_state_url)
    print("HTTP %s" % res.status_code)
    state_events = res.json()
    if "error" in state_events:
        print("FATAL")
        print(state_events)
        return

    kick_list = []
    room_name = room_id
    for event in state_events:
        if not event["type"] == "m.room.member":
            if event["type"] == "m.room.name":
                room_name = event["content"].get("name")
            continue
        if not event["content"].get("membership") == "join":
            continue
        if event["state_key"].startswith(user_id_prefix):
            kick_list.append(event["state_key"])

    if len(kick_list) == 0:
        print("No user IDs match the prefix '%s'" % user_id_prefix)
        return

    print("The following user IDs will be kicked from %s" % room_name)
    for uid in kick_list:
        print(uid)
    doit = input("Continue? [Y]es\n")
    if len(doit) > 0 and doit.lower() == "y":
        print("Kicking members...")
        # encode them all
        kick_list = [quote(uid) for uid in kick_list]
        for uid in kick_list:
            kick_url = _mkurl(
                "$HS/_matrix/client/api/v1/rooms/$ROOM/state/m.room.member/$UID?access_token=$TOKEN",
                {"$HS": hs, "$UID": uid, "$ROOM": room_id, "$TOKEN": access_token},
            )
            kick_body = {"membership": "leave", "reason": why}
            print("Kicking %s" % uid)
            res = requests.put(kick_url, data=json.dumps(kick_body))
            if res.status_code != 200:
                print("ERROR: HTTP %s" % res.status_code)
                if res.json().get("error"):
                    print("ERROR: JSON %s" % res.json())


if __name__ == "__main__":
    parser = ArgumentParser("Kick members in a room matching a certain user ID prefix.")
    parser.add_argument("-u", "--user-id", help="The user ID prefix e.g. '@irc_'")
    parser.add_argument("-t", "--token", help="Your access_token")
    parser.add_argument("-r", "--room", help="The room ID to kick members in")
    parser.add_argument(
        "-s", "--homeserver", help="The base HS url e.g. http://matrix.org"
    )
    parser.add_argument("-w", "--why", help="Reason for the kick. Optional.")
    args = parser.parse_args()
    if not args.room or not args.token or not args.user_id or not args.homeserver:
        parser.print_help()
        sys.exit(1)
    else:
        main(args.homeserver, args.room, args.token, args.user_id, args.why)
debian/changelog
@@ -1,3 +1,46 @@
matrix-synapse-py3 (1.61.1) stable; urgency=medium

  * New Synapse release 1.61.1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 28 Jun 2022 14:33:46 +0100

matrix-synapse-py3 (1.61.0) stable; urgency=medium

  * New Synapse release 1.61.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 14 Jun 2022 11:44:19 +0100

matrix-synapse-py3 (1.61.0~rc1) stable; urgency=medium

  * Remove unused `jitsimeetbridge` experiment from `contrib` directory.
  * New Synapse release 1.61.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 07 Jun 2022 12:42:31 +0100

matrix-synapse-py3 (1.60.0) stable; urgency=medium

  * New Synapse release 1.60.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 31 May 2022 13:41:22 +0100

matrix-synapse-py3 (1.60.0~rc2) stable; urgency=medium

  * New Synapse release 1.60.0rc2.

 -- Synapse Packaging team <packages@matrix.org>  Fri, 27 May 2022 11:04:55 +0100

matrix-synapse-py3 (1.60.0~rc1) stable; urgency=medium

  * New Synapse release 1.60.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 24 May 2022 12:05:01 +0100

matrix-synapse-py3 (1.59.1) stable; urgency=medium

  * New Synapse release 1.59.1.

 -- Synapse Packaging team <packages@matrix.org>  Wed, 18 May 2022 11:41:46 +0100

matrix-synapse-py3 (1.59.0) stable; urgency=medium

  * New Synapse release 1.59.0.
debian/copyright
@@ -22,29 +22,6 @@ Files: synapse/config/repository.py
Copyright: 2014-2015, matrix.org
License: Apache-2.0

Files: contrib/jitsimeetbridge/unjingle/strophe/base64.js
Copyright: Public Domain (Tyler Akins http://rumkin.com)
License: public-domain
 This code was written by Tyler Akins and has been placed in the
 public domain. It would be nice if you left this header intact.
 Base64 code from Tyler Akins -- http://rumkin.com

Files: contrib/jitsimeetbridge/unjingle/strophe/md5.js
Copyright: 1999-2002, Paul Johnston & Contributors
License: BSD-3-clause

Files: contrib/jitsimeetbridge/unjingle/strophe/strophe.js
Copyright: 2006-2008, OGG, LLC
License: Expat

Files: contrib/jitsimeetbridge/unjingle/strophe/XMLHttpRequest.js
Copyright: 2010 passive.ly LLC
License: Expat

Files: contrib/jitsimeetbridge/unjingle/*.js
Copyright: 2014 Jitsi
License: Apache-2.0

Files: debian/*
Copyright: 2016-2017, Erik Johnston <erik@matrix.org>
           2017, Rahul De <rahulde@swecha.net>
@@ -6,12 +6,14 @@ CWD=$(pwd)

cd "$DIR/.." || exit

PYTHONPATH=$(readlink -f "$(pwd)")
export PYTHONPATH

echo "$PYTHONPATH"
# Do not override PYTHONPATH if we are in a virtual env
if [ "$VIRTUAL_ENV" = "" ]; then
    PYTHONPATH=$(readlink -f "$(pwd)")
    export PYTHONPATH
    echo "$PYTHONPATH"
fi

# Create servers which listen on HTTP at 808x and HTTPS at 848x.
for port in 8080 8081 8082; do
    echo "Starting server on port $port... "
@@ -19,10 +21,12 @@ for port in 8080 8081 8082; do
    mkdir -p demo/$port
    pushd demo/$port || exit

    # Generate the configuration for the homeserver at localhost:848x.
    # Generate the configuration for the homeserver at localhost:848x, note that
    # the homeserver name needs to match the HTTPS listening port for federation
    # to properly work.
    python3 -m synapse.app.homeserver \
        --generate-config \
        --server-name "localhost:$port" \
        --server-name "localhost:$https_port" \
        --config-path "$port.config" \
        --report-stats no
@@ -55,7 +55,7 @@ RUN \
# NB: In poetry 1.2 `poetry export` will be moved into a plugin; we'll need to also
# pip install poetry-plugin-export (https://github.com/python-poetry/poetry-plugin-export).
RUN --mount=type=cache,target=/root/.cache/pip \
    pip install --user git+https://github.com/python-poetry/poetry.git@fb13b3a676f476177f7937ffa480ee5cff9a90a5
    pip install --user "poetry-core==1.1.0a7" "git+https://github.com/python-poetry/poetry.git@fb13b3a676f476177f7937ffa480ee5cff9a90a5"

WORKDIR /synapse
@@ -6,12 +6,6 @@
# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse
FROM matrixdotorg/synapse-workers

# Download a caddy server to stand in front of nginx and terminate TLS using Complement's
# custom CA.
# We include this near the top of the file in order to cache the result.
RUN curl -OL "https://github.com/caddyserver/caddy/releases/download/v2.3.0/caddy_2.3.0_linux_amd64.tar.gz" && \
    tar xzf caddy_2.3.0_linux_amd64.tar.gz && rm caddy_2.3.0_linux_amd64.tar.gz && mv caddy /root

# Install postgresql
RUN apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y postgresql-13
@@ -31,16 +25,12 @@ COPY conf-workers/workers-shared.yaml /conf/workers/shared.yaml

WORKDIR /data

# Copy the caddy config
COPY conf-workers/caddy.complement.json /root/caddy.json

COPY conf-workers/postgres.supervisord.conf /etc/supervisor/conf.d/postgres.conf
COPY conf-workers/caddy.supervisord.conf /etc/supervisor/conf.d/caddy.conf

# Copy the entrypoint
COPY conf-workers/start-complement-synapse-workers.sh /

# Expose caddy's listener ports
# Expose nginx's listener ports
EXPOSE 8008 8448

ENTRYPOINT ["/start-complement-synapse-workers.sh"]
@@ -1,72 +0,0 @@
{
    "apps": {
        "http": {
            "servers": {
                "srv0": {
                    "listen": [
                        ":8448"
                    ],
                    "routes": [
                        {
                            "match": [
                                {
                                    "host": [
                                        "{{ server_name }}"
                                    ]
                                }
                            ],
                            "handle": [
                                {
                                    "handler": "subroute",
                                    "routes": [
                                        {
                                            "handle": [
                                                {
                                                    "handler": "reverse_proxy",
                                                    "upstreams": [
                                                        {
                                                            "dial": "localhost:8008"
                                                        }
                                                    ]
                                                }
                                            ]
                                        }
                                    ]
                                }
                            ],
                            "terminal": true
                        }
                    ]
                }
            }
        },
        "tls": {
            "automation": {
                "policies": [
                    {
                        "subjects": [
                            "{{ server_name }}"
                        ],
                        "issuers": [
                            {
                                "module": "internal"
                            }
                        ],
                        "on_demand": true
                    }
                ]
            }
        },
        "pki": {
            "certificate_authorities": {
                "local": {
                    "name": "Complement CA",
                    "root": {
                        "certificate": "/complement/ca/ca.crt",
                        "private_key": "/complement/ca/ca.key"
                    }
                }
            }
        }
    }
}
@@ -1,7 +0,0 @@
[program:caddy]
command=/usr/local/bin/prefix-log /root/caddy run --config /root/caddy.json
autorestart=unexpected
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
@@ -9,9 +9,6 @@ function log {
    echo "$d $@"
}

# Replace the server name in the caddy config
sed -i "s/{{ server_name }}/${SERVER_NAME}/g" /root/caddy.json

# Set the server name of the homeserver
export SYNAPSE_SERVER_NAME=${SERVER_NAME}

@@ -39,6 +36,26 @@ export SYNAPSE_WORKER_TYPES="\
    appservice, \
    pusher"

# Add Complement's appservice registration directory, if there is one
# (It can be absent when there are no application services in this test!)
if [ -d /complement/appservice ]; then
    export SYNAPSE_AS_REGISTRATION_DIR=/complement/appservice
fi

# Generate a TLS key, then generate a certificate by having Complement's CA sign it
# Note that both the key and certificate are in PEM format (not DER).
openssl genrsa -out /conf/server.tls.key 2048

openssl req -new -key /conf/server.tls.key -out /conf/server.tls.csr \
    -subj "/CN=${SERVER_NAME}"

openssl x509 -req -in /conf/server.tls.csr \
    -CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -set_serial 1 \
    -out /conf/server.tls.crt

export SYNAPSE_TLS_CERT=/conf/server.tls.crt
export SYNAPSE_TLS_KEY=/conf/server.tls.key

# Run the script that writes the necessary config files and starts supervisord, which in turn
# starts everything else
exec /configure_workers_and_start.py
@@ -5,6 +5,12 @@ enable_registration: true
enable_registration_without_verification: true
bcrypt_rounds: 4

## Registration ##

# Needed by Complement to register admin users
# DO NOT USE in a production configuration! This should be a random secret.
registration_shared_secret: complement

## Federation ##

# trust certs signed by Complement's CA
@@ -53,6 +59,18 @@ rc_joins:
  per_second: 9999
  burst_count: 9999

rc_3pid_validation:
  per_second: 1000
  burst_count: 1000

rc_invites:
  per_room:
    per_second: 1000
    burst_count: 1000
  per_user:
    per_second: 1000
    burst_count: 1000

federation_rr_transactions_per_room_per_second: 9999

## Experimental Features ##

@@ -87,6 +87,18 @@ rc_joins:
  per_second: 9999
  burst_count: 9999

rc_3pid_validation:
  per_second: 1000
  burst_count: 1000

rc_invites:
  per_room:
    per_second: 1000
    burst_count: 1000
  per_user:
    per_second: 1000
    burst_count: 1000

federation_rr_transactions_per_room_per_second: 9999

## API Configuration ##
@@ -9,6 +9,22 @@ server {
    listen 8008;
    listen [::]:8008;

{% if tls_cert_path is not none and tls_key_path is not none %}
    listen 8448 ssl;
    listen [::]:8448 ssl;

    ssl_certificate {{ tls_cert_path }};
    ssl_certificate_key {{ tls_key_path }};

    # Some directives from cipherlist.eu (fka cipherli.st):
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
    ssl_prefer_server_ciphers on;
    ssl_ciphers "EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH";
    ssl_ecdh_curve secp384r1; # Requires nginx >= 1.1.0
    ssl_session_cache shared:SSL:10m;
    ssl_session_tickets off; # Requires nginx >= 1.5.9
{% endif %}

    server_name localhost;

    # Nginx by default only allows file uploads up to 1M in size
@@ -6,4 +6,13 @@
redis:
    enabled: true

{{ shared_worker_config }}
{% if appservice_registrations is not none %}
## Application Services ##
# A list of application service config files to use.
app_service_config_files:
{%- for path in appservice_registrations %}
  - "{{ path }}"
{%- endfor %}
{%- endif %}

{{ shared_worker_config }}
@@ -21,6 +21,11 @@
#   * SYNAPSE_REPORT_STATS: Whether to report stats.
#   * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKER_CONFIG
#     below. Leave empty for no workers, or set to '*' for all possible workers.
#   * SYNAPSE_AS_REGISTRATION_DIR: If specified, a directory in which .yaml and .yml files
#     will be treated as Application Service registration files.
#   * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format.
#   * SYNAPSE_TLS_KEY: Path to a TLS key. If this and SYNAPSE_TLS_CERT are specified,
#     Nginx will be configured to serve TLS on port 8448.
#
# NOTE: According to Complement's ENTRYPOINT expectations for a homeserver image (as defined
# in the project's README), this script may be run multiple times, and functionality should
@@ -29,6 +34,7 @@
import os
import subprocess
import sys
from pathlib import Path
from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Set

import jinja2
@@ -152,6 +158,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
            "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/join/",
            "^/_matrix/client/(api/v1|r0|v3|unstable)/profile/",
            "^/_matrix/client/(v1|unstable/org.matrix.msc2716)/rooms/.*/batch_send",
        ],
        "shared_extra_conf": {},
        "worker_extra_conf": "",
@@ -488,11 +495,23 @@ def generate_worker_files(
    master_log_config = generate_worker_log_config(environ, "master", data_dir)
    shared_config["log_config"] = master_log_config

    # Find application service registrations
    appservice_registrations = None
    appservice_registration_dir = os.environ.get("SYNAPSE_AS_REGISTRATION_DIR")
    if appservice_registration_dir:
        # Scan for all YAML files that should be application service registrations.
        appservice_registrations = [
            str(reg_path.resolve())
            for reg_path in Path(appservice_registration_dir).iterdir()
            if reg_path.suffix.lower() in (".yaml", ".yml")
        ]

    # Shared homeserver config
    convert(
        "/conf/shared.yaml.j2",
        "/conf/workers/shared.yaml",
        shared_worker_config=yaml.dump(shared_config),
        appservice_registrations=appservice_registrations,
    )

    # Nginx config
@@ -501,6 +520,8 @@
        "/etc/nginx/conf.d/matrix-synapse.conf",
        worker_locations=nginx_location_config,
        upstream_directives=nginx_upstream_config,
        tls_cert_path=os.environ.get("SYNAPSE_TLS_CERT"),
        tls_key_path=os.environ.get("SYNAPSE_TLS_KEY"),
    )

    # Supervisord config
@@ -89,6 +89,7 @@
- [Database Schemas](development/database_schema.md)
- [Experimental features](development/experimental_features.md)
- [Synapse Architecture]()
  - [Cancellation](development/synapse_architecture/cancellation.md)
- [Log Contexts](log_contexts.md)
- [Replication](replication.md)
- [TCP Replication](tcp_replication.md)
@@ -289,7 +289,7 @@ POST /_synapse/admin/v1/purge_media_cache?before_ts=<unix_timestamp_in_ms>

URL Parameters

* `unix_timestamp_in_ms`: string representing a positive integer - Unix timestamp in milliseconds.
* `before_ts`: string representing a positive integer - Unix timestamp in milliseconds.
  All cached media that was last accessed before this timestamp will be removed.

Response:
@@ -115,7 +115,9 @@ URL parameters:
Body parameters:

- `password` - string, optional. If provided, the user's password is updated and all
  devices are logged out.
  devices are logged out, unless `logout_devices` is set to `false`.
- `logout_devices` - bool, optional, defaults to `true`. If set to false, devices aren't
  logged out even when `password` is provided.
- `displayname` - string, optional, defaults to the value of `user_id`.
- `threepids` - array, optional, allows setting the third-party IDs (email, msisdn)
  - `medium` - string. Kind of third-party ID, either `email` or `msisdn`.
@@ -206,7 +206,32 @@ This means that we need to run our unit tests against PostgreSQL too. Our CI does
this automatically for pull requests and release candidates, but it's sometimes
useful to reproduce this locally.

To do so, [configure Postgres](../postgres.md) and run `trial` with the
#### Using Docker

The easiest way to do so is to run Postgres via a docker container. In one
terminal:

```shell
docker run --rm -e POSTGRES_PASSWORD=mysecretpassword -e POSTGRES_USER=postgres -e POSTGRES_DB=postgres -p 5432:5432 postgres:14
```

If you see an error like

```
docker: Error response from daemon: driver failed programming external connectivity on endpoint nice_ride (b57bbe2e251b70015518d00c9981e8cb8346b5c785250341a6c53e3c899875f1): Error starting userland proxy: listen tcp4 0.0.0.0:5432: bind: address already in use.
```

then something is already bound to port 5432. You're probably already running postgres locally.

Once you have a postgres server running, invoke `trial` in a second terminal:

```shell
SYNAPSE_POSTGRES=1 SYNAPSE_POSTGRES_HOST=127.0.0.1 SYNAPSE_POSTGRES_USER=postgres SYNAPSE_POSTGRES_PASSWORD=mysecretpassword poetry run trial tests
```

#### Using an existing Postgres installation

If you have postgres already installed on your system, you can run `trial` with the
following environment variables matching your configuration:

- `SYNAPSE_POSTGRES` to anything nonempty
@@ -229,8 +254,8 @@ You don't need to specify the host, user, port or password if your Postgres
server is set to authenticate you over the UNIX socket (i.e. if the `psql` command
works without further arguments).

Your Postgres account needs to be able to create databases.

Your Postgres account needs to be able to create databases; see the postgres
docs for [`ALTER ROLE`](https://www.postgresql.org/docs/current/sql-alterrole.html).

## Run the integration tests ([Sytest](https://github.com/matrix-org/sytest)).

@@ -397,8 +422,8 @@ same lightweight approach that the Linux Kernel
[submitting patches process](
https://www.kernel.org/doc/html/latest/process/submitting-patches.html#sign-your-work-the-developer-s-certificate-of-origin),
[Docker](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), and many other
projects use: the DCO (Developer Certificate of Origin:
http://developercertificate.org/). This is a simple declaration that you wrote
projects use: the DCO ([Developer Certificate of Origin](http://developercertificate.org/)).
This is a simple declaration that you wrote
the contribution or otherwise have the right to contribute it to Matrix:

```
@@ -5,7 +5,7 @@
Requires you to have a [Synapse development environment setup](https://matrix-org.github.io/synapse/develop/development/contributing_guide.html#4-install-the-dependencies).

The demo setup allows running three federation Synapse servers, with server
names `localhost:8080`, `localhost:8081`, and `localhost:8082`.
names `localhost:8480`, `localhost:8481`, and `localhost:8482`.

You can access them via any Matrix client over HTTP at `localhost:8080`,
`localhost:8081`, and `localhost:8082` or over HTTPS at `localhost:8480`,
@@ -20,9 +20,10 @@ and the servers are configured in a highly insecure way, including:
The servers are configured to store their data under `demo/8080`, `demo/8081`, and
`demo/8082`. This includes configuration, logs, SQLite databases, and media.

Note that when joining a public room on a different HS via "#foo:bar.net", then
you are (in the current impl) joining a room with room_id "foo". This means that
it won't work if your HS already has a room with that name.
Note that when joining a public room on a different homeserver via "#foo:bar.net",
then you are (in the current implementation) joining a room with room_id "foo".
This means that it won't work if your homeserver already has a room with that
name.

## Using the demo scripts
docs/development/synapse_architecture/cancellation.md (new file)
@@ -0,0 +1,392 @@
|
||||
# Cancellation
|
||||
Sometimes, requests take a long time to service and clients disconnect
|
||||
before Synapse produces a response. To avoid wasting resources, Synapse
|
||||
can cancel request processing for select endpoints marked with the
|
||||
`@cancellable` decorator.
|
||||
|
||||
Synapse makes use of Twisted's `Deferred.cancel()` feature to make
|
||||
cancellation work. The `@cancellable` decorator does nothing by itself
|
||||
and merely acts as a flag, signalling to developers and other code alike
|
||||
that a method can be cancelled.
|
||||
|
||||
## Enabling cancellation for an endpoint
|
||||
1. Check that the endpoint method, and any `async` functions in its call
|
||||
tree handle cancellation correctly. See
|
||||
[Handling cancellation correctly](#handling-cancellation-correctly)
|
||||
for a list of things to look out for.
|
||||
2. Add the `@cancellable` decorator to the `on_GET/POST/PUT/DELETE`
|
||||
method. It's not recommended to make non-`GET` methods cancellable,
|
||||
since cancellation midway through some database updates is less
|
||||
likely to be handled correctly.
|
||||
|
||||
## Mechanics
|
||||
There are two stages to cancellation: downward propagation of a
|
||||
`cancel()` call, followed by upwards propagation of a `CancelledError`
|
||||
out of a blocked `await`.
|
||||
Both Twisted and asyncio have a cancellation mechanism.
|
||||
|
||||
| | Method | Exception | Exception inherits from |
|
||||
|---------------|---------------------|-----------------------------------------|-------------------------|
|
||||
| Twisted | `Deferred.cancel()` | `twisted.internet.defer.CancelledError` | `Exception` (!) |
|
||||
| asyncio | `Task.cancel()` | `asyncio.CancelledError` | `BaseException` |
|
||||
|
||||
### Deferred.cancel()
|
||||
When Synapse starts handling a request, it runs the async method
|
||||
responsible for handling it using `defer.ensureDeferred`, which returns
|
||||
a `Deferred`. For example:
|
||||
|
||||
```python
|
||||
def do_something() -> Deferred[None]:
|
||||
...
|
||||
|
||||
@cancellable
|
||||
async def on_GET() -> Tuple[int, JsonDict]:
|
||||
d = make_deferred_yieldable(do_something())
|
||||
await d
|
||||
return 200, {}
|
||||
|
||||
request = defer.ensureDeferred(on_GET())
|
||||
```
|
||||
|
||||
When a client disconnects early, Synapse checks for the presence of the
|
||||
`@cancellable` decorator on `on_GET`. Since `on_GET` is cancellable,
|
||||
`Deferred.cancel()` is called on the `Deferred` from
|
||||
`defer.ensureDeferred`, ie. `request`. Twisted knows which `Deferred`
|
||||
`request` is waiting on and passes the `cancel()` call on to `d`.
|
||||
|
||||
The `Deferred` being waited on, `d`, may have its own handling for
|
||||
`cancel()` and pass the call on to other `Deferred`s.
|
||||
|
||||
Eventually, a `Deferred` handles the `cancel()` call by resolving itself
|
||||
with a `CancelledError`.
|
||||
|
||||
### CancelledError
|
||||
The `CancelledError` gets raised out of the `await` and bubbles up, as
|
||||
per normal Python exception handling.
|
||||
|
||||
## Handling cancellation correctly
|
||||
In general, when writing code that might be subject to cancellation, two
|
||||
things must be considered:
|
||||
* The effect of `CancelledError`s raised out of `await`s.
|
||||
* The effect of `Deferred`s being `cancel()`ed.
|
||||
|
||||
Examples of code that handles cancellation incorrectly include:
|
||||
* `try-except` blocks which swallow `CancelledError`s.
|
||||
* Code that shares the same `Deferred`, which may be cancelled, between
|
||||
multiple requests.
|
||||
* Code that starts some processing that's exempt from cancellation, but
|
||||
uses a logging context from cancellable code. The logging context
|
||||
will be finished upon cancellation, while the uncancelled processing
|
||||
is still using it.
|
||||
|
||||
Some common patterns are listed below in more detail.
|
||||
|
||||
### `async` function calls
|
||||
Most functions in Synapse are relatively straightforward from a
|
||||
cancellation standpoint: they don't do anything with `Deferred`s and
|
||||
purely call and `await` other `async` functions.
|
||||
|
||||
An `async` function handles cancellation correctly if its own code
|
||||
handles cancellation correctly and all the async function it calls
|
||||
handle cancellation correctly. For example:
|
||||
```python
|
||||
async def do_two_things() -> None:
|
||||
check_something()
|
||||
await do_something()
|
||||
await do_something_else()
|
||||
```
|
||||
`do_two_things` handles cancellation correctly if `do_something` and
|
||||
`do_something_else` handle cancellation correctly.
|
||||
|
||||
That is, when checking whether a function handles cancellation
|
||||
correctly, its implementation and all its `async` function calls need to
|
||||
be checked, recursively.
|
||||
|
||||
As `check_something` is not `async`, it does not need to be checked.
|
||||
|
||||
### CancelledErrors
|
||||
Because Twisted's `CancelledError`s are `Exception`s, it's easy to
|
||||
accidentally catch and suppress them. Care must be taken to ensure that
|
||||
`CancelledError`s are allowed to propagate upwards.
|
||||
|
||||
<table width="100%">
|
||||
<tr>
|
||||
<td width="50%" valign="top">
|
||||
|
||||
**Bad**:
|
||||
```python
|
||||
try:
|
||||
await do_something()
|
||||
except Exception:
|
||||
# `CancelledError` gets swallowed here.
|
||||
logger.info(...)
|
||||
```
|
||||
</td>
|
||||
<td width="50%" valign="top">
|
||||
|
||||
**Good**:
|
||||
```python
|
||||
try:
|
||||
await do_something()
|
||||
except CancelledError:
|
||||
raise
|
||||
except Exception:
|
||||
logger.info(...)
|
||||
```
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td width="50%" valign="top">
|
||||
|
||||
**OK**:
|
||||
```python
|
||||
try:
|
||||
check_something()
|
||||
# A `CancelledError` won't ever be raised here.
|
||||
except Exception:
|
||||
logger.info(...)
|
||||
```
|
||||
</td>
|
||||
<td width="50%" valign="top">
|
||||
|
||||
**Good**:
|
||||
```python
|
||||
try:
|
||||
await do_something()
|
||||
except ValueError:
|
||||
logger.info(...)
|
||||
```
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
#### defer.gatherResults
|
||||
`defer.gatherResults` produces a `Deferred` which:
|
||||
* broadcasts `cancel()` calls to every `Deferred` being waited on.
|
||||
* wraps the first exception it sees in a `FirstError`.
|
||||
|
||||
Together, this means that `CancelledError`s will be wrapped in
|
||||
a `FirstError` unless unwrapped. Such `FirstError`s are liable to be
|
||||
swallowed, so they must be unwrapped.
|
||||
|
||||
<table width="100%">
|
||||
<tr>
|
||||
<td width="50%" valign="top">
|
||||
|
||||
**Bad**:
|
||||
```python
|
||||
async def do_something() -> None:
|
||||
await make_deferred_yieldable(
|
||||
defer.gatherResults([...], consumeErrors=True)
|
||||
)
|
||||
|
||||
try:
|
||||
await do_something()
|
||||
except CancelledError:
|
||||
raise
|
||||
except Exception:
|
||||
# `FirstError(CancelledError)` gets swallowed here.
|
||||
logger.info(...)
|
||||
```
|
||||
|
||||
</td>
|
||||
<td width="50%" valign="top">
|
||||
|
||||
**Good**:
|
||||
```python
|
||||
async def do_something() -> None:
|
||||
await make_deferred_yieldable(
|
||||
defer.gatherResults([...], consumeErrors=True)
|
||||
).addErrback(unwrapFirstError)
|
||||
|
||||
try:
|
||||
await do_something()
|
||||
except CancelledError:
|
||||
raise
|
||||
except Exception:
|
||||
logger.info(...)
|
||||
```
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
### Creation of `Deferred`s
|
||||
If a function creates a `Deferred`, the effect of cancelling it must be considered. `Deferred`s that get shared are likely to have unintended behaviour when cancelled.
|
||||
|
||||
<table width="100%">
|
||||
<tr>
|
||||
<td width="50%" valign="top">
|
||||
|
||||
**Bad**:
|
||||
```python
|
||||
cache: Dict[str, Deferred[None]] = {}
|
||||
|
||||
def wait_for_room(room_id: str) -> Deferred[None]:
|
||||
deferred = cache.get(room_id)
|
||||
if deferred is None:
|
||||
deferred = Deferred()
|
||||
cache[room_id] = deferred
|
||||
# `deferred` can have multiple waiters.
|
||||
# All of them will observe a `CancelledError`
|
||||
# if any one of them is cancelled.
|
||||
return make_deferred_yieldable(deferred)
|
||||
|
||||
# Request 1
|
||||
await wait_for_room("!aAAaaAaaaAAAaAaAA:matrix.org")
|
||||
# Request 2
|
||||
await wait_for_room("!aAAaaAaaaAAAaAaAA:matrix.org")
|
||||
```
|
||||
</td>
|
||||
<td width="50%" valign="top">
|
||||
|
||||
**Good**:
|
||||
```python
|
||||
cache: Dict[str, Deferred[None]] = {}
|
||||
|
||||
def wait_for_room(room_id: str) -> Deferred[None]:
|
||||
deferred = cache.get(room_id)
|
||||
if deferred is None:
|
||||
deferred = Deferred()
|
||||
cache[room_id] = deferred
|
||||
# `deferred` will never be cancelled now.
|
||||
# A `CancelledError` will still come out of
|
||||
# the `await`.
|
||||
# `delay_cancellation` may also be used.
|
||||
return make_deferred_yieldable(stop_cancellation(deferred))
|
||||
|
||||
# Request 1
|
||||
await wait_for_room("!aAAaaAaaaAAAaAaAA:matrix.org")
|
||||
# Request 2
|
||||
await wait_for_room("!aAAaaAaaaAAAaAaAA:matrix.org")
|
||||
```
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td width="50%" valign="top">
|
||||
</td>
|
||||
<td width="50%" valign="top">
|
||||
|
||||
**Good**:
|
||||
```python
|
||||
cache: Dict[str, List[Deferred[None]]] = {}
|
||||
|
||||
def wait_for_room(room_id: str) -> Deferred[None]:
|
||||
if room_id not in cache:
|
||||
cache[room_id] = []
|
||||
# Each request gets its own `Deferred` to wait on.
|
||||
deferred = Deferred()
|
||||
        cache[room_id].append(deferred)
|
||||
return make_deferred_yieldable(deferred)
|
||||
|
||||
# Request 1
|
||||
await wait_for_room("!aAAaaAaaaAAAaAaAA:matrix.org")
|
||||
# Request 2
|
||||
await wait_for_room("!aAAaaAaaaAAAaAaAA:matrix.org")
|
||||
```
|
||||
</td>
|
||||
</table>
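For reference, `stop_cancellation` works by handing each waiter a fresh `Deferred` chained off the shared one, so that cancelling the wrapper fails only the wrapper. A rough sketch (simplified from the helper in `synapse.util.async_helpers`, without the type parameters of the real code):

```python
from twisted.internet import defer

def stop_cancellation(deferred: defer.Deferred) -> defer.Deferred:
    # The wrapper has no canceller: cancelling it errbacks the wrapper with
    # `CancelledError`, while `deferred` itself carries on unaware.
    new_deferred: defer.Deferred = defer.Deferred()
    deferred.chainDeferred(new_deferred)
    return new_deferred
```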
|
||||
|
||||
### Uncancelled processing
|
||||
Some `async` functions may kick off some `async` processing which is
|
||||
intentionally protected from cancellation, by `stop_cancellation` or
|
||||
other means. If the `async` processing inherits the logcontext of the
|
||||
request which initiated it, care must be taken to ensure that the
|
||||
logcontext is not finished before the `async` processing completes.
|
||||
|
||||
<table width="100%">
|
||||
<tr>
|
||||
<td width="50%" valign="top">
|
||||
|
||||
**Bad**:
|
||||
```python
|
||||
cache: Optional[ObservableDeferred[None]] = None
|
||||
|
||||
async def do_something_else(
|
||||
to_resolve: Deferred[None]
|
||||
) -> None:
|
||||
await ...
|
||||
logger.info("done!")
|
||||
to_resolve.callback(None)
|
||||
|
||||
async def do_something() -> None:
|
||||
if not cache:
|
||||
to_resolve = Deferred()
|
||||
cache = ObservableDeferred(to_resolve)
|
||||
# `do_something_else` will never be cancelled and
|
||||
# can outlive the `request-1` logging context.
|
||||
run_in_background(do_something_else, to_resolve)
|
||||
|
||||
await make_deferred_yieldable(cache.observe())
|
||||
|
||||
with LoggingContext("request-1"):
|
||||
await do_something()
|
||||
```
|
||||
</td>
|
||||
<td width="50%" valign="top">
|
||||
|
||||
**Good**:
|
||||
```python
|
||||
cache: Optional[ObservableDeferred[None]] = None
|
||||
|
||||
async def do_something_else(
|
||||
to_resolve: Deferred[None]
|
||||
) -> None:
|
||||
await ...
|
||||
logger.info("done!")
|
||||
to_resolve.callback(None)
|
||||
|
||||
async def do_something() -> None:
|
||||
if not cache:
|
||||
to_resolve = Deferred()
|
||||
cache = ObservableDeferred(to_resolve)
|
||||
run_in_background(do_something_else, to_resolve)
|
||||
# We'll wait until `do_something_else` is
|
||||
# done before raising a `CancelledError`.
|
||||
await make_deferred_yieldable(
|
||||
delay_cancellation(cache.observe())
|
||||
)
|
||||
else:
|
||||
await make_deferred_yieldable(cache.observe())
|
||||
|
||||
with LoggingContext("request-1"):
|
||||
await do_something()
|
||||
```
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td width="50%">
|
||||
|
||||
**OK**:
|
||||
```python
|
||||
cache: Optional[ObservableDeferred[None]] = None
|
||||
|
||||
async def do_something_else(
|
||||
to_resolve: Deferred[None]
|
||||
) -> None:
|
||||
await ...
|
||||
logger.info("done!")
|
||||
to_resolve.callback(None)
|
||||
|
||||
async def do_something() -> None:
|
||||
if not cache:
|
||||
to_resolve = Deferred()
|
||||
cache = ObservableDeferred(to_resolve)
|
||||
# `do_something_else` will get its own independent
|
||||
# logging context. `request-1` will not count any
|
||||
# metrics from `do_something_else`.
|
||||
run_as_background_process(
|
||||
"do_something_else",
|
||||
do_something_else,
|
||||
to_resolve,
|
||||
)
|
||||
|
||||
await make_deferred_yieldable(cache.observe())
|
||||
|
||||
with LoggingContext("request-1"):
|
||||
await do_something()
|
||||
```
|
||||
</td>
|
||||
<td width="50%">
|
||||
</td>
|
||||
</tr>
|
||||
</table>
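`delay_cancellation` differs from `stop_cancellation` in that the waiter still sees a `CancelledError`, but only once the wrapped `Deferred` has resolved, which is what keeps the logcontext alive in the good example above. A simplified sketch of the idea (the real helper in `synapse.util.async_helpers` also accepts coroutines and handles more edge cases):

```python
from twisted.internet import defer
from twisted.internet.defer import CancelledError
from twisted.python.failure import Failure

def delay_cancellation(deferred: defer.Deferred) -> defer.Deferred:
    def handle_cancel(new_deferred: defer.Deferred) -> None:
        # Pause the wrapper so the `CancelledError` does not propagate yet,
        # then unpause it once the wrapped `Deferred` completes.
        new_deferred.pause()
        new_deferred.errback(Failure(CancelledError()))
        deferred.addBoth(lambda _: new_deferred.unpause())

    new_deferred: defer.Deferred = defer.Deferred(handle_cancel)
    deferred.chainDeferred(new_deferred)
    return new_deferred
```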
|
||||
@@ -117,7 +117,7 @@ In this example, we define three jobs:
|
||||
Note that this example is tailored to show different configurations and
|
||||
features slightly more jobs than is probably necessary (in practice, a
|
||||
server admin would probably consider it better to replace the two last
|
||||
jobs with one that runs once a day and handles rooms which which
|
||||
jobs with one that runs once a day and handles rooms which
|
||||
policy's `max_lifetime` is greater than 3 days).
|
||||
|
||||
Keep in mind, when configuring these jobs, that a purge job can become
|
||||
|
||||
@@ -12,21 +12,27 @@ The available spam checker callbacks are:
|
||||
|
||||
_First introduced in Synapse v1.37.0_
|
||||
|
||||
_Changed in Synapse v1.60.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean or a string is now deprecated._
|
||||
|
||||
```python
|
||||
async def check_event_for_spam(event: "synapse.events.EventBase") -> Union[bool, str]
|
||||
async def check_event_for_spam(event: "synapse.module_api.EventBase") -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", str, bool]
|
||||
```
|
||||
|
||||
Called when receiving an event from a client or via federation. The callback must return
|
||||
either:
|
||||
- an error message string, to indicate the event must be rejected because of spam and
|
||||
give a rejection reason to forward to clients;
|
||||
- the boolean `True`, to indicate that the event is spammy, but not provide further details; or
|
||||
- the boolean `False`, to indicate that the event is not considered spammy.
|
||||
Called when receiving an event from a client or via federation. The callback must return one of:
|
||||
- `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still
|
||||
decide to reject it.
|
||||
- `synapse.module_api.errors.Codes` to reject the operation with an error code. In case
|
||||
of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code.
|
||||
- (deprecated) a non-`Codes` `str` to reject the operation and specify an error message. Note that clients
|
||||
typically will not localize the error message to the user's preferred locale.
|
||||
- (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`.
|
||||
- (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`.
|
||||
|
||||
If multiple modules implement this callback, they will be considered in order. If a
|
||||
callback returns `False`, Synapse falls through to the next one. The value of the first
|
||||
callback that does not return `False` will be used. If this happens, Synapse will not call
|
||||
any of the subsequent implementations of this callback.
|
||||
callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one.
|
||||
The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will
|
||||
be used. If this happens, Synapse will not call any of the subsequent implementations of
|
||||
this callback.
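In other words, the dispatch is equivalent to something like the following (an illustrative sketch, not Synapse's actual internals):

```python
import synapse.module_api

async def run_check_event_for_spam(event, callbacks):
    # Illustration only: call each registered callback in order; the first
    # one that does not return NOT_SPAM decides the outcome.
    for callback in callbacks:
        result = await callback(event)
        if result is not synapse.module_api.NOT_SPAM:
            return result
    return synapse.module_api.NOT_SPAM
```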
|
||||
|
||||
### `user_may_join_room`
|
||||
|
||||
@@ -249,6 +255,24 @@ callback returns `False`, Synapse falls through to the next one. The value of th
|
||||
callback that does not return `False` will be used. If this happens, Synapse will not call
|
||||
any of the subsequent implementations of this callback.
|
||||
|
||||
### `should_drop_federated_event`
|
||||
|
||||
_First introduced in Synapse v1.60.0_
|
||||
|
||||
```python
|
||||
async def should_drop_federated_event(event: "synapse.events.EventBase") -> bool
|
||||
```
|
||||
|
||||
Called when checking whether a remote server can federate an event with us. **Returning
|
||||
`True` from this function will silently drop a federated event and split-brain our view
|
||||
of a room's DAG, and thus you shouldn't use this callback unless you know what you are
|
||||
doing.**
|
||||
|
||||
If multiple modules implement this callback, they will be considered in order. If a
|
||||
callback returns `False`, Synapse falls through to the next one. The value of the first
|
||||
callback that does not return `False` will be used. If this happens, Synapse will not call
|
||||
any of the subsequent implementations of this callback.
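As a deliberately conservative illustration, a module callback might look like the following (the server name is hypothetical, and the warning above still applies):

```python
async def should_drop_federated_event(event: "synapse.events.EventBase") -> bool:
    # Hypothetical example only: silently drop events sent from one
    # specific remote server.
    return event.sender.endswith(":malicious.example.com")
```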
|
||||
|
||||
## Example
|
||||
|
||||
The example below is a module that implements the spam checker callback
|
||||
|
||||
@@ -159,7 +159,7 @@ Follow the [Getting Started Guide](https://www.keycloak.org/getting-started) to
|
||||
oidc_providers:
|
||||
- idp_id: keycloak
|
||||
idp_name: "My KeyCloak server"
|
||||
issuer: "https://127.0.0.1:8443/auth/realms/{realm_name}"
|
||||
issuer: "https://127.0.0.1:8443/realms/{realm_name}"
|
||||
client_id: "synapse"
|
||||
client_secret: "copy secret generated from above"
|
||||
scopes: ["openid", "profile"]
|
||||
@@ -293,7 +293,7 @@ can be used to retrieve information on the authenticated user. As the Synapse
|
||||
login mechanism needs an attribute to uniquely identify users, and that endpoint
|
||||
does not return a `sub` property, an alternative `subject_claim` has to be set.
|
||||
|
||||
1. Create a new OAuth application: https://github.com/settings/applications/new.
|
||||
1. Create a new OAuth application: [https://github.com/settings/applications/new](https://github.com/settings/applications/new).
|
||||
2. Set the callback URL to `[synapse public baseurl]/_synapse/client/oidc/callback`.
|
||||
|
||||
Synapse config:
|
||||
@@ -322,10 +322,10 @@ oidc_providers:
|
||||
|
||||
[Google][google-idp] is an OpenID certified authentication and authorisation provider.
|
||||
|
||||
1. Set up a project in the Google API Console (see
|
||||
https://developers.google.com/identity/protocols/oauth2/openid-connect#appsetup).
|
||||
2. Add an "OAuth Client ID" for a Web Application under "Credentials".
|
||||
3. Copy the Client ID and Client Secret, and add the following to your synapse config:
|
||||
1. Set up a project in the Google API Console (see
|
||||
[documentation](https://developers.google.com/identity/protocols/oauth2/openid-connect#appsetup)).
|
||||
3. Add an "OAuth Client ID" for a Web Application under "Credentials".
|
||||
4. Copy the Client ID and Client Secret, and add the following to your synapse config:
|
||||
```yaml
|
||||
oidc_providers:
|
||||
- idp_id: google
|
||||
@@ -501,8 +501,8 @@ As well as the private key file, you will need:
|
||||
* Team ID: a 10-character ID associated with your developer account.
|
||||
* Key ID: the 10-character identifier for the key.
|
||||
|
||||
https://help.apple.com/developer-account/?lang=en#/dev77c875b7e has more
|
||||
documentation on setting up SiWA.
|
||||
[Apple's developer documentation](https://help.apple.com/developer-account/?lang=en#/dev77c875b7e)
|
||||
has more information on setting up SiWA.
|
||||
|
||||
The synapse config will look like this:
|
||||
|
||||
@@ -535,8 +535,8 @@ needed to add OAuth2 capabilities to your Django projects. It supports
|
||||
|
||||
Configuration on Django's side:
|
||||
|
||||
1. Add an application: https://example.com/admin/oauth2_provider/application/add/ and choose parameters like this:
|
||||
* `Redirect uris`: https://synapse.example.com/_synapse/client/oidc/callback
|
||||
1. Add an application: `https://example.com/admin/oauth2_provider/application/add/` and choose parameters like this:
|
||||
* `Redirect uris`: `https://synapse.example.com/_synapse/client/oidc/callback`
|
||||
* `Client type`: `Confidential`
|
||||
* `Authorization grant type`: `Authorization code`
|
||||
* `Algorithm`: `HMAC with SHA-2 256`
|
||||
|
||||
@@ -289,7 +289,7 @@ presence:
|
||||
# federation: the server-server API (/_matrix/federation). Also implies
|
||||
# 'media', 'keys', 'openid'
|
||||
#
|
||||
# keys: the key discovery API (/_matrix/keys).
|
||||
# keys: the key discovery API (/_matrix/key).
|
||||
#
|
||||
# media: the media API (/_matrix/media).
|
||||
#
|
||||
@@ -730,6 +730,12 @@ retention:
|
||||
# A cache 'factor' is a multiplier that can be applied to each of
|
||||
# Synapse's caches in order to increase or decrease the maximum
|
||||
# number of entries that can be stored.
|
||||
#
|
||||
# The configuration for cache factors (caches.global_factor and
|
||||
# caches.per_cache_factors) can be reloaded while the application is running,
|
||||
# by sending a SIGHUP signal to the Synapse process. Changes to other parts of
|
||||
# the caching config will NOT be applied after a SIGHUP is received; a restart
|
||||
# is necessary.
|
||||
|
||||
# The number of events to cache in memory. Not affected by
|
||||
# caches.global_factor.
|
||||
@@ -778,6 +784,24 @@ caches:
|
||||
#
|
||||
#cache_entry_ttl: 30m
|
||||
|
||||
# This flag enables cache autotuning, and is further specified by the sub-options `max_cache_memory_usage`,
|
||||
# `target_cache_memory_usage`, `min_cache_ttl`. These flags work in conjunction with each other to maintain
|
||||
# a balance between cache memory usage and cache entry availability. You must be using jemalloc to utilize
|
||||
# this option, and all three of the options must be specified for this feature to work.
|
||||
#cache_autotuning:
|
||||
# This flag sets a ceiling on how much memory the cache can use before caches begin to be continuously evicted.
|
||||
# They will continue to be evicted until the memory usage drops below the `target_cache_memory_usage`, set in
|
||||
# the flag below, or until the `min_cache_ttl` is hit.
|
||||
#max_cache_memory_usage: 1024M
|
||||
|
||||
# This flag sets a rough target for the desired memory usage of the caches.
|
||||
#target_cache_memory_usage: 758M
|
||||
|
||||
# `min_cache_ttl` sets a limit under which newer cache entries are not evicted and is only applied when
|
||||
# caches are actively being evicted/`max_cache_memory_usage` has been exceeded. This is to protect hot caches
|
||||
# from being emptied while Synapse is evicting due to memory.
|
||||
#min_cache_ttl: 5m
|
||||
|
||||
# Controls how long the results of a /sync request are cached for after
|
||||
# a successful response is returned. A higher duration can help clients with
|
||||
# intermittent connections, at the cost of higher memory usage.
|
||||
@@ -2192,7 +2216,9 @@ sso:
|
||||
|
||||
|
||||
password_config:
|
||||
# Uncomment to disable password login
|
||||
# Uncomment to disable password login.
|
||||
# Set to `only_for_reauth` to permit reauthentication for users that
|
||||
# have passwords and are already logged in.
|
||||
#
|
||||
#enabled: false
|
||||
|
||||
@@ -2462,15 +2488,39 @@ push:
|
||||
#
|
||||
#encryption_enabled_by_default_for_room_type: invite
|
||||
|
||||
|
||||
# Uncomment to allow non-server-admin users to create groups on this server
|
||||
# Override the default power levels for rooms created on this server, per
|
||||
# room creation preset.
|
||||
#
|
||||
#enable_group_creation: true
|
||||
|
||||
# If enabled, non server admins can only create groups with local parts
|
||||
# starting with this prefix
|
||||
# The appropriate dictionary for the room preset will be applied on top
|
||||
# of the existing power levels content.
|
||||
#
|
||||
#group_creation_prefix: "unofficial_"
|
||||
# Useful if you know that your users need special permissions in rooms
|
||||
# that they create (e.g. to send particular types of state events without
|
||||
# needing an elevated power level). This takes the same shape as the
|
||||
# `power_level_content_override` parameter in the /createRoom API, but
|
||||
# is applied before that parameter.
|
||||
#
|
||||
# Valid keys are some or all of `private_chat`, `trusted_private_chat`
|
||||
# and `public_chat`. Inside each of those should be any of the
|
||||
# properties allowed in `power_level_content_override` in the
|
||||
# /createRoom API. If any property is missing, its default value will
|
||||
# continue to be used. If any property is present, it will overwrite
|
||||
# the existing default completely (so if the `events` property exists,
|
||||
# the default event power levels will be ignored).
|
||||
#
|
||||
#default_power_level_content_override:
|
||||
# private_chat:
|
||||
# "events":
|
||||
# "com.example.myeventtype" : 0
|
||||
# "m.room.avatar": 50
|
||||
# "m.room.canonical_alias": 50
|
||||
# "m.room.encryption": 100
|
||||
# "m.room.history_visibility": 100
|
||||
# "m.room.name": 50
|
||||
# "m.room.power_levels": 100
|
||||
# "m.room.server_acl": 100
|
||||
# "m.room.tombstone": 100
|
||||
# "events_default": 1
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -43,7 +43,7 @@ loggers:
|
||||
The above logging config will set Synapse as 'INFO' logging level by default,
|
||||
with the SQL layer at 'WARNING', and will log to a file, stored as JSON.
|
||||
|
||||
It is also possible to figure Synapse to log to a remote endpoint by using the
|
||||
It is also possible to configure Synapse to log to a remote endpoint by using the
|
||||
`synapse.logging.RemoteHandler` class included with Synapse. It takes the
|
||||
following arguments:
|
||||
|
||||
|
||||
137 docs/upgrade.md
@@ -89,6 +89,143 @@ process, for example:
|
||||
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
|
||||
```
|
||||
|
||||
# Upgrading to v1.61.0
|
||||
|
||||
## Removal of deprecated community/groups
|
||||
|
||||
This release of Synapse will remove the deprecated community/groups feature from the codebase.
|
||||
|
||||
### Worker endpoints
|
||||
|
||||
For those who have deployed workers, the following worker endpoints will no longer
|
||||
exist and they can be removed from the reverse proxy configuration:
|
||||
|
||||
- `^/_matrix/federation/v1/get_groups_publicised$`
|
||||
- `^/_matrix/client/(r0|v3|unstable)/joined_groups$`
|
||||
- `^/_matrix/client/(r0|v3|unstable)/publicised_groups$`
|
||||
- `^/_matrix/client/(r0|v3|unstable)/publicised_groups/`
|
||||
- `^/_matrix/federation/v1/groups/`
|
||||
- `^/_matrix/client/(r0|v3|unstable)/groups/`
|
||||
|
||||
# Upgrading to v1.60.0
|
||||
|
||||
## Adding a new unique index to `state_group_edges` could fail if your database is corrupted
|
||||
|
||||
This release of Synapse will add a unique index to the `state_group_edges` table, in order
|
||||
to prevent accidentally introducing duplicate information (for example, because a database
|
||||
backup was restored multiple times).
|
||||
|
||||
Duplicate rows being present in this table could cause drastic performance problems; see
|
||||
[issue 11779](https://github.com/matrix-org/synapse/issues/11779) for more details.
|
||||
|
||||
If your Synapse database already has had duplicate rows introduced into this table,
|
||||
this could fail, with either of these errors:
|
||||
|
||||
|
||||
**On Postgres:**
|
||||
```
|
||||
synapse.storage.background_updates - 623 - INFO - background_updates-0 - Adding index state_group_edges_unique_idx to state_group_edges
|
||||
synapse.storage.background_updates - 282 - ERROR - background_updates-0 - Error doing update
|
||||
...
|
||||
psycopg2.errors.UniqueViolation: could not create unique index "state_group_edges_unique_idx"
|
||||
DETAIL: Key (state_group, prev_state_group)=(2, 1) is duplicated.
|
||||
```
|
||||
(The numbers may be different.)
|
||||
|
||||
**On SQLite:**
|
||||
```
|
||||
synapse.storage.background_updates - 623 - INFO - background_updates-0 - Adding index state_group_edges_unique_idx to state_group_edges
|
||||
synapse.storage.background_updates - 282 - ERROR - background_updates-0 - Error doing update
|
||||
...
|
||||
sqlite3.IntegrityError: UNIQUE constraint failed: state_group_edges.state_group, state_group_edges.prev_state_group
|
||||
```
|
||||
|
||||
|
||||
<details>
|
||||
<summary><b>Expand this section for steps to resolve this problem</b></summary>
|
||||
|
||||
### On Postgres
|
||||
|
||||
Connect to your database with `psql`.
|
||||
|
||||
```sql
|
||||
BEGIN;
|
||||
DELETE FROM state_group_edges WHERE (ctid, state_group, prev_state_group) IN (
|
||||
SELECT row_id, state_group, prev_state_group
|
||||
FROM (
|
||||
SELECT
|
||||
ctid AS row_id,
|
||||
MIN(ctid) OVER (PARTITION BY state_group, prev_state_group) AS min_row_id,
|
||||
state_group,
|
||||
prev_state_group
|
||||
FROM state_group_edges
|
||||
) AS t1
|
||||
WHERE row_id <> min_row_id
|
||||
);
|
||||
COMMIT;
|
||||
```
|
||||
|
||||
|
||||
### On SQLite
|
||||
|
||||
At the command-line, use `sqlite3 path/to/your-homeserver-database.db`:
|
||||
|
||||
```sql
|
||||
BEGIN;
|
||||
DELETE FROM state_group_edges WHERE (rowid, state_group, prev_state_group) IN (
|
||||
SELECT row_id, state_group, prev_state_group
|
||||
FROM (
|
||||
SELECT
|
||||
rowid AS row_id,
|
||||
MIN(rowid) OVER (PARTITION BY state_group, prev_state_group) AS min_row_id,
|
||||
state_group,
|
||||
prev_state_group
|
||||
FROM state_group_edges
|
||||
)
|
||||
WHERE row_id <> min_row_id
|
||||
);
|
||||
COMMIT;
|
||||
```
|
||||
|
||||
|
||||
### For more details
|
||||
|
||||
[This comment on issue 11779](https://github.com/matrix-org/synapse/issues/11779#issuecomment-1131545970)
|
||||
has queries that can be used to check a database for this problem in advance.
|
||||
|
||||
</details>
|
||||
|
||||
## New signature for the spam checker callback `check_event_for_spam`
|
||||
|
||||
The previous signature has been deprecated.
|
||||
|
||||
Whereas `check_event_for_spam` callbacks used to return `Union[str, bool]`, they should now return `Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes"]`.
|
||||
|
||||
This is part of an ongoing refactoring of the SpamChecker API to make it less ambiguous and more powerful.
|
||||
|
||||
If your module implements `check_event_for_spam` as follows:
|
||||
|
||||
```python
|
||||
async def check_event_for_spam(event):
|
||||
if ...:
|
||||
# Event is spam
|
||||
return True
|
||||
# Event is not spam
|
||||
return False
|
||||
```
|
||||
|
||||
you should rewrite it as follows:
|
||||
|
||||
```python
|
||||
async def check_event_for_spam(event):
|
||||
if ...:
|
||||
# Event is spam, mark it as forbidden (you may use some more precise error
|
||||
# code if it is useful).
|
||||
return synapse.module_api.errors.Codes.FORBIDDEN
|
||||
# Event is not spam, mark it as such.
|
||||
return synapse.module_api.NOT_SPAM
|
||||
```
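Modules that need to run against both older and newer Synapse releases can feature-detect the new API at import time. A hedged sketch (`is_spam` is a placeholder for the module's own classification logic):

```python
try:
    from synapse.module_api import NOT_SPAM
    from synapse.module_api.errors import Codes
    HAS_NEW_SPAM_API = True
except ImportError:  # Synapse older than v1.60.0
    HAS_NEW_SPAM_API = False

async def check_event_for_spam(event):
    if is_spam(event):  # placeholder for the module's own logic
        return Codes.FORBIDDEN if HAS_NEW_SPAM_API else True
    return NOT_SPAM if HAS_NEW_SPAM_API else False
```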
|
||||
|
||||
# Upgrading to v1.59.0
|
||||
|
||||
## Device name lookup over federation has been disabled by default
|
||||
|
||||
@@ -23,6 +23,14 @@ followed by a letter. Letters have the following meanings:
|
||||
For example, setting `redaction_retention_period: 5m` would remove redacted
|
||||
messages from the database after 5 minutes, rather than 5 months.
|
||||
|
||||
In addition, configuration options referring to size use the following suffixes:
|
||||
|
||||
* `M` = MiB, or 1,048,576 bytes
|
||||
* `K` = KiB, or 1024 bytes
|
||||
|
||||
For example, setting `max_avatar_size: 10M` means that Synapse will not accept files larger than 10,485,760 bytes
|
||||
for a user avatar.
|
||||
|
||||
### YAML
|
||||
The configuration file is a [YAML](https://yaml.org/) file, which means that certain syntax rules
|
||||
apply if you want your config file to be read properly. A few helpful things to know:
|
||||
@@ -467,13 +475,13 @@ Sub-options for each listener include:
|
||||
|
||||
Valid resource names are:
|
||||
|
||||
* `client`: the client-server API (/_matrix/client), and the synapse admin API (/_synapse/admin). Also implies 'media' and 'static'.
|
||||
* `client`: the client-server API (/_matrix/client), and the synapse admin API (/_synapse/admin). Also implies `media` and `static`.
|
||||
|
||||
* `consent`: user consent forms (/_matrix/consent). See [here](../../consent_tracking.md) for more.
|
||||
|
||||
* `federation`: the server-server API (/_matrix/federation). Also implies `media`, `keys`, `openid`
|
||||
|
||||
* `keys`: the key discovery API (/_matrix/keys).
|
||||
* `keys`: the key discovery API (/_matrix/key).
|
||||
|
||||
* `media`: the media API (/_matrix/media).
|
||||
|
||||
@@ -567,6 +575,18 @@ Example configuration:
|
||||
dummy_events_threshold: 5
|
||||
```
|
||||
---
|
||||
Config option `delete_stale_devices_after`
|
||||
|
||||
An optional duration. If set, Synapse will run a daily background task to log out and
|
||||
delete any device that hasn't been accessed for more than the specified amount of time.
|
||||
|
||||
Defaults to no duration, which means devices are never pruned.
|
||||
|
||||
Example configuration:
|
||||
```yaml
|
||||
delete_stale_devices_after: 1y
|
||||
```
|
||||
|
||||
## Homeserver blocking ##
|
||||
Useful options for Synapse admins.
|
||||
|
||||
@@ -1119,7 +1139,22 @@ Caching can be configured through the following sub-options:
|
||||
with intermittent connections, at the cost of higher memory usage.
|
||||
By default, this is zero, which means that sync responses are not cached
|
||||
at all.
|
||||
|
||||
* `cache_autotuning` and its sub-options `max_cache_memory_usage`, `target_cache_memory_usage`, and
|
||||
`min_cache_ttl` work in conjunction with each other to maintain a balance between cache memory
|
||||
usage and cache entry availability. You must be using [jemalloc](https://github.com/matrix-org/synapse#help-synapse-is-slow-and-eats-all-my-ramcpu)
|
||||
to utilize this option, and all three of the options must be specified for this feature to work. This option
|
||||
defaults to off; enable it by providing values for the sub-options listed below. Please note that the feature will not work
|
||||
and may cause unstable behavior (such as excessive emptying of caches or exceptions) if all of the values are not provided.
|
||||
Please see the [Config Conventions](#config-conventions) for information on how to specify memory size and cache expiry
|
||||
durations.
|
||||
* `max_cache_memory_usage` sets a ceiling on how much memory the cache can use before caches begin to be continuously evicted.
|
||||
They will continue to be evicted until the memory usage drops below the `target_cache_memory_usage`, set in
|
||||
the setting below, or until the `min_cache_ttl` is hit. There is no default value for this option.
|
||||
* `target_cache_memory_usage` sets a rough target for the desired memory usage of the caches. There is no default value
|
||||
for this option.
|
||||
* `min_cache_ttl` sets a limit under which newer cache entries are not evicted and is only applied when
|
||||
caches are actively being evicted/`max_cache_memory_usage` has been exceeded. This is to protect hot caches
|
||||
from being emptied while Synapse is evicting due to memory. There is no default value for this option.
|
||||
|
||||
Example configuration:
|
||||
```yaml
|
||||
@@ -1127,9 +1162,29 @@ caches:
|
||||
global_factor: 1.0
|
||||
per_cache_factors:
|
||||
get_users_who_share_room_with_user: 2.0
|
||||
expire_caches: false
|
||||
sync_response_cache_duration: 2m
|
||||
cache_autotuning:
|
||||
max_cache_memory_usage: 1024M
|
||||
target_cache_memory_usage: 758M
|
||||
min_cache_ttl: 5m
|
||||
```
|
||||
|
||||
### Reloading cache factors
|
||||
|
||||
The cache factors (i.e. `caches.global_factor` and `caches.per_cache_factors`) may be reloaded at any time by sending a
|
||||
[`SIGHUP`](https://en.wikipedia.org/wiki/SIGHUP) signal to Synapse using e.g.
|
||||
|
||||
```commandline
|
||||
kill -HUP [PID_OF_SYNAPSE_PROCESS]
|
||||
```
|
||||
|
||||
If you are running multiple workers, you must individually update the worker
|
||||
config file and send this signal to each worker process.
|
||||
|
||||
If you're using the [example systemd service](https://github.com/matrix-org/synapse/blob/develop/contrib/systemd/matrix-synapse.service)
|
||||
file in Synapse's `contrib` directory, you can send a `SIGHUP` signal by using
|
||||
`systemctl reload matrix-synapse`.
|
||||
|
||||
---
|
||||
## Database ##
|
||||
Config options related to database settings.
|
||||
@@ -1164,7 +1219,7 @@ For more information on using Synapse with Postgres,
|
||||
see [here](../../postgres.md).
|
||||
|
||||
Example SQLite configuration:
|
||||
```
|
||||
```yaml
|
||||
database:
|
||||
name: sqlite3
|
||||
args:
|
||||
@@ -1172,7 +1227,7 @@ database:
|
||||
```
|
||||
|
||||
Example Postgres configuration:
|
||||
```
|
||||
```yaml
|
||||
database:
|
||||
name: psycopg2
|
||||
txn_limit: 10000
|
||||
@@ -1327,6 +1382,20 @@ This option sets ratelimiting how often invites can be sent in a room or to a
|
||||
specific user. `per_room` defaults to `per_second: 0.3`, `burst_count: 10` and
|
||||
`per_user` defaults to `per_second: 0.003`, `burst_count: 5`.
|
||||
|
||||
Client requests that invite user(s) when [creating a
|
||||
room](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3createroom)
|
||||
will count against the `rc_invites.per_room` limit, whereas
|
||||
client requests to [invite a single user to a
|
||||
room](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3roomsroomidinvite)
|
||||
will count against both the `rc_invites.per_user` and `rc_invites.per_room` limits.
|
||||
|
||||
Federation requests to invite a user will count against the `rc_invites.per_user`
|
||||
limit only, as Synapse presumes ratelimiting by room will be done by the sending server.
|
||||
|
||||
The `rc_invites.per_user` limit applies to the *receiver* of the invite, rather than the
|
||||
sender, meaning that a `rc_invite.per_user.burst_count` of 5 mandates that a single user
|
||||
cannot *receive* more than a burst of 5 invites at a time.
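The `per_second`/`burst_count` pair behaves like a token bucket: the bucket holds up to `burst_count` actions and refills at `per_second`. A minimal Python sketch of those semantics (an illustration, not Synapse's actual `Ratelimiter`):

```python
import time

class TokenBucket:
    """Illustrative model of `per_second` (refill rate) and
    `burst_count` (bucket size)."""

    def __init__(self, per_second: float, burst_count: int) -> None:
        self.per_second = per_second
        self.burst_count = burst_count
        self.tokens = float(burst_count)
        self.last_update = time.monotonic()

    def can_do_action(self) -> bool:
        now = time.monotonic()
        # Refill at `per_second`, capped at `burst_count`.
        self.tokens = min(
            float(self.burst_count),
            self.tokens + (now - self.last_update) * self.per_second,
        )
        self.last_update = now
        if self.tokens >= 1.0:
            self.tokens -= 1.0
            return True
        return False
```

With the defaults above, `per_user` (`per_second: 0.003`, `burst_count: 5`) allows a user to receive five invites in quick succession, after which roughly one further invite is admitted every five to six minutes.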
|
||||
|
||||
Example configuration:
|
||||
```yaml
|
||||
rc_invites:
|
||||
@@ -1390,7 +1459,7 @@ federation_rr_transactions_per_room_per_second: 40
|
||||
```
|
||||
---
|
||||
## Media Store ##
|
||||
Config options relating to Synapse media store.
|
||||
Config options related to Synapse's media store.
|
||||
|
||||
---
|
||||
Config option: `enable_media_repo`
|
||||
@@ -1494,6 +1563,39 @@ thumbnail_sizes:
|
||||
height: 600
|
||||
method: scale
|
||||
```
|
||||
---
|
||||
Config option: `media_retention`
|
||||
|
||||
Controls whether local media and entries in the remote media cache
|
||||
(media that is downloaded from other homeservers) should be removed
|
||||
under certain conditions, typically for the purpose of saving space.
|
||||
|
||||
Purging media files will be carried out by the media worker
|
||||
(that is, the worker that has the `enable_media_repo` homeserver config
|
||||
option set to 'true'). This may be the main process.
|
||||
|
||||
The `media_retention.local_media_lifetime` and
|
||||
`media_retention.remote_media_lifetime` config options control whether
|
||||
media will be purged if it has not been accessed in a given amount of
|
||||
time. Note that media is 'accessed' when loaded in a room in a client, or
|
||||
otherwise downloaded by a local or remote user. If the media has never
|
||||
been accessed, the media's creation time is used instead. Both thumbnails
|
||||
and the original media will be removed. If either of these options is unset,
|
||||
then media of that type will not be purged.
|
||||
|
||||
Local or cached remote media that has been
|
||||
[quarantined](../../admin_api/media_admin_api.md#quarantining-media-in-a-room)
|
||||
will not be deleted. Similarly, local media that has been marked as
|
||||
[protected from quarantine](../../admin_api/media_admin_api.md#protecting-media-from-being-quarantined)
|
||||
will not be deleted.
|
||||
|
||||
Example configuration:
|
||||
```yaml
|
||||
media_retention:
|
||||
local_media_lifetime: 90d
|
||||
remote_media_lifetime: 14d
|
||||
```
|
||||
---
|
||||
Config option: `url_preview_enabled`
|
||||
|
||||
This setting determines whether the preview URL API is enabled.
|
||||
@@ -1635,10 +1737,10 @@ Defaults to "en".
|
||||
Example configuration:
|
||||
```yaml
|
||||
url_preview_accept_language:
|
||||
- en-UK
|
||||
- en-US;q=0.9
|
||||
- fr;q=0.8
|
||||
- *;q=0.7
|
||||
- 'en-UK'
|
||||
- 'en-US;q=0.9'
|
||||
- 'fr;q=0.8'
|
||||
- '*;q=0.7'
|
||||
```
|
||||
----
|
||||
Config option: `oembed`
|
||||
@@ -2873,6 +2975,9 @@ Use this setting to enable password-based logins.
|
||||
|
||||
This setting has the following sub-options:
|
||||
* `enabled`: Defaults to true.
|
||||
Set to false to disable password authentication.
|
||||
Set to `only_for_reauth` to allow users with existing passwords to use them
|
||||
to log in and reauthenticate, whilst preventing new users from setting passwords.
|
||||
* `localdb_enabled`: Set to false to disable authentication against the local password
|
||||
database. This is ignored if `enabled` is false, and is only useful
|
||||
if you have other `password_providers`. Defaults to true.
|
||||
@@ -3088,25 +3193,6 @@ Example configuration:
|
||||
encryption_enabled_by_default_for_room_type: invite
|
||||
```
|
||||
---
|
||||
Config option: `enable_group_creation`
|
||||
|
||||
Set to true to allow non-server-admin users to create groups on this server
|
||||
|
||||
Example configuration:
|
||||
```yaml
|
||||
enable_group_creation: true
|
||||
```
|
||||
---
|
||||
Config option: `group_creation_prefix`
|
||||
|
||||
If enabled/present, non-server admins can only create groups with local parts
|
||||
starting with this prefix.
|
||||
|
||||
Example configuration:
|
||||
```yaml
|
||||
group_creation_prefix: "unofficial_"
|
||||
```
|
||||
---
|
||||
Config option: `user_directory`
|
||||
|
||||
This setting defines options related to the user directory.
|
||||
@@ -3298,6 +3384,32 @@ room_list_publication_rules:
|
||||
room_id: "*"
|
||||
action: allow
|
||||
```
|
||||
|
||||
---
|
||||
Config option: `default_power_level_content_override`
|
||||
|
||||
The `default_power_level_content_override` option controls the default power
|
||||
levels for rooms.
|
||||
|
||||
Useful if you know that your users need special permissions in rooms
|
||||
that they create (e.g. to send particular types of state events without
|
||||
needing an elevated power level). This takes the same shape as the
|
||||
`power_level_content_override` parameter in the /createRoom API, but
|
||||
is applied before that parameter.
|
||||
|
||||
Note that each key provided inside a preset (for example `events` in the example
|
||||
below) will overwrite all existing defaults inside that key. So in the example
|
||||
below, newly-created private_chat rooms will have no rules for any event types
|
||||
except `com.example.foo`.
|
||||
|
||||
Example configuration:
|
||||
```yaml
|
||||
default_power_level_content_override:
|
||||
private_chat: { "events": { "com.example.foo" : 0 } }
|
||||
trusted_private_chat: null
|
||||
public_chat: null
|
||||
```
|
||||
|
||||
---
|
||||
## Opentracing ##
|
||||
Configuration options related to Opentracing support.
|
||||
@@ -3398,7 +3510,7 @@ stream_writers:
|
||||
typing: worker1
|
||||
```
|
||||
---
|
||||
Config option: `run_background_task_on`
|
||||
Config option: `run_background_tasks_on`
|
||||
|
||||
The worker that is used to run background tasks (e.g. cleaning up expired
|
||||
data). If not provided this defaults to the main process.
|
||||
|
||||
@@ -24,6 +24,11 @@ Finally, we also stylise the chapter titles in the left sidebar by indenting the
|
||||
slightly so that they are more visually distinguishable from the section headers
|
||||
(the bold titles). This is done through the `indent-section-headers.css` file.
|
||||
|
||||
In addition to these modifications, we have added a version picker to the documentation.
|
||||
Users can switch between the documentation for different versions of Synapse.
|
||||
This functionality was implemented through the `version-picker.js` and
|
||||
`version-picker.css` files.
|
||||
|
||||
More information can be found in mdbook's official documentation for
|
||||
[injecting page JS/CSS](https://rust-lang.github.io/mdBook/format/config.html)
|
||||
and
|
||||
|
||||
@@ -131,6 +131,18 @@
|
||||
<i class="fa fa-search"></i>
|
||||
</button>
|
||||
{{/if}}
|
||||
<div class="version-picker">
|
||||
<div class="dropdown">
|
||||
<div class="select">
|
||||
<span></span>
|
||||
<i class="fa fa-chevron-down"></i>
|
||||
</div>
|
||||
<input type="hidden" name="version">
|
||||
<ul class="dropdown-menu">
|
||||
<!-- Versions will be added dynamically in version-picker.js -->
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<h1 class="menu-title">{{ book_title }}</h1>
|
||||
@@ -309,4 +321,4 @@
|
||||
{{/if}}
|
||||
|
||||
</body>
|
||||
</html>
|
||||
</html>
|
||||
|
||||
78 docs/website_files/version-picker.css (new file)
@@ -0,0 +1,78 @@
|
||||
.version-picker {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
.version-picker .dropdown {
|
||||
width: 130px;
|
||||
max-height: 29px;
|
||||
margin-left: 10px;
|
||||
display: inline-block;
|
||||
border-radius: 4px;
|
||||
border: 1px solid var(--theme-popup-border);
|
||||
position: relative;
|
||||
font-size: 13px;
|
||||
color: var(--fg);
|
||||
height: 100%;
|
||||
text-align: left;
|
||||
}
|
||||
.version-picker .dropdown .select {
|
||||
cursor: pointer;
|
||||
display: block;
|
||||
padding: 5px 2px 5px 15px;
|
||||
}
|
||||
.version-picker .dropdown .select > i {
|
||||
font-size: 10px;
|
||||
color: var(--fg);
|
||||
cursor: pointer;
|
||||
float: right;
|
||||
line-height: 20px !important;
|
||||
}
|
||||
.version-picker .dropdown:hover {
|
||||
border: 1px solid var(--theme-popup-border);
|
||||
}
|
||||
.version-picker .dropdown:active {
|
||||
background-color: var(--theme-popup-bg);
|
||||
}
|
||||
.version-picker .dropdown.active:hover,
|
||||
.version-picker .dropdown.active {
|
||||
border: 1px solid var(--theme-popup-border);
|
||||
border-radius: 2px 2px 0 0;
|
||||
background-color: var(--theme-popup-bg);
|
||||
}
|
||||
.version-picker .dropdown.active .select > i {
|
||||
transform: rotate(-180deg);
|
||||
}
|
||||
.version-picker .dropdown .dropdown-menu {
|
||||
position: absolute;
|
||||
background-color: var(--theme-popup-bg);
|
||||
width: 100%;
|
||||
left: -1px;
|
||||
right: 1px;
|
||||
margin-top: 1px;
|
||||
border: 1px solid var(--theme-popup-border);
|
||||
border-radius: 0 0 4px 4px;
|
||||
overflow: hidden;
|
||||
display: none;
|
||||
max-height: 300px;
|
||||
overflow-y: auto;
|
||||
z-index: 9;
|
||||
}
|
||||
.version-picker .dropdown .dropdown-menu li {
|
||||
font-size: 12px;
|
||||
padding: 6px 20px;
|
||||
cursor: pointer;
|
||||
}
|
||||
.version-picker .dropdown .dropdown-menu {
|
||||
padding: 0;
|
||||
list-style: none;
|
||||
}
|
||||
.version-picker .dropdown .dropdown-menu li:hover {
|
||||
background-color: var(--theme-hover);
|
||||
}
|
||||
.version-picker .dropdown .dropdown-menu li.active::before {
|
||||
display: inline-block;
|
||||
content: "✓";
|
||||
margin-inline-start: -14px;
|
||||
width: 14px;
|
||||
}
|
||||
127 docs/website_files/version-picker.js (new file)
@@ -0,0 +1,127 @@
|
||||
|
||||
const dropdown = document.querySelector('.version-picker .dropdown');
|
||||
const dropdownMenu = dropdown.querySelector('.dropdown-menu');
|
||||
|
||||
fetchVersions(dropdown, dropdownMenu).then(() => {
|
||||
initializeVersionDropdown(dropdown, dropdownMenu);
|
||||
});
|
||||
|
||||
/**
|
||||
* Initialize the dropdown functionality for version selection.
|
||||
*
|
||||
* @param {Element} dropdown - The dropdown element.
|
||||
* @param {Element} dropdownMenu - The dropdown menu element.
|
||||
*/
|
||||
function initializeVersionDropdown(dropdown, dropdownMenu) {
|
||||
// Toggle the dropdown menu on click
|
||||
dropdown.addEventListener('click', function () {
|
||||
this.setAttribute('tabindex', 1);
|
||||
this.classList.toggle('active');
|
||||
dropdownMenu.style.display = (dropdownMenu.style.display === 'block') ? 'none' : 'block';
|
||||
});
|
||||
|
||||
// Remove the 'active' class and hide the dropdown menu on focusout
|
||||
dropdown.addEventListener('focusout', function () {
|
||||
this.classList.remove('active');
|
||||
dropdownMenu.style.display = 'none';
|
||||
});
|
||||
|
||||
// Handle item selection within the dropdown menu
|
||||
const dropdownMenuItems = dropdownMenu.querySelectorAll('li');
|
||||
dropdownMenuItems.forEach(function (item) {
|
||||
item.addEventListener('click', function () {
|
||||
dropdownMenuItems.forEach(function (item) {
|
||||
item.classList.remove('active');
|
||||
});
|
||||
this.classList.add('active');
|
||||
dropdown.querySelector('span').textContent = this.textContent;
|
||||
dropdown.querySelector('input').value = this.getAttribute('id');
|
||||
|
||||
window.location.href = changeVersion(window.location.href, this.textContent);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
/**
|
||||
* This function fetches the available versions from a GitHub repository
|
||||
* and inserts them into the version picker.
|
||||
*
|
||||
* @param {Element} dropdown - The dropdown element.
|
||||
* @param {Element} dropdownMenu - The dropdown menu element.
|
||||
* @returns {Promise<Array<string>>} A promise that resolves with an array of available versions.
|
||||
*/
|
||||
function fetchVersions(dropdown, dropdownMenu) {
|
||||
return new Promise((resolve, reject) => {
|
||||
window.addEventListener("load", () => {
|
||||
|
||||
fetch("https://api.github.com/repos/matrix-org/synapse/git/trees/gh-pages", {
|
||||
cache: "force-cache",
|
||||
}).then(res =>
|
||||
res.json()
|
||||
).then(resObject => {
|
||||
const excluded = ['dev-docs', 'v1.91.0', 'v1.80.0', 'v1.69.0'];
|
||||
const tree = resObject.tree.filter(item => item.type === "tree" && !excluded.includes(item.path));
|
||||
const versions = tree.map(item => item.path).sort(sortVersions);
|
||||
|
||||
// Create a list of <li> items for versions
|
||||
versions.forEach((version) => {
|
||||
const li = document.createElement("li");
|
||||
li.textContent = version;
|
||||
li.id = version;
|
||||
|
||||
if (window.SYNAPSE_VERSION === version) {
|
||||
li.classList.add('active');
|
||||
dropdown.querySelector('span').textContent = version;
|
||||
dropdown.querySelector('input').value = version;
|
||||
}
|
||||
|
||||
dropdownMenu.appendChild(li);
|
||||
});
|
||||
|
||||
resolve(versions);
|
||||
|
||||
}).catch(ex => {
|
||||
console.error("Failed to fetch version data", ex);
|
||||
reject(ex);
|
||||
})
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Custom sorting function to sort an array of version strings.
|
||||
*
|
||||
* @param {string} a - The first version string to compare.
|
||||
* @param {string} b - The second version string to compare.
|
||||
* @returns {number} - A negative number if a should come before b, a positive number if b should come before a, or 0 if they are equal.
|
||||
*/
|
||||
function sortVersions(a, b) {
|
||||
// Put 'develop' and 'latest' at the top
|
||||
if (a === 'develop' || a === 'latest') return -1;
|
||||
if (b === 'develop' || b === 'latest') return 1;
|
||||
|
||||
const versionA = (a.match(/v\d+(\.\d+)+/) || [])[0];
|
||||
const versionB = (b.match(/v\d+(\.\d+)+/) || [])[0];
|
||||
|
||||
return versionB.localeCompare(versionA);
|
||||
}
|
||||
|
||||
/**
|
||||
* Change the version in a URL path.
|
||||
*
|
||||
* @param {string} url - The original URL to be modified.
|
||||
* @param {string} newVersion - The new version to replace the existing version in the URL.
|
||||
* @returns {string} The updated URL with the new version.
|
||||
*/
|
||||
function changeVersion(url, newVersion) {
|
||||
const parsedURL = new URL(url);
|
||||
const pathSegments = parsedURL.pathname.split('/');
|
||||
|
||||
// Modify the version
|
||||
pathSegments[2] = newVersion;
|
||||
|
||||
// Reconstruct the URL
|
||||
parsedURL.pathname = pathSegments.join('/');
|
||||
|
||||
return parsedURL.href;
|
||||
}
|
||||
1 docs/website_files/version.js (new file)
@@ -0,0 +1 @@
|
||||
window.SYNAPSE_VERSION = 'v1.61';
|
||||
@@ -7,10 +7,10 @@ team.
|
||||
## Installing and using Synapse
|
||||
|
||||
This documentation covers topics for **installation**, **configuration** and
|
||||
**maintainence** of your Synapse process:
|
||||
**maintenance** of your Synapse process:
|
||||
|
||||
* Learn how to [install](setup/installation.md) and
|
||||
[configure](usage/configuration/index.html) your own instance, perhaps with [Single
|
||||
[configure](usage/configuration/config_documentation.md) your own instance, perhaps with [Single
|
||||
Sign-On](usage/configuration/user_authentication/index.html).
|
||||
|
||||
* See how to [upgrade](upgrade.md) between Synapse versions.
|
||||
@@ -65,7 +65,7 @@ following documentation:
|
||||
|
||||
Want to help keep Synapse going but don't know how to code? Synapse is a
|
||||
[Matrix.org Foundation](https://matrix.org) project. Consider becoming a
|
||||
supportor on [Liberapay](https://liberapay.com/matrixdotorg),
|
||||
supporter on [Liberapay](https://liberapay.com/matrixdotorg),
|
||||
[Patreon](https://patreon.com/matrixdotorg) or through
|
||||
[PayPal](https://paypal.me/matrixdotorg) via a one-time donation.
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Scaling synapse via workers
|
||||
|
||||
For small instances it recommended to run Synapse in the default monolith mode.
|
||||
For small instances it is recommended to run Synapse in the default monolith mode.
|
||||
For larger instances where performance is a concern it can be helpful to split
|
||||
out functionality into multiple separate python processes. These processes are
|
||||
called 'workers', and are (eventually) intended to scale horizontally
|
||||
@@ -191,9 +191,8 @@ information.
|
||||
^/_matrix/federation/v1/event_auth/
|
||||
^/_matrix/federation/v1/exchange_third_party_invite/
|
||||
^/_matrix/federation/v1/user/devices/
|
||||
^/_matrix/federation/v1/get_groups_publicised$
|
||||
^/_matrix/key/v2/query
|
||||
^/_matrix/federation/(v1|unstable/org.matrix.msc2946)/hierarchy/
|
||||
^/_matrix/federation/v1/hierarchy/
|
||||
|
||||
# Inbound federation transaction request
|
||||
^/_matrix/federation/v1/send/
|
||||
@@ -205,15 +204,14 @@ information.
|
||||
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/context/.*$
|
||||
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$
|
||||
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$
|
||||
^/_matrix/client/(v1|unstable/org.matrix.msc2946)/rooms/.*/hierarchy$
|
||||
^/_matrix/client/v1/rooms/.*/hierarchy$
|
||||
^/_matrix/client/unstable/org.matrix.msc2716/rooms/.*/batch_send$
|
||||
^/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$
|
||||
^/_matrix/client/(r0|v3|unstable)/account/3pid$
|
||||
^/_matrix/client/(r0|v3|unstable)/account/whoami$
|
||||
^/_matrix/client/(r0|v3|unstable)/devices$
|
||||
^/_matrix/client/versions$
|
||||
^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$
|
||||
^/_matrix/client/(r0|v3|unstable)/joined_groups$
|
||||
^/_matrix/client/(r0|v3|unstable)/publicised_groups$
|
||||
^/_matrix/client/(r0|v3|unstable)/publicised_groups/
|
||||
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event/
|
||||
^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms$
|
||||
^/_matrix/client/(api/v1|r0|v3|unstable)/search$
|
||||
@@ -237,9 +235,6 @@ information.
|
||||
^/_matrix/client/(api/v1|r0|v3|unstable)/join/
|
||||
^/_matrix/client/(api/v1|r0|v3|unstable)/profile/
|
||||
|
||||
# Device requests
|
||||
^/_matrix/client/(r0|v3|unstable)/sendToDevice/
|
||||
|
||||
# Account data requests
|
||||
^/_matrix/client/(r0|v3|unstable)/.*/tags
|
||||
^/_matrix/client/(r0|v3|unstable)/.*/account_data
|
||||
@@ -251,12 +246,12 @@ information.
|
||||
# Presence requests
|
||||
^/_matrix/client/(api/v1|r0|v3|unstable)/presence/
|
||||
|
||||
# User directory search requests
|
||||
^/_matrix/client/(r0|v3|unstable)/user_directory/search$
|
||||
|
||||
Additionally, the following REST endpoints can be handled for GET requests:
|
||||
|
||||
^/_matrix/federation/v1/groups/
|
||||
^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/
|
||||
^/_matrix/client/(r0|v3|unstable)/groups/
|
||||
|
||||
Pagination requests can also be handled, but all requests for a given
|
||||
room must be routed to the same instance. Additionally, care must be taken to
|
||||
@@ -448,6 +443,14 @@ update_user_directory_from_worker: worker_name
|
||||
This work cannot be load-balanced; please ensure the main process is restarted
|
||||
after setting this option in the shared configuration!
|
||||
|
||||
User directory updates allow REST endpoints matching the following regular
|
||||
expressions to work:
|
||||
|
||||
^/_matrix/client/(r0|v3|unstable)/user_directory/search$
|
||||
|
||||
The above endpoints can be routed to any worker, though you may choose to route
|
||||
them to the chosen user directory worker.
|
||||
|
||||
This style of configuration supersedes the legacy `synapse.app.user_dir`
|
||||
worker application type.
|
||||
|
||||
|
||||
140 mypy.ini
@@ -10,6 +10,7 @@ warn_unreachable = True
|
||||
warn_unused_ignores = True
|
||||
local_partial_types = True
|
||||
no_implicit_optional = True
|
||||
disallow_untyped_defs = True
|
||||
|
||||
files =
|
||||
docker/,
|
||||
@@ -27,9 +28,6 @@ exclude = (?x)
|
||||
|synapse/storage/databases/__init__.py
|
||||
|synapse/storage/databases/main/cache.py
|
||||
|synapse/storage/databases/main/devices.py
|
||||
|synapse/storage/databases/main/event_federation.py
|
||||
|synapse/storage/databases/main/push_rule.py
|
||||
|synapse/storage/databases/main/roommember.py
|
||||
|synapse/storage/schema/
|
||||
|
||||
|tests/api/test_auth.py
|
||||
@@ -43,16 +41,11 @@ exclude = (?x)
|
||||
|tests/events/test_utils.py
|
||||
|tests/federation/test_federation_catch_up.py
|
||||
|tests/federation/test_federation_sender.py
|
||||
|tests/federation/test_federation_server.py
|
||||
|tests/federation/transport/test_knocking.py
|
||||
|tests/federation/transport/test_server.py
|
||||
|tests/handlers/test_typing.py
|
||||
|tests/http/federation/test_matrix_federation_agent.py
|
||||
|tests/http/federation/test_srv_resolver.py
|
||||
|tests/http/test_fedclient.py
|
||||
|tests/http/test_proxyagent.py
|
||||
|tests/http/test_servlet.py
|
||||
|tests/http/test_site.py
|
||||
|tests/logging/__init__.py
|
||||
|tests/logging/test_terse_json.py
|
||||
|tests/module_api/test_api.py
|
||||
@@ -61,12 +54,9 @@ exclude = (?x)
|
||||
|tests/push/test_push_rule_evaluator.py
|
||||
|tests/rest/client/test_transactions.py
|
||||
|tests/rest/media/v1/test_media_storage.py
|
||||
|tests/scripts/test_new_matrix_user.py
|
||||
|tests/server.py
|
||||
|tests/server_notices/test_resource_limits_server_notices.py
|
||||
|tests/state/test_v2.py
|
||||
|tests/storage/test_base.py
|
||||
|tests/storage/test_roommember.py
|
||||
|tests/test_metrics.py
|
||||
|tests/test_server.py
|
||||
|tests/test_state.py
|
||||
@@ -89,131 +79,39 @@ exclude = (?x)
|
||||
|tests/utils.py
|
||||
)$
|
||||
|
||||
[mypy-synapse._scripts.*]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.api.*]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.app.*]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.appservice.*]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.config.*]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.crypto.*]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.event_auth]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.events.*]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.federation.*]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.federation.transport.client]
|
||||
disallow_untyped_defs = False
|
||||
|
||||
[mypy-synapse.handlers.*]
|
||||
disallow_untyped_defs = True
|
||||
[mypy-synapse.http.client]
|
||||
disallow_untyped_defs = False
|
||||
|
||||
[mypy-synapse.http.server]
|
||||
disallow_untyped_defs = True
|
||||
[mypy-synapse.http.matrixfederationclient]
|
||||
disallow_untyped_defs = False
|
||||
|
||||
[mypy-synapse.logging.context]
|
||||
disallow_untyped_defs = True
|
||||
[mypy-synapse.logging.opentracing]
|
||||
disallow_untyped_defs = False
|
||||
|
||||
[mypy-synapse.metrics.*]
|
||||
disallow_untyped_defs = True
|
||||
[mypy-synapse.logging.scopecontextmanager]
|
||||
disallow_untyped_defs = False
|
||||
|
||||
[mypy-synapse.metrics._reactor_metrics]
|
||||
disallow_untyped_defs = False
|
||||
# This module imports select.epoll. That exists on Linux, but doesn't on macOS.
|
||||
# See https://github.com/matrix-org/synapse/pull/11771.
|
||||
warn_unused_ignores = False
|
||||
|
||||
[mypy-synapse.module_api.*]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.notifier]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.push.*]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.replication.*]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.rest.*]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.server_notices.*]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.state.*]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.storage.databases.main.account_data]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.storage.databases.main.client_ips]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.storage.databases.main.directory]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.storage.databases.main.e2e_room_keys]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.storage.databases.main.end_to_end_keys]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.storage.databases.main.event_push_actions]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.storage.databases.main.events_bg_updates]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.storage.databases.main.events_worker]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.storage.databases.main.room]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.storage.databases.main.room_batch]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.storage.databases.main.profile]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.storage.databases.main.stats]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.storage.databases.main.state_deltas]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.storage.databases.main.transactions]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.storage.databases.main.user_erasure_store]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.storage.util.*]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.streams.*]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.util.*]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
[mypy-synapse.util.caches.treecache]
|
||||
disallow_untyped_defs = False
|
||||
|
||||
[mypy-synapse.server]
|
||||
disallow_untyped_defs = False
|
||||
|
||||
[mypy-synapse.storage.database]
|
||||
disallow_untyped_defs = False
|
||||
|
||||
[mypy-tests.*]
|
||||
disallow_untyped_defs = False
|
||||
|
||||
[mypy-tests.handlers.test_user_directory]
|
||||
disallow_untyped_defs = True
|
||||
|
||||
|
||||
14 poetry.lock (generated)
@@ -813,7 +813,7 @@ python-versions = ">=3.5"

 [[package]]
 name = "pyjwt"
-version = "2.3.0"
+version = "2.4.0"
 description = "JSON Web Token implementation in Python"
 category = "main"
 optional = false

@@ -1355,7 +1355,7 @@ python-versions = "*"

 [[package]]
 name = "types-jsonschema"
-version = "4.4.1"
+version = "4.4.6"
 description = "Typing stubs for jsonschema"
 category = "dev"
 optional = false

@@ -1563,7 +1563,7 @@ url_preview = ["lxml"]

 [metadata]
 lock-version = "1.1"
 python-versions = "^3.7.1"
-content-hash = "d39d5ac5d51c014581186b7691999b861058b569084c525523baf70b77f292b1"
+content-hash = "539e5326f401472d1ffc8325d53d72e544cd70156b3f43f32f1285c4c131f831"

 [metadata.files]
 attrs = [

@@ -2264,8 +2264,8 @@ pygments = [
     {file = "Pygments-2.11.2.tar.gz", hash = "sha256:4e426f72023d88d03b2fa258de560726ce890ff3b630f88c21cbb8b2503b8c6a"},
 ]
 pyjwt = [
-    {file = "PyJWT-2.3.0-py3-none-any.whl", hash = "sha256:e0c4bb8d9f0af0c7f5b1ec4c5036309617d03d56932877f2f7a0beeb5318322f"},
-    {file = "PyJWT-2.3.0.tar.gz", hash = "sha256:b888b4d56f06f6dcd777210c334e69c737be74755d3e5e9ee3fe67dc18a0ee41"},
+    {file = "PyJWT-2.4.0-py3-none-any.whl", hash = "sha256:72d1d253f32dbd4f5c88eaf1fdc62f3a19f676ccbadb9dbc5d07e951b2b26daf"},
+    {file = "PyJWT-2.4.0.tar.gz", hash = "sha256:d42908208c699b3b973cbeb01a969ba6a96c821eefb1c5bfe4c390c01d67abba"},
 ]
 pymacaroons = [
     {file = "pymacaroons-0.13.0-py2.py3-none-any.whl", hash = "sha256:3e14dff6a262fdbf1a15e769ce635a8aea72e6f8f91e408f9a97166c53b91907"},

@@ -2618,8 +2618,8 @@ types-ipaddress = [
     {file = "types_ipaddress-1.0.8-py3-none-any.whl", hash = "sha256:4933b74da157ba877b1a705d64f6fa7742745e9ffd65e51011f370c11ebedb55"},
 ]
 types-jsonschema = [
-    {file = "types-jsonschema-4.4.1.tar.gz", hash = "sha256:bd68b75217ebbb33b0242db10047581dad3b061a963a46ee80d4a9044080663e"},
-    {file = "types_jsonschema-4.4.1-py3-none-any.whl", hash = "sha256:ab3ecfdc912d6091cc82f4b7556cfbf1a7cbabc26da0ceaa1cbbc232d1d09971"},
+    {file = "types-jsonschema-4.4.6.tar.gz", hash = "sha256:7f2a804618756768c7c0616f8c794b61fcfe3077c7ee1ad47dcf01c5e5f692bb"},
+    {file = "types_jsonschema-4.4.6-py3-none-any.whl", hash = "sha256:1db9031ca49a8444d01bd2ce8cf2f89318382b04610953b108321e6f8fb03390"},
 ]
 types-opentracing = [
     {file = "types-opentracing-2.4.7.tar.gz", hash = "sha256:be60e9618355aa892571ace002e6b353702538b1c0dc4fbc1c921219d6658830"},
pyproject.toml

@@ -54,7 +54,7 @@ skip_gitignore = true

 [tool.poetry]
 name = "matrix-synapse"
-version = "1.59.0"
+version = "1.61.1"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "Apache-2.0"

@@ -113,7 +113,6 @@ unpaddedbase64 = ">=2.1.0"
 canonicaljson = ">=1.4.0"
 # we use the type definitions added in signedjson 1.1.
 signedjson = ">=1.1.0"
 PyNaCl = ">=1.2.1"
 # validating SSL certs for IP addresses requires service_identity 18.1.
 service-identity = ">=18.1.0"
 # Twisted 18.9 introduces some logger improvements that the structured
scripts-dev/complement.sh

@@ -45,6 +45,8 @@ docker build -t matrixdotorg/synapse -f "docker/Dockerfile" .

 extra_test_args=()

+test_tags="synapse_blacklist,msc2716,msc3030,msc3787"
+
 # If we're using workers, modify the docker files slightly.
 if [[ -n "$WORKERS" ]]; then
   # Build the workers docker image (from the base Synapse image).

@@ -65,6 +67,10 @@ if [[ -n "$WORKERS" ]]; then
 else
   export COMPLEMENT_BASE_IMAGE=complement-synapse
   COMPLEMENT_DOCKERFILE=Dockerfile
+
+  # We only test faster room joins on monoliths, because they are purposefully
+  # being developed without worker support to start with.
+  test_tags="$test_tags,faster_joins"
 fi

 # Build the Complement image from the Synapse image we just built.

@@ -73,4 +79,5 @@ docker build -t $COMPLEMENT_BASE_IMAGE -f "docker/complement/$COMPLEMENT_DOCKERF
 # Run the tests!
 echo "Images built; running complement"
 cd "$COMPLEMENT_DIR"
-go test -v -tags synapse_blacklist,msc2716,msc3030,faster_joins -count=1 "${extra_test_args[@]}" "$@" ./tests/...
+go test -v -tags $test_tags -count=1 "${extra_test_args[@]}" "$@" ./tests/...
scripts-dev/mypy_synapse_plugin.py

@@ -21,7 +21,7 @@ from typing import Callable, Optional, Type
 from mypy.nodes import ARG_NAMED_OPT
 from mypy.plugin import MethodSigContext, Plugin
 from mypy.typeops import bind_self
-from mypy.types import CallableType, NoneType
+from mypy.types import CallableType, NoneType, UnionType


 class SynapsePlugin(Plugin):

@@ -72,13 +72,20 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType:

     # Third, we add an optional "on_invalidate" argument.
     #
-    # This is a callable which accepts no input and returns nothing.
-    calltyp = CallableType(
-        arg_types=[],
-        arg_kinds=[],
-        arg_names=[],
-        ret_type=NoneType(),
-        fallback=ctx.api.named_generic_type("builtins.function", []),
+    # This is a either
+    # - a callable which accepts no input and returns nothing, or
+    # - None.
+    calltyp = UnionType(
+        [
+            NoneType(),
+            CallableType(
+                arg_types=[],
+                arg_kinds=[],
+                arg_names=[],
+                ret_type=NoneType(),
+                fallback=ctx.api.named_generic_type("builtins.function", []),
+            ),
+        ]
     )

     arg_types.append(calltyp)

@@ -95,7 +102,7 @@ def cached_function_method_signature(ctx: MethodSigContext) -> CallableType:


 def plugin(version: str) -> Type[SynapsePlugin]:
-    # This is the entry point of the plugin, and let's us deal with the fact
+    # This is the entry point of the plugin, and lets us deal with the fact
     # that the mypy plugin interface is *not* stable by looking at the version
     # string.
     #
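
The plugin change above widens the `on_invalidate` parameter that mypy synthesizes for `@cached` methods from a bare callable to a union with `None`, so callers may pass `None` explicitly. A rough standalone sketch of the resulting typing (hypothetical names, not the plugin itself):

```python
from typing import Callable, Optional

# After the change, the synthesized signature effectively gains a
# parameter of this type:
OnInvalidate = Optional[Callable[[], None]]

def get_user_by_id(user_id: str, on_invalidate: OnInvalidate = None) -> dict:
    # A zero-argument callback, or None to skip invalidation tracking.
    if on_invalidate is not None:
        on_invalidate()
    return {"user_id": user_id}

get_user_by_id("@alice:example.com", on_invalidate=None)  # now type-checks
```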
synapse/_scripts/hash_password.py

@@ -46,14 +46,14 @@ def main() -> None:
             "Path to server config file. "
             "Used to read in bcrypt_rounds and password_pepper."
         ),
+        required=True,
     )

     args = parser.parse_args()
-    if "config" in args and args.config:
-        config = yaml.safe_load(args.config)
-        bcrypt_rounds = config.get("bcrypt_rounds", bcrypt_rounds)
-        password_config = config.get("password_config", None) or {}
-        password_pepper = password_config.get("pepper", password_pepper)
+    config = yaml.safe_load(args.config)
+    bcrypt_rounds = config.get("bcrypt_rounds", bcrypt_rounds)
+    password_config = config.get("password_config", None) or {}
+    password_pepper = password_config.get("pepper", password_pepper)
     password = args.password

     if not password:
synapse/_scripts/synapse_port_db.py

@@ -62,7 +62,7 @@ from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyBackground
 from synapse.storage.databases.main.events_bg_updates import (
     EventsBackgroundUpdatesStore,
 )
-from synapse.storage.databases.main.group_server import GroupServerWorkerStore
+from synapse.storage.databases.main.group_server import GroupServerStore
 from synapse.storage.databases.main.media_repository import (
     MediaRepositoryBackgroundUpdateStore,
 )

@@ -102,14 +102,6 @@ BOOLEAN_COLUMNS = {
     "devices": ["hidden"],
     "device_lists_outbound_pokes": ["sent"],
     "users_who_share_rooms": ["share_private"],
-    "groups": ["is_public"],
-    "group_rooms": ["is_public"],
-    "group_users": ["is_public", "is_admin"],
-    "group_summary_rooms": ["is_public"],
-    "group_room_categories": ["is_public"],
-    "group_summary_users": ["is_public"],
-    "group_roles": ["is_public"],
-    "local_group_membership": ["is_publicised", "is_admin"],
     "e2e_room_keys": ["is_verified"],
     "account_validity": ["email_sent"],
     "redactions": ["have_censored"],

@@ -175,6 +167,22 @@ IGNORED_TABLES = {
     "ui_auth_sessions",
     "ui_auth_sessions_credentials",
     "ui_auth_sessions_ips",
+    # Groups/communities is no longer supported.
+    "group_attestations_remote",
+    "group_attestations_renewals",
+    "group_invites",
+    "group_roles",
+    "group_room_categories",
+    "group_rooms",
+    "group_summary_roles",
+    "group_summary_room_categories",
+    "group_summary_rooms",
+    "group_summary_users",
+    "group_users",
+    "groups",
+    "local_group_membership",
+    "local_group_updates",
+    "remote_profile_cache",
 }


@@ -211,7 +219,7 @@ class Store(
     PushRuleStore,
     PusherWorkerStore,
     PresenceBackgroundUpdateStore,
-    GroupServerWorkerStore,
+    GroupServerStore,
 ):
     def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]:
         return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)
synapse/api/auth.py

@@ -29,12 +29,11 @@ from synapse.api.errors import (
     MissingClientTokenError,
 )
 from synapse.appservice import ApplicationService
-from synapse.events import EventBase
 from synapse.http import get_request_user_agent
 from synapse.http.site import SynapseRequest
 from synapse.logging.opentracing import active_span, force_tracing, start_active_span
 from synapse.storage.databases.main.registration import TokenLookupResult
-from synapse.types import Requester, StateMap, UserID, create_requester
+from synapse.types import Requester, UserID, create_requester
 from synapse.util.caches.lrucache import LruCache
 from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry

@@ -61,8 +60,8 @@ class Auth:
         self.hs = hs
         self.clock = hs.get_clock()
         self.store = hs.get_datastores().main
-        self.state = hs.get_state_handler()
         self._account_validity_handler = hs.get_account_validity_handler()
+        self._storage_controllers = hs.get_storage_controllers()

         self.token_cache: LruCache[str, Tuple[str, bool]] = LruCache(
             10000, "token_cache"

@@ -79,9 +78,8 @@ class Auth:
         self,
         room_id: str,
         user_id: str,
-        current_state: Optional[StateMap[EventBase]] = None,
         allow_departed_users: bool = False,
-    ) -> EventBase:
+    ) -> Tuple[str, Optional[str]]:
         """Check if the user is in the room, or was at some point.

         Args:
             room_id: The room to check.

@@ -99,29 +97,28 @@ class Auth:
         Raises:
             AuthError if the user is/was not in the room.

         Returns:
-            Membership event for the user if the user was in the
-            room. This will be the join event if they are currently joined to
-            the room. This will be the leave event if they have left the room.
+            The current membership of the user in the room and the
+            membership event ID of the user.
         """
-        if current_state:
-            member = current_state.get((EventTypes.Member, user_id), None)
-        else:
-            member = await self.state.get_current_state(
-                room_id=room_id, event_type=EventTypes.Member, state_key=user_id
-            )
-
-        if member:
-            membership = member.membership
+        (
+            membership,
+            member_event_id,
+        ) = await self.store.get_local_current_membership_for_user_in_room(
+            user_id=user_id,
+            room_id=room_id,
+        )
+
+        if membership:
             if membership == Membership.JOIN:
-                return member
+                return membership, member_event_id

             # XXX this looks totally bogus. Why do we not allow users who have been banned,
             # or those who were members previously and have been re-invited?
             if allow_departed_users and membership == Membership.LEAVE:
                 forgot = await self.store.did_forget(user_id, room_id)
                 if not forgot:
-                    return member
+                    return membership, member_event_id

         raise AuthError(403, "User %s not in room %s" % (user_id, room_id))

@@ -602,8 +599,11 @@ class Auth:
         # We currently require the user is a "moderator" in the room. We do this
         # by checking if they would (theoretically) be able to change the
         # m.room.canonical_alias events
-        power_level_event = await self.state.get_current_state(
-            room_id, EventTypes.PowerLevels, ""
+
+        power_level_event = (
+            await self._storage_controllers.state.get_current_state_event(
+                room_id, EventTypes.PowerLevels, ""
+            )
         )

         auth_events = {}

@@ -693,12 +693,11 @@ class Auth:
             # * The user is a non-guest user, and was ever in the room
             # * The user is a guest user, and has joined the room
             # else it will throw.
-            member_event = await self.check_user_in_room(
+            return await self.check_user_in_room(
                 room_id, user_id, allow_departed_users=allow_departed_users
             )
-            return member_event.membership, member_event.event_id
         except AuthError:
-            visibility = await self.state.get_current_state(
+            visibility = await self._storage_controllers.state.get_current_state_event(
                 room_id, EventTypes.RoomHistoryVisibility, ""
             )
             if (
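
After this change, callers unpack a `(membership, event_id)` tuple instead of inspecting a returned `EventBase`, which avoids pulling full state from the database just to learn the membership. A hedged usage sketch (`auth` is assumed to be a `synapse.api.auth.Auth` instance from a running homeserver):

```python
from typing import Optional, Tuple

async def describe_membership(auth, room_id: str, user_id: str) -> str:
    # The call raises AuthError if the user is not, and never was, in the room.
    result: Tuple[str, Optional[str]] = await auth.check_user_in_room(
        room_id, user_id, allow_departed_users=True
    )
    membership, member_event_id = result
    return f"{user_id}: {membership} (event {member_event_id})"
```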
synapse/api/constants.py

@@ -31,11 +31,6 @@ MAX_ALIAS_LENGTH = 255
 # the maximum length for a user id is 255 characters
 MAX_USERID_LENGTH = 255

-# The maximum length for a group id is 255 characters
-MAX_GROUPID_LENGTH = 255
-MAX_GROUP_CATEGORYID_LENGTH = 255
-MAX_GROUP_ROLEID_LENGTH = 255
-

 class Membership:

@@ -65,6 +60,8 @@ class JoinRules:
     PRIVATE: Final = "private"
     # As defined for MSC3083.
     RESTRICTED: Final = "restricted"
+    # As defined for MSC3787.
+    KNOCK_RESTRICTED: Final = "knock_restricted"


 class RestrictedJoinRuleTypes:

@@ -98,7 +95,6 @@ class EventTypes:
     Aliases: Final = "m.room.aliases"
     Redaction: Final = "m.room.redaction"
     ThirdPartyInvite: Final = "m.room.third_party_invite"
-    RelatedGroups: Final = "m.room.related_groups"

     RoomHistoryVisibility: Final = "m.room.history_visibility"
     CanonicalAlias: Final = "m.room.canonical_alias"

@@ -140,7 +136,13 @@ class DeviceKeyAlgorithms:


 class EduTypes:
-    Presence: Final = "m.presence"
+    PRESENCE: Final = "m.presence"
+    TYPING: Final = "m.typing"
+    RECEIPT: Final = "m.receipt"
+    DEVICE_LIST_UPDATE: Final = "m.device_list_update"
+    SIGNING_KEY_UPDATE: Final = "m.signing_key_update"
+    UNSTABLE_SIGNING_KEY_UPDATE: Final = "org.matrix.signing_key_update"
+    DIRECT_TO_DEVICE: Final = "m.direct_to_device"


 class RejectedReason:
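
Replacing the single `Presence` attribute with a full set of upper-case constants lets call sites (such as the filtering change further down) compare EDU types against named values instead of scattered string literals. A standalone mirror of the idea, for illustration only:

```python
from typing import Final

class EduTypes:
    # A few of the new constants, mirrored standalone.
    PRESENCE: Final = "m.presence"
    TYPING: Final = "m.typing"
    RECEIPT: Final = "m.receipt"

def is_ephemeral(edu_type: str) -> bool:
    # Named constants instead of string literals at each call site.
    return edu_type in (EduTypes.PRESENCE, EduTypes.TYPING, EduTypes.RECEIPT)

assert is_ephemeral("m.typing")
```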
synapse/api/errors.py

@@ -17,6 +17,7 @@

 import logging
 import typing
+from enum import Enum
 from http import HTTPStatus
 from typing import Any, Dict, List, Optional, Union

@@ -30,7 +31,11 @@ if typing.TYPE_CHECKING:
 logger = logging.getLogger(__name__)


-class Codes:
+class Codes(str, Enum):
+    """
+    All known error codes, as an enum of strings.
+    """
+
     UNRECOGNIZED = "M_UNRECOGNIZED"
     UNAUTHORIZED = "M_UNAUTHORIZED"
     FORBIDDEN = "M_FORBIDDEN"

@@ -74,6 +79,13 @@ class Codes:
     WEAK_PASSWORD = "M_WEAK_PASSWORD"
     INVALID_SIGNATURE = "M_INVALID_SIGNATURE"
     USER_DEACTIVATED = "M_USER_DEACTIVATED"
+
+    # The account has been suspended on the server.
+    # By opposition to `USER_DEACTIVATED`, this is a reversible measure
+    # that can possibly be appealed and reverted.
+    # Part of MSC3823.
+    USER_ACCOUNT_SUSPENDED = "ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED"
+
     BAD_ALIAS = "M_BAD_ALIAS"
     # For restricted join rules.
     UNABLE_AUTHORISE_JOIN = "M_UNABLE_TO_AUTHORISE_JOIN"

@@ -134,7 +146,13 @@ class SynapseError(CodeMessageException):
         errcode: Matrix error code e.g 'M_FORBIDDEN'
     """

-    def __init__(self, code: int, msg: str, errcode: str = Codes.UNKNOWN):
+    def __init__(
+        self,
+        code: int,
+        msg: str,
+        errcode: str = Codes.UNKNOWN,
+        additional_fields: Optional[Dict] = None,
+    ):
         """Constructs a synapse error.

         Args:

@@ -144,9 +162,13 @@ class SynapseError(CodeMessageException):
         """
         super().__init__(code, msg)
         self.errcode = errcode
+        if additional_fields is None:
+            self._additional_fields: Dict = {}
+        else:
+            self._additional_fields = dict(additional_fields)

     def error_dict(self) -> "JsonDict":
-        return cs_error(self.msg, self.errcode)
+        return cs_error(self.msg, self.errcode, **self._additional_fields)


 class InvalidAPICallError(SynapseError):

@@ -171,14 +193,7 @@ class ProxiedRequestError(SynapseError):
         errcode: str = Codes.UNKNOWN,
         additional_fields: Optional[Dict] = None,
     ):
-        super().__init__(code, msg, errcode)
-        if additional_fields is None:
-            self._additional_fields: Dict = {}
-        else:
-            self._additional_fields = dict(additional_fields)
-
-    def error_dict(self) -> "JsonDict":
-        return cs_error(self.msg, self.errcode, **self._additional_fields)
+        super().__init__(code, msg, errcode, additional_fields)


 class ConsentNotGivenError(SynapseError):
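
With `additional_fields` hoisted from `ProxiedRequestError` into the base `SynapseError`, any Synapse error can now carry extra keys in its serialized body. A simplified standalone sketch of the resulting behaviour (`cs_error` is a reduced stand-in here; field names are illustrative):

```python
from typing import Any, Dict, Optional

def cs_error(msg: str, errcode: str, **kwargs: Any) -> Dict[str, Any]:
    # Reduced stand-in for synapse.api.errors.cs_error.
    err: Dict[str, Any] = {"error": msg, "errcode": errcode}
    err.update(kwargs)
    return err

class SynapseError(Exception):
    def __init__(
        self,
        code: int,
        msg: str,
        errcode: str = "M_UNKNOWN",
        additional_fields: Optional[Dict[str, Any]] = None,
    ):
        super().__init__(msg)
        self.code = code
        self.msg = msg
        self.errcode = errcode
        self._additional_fields = dict(additional_fields or {})

    def error_dict(self) -> Dict[str, Any]:
        # Extra fields are merged into the client-visible error body.
        return cs_error(self.msg, self.errcode, **self._additional_fields)

e = SynapseError(403, "Account suspended", "M_UNKNOWN", {"href": "https://example.com/appeal"})
assert e.error_dict()["href"] == "https://example.com/appeal"
```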
synapse/api/filtering.py

@@ -19,6 +19,7 @@ from typing import (
     TYPE_CHECKING,
     Awaitable,
     Callable,
+    Collection,
     Dict,
     Iterable,
     List,

@@ -32,7 +33,7 @@ from typing import (
 import jsonschema
 from jsonschema import FormatChecker

-from synapse.api.constants import EventContentFields
+from synapse.api.constants import EduTypes, EventContentFields
 from synapse.api.errors import SynapseError
 from synapse.api.presence import UserPresenceState
 from synapse.events import EventBase

@@ -346,7 +347,7 @@ class Filter:
             user_id = event.user_id
             field_matchers = {
                 "senders": lambda v: user_id == v,
-                "types": lambda v: "m.presence" == v,
+                "types": lambda v: EduTypes.PRESENCE == v,
             }
             return self._check_fields(field_matchers)
         else:

@@ -444,9 +445,9 @@ class Filter:
         return room_ids

     async def _check_event_relations(
-        self, events: Iterable[FilterEvent]
+        self, events: Collection[FilterEvent]
     ) -> List[FilterEvent]:
-        # The event IDs to check, mypy doesn't understand the ifinstance check.
+        # The event IDs to check, mypy doesn't understand the isinstance check.
         event_ids = [event.event_id for event in events if isinstance(event, EventBase)]  # type: ignore[attr-defined]
         event_ids_to_keep = set(
             await self._store.events_have_relations(
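
The `Iterable` → `Collection` tightening matters whenever the argument is consumed more than once: a one-shot iterator silently comes back empty on the second pass, while `Collection` promises repeated iteration (and `len()`). A standalone illustration of the pitfall:

```python
from typing import Collection, Iterable, List

def two_passes_bad(events: Iterable[int]) -> List[int]:
    ids = [e for e in events]               # first pass exhausts a generator
    return [e for e in events if e in ids]  # second pass: nothing left!

def two_passes_good(events: Collection[int]) -> List[int]:
    # Collection guarantees repeated iteration, so two passes are safe.
    ids = [e for e in events]
    return [e for e in events if e in ids]

assert two_passes_bad(i for i in range(3)) == []  # generator exhausted
assert two_passes_good([0, 1, 2]) == [0, 1, 2]
```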
synapse/api/room_versions.py

@@ -81,6 +81,9 @@ class RoomVersion:
     msc2716_historical: bool
     # MSC2716: Adds support for redacting "insertion", "chunk", and "marker" events
     msc2716_redactions: bool
+    # MSC3787: Adds support for a `knock_restricted` join rule, mixing concepts of
+    # knocks and restricted join rules into the same join condition.
+    msc3787_knock_restricted_join_rule: bool


 class RoomVersions:

@@ -99,6 +102,7 @@ class RoomVersions:
         msc2403_knocking=False,
         msc2716_historical=False,
         msc2716_redactions=False,
+        msc3787_knock_restricted_join_rule=False,
     )
     V2 = RoomVersion(
         "2",

@@ -115,6 +119,7 @@ class RoomVersions:
         msc2403_knocking=False,
         msc2716_historical=False,
         msc2716_redactions=False,
+        msc3787_knock_restricted_join_rule=False,
     )
     V3 = RoomVersion(
         "3",

@@ -131,6 +136,7 @@ class RoomVersions:
         msc2403_knocking=False,
         msc2716_historical=False,
         msc2716_redactions=False,
+        msc3787_knock_restricted_join_rule=False,
     )
     V4 = RoomVersion(
         "4",

@@ -147,6 +153,7 @@ class RoomVersions:
         msc2403_knocking=False,
         msc2716_historical=False,
         msc2716_redactions=False,
+        msc3787_knock_restricted_join_rule=False,
     )
     V5 = RoomVersion(
         "5",

@@ -163,6 +170,7 @@ class RoomVersions:
         msc2403_knocking=False,
         msc2716_historical=False,
         msc2716_redactions=False,
+        msc3787_knock_restricted_join_rule=False,
     )
     V6 = RoomVersion(
         "6",

@@ -179,6 +187,7 @@ class RoomVersions:
         msc2403_knocking=False,
         msc2716_historical=False,
         msc2716_redactions=False,
+        msc3787_knock_restricted_join_rule=False,
     )
     MSC2176 = RoomVersion(
         "org.matrix.msc2176",

@@ -195,6 +204,7 @@ class RoomVersions:
         msc2403_knocking=False,
         msc2716_historical=False,
         msc2716_redactions=False,
+        msc3787_knock_restricted_join_rule=False,
     )
     V7 = RoomVersion(
         "7",

@@ -211,6 +221,7 @@ class RoomVersions:
         msc2403_knocking=True,
         msc2716_historical=False,
         msc2716_redactions=False,
+        msc3787_knock_restricted_join_rule=False,
     )
     V8 = RoomVersion(
         "8",

@@ -227,6 +238,7 @@ class RoomVersions:
         msc2403_knocking=True,
         msc2716_historical=False,
         msc2716_redactions=False,
+        msc3787_knock_restricted_join_rule=False,
     )
     V9 = RoomVersion(
         "9",

@@ -243,6 +255,7 @@ class RoomVersions:
         msc2403_knocking=True,
         msc2716_historical=False,
         msc2716_redactions=False,
+        msc3787_knock_restricted_join_rule=False,
     )
     MSC2716v3 = RoomVersion(
         "org.matrix.msc2716v3",

@@ -259,6 +272,24 @@ class RoomVersions:
         msc2403_knocking=True,
         msc2716_historical=True,
         msc2716_redactions=True,
+        msc3787_knock_restricted_join_rule=False,
     )
+    MSC3787 = RoomVersion(
+        "org.matrix.msc3787",
+        RoomDisposition.UNSTABLE,
+        EventFormatVersions.V3,
+        StateResolutionVersions.V2,
+        enforce_key_validity=True,
+        special_case_aliases_auth=False,
+        strict_canonicaljson=True,
+        limit_notifications_power_levels=True,
+        msc2176_redaction_rules=False,
+        msc3083_join_rules=True,
+        msc3375_redaction_rules=True,
+        msc2403_knocking=True,
+        msc2716_historical=False,
+        msc2716_redactions=False,
+        msc3787_knock_restricted_join_rule=True,
+    )


@@ -276,6 +307,7 @@ KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
         RoomVersions.V8,
         RoomVersions.V9,
         RoomVersions.MSC2716v3,
+        RoomVersions.MSC3787,
     )
 }
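
Adding a per-version capability flag means downstream code can branch on `room_version.msc3787_knock_restricted_join_rule` rather than on version strings. A hedged standalone sketch (a reduced dataclass, not the real `RoomVersion`):

```python
from dataclasses import dataclass

@dataclass(frozen=True)
class RoomVersion:
    identifier: str
    msc3787_knock_restricted_join_rule: bool

V9 = RoomVersion("9", False)
MSC3787 = RoomVersion("org.matrix.msc3787", True)

def join_rule_allowed(room_version: RoomVersion, join_rule: str) -> bool:
    # "knock_restricted" is only a valid m.room.join_rules value in room
    # versions that implement MSC3787.
    if join_rule == "knock_restricted":
        return room_version.msc3787_knock_restricted_join_rule
    return join_rule in ("public", "invite", "knock", "restricted", "private")

assert not join_rule_allowed(V9, "knock_restricted")
assert join_rule_allowed(MSC3787, "knock_restricted")
```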
synapse/app/_base.py

@@ -49,9 +49,12 @@ from twisted.logger import LoggingFile, LogLevel
 from twisted.protocols.tls import TLSMemoryBIOFactory
 from twisted.python.threadpool import ThreadPool

+import synapse.util.caches
 from synapse.api.constants import MAX_PDU_SIZE
 from synapse.app import check_bind_error
 from synapse.app.phone_stats_home import start_phone_stats_home
+from synapse.config import ConfigError
+from synapse.config._base import format_config_error
 from synapse.config.homeserver import HomeServerConfig
 from synapse.config.server import ManholeConfig
 from synapse.crypto import context_factory

@@ -432,6 +435,10 @@ async def start(hs: "HomeServer") -> None:
         signal.signal(signal.SIGHUP, run_sighup)

         register_sighup(refresh_certificate, hs)
+        register_sighup(reload_cache_config, hs.config)
+
+    # Apply the cache config.
+    hs.config.caches.resize_all_caches()

     # Load the certificate from disk.
     refresh_certificate(hs)

@@ -486,6 +493,43 @@ async def start(hs: "HomeServer") -> None:
         atexit.register(gc.freeze)


+def reload_cache_config(config: HomeServerConfig) -> None:
+    """Reload cache config from disk and immediately apply it, resizing caches accordingly.
+
+    If the config is invalid, a `ConfigError` is logged and no changes are made.
+
+    Otherwise, this:
+        - replaces the `caches` section on the given `config` object,
+        - resizes all caches according to the new cache factors, and
+
+    Note that the following cache config keys are read, but not applied:
+        - event_cache_size: used to set a max_size and _original_max_size on
+          EventsWorkerStore._get_event_cache when it is created. We'd have to update
+          the _original_max_size (and maybe
+        - sync_response_cache_duration: would have to update the timeout_sec attribute on
+          HomeServer -> SyncHandler -> ResponseCache.
+        - track_memory_usage. This affects synapse.util.caches.TRACK_MEMORY_USAGE which
+          influences Synapse's self-reported metrics.
+
+    Also, the HTTPConnectionPool in SimpleHTTPClient sets its maxPersistentPerHost
+    parameter based on the global_factor. This won't be applied on a config reload.
+    """
+    try:
+        previous_cache_config = config.reload_config_section("caches")
+    except ConfigError as e:
+        logger.warning("Failed to reload cache config")
+        for f in format_config_error(e):
+            logger.warning(f)
+    else:
+        logger.debug(
+            "New cache config. Was:\n %s\nNow:\n %s",
+            previous_cache_config.__dict__,
+            config.caches.__dict__,
+        )
+        synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage
+        config.caches.resize_all_caches()
+
+
 def setup_sentry(hs: "HomeServer") -> None:
     """Enable sentry integration, if enabled in configuration"""
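
Registering `reload_cache_config` as a SIGHUP handler means an admin can edit `caches.global_factor` or `per_cache_factors` on disk and apply the change with `kill -HUP <pid>`, no restart needed. A minimal standalone sketch of the signal plumbing (simplified; the real handler goes through `reload_config_section` and `resize_all_caches`):

```python
import signal
import types
from typing import Optional

cache_factor = 0.5  # stands in for config.caches.global_factor

def reload_cache_config(signum: int, frame: Optional[types.FrameType]) -> None:
    # In Synapse the hook re-reads the "caches" config section and then
    # resizes every registered cache; here we just bump a stand-in value.
    global cache_factor
    cache_factor = 2.0
    print("cache config reloaded; factor =", cache_factor)

# POSIX only: SIGHUP does not exist on Windows.
signal.signal(signal.SIGHUP, reload_cache_config)
```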
synapse/app/admin_cmd.py

@@ -37,7 +37,6 @@ from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
 from synapse.replication.slave.storage.devices import SlavedDeviceStore
 from synapse.replication.slave.storage.events import SlavedEventStore
 from synapse.replication.slave.storage.filtering import SlavedFilteringStore
-from synapse.replication.slave.storage.groups import SlavedGroupServerStore
 from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
 from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
 from synapse.replication.slave.storage.registration import SlavedRegistrationStore

@@ -55,7 +54,6 @@ class AdminCmdSlavedStore(
     SlavedApplicationServiceStore,
     SlavedRegistrationStore,
     SlavedFilteringStore,
-    SlavedGroupServerStore,
     SlavedDeviceInboxStore,
     SlavedDeviceStore,
     SlavedPushRuleStore,
synapse/app/generic_worker.py

@@ -58,7 +58,6 @@ from synapse.replication.slave.storage.devices import SlavedDeviceStore
 from synapse.replication.slave.storage.directory import DirectoryStore
 from synapse.replication.slave.storage.events import SlavedEventStore
 from synapse.replication.slave.storage.filtering import SlavedFilteringStore
-from synapse.replication.slave.storage.groups import SlavedGroupServerStore
 from synapse.replication.slave.storage.keys import SlavedKeyStore
 from synapse.replication.slave.storage.profile import SlavedProfileStore
 from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore

@@ -69,7 +68,6 @@ from synapse.rest.admin import register_servlets_for_media_repo
 from synapse.rest.client import (
     account_data,
     events,
-    groups,
     initial_sync,
     login,
     presence,

@@ -78,6 +76,7 @@ from synapse.rest.client import (
     read_marker,
     receipts,
     room,
+    room_batch,
     room_keys,
     sendtodevice,
     sync,

@@ -87,7 +86,7 @@ from synapse.rest.client import (
     voip,
 )
 from synapse.rest.client._base import client_patterns
-from synapse.rest.client.account import ThreepidRestServlet
+from synapse.rest.client.account import ThreepidRestServlet, WhoamiRestServlet
 from synapse.rest.client.devices import DevicesRestServlet
 from synapse.rest.client.keys import (
     KeyChangesServlet,

@@ -233,7 +232,6 @@ class GenericWorkerSlavedStore(
     SlavedDeviceStore,
     SlavedReceiptsStore,
     SlavedPushRuleStore,
-    SlavedGroupServerStore,
     SlavedAccountDataStore,
     SlavedPusherStore,
     CensorEventsStore,

@@ -289,6 +287,7 @@ class GenericWorkerServer(HomeServer):
                     RegistrationTokenValidityRestServlet(self).register(resource)
                     login.register_servlets(self, resource)
                     ThreepidRestServlet(self).register(resource)
+                    WhoamiRestServlet(self).register(resource)
                     DevicesRestServlet(self).register(resource)

                     # Read-only

@@ -308,6 +307,7 @@ class GenericWorkerServer(HomeServer):
                     room.register_servlets(self, resource, is_worker=True)
                     room.register_deprecated_servlets(self, resource)
                     initial_sync.register_servlets(self, resource)
+                    room_batch.register_servlets(self, resource)
                     room_keys.register_servlets(self, resource)
                     tags.register_servlets(self, resource)
                     account_data.register_servlets(self, resource)

@@ -320,9 +320,6 @@ class GenericWorkerServer(HomeServer):

                     presence.register_servlets(self, resource)

-                    if self.config.experimental.groups_enabled:
-                        groups.register_servlets(self, resource)
-
                     resources.update({CLIENT_API_PREFIX: resource})

             resources.update(build_synapse_client_resource_tree(self))
synapse/app/homeserver.py

@@ -16,7 +16,7 @@
 import logging
 import os
 import sys
-from typing import Dict, Iterable, Iterator, List
+from typing import Dict, Iterable, List

 from matrix_common.versionstring import get_distribution_version_string

@@ -45,7 +45,7 @@ from synapse.app._base import (
     redirect_stdio_to_logs,
     register_start,
 )
-from synapse.config._base import ConfigError
+from synapse.config._base import ConfigError, format_config_error
 from synapse.config.emailconfig import ThreepidBehaviour
 from synapse.config.homeserver import HomeServerConfig
 from synapse.config.server import ListenerConfig

@@ -399,38 +399,6 @@ def setup(config_options: List[str]) -> SynapseHomeServer:
     return hs


-def format_config_error(e: ConfigError) -> Iterator[str]:
-    """
-    Formats a config error neatly
-
-    The idea is to format the immediate error, plus the "causes" of those errors,
-    hopefully in a way that makes sense to the user. For example:
-
-        Error in configuration at 'oidc_config.user_mapping_provider.config.display_name_template':
-          Failed to parse config for module 'JinjaOidcMappingProvider':
-            invalid jinja template:
-              unexpected end of template, expected 'end of print statement'.
-
-    Args:
-        e: the error to be formatted
-
-    Returns: An iterator which yields string fragments to be formatted
-    """
-    yield "Error in configuration"
-
-    if e.path:
-        yield " at '%s'" % (".".join(e.path),)
-
-    yield ":\n  %s" % (e.msg,)
-
-    parent_e = e.__cause__
-    indent = 1
-    while parent_e:
-        indent += 1
-        yield ":\n%s%s" % ("  " * indent, str(parent_e))
-        parent_e = parent_e.__cause__
-
-
 def run(hs: HomeServer) -> None:
     _base.start_reactor(
         "synapse-homeserver",
synapse/appservice/__init__.py

@@ -23,13 +23,7 @@ from netaddr import IPSet

 from synapse.api.constants import EventTypes
 from synapse.events import EventBase
-from synapse.types import (
-    DeviceListUpdates,
-    GroupID,
-    JsonDict,
-    UserID,
-    get_domain_from_id,
-)
+from synapse.types import DeviceListUpdates, JsonDict, UserID
 from synapse.util.caches.descriptors import _CacheContext, cached

 if TYPE_CHECKING:

@@ -55,7 +49,6 @@ class ApplicationServiceState(Enum):
 @attr.s(slots=True, frozen=True, auto_attribs=True)
 class Namespace:
     exclusive: bool
-    group_id: Optional[str]
     regex: Pattern[str]


@@ -77,7 +70,6 @@ class ApplicationService:
     def __init__(
         self,
         token: str,
-        hostname: str,
         id: str,
         sender: str,
         url: Optional[str] = None,

@@ -95,7 +87,6 @@ class ApplicationService:
         )  # url must not end with a slash
         self.hs_token = hs_token
         self.sender = sender
-        self.server_name = hostname
         self.namespaces = self._check_namespaces(namespaces)
         self.id = id
         self.ip_range_whitelist = ip_range_whitelist

@@ -141,30 +132,13 @@ class ApplicationService:
             exclusive = regex_obj.get("exclusive")
             if not isinstance(exclusive, bool):
                 raise ValueError("Expected bool for 'exclusive' in ns '%s'" % ns)
-            group_id = regex_obj.get("group_id")
-            if group_id:
-                if not isinstance(group_id, str):
-                    raise ValueError(
-                        "Expected string for 'group_id' in ns '%s'" % ns
-                    )
-                try:
-                    GroupID.from_string(group_id)
-                except Exception:
-                    raise ValueError(
-                        "Expected valid group ID for 'group_id' in ns '%s'" % ns
-                    )
-
-                if get_domain_from_id(group_id) != self.server_name:
-                    raise ValueError(
-                        "Expected 'group_id' to be this host in ns '%s'" % ns
-                    )
-
             regex = regex_obj.get("regex")
             if not isinstance(regex, str):
                 raise ValueError("Expected string for 'regex' in ns '%s'" % ns)

             # Pre-compile regex.
-            result[ns].append(Namespace(exclusive, group_id, re.compile(regex)))
+            result[ns].append(Namespace(exclusive, re.compile(regex)))

         return result

@@ -369,21 +343,6 @@ class ApplicationService:
             if namespace.exclusive
         ]

-    def get_groups_for_user(self, user_id: str) -> Iterable[str]:
-        """Get the groups that this user is associated with by this AS
-
-        Args:
-            user_id: The ID of the user.
-
-        Returns:
-            An iterable that yields group_id strings.
-        """
-        return (
-            namespace.group_id
-            for namespace in self.namespaces[ApplicationService.NS_USERS]
-            if namespace.group_id and namespace.regex.match(user_id)
-        )
-
     def is_rate_limited(self) -> bool:
         return self.rate_limited
synapse/appservice/api.py

@@ -14,7 +14,7 @@
 # limitations under the License.
 import logging
 import urllib.parse
-from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple
+from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Mapping, Optional, Tuple

 from prometheus_client import Counter
 from typing_extensions import TypeGuard

@@ -155,6 +155,9 @@ class ApplicationServiceApi(SimpleHttpClient):
         if service.url is None:
             return []

+        # This is required by the configuration.
+        assert service.hs_token is not None
+
         uri = "%s%s/thirdparty/%s/%s" % (
             service.url,
             APP_SERVICE_PREFIX,

@@ -162,7 +165,11 @@ class ApplicationServiceApi(SimpleHttpClient):
             urllib.parse.quote(protocol),
         )
         try:
-            response = await self.get_json(uri, fields)
+            args: Mapping[Any, Any] = {
+                **fields,
+                b"access_token": service.hs_token,
+            }
+            response = await self.get_json(uri, args=args)
             if not isinstance(response, list):
                 logger.warning(
                     "query_3pe to %s returned an invalid response %r", uri, response

@@ -190,13 +197,15 @@ class ApplicationServiceApi(SimpleHttpClient):
             return {}

         async def _get() -> Optional[JsonDict]:
+            # This is required by the configuration.
+            assert service.hs_token is not None
             uri = "%s%s/thirdparty/protocol/%s" % (
                 service.url,
                 APP_SERVICE_PREFIX,
                 urllib.parse.quote(protocol),
             )
             try:
-                info = await self.get_json(uri)
+                info = await self.get_json(uri, {"access_token": service.hs_token})

                 if not _is_valid_3pe_metadata(info):
                     logger.warning(
synapse/appservice/scheduler.py

@@ -384,6 +384,11 @@ class _TransactionController:
             device_list_summary: The device list summary to include in the transaction.
         """
         try:
+            service_is_up = await self._is_service_up(service)
+            # Don't create empty txns when in recovery mode (ephemeral events are dropped)
+            if not service_is_up and not events:
+                return
+
             txn = await self.store.create_appservice_txn(
                 service=service,
                 events=events,

@@ -393,7 +398,6 @@ class _TransactionController:
                 unused_fallback_keys=unused_fallback_keys or {},
                 device_list_summary=device_list_summary or DeviceListUpdates(),
             )
-            service_is_up = await self._is_service_up(service)
             if service_is_up:
                 sent = await txn.send(self.as_api)
                 if sent:
synapse/config/_base.py

@@ -16,14 +16,18 @@

 import argparse
 import errno
+import logging
 import os
 from collections import OrderedDict
 from hashlib import sha256
 from textwrap import dedent
 from typing import (
     Any,
+    ClassVar,
+    Collection,
     Dict,
     Iterable,
+    Iterator,
     List,
     MutableMapping,
     Optional,

@@ -40,6 +44,8 @@ import yaml

 from synapse.util.templates import _create_mxc_to_http_filter, _format_ts_filter

+logger = logging.getLogger(__name__)
+

 class ConfigError(Exception):
     """Represents a problem parsing the configuration

@@ -55,6 +61,38 @@ class ConfigError(Exception):
         self.path = path


+def format_config_error(e: ConfigError) -> Iterator[str]:
+    """
+    Formats a config error neatly
+
+    The idea is to format the immediate error, plus the "causes" of those errors,
+    hopefully in a way that makes sense to the user. For example:
+
+        Error in configuration at 'oidc_config.user_mapping_provider.config.display_name_template':
+          Failed to parse config for module 'JinjaOidcMappingProvider':
+            invalid jinja template:
+              unexpected end of template, expected 'end of print statement'.
+
+    Args:
+        e: the error to be formatted
+
+    Returns: An iterator which yields string fragments to be formatted
+    """
+    yield "Error in configuration"
+
+    if e.path:
+        yield " at '%s'" % (".".join(e.path),)
+
+    yield ":\n  %s" % (e.msg,)
+
+    parent_e = e.__cause__
+    indent = 1
+    while parent_e:
+        indent += 1
+        yield ":\n%s%s" % ("  " * indent, str(parent_e))
+        parent_e = parent_e.__cause__
+
+
 # We split these messages out to allow packages to override with package
 # specific instructions.
 MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS = """\

@@ -119,7 +157,7 @@ class Config:
         defined in subclasses.
     """

-    section: str
+    section: ClassVar[str]

     def __init__(self, root_config: "RootConfig" = None):
         self.root = root_config

@@ -309,9 +347,12 @@ class RootConfig:
     class, lower-cased and with "Config" removed.
     """

-    config_classes = []
+    config_classes: List[Type[Config]] = []
+
+    def __init__(self, config_files: Collection[str] = ()):
+        # Capture absolute paths here, so we can reload config after we daemonize.
+        self.config_files = [os.path.abspath(path) for path in config_files]

-    def __init__(self):
         for config_class in self.config_classes:
             if config_class.section is None:
                 raise ValueError("%r requires a section name" % (config_class,))

@@ -512,12 +553,10 @@ class RootConfig:
             object from parser.parse_args(..)`
         """

-        obj = cls()
-
         config_args = parser.parse_args(argv)

         config_files = find_config_files(search_paths=config_args.config_path)
-
+        obj = cls(config_files)
         if not config_files:
             parser.error("Must supply a config file.")

@@ -627,7 +666,7 @@ class RootConfig:

         generate_missing_configs = config_args.generate_missing_configs

-        obj = cls()
+        obj = cls(config_files)

         if config_args.generate_config:
             if config_args.report_stats is None:

@@ -727,7 +766,34 @@ class RootConfig:
     ) -> None:
         self.invoke_all("generate_files", config_dict, config_dir_path)

+    def reload_config_section(self, section_name: str) -> Config:
+        """Reconstruct the given config section, leaving all others unchanged.
+
+        This works in three steps:
+
+        1. Create a new instance of the relevant `Config` subclass.
+        2. Call `read_config` on that instance to parse the new config.
+        3. Replace the existing config instance with the new one.
+
+        :raises ValueError: if the given `section` does not exist.
+        :raises ConfigError: for any other problems reloading config.
+
+        :returns: the previous config object, which no longer has a reference to this
+            RootConfig.
+        """
+        existing_config: Optional[Config] = getattr(self, section_name, None)
+        if existing_config is None:
+            raise ValueError(f"Unknown config section '{section_name}'")
+        logger.info("Reloading config section '%s'", section_name)
+
+        new_config_data = read_config_files(self.config_files)
+        new_config = type(existing_config)(self)
+        new_config.read_config(new_config_data)
+        setattr(self, section_name, new_config)
+
+        existing_config.root = None
+        return existing_config
+

 def read_config_files(config_files: Iterable[str]) -> Dict[str, Any]:
     """Read the config files into a dict
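
`reload_config_section` swaps a single `Config` object on the `RootConfig` in place and hands back the detached old one, so callers can diff old against new. A hedged usage sketch (assumes a Synapse environment and an already-parsed `HomeServerConfig` whose `config_files` paths are still readable on disk):

```python
from synapse.config import ConfigError
from synapse.config.homeserver import HomeServerConfig

def apply_new_cache_factors(config: HomeServerConfig) -> None:
    try:
        previous = config.reload_config_section("caches")
    except ConfigError:
        return  # invalid new config: the old section stays in place
    # `previous` is detached from the root (previous.root is None); the live
    # section has been swapped in, so apply the new cache factors.
    del previous
    config.caches.resize_all_caches()
```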
synapse/config/_base.pyi

@@ -1,15 +1,19 @@
 import argparse
 from typing import (
     Any,
+    Collection,
     Dict,
     Iterable,
+    Iterator,
     List,
+    Literal,
     MutableMapping,
     Optional,
     Tuple,
     Type,
     TypeVar,
     Union,
+    overload,
 )

 import jinja2

@@ -28,7 +32,6 @@ from synapse.config import (
     emailconfig,
     experimental,
     federation,
-    groups,
     jwt,
     key,
     logger,

@@ -64,6 +67,8 @@ class ConfigError(Exception):
         self.msg = msg
         self.path = path

+def format_config_error(e: ConfigError) -> Iterator[str]: ...
+
 MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS: str
 MISSING_REPORT_STATS_SPIEL: str
 MISSING_SERVER_NAME: str

@@ -101,7 +106,6 @@ class RootConfig:
     push: push.PushConfig
     spamchecker: spam_checker.SpamCheckerConfig
     room: room.RoomConfig
-    groups: groups.GroupsConfig
     userdirectory: user_directory.UserDirectoryConfig
     consent: consent.ConsentConfig
     stats: stats.StatsConfig

@@ -117,7 +121,8 @@ class RootConfig:
     background_updates: background_updates.BackgroundUpdateConfig

     config_classes: List[Type["Config"]] = ...
-    def __init__(self) -> None: ...
+    config_files: List[str]
+    def __init__(self, config_files: Collection[str] = ...) -> None: ...
     def invoke_all(
         self, func_name: str, *args: Any, **kwargs: Any
     ) -> MutableMapping[str, Any]: ...

@@ -157,6 +162,12 @@ class RootConfig:
     def generate_missing_files(
         self, config_dict: dict, config_dir_path: str
     ) -> None: ...
+    @overload
+    def reload_config_section(
+        self, section_name: Literal["caches"]
+    ) -> cache.CacheConfig: ...
+    @overload
+    def reload_config_section(self, section_name: str) -> Config: ...

 class Config:
     root: RootConfig
synapse/config/appservice.py

@@ -179,7 +179,6 @@ def _load_appservice(

     return ApplicationService(
         token=as_info["as_token"],
-        hostname=hostname,
         url=as_info["url"],
         namespaces=as_info["namespaces"],
         hs_token=as_info["hs_token"],
synapse/config/auth.py

@@ -29,7 +29,18 @@ class AuthConfig(Config):
         if password_config is None:
             password_config = {}

-        self.password_enabled = password_config.get("enabled", True)
+        passwords_enabled = password_config.get("enabled", True)
+        # 'only_for_reauth' allows users who have previously set a password to use it,
+        # even though passwords would otherwise be disabled.
+        passwords_for_reauth_only = passwords_enabled == "only_for_reauth"
+
+        self.password_enabled_for_login = (
+            passwords_enabled and not passwords_for_reauth_only
+        )
+        self.password_enabled_for_reauth = (
+            passwords_for_reauth_only or passwords_enabled
+        )
+
         self.password_localdb_enabled = password_config.get("localdb_enabled", True)
         self.password_pepper = password_config.get("pepper", "")

@@ -46,7 +57,9 @@ class AuthConfig(Config):
     def generate_config_section(self, **kwargs: Any) -> str:
         return """\
         password_config:
-           # Uncomment to disable password login
+           # Uncomment to disable password login.
+           # Set to `only_for_reauth` to permit reauthentication for users that
+           # have passwords and are already logged in.
            #
            #enabled: false
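
The three accepted values of `password_config.enabled` map onto the two derived flags as follows (a standalone truth-table sketch of the logic above):

```python
from typing import Tuple, Union

def derive_password_flags(enabled: Union[bool, str]) -> Tuple[bool, bool]:
    # Mirrors the derivation in AuthConfig.read_config.
    reauth_only = enabled == "only_for_reauth"
    enabled_for_login = bool(enabled) and not reauth_only
    enabled_for_reauth = reauth_only or bool(enabled)
    return enabled_for_login, enabled_for_reauth

# enabled value            -> (login allowed, reauth allowed)
assert derive_password_flags(True) == (True, True)
assert derive_password_flags(False) == (False, False)
assert derive_password_flags("only_for_reauth") == (False, True)
```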
synapse/config/cache.py

@@ -69,11 +69,11 @@ def _canonicalise_cache_name(cache_name: str) -> str:
 def add_resizable_cache(
     cache_name: str, cache_resize_callback: Callable[[float], None]
 ) -> None:
-    """Register a cache that's size can dynamically change
+    """Register a cache whose size can dynamically change

     Args:
         cache_name: A reference to the cache
-        cache_resize_callback: A callback function that will be ran whenever
+        cache_resize_callback: A callback function that will run whenever
             the cache needs to be resized
     """
     # Some caches have '*' in them which we strip out.

@@ -96,6 +96,13 @@ class CacheConfig(Config):
     section = "caches"
     _environ = os.environ

+    event_cache_size: int
+    cache_factors: Dict[str, float]
+    global_factor: float
+    track_memory_usage: bool
+    expiry_time_msec: Optional[int]
+    sync_response_cache_duration: int
+
     @staticmethod
     def reset() -> None:
         """Resets the caches to their defaults. Used for tests."""

@@ -115,6 +122,12 @@ class CacheConfig(Config):
         # A cache 'factor' is a multiplier that can be applied to each of
         # Synapse's caches in order to increase or decrease the maximum
         # number of entries that can be stored.
+        #
+        # The configuration for cache factors (caches.global_factor and
+        # caches.per_cache_factors) can be reloaded while the application is running,
+        # by sending a SIGHUP signal to the Synapse process. Changes to other parts of
+        # the caching config will NOT be applied after a SIGHUP is received; a restart
+        # is necessary.

         # The number of events to cache in memory. Not affected by
         # caches.global_factor.

@@ -163,6 +176,24 @@ class CacheConfig(Config):
         #
         #cache_entry_ttl: 30m

+        # This flag enables cache autotuning, and is further specified by the sub-options `max_cache_memory_usage`,
+        # `target_cache_memory_usage`, `min_cache_ttl`. These flags work in conjunction with each other to maintain
+        # a balance between cache memory usage and cache entry availability. You must be using jemalloc to utilize
+        # this option, and all three of the options must be specified for this feature to work.
+        #cache_autotuning:
+          # This flag sets a ceiling on much memory the cache can use before caches begin to be continuously evicted.
+          # They will continue to be evicted until the memory usage drops below the `target_memory_usage`, set in
+          # the flag below, or until the `min_cache_ttl` is hit.
+          #max_cache_memory_usage: 1024M
+
+          # This flag sets a rough target for the desired memory usage of the caches.
+          #target_cache_memory_usage: 758M
+
+          # 'min_cache_ttl` sets a limit under which newer cache entries are not evicted and is only applied when
+          # caches are actively being evicted/`max_cache_memory_usage` has been exceeded. This is to protect hot caches
+          # from being emptied while Synapse is evicting due to memory.
+          #min_cache_ttl: 5m
+
         # Controls how long the results of a /sync request are cached for after
         # a successful response is returned. A higher duration can help clients with
         # intermittent connections, at the cost of higher memory usage.

@@ -174,21 +205,21 @@ class CacheConfig(Config):
         """

     def read_config(self, config: JsonDict, **kwargs: Any) -> None:
+        """Populate this config object with values from `config`.
+
+        This method does NOT resize existing or future caches: use `resize_all_caches`.
+        We use two separate methods so that we can reject bad config before applying it.
+        """
         self.event_cache_size = self.parse_size(
             config.get("event_cache_size", _DEFAULT_EVENT_CACHE_SIZE)
         )
-        self.cache_factors: Dict[str, float] = {}
+        self.cache_factors = {}

         cache_config = config.get("caches") or {}
-        self.global_factor = cache_config.get(
-            "global_factor", properties.default_factor_size
-        )
+        self.global_factor = cache_config.get("global_factor", _DEFAULT_FACTOR_SIZE)
         if not isinstance(self.global_factor, (int, float)):
             raise ConfigError("caches.global_factor must be a number.")

-        # Set the global one so that it's reflected in new caches
-        properties.default_factor_size = self.global_factor
-
         # Load cache factors from the config
         individual_factors = cache_config.get("per_cache_factors") or {}
         if not isinstance(individual_factors, dict):

@@ -230,7 +261,7 @@ class CacheConfig(Config):
         cache_entry_ttl = cache_config.get("cache_entry_ttl", "30m")

         if expire_caches:
-            self.expiry_time_msec: Optional[int] = self.parse_duration(cache_entry_ttl)
+            self.expiry_time_msec = self.parse_duration(cache_entry_ttl)
         else:
             self.expiry_time_msec = None

@@ -250,23 +281,38 @@ class CacheConfig(Config):
             )
             self.expiry_time_msec = self.parse_duration(expiry_time)

+        self.cache_autotuning = cache_config.get("cache_autotuning")
+        if self.cache_autotuning:
+            max_memory_usage = self.cache_autotuning.get("max_cache_memory_usage")
+            self.cache_autotuning["max_cache_memory_usage"] = self.parse_size(
+                max_memory_usage
+            )
+
+            target_mem_size = self.cache_autotuning.get("target_cache_memory_usage")
+            self.cache_autotuning["target_cache_memory_usage"] = self.parse_size(
+                target_mem_size
+            )
+
+            min_cache_ttl = self.cache_autotuning.get("min_cache_ttl")
+            self.cache_autotuning["min_cache_ttl"] = self.parse_duration(min_cache_ttl)
+
         self.sync_response_cache_duration = self.parse_duration(
             cache_config.get("sync_response_cache_duration", 0)
         )

-        # Resize all caches (if necessary) with the new factors we've loaded
-        self.resize_all_caches()
-
-        # Store this function so that it can be called from other classes without
-        # needing an instance of Config
-        properties.resize_all_caches_func = self.resize_all_caches
-
     def resize_all_caches(self) -> None:
-        """Ensure all cache sizes are up to date
+        """Ensure all cache sizes are up-to-date.

         For each cache, run the mapped callback function with either
         a specific cache factor or the default, global one.
         """
+        # Set the global factor size, so that new caches are appropriately sized.
+        properties.default_factor_size = self.global_factor
+
+        # Store this function so that it can be called from other classes without
+        # needing an instance of CacheConfig
+        properties.resize_all_caches_func = self.resize_all_caches
+
         # block other threads from modifying _CACHES while we iterate it.
         with _CACHES_LOCK:
             for cache_name, callback in _CACHES.items():
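
Every resizable cache registers a callback keyed by its canonical name; `resize_all_caches` then invokes each callback with that cache's per-cache factor, falling back to the global one. A standalone sketch of the registry pattern (simplified; Synapse also canonicalises names and keeps the lock module-global):

```python
import threading
from typing import Callable, Dict

_CACHES: Dict[str, Callable[[float], None]] = {}
_CACHES_LOCK = threading.Lock()

def add_resizable_cache(name: str, resize: Callable[[float], None]) -> None:
    # Register a cache whose size can change at runtime.
    with _CACHES_LOCK:
        _CACHES[name] = resize

def resize_all_caches(global_factor: float, per_cache: Dict[str, float]) -> None:
    # Block other threads from modifying _CACHES while we iterate it.
    with _CACHES_LOCK:
        for name, resize in _CACHES.items():
            resize(per_cache.get(name, global_factor))

add_resizable_cache("get_users", lambda f: print("get_users factor ->", f))
resize_all_caches(0.5, {"get_users": 2.0})  # prints: get_users factor -> 2.0
```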
synapse/config/experimental.py

@@ -73,9 +73,6 @@ class ExperimentalConfig(Config):
         # MSC3720 (Account status endpoint)
         self.msc3720_enabled: bool = experimental.get("msc3720_enabled", False)

-        # The deprecated groups feature.
-        self.groups_enabled: bool = experimental.get("groups_enabled", False)
-
         # MSC2654: Unread counts
         self.msc2654_enabled: bool = experimental.get("msc2654_enabled", False)

@@ -84,3 +81,6 @@ class ExperimentalConfig(Config):

         # MSC3786 (Add a default push rule to ignore m.room.server_acl events)
         self.msc3786_enabled: bool = experimental.get("msc3786_enabled", False)
+
+        # MSC3772: A push rule for mutual relations.
+        self.msc3772_enabled: bool = experimental.get("msc3772_enabled", False)
synapse/config/groups.py (deleted)

@@ -1,39 +0,0 @@
-# Copyright 2017 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from typing import Any
-
-from synapse.types import JsonDict
-
-from ._base import Config
-
-
-class GroupsConfig(Config):
-    section = "groups"
-
-    def read_config(self, config: JsonDict, **kwargs: Any) -> None:
-        self.enable_group_creation = config.get("enable_group_creation", False)
-        self.group_creation_prefix = config.get("group_creation_prefix", "")
-
-    def generate_config_section(self, **kwargs: Any) -> str:
-        return """\
-        # Uncomment to allow non-server-admin users to create groups on this server
-        #
-        #enable_group_creation: true
-
-        # If enabled, non server admins can only create groups with local parts
-        # starting with this prefix
-        #
-        #group_creation_prefix: "unofficial_"
-        """
synapse/config/homeserver.py

@@ -25,7 +25,6 @@ from .database import DatabaseConfig
 from .emailconfig import EmailConfig
 from .experimental import ExperimentalConfig
 from .federation import FederationConfig
-from .groups import GroupsConfig
 from .jwt import JWTConfig
 from .key import KeyConfig
 from .logger import LoggingConfig

@@ -89,7 +88,6 @@ class HomeServerConfig(RootConfig):
         PushConfig,
         SpamCheckerConfig,
         RoomConfig,
-        GroupsConfig,
         UserDirectoryConfig,
         ConsentConfig,
         StatsConfig,
synapse/config/oembed.py

@@ -57,9 +57,9 @@ class OembedConfig(Config):
         """
         # Whether to use the packaged providers.json file.
         if not oembed_config.get("disable_default_providers") or False:
-            providers = json.load(
-                pkg_resources.resource_stream("synapse", "res/providers.json")
-            )
+            with pkg_resources.resource_stream("synapse", "res/providers.json") as s:
+                providers = json.load(s)
+
             yield from self._parse_and_validate_provider(
                 providers, config_path=("oembed",)
             )
synapse/config/repository.py

@@ -223,6 +223,22 @@ class ContentRepositoryConfig(Config):
             "url_preview_accept_language"
         ) or ["en"]

+        media_retention = config.get("media_retention") or {}
+
+        self.media_retention_local_media_lifetime_ms = None
+        local_media_lifetime = media_retention.get("local_media_lifetime")
+        if local_media_lifetime is not None:
+            self.media_retention_local_media_lifetime_ms = self.parse_duration(
+                local_media_lifetime
+            )
+
+        self.media_retention_remote_media_lifetime_ms = None
+        remote_media_lifetime = media_retention.get("remote_media_lifetime")
+        if remote_media_lifetime is not None:
+            self.media_retention_remote_media_lifetime_ms = self.parse_duration(
+                remote_media_lifetime
+            )
+
     def generate_config_section(self, data_dir_path: str, **kwargs: Any) -> str:
         assert data_dir_path is not None
         media_store = os.path.join(data_dir_path, "media_store")
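
Lifetimes left unset stay `None`, meaning that class of media is never purged; set values are parsed from the usual suffixed duration strings into milliseconds. A standalone sketch of the parse performed above (`parse_duration` is a much-reduced stand-in for `Config.parse_duration`):

```python
from typing import Optional, Union

def parse_duration(value: Union[int, str]) -> int:
    # Reduced stand-in: integers are taken as ms; strings may use an
    # s/m/h/d suffix.
    if isinstance(value, int):
        return value
    units = {"s": 1_000, "m": 60_000, "h": 3_600_000, "d": 86_400_000}
    return int(value[:-1]) * units[value[-1]]

media_retention = {"local_media_lifetime": "90d"}  # as read from the YAML

local_ms: Optional[int] = None
lifetime = media_retention.get("local_media_lifetime")
if lifetime is not None:
    local_ms = parse_duration(lifetime)

assert local_ms == 90 * 86_400_000
```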
@@ -63,6 +63,19 @@ class RoomConfig(Config):
|
||||
"Invalid value for encryption_enabled_by_default_for_room_type"
|
||||
)
|
||||
|
||||
self.default_power_level_content_override = config.get(
|
||||
"default_power_level_content_override",
|
||||
None,
|
||||
)
|
||||
if self.default_power_level_content_override is not None:
|
||||
for preset in self.default_power_level_content_override:
|
||||
if preset not in vars(RoomCreationPreset).values():
|
||||
raise ConfigError(
|
||||
"Unrecognised room preset %s in default_power_level_content_override"
|
||||
% preset
|
||||
)
|
||||
# We validate the actual overrides when we try to apply them.
|
||||
|
||||
def generate_config_section(self, **kwargs: Any) -> str:
|
||||
return """\
|
||||
## Rooms ##
|
||||
@@ -83,4 +96,38 @@ class RoomConfig(Config):
        # will also not affect rooms created by other servers.
        #
        #encryption_enabled_by_default_for_room_type: invite

        # Override the default power levels for rooms created on this server, per
        # room creation preset.
        #
        # The appropriate dictionary for the room preset will be applied on top
        # of the existing power levels content.
        #
        # Useful if you know that your users need special permissions in rooms
        # that they create (e.g. to send particular types of state events without
        # needing an elevated power level). This takes the same shape as the
        # `power_level_content_override` parameter in the /createRoom API, but
        # is applied before that parameter.
        #
        # Valid keys are some or all of `private_chat`, `trusted_private_chat`
        # and `public_chat`. Inside each of those should be any of the
        # properties allowed in `power_level_content_override` in the
        # /createRoom API. If any property is missing, its default value will
        # continue to be used. If any property is present, it will overwrite
        # the existing default completely (so if the `events` property exists,
        # the default event power levels will be ignored).
        #
        #default_power_level_content_override:
        #   private_chat:
        #       "events":
        #           "com.example.myeventtype" : 0
        #           "m.room.avatar": 50
        #           "m.room.canonical_alias": 50
        #           "m.room.encryption": 100
        #           "m.room.history_visibility": 100
        #           "m.room.name": 50
        #           "m.room.power_levels": 100
        #           "m.room.server_acl": 100
        #           "m.room.tombstone": 100
        #       "events_default": 1
        """
@@ -679,6 +679,17 @@ class ServerConfig(Config):
            config.get("exclude_rooms_from_sync") or []
        )

        delete_stale_devices_after: Optional[str] = (
            config.get("delete_stale_devices_after") or None
        )

        if delete_stale_devices_after is not None:
            self.delete_stale_devices_after: Optional[int] = self.parse_duration(
                delete_stale_devices_after
            )
        else:
            self.delete_stale_devices_after = None

    def has_tls_listener(self) -> bool:
        return any(listener.tls for listener in self.listeners)
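delete_stale_devices_after follows the same normalisation pattern: an optional duration in the config becomes an optional integer of milliseconds. A toy sketch of the equivalent standalone logic (this stand-in parse_duration handles only the day suffix used below; the real Config.parse_duration also accepts raw ms and other unit suffixes):

from typing import Optional, Union

def parse_duration(value: Union[int, str]) -> int:
    # Stand-in for Config.parse_duration, illustrative only.
    if isinstance(value, int):
        return value
    assert value.endswith("d")
    return int(value[:-1]) * 24 * 60 * 60 * 1000

raw: Optional[str] = "30d"  # e.g. config.get("delete_stale_devices_after")
delete_stale_devices_after: Optional[int] = (
    parse_duration(raw) if raw is not None else None
)
assert delete_stale_devices_after == 30 * 24 * 60 * 60 * 1000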
@@ -996,7 +1007,7 @@ class ServerConfig(Config):
        # federation: the server-server API (/_matrix/federation). Also implies
        #     'media', 'keys', 'openid'
        #
        # keys: the key discovery API (/_matrix/keys).
        # keys: the key discovery API (/_matrix/key).
        #
        # media: the media API (/_matrix/media).
        #
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any, Set
from typing import Any, List, Set

from synapse.types import JsonDict
from synapse.util.check_dependencies import DependencyException, check_requirements
@@ -49,7 +49,9 @@ class TracerConfig(Config):

        # The tracer is enabled so sanitize the config

        self.opentracer_whitelist = opentracing_config.get("homeserver_whitelist", [])
        self.opentracer_whitelist: List[str] = opentracing_config.get(
            "homeserver_whitelist", []
        )
        if not isinstance(self.opentracer_whitelist, list):
            raise ConfigError("Tracer homeserver_whitelist config is malformed")

@@ -414,7 +414,12 @@ def _is_membership_change_allowed(
            raise AuthError(403, "You are banned from this room")
        elif join_rule == JoinRules.PUBLIC:
            pass
        elif room_version.msc3083_join_rules and join_rule == JoinRules.RESTRICTED:
        elif (
            room_version.msc3083_join_rules and join_rule == JoinRules.RESTRICTED
        ) or (
            room_version.msc3787_knock_restricted_join_rule
            and join_rule == JoinRules.KNOCK_RESTRICTED
        ):
            # This is the same as public, but the event must contain a reference
            # to the server who authorised the join. If the event does not contain
            # the proper content it is rejected.
@@ -440,8 +445,13 @@ def _is_membership_change_allowed(
            if authorising_user_level < invite_level:
                raise AuthError(403, "Join event authorised by invalid server.")

        elif join_rule == JoinRules.INVITE or (
            room_version.msc2403_knocking and join_rule == JoinRules.KNOCK
        elif (
            join_rule == JoinRules.INVITE
            or (room_version.msc2403_knocking and join_rule == JoinRules.KNOCK)
            or (
                room_version.msc3787_knock_restricted_join_rule
                and join_rule == JoinRules.KNOCK_RESTRICTED
            )
        ):
            if not caller_in_room and not caller_invited:
                raise AuthError(403, "You are not invited to this room.")
@@ -462,7 +472,10 @@ def _is_membership_change_allowed(
            if user_level < ban_level or user_level <= target_level:
                raise AuthError(403, "You don't have permission to ban")
        elif room_version.msc2403_knocking and Membership.KNOCK == membership:
            if join_rule != JoinRules.KNOCK:
            if join_rule != JoinRules.KNOCK and (
                not room_version.msc3787_knock_restricted_join_rule
                or join_rule != JoinRules.KNOCK_RESTRICTED
            ):
                raise AuthError(403, "You don't have permission to knock")
            elif target_user_id != event.user_id:
                raise AuthError(403, "You cannot knock for other users")
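Read as a truth table, the new guard raises only when neither knock variant applies. A small sketch with stand-in string values (not the real JoinRules constants) of when a knock is permitted:

def knock_allowed(join_rule: str, msc3787_supported: bool) -> bool:
    # A knock is permitted if the room's join rule is "knock", or if the
    # room version supports MSC3787 and the rule is "knock_restricted".
    return join_rule == "knock" or (
        msc3787_supported and join_rule == "knock_restricted"
    )

assert knock_allowed("knock", False)
assert knock_allowed("knock_restricted", True)
assert not knock_allowed("knock_restricted", False)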
@@ -15,6 +15,7 @@
# limitations under the License.

import abc
import collections.abc
import os
from typing import (
    TYPE_CHECKING,
@@ -32,9 +33,11 @@ from typing import (
    overload,
)

import attr
from typing_extensions import Literal
from unpaddedbase64 import encode_base64

from synapse.api.constants import RelationTypes
from synapse.api.room_versions import EventFormatVersions, RoomVersion, RoomVersions
from synapse.types import JsonDict, RoomStreamToken
from synapse.util.caches import intern_dict
@@ -615,3 +618,45 @@ def make_event_from_dict(
    return event_type(
        event_dict, room_version, internal_metadata_dict or {}, rejected_reason
    )


@attr.s(slots=True, frozen=True, auto_attribs=True)
class _EventRelation:
    # The target event of the relation.
    parent_id: str
    # The relation type.
    rel_type: str
    # The aggregation key. Will be None if the rel_type is not m.annotation or is
    # not a string.
    aggregation_key: Optional[str]


def relation_from_event(event: EventBase) -> Optional[_EventRelation]:
    """
    Attempt to parse relation information from an event.

    Returns:
        The event relation information, if it is valid. None, otherwise.
    """
    relation = event.content.get("m.relates_to")
    if not relation or not isinstance(relation, collections.abc.Mapping):
        # No relation information.
        return None

    # Relations must have a type and parent event ID.
    rel_type = relation.get("rel_type")
    if not isinstance(rel_type, str):
        return None

    parent_id = relation.get("event_id")
    if not isinstance(parent_id, str):
        return None

    # Annotations have a key field.
    aggregation_key = None
    if rel_type == RelationTypes.ANNOTATION:
        aggregation_key = relation.get("key")
        if not isinstance(aggregation_key, str):
            aggregation_key = None

    return _EventRelation(parent_id, rel_type, aggregation_key)
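A short usage sketch of the new helper, built around a hand-written annotation payload (the event content here is illustrative):

# A minimal stand-in exposing the only attribute relation_from_event() reads.
class FakeEvent:
    content = {
        "m.relates_to": {
            "rel_type": "m.annotation",   # RelationTypes.ANNOTATION
            "event_id": "$parent:example.org",
            "key": "👍",
        }
    }

rel = FakeEvent.content["m.relates_to"]
# relation_from_event(FakeEvent()) would return
# _EventRelation(parent_id="$parent:example.org",
#                rel_type="m.annotation", aggregation_key="👍")
assert isinstance(rel["rel_type"], str) and isinstance(rel["event_id"], str)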
@@ -15,17 +15,16 @@ from typing import TYPE_CHECKING, List, Optional, Tuple, Union

import attr
from frozendict import frozendict

from twisted.internet.defer import Deferred
from typing_extensions import Literal

from synapse.appservice import ApplicationService
from synapse.events import EventBase
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.types import JsonDict, StateMap

if TYPE_CHECKING:
    from synapse.storage import Storage
    from synapse.storage.controllers import StorageControllers
    from synapse.storage.databases.main import DataStore
    from synapse.storage.state import StateFilter


@attr.s(slots=True, auto_attribs=True)
@@ -60,6 +59,9 @@ class EventContext:
        If ``state_group`` is None (ie, the event is an outlier),
        ``state_group_before_event`` will always also be ``None``.

        state_delta_due_to_event: If `state_group` and `state_group_before_event` are not None
            then this is the delta of the state between the two groups.

        prev_group: If it is known, ``state_group``'s prev_group. Note that this being
            None does not necessarily mean that ``state_group`` does not have
            a prev_group!
@@ -78,73 +80,47 @@ class EventContext:
        app_service: If this event is being sent by a (local) application service, that
            app service.

        _current_state_ids: The room state map, including this event - ie, the state
            in ``state_group``.

            (type, state_key) -> event_id

            For an outlier, this is {}

            Note that this is a private attribute: it should be accessed via
            ``get_current_state_ids``. _AsyncEventContext impl calculates this
            on-demand: it will be None until that happens.

        _prev_state_ids: The room state map, excluding this event - ie, the state
            in ``state_group_before_event``. For a non-state
            event, this will be the same as _current_state_events.

            Note that it is a completely different thing to prev_group!

            (type, state_key) -> event_id

            For an outlier, this is {}

            As with _current_state_ids, this is a private attribute. It should be
            accessed via get_prev_state_ids.

        partial_state: if True, we may be storing this event with a temporary,
            incomplete state.
    """

    rejected: Union[bool, str] = False
    _storage: "StorageControllers"
    rejected: Union[Literal[False], str] = False
    _state_group: Optional[int] = None
    state_group_before_event: Optional[int] = None
    _state_delta_due_to_event: Optional[StateMap[str]] = None
    prev_group: Optional[int] = None
    delta_ids: Optional[StateMap[str]] = None
    app_service: Optional[ApplicationService] = None

    _current_state_ids: Optional[StateMap[str]] = None
    _prev_state_ids: Optional[StateMap[str]] = None

    partial_state: bool = False

    @staticmethod
    def with_state(
        storage: "StorageControllers",
        state_group: Optional[int],
        state_group_before_event: Optional[int],
        current_state_ids: Optional[StateMap[str]],
        prev_state_ids: Optional[StateMap[str]],
        state_delta_due_to_event: Optional[StateMap[str]],
        partial_state: bool,
        prev_group: Optional[int] = None,
        delta_ids: Optional[StateMap[str]] = None,
    ) -> "EventContext":
        return EventContext(
            current_state_ids=current_state_ids,
            prev_state_ids=prev_state_ids,
            storage=storage,
            state_group=state_group,
            state_group_before_event=state_group_before_event,
            state_delta_due_to_event=state_delta_due_to_event,
            prev_group=prev_group,
            delta_ids=delta_ids,
            partial_state=partial_state,
        )

    @staticmethod
    def for_outlier() -> "EventContext":
    def for_outlier(
        storage: "StorageControllers",
    ) -> "EventContext":
        """Return an EventContext instance suitable for persisting an outlier event"""
        return EventContext(
            current_state_ids={},
            prev_state_ids={},
        )
        return EventContext(storage=storage)

    async def serialize(self, event: EventBase, store: "DataStore") -> JsonDict:
        """Converts self to a type that can be serialized as JSON, and then
@@ -157,31 +133,21 @@ class EventContext:
            The serialized event.
        """

        # We don't serialize the full state dicts, instead they get pulled out
        # of the DB on the other side. However, the other side can't figure out
        # the prev_state_ids, so if we're a state event we include the event
        # id that we replaced in the state.
        if event.is_state():
            prev_state_ids = await self.get_prev_state_ids()
            prev_state_id = prev_state_ids.get((event.type, event.state_key))
        else:
            prev_state_id = None

        return {
            "prev_state_id": prev_state_id,
            "event_type": event.type,
            "event_state_key": event.get_state_key(),
            "state_group": self._state_group,
            "state_group_before_event": self.state_group_before_event,
            "rejected": self.rejected,
            "prev_group": self.prev_group,
            "state_delta_due_to_event": _encode_state_dict(
                self._state_delta_due_to_event
            ),
            "delta_ids": _encode_state_dict(self.delta_ids),
            "app_service_id": self.app_service.id if self.app_service else None,
            "partial_state": self.partial_state,
        }

    @staticmethod
    def deserialize(storage: "Storage", input: JsonDict) -> "EventContext":
    def deserialize(storage: "StorageControllers", input: JsonDict) -> "EventContext":
        """Converts a dict that was produced by `serialize` back into a
        EventContext.
@@ -192,16 +158,16 @@ class EventContext:
        Returns:
            The event context.
        """
        context = _AsyncEventContextImpl(
        context = EventContext(
            # We use the state_group and prev_state_id stuff to pull the
            # current_state_ids out of the DB and construct prev_state_ids.
            storage=storage,
            prev_state_id=input["prev_state_id"],
            event_type=input["event_type"],
            event_state_key=input["event_state_key"],
            state_group=input["state_group"],
            state_group_before_event=input["state_group_before_event"],
            prev_group=input["prev_group"],
            state_delta_due_to_event=_decode_state_dict(
                input["state_delta_due_to_event"]
            ),
            delta_ids=_decode_state_dict(input["delta_ids"]),
            rejected=input["rejected"],
            partial_state=input.get("partial_state", False),
@@ -231,7 +197,9 @@ class EventContext:

        return self._state_group

    async def get_current_state_ids(self) -> Optional[StateMap[str]]:
    async def get_current_state_ids(
        self, state_filter: Optional["StateFilter"] = None
    ) -> Optional[StateMap[str]]:
        """
        Gets the room state map, including this event - ie, the state in ``state_group``
@@ -239,6 +207,9 @@ class EventContext:
        not make it into the room state. This method will raise an exception if
        ``rejected`` is set.

        Args:
            state_filter: specifies the type of state event to fetch from DB, example: EventTypes.JoinRules

        Returns:
            Returns None if state_group is None, which happens when the associated
            event is an outlier.
@@ -249,15 +220,27 @@ class EventContext:
        if self.rejected:
            raise RuntimeError("Attempt to access state_ids of rejected event")

        await self._ensure_fetched()
        return self._current_state_ids
        assert self._state_delta_due_to_event is not None

    async def get_prev_state_ids(self) -> StateMap[str]:
        prev_state_ids = await self.get_prev_state_ids(state_filter)

        if self._state_delta_due_to_event:
            prev_state_ids = dict(prev_state_ids)
            prev_state_ids.update(self._state_delta_due_to_event)

        return prev_state_ids

    async def get_prev_state_ids(
        self, state_filter: Optional["StateFilter"] = None
    ) -> StateMap[str]:
        """
        Gets the room state map, excluding this event.

        For a non-state event, this will be the same as get_current_state_ids().

        Args:
            state_filter: specifies the type of state event to fetch from DB, example: EventTypes.JoinRules

        Returns:
            Returns {} if state_group is None, which happens when the associated
            event is an outlier.
@@ -265,94 +248,10 @@ class EventContext:
            Maps a (type, state_key) to the event ID of the state event matching
            this tuple.
        """
        await self._ensure_fetched()
        # There *should* be previous state IDs now.
        assert self._prev_state_ids is not None
        return self._prev_state_ids
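The new optional state_filter parameter lets callers fetch only the slice of room state they need instead of the full state map. A hedged usage sketch (StateFilter.from_types exists in synapse.storage.state; the surrounding function is an assumption for illustration):

from synapse.api.constants import EventTypes
from synapse.storage.state import StateFilter

async def join_rules_before_event(context):
    # Pull only the m.room.join_rules entry from the pre-event state,
    # rather than materialising the whole room state map.
    state_filter = StateFilter.from_types([(EventTypes.JoinRules, "")])
    prev_state_ids = await context.get_prev_state_ids(state_filter)
    return prev_state_ids.get((EventTypes.JoinRules, ""))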
    def get_cached_current_state_ids(self) -> Optional[StateMap[str]]:
        """Gets the current state IDs if we have them already cached.

        It is an error to access this for a rejected event, since rejected state should
        not make it into the room state. This method will raise an exception if
        ``rejected`` is set.

        Returns:
            Returns None if we haven't cached the state or if state_group is None
            (which happens when the associated event is an outlier).

            Otherwise, returns the current state IDs.
        """
        if self.rejected:
            raise RuntimeError("Attempt to access state_ids of rejected event")

        return self._current_state_ids

    async def _ensure_fetched(self) -> None:
        return None


@attr.s(slots=True)
class _AsyncEventContextImpl(EventContext):
    """
    An implementation of EventContext which fetches _current_state_ids and
    _prev_state_ids from the database on demand.

    Attributes:

        _storage

        _fetching_state_deferred: Resolves when *_state_ids have been calculated.
            None if we haven't started calculating yet

        _event_type: The type of the event the context is associated with.

        _event_state_key: The state_key of the event the context is associated with.

        _prev_state_id: If the event associated with the context is a state event,
            then `_prev_state_id` is the event_id of the state that was replaced.
    """

    # This needs to have a default as we're inheriting
    _storage: "Storage" = attr.ib(default=None)
    _prev_state_id: Optional[str] = attr.ib(default=None)
    _event_type: str = attr.ib(default=None)
    _event_state_key: Optional[str] = attr.ib(default=None)
    _fetching_state_deferred: Optional["Deferred[None]"] = attr.ib(default=None)

    async def _ensure_fetched(self) -> None:
        if not self._fetching_state_deferred:
            self._fetching_state_deferred = run_in_background(self._fill_out_state)

        await make_deferred_yieldable(self._fetching_state_deferred)

    async def _fill_out_state(self) -> None:
        """Called to populate the _current_state_ids and _prev_state_ids
        attributes by loading from the database.
        """
        if self.state_group is None:
            # No state group means the event is an outlier. Usually the state_ids dicts are also
            # pre-set to empty dicts, but they get reset when the context is serialized, so set
            # them to empty dicts again here.
            self._current_state_ids = {}
            self._prev_state_ids = {}
            return

        current_state_ids = await self._storage.state.get_state_ids_for_group(
            self.state_group
        assert self.state_group_before_event is not None
        return await self._storage.state.get_state_ids_for_group(
            self.state_group_before_event, state_filter
        )
        # Set this separately so mypy knows current_state_ids is not None.
        self._current_state_ids = current_state_ids
        if self._event_state_key is not None:
            self._prev_state_ids = dict(current_state_ids)

            key = (self._event_type, self._event_state_key)
            if self._prev_state_id:
                self._prev_state_ids[key] = self._prev_state_id
            else:
                self._prev_state_ids.pop(key, None)
        else:
            self._prev_state_ids = current_state_ids


def _encode_state_dict(
@@ -21,17 +21,20 @@ from typing import (
    Awaitable,
    Callable,
    Collection,
    Dict,
    List,
    Optional,
    Tuple,
    Union,
)

from synapse.api.errors import Codes
from synapse.rest.media.v1._base import FileInfo
from synapse.rest.media.v1.media_storage import ReadableFileWrapper
from synapse.spam_checker_api import RegistrationBehaviour
from synapse.types import RoomAlias, UserProfile
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
from synapse.util.metrics import Measure

if TYPE_CHECKING:
    import synapse.events
@@ -40,6 +43,22 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)


CHECK_EVENT_FOR_SPAM_CALLBACK = Callable[
    ["synapse.events.EventBase"],
    Awaitable[
        Union[
            str,
            Codes,
            # Highly experimental, not officially part of the spamchecker API, may
            # disappear without warning depending on the results of ongoing
            # experiments.
            # Use this to return additional information as part of an error.
            Tuple[Codes, Dict],
            # Deprecated
            bool,
        ]
    ],
]
SHOULD_DROP_FEDERATED_EVENT_CALLBACK = Callable[
    ["synapse.events.EventBase"],
    Awaitable[Union[bool, str]],
]
@@ -162,8 +181,16 @@ def load_legacy_spam_checkers(hs: "synapse.server.HomeServer") -> None:


class SpamChecker:
    def __init__(self) -> None:
    NOT_SPAM = "NOT_SPAM"

    def __init__(self, hs: "synapse.server.HomeServer") -> None:
        self.hs = hs
        self.clock = hs.get_clock()

        self._check_event_for_spam_callbacks: List[CHECK_EVENT_FOR_SPAM_CALLBACK] = []
        self._should_drop_federated_event_callbacks: List[
            SHOULD_DROP_FEDERATED_EVENT_CALLBACK
        ] = []
        self._user_may_join_room_callbacks: List[USER_MAY_JOIN_ROOM_CALLBACK] = []
        self._user_may_invite_callbacks: List[USER_MAY_INVITE_CALLBACK] = []
        self._user_may_send_3pid_invite_callbacks: List[
@@ -187,6 +214,9 @@ class SpamChecker:
    def register_callbacks(
        self,
        check_event_for_spam: Optional[CHECK_EVENT_FOR_SPAM_CALLBACK] = None,
        should_drop_federated_event: Optional[
            SHOULD_DROP_FEDERATED_EVENT_CALLBACK
        ] = None,
        user_may_join_room: Optional[USER_MAY_JOIN_ROOM_CALLBACK] = None,
        user_may_invite: Optional[USER_MAY_INVITE_CALLBACK] = None,
        user_may_send_3pid_invite: Optional[USER_MAY_SEND_3PID_INVITE_CALLBACK] = None,
@@ -205,6 +235,11 @@ class SpamChecker:
        if check_event_for_spam is not None:
            self._check_event_for_spam_callbacks.append(check_event_for_spam)

        if should_drop_federated_event is not None:
            self._should_drop_federated_event_callbacks.append(
                should_drop_federated_event
            )

        if user_may_join_room is not None:
            self._user_may_join_room_callbacks.append(user_may_join_room)

@@ -240,7 +275,7 @@ class SpamChecker:

    async def check_event_for_spam(
        self, event: "synapse.events.EventBase"
    ) -> Union[bool, str]:
    ) -> Union[Tuple[Codes, Dict], str]:
        """Checks if a given event is considered "spammy" by this server.

        If the server considers an event spammy, then it will be rejected if
@@ -251,11 +286,65 @@ class SpamChecker:
            event: the event to be checked

        Returns:
            True or a string if the event is spammy. If a string is returned it
            will be used as the error message returned to the user.
            - `NOT_SPAM` if the event is considered good (non-spammy) and should be let
                through. Other spamcheck filters may still reject it.
            - A `Code` if the event is considered spammy and is rejected with a specific
                error message/code.
            - A string that isn't `NOT_SPAM` if the event is considered spammy and the
                string should be used as the client-facing error message. This usage is
                generally discouraged as it doesn't support internationalization.
        """
        for callback in self._check_event_for_spam_callbacks:
            res: Union[bool, str] = await delay_cancellation(callback(event))
            with Measure(
                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
            ):
                res = await delay_cancellation(callback(event))
            if res is False or res == self.NOT_SPAM:
                # This spam-checker accepts the event.
                # Other spam-checkers may reject it, though.
                continue
            elif res is True:
                # This spam-checker rejects the event with deprecated
                # return value `True`
                return Codes.FORBIDDEN
            elif not isinstance(res, str):
                # mypy complains that we can't reach this code because of the
                # return type in CHECK_EVENT_FOR_SPAM_CALLBACK, but we don't know
                # for sure that the module actually returns it.
                logger.warning(
                    "Module returned invalid value, rejecting message as spam"
                )
                res = "This message has been rejected as probable spam"
            else:
                # The module rejected the event either with a `Codes`
                # or some other `str`. In either case, we stop here.
                pass

            return res

        # No spam-checker has rejected the event, let it pass.
        return self.NOT_SPAM
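Under the new contract, a module callback signals acceptance with NOT_SPAM and rejection with a Codes value (or, discouraged, a bare string). A hedged sketch of such a callback; the module registration wiring is assumed and not part of this diff:

from typing import Union
from synapse.api.errors import Codes

NOT_SPAM = "NOT_SPAM"  # mirrors SpamChecker.NOT_SPAM

async def my_check_event_for_spam(
    event: "synapse.events.EventBase",
) -> Union[str, Codes]:
    # Reject events whose body contains a blocked phrase; accept the rest.
    if "buy cheap widgets" in event.content.get("body", ""):
        return Codes.FORBIDDEN
    return NOT_SPAM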
    async def should_drop_federated_event(
        self, event: "synapse.events.EventBase"
    ) -> Union[bool, str]:
        """Checks if a given federated event is considered "spammy" by this
        server.

        If the server considers an event spammy, it will be silently dropped,
        and in doing so will split-brain our view of the room's DAG.

        Args:
            event: the event to be checked

        Returns:
            True if the event should be silently dropped
        """
        for callback in self._should_drop_federated_event_callbacks:
            with Measure(
                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
            ):
                res: Union[bool, str] = await delay_cancellation(callback(event))
            if res:
                return res
@@ -276,9 +365,12 @@ class SpamChecker:
            Whether the user may join the room
        """
        for callback in self._user_may_join_room_callbacks:
            may_join_room = await delay_cancellation(
                callback(user_id, room_id, is_invited)
            )
            with Measure(
                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
            ):
                may_join_room = await delay_cancellation(
                    callback(user_id, room_id, is_invited)
                )
            if may_join_room is False:
                return False

@@ -300,9 +392,12 @@ class SpamChecker:
            True if the user may send an invite, otherwise False
        """
        for callback in self._user_may_invite_callbacks:
            may_invite = await delay_cancellation(
                callback(inviter_userid, invitee_userid, room_id)
            )
            with Measure(
                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
            ):
                may_invite = await delay_cancellation(
                    callback(inviter_userid, invitee_userid, room_id)
                )
            if may_invite is False:
                return False

@@ -328,9 +423,12 @@ class SpamChecker:
            True if the user may send the invite, otherwise False
        """
        for callback in self._user_may_send_3pid_invite_callbacks:
            may_send_3pid_invite = await delay_cancellation(
                callback(inviter_userid, medium, address, room_id)
            )
            with Measure(
                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
            ):
                may_send_3pid_invite = await delay_cancellation(
                    callback(inviter_userid, medium, address, room_id)
                )
            if may_send_3pid_invite is False:
                return False

@@ -348,7 +446,10 @@ class SpamChecker:
            True if the user may create a room, otherwise False
        """
        for callback in self._user_may_create_room_callbacks:
            may_create_room = await delay_cancellation(callback(userid))
            with Measure(
                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
            ):
                may_create_room = await delay_cancellation(callback(userid))
            if may_create_room is False:
                return False

@@ -369,9 +470,12 @@ class SpamChecker:
            True if the user may create a room alias, otherwise False
        """
        for callback in self._user_may_create_room_alias_callbacks:
            may_create_room_alias = await delay_cancellation(
                callback(userid, room_alias)
            )
            with Measure(
                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
            ):
                may_create_room_alias = await delay_cancellation(
                    callback(userid, room_alias)
                )
            if may_create_room_alias is False:
                return False

@@ -390,7 +494,10 @@ class SpamChecker:
            True if the user may publish the room, otherwise False
        """
        for callback in self._user_may_publish_room_callbacks:
            may_publish_room = await delay_cancellation(callback(userid, room_id))
            with Measure(
                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
            ):
                may_publish_room = await delay_cancellation(callback(userid, room_id))
            if may_publish_room is False:
                return False

@@ -412,9 +519,13 @@ class SpamChecker:
            True if the user is spammy.
        """
        for callback in self._check_username_for_spam_callbacks:
            # Make a copy of the user profile object to ensure the spam checker cannot
            # modify it.
            if await delay_cancellation(callback(user_profile.copy())):
            with Measure(
                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
            ):
                # Make a copy of the user profile object to ensure the spam checker cannot
                # modify it.
                res = await delay_cancellation(callback(user_profile.copy()))
            if res:
                return True

        return False
@@ -442,9 +553,12 @@ class SpamChecker:
        """

        for callback in self._check_registration_for_spam_callbacks:
            behaviour = await delay_cancellation(
                callback(email_threepid, username, request_info, auth_provider_id)
            )
            with Measure(
                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
            ):
                behaviour = await delay_cancellation(
                    callback(email_threepid, username, request_info, auth_provider_id)
                )
            assert isinstance(behaviour, RegistrationBehaviour)
            if behaviour != RegistrationBehaviour.ALLOW:
                return behaviour
@@ -486,7 +600,10 @@ class SpamChecker:
        """

        for callback in self._check_media_file_for_spam_callbacks:
            spam = await delay_cancellation(callback(file_wrapper, file_info))
            with Measure(
                self.clock, "{}.{}".format(callback.__module__, callback.__qualname__)
            ):
                spam = await delay_cancellation(callback(file_wrapper, file_info))
            if spam:
                return True

@@ -152,6 +152,7 @@ class ThirdPartyEventRules:
        self.third_party_rules = None

        self.store = hs.get_datastores().main
        self._storage_controllers = hs.get_storage_controllers()

        self._check_event_allowed_callbacks: List[CHECK_EVENT_ALLOWED_CALLBACK] = []
        self._on_create_room_callbacks: List[ON_CREATE_ROOM_CALLBACK] = []
@@ -463,7 +464,7 @@ class ThirdPartyEventRules:
        Returns:
            A dict mapping (event type, state key) to state event.
        """
        state_ids = await self.store.get_filtered_current_state_ids(room_id)
        state_ids = await self._storage_controllers.state.get_current_state_ids(room_id)
        room_state_events = await self.store.get_events(state_ids.values())

        state_events = {}

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc
from typing import Iterable, Type, Union
from typing import Iterable, Type, Union, cast

import jsonschema

@@ -103,7 +103,12 @@ class EventValidator:
        except jsonschema.ValidationError as e:
            if e.path:
                # example: "users_default": '0' is not of type 'integer'
                message = '"' + e.path[-1] + '": ' + e.message  # noqa: B306
                # cast safety: path entries can be integers, if we fail to validate
                # items in an array. However the POWER_LEVELS_SCHEMA doesn't expect
                # to see any arrays.
                message = (
                    '"' + cast(str, e.path[-1]) + '": ' + e.message  # noqa: B306
                )
                # jsonschema.ValidationError.message is a valid attribute
            else:
                # example: '0' is not of type 'integer'
@@ -32,6 +32,18 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)


class InvalidEventSignatureError(RuntimeError):
    """Raised when the signature on an event is invalid.

    The stringification of this exception is just the error message without reference
    to the event id. The event id is available as a property.
    """

    def __init__(self, message: str, event_id: str):
        super().__init__(message)
        self.event_id = event_id


class FederationBase:
    def __init__(self, hs: "HomeServer"):
        self.hs = hs
@@ -41,6 +53,7 @@ class FederationBase:
        self.spam_checker = hs.get_spam_checker()
        self.store = hs.get_datastores().main
        self._clock = hs.get_clock()
        self._storage_controllers = hs.get_storage_controllers()

    async def _check_sigs_and_hash(
        self, room_version: RoomVersion, pdu: EventBase
@@ -59,20 +72,13 @@ class FederationBase:
        Returns:
            * the original event if the checks pass
            * a redacted version of the event (if the signature
                matched but the hash did not)
                matched but the hash did not). In this case a warning will be logged.

        Raises:
            SynapseError if the signature check failed.
            InvalidEventSignatureError if the signature check failed. Nothing
                will be logged in this case.
        """
        try:
            await _check_sigs_on_pdu(self.keyring, room_version, pdu)
        except SynapseError as e:
            logger.warning(
                "Signature check failed for %s: %s",
                pdu.event_id,
                e,
            )
            raise
        await _check_sigs_on_pdu(self.keyring, room_version, pdu)

        if not check_event_content_hash(pdu):
            # let's try to distinguish between failures because the event was
@@ -87,7 +93,7 @@ class FederationBase:
            if set(redacted_event.keys()) == set(pdu.keys()) and set(
                redacted_event.content.keys()
            ) == set(pdu.content.keys()):
                logger.info(
                logger.debug(
                    "Event %s seems to have been redacted; using our redacted copy",
                    pdu.event_id,
                )
@@ -98,9 +104,9 @@ class FederationBase:
            )
            return redacted_event

        result = await self.spam_checker.check_event_for_spam(pdu)
        spam_check = await self.spam_checker.check_event_for_spam(pdu)

        if result:
        if spam_check != self.spam_checker.NOT_SPAM:
            logger.warning("Event contains spam, soft-failing %s", pdu.event_id)
            # we redact (to save disk space) as well as soft-failing (to stop
            # using the event in prev_events).
@@ -116,12 +122,13 @@ async def _check_sigs_on_pdu(
) -> None:
    """Check that the given events are correctly signed

    Raise a SynapseError if the event wasn't correctly signed.

    Args:
        keyring: keyring object to do the checks
        room_version: the room version of the PDUs
        pdus: the events to be checked

    Raises:
        InvalidEventSignatureError if the event wasn't correctly signed.
    """

    # we want to check that the event is signed by:
@@ -147,44 +154,38 @@ async def _check_sigs_on_pdu(

    # First we check that the sender event is signed by the sender's domain
    # (except if its a 3pid invite, in which case it may be sent by any server)
    sender_domain = get_domain_from_id(pdu.sender)
    if not _is_invite_via_3pid(pdu):
        try:
            await keyring.verify_event_for_server(
                get_domain_from_id(pdu.sender),
                sender_domain,
                pdu,
                pdu.origin_server_ts if room_version.enforce_key_validity else 0,
            )
        except Exception as e:
            errmsg = "event id %s: unable to verify signature for sender %s: %s" % (
            raise InvalidEventSignatureError(
                f"unable to verify signature for sender domain {sender_domain}: {e}",
                pdu.event_id,
                get_domain_from_id(pdu.sender),
                e,
            )
            raise SynapseError(403, errmsg, Codes.FORBIDDEN)
            ) from None

    # now let's look for events where the sender's domain is different to the
    # event id's domain (normally only the case for joins/leaves), and add additional
    # checks. Only do this if the room version has a concept of event ID domain
    # (ie, the room version uses old-style non-hash event IDs).
    if room_version.event_format == EventFormatVersions.V1 and get_domain_from_id(
        pdu.event_id
    ) != get_domain_from_id(pdu.sender):
        try:
            await keyring.verify_event_for_server(
                get_domain_from_id(pdu.event_id),
                pdu,
                pdu.origin_server_ts if room_version.enforce_key_validity else 0,
            )
        except Exception as e:
            errmsg = (
                "event id %s: unable to verify signature for event id domain %s: %s"
                % (
                    pdu.event_id,
                    get_domain_from_id(pdu.event_id),
                    e,
    if room_version.event_format == EventFormatVersions.V1:
        event_domain = get_domain_from_id(pdu.event_id)
        if event_domain != sender_domain:
            try:
                await keyring.verify_event_for_server(
                    event_domain,
                    pdu,
                    pdu.origin_server_ts if room_version.enforce_key_validity else 0,
                )
                )
            raise SynapseError(403, errmsg, Codes.FORBIDDEN)
            except Exception as e:
                raise InvalidEventSignatureError(
                    f"unable to verify signature for event domain {event_domain}: {e}",
                    pdu.event_id,
                ) from None

    # If this is a join event for a restricted room it may have been authorised
    # via a different server from the sending server. Check those signatures.
@@ -204,15 +205,10 @@ async def _check_sigs_on_pdu(
                pdu.origin_server_ts if room_version.enforce_key_validity else 0,
            )
        except Exception as e:
            errmsg = (
                "event id %s: unable to verify signature for authorising server %s: %s"
                % (
                    pdu.event_id,
                    authorising_server,
                    e,
                )
            )
            raise SynapseError(403, errmsg, Codes.FORBIDDEN)
            raise InvalidEventSignatureError(
                f"unable to verify signature for authorising server {authorising_server}: {e}",
                pdu.event_id,
            ) from None


def _is_invite_via_3pid(event: EventBase) -> bool:
@@ -54,7 +54,11 @@ from synapse.api.room_versions import (
    RoomVersions,
)
from synapse.events import EventBase, builder
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.federation.federation_base import (
    FederationBase,
    InvalidEventSignatureError,
    event_from_pdu_json,
)
from synapse.federation.transport.client import SendJoinResponse
from synapse.http.types import QueryParams
from synapse.types import JsonDict, UserID, get_domain_from_id
@@ -319,7 +323,13 @@ class FederationClient(FederationBase):
            pdu = pdu_list[0]

            # Check signatures are correct.
            signed_pdu = await self._check_sigs_and_hash(room_version, pdu)
            try:
                signed_pdu = await self._check_sigs_and_hash(room_version, pdu)
            except InvalidEventSignatureError as e:
                errmsg = f"event id {pdu.event_id}: {e}"
                logger.warning("%s", errmsg)
                raise SynapseError(403, errmsg, Codes.FORBIDDEN)

            return signed_pdu

        return None
@@ -405,6 +415,9 @@ class FederationClient(FederationBase):

        Returns:
            a tuple of (state event_ids, auth event_ids)

        Raises:
            InvalidResponseError: if fields in the response have the wrong type.
        """
        result = await self.transport_layer.get_room_state_ids(
            destination, room_id, event_id=event_id
@@ -416,7 +429,7 @@ class FederationClient(FederationBase):
        if not isinstance(state_event_ids, list) or not isinstance(
            auth_event_ids, list
        ):
            raise Exception("invalid response from /state_ids")
            raise InvalidResponseError("invalid response from /state_ids")

        return state_event_ids, auth_event_ids

@@ -552,20 +565,24 @@ class FederationClient(FederationBase):

        Returns:
            The PDU (possibly redacted) if it has valid signatures and hashes.
            None if no valid copy could be found.
        """

        res = None
        try:
            res = await self._check_sigs_and_hash(room_version, pdu)
        except SynapseError:
            pass

        if not res:
            # Check local db.
            res = await self.store.get_event(
                pdu.event_id, allow_rejected=True, allow_none=True
            return await self._check_sigs_and_hash(room_version, pdu)
        except InvalidEventSignatureError as e:
            logger.warning(
                "Signature on retrieved event %s was invalid (%s). "
                "Checking local store/origin server",
                pdu.event_id,
                e,
            )

        # Check local db.
        res = await self.store.get_event(
            pdu.event_id, allow_rejected=True, allow_none=True
        )

        pdu_origin = get_domain_from_id(pdu.sender)
        if not res and pdu_origin != origin:
            try:
@@ -1040,9 +1057,14 @@ class FederationClient(FederationBase):
            pdu = event_from_pdu_json(pdu_dict, room_version)

            # Check signatures are correct.
            pdu = await self._check_sigs_and_hash(room_version, pdu)
            try:
                pdu = await self._check_sigs_and_hash(room_version, pdu)
            except InvalidEventSignatureError as e:
                errmsg = f"event id {pdu.event_id}: {e}"
                logger.warning("%s", errmsg)
                raise SynapseError(403, errmsg, Codes.FORBIDDEN)

            # FIXME: We should handle signature failures more gracefully.
            # FIXME: We should handle signature failures more gracefully.

        return pdu

@@ -48,7 +48,11 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.crypto.event_signing import compute_event_signature
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.federation.federation_base import (
    FederationBase,
    InvalidEventSignatureError,
    event_from_pdu_json,
)
from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Edu, Transaction
from synapse.http.servlet import assert_params_in_dict
@@ -109,11 +113,13 @@ class FederationServer(FederationBase):
        super().__init__(hs)

        self.handler = hs.get_federation_handler()
        self.storage = hs.get_storage()
        self._spam_checker = hs.get_spam_checker()
        self._federation_event_handler = hs.get_federation_event_handler()
        self.state = hs.get_state_handler()
        self._event_auth_handler = hs.get_event_auth_handler()

        self._state_storage_controller = hs.get_storage_controllers().state

        self.device_handler = hs.get_device_handler()

        # Ensure the following handlers are loaded since they register callbacks
@@ -631,7 +637,12 @@ class FederationServer(FederationBase):
        pdu = event_from_pdu_json(content, room_version)
        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, pdu.room_id)
        pdu = await self._check_sigs_and_hash(room_version, pdu)
        try:
            pdu = await self._check_sigs_and_hash(room_version, pdu)
        except InvalidEventSignatureError as e:
            errmsg = f"event id {pdu.event_id}: {e}"
            logger.warning("%s", errmsg)
            raise SynapseError(403, errmsg, Codes.FORBIDDEN)
        ret_pdu = await self.handler.on_invite_request(origin, pdu, room_version)
        time_now = self._clock.time_msec()
        return {"event": ret_pdu.get_pdu_json(time_now)}
@@ -864,7 +875,12 @@ class FederationServer(FederationBase):
                )
            )

        event = await self._check_sigs_and_hash(room_version, event)
        try:
            event = await self._check_sigs_and_hash(room_version, event)
        except InvalidEventSignatureError as e:
            errmsg = f"event id {event.event_id}: {e}"
            logger.warning("%s", errmsg)
            raise SynapseError(403, errmsg, Codes.FORBIDDEN)

        return await self._federation_event_handler.on_send_membership_event(
            origin, event
@@ -1016,8 +1032,15 @@ class FederationServer(FederationBase):
        # Check signature.
        try:
            pdu = await self._check_sigs_and_hash(room_version, pdu)
        except SynapseError as e:
            raise FederationError("ERROR", e.code, e.msg, affected=pdu.event_id)
        except InvalidEventSignatureError as e:
            logger.warning("event id %s: %s", pdu.event_id, e)
            raise FederationError("ERROR", 403, str(e), affected=pdu.event_id)

        if await self._spam_checker.should_drop_federated_event(pdu):
            logger.warning(
                "Unstaged federated event contains spam, dropping %s", pdu.event_id
            )
            return

        # Add the event to our staging area
        await self.store.insert_received_event_to_staging(origin, pdu)
@@ -1032,6 +1055,41 @@ class FederationServer(FederationBase):
            pdu.room_id, room_version, lock, origin, pdu
        )

    async def _get_next_nonspam_staged_event_for_room(
        self, room_id: str, room_version: RoomVersion
    ) -> Optional[Tuple[str, EventBase]]:
        """Fetch the first non-spam event from staging queue.

        Args:
            room_id: the room to fetch the first non-spam event in.
            room_version: the version of the room.

        Returns:
            The first non-spam event in that room.
        """

        while True:
            # We need to do this check outside the lock to avoid a race between
            # a new event being inserted by another instance and it attempting
            # to acquire the lock.
            next = await self.store.get_next_staged_event_for_room(
                room_id, room_version
            )

            if next is None:
                return None

            origin, event = next

            if await self._spam_checker.should_drop_federated_event(event):
                logger.warning(
                    "Staged federated event contains spam, dropping %s",
                    event.event_id,
                )
                continue

            return next

    @wrap_as_background_process("_process_incoming_pdus_in_room_inner")
    async def _process_incoming_pdus_in_room_inner(
        self,
@@ -1109,12 +1167,10 @@ class FederationServer(FederationBase):
                    (self._clock.time_msec() - received_ts) / 1000
                )

            # We need to do this check outside the lock to avoid a race between
            # a new event being inserted by another instance and it attempting
            # to acquire the lock.
            next = await self.store.get_next_staged_event_for_room(
            next = await self._get_next_nonspam_staged_event_for_room(
                room_id, room_version
            )

            if not next:
                break

@@ -1167,14 +1223,10 @@ class FederationServer(FederationBase):
        Raises:
            AuthError if the server does not match the ACL
        """
        state_ids = await self.store.get_current_state_ids(room_id)
        acl_event_id = state_ids.get((EventTypes.ServerACL, ""))

        if not acl_event_id:
            return

        acl_event = await self.store.get_event(acl_event_id)
        if server_matches_acl_event(server_name, acl_event):
        acl_event = await self._storage_controllers.state.get_current_state_event(
            room_id, EventTypes.ServerACL, ""
        )
        if not acl_event or server_matches_acl_event(server_name, acl_event):
            return

        raise AuthError(code=403, msg="Server is banned from room")
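For reference, the m.room.server_acl state event that this check consults carries allow/deny glob lists, per the Matrix spec; a sketch of its content with illustrative values:

# Illustrative m.room.server_acl event content; server_matches_acl_event()
# tests the requesting server's name against these glob patterns.
acl_content = {
    "allow": ["*"],                   # globs of servers allowed in the room
    "deny": ["*.evil.example.org"],   # globs of banned servers
    "allow_ip_literals": False,       # reject raw-IP server names
}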
@@ -1313,7 +1365,7 @@ class FederationHandlerRegistry:
        self._edu_type_to_instance[edu_type] = instance_names

    async def on_edu(self, edu_type: str, origin: str, content: dict) -> None:
        if not self.config.server.use_presence and edu_type == EduTypes.Presence:
        if not self.config.server.use_presence and edu_type == EduTypes.PRESENCE:
            return

        # Check if we have a handler on this instance

@@ -15,7 +15,17 @@
import abc
import logging
from collections import OrderedDict
from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Set, Tuple
from typing import (
    TYPE_CHECKING,
    Collection,
    Dict,
    Hashable,
    Iterable,
    List,
    Optional,
    Set,
    Tuple,
)

import attr
from prometheus_client import Counter
@@ -235,6 +245,8 @@ class FederationSender(AbstractFederationSender):
        self.store = hs.get_datastores().main
        self.state = hs.get_state_handler()

        self._storage_controllers = hs.get_storage_controllers()

        self.clock = hs.get_clock()
        self.is_mine_id = hs.is_mine_id
@@ -409,7 +421,7 @@ class FederationSender(AbstractFederationSender):
            )
            return

        destinations: Optional[Set[str]] = None
        destinations: Optional[Collection[str]] = None
        if not event.prev_event_ids():
            # If there are no prev event IDs then the state is empty
            # and so no remote servers in the room
@@ -444,7 +456,7 @@ class FederationSender(AbstractFederationSender):
            )
            return

        destinations = {
        sharded_destinations = {
            d
            for d in destinations
            if self._federation_shard_config.should_handle(
@@ -456,12 +468,12 @@ class FederationSender(AbstractFederationSender):
        # If we are sending the event on behalf of another server
        # then it already has the event and there is no reason to
        # send the event to it.
        destinations.discard(send_on_behalf_of)
        sharded_destinations.discard(send_on_behalf_of)

        logger.debug("Sending %s to %r", event, destinations)
        logger.debug("Sending %s to %r", event, sharded_destinations)

        if destinations:
            await self._send_pdu(event, destinations)
        if sharded_destinations:
            await self._send_pdu(event, sharded_destinations)

        now = self.clock.time_msec()
        ts = await self.store.get_received_ts(event.event_id)
@@ -592,7 +604,9 @@ class FederationSender(AbstractFederationSender):
        room_id = receipt.room_id

        # Work out which remote servers should be poked and poke them.
        domains_set = await self.state.get_current_hosts_in_room(room_id)
        domains_set = await self._storage_controllers.state.get_current_hosts_in_room(
            room_id
        )
        domains = [
            d
            for d in domains_set
@@ -21,6 +21,7 @@ from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Tupl
import attr
from prometheus_client import Counter

from synapse.api.constants import EduTypes
from synapse.api.errors import (
    FederationDeniedError,
    HttpResponseException,
@@ -223,7 +224,7 @@ class PerDestinationQueue:
        """Marks that the destination has new data to send, without starting a
        new transaction.

        If a transaction loop is already in progress then a new transcation will
        If a transaction loop is already in progress then a new transaction will
        be attempted when the current one finishes.
        """
@@ -542,7 +543,7 @@ class PerDestinationQueue:
            edu = Edu(
                origin=self._server_name,
                destination=self._destination,
                edu_type="m.receipt",
                edu_type=EduTypes.RECEIPT,
                content=self._pending_rrs,
            )
            self._pending_rrs = {}
@@ -592,7 +593,7 @@ class PerDestinationQueue:
                Edu(
                    origin=self._server_name,
                    destination=self._destination,
                    edu_type="m.direct_to_device",
                    edu_type=EduTypes.DIRECT_TO_DEVICE,
                    content=content,
                )
                for content in contents
@@ -670,7 +671,7 @@ class _TransactionQueueManager:
                Edu(
                    origin=self.queue._server_name,
                    destination=self.queue._destination,
                    edu_type="m.presence",
                    edu_type=EduTypes.PRESENCE,
                    content={
                        "push": [
                            format_user_presence_state(
@@ -16,6 +16,7 @@ from typing import TYPE_CHECKING, List

from prometheus_client import Gauge

from synapse.api.constants import EduTypes
from synapse.api.errors import HttpResponseException
from synapse.events import EventBase
from synapse.federation.persistence import TransactionActions
@@ -126,7 +127,10 @@ class TransactionManager:
                len(edus),
            )
            if issue_8631_logger.isEnabledFor(logging.DEBUG):
                DEVICE_UPDATE_EDUS = {"m.device_list_update", "m.signing_key_update"}
                DEVICE_UPDATE_EDUS = {
                    EduTypes.DEVICE_LIST_UPDATE,
                    EduTypes.SIGNING_KEY_UPDATE,
                }
                device_list_updates = [
                    edu.content for edu in edus if edu.edu_type in DEVICE_UPDATE_EDUS
                ]

@@ -17,7 +17,6 @@ import logging
import urllib
from typing import (
    Any,
    Awaitable,
    Callable,
    Collection,
    Dict,
@@ -49,11 +48,6 @@ from synapse.types import JsonDict

logger = logging.getLogger(__name__)

# Send join responses can be huge, so we set a separate limit here. The response
# is parsed in a streaming manner, which helps alleviate the issue of memory
# usage a bit.
MAX_RESPONSE_SIZE_SEND_JOIN = 500 * 1024 * 1024


class TransportLayerClient:
    """Sends federation HTTP requests to other servers"""
@@ -349,7 +343,6 @@ class TransportLayerClient:
            path=path,
            data=content,
            parser=SendJoinParser(room_version, v1_api=True),
            max_response_size=MAX_RESPONSE_SIZE_SEND_JOIN,
        )

    async def send_join_v2(
@@ -372,7 +365,6 @@ class TransportLayerClient:
            args=query_params,
            data=content,
            parser=SendJoinParser(room_version, v1_api=False),
            max_response_size=MAX_RESPONSE_SIZE_SEND_JOIN,
        )

    async def send_leave_v1(
@@ -688,488 +680,6 @@ class TransportLayerClient:
            timeout=timeout,
        )

    async def get_group_profile(
        self, destination: str, group_id: str, requester_user_id: str
    ) -> JsonDict:
        """Get a group profile"""
        path = _create_v1_path("/groups/%s/profile", group_id)

        return await self.client.get_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            ignore_backoff=True,
        )

    async def update_group_profile(
        self, destination: str, group_id: str, requester_user_id: str, content: JsonDict
    ) -> JsonDict:
        """Update a remote group profile

        Args:
            destination
            group_id
            requester_user_id
            content: The new profile of the group
        """
        path = _create_v1_path("/groups/%s/profile", group_id)

        return self.client.post_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            data=content,
            ignore_backoff=True,
        )

    async def get_group_summary(
        self, destination: str, group_id: str, requester_user_id: str
    ) -> JsonDict:
        """Get a group summary"""
        path = _create_v1_path("/groups/%s/summary", group_id)

        return await self.client.get_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            ignore_backoff=True,
        )

    async def get_rooms_in_group(
        self, destination: str, group_id: str, requester_user_id: str
    ) -> JsonDict:
        """Get all rooms in a group"""
        path = _create_v1_path("/groups/%s/rooms", group_id)

        return await self.client.get_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            ignore_backoff=True,
        )

    async def add_room_to_group(
        self,
        destination: str,
        group_id: str,
        requester_user_id: str,
        room_id: str,
        content: JsonDict,
    ) -> JsonDict:
        """Add a room to a group"""
        path = _create_v1_path("/groups/%s/room/%s", group_id, room_id)

        return await self.client.post_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            data=content,
            ignore_backoff=True,
        )

    async def update_room_in_group(
        self,
        destination: str,
        group_id: str,
        requester_user_id: str,
        room_id: str,
        config_key: str,
        content: JsonDict,
    ) -> JsonDict:
        """Update room in group"""
        path = _create_v1_path(
            "/groups/%s/room/%s/config/%s", group_id, room_id, config_key
        )

        return await self.client.post_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            data=content,
            ignore_backoff=True,
        )

    async def remove_room_from_group(
        self, destination: str, group_id: str, requester_user_id: str, room_id: str
    ) -> JsonDict:
        """Remove a room from a group"""
        path = _create_v1_path("/groups/%s/room/%s", group_id, room_id)

        return await self.client.delete_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            ignore_backoff=True,
        )

    async def get_users_in_group(
        self, destination: str, group_id: str, requester_user_id: str
    ) -> JsonDict:
        """Get users in a group"""
        path = _create_v1_path("/groups/%s/users", group_id)

        return await self.client.get_json(
            destination=destination,
            path=path,
            args={"requester_user_id": requester_user_id},
            ignore_backoff=True,
        )

    async def get_invited_users_in_group(
        self, destination: str, group_id: str, requester_user_id: str
    ) -> JsonDict:
        """Get users that have been invited to a group"""
        path = _create_v1_path("/groups/%s/invited_users", group_id)

        return await self.client.get_json(
            destination=destination,
            path=path,
|
||||
args={"requester_user_id": requester_user_id},
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
async def accept_group_invite(
|
||||
self, destination: str, group_id: str, user_id: str, content: JsonDict
|
||||
) -> JsonDict:
|
||||
"""Accept a group invite"""
|
||||
path = _create_v1_path("/groups/%s/users/%s/accept_invite", group_id, user_id)
|
||||
|
||||
return await self.client.post_json(
|
||||
destination=destination, path=path, data=content, ignore_backoff=True
|
||||
)
|
||||
|
||||
def join_group(
|
||||
self, destination: str, group_id: str, user_id: str, content: JsonDict
|
||||
) -> Awaitable[JsonDict]:
|
||||
"""Attempts to join a group"""
|
||||
path = _create_v1_path("/groups/%s/users/%s/join", group_id, user_id)
|
||||
|
||||
return self.client.post_json(
|
||||
destination=destination, path=path, data=content, ignore_backoff=True
|
||||
)
|
||||
|
||||
async def invite_to_group(
|
||||
self,
|
||||
destination: str,
|
||||
group_id: str,
|
||||
user_id: str,
|
||||
requester_user_id: str,
|
||||
content: JsonDict,
|
||||
) -> JsonDict:
|
||||
"""Invite a user to a group"""
|
||||
path = _create_v1_path("/groups/%s/users/%s/invite", group_id, user_id)
|
||||
|
||||
return await self.client.post_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
args={"requester_user_id": requester_user_id},
|
||||
data=content,
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
async def invite_to_group_notification(
|
||||
self, destination: str, group_id: str, user_id: str, content: JsonDict
|
||||
) -> JsonDict:
|
||||
"""Sent by group server to inform a user's server that they have been
|
||||
invited.
|
||||
"""
|
||||
|
||||
path = _create_v1_path("/groups/local/%s/users/%s/invite", group_id, user_id)
|
||||
|
||||
return await self.client.post_json(
|
||||
destination=destination, path=path, data=content, ignore_backoff=True
|
||||
)
|
||||
|
||||
async def remove_user_from_group(
|
||||
self,
|
||||
destination: str,
|
||||
group_id: str,
|
||||
requester_user_id: str,
|
||||
user_id: str,
|
||||
content: JsonDict,
|
||||
) -> JsonDict:
|
||||
"""Remove a user from a group"""
|
||||
path = _create_v1_path("/groups/%s/users/%s/remove", group_id, user_id)
|
||||
|
||||
return await self.client.post_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
args={"requester_user_id": requester_user_id},
|
||||
data=content,
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
async def remove_user_from_group_notification(
|
||||
self, destination: str, group_id: str, user_id: str, content: JsonDict
|
||||
) -> JsonDict:
|
||||
"""Sent by group server to inform a user's server that they have been
|
||||
kicked from the group.
|
||||
"""
|
||||
|
||||
path = _create_v1_path("/groups/local/%s/users/%s/remove", group_id, user_id)
|
||||
|
||||
return await self.client.post_json(
|
||||
destination=destination, path=path, data=content, ignore_backoff=True
|
||||
)
|
||||
|
||||
async def renew_group_attestation(
|
||||
self, destination: str, group_id: str, user_id: str, content: JsonDict
|
||||
) -> JsonDict:
|
||||
"""Sent by either a group server or a user's server to periodically update
|
||||
the attestations
|
||||
"""
|
||||
|
||||
path = _create_v1_path("/groups/%s/renew_attestation/%s", group_id, user_id)
|
||||
|
||||
return await self.client.post_json(
|
||||
destination=destination, path=path, data=content, ignore_backoff=True
|
||||
)
|
||||
|
||||
async def update_group_summary_room(
|
||||
self,
|
||||
destination: str,
|
||||
group_id: str,
|
||||
user_id: str,
|
||||
room_id: str,
|
||||
category_id: str,
|
||||
content: JsonDict,
|
||||
) -> JsonDict:
|
||||
"""Update a room entry in a group summary"""
|
||||
if category_id:
|
||||
path = _create_v1_path(
|
||||
"/groups/%s/summary/categories/%s/rooms/%s",
|
||||
group_id,
|
||||
category_id,
|
||||
room_id,
|
||||
)
|
||||
else:
|
||||
path = _create_v1_path("/groups/%s/summary/rooms/%s", group_id, room_id)
|
||||
|
||||
return await self.client.post_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
args={"requester_user_id": user_id},
|
||||
data=content,
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
async def delete_group_summary_room(
|
||||
self,
|
||||
destination: str,
|
||||
group_id: str,
|
||||
user_id: str,
|
||||
room_id: str,
|
||||
category_id: str,
|
||||
) -> JsonDict:
|
||||
"""Delete a room entry in a group summary"""
|
||||
if category_id:
|
||||
path = _create_v1_path(
|
||||
"/groups/%s/summary/categories/%s/rooms/%s",
|
||||
group_id,
|
||||
category_id,
|
||||
room_id,
|
||||
)
|
||||
else:
|
||||
path = _create_v1_path("/groups/%s/summary/rooms/%s", group_id, room_id)
|
||||
|
||||
return await self.client.delete_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
args={"requester_user_id": user_id},
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
async def get_group_categories(
|
||||
self, destination: str, group_id: str, requester_user_id: str
|
||||
) -> JsonDict:
|
||||
"""Get all categories in a group"""
|
||||
path = _create_v1_path("/groups/%s/categories", group_id)
|
||||
|
||||
return await self.client.get_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
args={"requester_user_id": requester_user_id},
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
async def get_group_category(
|
||||
self, destination: str, group_id: str, requester_user_id: str, category_id: str
|
||||
) -> JsonDict:
|
||||
"""Get category info in a group"""
|
||||
path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id)
|
||||
|
||||
return await self.client.get_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
args={"requester_user_id": requester_user_id},
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
async def update_group_category(
|
||||
self,
|
||||
destination: str,
|
||||
group_id: str,
|
||||
requester_user_id: str,
|
||||
category_id: str,
|
||||
content: JsonDict,
|
||||
) -> JsonDict:
|
||||
"""Update a category in a group"""
|
||||
path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id)
|
||||
|
||||
return await self.client.post_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
args={"requester_user_id": requester_user_id},
|
||||
data=content,
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
async def delete_group_category(
|
||||
self, destination: str, group_id: str, requester_user_id: str, category_id: str
|
||||
) -> JsonDict:
|
||||
"""Delete a category in a group"""
|
||||
path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id)
|
||||
|
||||
return await self.client.delete_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
args={"requester_user_id": requester_user_id},
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
async def get_group_roles(
|
||||
self, destination: str, group_id: str, requester_user_id: str
|
||||
) -> JsonDict:
|
||||
"""Get all roles in a group"""
|
||||
path = _create_v1_path("/groups/%s/roles", group_id)
|
||||
|
||||
return await self.client.get_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
args={"requester_user_id": requester_user_id},
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
async def get_group_role(
|
||||
self, destination: str, group_id: str, requester_user_id: str, role_id: str
|
||||
) -> JsonDict:
|
||||
"""Get a roles info"""
|
||||
path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id)
|
||||
|
||||
return await self.client.get_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
args={"requester_user_id": requester_user_id},
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
async def update_group_role(
|
||||
self,
|
||||
destination: str,
|
||||
group_id: str,
|
||||
requester_user_id: str,
|
||||
role_id: str,
|
||||
content: JsonDict,
|
||||
) -> JsonDict:
|
||||
"""Update a role in a group"""
|
||||
path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id)
|
||||
|
||||
return await self.client.post_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
args={"requester_user_id": requester_user_id},
|
||||
data=content,
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
async def delete_group_role(
|
||||
self, destination: str, group_id: str, requester_user_id: str, role_id: str
|
||||
) -> JsonDict:
|
||||
"""Delete a role in a group"""
|
||||
path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id)
|
||||
|
||||
return await self.client.delete_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
args={"requester_user_id": requester_user_id},
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
async def update_group_summary_user(
|
||||
self,
|
||||
destination: str,
|
||||
group_id: str,
|
||||
requester_user_id: str,
|
||||
user_id: str,
|
||||
role_id: str,
|
||||
content: JsonDict,
|
||||
) -> JsonDict:
|
||||
"""Update a users entry in a group"""
|
||||
if role_id:
|
||||
path = _create_v1_path(
|
||||
"/groups/%s/summary/roles/%s/users/%s", group_id, role_id, user_id
|
||||
)
|
||||
else:
|
||||
path = _create_v1_path("/groups/%s/summary/users/%s", group_id, user_id)
|
||||
|
||||
return await self.client.post_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
args={"requester_user_id": requester_user_id},
|
||||
data=content,
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
async def set_group_join_policy(
|
||||
self, destination: str, group_id: str, requester_user_id: str, content: JsonDict
|
||||
) -> JsonDict:
|
||||
"""Sets the join policy for a group"""
|
||||
path = _create_v1_path("/groups/%s/settings/m.join_policy", group_id)
|
||||
|
||||
return await self.client.put_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
args={"requester_user_id": requester_user_id},
|
||||
data=content,
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
async def delete_group_summary_user(
|
||||
self,
|
||||
destination: str,
|
||||
group_id: str,
|
||||
requester_user_id: str,
|
||||
user_id: str,
|
||||
role_id: str,
|
||||
) -> JsonDict:
|
||||
"""Delete a users entry in a group"""
|
||||
if role_id:
|
||||
path = _create_v1_path(
|
||||
"/groups/%s/summary/roles/%s/users/%s", group_id, role_id, user_id
|
||||
)
|
||||
else:
|
||||
path = _create_v1_path("/groups/%s/summary/users/%s", group_id, user_id)
|
||||
|
||||
return await self.client.delete_json(
|
||||
destination=destination,
|
||||
path=path,
|
||||
args={"requester_user_id": requester_user_id},
|
||||
ignore_backoff=True,
|
||||
)
|
||||
|
||||
async def bulk_get_publicised_groups(
|
||||
self, destination: str, user_ids: Iterable[str]
|
||||
) -> JsonDict:
|
||||
"""Get the groups a list of users are publicising"""
|
||||
|
||||
path = _create_v1_path("/get_groups_publicised")
|
||||
|
||||
content = {"user_ids": user_ids}
|
||||
|
||||
return await self.client.post_json(
|
||||
destination=destination, path=path, data=content, ignore_backoff=True
|
||||
)
|
||||
|
||||
async def get_room_complexity(self, destination: str, room_id: str) -> JsonDict:
|
||||
"""
|
||||
Args:
|
||||
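
Every removed method above builds its URL the same way. A sketch of that helper convention, mirroring Synapse's private `_create_v1_path` (treat the exact behaviour as illustrative):

```python
# Sketch: federation v1 path building. Each %s placeholder is filled with
# a percent-encoded argument so user-supplied IDs cannot inject extra
# path segments.
import urllib.parse

FEDERATION_V1_PREFIX = "/_matrix/federation/v1"

def _create_v1_path(path: str, *args: str) -> str:
    return FEDERATION_V1_PREFIX + path % tuple(
        urllib.parse.quote(arg, safe="") for arg in args
    )

assert (
    _create_v1_path("/groups/%s/profile", "+group:example.com")
    == "/_matrix/federation/v1/groups/%2Bgroup%3Aexample.com/profile"
)
```
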
```diff
@@ -1360,10 +870,15 @@ class SendJoinParser(ByteParser[SendJoinResponse]):

    CONTENT_TYPE = "application/json"

+    # /send_join responses can be huge, so we override the size limit here. The response
+    # is parsed in a streaming manner, which helps alleviate the issue of memory
+    # usage a bit.
+    MAX_RESPONSE_SIZE = 500 * 1024 * 1024
+
    def __init__(self, room_version: RoomVersion, v1_api: bool):
        self._response = SendJoinResponse([], [], event_dict={})
        self._room_version = room_version
-        self._coros = []
+        self._coros: List[Generator[None, bytes, None]] = []

        # The V1 API has the shape of `[200, {...}]`, which we handle by
        # prefixing with `item.*`.
@@ -1411,6 +926,9 @@ class SendJoinParser(ByteParser[SendJoinResponse]):
        return len(data)

    def finish(self) -> SendJoinResponse:
+        for c in self._coros:
+            c.close()
+
        if self._response.event_dict:
            self._response.event = make_event_from_dict(
                self._response.event_dict, self._room_version
@@ -1427,10 +945,13 @@ class _StateParser(ByteParser[StateRequestResponse]):

    CONTENT_TYPE = "application/json"

+    # As with /send_join, /state responses can be huge.
+    MAX_RESPONSE_SIZE = 500 * 1024 * 1024
+
    def __init__(self, room_version: RoomVersion):
        self._response = StateRequestResponse([], [])
        self._room_version = room_version
-        self._coros = [
+        self._coros: List[Generator[None, bytes, None]] = [
            ijson.items_coro(
                _event_list_parser(room_version, self._response.state),
                "pdus.item",
@@ -1449,4 +970,6 @@ class _StateParser(ByteParser[StateRequestResponse]):
        return len(data)

    def finish(self) -> StateRequestResponse:
+        for c in self._coros:
+            c.close()
        return self._response
```
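
The parser changes above do two things: each parser now carries its own `MAX_RESPONSE_SIZE` override instead of callers passing `max_response_size`, and `finish()` closes the `ijson` coroutines so their buffers are released even when a response is truncated. A self-contained sketch of the coroutine-based streaming pattern (helper names here are illustrative, not Synapse's own; requires the `ijson` package):

```python
from typing import Generator, List

import ijson

@ijson.coroutine
def _collect(out: List[dict]) -> Generator[None, dict, None]:
    # Receives each complete JSON value matched by the prefix.
    while True:
        out.append((yield))

class StreamingListParser:
    """Feed response bytes in as they arrive; no full-body buffering."""

    def __init__(self, prefix: str) -> None:
        self.items: List[dict] = []
        # ijson.items_coro returns a coroutine that we `send` raw bytes to;
        # each complete value under `prefix` is handed to the target above.
        self._coros: List[Generator[None, bytes, None]] = [
            ijson.items_coro(_collect(self.items), prefix, use_float=True)
        ]

    def write(self, data: bytes) -> int:
        for coro in self._coros:
            coro.send(data)
        return len(data)

    def finish(self) -> List[dict]:
        # Close the coroutines so parser buffers are freed even if the
        # response ended early.
        for coro in self._coros:
            coro.close()
        return self.items

parser = StreamingListParser("pdus.item")
parser.write(b'{"pdus": [{"event_id": "$a"}, ')
parser.write(b'{"event_id": "$b"}]}')
assert parser.finish() == [{"event_id": "$a"}, {"event_id": "$b"}]
```
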
```diff
@@ -27,10 +27,6 @@ from synapse.federation.transport.server.federation import (
    FederationAccountStatusServlet,
    FederationTimestampLookupServlet,
 )
-from synapse.federation.transport.server.groups_local import GROUP_LOCAL_SERVLET_CLASSES
-from synapse.federation.transport.server.groups_server import (
-    GROUP_SERVER_SERVLET_CLASSES,
-)
 from synapse.http.server import HttpServer, JsonResource
 from synapse.http.servlet import (
    parse_boolean_from_args,
@@ -199,38 +195,6 @@ class PublicRoomList(BaseFederationServlet):
        return 200, data


-class FederationGroupsRenewAttestaionServlet(BaseFederationServlet):
-    """A group or user's server renews their attestation"""
-
-    PATH = "/groups/(?P<group_id>[^/]*)/renew_attestation/(?P<user_id>[^/]*)"
-
-    def __init__(
-        self,
-        hs: "HomeServer",
-        authenticator: Authenticator,
-        ratelimiter: FederationRateLimiter,
-        server_name: str,
-    ):
-        super().__init__(hs, authenticator, ratelimiter, server_name)
-        self.handler = hs.get_groups_attestation_renewer()
-
-    async def on_POST(
-        self,
-        origin: str,
-        content: JsonDict,
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-        user_id: str,
-    ) -> Tuple[int, JsonDict]:
-        # We don't need to check auth here as we check the attestation signatures
-
-        new_content = await self.handler.on_renew_attestation(
-            group_id, user_id, content
-        )
-
-        return 200, new_content
-
-
 class OpenIdUserInfo(BaseFederationServlet):
    """
    Exchange a bearer token for information about a user.
@@ -292,16 +256,9 @@ class OpenIdUserInfo(BaseFederationServlet):
 SERVLET_GROUPS: Dict[str, Iterable[Type[BaseFederationServlet]]] = {
    "federation": FEDERATION_SERVLET_CLASSES,
    "room_list": (PublicRoomList,),
-    "group_server": GROUP_SERVER_SERVLET_CLASSES,
-    "group_local": GROUP_LOCAL_SERVLET_CLASSES,
-    "group_attestation": (FederationGroupsRenewAttestaionServlet,),
    "openid": (OpenIdUserInfo,),
 }

-DEFAULT_SERVLET_GROUPS = ("federation", "room_list", "openid")
-
-GROUP_SERVLET_GROUPS = ("group_server", "group_local", "group_attestation")
-

 def register_servlets(
    hs: "HomeServer",
@@ -324,10 +281,7 @@ def register_servlets(
            Defaults to ``DEFAULT_SERVLET_GROUPS``.
    """
    if not servlet_groups:
-        servlet_groups = DEFAULT_SERVLET_GROUPS
-        # Only allow the groups servlets if the deprecated groups feature is enabled.
-        if hs.config.experimental.groups_enabled:
-            servlet_groups = servlet_groups + GROUP_SERVLET_GROUPS
+        servlet_groups = SERVLET_GROUPS.keys()

    for servlet_group in servlet_groups:
        # Skip unknown servlet groups.
```
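
With the groups servlets gone, registration no longer needs a feature-flag branch: defaulting to every key of the servlet-group mapping covers the same ground. A rough sketch of the shape (structure assumed; the real mapping holds servlet classes):

```python
from typing import Dict, Iterable, Optional, Tuple

# Placeholder tuples stand in for FEDERATION_SERVLET_CLASSES etc.
SERVLET_GROUPS: Dict[str, Tuple[type, ...]] = {
    "federation": (),
    "room_list": (),
    "openid": (),
}

def register_servlets(servlet_groups: Optional[Iterable[str]] = None) -> None:
    # No feature-flag branch any more: default to every known group.
    if not servlet_groups:
        servlet_groups = SERVLET_GROUPS.keys()

    for servlet_group in servlet_groups:
        # Skip unknown servlet groups rather than failing on stale config.
        if servlet_group not in SERVLET_GROUPS:
            continue
        for servletclass in SERVLET_GROUPS[servlet_group]:
            print(f"registering {servletclass!r} from group {servlet_group!r}")

register_servlets()
```
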
```diff
@@ -21,7 +21,7 @@ from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, Optional, Tupl

 from synapse.api.errors import Codes, FederationDeniedError, SynapseError
 from synapse.api.urls import FEDERATION_V1_PREFIX
-from synapse.http.server import HttpServer, ServletCallback
+from synapse.http.server import HttpServer, ServletCallback, is_method_cancellable
 from synapse.http.servlet import parse_json_object_from_request
 from synapse.http.site import SynapseRequest
 from synapse.logging.context import run_in_background
@@ -169,14 +169,16 @@ def _parse_auth_header(header_bytes: bytes) -> Tuple[str, str, str, Optional[str
    """
    try:
        header_str = header_bytes.decode("utf-8")
-        params = header_str.split(" ")[1].split(",")
+        params = re.split(" +", header_str)[1].split(",")
        param_dict: Dict[str, str] = {
-            k: v for k, v in [param.split("=", maxsplit=1) for param in params]
+            k.lower(): v for k, v in [param.split("=", maxsplit=1) for param in params]
        }

        def strip_quotes(value: str) -> str:
            if value.startswith('"'):
-                return value[1:-1]
+                return re.sub(
+                    "\\\\(.)", lambda matchobj: matchobj.group(1), value[1:-1]
+                )
            else:
                return value
```
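
The `_parse_auth_header` changes make the parser more forgiving: runs of spaces after the auth scheme are tolerated, parameter names are treated case-insensitively, and backslash escapes inside quoted values are undone. A standalone sketch of the resulting behaviour (simplified to return a dict rather than the origin/key/signature tuple):

```python
import re
from typing import Dict

def parse_x_matrix_params(header: str) -> Dict[str, str]:
    # Split on runs of spaces so "X-Matrix  origin=..." still parses.
    params = re.split(" +", header)[1].split(",")
    # Parameter names are case-insensitive, so normalise to lowercase.
    param_dict = {
        k.lower(): v for k, v in (p.split("=", maxsplit=1) for p in params)
    }

    def strip_quotes(value: str) -> str:
        if value.startswith('"'):
            # Drop the surrounding quotes and undo backslash escaping
            # (e.g. \" -> ").
            return re.sub(r"\\(.)", lambda m: m.group(1), value[1:-1])
        return value

    return {k: strip_quotes(v) for k, v in param_dict.items()}

parsed = parse_x_matrix_params(
    'X-Matrix  origin=origin.example.com,key="ed25519:1",sig="ABCDEF=="'
)
assert parsed["origin"] == "origin.example.com"
```
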
```diff
@@ -373,6 +375,17 @@ class BaseFederationServlet:
            if code is None:
                continue

+            if is_method_cancellable(code):
+                # The wrapper added by `self._wrap` will inherit the cancellable flag,
+                # but the wrapper itself does not support cancellation yet.
+                # Once resolved, the cancellation tests in
+                # `tests/federation/transport/server/test__base.py` can be re-enabled.
+                raise Exception(
+                    f"{self.__class__.__name__}.on_{method} has been marked as "
+                    "cancellable, but federation servlets do not support cancellation "
+                    "yet."
+                )
+
            server.register_paths(
                method,
                (pattern,),
```
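
`is_method_cancellable` reads a flag that a decorator sets on the handler; federation servlets refuse such handlers for now because the `_wrap`-generated wrapper would inherit the flag without actually supporting cancellation. A sketch of the flag convention (attribute name follows Synapse's `@cancellable` pattern; treat the details as illustrative):

```python
from typing import Any, Callable

def cancellable(method: Callable[..., Any]) -> Callable[..., Any]:
    """Mark a handler as safe to cancel when the client disconnects."""
    method.cancellable = True  # type: ignore[attr-defined]
    return method

def is_method_cancellable(method: Callable[..., Any]) -> bool:
    # No flag means "not cancellable" -- the conservative default.
    return getattr(method, "cancellable", False)

class ExampleServlet:
    @cancellable
    async def on_GET(self) -> None:
        ...

# Registration code can inspect the flag before wiring up the handler:
assert is_method_cancellable(ExampleServlet.on_GET)
```
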
```diff
@@ -27,6 +27,7 @@ from typing import (
 from matrix_common.versionstring import get_distribution_version_string
 from typing_extensions import Literal

+from synapse.api.constants import EduTypes
 from synapse.api.errors import Codes, SynapseError
 from synapse.api.room_versions import RoomVersions
 from synapse.api.urls import FEDERATION_UNSTABLE_PREFIX, FEDERATION_V2_PREFIX
@@ -108,7 +109,10 @@ class FederationSendServlet(BaseFederationServerServlet):
        )

        if issue_8631_logger.isEnabledFor(logging.DEBUG):
-            DEVICE_UPDATE_EDUS = ["m.device_list_update", "m.signing_key_update"]
+            DEVICE_UPDATE_EDUS = [
+                EduTypes.DEVICE_LIST_UPDATE,
+                EduTypes.SIGNING_KEY_UPDATE,
+            ]
            device_list_updates = [
                edu.get("content", {})
                for edu in transaction_data.get("edus", [])
@@ -650,10 +654,6 @@ class FederationRoomHierarchyServlet(BaseFederationServlet):
        )


-class FederationRoomHierarchyUnstableServlet(FederationRoomHierarchyServlet):
-    PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc2946"
-
-
 class RoomComplexityServlet(BaseFederationServlet):
    """
    Indicates to other servers how complex (and therefore likely
@@ -752,7 +752,6 @@ FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
    FederationVersionServlet,
    RoomComplexityServlet,
    FederationRoomHierarchyServlet,
-    FederationRoomHierarchyUnstableServlet,
    FederationV1SendKnockServlet,
    FederationMakeKnockServlet,
    FederationAccountStatusServlet,
```
```diff
@@ -1,115 +0,0 @@
-# Copyright 2021 The Matrix.org Foundation C.I.C.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import TYPE_CHECKING, Dict, List, Tuple, Type
-
-from synapse.api.errors import SynapseError
-from synapse.federation.transport.server._base import (
-    Authenticator,
-    BaseFederationServlet,
-)
-from synapse.handlers.groups_local import GroupsLocalHandler
-from synapse.types import JsonDict, get_domain_from_id
-from synapse.util.ratelimitutils import FederationRateLimiter
-
-if TYPE_CHECKING:
-    from synapse.server import HomeServer
-
-
-class BaseGroupsLocalServlet(BaseFederationServlet):
-    """Abstract base class for federation servlet classes which provides a groups local handler.
-
-    See BaseFederationServlet for more information.
-    """
-
-    def __init__(
-        self,
-        hs: "HomeServer",
-        authenticator: Authenticator,
-        ratelimiter: FederationRateLimiter,
-        server_name: str,
-    ):
-        super().__init__(hs, authenticator, ratelimiter, server_name)
-        self.handler = hs.get_groups_local_handler()
-
-
-class FederationGroupsLocalInviteServlet(BaseGroupsLocalServlet):
-    """A group server has invited a local user"""
-
-    PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite"
-
-    async def on_POST(
-        self,
-        origin: str,
-        content: JsonDict,
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-        user_id: str,
-    ) -> Tuple[int, JsonDict]:
-        if get_domain_from_id(group_id) != origin:
-            raise SynapseError(403, "group_id doesn't match origin")
-
-        assert isinstance(
-            self.handler, GroupsLocalHandler
-        ), "Workers cannot handle group invites."
-
-        new_content = await self.handler.on_invite(group_id, user_id, content)
-
-        return 200, new_content
-
-
-class FederationGroupsRemoveLocalUserServlet(BaseGroupsLocalServlet):
-    """A group server has removed a local user"""
-
-    PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove"
-
-    async def on_POST(
-        self,
-        origin: str,
-        content: JsonDict,
-        query: Dict[bytes, List[bytes]],
-        group_id: str,
-        user_id: str,
-    ) -> Tuple[int, None]:
-        if get_domain_from_id(group_id) != origin:
-            raise SynapseError(403, "user_id doesn't match origin")
-
-        assert isinstance(
-            self.handler, GroupsLocalHandler
-        ), "Workers cannot handle group removals."
-
-        await self.handler.user_removed_from_group(group_id, user_id, content)
-
-        return 200, None
-
-
-class FederationGroupsBulkPublicisedServlet(BaseGroupsLocalServlet):
-    """Get roles in a group"""
-
-    PATH = "/get_groups_publicised"
-
-    async def on_POST(
-        self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
-    ) -> Tuple[int, JsonDict]:
-        resp = await self.handler.bulk_get_publicised_groups(
-            content["user_ids"], proxy=False
-        )
-
-        return 200, resp
-
-
-GROUP_LOCAL_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
-    FederationGroupsLocalInviteServlet,
-    FederationGroupsRemoveLocalUserServlet,
-    FederationGroupsBulkPublicisedServlet,
-)
```
Some files were not shown because too many files have changed in this diff.