diff --git a/.ci/scripts/checkout_complement.sh b/.ci/scripts/checkout_complement.sh
new file mode 100755
index 0000000000..379f5d4387
--- /dev/null
+++ b/.ci/scripts/checkout_complement.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+#
+# Fetches a version of complement which best matches the current build.
+#
+# The tarball is unpacked into `./complement`.
+
+set -e
+mkdir -p complement
+
+# Pick an appropriate version of complement. Depending on whether this is a PR or release,
+# etc. we need to use different fallbacks:
+#
+# 1. First check if there's a similarly named branch (GITHUB_HEAD_REF
+#    for pull requests, otherwise GITHUB_REF).
+# 2. Attempt to use the base branch, e.g. when merging into release-vX.Y
+#    (GITHUB_BASE_REF for pull requests).
+# 3. Use the default complement branch ("HEAD").
+for BRANCH_NAME in "$GITHUB_HEAD_REF" "$GITHUB_BASE_REF" "${GITHUB_REF#refs/heads/}" "HEAD"; do
+  # Skip empty branch names and merge commits.
+  if [[ -z "$BRANCH_NAME" || $BRANCH_NAME =~ ^refs/pull/.* ]]; then
+    continue
+  fi
+
+  (wget -O - "https://github.com/matrix-org/complement/archive/$BRANCH_NAME.tar.gz" | tar -xz --strip-components=1 -C complement) && break
+done
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 5cd1418687..92977ea5a0 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -314,7 +314,7 @@ jobs:
     - run: .ci/scripts/test_synapse_port_db.sh

   complement:
-    if: ${{ !failure() && !cancelled() }}
+    if: "${{ !failure() && !cancelled() }}"
     needs: linting-done
     runs-on: ubuntu-latest

@@ -341,26 +341,7 @@ jobs:
       # Attempt to check out the same branch of Complement as the PR. If it
       # doesn't exist, fallback to HEAD.
       - name: Checkout complement
-        shell: bash
-        run: |
-          mkdir -p complement
-          # Attempt to use the version of complement which best matches the current
-          # build. Depending on whether this is a PR or release, etc. we need to
-          # use different fallbacks.
-          #
-          # 1. First check if there's a similarly named branch (GITHUB_HEAD_REF
-          #    for pull requests, otherwise GITHUB_REF).
-          # 2. Attempt to use the base branch, e.g. when merging into release-vX.Y
-          #    (GITHUB_BASE_REF for pull requests).
-          # 3. Use the default complement branch ("HEAD").
-          for BRANCH_NAME in "$GITHUB_HEAD_REF" "$GITHUB_BASE_REF" "${GITHUB_REF#refs/heads/}" "HEAD"; do
-            # Skip empty branch names and merge commits.
-            if [[ -z "$BRANCH_NAME" || $BRANCH_NAME =~ ^refs/pull/.* ]]; then
-              continue
-            fi
-
-            (wget -O - "https://github.com/matrix-org/complement/archive/$BRANCH_NAME.tar.gz" | tar -xz --strip-components=1 -C complement) && break
-          done
+        run: synapse/.ci/scripts/checkout_complement.sh

       - run: |
           set -o pipefail
@@ -368,6 +349,45 @@
         shell: bash
         name: Run Complement Tests

+  # We only run the workers tests on `develop` for now, because they're too slow to wait for on PRs.
+  # Sadly, you can't have an `if` condition on the value of a matrix, so this is a temporary, separate job for now.
+  # GitHub Actions doesn't support YAML anchors, so it's full-on duplication for now.
+  complement-developonly:
+    if: "${{ !failure() && !cancelled() && (github.ref == 'refs/heads/develop') }}"
+    needs: linting-done
+    runs-on: ubuntu-latest
+
+    steps:
+      # The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
+      # See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
+      - name: "Set Go Version"
+        run: |
+          # Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2
+          echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
+          # Add the Go path to the PATH: We need this so we can call gotestfmt
+          echo "~/go/bin" >> $GITHUB_PATH
+
+      - name: "Install Complement Dependencies"
+        run: |
+          sudo apt-get update && sudo apt-get install -y libolm3 libolm-dev
+          go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest
+
+      - name: Run actions/checkout@v2 for synapse
+        uses: actions/checkout@v2
+        with:
+          path: synapse
+
+      # Attempt to check out the same branch of Complement as the PR. If it
+      # doesn't exist, fall back to HEAD.
+      - name: Checkout complement
+        run: synapse/.ci/scripts/checkout_complement.sh
+
+      - run: |
+          set -o pipefail
+          WORKERS=1 COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
+        shell: bash
+        name: Run Complement Tests
+
   # a job which combines the coverage reports from all trials runs, ready for upload to SonarQube.
   coverage:
     needs: trial
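For reference, the fallback order implemented by `checkout_complement.sh` can be traced with a dry run that only prints the candidate tarball URLs. This is an illustrative sketch, not part of the change itself; the `GITHUB_*` values below are hypothetical stand-ins for what GitHub Actions would set on a pull-request build:

```bash
#!/bin/bash
# Dry run of the branch fallback logic in checkout_complement.sh.
# The three GITHUB_* values are hypothetical examples of a PR build.
GITHUB_HEAD_REF="my-feature-branch"   # PR source branch (empty outside PRs)
GITHUB_BASE_REF="develop"             # PR target branch (empty outside PRs)
GITHUB_REF="refs/pull/1234/merge"     # the ref that triggered the workflow

for BRANCH_NAME in "$GITHUB_HEAD_REF" "$GITHUB_BASE_REF" "${GITHUB_REF#refs/heads/}" "HEAD"; do
  # Skip empty branch names and merge commits, exactly as the real script does.
  if [[ -z "$BRANCH_NAME" || $BRANCH_NAME =~ ^refs/pull/.* ]]; then
    continue
  fi
  echo "would try: https://github.com/matrix-org/complement/archive/$BRANCH_NAME.tar.gz"
done
```

Here the PR branch is tried first, then the base branch; `refs/pull/1234/merge` is skipped by the merge-commit guard, and `HEAD` remains the final fallback.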
diff --git a/CHANGES.md b/CHANGES.md
index e10ac0314a..2bf8cdea75 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,145 @@
+Synapse 1.60.0 (2022-05-31)
+===========================
+
+This release of Synapse adds a unique index to the `state_group_edges` table, in
+order to prevent accidentally introducing duplicate information (for example,
+because a database backup was restored multiple times). If your Synapse database
+already has duplicate rows in this table, this could fail with an error and
+require manual remediation.
+
+Additionally, the signature of the `check_event_for_spam` module callback has changed.
+The previous signature has been deprecated and remains working for now. Module authors
+should update their modules to use the new signature where possible.
+
+See [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1600)
+for more details.
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.60.0rc1 that would break some imports from `synapse.module_api`. ([\#12918](https://github.com/matrix-org/synapse/issues/12918))
+
+
+Synapse 1.60.0rc2 (2022-05-27)
+==============================
+
+Features
+--------
+
+- Add an option allowing users to use their password to reauthenticate for privileged actions even though password login is disabled. ([\#12883](https://github.com/matrix-org/synapse/issues/12883))
+
+
+Bugfixes
+--------
+
+- Explicitly close `ijson` coroutines once we are done with them, instead of leaving the garbage collector to close them. ([\#12875](https://github.com/matrix-org/synapse/issues/12875))
+
+
+Internal Changes
+----------------
+
+- Improve URL previews by not including the content of media tags in the generated description. ([\#12887](https://github.com/matrix-org/synapse/issues/12887))
+
+
+Synapse 1.60.0rc1 (2022-05-24)
+==============================
+
+Features
+--------
+
+- Measure the time taken in spam-checking callbacks and expose those measurements as metrics. ([\#12513](https://github.com/matrix-org/synapse/issues/12513))
+- Add a `default_power_level_content_override` config option to set default room power levels per room preset. ([\#12618](https://github.com/matrix-org/synapse/issues/12618))
+- Add support for [MSC3787: Allowing knocks to restricted rooms](https://github.com/matrix-org/matrix-spec-proposals/pull/3787). ([\#12623](https://github.com/matrix-org/synapse/issues/12623))
+- Send `USER_IP` commands on a different Redis channel, in order to reduce traffic to workers that do not process these commands. ([\#12672](https://github.com/matrix-org/synapse/issues/12672), [\#12809](https://github.com/matrix-org/synapse/issues/12809))
+- Synapse will now reload [cache config](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#caching) when it receives a [SIGHUP](https://en.wikipedia.org/wiki/SIGHUP) signal. ([\#12673](https://github.com/matrix-org/synapse/issues/12673))
+- Add a config option to allow for auto-tuning of caches. ([\#12701](https://github.com/matrix-org/synapse/issues/12701))
+- Update [MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716) implementation to process marker events from the current state, to avoid markers being lost in timeline gaps for federated servers, which would cause the imported history to be undiscoverable. ([\#12718](https://github.com/matrix-org/synapse/issues/12718))
+- Add a `drop_federated_event` callback to `SpamChecker` to disregard inbound federated events before they take up much processing power, in an emergency. ([\#12744](https://github.com/matrix-org/synapse/issues/12744))
+- Implement [MSC3818: Copy room type on upgrade](https://github.com/matrix-org/matrix-spec-proposals/pull/3818). ([\#12786](https://github.com/matrix-org/synapse/issues/12786), [\#12792](https://github.com/matrix-org/synapse/issues/12792))
+- Update the `check_event_for_spam` module callback: deprecate the current callback signature and replace it with a new one that is both less ambiguous (replacing booleans with explicit allow/block) and more powerful (with the ability to return explicit error codes). ([\#12808](https://github.com/matrix-org/synapse/issues/12808))
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.7.0 that would prevent events from being sent to clients if there's a retention policy in the room when the support for retention policies is disabled. ([\#12611](https://github.com/matrix-org/synapse/issues/12611))
+- Fix a bug introduced in Synapse 1.57.0 where `/messages` would throw a 500 error when querying for a non-existent room. ([\#12683](https://github.com/matrix-org/synapse/issues/12683))
+- Add a unique index to `state_group_edges` to prevent duplicates being accidentally introduced, and the consequent impact on performance. ([\#12687](https://github.com/matrix-org/synapse/issues/12687))
+- Fix a long-standing bug where an empty room would be created when a user with an insufficient power level tried to upgrade a room. ([\#12696](https://github.com/matrix-org/synapse/issues/12696))
+- Fix a bug introduced in Synapse 1.30.0 where empty rooms could be automatically created if a monthly active users limit is set. ([\#12713](https://github.com/matrix-org/synapse/issues/12713))
+- Fix push to dismiss notifications when read on another client. Contributed by @SpiritCroc @ Beeper. ([\#12721](https://github.com/matrix-org/synapse/issues/12721))
+- Fix poor database performance when reading the cache invalidation stream for large servers with lots of workers. ([\#12747](https://github.com/matrix-org/synapse/issues/12747))
+- Fix a long-standing bug where the user directory background process would fail to make forward progress if a user included a null codepoint in their display name or avatar. ([\#12762](https://github.com/matrix-org/synapse/issues/12762))
+- Delete events from the `federation_inbound_events_staging` table when a room is purged through the admin API. ([\#12770](https://github.com/matrix-org/synapse/issues/12770))
+- Give a meaningful error message when a client tries to create a room with an invalid alias localpart. ([\#12779](https://github.com/matrix-org/synapse/issues/12779))
+- Fix a bug introduced in 1.43.0 where a file (`providers.json`) was never closed. Contributed by @arkamar. ([\#12794](https://github.com/matrix-org/synapse/issues/12794))
+- Fix a long-standing bug where finished log contexts would be re-started when failing to contact remote homeservers. ([\#12803](https://github.com/matrix-org/synapse/issues/12803))
+- Fix a bug, introduced in Synapse 1.21.0, that led to media thumbnails being unusable before the index has been added in the background. ([\#12823](https://github.com/matrix-org/synapse/issues/12823))
+
+
+Updates to the Docker image
+---------------------------
+
+- Fix the docker file after a dependency update. ([\#12853](https://github.com/matrix-org/synapse/issues/12853))
+
+
+Improved Documentation
+----------------------
+
+- Fix a typo in the Media Admin API documentation. ([\#12715](https://github.com/matrix-org/synapse/issues/12715))
+- Update the OpenID Connect example for Keycloak to be compatible with newer versions of Keycloak. Contributed by @nhh. ([\#12727](https://github.com/matrix-org/synapse/issues/12727))
+- Fix typo in server listener documentation. ([\#12742](https://github.com/matrix-org/synapse/issues/12742))
+- Link to the configuration manual from the welcome page of the documentation. ([\#12748](https://github.com/matrix-org/synapse/issues/12748))
+- Fix typo in `run_background_tasks_on` option name in configuration manual documentation. ([\#12749](https://github.com/matrix-org/synapse/issues/12749))
+- Add information regarding the `rc_invites` ratelimiting option to the configuration docs. ([\#12759](https://github.com/matrix-org/synapse/issues/12759))
+- Add documentation for cancellation of request processing. ([\#12761](https://github.com/matrix-org/synapse/issues/12761))
+- Recommend using docker to run tests against postgres. ([\#12765](https://github.com/matrix-org/synapse/issues/12765))
+- Add the user directory endpoint, which was missing, to the generic worker documentation. Contributed by @olmari. ([\#12773](https://github.com/matrix-org/synapse/issues/12773))
+- Add additional info to documentation of config option `cache_autotuning`. ([\#12776](https://github.com/matrix-org/synapse/issues/12776))
+- Update configuration manual documentation to document size-related suffixes. ([\#12777](https://github.com/matrix-org/synapse/issues/12777))
+- Fix invalid YAML syntax in the example documentation for the `url_preview_accept_language` config option. ([\#12785](https://github.com/matrix-org/synapse/issues/12785))
+
+
+Deprecations and Removals
+-------------------------
+
+- Require a body in POST requests to `/rooms/{roomId}/receipt/{receiptType}/{eventId}`, as required by the [Matrix specification](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3roomsroomidreceiptreceipttypeeventid). This breaks compatibility with Element Android 1.2.0 and earlier: users of those clients will be unable to send read receipts. ([\#12709](https://github.com/matrix-org/synapse/issues/12709))
+
+
+Internal Changes
+----------------
+
+- Improve event caching mechanism to avoid having multiple copies of an event in memory at a time. ([\#10533](https://github.com/matrix-org/synapse/issues/10533))
+- Preparation for faster-room-join work: return subsets of room state which we already have, immediately. ([\#12498](https://github.com/matrix-org/synapse/issues/12498))
+- Add `@cancellable` decorator, for use on endpoint methods that can be cancelled when clients disconnect. ([\#12586](https://github.com/matrix-org/synapse/issues/12586), [\#12588](https://github.com/matrix-org/synapse/issues/12588), [\#12630](https://github.com/matrix-org/synapse/issues/12630), [\#12694](https://github.com/matrix-org/synapse/issues/12694), [\#12698](https://github.com/matrix-org/synapse/issues/12698), [\#12699](https://github.com/matrix-org/synapse/issues/12699), [\#12700](https://github.com/matrix-org/synapse/issues/12700), [\#12705](https://github.com/matrix-org/synapse/issues/12705))
+- Enable cancellation of `GET /rooms/$room_id/members`, `GET /rooms/$room_id/state` and `GET /rooms/$room_id/state/$event_type/*` requests. ([\#12708](https://github.com/matrix-org/synapse/issues/12708))
+- Improve documentation of the `synapse.push` module. ([\#12676](https://github.com/matrix-org/synapse/issues/12676))
+- Refactor functions onto `PushRuleEvaluatorForEvent`. ([\#12677](https://github.com/matrix-org/synapse/issues/12677))
+- Preparation for database schema simplifications: stop writing to `event_reference_hashes`. ([\#12679](https://github.com/matrix-org/synapse/issues/12679))
+- Remove code which updates unused database column `application_services_state.last_txn`. ([\#12680](https://github.com/matrix-org/synapse/issues/12680))
+- Refactor `EventContext` class. ([\#12689](https://github.com/matrix-org/synapse/issues/12689))
+- Remove an unneeded class in the push code. ([\#12691](https://github.com/matrix-org/synapse/issues/12691))
+- Consolidate parsing of relation information from events. ([\#12693](https://github.com/matrix-org/synapse/issues/12693))
+- Convert namespace class `Codes` into a string enum. ([\#12703](https://github.com/matrix-org/synapse/issues/12703))
+- Optimize private read receipt filtering. ([\#12711](https://github.com/matrix-org/synapse/issues/12711))
+- Drop the logging level of status messages for the URL preview cache expiry job from INFO to DEBUG. ([\#12720](https://github.com/matrix-org/synapse/issues/12720))
+- Downgrade some OIDC errors to warnings in the logs, to reduce the noise of Sentry reports. ([\#12723](https://github.com/matrix-org/synapse/issues/12723))
+- Update configs used by Complement to allow more invites/3PID validations during tests. ([\#12731](https://github.com/matrix-org/synapse/issues/12731))
+- Tweak the mypy plugin so that `@cached` can accept `on_invalidate=None`. ([\#12769](https://github.com/matrix-org/synapse/issues/12769))
+- Move methods that call `add_push_rule` to the `PushRuleStore` class. ([\#12772](https://github.com/matrix-org/synapse/issues/12772))
+- Make handling of federation Authorization header (more) compliant with RFC7230. ([\#12774](https://github.com/matrix-org/synapse/issues/12774))
+- Refactor `resolve_state_groups_for_events` to not pull out full state when no state resolution happens. ([\#12775](https://github.com/matrix-org/synapse/issues/12775))
+- Do not keep going if there are 5 back-to-back background update failures. ([\#12781](https://github.com/matrix-org/synapse/issues/12781))
+- Fix federation when using the demo scripts. ([\#12783](https://github.com/matrix-org/synapse/issues/12783))
+- The `hash_password` script now fails when it is called without specifying a config file. Contributed by @jae1911. ([\#12789](https://github.com/matrix-org/synapse/issues/12789))
+- Improve and fix type hints. ([\#12567](https://github.com/matrix-org/synapse/issues/12567), [\#12477](https://github.com/matrix-org/synapse/issues/12477), [\#12717](https://github.com/matrix-org/synapse/issues/12717), [\#12753](https://github.com/matrix-org/synapse/issues/12753), [\#12695](https://github.com/matrix-org/synapse/issues/12695), [\#12734](https://github.com/matrix-org/synapse/issues/12734), [\#12716](https://github.com/matrix-org/synapse/issues/12716), [\#12726](https://github.com/matrix-org/synapse/issues/12726), [\#12790](https://github.com/matrix-org/synapse/issues/12790), [\#12833](https://github.com/matrix-org/synapse/issues/12833))
+- Update EventContext `get_current_event_ids` and `get_prev_event_ids` to accept state filters and update calls where possible. ([\#12791](https://github.com/matrix-org/synapse/issues/12791))
+- Remove Caddy from the Synapse workers image used in Complement. ([\#12818](https://github.com/matrix-org/synapse/issues/12818))
+- Add Complement's shared registration secret to the Complement worker image. This fixes tests that depend on it. ([\#12819](https://github.com/matrix-org/synapse/issues/12819))
+- Support registering Application Services when running with workers under Complement. ([\#12826](https://github.com/matrix-org/synapse/issues/12826))
+- Disable 'faster room join' Complement tests when testing against Synapse with workers. ([\#12842](https://github.com/matrix-org/synapse/issues/12842))
+
+
 Synapse 1.59.1 (2022-05-18)
 ===========================

@@ -89,7 +231,7 @@ Deprecations and Removals
 -------------------------

 - Remove unstable identifiers from [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069). ([\#12596](https://github.com/matrix-org/synapse/issues/12596))
-- Remove the unspecified `m.login.jwt` login type and the unstable `uk.half-shot.msc2778.login.application_service` from 
+- Remove the unspecified `m.login.jwt` login type and the unstable `uk.half-shot.msc2778.login.application_service` from
   [MSC2778](https://github.com/matrix-org/matrix-doc/pull/2778). ([\#12597](https://github.com/matrix-org/synapse/issues/12597))
 - Synapse now requires at least Python 3.7.1 (up from 3.7.0), for compatibility with the latest Twisted trunk. ([\#12613](https://github.com/matrix-org/synapse/issues/12613))
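As the 1.60.0 notes above warn, building the new unique index on `state_group_edges` can fail if the table already contains duplicate rows. A query along the following lines is one way to check in advance; this is a sketch for PostgreSQL with a placeholder database name, and the authoritative remediation steps are in the linked upgrade notes:

```bash
# Look for duplicate rows in state_group_edges ahead of the upgrade.
# "synapse" is a placeholder database name; adjust for your deployment.
psql synapse -c "
  SELECT state_group, prev_state_group, count(*)
    FROM state_group_edges
   GROUP BY state_group, prev_state_group
  HAVING count(*) > 1;"
```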
diff --git a/changelog.d/10533.misc b/changelog.d/10533.misc
deleted file mode 100644
index f70dc6496f..0000000000
--- a/changelog.d/10533.misc
+++ /dev/null
@@ -1 +0,0 @@
-Improve event caching mechanism to avoid having multiple copies of an event in memory at a time.
diff --git a/changelog.d/12477.misc b/changelog.d/12477.misc
deleted file mode 100644
index e793d08e5e..0000000000
--- a/changelog.d/12477.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add some type hints to datastore.
\ No newline at end of file
diff --git a/changelog.d/12498.misc b/changelog.d/12498.misc
deleted file mode 100644
index 8a00b94fbe..0000000000
--- a/changelog.d/12498.misc
+++ /dev/null
@@ -1 +0,0 @@
-Preparation for faster-room-join work: return subsets of room state which we already have, immediately.
diff --git a/changelog.d/12513.feature b/changelog.d/12513.feature
deleted file mode 100644
index 01bf1d9d2c..0000000000
--- a/changelog.d/12513.feature
+++ /dev/null
@@ -1 +0,0 @@
-Measure the time taken in spam-checking callbacks and expose those measurements as metrics.
diff --git a/changelog.d/12553.removal b/changelog.d/12553.removal
new file mode 100644
index 0000000000..41f6fae5da
--- /dev/null
+++ b/changelog.d/12553.removal
@@ -0,0 +1 @@
+Remove support for the non-standard groups/communities feature from Synapse.
diff --git a/changelog.d/12558.removal b/changelog.d/12558.removal
new file mode 100644
index 0000000000..41f6fae5da
--- /dev/null
+++ b/changelog.d/12558.removal
@@ -0,0 +1 @@
+Remove support for the non-standard groups/communities feature from Synapse.
diff --git a/changelog.d/12563.removal b/changelog.d/12563.removal
new file mode 100644
index 0000000000..41f6fae5da
--- /dev/null
+++ b/changelog.d/12563.removal
@@ -0,0 +1 @@
+Remove support for the non-standard groups/communities feature from Synapse.
diff --git a/changelog.d/12567.misc b/changelog.d/12567.misc
deleted file mode 100644
index 35f08569ba..0000000000
--- a/changelog.d/12567.misc
+++ /dev/null
@@ -1 +0,0 @@
-Replace string literal instances of stream key types with typed constants.
\ No newline at end of file
diff --git a/changelog.d/12586.misc b/changelog.d/12586.misc
deleted file mode 100644
index d26e332305..0000000000
--- a/changelog.d/12586.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add `@cancellable` decorator, for use on endpoint methods that can be cancelled when clients disconnect.
diff --git a/changelog.d/12588.misc b/changelog.d/12588.misc
deleted file mode 100644
index f62d5c8e21..0000000000
--- a/changelog.d/12588.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add ability to cancel disconnected requests to `SynapseRequest`.
diff --git a/changelog.d/12611.bugfix b/changelog.d/12611.bugfix
deleted file mode 100644
index 093c45a20b..0000000000
--- a/changelog.d/12611.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug introduced in Synapse 1.7.0 that would prevent events from being sent to clients if there's a retention policy in the room when the support for retention policies is disabled.
diff --git a/changelog.d/12618.feature b/changelog.d/12618.feature
deleted file mode 100644
index 37fa03b3cb..0000000000
--- a/changelog.d/12618.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add a `default_power_level_content_override` config option to set default room power levels per room preset.
diff --git a/changelog.d/12623.feature b/changelog.d/12623.feature
deleted file mode 100644
index cdee19fafa..0000000000
--- a/changelog.d/12623.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add support for [MSC3787: Allowing knocks to restricted rooms](https://github.com/matrix-org/matrix-spec-proposals/pull/3787).
\ No newline at end of file
diff --git a/changelog.d/12630.misc b/changelog.d/12630.misc
deleted file mode 100644
index 43e12603e2..0000000000
--- a/changelog.d/12630.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add a helper class for testing request cancellation.
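The `default_power_level_content_override` option whose changelog entry is removed above (#12618, now folded into `CHANGES.md`) maps room presets to power-level content. A hedged sketch of a `homeserver.yaml` fragment, with illustrative values only; consult the configuration documentation for the exact supported shape:

```bash
# Append an illustrative default_power_level_content_override section to
# homeserver.yaml. The preset name and value here are examples, not a
# definitive configuration.
cat >> homeserver.yaml <<'EOF'
default_power_level_content_override:
  private_chat:
    events_default: 1
EOF
```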
diff --git a/changelog.d/12672.feature b/changelog.d/12672.feature deleted file mode 100644 index b989e0d208..0000000000 --- a/changelog.d/12672.feature +++ /dev/null @@ -1 +0,0 @@ -Send `USER_IP` commands on a different Redis channel, in order to reduce traffic to workers that do not process these commands. \ No newline at end of file diff --git a/changelog.d/12673.feature b/changelog.d/12673.feature deleted file mode 100644 index f2bddd6e1c..0000000000 --- a/changelog.d/12673.feature +++ /dev/null @@ -1 +0,0 @@ -Synapse will now reload [cache config](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#caching) when it receives a [SIGHUP](https://en.wikipedia.org/wiki/SIGHUP) signal. diff --git a/changelog.d/12676.misc b/changelog.d/12676.misc deleted file mode 100644 index 26490af00d..0000000000 --- a/changelog.d/12676.misc +++ /dev/null @@ -1 +0,0 @@ -Improve documentation of the `synapse.push` module. diff --git a/changelog.d/12677.misc b/changelog.d/12677.misc deleted file mode 100644 index eed12e69e9..0000000000 --- a/changelog.d/12677.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor functions to on `PushRuleEvaluatorForEvent`. diff --git a/changelog.d/12679.misc b/changelog.d/12679.misc deleted file mode 100644 index 6df1116b49..0000000000 --- a/changelog.d/12679.misc +++ /dev/null @@ -1 +0,0 @@ -Preparation for database schema simplifications: stop writing to `event_reference_hashes`. diff --git a/changelog.d/12680.misc b/changelog.d/12680.misc deleted file mode 100644 index dfd1f0a6c6..0000000000 --- a/changelog.d/12680.misc +++ /dev/null @@ -1 +0,0 @@ -Remove code which updates unused database column `application_services_state.last_txn`. diff --git a/changelog.d/12683.bugfix b/changelog.d/12683.bugfix deleted file mode 100644 index 2ce84a223a..0000000000 --- a/changelog.d/12683.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.57.0 where `/messages` would throw a 500 error when querying for a non-existent room. diff --git a/changelog.d/12687.bugfix b/changelog.d/12687.bugfix deleted file mode 100644 index 196d976670..0000000000 --- a/changelog.d/12687.bugfix +++ /dev/null @@ -1 +0,0 @@ -Add a unique index to `state_group_edges` to prevent duplicates being accidentally introduced and the consequential impact to performance. \ No newline at end of file diff --git a/changelog.d/12689.misc b/changelog.d/12689.misc deleted file mode 100644 index daa484ea30..0000000000 --- a/changelog.d/12689.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor `EventContext` class. diff --git a/changelog.d/12691.misc b/changelog.d/12691.misc deleted file mode 100644 index c635434211..0000000000 --- a/changelog.d/12691.misc +++ /dev/null @@ -1 +0,0 @@ -Remove an unneeded class in the push code. diff --git a/changelog.d/12693.misc b/changelog.d/12693.misc deleted file mode 100644 index 8bd1e1cb0c..0000000000 --- a/changelog.d/12693.misc +++ /dev/null @@ -1 +0,0 @@ -Consolidate parsing of relation information from events. diff --git a/changelog.d/12694.misc b/changelog.d/12694.misc deleted file mode 100644 index e1e956a513..0000000000 --- a/changelog.d/12694.misc +++ /dev/null @@ -1 +0,0 @@ -Capture the `Deferred` for request cancellation in `_AsyncResource`. diff --git a/changelog.d/12695.misc b/changelog.d/12695.misc deleted file mode 100644 index 1b39d969a4..0000000000 --- a/changelog.d/12695.misc +++ /dev/null @@ -1 +0,0 @@ -Fixes an incorrect type hint for `Filter._check_event_relations`. 
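One of the entries removed above (#12673) notes that Synapse now reloads its cache configuration on SIGHUP. A minimal way to exercise that from a shell, assuming Synapse is configured to write a pid file (the path below is a placeholder):

```bash
# After editing the `caches` section of homeserver.yaml, ask the running
# Synapse to reload it without a restart. The pid-file path is a placeholder;
# use the `pid_file` location from your own configuration.
kill -HUP "$(cat /var/run/synapse/homeserver.pid)"
```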
diff --git a/changelog.d/12696.bugfix b/changelog.d/12696.bugfix deleted file mode 100644 index e410184a22..0000000000 --- a/changelog.d/12696.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where an empty room would be created when a user with an insufficient power level tried to upgrade a room. diff --git a/changelog.d/12698.misc b/changelog.d/12698.misc deleted file mode 100644 index 5d626352f9..0000000000 --- a/changelog.d/12698.misc +++ /dev/null @@ -1 +0,0 @@ -Respect the `@cancellable` flag for `DirectServe{Html,Json}Resource`s. diff --git a/changelog.d/12699.misc b/changelog.d/12699.misc deleted file mode 100644 index d278a956c7..0000000000 --- a/changelog.d/12699.misc +++ /dev/null @@ -1 +0,0 @@ -Respect the `@cancellable` flag for `RestServlet`s and `BaseFederationServlet`s. diff --git a/changelog.d/12700.misc b/changelog.d/12700.misc deleted file mode 100644 index d93eb5dada..0000000000 --- a/changelog.d/12700.misc +++ /dev/null @@ -1 +0,0 @@ -Respect the `@cancellable` flag for `ReplicationEndpoint`s. diff --git a/changelog.d/12701.feature b/changelog.d/12701.feature deleted file mode 100644 index bb2264602c..0000000000 --- a/changelog.d/12701.feature +++ /dev/null @@ -1 +0,0 @@ -Add a config options to allow for auto-tuning of caches. diff --git a/changelog.d/12703.misc b/changelog.d/12703.misc deleted file mode 100644 index 9aaa1bbaa3..0000000000 --- a/changelog.d/12703.misc +++ /dev/null @@ -1 +0,0 @@ -Convert namespace class `Codes` into a string enum. \ No newline at end of file diff --git a/changelog.d/12705.misc b/changelog.d/12705.misc deleted file mode 100644 index a913d8bb85..0000000000 --- a/changelog.d/12705.misc +++ /dev/null @@ -1 +0,0 @@ -Complain if a federation endpoint has the `@cancellable` flag, since some of the wrapper code may not handle cancellation correctly yet. diff --git a/changelog.d/12708.misc b/changelog.d/12708.misc deleted file mode 100644 index aa99e7311b..0000000000 --- a/changelog.d/12708.misc +++ /dev/null @@ -1 +0,0 @@ -Enable cancellation of `GET /rooms/$room_id/members`, `GET /rooms/$room_id/state` and `GET /rooms/$room_id/state/$event_type/*` requests. diff --git a/changelog.d/12709.removal b/changelog.d/12709.removal deleted file mode 100644 index 6bb03e2894..0000000000 --- a/changelog.d/12709.removal +++ /dev/null @@ -1 +0,0 @@ -Require a body in POST requests to `/rooms/{roomId}/receipt/{receiptType}/{eventId}`, as required by the [Matrix specification](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3roomsroomidreceiptreceipttypeeventid). This breaks compatibility with Element Android 1.2.0 and earlier: users of those clients will be unable to send read receipts. diff --git a/changelog.d/12711.misc b/changelog.d/12711.misc deleted file mode 100644 index 0831ce0452..0000000000 --- a/changelog.d/12711.misc +++ /dev/null @@ -1 +0,0 @@ -Optimize private read receipt filtering. diff --git a/changelog.d/12713.bugfix b/changelog.d/12713.bugfix deleted file mode 100644 index 91e70f102c..0000000000 --- a/changelog.d/12713.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.30.0 where empty rooms could be automatically created if a monthly active users limit is set. diff --git a/changelog.d/12715.doc b/changelog.d/12715.doc deleted file mode 100644 index 150d78c3f6..0000000000 --- a/changelog.d/12715.doc +++ /dev/null @@ -1 +0,0 @@ -Fix a typo in the Media Admin API documentation. 
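The receipts change logged above (#12709) means clients must now include a JSON body, even an empty one, when POSTing a read receipt. A sketch with curl; the homeserver URL, room ID, event ID and access token are all placeholders:

```bash
# Send a read receipt with the now-mandatory (possibly empty) JSON body.
# Every value below is a placeholder for your own homeserver and session.
curl -X POST \
  -H "Authorization: Bearer $ACCESS_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{}' \
  "https://matrix.example.com/_matrix/client/v3/rooms/$ROOM_ID/receipt/m.read/$EVENT_ID"
```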
diff --git a/changelog.d/12716.misc b/changelog.d/12716.misc deleted file mode 100644 index b07e1b52ee..0000000000 --- a/changelog.d/12716.misc +++ /dev/null @@ -1 +0,0 @@ -Add type annotations to increase the number of modules passing `disallow-untyped-defs`. \ No newline at end of file diff --git a/changelog.d/12717.misc b/changelog.d/12717.misc deleted file mode 100644 index e793d08e5e..0000000000 --- a/changelog.d/12717.misc +++ /dev/null @@ -1 +0,0 @@ -Add some type hints to datastore. \ No newline at end of file diff --git a/changelog.d/12720.misc b/changelog.d/12720.misc deleted file mode 100644 index 01b427f200..0000000000 --- a/changelog.d/12720.misc +++ /dev/null @@ -1 +0,0 @@ -Drop the logging level of status messages for the URL preview cache expiry job from INFO to DEBUG. \ No newline at end of file diff --git a/changelog.d/12721.bugfix b/changelog.d/12721.bugfix deleted file mode 100644 index 6987f7ab15..0000000000 --- a/changelog.d/12721.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix push to dismiss notifications when read on another client. Contributed by @SpiritCroc @ Beeper. diff --git a/changelog.d/12723.misc b/changelog.d/12723.misc deleted file mode 100644 index 4f5bffeda6..0000000000 --- a/changelog.d/12723.misc +++ /dev/null @@ -1 +0,0 @@ -Downgrade some OIDC errors to warnings in the logs, to reduce the noise of Sentry reports. diff --git a/changelog.d/12726.misc b/changelog.d/12726.misc deleted file mode 100644 index b07e1b52ee..0000000000 --- a/changelog.d/12726.misc +++ /dev/null @@ -1 +0,0 @@ -Add type annotations to increase the number of modules passing `disallow-untyped-defs`. \ No newline at end of file diff --git a/changelog.d/12727.doc b/changelog.d/12727.doc deleted file mode 100644 index c41e50c85b..0000000000 --- a/changelog.d/12727.doc +++ /dev/null @@ -1 +0,0 @@ -Update the OpenID Connect example for Keycloak to be compatible with newer versions of Keycloak. Contributed by @nhh. diff --git a/changelog.d/12731.misc b/changelog.d/12731.misc deleted file mode 100644 index 962100d516..0000000000 --- a/changelog.d/12731.misc +++ /dev/null @@ -1 +0,0 @@ -Update configs used by Complement to allow more invites/3PID validations during tests. \ No newline at end of file diff --git a/changelog.d/12732.feature b/changelog.d/12732.feature new file mode 100644 index 0000000000..3c73363d28 --- /dev/null +++ b/changelog.d/12732.feature @@ -0,0 +1 @@ +Add new `media_retention` options to the homeserver config for routinely cleaning up non-recently accessed media. \ No newline at end of file diff --git a/changelog.d/12734.misc b/changelog.d/12734.misc deleted file mode 100644 index ffbfb0d632..0000000000 --- a/changelog.d/12734.misc +++ /dev/null @@ -1 +0,0 @@ -Tidy up and type-hint the database engine modules. diff --git a/changelog.d/12740.feature b/changelog.d/12740.feature new file mode 100644 index 0000000000..e674c31ae8 --- /dev/null +++ b/changelog.d/12740.feature @@ -0,0 +1 @@ +Experimental support for [MSC3772](https://github.com/matrix-org/matrix-spec-proposals/pull/3772): Push rule for mutually related events. diff --git a/changelog.d/12742.doc b/changelog.d/12742.doc deleted file mode 100644 index 0084e27a7d..0000000000 --- a/changelog.d/12742.doc +++ /dev/null @@ -1 +0,0 @@ -Fix typo in server listener documentation. 
\ No newline at end of file diff --git a/changelog.d/12744.feature b/changelog.d/12744.feature deleted file mode 100644 index 9836d94f8c..0000000000 --- a/changelog.d/12744.feature +++ /dev/null @@ -1 +0,0 @@ -Add a `drop_federated_event` callback to `SpamChecker` to disregard inbound federated events before they take up much processing power, in an emergency. diff --git a/changelog.d/12746.bugfix b/changelog.d/12746.bugfix new file mode 100644 index 0000000000..67e7fc854c --- /dev/null +++ b/changelog.d/12746.bugfix @@ -0,0 +1 @@ +Always send an `access_token` in `/thirdparty/` requests to appservices, as required by the [Matrix specification](https://spec.matrix.org/v1.1/application-service-api/#third-party-networks). \ No newline at end of file diff --git a/changelog.d/12747.bugfix b/changelog.d/12747.bugfix deleted file mode 100644 index 0fb0059237..0000000000 --- a/changelog.d/12747.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix poor database performance when reading the cache invalidation stream for large servers with lots of workers. diff --git a/changelog.d/12748.doc b/changelog.d/12748.doc deleted file mode 100644 index 996ad3a1b9..0000000000 --- a/changelog.d/12748.doc +++ /dev/null @@ -1 +0,0 @@ -Link to the configuration manual from the welcome page of the documentation. diff --git a/changelog.d/12749.doc b/changelog.d/12749.doc deleted file mode 100644 index 4560319ee4..0000000000 --- a/changelog.d/12749.doc +++ /dev/null @@ -1 +0,0 @@ -Fix typo in 'run_background_tasks_on' option name in configuration manual documentation. diff --git a/changelog.d/12753.misc b/changelog.d/12753.misc deleted file mode 100644 index e793d08e5e..0000000000 --- a/changelog.d/12753.misc +++ /dev/null @@ -1 +0,0 @@ -Add some type hints to datastore. \ No newline at end of file diff --git a/changelog.d/12759.doc b/changelog.d/12759.doc deleted file mode 100644 index 45d1c9c0ca..0000000000 --- a/changelog.d/12759.doc +++ /dev/null @@ -1 +0,0 @@ -Add information regarding the `rc_invites` ratelimiting option to the configuration docs. diff --git a/changelog.d/12761.doc b/changelog.d/12761.doc deleted file mode 100644 index 2eb2c0976f..0000000000 --- a/changelog.d/12761.doc +++ /dev/null @@ -1 +0,0 @@ -Add documentation for cancellation of request processing. diff --git a/changelog.d/12762.misc b/changelog.d/12762.misc deleted file mode 100644 index 990fb6fe74..0000000000 --- a/changelog.d/12762.misc +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where the user directory background process would fail to make forward progress if a user included a null codepoint in their display name or avatar. diff --git a/changelog.d/12765.doc b/changelog.d/12765.doc deleted file mode 100644 index 277b037d6b..0000000000 --- a/changelog.d/12765.doc +++ /dev/null @@ -1 +0,0 @@ -Recommend using docker to run tests against postgres. diff --git a/changelog.d/12769.misc b/changelog.d/12769.misc deleted file mode 100644 index 27bd53abe3..0000000000 --- a/changelog.d/12769.misc +++ /dev/null @@ -1 +0,0 @@ -Tweak the mypy plugin so that `@cached` can accept `on_invalidate=None`. diff --git a/changelog.d/12772.misc b/changelog.d/12772.misc deleted file mode 100644 index da66f376fe..0000000000 --- a/changelog.d/12772.misc +++ /dev/null @@ -1 +0,0 @@ -Move methods that call `add_push_rule` to the `PushRuleStore` class. 
diff --git a/changelog.d/12773.doc b/changelog.d/12773.doc deleted file mode 100644 index 6de3716534..0000000000 --- a/changelog.d/12773.doc +++ /dev/null @@ -1 +0,0 @@ -Add missing user directory endpoint from the generic worker documentation. Contributed by @olmari. \ No newline at end of file diff --git a/changelog.d/12774.misc b/changelog.d/12774.misc deleted file mode 100644 index 8651f2e0e0..0000000000 --- a/changelog.d/12774.misc +++ /dev/null @@ -1 +0,0 @@ -Make handling of federation Authorization header (more) compliant with RFC7230. diff --git a/changelog.d/12775.misc b/changelog.d/12775.misc deleted file mode 100644 index eac326cde3..0000000000 --- a/changelog.d/12775.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor `resolve_state_groups_for_events` to not pull out full state when no state resolution happens. \ No newline at end of file diff --git a/changelog.d/12776.doc b/changelog.d/12776.doc deleted file mode 100644 index c00489a8ce..0000000000 --- a/changelog.d/12776.doc +++ /dev/null @@ -1,2 +0,0 @@ -Add additional info to documentation of config option `cache_autotuning`. - diff --git a/changelog.d/12777.doc b/changelog.d/12777.doc deleted file mode 100644 index cc9c07704d..0000000000 --- a/changelog.d/12777.doc +++ /dev/null @@ -1,2 +0,0 @@ -Update configuration manual documentation to document size-related suffixes. - diff --git a/changelog.d/12779.bugfix b/changelog.d/12779.bugfix deleted file mode 100644 index 7cf7a1f65f..0000000000 --- a/changelog.d/12779.bugfix +++ /dev/null @@ -1 +0,0 @@ -Give a meaningful error message when a client tries to create a room with an invalid alias localpart. \ No newline at end of file diff --git a/changelog.d/12781.misc b/changelog.d/12781.misc deleted file mode 100644 index 8a04571617..0000000000 --- a/changelog.d/12781.misc +++ /dev/null @@ -1 +0,0 @@ -Do not keep going if there are 5 back-to-back background update failures. \ No newline at end of file diff --git a/changelog.d/12783.misc b/changelog.d/12783.misc deleted file mode 100644 index 97575608bb..0000000000 --- a/changelog.d/12783.misc +++ /dev/null @@ -1 +0,0 @@ -Fix federation when using the demo scripts. diff --git a/changelog.d/12770.bugfix b/changelog.d/12784.bugfix similarity index 100% rename from changelog.d/12770.bugfix rename to changelog.d/12784.bugfix diff --git a/changelog.d/12785.doc b/changelog.d/12785.doc deleted file mode 100644 index 5209dfeb05..0000000000 --- a/changelog.d/12785.doc +++ /dev/null @@ -1 +0,0 @@ -Fix invalid YAML syntax in the example documentation for the `url_preview_accept_language` config option. diff --git a/changelog.d/12786.feature b/changelog.d/12786.feature deleted file mode 100644 index c90ddd411e..0000000000 --- a/changelog.d/12786.feature +++ /dev/null @@ -1 +0,0 @@ -Implement [MSC3818: Copy room type on upgrade](https://github.com/matrix-org/matrix-spec-proposals/pull/3818). diff --git a/changelog.d/12789.misc b/changelog.d/12789.misc deleted file mode 100644 index 3398d00110..0000000000 --- a/changelog.d/12789.misc +++ /dev/null @@ -1 +0,0 @@ -The `hash_password` script now fails when it is called without specifying a config file. diff --git a/changelog.d/12790.misc b/changelog.d/12790.misc deleted file mode 100644 index b78156cf4e..0000000000 --- a/changelog.d/12790.misc +++ /dev/null @@ -1 +0,0 @@ -Simplify `disallow_untyped_defs` config in `mypy.ini`. 
diff --git a/changelog.d/12791.misc b/changelog.d/12791.misc deleted file mode 100644 index b6e92b7eaf..0000000000 --- a/changelog.d/12791.misc +++ /dev/null @@ -1 +0,0 @@ -Update EventContext `get_current_event_ids` and `get_prev_event_ids` to accept state filters and update calls where possible. diff --git a/changelog.d/12792.feature b/changelog.d/12792.feature deleted file mode 100644 index 4778b8a394..0000000000 --- a/changelog.d/12792.feature +++ /dev/null @@ -1 +0,0 @@ -Implement [MSC3818: Copy room type on upgrade](https://github.com/matrix-org/matrix-spec-proposals/pull/3818). \ No newline at end of file diff --git a/changelog.d/12794.bugfix b/changelog.d/12794.bugfix deleted file mode 100644 index 2d1a2838e1..0000000000 --- a/changelog.d/12794.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in 1.43.0 where a file (`providers.json`) was never closed. Contributed by @arkamar. diff --git a/changelog.d/12803.bugfix b/changelog.d/12803.bugfix deleted file mode 100644 index 6ddd3d24e0..0000000000 --- a/changelog.d/12803.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where finished log contexts would be re-started when failing to contact remote homeservers. diff --git a/changelog.d/12809.feature b/changelog.d/12809.feature deleted file mode 100644 index b989e0d208..0000000000 --- a/changelog.d/12809.feature +++ /dev/null @@ -1 +0,0 @@ -Send `USER_IP` commands on a different Redis channel, in order to reduce traffic to workers that do not process these commands. \ No newline at end of file diff --git a/changelog.d/12810.misc b/changelog.d/12810.misc new file mode 100644 index 0000000000..fe5fb81d5e --- /dev/null +++ b/changelog.d/12810.misc @@ -0,0 +1 @@ +Test Synapse against Complement with workers. \ No newline at end of file diff --git a/changelog.d/12812.misc b/changelog.d/12812.misc new file mode 100644 index 0000000000..53cb936a02 --- /dev/null +++ b/changelog.d/12812.misc @@ -0,0 +1 @@ +Try other homeservers when re-syncing state for rooms with partial state. diff --git a/changelog.d/12813.misc b/changelog.d/12813.misc new file mode 100644 index 0000000000..8be9f3eb44 --- /dev/null +++ b/changelog.d/12813.misc @@ -0,0 +1 @@ +Resume state re-syncing for rooms with partial state after a Synapse restart. diff --git a/changelog.d/12818.misc b/changelog.d/12818.misc deleted file mode 100644 index 2f9dacc21d..0000000000 --- a/changelog.d/12818.misc +++ /dev/null @@ -1 +0,0 @@ -Remove Caddy from the Synapse workers image used in Complement. \ No newline at end of file diff --git a/changelog.d/12819.misc b/changelog.d/12819.misc deleted file mode 100644 index 7a03102a63..0000000000 --- a/changelog.d/12819.misc +++ /dev/null @@ -1 +0,0 @@ -Add Complement's shared registration secret to the Complement worker image. This fixes tests that depend on it. \ No newline at end of file diff --git a/changelog.d/12823.bugfix b/changelog.d/12823.bugfix deleted file mode 100644 index 1a1f5957e7..0000000000 --- a/changelog.d/12823.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug, introduced in Synapse 1.21.0, that led to media thumbnails being unusable before the index has been added in the background. diff --git a/changelog.d/12826.misc b/changelog.d/12826.misc deleted file mode 100644 index f5e91f1ed5..0000000000 --- a/changelog.d/12826.misc +++ /dev/null @@ -1 +0,0 @@ -Support registering Application Services when running with workers under Complement. 
\ No newline at end of file diff --git a/changelog.d/12829.bugfix b/changelog.d/12829.bugfix new file mode 100644 index 0000000000..dfa1fed34e --- /dev/null +++ b/changelog.d/12829.bugfix @@ -0,0 +1 @@ +Fix a bug where we did not correctly handle invalid device list updates over federation. Contributed by Carl Bordum Hansen. diff --git a/changelog.d/12832.bugfix b/changelog.d/12832.bugfix new file mode 100644 index 0000000000..497d5184ea --- /dev/null +++ b/changelog.d/12832.bugfix @@ -0,0 +1 @@ +Fixed a bug which allowed multiple async operations to access database locks concurrently. Contributed by @sumnerevans @ Beeper. diff --git a/changelog.d/12833.misc b/changelog.d/12833.misc deleted file mode 100644 index fad5df1afa..0000000000 --- a/changelog.d/12833.misc +++ /dev/null @@ -1 +0,0 @@ -Add some type hints to test files. \ No newline at end of file diff --git a/changelog.d/12836.misc b/changelog.d/12836.misc new file mode 100644 index 0000000000..85909c6a2d --- /dev/null +++ b/changelog.d/12836.misc @@ -0,0 +1 @@ +Remove Mutual Rooms ([MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666)) endpoint dependency on the User Directory. \ No newline at end of file diff --git a/changelog.d/12838.feature b/changelog.d/12838.feature new file mode 100644 index 0000000000..b24489aaad --- /dev/null +++ b/changelog.d/12838.feature @@ -0,0 +1 @@ +Add storage and module API methods to get monthly active users (and their corresponding appservices) within an optionally specified time range. diff --git a/changelog.d/12840.bugfix b/changelog.d/12840.bugfix new file mode 100644 index 0000000000..b15cedf896 --- /dev/null +++ b/changelog.d/12840.bugfix @@ -0,0 +1 @@ +Fix an issue introduced in Synapse 0.34 where the `/notifications` endpoint would only return notifications if a user registered at least one pusher. Contributed by Famedly. diff --git a/changelog.d/12842.misc b/changelog.d/12842.misc deleted file mode 100644 index cec3f97d86..0000000000 --- a/changelog.d/12842.misc +++ /dev/null @@ -1 +0,0 @@ -Disable 'faster room join' Complement tests when testing against Synapse with workers. \ No newline at end of file diff --git a/changelog.d/12843.bugfix b/changelog.d/12843.bugfix new file mode 100644 index 0000000000..f87c0799a0 --- /dev/null +++ b/changelog.d/12843.bugfix @@ -0,0 +1 @@ +Fix bug where servers using a Postgres database would fail to backfill from an insertion event when MSC2716 is enabled (`experimental_features.msc2716_enabled`). diff --git a/changelog.d/12845.feature b/changelog.d/12845.feature new file mode 100644 index 0000000000..815a1f10ea --- /dev/null +++ b/changelog.d/12845.feature @@ -0,0 +1 @@ +Support the new error code "ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED" from [MSC3823](https://github.com/matrix-org/matrix-spec-proposals/pull/3823). \ No newline at end of file diff --git a/changelog.d/12846.misc b/changelog.d/12846.misc new file mode 100644 index 0000000000..f72d3d2bea --- /dev/null +++ b/changelog.d/12846.misc @@ -0,0 +1 @@ +Experimental: expand `check_event_for_spam` with ability to return additional fields. This enables spam-checker implementations to experiment with mechanisms to give users more information about why they are blocked and whether any action is needed from them to be unblocked. 
\ No newline at end of file diff --git a/changelog.d/12849.misc b/changelog.d/12849.misc new file mode 100644 index 0000000000..4c2a15ce2b --- /dev/null +++ b/changelog.d/12849.misc @@ -0,0 +1 @@ +Remove `dont_notify` from the `.m.rule.room.server_acl` rule. \ No newline at end of file diff --git a/changelog.d/12851.misc b/changelog.d/12851.misc new file mode 100644 index 0000000000..ca6f48c369 --- /dev/null +++ b/changelog.d/12851.misc @@ -0,0 +1 @@ +Remove the unstable `/hierarchy` endpoint from [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946). diff --git a/changelog.d/12852.misc b/changelog.d/12852.misc new file mode 100644 index 0000000000..afca32471f --- /dev/null +++ b/changelog.d/12852.misc @@ -0,0 +1 @@ +Pull out less state when handling gaps in room DAG. diff --git a/changelog.d/12855.feature b/changelog.d/12855.feature new file mode 100644 index 0000000000..915f008ec6 --- /dev/null +++ b/changelog.d/12855.feature @@ -0,0 +1 @@ +Add a configurable background job to delete stale devices. diff --git a/changelog.d/12856.misc b/changelog.d/12856.misc new file mode 100644 index 0000000000..19ecefd9af --- /dev/null +++ b/changelog.d/12856.misc @@ -0,0 +1 @@ +Clean-up the push rules datastore. diff --git a/changelog.d/12858.bugfix b/changelog.d/12858.bugfix new file mode 100644 index 0000000000..8c95a3e3a3 --- /dev/null +++ b/changelog.d/12858.bugfix @@ -0,0 +1 @@ +Fix [MSC3787](https://github.com/matrix-org/matrix-spec-proposals/pull/3787) rooms being omitted from room directory, room summary and space hierarchy responses. diff --git a/changelog.d/12859.feature b/changelog.d/12859.feature new file mode 100644 index 0000000000..e674c31ae8 --- /dev/null +++ b/changelog.d/12859.feature @@ -0,0 +1 @@ +Experimental support for [MSC3772](https://github.com/matrix-org/matrix-spec-proposals/pull/3772): Push rule for mutually related events. diff --git a/changelog.d/12860.misc b/changelog.d/12860.misc new file mode 100644 index 0000000000..b7d2943023 --- /dev/null +++ b/changelog.d/12860.misc @@ -0,0 +1 @@ +Correct a type annotation in the URL preview source code. diff --git a/changelog.d/12863.doc b/changelog.d/12863.doc new file mode 100644 index 0000000000..94f7b8371a --- /dev/null +++ b/changelog.d/12863.doc @@ -0,0 +1 @@ +Fix typos in documentation. \ No newline at end of file diff --git a/changelog.d/12865.misc b/changelog.d/12865.misc new file mode 100644 index 0000000000..d982ca7622 --- /dev/null +++ b/changelog.d/12865.misc @@ -0,0 +1 @@ +Update `pyjwt` dependency to [2.4.0](https://github.com/jpadilla/pyjwt/releases/tag/2.4.0). diff --git a/changelog.d/12866.misc b/changelog.d/12866.misc new file mode 100644 index 0000000000..3f7ef59253 --- /dev/null +++ b/changelog.d/12866.misc @@ -0,0 +1 @@ +Enable the `/account/whoami` endpoint on synapse worker processes. Contributed by Nick @ Beeper. diff --git a/changelog.d/12867.doc b/changelog.d/12867.doc new file mode 100644 index 0000000000..1caeb7a290 --- /dev/null +++ b/changelog.d/12867.doc @@ -0,0 +1 @@ +Fix documentation incorrectly stating the `sendToDevice` endpoint can be directed at generic workers. Contributed by Nick @ Beeper. diff --git a/changelog.d/12868.misc b/changelog.d/12868.misc new file mode 100644 index 0000000000..382a876dab --- /dev/null +++ b/changelog.d/12868.misc @@ -0,0 +1 @@ +Enable the `batch_send` endpoint on synapse worker processes. Contributed by Nick @ Beeper. 
diff --git a/changelog.d/12869.misc b/changelog.d/12869.misc new file mode 100644 index 0000000000..1d9d1c8921 --- /dev/null +++ b/changelog.d/12869.misc @@ -0,0 +1 @@ +Don't generate empty AS transactions when the AS is flagged as down. Contributed by Nick @ Beeper. diff --git a/changelog.d/12871.misc b/changelog.d/12871.misc new file mode 100644 index 0000000000..94bd6c4974 --- /dev/null +++ b/changelog.d/12871.misc @@ -0,0 +1 @@ +Fix up the variable `state_store` naming. diff --git a/changelog.d/12877.bugfix b/changelog.d/12877.bugfix new file mode 100644 index 0000000000..1ecf448baf --- /dev/null +++ b/changelog.d/12877.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.54 which could sometimes cause exceptions when handling federated traffic. diff --git a/changelog.d/12879.misc b/changelog.d/12879.misc new file mode 100644 index 0000000000..24fa0d0de0 --- /dev/null +++ b/changelog.d/12879.misc @@ -0,0 +1 @@ +Avoid running queries which will never result in deletions. diff --git a/changelog.d/12884.misc b/changelog.d/12884.misc new file mode 100644 index 0000000000..56eead9472 --- /dev/null +++ b/changelog.d/12884.misc @@ -0,0 +1 @@ +Use constants for EDU types. diff --git a/changelog.d/12885.misc b/changelog.d/12885.misc new file mode 100644 index 0000000000..2524056307 --- /dev/null +++ b/changelog.d/12885.misc @@ -0,0 +1 @@ +Reduce database load of `/sync` when presence is enabled. diff --git a/changelog.d/12886.misc b/changelog.d/12886.misc new file mode 100644 index 0000000000..3dd08f74ba --- /dev/null +++ b/changelog.d/12886.misc @@ -0,0 +1 @@ +Refactor `have_seen_events` to reduce memory consumed when processing federation traffic. diff --git a/changelog.d/12888.misc b/changelog.d/12888.misc new file mode 100644 index 0000000000..8ed2ea65b5 --- /dev/null +++ b/changelog.d/12888.misc @@ -0,0 +1 @@ +Refactor receipt linearization code. diff --git a/changelog.d/12889.bugfix b/changelog.d/12889.bugfix new file mode 100644 index 0000000000..582b2f0642 --- /dev/null +++ b/changelog.d/12889.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.59.0 which caused room deletion to fail with a foreign key violation. diff --git a/changelog.d/12894.misc b/changelog.d/12894.misc new file mode 100644 index 0000000000..646a62fccb --- /dev/null +++ b/changelog.d/12894.misc @@ -0,0 +1 @@ +Add type annotations to `synapse.logging.opentracing`. diff --git a/changelog.d/12895.removal b/changelog.d/12895.removal new file mode 100644 index 0000000000..41f6fae5da --- /dev/null +++ b/changelog.d/12895.removal @@ -0,0 +1 @@ +Remove support for the non-standard groups/communities feature from Synapse. diff --git a/changelog.d/12897.removal b/changelog.d/12897.removal new file mode 100644 index 0000000000..41f6fae5da --- /dev/null +++ b/changelog.d/12897.removal @@ -0,0 +1 @@ +Remove support for the non-standard groups/communities feature from Synapse. diff --git a/changelog.d/12900.removal b/changelog.d/12900.removal new file mode 100644 index 0000000000..41f6fae5da --- /dev/null +++ b/changelog.d/12900.removal @@ -0,0 +1 @@ +Remove support for the non-standard groups/communities feature from Synapse. diff --git a/changelog.d/12903.bugfix b/changelog.d/12903.bugfix new file mode 100644 index 0000000000..f264399483 --- /dev/null +++ b/changelog.d/12903.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug which caused the `/messages` endpoint to return an incorrect `end` attribute when there were no more events. Contributed by @Vetchu. 
diff --git a/changelog.d/12904.misc b/changelog.d/12904.misc
new file mode 100644
index 0000000000..afca32471f
--- /dev/null
+++ b/changelog.d/12904.misc
@@ -0,0 +1 @@
+Pull out less state when handling gaps in room DAG.
diff --git a/changelog.d/12908.removal b/changelog.d/12908.removal
new file mode 100644
index 0000000000..a1d05d69e8
--- /dev/null
+++ b/changelog.d/12908.removal
@@ -0,0 +1 @@
+Remove contributed `kick_users.py` script. This is broken under Python 3, and is not added to the environment when `pip install`ing Synapse.
diff --git a/changelog.d/12909.removal b/changelog.d/12909.removal
new file mode 100644
index 0000000000..0baff46ea9
--- /dev/null
+++ b/changelog.d/12909.removal
@@ -0,0 +1 @@
+Remove `contrib/jitsimeetbridge`. This was an unused experiment that hasn't been meaningfully changed since 2014.
diff --git a/changelog.d/12910.removal b/changelog.d/12910.removal
new file mode 100644
index 0000000000..4bd4f877f6
--- /dev/null
+++ b/changelog.d/12910.removal
@@ -0,0 +1 @@
+Remove unused `contrib/experiments/cursesio.py` script, which fails to run under Python 3.
diff --git a/changelog.d/12911.removal b/changelog.d/12911.removal
new file mode 100644
index 0000000000..5178cd6532
--- /dev/null
+++ b/changelog.d/12911.removal
@@ -0,0 +1 @@
+Remove unused `contrib/experiments/test_messaging.py` script. This fails to run on Python 3.
diff --git a/changelog.d/12912.misc b/changelog.d/12912.misc
new file mode 100644
index 0000000000..6396fd9d36
--- /dev/null
+++ b/changelog.d/12912.misc
@@ -0,0 +1 @@
+Bump types-jsonschema from 4.4.1 to 4.4.6.
diff --git a/changelog.d/12913.misc b/changelog.d/12913.misc
new file mode 100644
index 0000000000..a2bc940557
--- /dev/null
+++ b/changelog.d/12913.misc
@@ -0,0 +1 @@
+Rename storage classes.
diff --git a/changelog.d/12914.misc b/changelog.d/12914.misc
new file mode 100644
index 0000000000..07d819932a
--- /dev/null
+++ b/changelog.d/12914.misc
@@ -0,0 +1 @@
+Preparation for database schema simplifications: stop reading from `event_edges.room_id`.
diff --git a/changelog.d/12916.misc b/changelog.d/12916.misc
new file mode 100644
index 0000000000..347eb096db
--- /dev/null
+++ b/changelog.d/12916.misc
@@ -0,0 +1 @@
+Check if we are in a virtual environment before overriding the `PYTHONPATH` environment variable in the demo script.
diff --git a/changelog.d/12917.feature b/changelog.d/12917.feature
new file mode 100644
index 0000000000..b24489aaad
--- /dev/null
+++ b/changelog.d/12917.feature
@@ -0,0 +1 @@
+Add storage and module API methods to get monthly active users (and their corresponding appservices) within an optionally specified time range.
diff --git a/changelog.d/12923.feature b/changelog.d/12923.feature
new file mode 100644
index 0000000000..815a1f10ea
--- /dev/null
+++ b/changelog.d/12923.feature
@@ -0,0 +1 @@
+Support the new error code "ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED" from [MSC3823](https://github.com/matrix-org/matrix-spec-proposals/pull/3823).
\ No newline at end of file
diff --git a/changelog.d/12925.misc b/changelog.d/12925.misc
new file mode 100644
index 0000000000..71ca956dc5
--- /dev/null
+++ b/changelog.d/12925.misc
@@ -0,0 +1 @@
+Improve the logging when signature checks on events fail.
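The worker-related changelog entries above correspond to the new `complement-developonly` job in `tests.yml`. Its core command can be reproduced locally; this assumes this repository is checked out at `./synapse`, a Complement checkout exists at `./complement` (for example via `checkout_complement.sh`), and `gotestfmt` is on the PATH:

```bash
# Local equivalent of the new workers CI job (see the tests.yml change above).
set -o pipefail
WORKERS=1 COMPLEMENT_DIR="$(pwd)/complement" synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
```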
diff --git a/contrib/experiments/cursesio.py b/contrib/experiments/cursesio.py deleted file mode 100644 index 7695cc77ca..0000000000 --- a/contrib/experiments/cursesio.py +++ /dev/null @@ -1,165 +0,0 @@ -# Copyright 2014-2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import curses -import curses.wrapper -from curses.ascii import isprint - -from twisted.internet import reactor - - -class CursesStdIO: - def __init__(self, stdscr, callback=None): - self.statusText = "Synapse test app -" - self.searchText = "" - self.stdscr = stdscr - - self.logLine = "" - - self.callback = callback - - self._setup() - - def _setup(self): - self.stdscr.nodelay(1) # Make non blocking - - self.rows, self.cols = self.stdscr.getmaxyx() - self.lines = [] - - curses.use_default_colors() - - self.paintStatus(self.statusText) - self.stdscr.refresh() - - def set_callback(self, callback): - self.callback = callback - - def fileno(self): - """We want to select on FD 0""" - return 0 - - def connectionLost(self, reason): - self.close() - - def print_line(self, text): - """add a line to the internal list of lines""" - - self.lines.append(text) - self.redraw() - - def print_log(self, text): - self.logLine = text - self.redraw() - - def redraw(self): - """method for redisplaying lines based on internal list of lines""" - - self.stdscr.clear() - self.paintStatus(self.statusText) - i = 0 - index = len(self.lines) - 1 - while i < (self.rows - 3) and index >= 0: - self.stdscr.addstr(self.rows - 3 - i, 0, self.lines[index], curses.A_NORMAL) - i = i + 1 - index = index - 1 - - self.printLogLine(self.logLine) - - self.stdscr.refresh() - - def paintStatus(self, text): - if len(text) > self.cols: - raise RuntimeError("TextTooLongError") - - self.stdscr.addstr( - self.rows - 2, 0, text + " " * (self.cols - len(text)), curses.A_STANDOUT - ) - - def printLogLine(self, text): - self.stdscr.addstr( - 0, 0, text + " " * (self.cols - len(text)), curses.A_STANDOUT - ) - - def doRead(self): - """Input is ready!""" - curses.noecho() - c = self.stdscr.getch() # read a character - - if c == curses.KEY_BACKSPACE: - self.searchText = self.searchText[:-1] - - elif c == curses.KEY_ENTER or c == 10: - text = self.searchText - self.searchText = "" - - self.print_line(">> %s" % text) - - try: - if self.callback: - self.callback.on_line(text) - except Exception as e: - self.print_line(str(e)) - - self.stdscr.refresh() - - elif isprint(c): - if len(self.searchText) == self.cols - 2: - return - self.searchText = self.searchText + chr(c) - - self.stdscr.addstr( - self.rows - 1, - 0, - self.searchText + (" " * (self.cols - len(self.searchText) - 2)), - ) - - self.paintStatus(self.statusText + " %d" % len(self.searchText)) - self.stdscr.move(self.rows - 1, len(self.searchText)) - self.stdscr.refresh() - - def logPrefix(self): - return "CursesStdIO" - - def close(self): - """clean up""" - - curses.nocbreak() - self.stdscr.keypad(0) - curses.echo() - curses.endwin() - - -class Callback: - def __init__(self, stdio): - self.stdio = stdio - - 
def on_line(self, text): - self.stdio.print_line(text) - - -def main(stdscr): - screen = CursesStdIO(stdscr) # create Screen object - - callback = Callback(screen) - - screen.set_callback(callback) - - stdscr.refresh() - reactor.addReader(screen) - reactor.run() - screen.close() - - -if __name__ == "__main__": - curses.wrapper(main) diff --git a/contrib/experiments/test_messaging.py b/contrib/experiments/test_messaging.py deleted file mode 100644 index 31b8a68225..0000000000 --- a/contrib/experiments/test_messaging.py +++ /dev/null @@ -1,367 +0,0 @@ -# Copyright 2014-2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -""" This is an example of using the server to server implementation to do a -basic chat style thing. It accepts commands from stdin and outputs to stdout. - -It assumes that ucids are of the form <user>@<domain>, and uses <domain> as -the address of the remote home server to hit. - -Usage: - python test_messaging.py <port> - -Currently assumes the local address is localhost:<port> - -""" - - -import argparse -import curses.wrapper -import json -import logging -import os -import re - -import cursesio - -from twisted.internet import defer, reactor -from twisted.python import log - -from synapse.app.homeserver import SynapseHomeServer -from synapse.federation import ReplicationHandler -from synapse.federation.units import Pdu -from synapse.util import origin_from_ucid - -# from synapse.logging.utils import log_function - - -logger = logging.getLogger("example") - - -def excpetion_errback(failure): - logging.exception(failure) - - -class InputOutput: - """This is responsible for basic I/O so that a user can interact with - the example app. - """ - - def __init__(self, screen, user): - self.screen = screen - self.user = user - - def set_home_server(self, server): - self.server = server - - def on_line(self, line): - """This is where we process commands.""" - - try: - m = re.match(r"^join (\S+)$", line) - if m: - # The `sender` wants to join a room.
- (room_name,) = m.groups() - self.print_line("%s joining %s" % (self.user, room_name)) - self.server.join_room(room_name, self.user, self.user) - # self.print_line("OK.") - return - - m = re.match(r"^invite (\S+) (\S+)$", line) - if m: - # `sender` wants to invite someone to a room - room_name, invitee = m.groups() - self.print_line("%s invited to %s" % (invitee, room_name)) - self.server.invite_to_room(room_name, self.user, invitee) - # self.print_line("OK.") - return - - m = re.match(r"^send (\S+) (.*)$", line) - if m: - # `sender` wants to message a room - room_name, body = m.groups() - self.print_line("%s send to %s" % (self.user, room_name)) - self.server.send_message(room_name, self.user, body) - # self.print_line("OK.") - return - - m = re.match(r"^backfill (\S+)$", line) - if m: - # we want to backfill a room - (room_name,) = m.groups() - self.print_line("backfill %s" % room_name) - self.server.backfill(room_name) - return - - self.print_line("Unrecognized command") - - except Exception as e: - logger.exception(e) - - def print_line(self, text): - self.screen.print_line(text) - - def print_log(self, text): - self.screen.print_log(text) - - -class IOLoggerHandler(logging.Handler): - def __init__(self, io): - logging.Handler.__init__(self) - self.io = io - - def emit(self, record): - if record.levelno < logging.WARN: - return - - msg = self.format(record) - self.io.print_log(msg) - - -class Room: - """Used to store (in memory) the current membership state of a room, and - which home servers we should send PDUs associated with the room to. - """ - - def __init__(self, room_name): - self.room_name = room_name - self.invited = set() - self.participants = set() - self.servers = set() - - self.oldest_server = None - - self.have_got_metadata = False - - def add_participant(self, participant): - """Someone has joined the room""" - self.participants.add(participant) - self.invited.discard(participant) - - server = origin_from_ucid(participant) - self.servers.add(server) - - if not self.oldest_server: - self.oldest_server = server - - def add_invited(self, invitee): - """Someone has been invited to the room""" - self.invited.add(invitee) - self.servers.add(origin_from_ucid(invitee)) - - -class HomeServer(ReplicationHandler): - """A very basic home server implentation that allows people to join a - room and then invite other people. 
- """ - - def __init__(self, server_name, replication_layer, output): - self.server_name = server_name - self.replication_layer = replication_layer - self.replication_layer.set_handler(self) - - self.joined_rooms = {} - - self.output = output - - def on_receive_pdu(self, pdu): - """We just received a PDU""" - pdu_type = pdu.pdu_type - - if pdu_type == "sy.room.message": - self._on_message(pdu) - elif pdu_type == "sy.room.member" and "membership" in pdu.content: - if pdu.content["membership"] == "join": - self._on_join(pdu.context, pdu.state_key) - elif pdu.content["membership"] == "invite": - self._on_invite(pdu.origin, pdu.context, pdu.state_key) - else: - self.output.print_line( - "#%s (unrec) %s = %s" - % (pdu.context, pdu.pdu_type, json.dumps(pdu.content)) - ) - - def _on_message(self, pdu): - """We received a message""" - self.output.print_line( - "#%s %s %s" % (pdu.context, pdu.content["sender"], pdu.content["body"]) - ) - - def _on_join(self, context, joinee): - """Someone has joined a room, either a remote user or a local user""" - room = self._get_or_create_room(context) - room.add_participant(joinee) - - self.output.print_line("#%s %s %s" % (context, joinee, "*** JOINED")) - - def _on_invite(self, origin, context, invitee): - """Someone has been invited""" - room = self._get_or_create_room(context) - room.add_invited(invitee) - - self.output.print_line("#%s %s %s" % (context, invitee, "*** INVITED")) - - if not room.have_got_metadata and origin is not self.server_name: - logger.debug("Get room state") - self.replication_layer.get_state_for_context(origin, context) - room.have_got_metadata = True - - @defer.inlineCallbacks - def send_message(self, room_name, sender, body): - """Send a message to a room!""" - destinations = yield self.get_servers_for_context(room_name) - - try: - yield self.replication_layer.send_pdu( - Pdu.create_new( - context=room_name, - pdu_type="sy.room.message", - content={"sender": sender, "body": body}, - origin=self.server_name, - destinations=destinations, - ) - ) - except Exception as e: - logger.exception(e) - - @defer.inlineCallbacks - def join_room(self, room_name, sender, joinee): - """Join a room!""" - self._on_join(room_name, joinee) - - destinations = yield self.get_servers_for_context(room_name) - - try: - pdu = Pdu.create_new( - context=room_name, - pdu_type="sy.room.member", - is_state=True, - state_key=joinee, - content={"membership": "join"}, - origin=self.server_name, - destinations=destinations, - ) - yield self.replication_layer.send_pdu(pdu) - except Exception as e: - logger.exception(e) - - @defer.inlineCallbacks - def invite_to_room(self, room_name, sender, invitee): - """Invite someone to a room!""" - self._on_invite(self.server_name, room_name, invitee) - - destinations = yield self.get_servers_for_context(room_name) - - try: - yield self.replication_layer.send_pdu( - Pdu.create_new( - context=room_name, - is_state=True, - pdu_type="sy.room.member", - state_key=invitee, - content={"membership": "invite"}, - origin=self.server_name, - destinations=destinations, - ) - ) - except Exception as e: - logger.exception(e) - - def backfill(self, room_name, limit=5): - room = self.joined_rooms.get(room_name) - - if not room: - return - - dest = room.oldest_server - - return self.replication_layer.backfill(dest, room_name, limit) - - def _get_room_remote_servers(self, room_name): - return list(self.joined_rooms.setdefault(room_name).servers) - - def _get_or_create_room(self, room_name): - return self.joined_rooms.setdefault(room_name, 
Room(room_name)) - - def get_servers_for_context(self, context): - return defer.succeed( - self.joined_rooms.setdefault(context, Room(context)).servers - ) - - -def main(stdscr): - parser = argparse.ArgumentParser() - parser.add_argument("user", type=str) - parser.add_argument("-v", "--verbose", action="count") - args = parser.parse_args() - - user = args.user - server_name = origin_from_ucid(user) - - # Set up logging - - root_logger = logging.getLogger() - - formatter = logging.Formatter( - "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s" - ) - if not os.path.exists("logs"): - os.makedirs("logs") - fh = logging.FileHandler("logs/%s" % user) - fh.setFormatter(formatter) - - root_logger.addHandler(fh) - root_logger.setLevel(logging.DEBUG) - - # Hack: The only way to get it to stop logging to sys.stderr :( - log.theLogPublisher.observers = [] - observer = log.PythonLoggingObserver() - observer.start() - - # Set up synapse server - - curses_stdio = cursesio.CursesStdIO(stdscr) - input_output = InputOutput(curses_stdio, user) - - curses_stdio.set_callback(input_output) - - app_hs = SynapseHomeServer(server_name, db_name="dbs/%s" % user) - replication = app_hs.get_replication_layer() - - hs = HomeServer(server_name, replication, curses_stdio) - - input_output.set_home_server(hs) - - # Add input_output logger - io_logger = IOLoggerHandler(input_output) - io_logger.setFormatter(formatter) - root_logger.addHandler(io_logger) - - # Start! - - try: - port = int(server_name.split(":")[1]) - except Exception: - port = 12345 - - app_hs.get_http_server().start_listening(port) - - reactor.addReader(curses_stdio) - - reactor.run() - - -if __name__ == "__main__": - curses.wrapper(main) diff --git a/contrib/jitsimeetbridge/jitsimeetbridge.py b/contrib/jitsimeetbridge/jitsimeetbridge.py deleted file mode 100644 index b3de468687..0000000000 --- a/contrib/jitsimeetbridge/jitsimeetbridge.py +++ /dev/null @@ -1,298 +0,0 @@ -#!/usr/bin/env python - -""" -This is an attempt at bridging matrix clients into a Jitis meet room via Matrix -video call. It uses hard-coded xml strings overg XMPP BOSH. It can display one -of the streams from the Jitsi bridge until the second lot of SDP comes down and -we set the remote SDP at which point the stream ends. Our video never gets to -the bridge. 
- -Requires: -npm install jquery jsdom -""" -import json -import subprocess -import time - -import gevent -import grequests -from BeautifulSoup import BeautifulSoup - -ACCESS_TOKEN = "" - -MATRIXBASE = "https://matrix.org/_matrix/client/api/v1/" -MYUSERNAME = "@davetest:matrix.org" - -HTTPBIND = "https://meet.jit.si/http-bind" -# HTTPBIND = 'https://jitsi.vuc.me/http-bind' -# ROOMNAME = "matrix" -ROOMNAME = "pibble" - -HOST = "guest.jit.si" -# HOST="jitsi.vuc.me" - -TURNSERVER = "turn.guest.jit.si" -# TURNSERVER="turn.jitsi.vuc.me" - -ROOMDOMAIN = "meet.jit.si" -# ROOMDOMAIN="conference.jitsi.vuc.me" - - -class TrivialMatrixClient: - def __init__(self, access_token): - self.token = None - self.access_token = access_token - - def getEvent(self): - while True: - url = ( - MATRIXBASE - + "events?access_token=" - + self.access_token - + "&timeout=60000" - ) - if self.token: - url += "&from=" + self.token - req = grequests.get(url) - resps = grequests.map([req]) - obj = json.loads(resps[0].content) - print("incoming from matrix", obj) - if "end" not in obj: - continue - self.token = obj["end"] - if len(obj["chunk"]): - return obj["chunk"][0] - - def joinRoom(self, roomId): - url = MATRIXBASE + "rooms/" + roomId + "/join?access_token=" + self.access_token - print(url) - headers = {"Content-Type": "application/json"} - req = grequests.post(url, headers=headers, data="{}") - resps = grequests.map([req]) - obj = json.loads(resps[0].content) - print("response: ", obj) - - def sendEvent(self, roomId, evType, event): - url = ( - MATRIXBASE - + "rooms/" - + roomId - + "/send/" - + evType - + "?access_token=" - + self.access_token - ) - print(url) - print(json.dumps(event)) - headers = {"Content-Type": "application/json"} - req = grequests.post(url, headers=headers, data=json.dumps(event)) - resps = grequests.map([req]) - obj = json.loads(resps[0].content) - print("response: ", obj) - - -xmppClients = {} - - -def matrixLoop(): - while True: - ev = matrixCli.getEvent() - print(ev) - if ev["type"] == "m.room.member": - print("membership event") - if ev["membership"] == "invite" and ev["state_key"] == MYUSERNAME: - roomId = ev["room_id"] - print("joining room %s" % (roomId)) - matrixCli.joinRoom(roomId) - elif ev["type"] == "m.room.message": - if ev["room_id"] in xmppClients: - print("already have a bridge for that user, ignoring") - continue - print("got message, connecting") - xmppClients[ev["room_id"]] = TrivialXmppClient(ev["room_id"], ev["user_id"]) - gevent.spawn(xmppClients[ev["room_id"]].xmppLoop) - elif ev["type"] == "m.call.invite": - print("Incoming call") - # sdp = ev['content']['offer']['sdp'] - # print "sdp: %s" % (sdp) - # xmppClients[ev['room_id']] = TrivialXmppClient(ev['room_id'], ev['user_id']) - # gevent.spawn(xmppClients[ev['room_id']].xmppLoop) - elif ev["type"] == "m.call.answer": - print("Call answered") - sdp = ev["content"]["answer"]["sdp"] - if ev["room_id"] not in xmppClients: - print("We didn't have a call for that room") - continue - # should probably check call ID too - xmppCli = xmppClients[ev["room_id"]] - xmppCli.sendAnswer(sdp) - elif ev["type"] == "m.call.hangup": - if ev["room_id"] in xmppClients: - xmppClients[ev["room_id"]].stop() - del xmppClients[ev["room_id"]] - - -class TrivialXmppClient: - def __init__(self, matrixRoom, userId): - self.rid = 0 - self.matrixRoom = matrixRoom - self.userId = userId - self.running = True - - def stop(self): - self.running = False - - def nextRid(self): - self.rid += 1 - return "%d" % (self.rid) - - def sendIq(self, xml): - fullXml = 
( - "%s" - % (self.nextRid(), self.sid, xml) - ) - # print "\t>>>%s" % (fullXml) - return self.xmppPoke(fullXml) - - def xmppPoke(self, xml): - headers = {"Content-Type": "application/xml"} - req = grequests.post(HTTPBIND, verify=False, headers=headers, data=xml) - resps = grequests.map([req]) - obj = BeautifulSoup(resps[0].content) - return obj - - def sendAnswer(self, answer): - print("sdp from matrix client", answer) - p = subprocess.Popen( - ["node", "unjingle/unjingle.js", "--sdp"], - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - ) - jingle, out_err = p.communicate(answer) - jingle = jingle % { - "tojid": self.callfrom, - "action": "session-accept", - "initiator": self.callfrom, - "responder": self.jid, - "sid": self.callsid, - } - print("answer jingle from sdp", jingle) - res = self.sendIq(jingle) - print("reply from answer: ", res) - - self.ssrcs = {} - jingleSoup = BeautifulSoup(jingle) - for cont in jingleSoup.iq.jingle.findAll("content"): - if cont.description: - self.ssrcs[cont["name"]] = cont.description["ssrc"] - print("my ssrcs:", self.ssrcs) - - gevent.joinall([gevent.spawn(self.advertiseSsrcs)]) - - def advertiseSsrcs(self): - time.sleep(7) - print("SSRC spammer started") - while self.running: - ssrcMsg = ( - "%(nick)s" - % { - "tojid": "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid), - "nick": self.userId, - "assrc": self.ssrcs["audio"], - "vssrc": self.ssrcs["video"], - } - ) - res = self.sendIq(ssrcMsg) - print("reply from ssrc announce: ", res) - time.sleep(10) - - def xmppLoop(self): - self.matrixCallId = time.time() - res = self.xmppPoke( - "" - % (self.nextRid(), HOST) - ) - - print(res) - self.sid = res.body["sid"] - print("sid %s" % (self.sid)) - - res = self.sendIq( - "" - ) - - res = self.xmppPoke( - "" - % (self.nextRid(), self.sid, HOST) - ) - - res = self.sendIq( - "" - ) - print(res) - - self.jid = res.body.iq.bind.jid.string - print("jid: %s" % (self.jid)) - self.shortJid = self.jid.split("-")[0] - - res = self.sendIq( - "" - ) - - # randomthing = res.body.iq['to'] - # whatsitpart = randomthing.split('-')[0] - - # print "other random bind thing: %s" % (randomthing) - - # advertise preence to the jitsi room, with our nick - res = self.sendIq( - "%s" - % (HOST, TURNSERVER, ROOMNAME, ROOMDOMAIN, self.userId) - ) - self.muc = {"users": []} - for p in res.body.findAll("presence"): - u = {} - u["shortJid"] = p["from"].split("/")[1] - if p.c and p.c.nick: - u["nick"] = p.c.nick.string - self.muc["users"].append(u) - print("muc: ", self.muc) - - # wait for stuff - while True: - print("waiting...") - res = self.sendIq("") - print("got from stream: ", res) - if res.body.iq: - jingles = res.body.iq.findAll("jingle") - if len(jingles): - self.callfrom = res.body.iq["from"] - self.handleInvite(jingles[0]) - elif "type" in res.body and res.body["type"] == "terminate": - self.running = False - del xmppClients[self.matrixRoom] - return - - def handleInvite(self, jingle): - self.initiator = jingle["initiator"] - self.callsid = jingle["sid"] - p = subprocess.Popen( - ["node", "unjingle/unjingle.js", "--jingle"], - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - ) - print("raw jingle invite", str(jingle)) - sdp, out_err = p.communicate(str(jingle)) - print("transformed remote offer sdp", sdp) - inviteEvent = { - "offer": {"type": "offer", "sdp": sdp}, - "call_id": self.matrixCallId, - "version": 0, - "lifetime": 30000, - } - matrixCli.sendEvent(self.matrixRoom, "m.call.invite", inviteEvent) - - -matrixCli = TrivialMatrixClient(ACCESS_TOKEN) # Undefined name - 
-gevent.joinall([gevent.spawn(matrixLoop)]) diff --git a/contrib/jitsimeetbridge/syweb-jitsi-conference.patch b/contrib/jitsimeetbridge/syweb-jitsi-conference.patch deleted file mode 100644 index aed23c78aa..0000000000 --- a/contrib/jitsimeetbridge/syweb-jitsi-conference.patch +++ /dev/null @@ -1,188 +0,0 @@ -diff --git a/syweb/webclient/app/components/matrix/matrix-call.js b/syweb/webclient/app/components/matrix/matrix-call.js -index 9fbfff0..dc68077 100644 ---- a/syweb/webclient/app/components/matrix/matrix-call.js -+++ b/syweb/webclient/app/components/matrix/matrix-call.js -@@ -16,6 +16,45 @@ limitations under the License. - - 'use strict'; - -+ -+function sendKeyframe(pc) { -+ console.log('sendkeyframe', pc.iceConnectionState); -+ if (pc.iceConnectionState !== 'connected') return; // safe... -+ pc.setRemoteDescription( -+ pc.remoteDescription, -+ function () { -+ pc.createAnswer( -+ function (modifiedAnswer) { -+ pc.setLocalDescription( -+ modifiedAnswer, -+ function () { -+ // noop -+ }, -+ function (error) { -+ console.log('triggerKeyframe setLocalDescription failed', error); -+ messageHandler.showError(); -+ } -+ ); -+ }, -+ function (error) { -+ console.log('triggerKeyframe createAnswer failed', error); -+ messageHandler.showError(); -+ } -+ ); -+ }, -+ function (error) { -+ console.log('triggerKeyframe setRemoteDescription failed', error); -+ messageHandler.showError(); -+ } -+ ); -+} -+ -+ -+ -+ -+ -+ -+ - var forAllVideoTracksOnStream = function(s, f) { - var tracks = s.getVideoTracks(); - for (var i = 0; i < tracks.length; i++) { -@@ -83,7 +122,7 @@ angular.module('MatrixCall', []) - } - - // FIXME: we should prevent any calls from being placed or accepted before this has finished -- MatrixCall.getTurnServer(); -+ //MatrixCall.getTurnServer(); - - MatrixCall.CALL_TIMEOUT = 60000; - MatrixCall.FALLBACK_STUN_SERVER = 'stun:stun.l.google.com:19302'; -@@ -132,6 +171,22 @@ angular.module('MatrixCall', []) - pc.onsignalingstatechange = function() { self.onSignallingStateChanged(); }; - pc.onicecandidate = function(c) { self.gotLocalIceCandidate(c); }; - pc.onaddstream = function(s) { self.onAddStream(s); }; -+ -+ var datachan = pc.createDataChannel('RTCDataChannel', { -+ reliable: false -+ }); -+ console.log("data chan: "+datachan); -+ datachan.onopen = function() { -+ console.log("data channel open"); -+ }; -+ datachan.onmessage = function() { -+ console.log("data channel message"); -+ }; -+ pc.ondatachannel = function(event) { -+ console.log("have data channel"); -+ event.channel.binaryType = 'blob'; -+ }; -+ - return pc; - } - -@@ -200,6 +255,12 @@ angular.module('MatrixCall', []) - }, this.msg.lifetime - event.age); - }; - -+ MatrixCall.prototype.receivedInvite = function(event) { -+ console.log("Got second invite for call "+this.call_id); -+ this.peerConn.setRemoteDescription(new RTCSessionDescription(this.msg.offer), this.onSetRemoteDescriptionSuccess, this.onSetRemoteDescriptionError); -+ }; -+ -+ - // perverse as it may seem, sometimes we want to instantiate a call with a hangup message - // (because when getting the state of the room on load, events come in reverse order and - // we want to remember that a call has been hung up) -@@ -349,7 +410,7 @@ angular.module('MatrixCall', []) - 'mandatory': { - 'OfferToReceiveAudio': true, - 'OfferToReceiveVideo': this.type == 'video' -- }, -+ } - }; - this.peerConn.createAnswer(function(d) { self.createdAnswer(d); }, function(e) {}, constraints); - // This can't be in an apply() because it's called by a predecessor call under glare 
conditions :( -@@ -359,8 +420,20 @@ angular.module('MatrixCall', []) - MatrixCall.prototype.gotLocalIceCandidate = function(event) { - if (event.candidate) { - console.log("Got local ICE "+event.candidate.sdpMid+" candidate: "+event.candidate.candidate); -- this.sendCandidate(event.candidate); -- } -+ //this.sendCandidate(event.candidate); -+ } else { -+ console.log("have all candidates, sending answer"); -+ var content = { -+ version: 0, -+ call_id: this.call_id, -+ answer: this.peerConn.localDescription -+ }; -+ this.sendEventWithRetry('m.call.answer', content); -+ var self = this; -+ $rootScope.$apply(function() { -+ self.state = 'connecting'; -+ }); -+ } - } - - MatrixCall.prototype.gotRemoteIceCandidate = function(cand) { -@@ -418,15 +491,6 @@ angular.module('MatrixCall', []) - console.log("Created answer: "+description); - var self = this; - this.peerConn.setLocalDescription(description, function() { -- var content = { -- version: 0, -- call_id: self.call_id, -- answer: self.peerConn.localDescription -- }; -- self.sendEventWithRetry('m.call.answer', content); -- $rootScope.$apply(function() { -- self.state = 'connecting'; -- }); - }, function() { console.log("Error setting local description!"); } ); - }; - -@@ -448,6 +512,9 @@ angular.module('MatrixCall', []) - $rootScope.$apply(function() { - self.state = 'connected'; - self.didConnect = true; -+ /*$timeout(function() { -+ sendKeyframe(self.peerConn); -+ }, 1000);*/ - }); - } else if (this.peerConn.iceConnectionState == 'failed') { - this.hangup('ice_failed'); -@@ -518,6 +585,7 @@ angular.module('MatrixCall', []) - - MatrixCall.prototype.onRemoteStreamEnded = function(event) { - console.log("Remote stream ended"); -+ return; - var self = this; - $rootScope.$apply(function() { - self.state = 'ended'; -diff --git a/syweb/webclient/app/components/matrix/matrix-phone-service.js b/syweb/webclient/app/components/matrix/matrix-phone-service.js -index 55dbbf5..272fa27 100644 ---- a/syweb/webclient/app/components/matrix/matrix-phone-service.js -+++ b/syweb/webclient/app/components/matrix/matrix-phone-service.js -@@ -48,6 +48,13 @@ angular.module('matrixPhoneService', []) - return; - } - -+ // do we already have an entry for this call ID? 
-+ var existingEntry = matrixPhoneService.allCalls[msg.call_id]; -+ if (existingEntry) { -+ existingEntry.receivedInvite(msg); -+ return; -+ } -+ - var call = undefined; - if (!isLive) { - // if this event wasn't live then this call may already be over -@@ -108,7 +115,7 @@ angular.module('matrixPhoneService', []) - call.hangup(); - } - } else { -- $rootScope.$broadcast(matrixPhoneService.INCOMING_CALL_EVENT, call); -+ $rootScope.$broadcast(matrixPhoneService.INCOMING_CALL_EVENT, call); - } - } else if (event.type == 'm.call.answer') { - var call = matrixPhoneService.allCalls[msg.call_id]; diff --git a/contrib/jitsimeetbridge/unjingle/strophe.jingle.sdp.js b/contrib/jitsimeetbridge/unjingle/strophe.jingle.sdp.js deleted file mode 100644 index e99dd7bf96..0000000000 --- a/contrib/jitsimeetbridge/unjingle/strophe.jingle.sdp.js +++ /dev/null @@ -1,712 +0,0 @@ -/* jshint -W117 */ -// SDP STUFF -function SDP(sdp) { - this.media = sdp.split('\r\nm='); - for (var i = 1; i < this.media.length; i++) { - this.media[i] = 'm=' + this.media[i]; - if (i != this.media.length - 1) { - this.media[i] += '\r\n'; - } - } - this.session = this.media.shift() + '\r\n'; - this.raw = this.session + this.media.join(''); -} - -exports.SDP = SDP; - -var jsdom = require("jsdom"); -var window = jsdom.jsdom().parentWindow; -var $ = require('jquery')(window); - -var SDPUtil = require('./strophe.jingle.sdp.util.js').SDPUtil; - -/** - * Returns map of MediaChannel mapped per channel idx. - */ -SDP.prototype.getMediaSsrcMap = function() { - var self = this; - var media_ssrcs = {}; - for (channelNum = 0; channelNum < self.media.length; channelNum++) { - modified = true; - tmp = SDPUtil.find_lines(self.media[channelNum], 'a=ssrc:'); - var type = SDPUtil.parse_mid(SDPUtil.find_line(self.media[channelNum], 'a=mid:')); - var channel = new MediaChannel(channelNum, type); - media_ssrcs[channelNum] = channel; - tmp.forEach(function (line) { - var linessrc = line.substring(7).split(' ')[0]; - // allocate new ChannelSsrc - if(!channel.ssrcs[linessrc]) { - channel.ssrcs[linessrc] = new ChannelSsrc(linessrc, type); - } - channel.ssrcs[linessrc].lines.push(line); - }); - tmp = SDPUtil.find_lines(self.media[channelNum], 'a=ssrc-group:'); - tmp.forEach(function(line){ - var semantics = line.substr(0, idx).substr(13); - var ssrcs = line.substr(14 + semantics.length).split(' '); - if (ssrcs.length != 0) { - var ssrcGroup = new ChannelSsrcGroup(semantics, ssrcs); - channel.ssrcGroups.push(ssrcGroup); - } - }); - } - return media_ssrcs; -}; -/** - * Returns true if this SDP contains given SSRC. - * @param ssrc the ssrc to check. - * @returns {boolean} true if this SDP contains given SSRC. - */ -SDP.prototype.containsSSRC = function(ssrc) { - var channels = this.getMediaSsrcMap(); - var contains = false; - Object.keys(channels).forEach(function(chNumber){ - var channel = channels[chNumber]; - //console.log("Check", channel, ssrc); - if(Object.keys(channel.ssrcs).indexOf(ssrc) != -1){ - contains = true; - } - }); - return contains; -}; - -/** - * Returns map of MediaChannel that contains only media not contained in otherSdp. Mapped by channel idx. - * @param otherSdp the other SDP to check ssrc with. - */ -SDP.prototype.getNewMedia = function(otherSdp) { - - // this could be useful in Array.prototype. 
- function arrayEquals(array) { - // if the other array is a falsy value, return - if (!array) - return false; - - // compare lengths - can save a lot of time - if (this.length != array.length) - return false; - - for (var i = 0, l=this.length; i < l; i++) { - // Check if we have nested arrays - if (this[i] instanceof Array && array[i] instanceof Array) { - // recurse into the nested arrays - if (!this[i].equals(array[i])) - return false; - } - else if (this[i] != array[i]) { - // Warning - two different object instances will never be equal: {x:20} != {x:20} - return false; - } - } - return true; - } - - var myMedia = this.getMediaSsrcMap(); - var othersMedia = otherSdp.getMediaSsrcMap(); - var newMedia = {}; - Object.keys(othersMedia).forEach(function(channelNum) { - var myChannel = myMedia[channelNum]; - var othersChannel = othersMedia[channelNum]; - if(!myChannel && othersChannel) { - // Add whole channel - newMedia[channelNum] = othersChannel; - return; - } - // Look for new ssrcs accross the channel - Object.keys(othersChannel.ssrcs).forEach(function(ssrc) { - if(Object.keys(myChannel.ssrcs).indexOf(ssrc) === -1) { - // Allocate channel if we've found ssrc that doesn't exist in our channel - if(!newMedia[channelNum]){ - newMedia[channelNum] = new MediaChannel(othersChannel.chNumber, othersChannel.mediaType); - } - newMedia[channelNum].ssrcs[ssrc] = othersChannel.ssrcs[ssrc]; - } - }); - - // Look for new ssrc groups across the channels - othersChannel.ssrcGroups.forEach(function(otherSsrcGroup){ - - // try to match the other ssrc-group with an ssrc-group of ours - var matched = false; - for (var i = 0; i < myChannel.ssrcGroups.length; i++) { - var mySsrcGroup = myChannel.ssrcGroups[i]; - if (otherSsrcGroup.semantics == mySsrcGroup.semantics - && arrayEquals.apply(otherSsrcGroup.ssrcs, [mySsrcGroup.ssrcs])) { - - matched = true; - break; - } - } - - if (!matched) { - // Allocate channel if we've found an ssrc-group that doesn't - // exist in our channel - - if(!newMedia[channelNum]){ - newMedia[channelNum] = new MediaChannel(othersChannel.chNumber, othersChannel.mediaType); - } - newMedia[channelNum].ssrcGroups.push(otherSsrcGroup); - } - }); - }); - return newMedia; -}; - -// remove iSAC and CN from SDP -SDP.prototype.mangle = function () { - var i, j, mline, lines, rtpmap, newdesc; - for (i = 0; i < this.media.length; i++) { - lines = this.media[i].split('\r\n'); - lines.pop(); // remove empty last element - mline = SDPUtil.parse_mline(lines.shift()); - if (mline.media != 'audio') - continue; - newdesc = ''; - mline.fmt.length = 0; - for (j = 0; j < lines.length; j++) { - if (lines[j].substr(0, 9) == 'a=rtpmap:') { - rtpmap = SDPUtil.parse_rtpmap(lines[j]); - if (rtpmap.name == 'CN' || rtpmap.name == 'ISAC') - continue; - mline.fmt.push(rtpmap.id); - newdesc += lines[j] + '\r\n'; - } else { - newdesc += lines[j] + '\r\n'; - } - } - this.media[i] = SDPUtil.build_mline(mline) + '\r\n'; - this.media[i] += newdesc; - } - this.raw = this.session + this.media.join(''); -}; - -// remove lines matching prefix from session section -SDP.prototype.removeSessionLines = function(prefix) { - var self = this; - var lines = SDPUtil.find_lines(this.session, prefix); - lines.forEach(function(line) { - self.session = self.session.replace(line + '\r\n', ''); - }); - this.raw = this.session + this.media.join(''); - return lines; -} -// remove lines matching prefix from a media section specified by mediaindex -// TODO: non-numeric mediaindex could match mid -SDP.prototype.removeMediaLines = 
function(mediaindex, prefix) { - var self = this; - var lines = SDPUtil.find_lines(this.media[mediaindex], prefix); - lines.forEach(function(line) { - self.media[mediaindex] = self.media[mediaindex].replace(line + '\r\n', ''); - }); - this.raw = this.session + this.media.join(''); - return lines; -} - -// add content's to a jingle element -SDP.prototype.toJingle = function (elem, thecreator) { - var i, j, k, mline, ssrc, rtpmap, tmp, line, lines; - var self = this; - // new bundle plan - if (SDPUtil.find_line(this.session, 'a=group:')) { - lines = SDPUtil.find_lines(this.session, 'a=group:'); - for (i = 0; i < lines.length; i++) { - tmp = lines[i].split(' '); - var semantics = tmp.shift().substr(8); - elem.c('group', {xmlns: 'urn:xmpp:jingle:apps:grouping:0', semantics:semantics}); - for (j = 0; j < tmp.length; j++) { - elem.c('content', {name: tmp[j]}).up(); - } - elem.up(); - } - } - // old bundle plan, to be removed - var bundle = []; - if (SDPUtil.find_line(this.session, 'a=group:BUNDLE')) { - bundle = SDPUtil.find_line(this.session, 'a=group:BUNDLE ').split(' '); - bundle.shift(); - } - for (i = 0; i < this.media.length; i++) { - mline = SDPUtil.parse_mline(this.media[i].split('\r\n')[0]); - if (!(mline.media === 'audio' || - mline.media === 'video' || - mline.media === 'application')) - { - continue; - } - if (SDPUtil.find_line(this.media[i], 'a=ssrc:')) { - ssrc = SDPUtil.find_line(this.media[i], 'a=ssrc:').substring(7).split(' ')[0]; // take the first - } else { - ssrc = false; - } - - elem.c('content', {creator: thecreator, name: mline.media}); - if (SDPUtil.find_line(this.media[i], 'a=mid:')) { - // prefer identifier from a=mid if present - var mid = SDPUtil.parse_mid(SDPUtil.find_line(this.media[i], 'a=mid:')); - elem.attrs({ name: mid }); - - // old BUNDLE plan, to be removed - if (bundle.indexOf(mid) !== -1) { - elem.c('bundle', {xmlns: 'http://estos.de/ns/bundle'}).up(); - bundle.splice(bundle.indexOf(mid), 1); - } - } - - if (SDPUtil.find_line(this.media[i], 'a=rtpmap:').length) - { - elem.c('description', - {xmlns: 'urn:xmpp:jingle:apps:rtp:1', - media: mline.media }); - if (ssrc) { - elem.attrs({ssrc: ssrc}); - } - for (j = 0; j < mline.fmt.length; j++) { - rtpmap = SDPUtil.find_line(this.media[i], 'a=rtpmap:' + mline.fmt[j]); - elem.c('payload-type', SDPUtil.parse_rtpmap(rtpmap)); - // put any 'a=fmtp:' + mline.fmt[j] lines into - if (SDPUtil.find_line(this.media[i], 'a=fmtp:' + mline.fmt[j])) { - tmp = SDPUtil.parse_fmtp(SDPUtil.find_line(this.media[i], 'a=fmtp:' + mline.fmt[j])); - for (k = 0; k < tmp.length; k++) { - elem.c('parameter', tmp[k]).up(); - } - } - this.RtcpFbToJingle(i, elem, mline.fmt[j]); // XEP-0293 -- map a=rtcp-fb - - elem.up(); - } - if (SDPUtil.find_line(this.media[i], 'a=crypto:', this.session)) { - elem.c('encryption', {required: 1}); - var crypto = SDPUtil.find_lines(this.media[i], 'a=crypto:', this.session); - crypto.forEach(function(line) { - elem.c('crypto', SDPUtil.parse_crypto(line)).up(); - }); - elem.up(); // end of encryption - } - - if (ssrc) { - // new style mapping - elem.c('source', { ssrc: ssrc, xmlns: 'urn:xmpp:jingle:apps:rtp:ssma:0' }); - // FIXME: group by ssrc and support multiple different ssrcs - var ssrclines = SDPUtil.find_lines(this.media[i], 'a=ssrc:'); - ssrclines.forEach(function(line) { - idx = line.indexOf(' '); - var linessrc = line.substr(0, idx).substr(7); - if (linessrc != ssrc) { - elem.up(); - ssrc = linessrc; - elem.c('source', { ssrc: ssrc, xmlns: 'urn:xmpp:jingle:apps:rtp:ssma:0' }); - } - var kv = 
line.substr(idx + 1); - elem.c('parameter'); - if (kv.indexOf(':') == -1) { - elem.attrs({ name: kv }); - } else { - elem.attrs({ name: kv.split(':', 2)[0] }); - elem.attrs({ value: kv.split(':', 2)[1] }); - } - elem.up(); - }); - elem.up(); - - // old proprietary mapping, to be removed at some point - tmp = SDPUtil.parse_ssrc(this.media[i]); - tmp.xmlns = 'http://estos.de/ns/ssrc'; - tmp.ssrc = ssrc; - elem.c('ssrc', tmp).up(); // ssrc is part of description - - // XEP-0339 handle ssrc-group attributes - var ssrc_group_lines = SDPUtil.find_lines(this.media[i], 'a=ssrc-group:'); - ssrc_group_lines.forEach(function(line) { - idx = line.indexOf(' '); - var semantics = line.substr(0, idx).substr(13); - var ssrcs = line.substr(14 + semantics.length).split(' '); - if (ssrcs.length != 0) { - elem.c('ssrc-group', { semantics: semantics, xmlns: 'urn:xmpp:jingle:apps:rtp:ssma:0' }); - ssrcs.forEach(function(ssrc) { - elem.c('source', { ssrc: ssrc }) - .up(); - }); - elem.up(); - } - }); - } - - if (SDPUtil.find_line(this.media[i], 'a=rtcp-mux')) { - elem.c('rtcp-mux').up(); - } - - // XEP-0293 -- map a=rtcp-fb:* - this.RtcpFbToJingle(i, elem, '*'); - - // XEP-0294 - if (SDPUtil.find_line(this.media[i], 'a=extmap:')) { - lines = SDPUtil.find_lines(this.media[i], 'a=extmap:'); - for (j = 0; j < lines.length; j++) { - tmp = SDPUtil.parse_extmap(lines[j]); - elem.c('rtp-hdrext', { xmlns: 'urn:xmpp:jingle:apps:rtp:rtp-hdrext:0', - uri: tmp.uri, - id: tmp.value }); - if (tmp.hasOwnProperty('direction')) { - switch (tmp.direction) { - case 'sendonly': - elem.attrs({senders: 'responder'}); - break; - case 'recvonly': - elem.attrs({senders: 'initiator'}); - break; - case 'sendrecv': - elem.attrs({senders: 'both'}); - break; - case 'inactive': - elem.attrs({senders: 'none'}); - break; - } - } - // TODO: handle params - elem.up(); - } - } - elem.up(); // end of description - } - - // map ice-ufrag/pwd, dtls fingerprint, candidates - this.TransportToJingle(i, elem); - - if (SDPUtil.find_line(this.media[i], 'a=sendrecv', this.session)) { - elem.attrs({senders: 'both'}); - } else if (SDPUtil.find_line(this.media[i], 'a=sendonly', this.session)) { - elem.attrs({senders: 'initiator'}); - } else if (SDPUtil.find_line(this.media[i], 'a=recvonly', this.session)) { - elem.attrs({senders: 'responder'}); - } else if (SDPUtil.find_line(this.media[i], 'a=inactive', this.session)) { - elem.attrs({senders: 'none'}); - } - if (mline.port == '0') { - // estos hack to reject an m-line - elem.attrs({senders: 'rejected'}); - } - elem.up(); // end of content - } - elem.up(); - return elem; -}; - -SDP.prototype.TransportToJingle = function (mediaindex, elem) { - var i = mediaindex; - var tmp; - var self = this; - elem.c('transport'); - - // XEP-0343 DTLS/SCTP - if (SDPUtil.find_line(this.media[mediaindex], 'a=sctpmap:').length) - { - var sctpmap = SDPUtil.find_line( - this.media[i], 'a=sctpmap:', self.session); - if (sctpmap) - { - var sctpAttrs = SDPUtil.parse_sctpmap(sctpmap); - elem.c('sctpmap', - { - xmlns: 'urn:xmpp:jingle:transports:dtls-sctp:1', - number: sctpAttrs[0], /* SCTP port */ - protocol: sctpAttrs[1], /* protocol */ - }); - // Optional stream count attribute - if (sctpAttrs.length > 2) - elem.attrs({ streams: sctpAttrs[2]}); - elem.up(); - } - } - // XEP-0320 - var fingerprints = SDPUtil.find_lines(this.media[mediaindex], 'a=fingerprint:', this.session); - fingerprints.forEach(function(line) { - tmp = SDPUtil.parse_fingerprint(line); - tmp.xmlns = 'urn:xmpp:jingle:apps:dtls:0'; - 
elem.c('fingerprint').t(tmp.fingerprint); - delete tmp.fingerprint; - line = SDPUtil.find_line(self.media[mediaindex], 'a=setup:', self.session); - if (line) { - tmp.setup = line.substr(8); - } - elem.attrs(tmp); - elem.up(); // end of fingerprint - }); - tmp = SDPUtil.iceparams(this.media[mediaindex], this.session); - if (tmp) { - tmp.xmlns = 'urn:xmpp:jingle:transports:ice-udp:1'; - elem.attrs(tmp); - // XEP-0176 - if (SDPUtil.find_line(this.media[mediaindex], 'a=candidate:', this.session)) { // add any a=candidate lines - var lines = SDPUtil.find_lines(this.media[mediaindex], 'a=candidate:', this.session); - lines.forEach(function (line) { - elem.c('candidate', SDPUtil.candidateToJingle(line)).up(); - }); - } - } - elem.up(); // end of transport -} - -SDP.prototype.RtcpFbToJingle = function (mediaindex, elem, payloadtype) { // XEP-0293 - var lines = SDPUtil.find_lines(this.media[mediaindex], 'a=rtcp-fb:' + payloadtype); - lines.forEach(function (line) { - var tmp = SDPUtil.parse_rtcpfb(line); - if (tmp.type == 'trr-int') { - elem.c('rtcp-fb-trr-int', {xmlns: 'urn:xmpp:jingle:apps:rtp:rtcp-fb:0', value: tmp.params[0]}); - elem.up(); - } else { - elem.c('rtcp-fb', {xmlns: 'urn:xmpp:jingle:apps:rtp:rtcp-fb:0', type: tmp.type}); - if (tmp.params.length > 0) { - elem.attrs({'subtype': tmp.params[0]}); - } - elem.up(); - } - }); -}; - -SDP.prototype.RtcpFbFromJingle = function (elem, payloadtype) { // XEP-0293 - var media = ''; - var tmp = elem.find('>rtcp-fb-trr-int[xmlns="urn:xmpp:jingle:apps:rtp:rtcp-fb:0"]'); - if (tmp.length) { - media += 'a=rtcp-fb:' + '*' + ' ' + 'trr-int' + ' '; - if (tmp.attr('value')) { - media += tmp.attr('value'); - } else { - media += '0'; - } - media += '\r\n'; - } - tmp = elem.find('>rtcp-fb[xmlns="urn:xmpp:jingle:apps:rtp:rtcp-fb:0"]'); - tmp.each(function () { - media += 'a=rtcp-fb:' + payloadtype + ' ' + $(this).attr('type'); - if ($(this).attr('subtype')) { - media += ' ' + $(this).attr('subtype'); - } - media += '\r\n'; - }); - return media; -}; - -// construct an SDP from a jingle stanza -SDP.prototype.fromJingle = function (jingle) { - var self = this; - this.raw = 'v=0\r\n' + - 'o=- ' + '1923518516' + ' 2 IN IP4 0.0.0.0\r\n' +// FIXME - 's=-\r\n' + - 't=0 0\r\n'; - // http://tools.ietf.org/html/draft-ietf-mmusic-sdp-bundle-negotiation-04#section-8 - if ($(jingle).find('>group[xmlns="urn:xmpp:jingle:apps:grouping:0"]').length) { - $(jingle).find('>group[xmlns="urn:xmpp:jingle:apps:grouping:0"]').each(function (idx, group) { - var contents = $(group).find('>content').map(function (idx, content) { - return content.getAttribute('name'); - }).get(); - if (contents.length > 0) { - self.raw += 'a=group:' + (group.getAttribute('semantics') || group.getAttribute('type')) + ' ' + contents.join(' ') + '\r\n'; - } - }); - } else if ($(jingle).find('>group[xmlns="urn:ietf:rfc:5888"]').length) { - // temporary namespace, not to be used. to be removed soon. 
- $(jingle).find('>group[xmlns="urn:ietf:rfc:5888"]').each(function (idx, group) { - var contents = $(group).find('>content').map(function (idx, content) { - return content.getAttribute('name'); - }).get(); - if (group.getAttribute('type') !== null && contents.length > 0) { - self.raw += 'a=group:' + group.getAttribute('type') + ' ' + contents.join(' ') + '\r\n'; - } - }); - } else { - // for backward compability, to be removed soon - // assume all contents are in the same bundle group, can be improved upon later - var bundle = $(jingle).find('>content').filter(function (idx, content) { - //elem.c('bundle', {xmlns:'http://estos.de/ns/bundle'}); - return $(content).find('>bundle').length > 0; - }).map(function (idx, content) { - return content.getAttribute('name'); - }).get(); - if (bundle.length) { - this.raw += 'a=group:BUNDLE ' + bundle.join(' ') + '\r\n'; - } - } - - this.session = this.raw; - jingle.find('>content').each(function () { - var m = self.jingle2media($(this)); - self.media.push(m); - }); - - // reconstruct msid-semantic -- apparently not necessary - /* - var msid = SDPUtil.parse_ssrc(this.raw); - if (msid.hasOwnProperty('mslabel')) { - this.session += "a=msid-semantic: WMS " + msid.mslabel + "\r\n"; - } - */ - - this.raw = this.session + this.media.join(''); -}; - -// translate a jingle content element into an an SDP media part -SDP.prototype.jingle2media = function (content) { - var media = '', - desc = content.find('description'), - ssrc = desc.attr('ssrc'), - self = this, - tmp; - var sctp = content.find( - '>transport>sctpmap[xmlns="urn:xmpp:jingle:transports:dtls-sctp:1"]'); - - tmp = { media: desc.attr('media') }; - tmp.port = '1'; - if (content.attr('senders') == 'rejected') { - // estos hack to reject an m-line. - tmp.port = '0'; - } - if (content.find('>transport>fingerprint').length || desc.find('encryption').length) { - if (sctp.length) - tmp.proto = 'DTLS/SCTP'; - else - tmp.proto = 'RTP/SAVPF'; - } else { - tmp.proto = 'RTP/AVPF'; - } - if (!sctp.length) - { - tmp.fmt = desc.find('payload-type').map( - function () { return this.getAttribute('id'); }).get(); - media += SDPUtil.build_mline(tmp) + '\r\n'; - } - else - { - media += 'm=application 1 DTLS/SCTP ' + sctp.attr('number') + '\r\n'; - media += 'a=sctpmap:' + sctp.attr('number') + - ' ' + sctp.attr('protocol'); - - var streamCount = sctp.attr('streams'); - if (streamCount) - media += ' ' + streamCount + '\r\n'; - else - media += '\r\n'; - } - - media += 'c=IN IP4 0.0.0.0\r\n'; - if (!sctp.length) - media += 'a=rtcp:1 IN IP4 0.0.0.0\r\n'; - //tmp = content.find('>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]'); - tmp = content.find('>bundle>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]'); - //console.log('transports: '+content.find('>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]').length); - //console.log('bundle.transports: '+content.find('>bundle>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]').length); - //console.log("tmp fingerprint: "+tmp.find('>fingerprint').innerHTML); - if (tmp.length) { - if (tmp.attr('ufrag')) { - media += SDPUtil.build_iceufrag(tmp.attr('ufrag')) + '\r\n'; - } - if (tmp.attr('pwd')) { - media += SDPUtil.build_icepwd(tmp.attr('pwd')) + '\r\n'; - } - tmp.find('>fingerprint').each(function () { - // FIXME: check namespace at some point - media += 'a=fingerprint:' + this.getAttribute('hash'); - media += ' ' + $(this).text(); - media += '\r\n'; - //console.log("mline "+media); - if (this.getAttribute('setup')) { - media += 'a=setup:' + 
this.getAttribute('setup') + '\r\n'; - } - }); - } - switch (content.attr('senders')) { - case 'initiator': - media += 'a=sendonly\r\n'; - break; - case 'responder': - media += 'a=recvonly\r\n'; - break; - case 'none': - media += 'a=inactive\r\n'; - break; - case 'both': - media += 'a=sendrecv\r\n'; - break; - } - media += 'a=mid:' + content.attr('name') + '\r\n'; - /*if (content.attr('name') == 'video') { - media += 'a=x-google-flag:conference' + '\r\n'; - }*/ - - // - // see http://code.google.com/p/libjingle/issues/detail?id=309 -- no spec though - // and http://mail.jabber.org/pipermail/jingle/2011-December/001761.html - if (desc.find('rtcp-mux').length) { - media += 'a=rtcp-mux\r\n'; - } - - if (desc.find('encryption').length) { - desc.find('encryption>crypto').each(function () { - media += 'a=crypto:' + this.getAttribute('tag'); - media += ' ' + this.getAttribute('crypto-suite'); - media += ' ' + this.getAttribute('key-params'); - if (this.getAttribute('session-params')) { - media += ' ' + this.getAttribute('session-params'); - } - media += '\r\n'; - }); - } - desc.find('payload-type').each(function () { - media += SDPUtil.build_rtpmap(this) + '\r\n'; - if ($(this).find('>parameter').length) { - media += 'a=fmtp:' + this.getAttribute('id') + ' '; - media += $(this).find('parameter').map(function () { return (this.getAttribute('name') ? (this.getAttribute('name') + '=') : '') + this.getAttribute('value'); }).get().join('; '); - media += '\r\n'; - } - // xep-0293 - media += self.RtcpFbFromJingle($(this), this.getAttribute('id')); - }); - - // xep-0293 - media += self.RtcpFbFromJingle(desc, '*'); - - // xep-0294 - tmp = desc.find('>rtp-hdrext[xmlns="urn:xmpp:jingle:apps:rtp:rtp-hdrext:0"]'); - tmp.each(function () { - media += 'a=extmap:' + this.getAttribute('id') + ' ' + this.getAttribute('uri') + '\r\n'; - }); - - content.find('>bundle>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]>candidate').each(function () { - media += SDPUtil.candidateFromJingle(this); - }); - - // XEP-0339 handle ssrc-group attributes - tmp = content.find('description>ssrc-group[xmlns="urn:xmpp:jingle:apps:rtp:ssma:0"]').each(function() { - var semantics = this.getAttribute('semantics'); - var ssrcs = $(this).find('>source').map(function() { - return this.getAttribute('ssrc'); - }).get(); - - if (ssrcs.length != 0) { - media += 'a=ssrc-group:' + semantics + ' ' + ssrcs.join(' ') + '\r\n'; - } - }); - - tmp = content.find('description>source[xmlns="urn:xmpp:jingle:apps:rtp:ssma:0"]'); - tmp.each(function () { - var ssrc = this.getAttribute('ssrc'); - $(this).find('>parameter').each(function () { - media += 'a=ssrc:' + ssrc + ' ' + this.getAttribute('name'); - if (this.getAttribute('value') && this.getAttribute('value').length) - media += ':' + this.getAttribute('value'); - media += '\r\n'; - }); - }); - - if (tmp.length === 0) { - // fallback to proprietary mapping of a=ssrc lines - tmp = content.find('description>ssrc[xmlns="http://estos.de/ns/ssrc"]'); - if (tmp.length) { - media += 'a=ssrc:' + ssrc + ' cname:' + tmp.attr('cname') + '\r\n'; - media += 'a=ssrc:' + ssrc + ' msid:' + tmp.attr('msid') + '\r\n'; - media += 'a=ssrc:' + ssrc + ' mslabel:' + tmp.attr('mslabel') + '\r\n'; - media += 'a=ssrc:' + ssrc + ' label:' + tmp.attr('label') + '\r\n'; - } - } - return media; -}; - diff --git a/contrib/jitsimeetbridge/unjingle/strophe.jingle.sdp.util.js b/contrib/jitsimeetbridge/unjingle/strophe.jingle.sdp.util.js deleted file mode 100644 index 042a123c32..0000000000 --- 
a/contrib/jitsimeetbridge/unjingle/strophe.jingle.sdp.util.js +++ /dev/null @@ -1,408 +0,0 @@ -/** - * Contains utility classes used in SDP class. - * - */ - -/** - * Class holds a=ssrc lines and media type a=mid - * @param ssrc synchronization source identifier number(a=ssrc lines from SDP) - * @param type media type eg. "audio" or "video"(a=mid frm SDP) - * @constructor - */ -function ChannelSsrc(ssrc, type) { - this.ssrc = ssrc; - this.type = type; - this.lines = []; -} - -/** - * Class holds a=ssrc-group: lines - * @param semantics - * @param ssrcs - * @constructor - */ -function ChannelSsrcGroup(semantics, ssrcs, line) { - this.semantics = semantics; - this.ssrcs = ssrcs; -} - -/** - * Helper class represents media channel. Is a container for ChannelSsrc, holds channel idx and media type. - * @param channelNumber channel idx in SDP media array. - * @param mediaType media type(a=mid) - * @constructor - */ -function MediaChannel(channelNumber, mediaType) { - /** - * SDP channel number - * @type {*} - */ - this.chNumber = channelNumber; - /** - * Channel media type(a=mid) - * @type {*} - */ - this.mediaType = mediaType; - /** - * The maps of ssrc numbers to ChannelSsrc objects. - */ - this.ssrcs = {}; - - /** - * The array of ChannelSsrcGroup objects. - * @type {Array} - */ - this.ssrcGroups = []; -} - -SDPUtil = { - iceparams: function (mediadesc, sessiondesc) { - var data = null; - if (SDPUtil.find_line(mediadesc, 'a=ice-ufrag:', sessiondesc) && - SDPUtil.find_line(mediadesc, 'a=ice-pwd:', sessiondesc)) { - data = { - ufrag: SDPUtil.parse_iceufrag(SDPUtil.find_line(mediadesc, 'a=ice-ufrag:', sessiondesc)), - pwd: SDPUtil.parse_icepwd(SDPUtil.find_line(mediadesc, 'a=ice-pwd:', sessiondesc)) - }; - } - return data; - }, - parse_iceufrag: function (line) { - return line.substring(12); - }, - build_iceufrag: function (frag) { - return 'a=ice-ufrag:' + frag; - }, - parse_icepwd: function (line) { - return line.substring(10); - }, - build_icepwd: function (pwd) { - return 'a=ice-pwd:' + pwd; - }, - parse_mid: function (line) { - return line.substring(6); - }, - parse_mline: function (line) { - var parts = line.substring(2).split(' '), - data = {}; - data.media = parts.shift(); - data.port = parts.shift(); - data.proto = parts.shift(); - if (parts[parts.length - 1] === '') { // trailing whitespace - parts.pop(); - } - data.fmt = parts; - return data; - }, - build_mline: function (mline) { - return 'm=' + mline.media + ' ' + mline.port + ' ' + mline.proto + ' ' + mline.fmt.join(' '); - }, - parse_rtpmap: function (line) { - var parts = line.substring(9).split(' '), - data = {}; - data.id = parts.shift(); - parts = parts[0].split('/'); - data.name = parts.shift(); - data.clockrate = parts.shift(); - data.channels = parts.length ? parts.shift() : '1'; - return data; - }, - /** - * Parses SDP line "a=sctpmap:..." and extracts SCTP port from it. - * @param line eg. "a=sctpmap:5000 webrtc-datachannel" - * @returns [SCTP port number, protocol, streams] - */ - parse_sctpmap: function (line) - { - var parts = line.substring(10).split(' '); - var sctpPort = parts[0]; - var protocol = parts[1]; - // Stream count is optional - var streamCount = parts.length > 2 ? 
parts[2] : null; - return [sctpPort, protocol, streamCount];// SCTP port - }, - build_rtpmap: function (el) { - var line = 'a=rtpmap:' + el.getAttribute('id') + ' ' + el.getAttribute('name') + '/' + el.getAttribute('clockrate'); - if (el.getAttribute('channels') && el.getAttribute('channels') != '1') { - line += '/' + el.getAttribute('channels'); - } - return line; - }, - parse_crypto: function (line) { - var parts = line.substring(9).split(' '), - data = {}; - data.tag = parts.shift(); - data['crypto-suite'] = parts.shift(); - data['key-params'] = parts.shift(); - if (parts.length) { - data['session-params'] = parts.join(' '); - } - return data; - }, - parse_fingerprint: function (line) { // RFC 4572 - var parts = line.substring(14).split(' '), - data = {}; - data.hash = parts.shift(); - data.fingerprint = parts.shift(); - // TODO assert that fingerprint satisfies 2UHEX *(":" 2UHEX) ? - return data; - }, - parse_fmtp: function (line) { - var parts = line.split(' '), - i, key, value, - data = []; - parts.shift(); - parts = parts.join(' ').split(';'); - for (i = 0; i < parts.length; i++) { - key = parts[i].split('=')[0]; - while (key.length && key[0] == ' ') { - key = key.substring(1); - } - value = parts[i].split('=')[1]; - if (key && value) { - data.push({name: key, value: value}); - } else if (key) { - // rfc 4733 (DTMF) style stuff - data.push({name: '', value: key}); - } - } - return data; - }, - parse_icecandidate: function (line) { - var candidate = {}, - elems = line.split(' '); - candidate.foundation = elems[0].substring(12); - candidate.component = elems[1]; - candidate.protocol = elems[2].toLowerCase(); - candidate.priority = elems[3]; - candidate.ip = elems[4]; - candidate.port = elems[5]; - // elems[6] => "typ" - candidate.type = elems[7]; - candidate.generation = 0; // default value, may be overwritten below - for (var i = 8; i < elems.length; i += 2) { - switch (elems[i]) { - case 'raddr': - candidate['rel-addr'] = elems[i + 1]; - break; - case 'rport': - candidate['rel-port'] = elems[i + 1]; - break; - case 'generation': - candidate.generation = elems[i + 1]; - break; - case 'tcptype': - candidate.tcptype = elems[i + 1]; - break; - default: // TODO - console.log('parse_icecandidate not translating "' + elems[i] + '" = "' + elems[i + 1] + '"'); - } - } - candidate.network = '1'; - candidate.id = Math.random().toString(36).substr(2, 10); // not applicable to SDP -- FIXME: should be unique, not just random - return candidate; - }, - build_icecandidate: function (cand) { - var line = ['a=candidate:' + cand.foundation, cand.component, cand.protocol, cand.priority, cand.ip, cand.port, 'typ', cand.type].join(' '); - line += ' '; - switch (cand.type) { - case 'srflx': - case 'prflx': - case 'relay': - if (cand.hasOwnAttribute('rel-addr') && cand.hasOwnAttribute('rel-port')) { - line += 'raddr'; - line += ' '; - line += cand['rel-addr']; - line += ' '; - line += 'rport'; - line += ' '; - line += cand['rel-port']; - line += ' '; - } - break; - } - if (cand.hasOwnAttribute('tcptype')) { - line += 'tcptype'; - line += ' '; - line += cand.tcptype; - line += ' '; - } - line += 'generation'; - line += ' '; - line += cand.hasOwnAttribute('generation') ? cand.generation : '0'; - return line; - }, - parse_ssrc: function (desc) { - // proprietary mapping of a=ssrc lines - // TODO: see "Jingle RTP Source Description" by Juberti and P. 
Thatcher on google docs - // and parse according to that - var lines = desc.split('\r\n'), - data = {}; - for (var i = 0; i < lines.length; i++) { - if (lines[i].substring(0, 7) == 'a=ssrc:') { - var idx = lines[i].indexOf(' '); - data[lines[i].substr(idx + 1).split(':', 2)[0]] = lines[i].substr(idx + 1).split(':', 2)[1]; - } - } - return data; - }, - parse_rtcpfb: function (line) { - var parts = line.substr(10).split(' '); - var data = {}; - data.pt = parts.shift(); - data.type = parts.shift(); - data.params = parts; - return data; - }, - parse_extmap: function (line) { - var parts = line.substr(9).split(' '); - var data = {}; - data.value = parts.shift(); - if (data.value.indexOf('/') != -1) { - data.direction = data.value.substr(data.value.indexOf('/') + 1); - data.value = data.value.substr(0, data.value.indexOf('/')); - } else { - data.direction = 'both'; - } - data.uri = parts.shift(); - data.params = parts; - return data; - }, - find_line: function (haystack, needle, sessionpart) { - var lines = haystack.split('\r\n'); - for (var i = 0; i < lines.length; i++) { - if (lines[i].substring(0, needle.length) == needle) { - return lines[i]; - } - } - if (!sessionpart) { - return false; - } - // search session part - lines = sessionpart.split('\r\n'); - for (var j = 0; j < lines.length; j++) { - if (lines[j].substring(0, needle.length) == needle) { - return lines[j]; - } - } - return false; - }, - find_lines: function (haystack, needle, sessionpart) { - var lines = haystack.split('\r\n'), - needles = []; - for (var i = 0; i < lines.length; i++) { - if (lines[i].substring(0, needle.length) == needle) - needles.push(lines[i]); - } - if (needles.length || !sessionpart) { - return needles; - } - // search session part - lines = sessionpart.split('\r\n'); - for (var j = 0; j < lines.length; j++) { - if (lines[j].substring(0, needle.length) == needle) { - needles.push(lines[j]); - } - } - return needles; - }, - candidateToJingle: function (line) { - // a=candidate:2979166662 1 udp 2113937151 192.168.2.100 57698 typ host generation 0 - // - if (line.indexOf('candidate:') === 0) { - line = 'a=' + line; - } else if (line.substring(0, 12) != 'a=candidate:') { - console.log('parseCandidate called with a line that is not a candidate line'); - console.log(line); - return null; - } - if (line.substring(line.length - 2) == '\r\n') // chomp it - line = line.substring(0, line.length - 2); - var candidate = {}, - elems = line.split(' '), - i; - if (elems[6] != 'typ') { - console.log('did not find typ in the right place'); - console.log(line); - return null; - } - candidate.foundation = elems[0].substring(12); - candidate.component = elems[1]; - candidate.protocol = elems[2].toLowerCase(); - candidate.priority = elems[3]; - candidate.ip = elems[4]; - candidate.port = elems[5]; - // elems[6] => "typ" - candidate.type = elems[7]; - - candidate.generation = '0'; // default, may be overwritten below - for (i = 8; i < elems.length; i += 2) { - switch (elems[i]) { - case 'raddr': - candidate['rel-addr'] = elems[i + 1]; - break; - case 'rport': - candidate['rel-port'] = elems[i + 1]; - break; - case 'generation': - candidate.generation = elems[i + 1]; - break; - case 'tcptype': - candidate.tcptype = elems[i + 1]; - break; - default: // TODO - console.log('not translating "' + elems[i] + '" = "' + elems[i + 1] + '"'); - } - } - candidate.network = '1'; - candidate.id = Math.random().toString(36).substr(2, 10); // not applicable to SDP -- FIXME: should be unique, not just random - return candidate; - }, - 
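// A minimal standalone sketch (not part of the deleted file) of the field
// layout that parse_icecandidate above and candidateToJingle below both rely
// on. The sample line is the one quoted in candidateToJingle's comment;
// positions follow the "a=candidate:" grammar: foundation, component,
// protocol, priority, ip, port, the literal "typ", type, then key/value pairs.
var sampleLine = 'a=candidate:2979166662 1 udp 2113937151 192.168.2.100 57698 typ host generation 0';
var elems = sampleLine.split(' ');
console.log(elems[0].substring(12)); // "2979166662" -- substring(12) drops "a=candidate:"
console.log(elems[2].toLowerCase()); // "udp", the transport protocol
console.log(elems[7]);               // "host", because elems[6] is the literal "typ"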
candidateFromJingle: function (cand) { - var line = 'a=candidate:'; - line += cand.getAttribute('foundation'); - line += ' '; - line += cand.getAttribute('component'); - line += ' '; - line += cand.getAttribute('protocol'); //.toUpperCase(); // chrome M23 doesn't like this - line += ' '; - line += cand.getAttribute('priority'); - line += ' '; - line += cand.getAttribute('ip'); - line += ' '; - line += cand.getAttribute('port'); - line += ' '; - line += 'typ'; - line += ' ' + cand.getAttribute('type'); - line += ' '; - switch (cand.getAttribute('type')) { - case 'srflx': - case 'prflx': - case 'relay': - if (cand.getAttribute('rel-addr') && cand.getAttribute('rel-port')) { - line += 'raddr'; - line += ' '; - line += cand.getAttribute('rel-addr'); - line += ' '; - line += 'rport'; - line += ' '; - line += cand.getAttribute('rel-port'); - line += ' '; - } - break; - } - if (cand.getAttribute('protocol').toLowerCase() == 'tcp') { - line += 'tcptype'; - line += ' '; - line += cand.getAttribute('tcptype'); - line += ' '; - } - line += 'generation'; - line += ' '; - line += cand.getAttribute('generation') || '0'; - return line + '\r\n'; - } -}; - -exports.SDPUtil = SDPUtil; - diff --git a/contrib/jitsimeetbridge/unjingle/strophe/XMLHttpRequest.js b/contrib/jitsimeetbridge/unjingle/strophe/XMLHttpRequest.js deleted file mode 100644 index 9c45c2df18..0000000000 --- a/contrib/jitsimeetbridge/unjingle/strophe/XMLHttpRequest.js +++ /dev/null @@ -1,254 +0,0 @@ -/** - * Wrapper for built-in http.js to emulate the browser XMLHttpRequest object. - * - * This can be used with JS designed for browsers to improve reuse of code and - * allow the use of existing libraries. - * - * Usage: include("XMLHttpRequest.js") and use XMLHttpRequest per W3C specs. - * - * @todo SSL Support - * @author Dan DeFelippi - * @license MIT - */ - -var Url = require("url") - ,sys = require("util"); - -exports.XMLHttpRequest = function() { - /** - * Private variables - */ - var self = this; - var http = require('http'); - var https = require('https'); - - // Holds http.js objects - var client; - var request; - var response; - - // Request settings - var settings = {}; - - // Set some default headers - var defaultHeaders = { - "User-Agent": "node.js", - "Accept": "*/*", - }; - - var headers = defaultHeaders; - - /** - * Constants - */ - this.UNSENT = 0; - this.OPENED = 1; - this.HEADERS_RECEIVED = 2; - this.LOADING = 3; - this.DONE = 4; - - /** - * Public vars - */ - // Current state - this.readyState = this.UNSENT; - - // default ready state change handler in case one is not set or is set late - this.onreadystatechange = function() {}; - - // Result & response - this.responseText = ""; - this.responseXML = ""; - this.status = null; - this.statusText = null; - - /** - * Open the connection. Currently supports local server requests. - * - * @param string method Connection method (eg GET, POST) - * @param string url URL for the connection. - * @param boolean async Asynchronous connection. Default is true. - * @param string user Username for basic authentication (optional) - * @param string password Password for basic authentication (optional) - */ - this.open = function(method, url, async, user, password) { - settings = { - "method": method, - "url": url, - "async": async || null, - "user": user || null, - "password": password || null - }; - - this.abort(); - - setState(this.OPENED); - }; - - /** - * Sets a header for the request. 
- * - * @param string header Header name - * @param string value Header value - */ - this.setRequestHeader = function(header, value) { - headers[header] = value; - }; - - /** - * Gets a header from the server response. - * - * @param string header Name of header to get. - * @return string Text of the header or null if it doesn't exist. - */ - this.getResponseHeader = function(header) { - if (this.readyState > this.OPENED && response.headers[header]) { - return header + ": " + response.headers[header]; - } - - return null; - }; - - /** - * Gets all the response headers. - * - * @return string - */ - this.getAllResponseHeaders = function() { - if (this.readyState < this.HEADERS_RECEIVED) { - throw "INVALID_STATE_ERR: Headers have not been received."; - } - var result = ""; - - for (var i in response.headers) { - result += i + ": " + response.headers[i] + "\r\n"; - } - return result.substr(0, result.length - 2); - }; - - /** - * Sends the request to the server. - * - * @param string data Optional data to send as request body. - */ - this.send = function(data) { - if (this.readyState != this.OPENED) { - throw "INVALID_STATE_ERR: connection must be opened before send() is called"; - } - - var ssl = false; - var url = Url.parse(settings.url); - - // Determine the server - switch (url.protocol) { - case 'https:': - ssl = true; - // SSL & non-SSL both need host, no break here. - case 'http:': - var host = url.hostname; - break; - - case undefined: - case '': - var host = "localhost"; - break; - - default: - throw "Protocol not supported."; - } - - // Default to port 80. If accessing localhost on another port be sure - // to use http://localhost:port/path - var port = url.port || (ssl ? 443 : 80); - // Add query string if one is used - var uri = url.pathname + (url.search ? url.search : ''); - - // Set the Host header or the server may reject the request - this.setRequestHeader("Host", host); - - // Set content length header - if (settings.method == "GET" || settings.method == "HEAD") { - data = null; - } else if (data) { - this.setRequestHeader("Content-Length", Buffer.byteLength(data)); - - if (!headers["Content-Type"]) { - this.setRequestHeader("Content-Type", "text/plain;charset=UTF-8"); - } - } - - // Use the proper protocol - var doRequest = ssl ? https.request : http.request; - - var options = { - host: host, - port: port, - path: uri, - method: settings.method, - headers: headers, - agent: false - }; - - var req = doRequest(options, function(res) { - response = res; - response.setEncoding("utf8"); - - setState(self.HEADERS_RECEIVED); - self.status = response.statusCode; - - response.on('data', function(chunk) { - // Make sure there's some data - if (chunk) { - self.responseText += chunk; - } - setState(self.LOADING); - }); - - response.on('end', function() { - setState(self.DONE); - }); - - response.on('error', function() { - self.handleError(error); - }); - }).on('error', function(error) { - self.handleError(error); - }); - - req.setHeader("Connection", "Close"); - - // Node 0.4 and later won't accept empty data. Make sure it's needed. - if (data) { - req.write(data); - } - - req.end(); - }; - - this.handleError = function(error) { - this.status = 503; - this.statusText = error; - this.responseText = error.stack; - setState(this.DONE); - }; - - /** - * Aborts a request. - */ - this.abort = function() { - headers = defaultHeaders; - this.readyState = this.UNSENT; - this.responseText = ""; - this.responseXML = ""; - }; - - /** - * Changes readyState and calls onreadystatechange. 
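// A minimal usage sketch for this wrapper, not from the original file: the
// endpoint URL is illustrative, and the flow follows the W3C-style API the
// wrapper emulates (open() moves to OPENED, send() drives the state through
// HEADERS_RECEIVED and LOADING to DONE, firing onreadystatechange each time).
var XMLHttpRequest = require('./XMLHttpRequest.js').XMLHttpRequest;

var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:5280/http-bind', true);
xhr.onreadystatechange = function () {
    if (xhr.readyState === xhr.DONE) {
        // status and responseText are filled in from the Node http response.
        console.log(xhr.status, xhr.responseText.length);
    }
};
xhr.send(null);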
- * - * @param int state New state - */ - var setState = function(state) { - self.readyState = state; - self.onreadystatechange(); - } -}; diff --git a/contrib/jitsimeetbridge/unjingle/strophe/base64.js b/contrib/jitsimeetbridge/unjingle/strophe/base64.js deleted file mode 100644 index 418caac050..0000000000 --- a/contrib/jitsimeetbridge/unjingle/strophe/base64.js +++ /dev/null @@ -1,83 +0,0 @@ -// This code was written by Tyler Akins and has been placed in the -// public domain. It would be nice if you left this header intact. -// Base64 code from Tyler Akins -- http://rumkin.com - -var Base64 = (function () { - var keyStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="; - - var obj = { - /** - * Encodes a string in base64 - * @param {String} input The string to encode in base64. - */ - encode: function (input) { - var output = ""; - var chr1, chr2, chr3; - var enc1, enc2, enc3, enc4; - var i = 0; - - do { - chr1 = input.charCodeAt(i++); - chr2 = input.charCodeAt(i++); - chr3 = input.charCodeAt(i++); - - enc1 = chr1 >> 2; - enc2 = ((chr1 & 3) << 4) | (chr2 >> 4); - enc3 = ((chr2 & 15) << 2) | (chr3 >> 6); - enc4 = chr3 & 63; - - if (isNaN(chr2)) { - enc3 = enc4 = 64; - } else if (isNaN(chr3)) { - enc4 = 64; - } - - output = output + keyStr.charAt(enc1) + keyStr.charAt(enc2) + - keyStr.charAt(enc3) + keyStr.charAt(enc4); - } while (i < input.length); - - return output; - }, - - /** - * Decodes a base64 string. - * @param {String} input The string to decode. - */ - decode: function (input) { - var output = ""; - var chr1, chr2, chr3; - var enc1, enc2, enc3, enc4; - var i = 0; - - // remove all characters that are not A-Z, a-z, 0-9, +, /, or = - input = input.replace(/[^A-Za-z0-9\+\/\=]/g, ''); - - do { - enc1 = keyStr.indexOf(input.charAt(i++)); - enc2 = keyStr.indexOf(input.charAt(i++)); - enc3 = keyStr.indexOf(input.charAt(i++)); - enc4 = keyStr.indexOf(input.charAt(i++)); - - chr1 = (enc1 << 2) | (enc2 >> 4); - chr2 = ((enc2 & 15) << 4) | (enc3 >> 2); - chr3 = ((enc3 & 3) << 6) | enc4; - - output = output + String.fromCharCode(chr1); - - if (enc3 != 64) { - output = output + String.fromCharCode(chr2); - } - if (enc4 != 64) { - output = output + String.fromCharCode(chr3); - } - } while (i < input.length); - - return output; - } - }; - - return obj; -})(); - -// Nodify -exports.Base64 = Base64; diff --git a/contrib/jitsimeetbridge/unjingle/strophe/md5.js b/contrib/jitsimeetbridge/unjingle/strophe/md5.js deleted file mode 100644 index 5334325e2f..0000000000 --- a/contrib/jitsimeetbridge/unjingle/strophe/md5.js +++ /dev/null @@ -1,279 +0,0 @@ -/* - * A JavaScript implementation of the RSA Data Security, Inc. MD5 Message - * Digest Algorithm, as defined in RFC 1321. - * Version 2.1 Copyright (C) Paul Johnston 1999 - 2002. - * Other contributors: Greg Holt, Andrew Kepert, Ydnar, Lostinet - * Distributed under the BSD License - * See http://pajhome.org.uk/crypt/md5 for more info. - */ - -var MD5 = (function () { - /* - * Configurable variables. You may need to tweak these to be compatible with - * the server-side, but the defaults work in most cases. - */ - var hexcase = 0; /* hex output format. 0 - lowercase; 1 - uppercase */ - var b64pad = ""; /* base-64 pad character. "=" for strict RFC compliance */ - var chrsz = 8; /* bits per input character. 8 - ASCII; 16 - Unicode */ - - /* - * Add integers, wrapping at 2^32. This uses 16-bit operations internally - * to work around bugs in some JS interpreters. 
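// A small aside, not from the original file: the 16-bit split referred to
// here is what safe_add() below implements. On a modern engine the same
// wrap-at-2^32 addition can be written with a single bitwise-or, since
// doubles represent x + y exactly and "| 0" truncates to a signed 32-bit int.
function naive_add32(x, y) {
    return (x + y) | 0;
}
// e.g. naive_add32(0x7fffffff, 1) === -2147483648, matching safe_add below.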
- */ - var safe_add = function (x, y) { - var lsw = (x & 0xFFFF) + (y & 0xFFFF); - var msw = (x >> 16) + (y >> 16) + (lsw >> 16); - return (msw << 16) | (lsw & 0xFFFF); - }; - - /* - * Bitwise rotate a 32-bit number to the left. - */ - var bit_rol = function (num, cnt) { - return (num << cnt) | (num >>> (32 - cnt)); - }; - - /* - * Convert a string to an array of little-endian words - * If chrsz is ASCII, characters >255 have their hi-byte silently ignored. - */ - var str2binl = function (str) { - var bin = []; - var mask = (1 << chrsz) - 1; - for(var i = 0; i < str.length * chrsz; i += chrsz) - { - bin[i>>5] |= (str.charCodeAt(i / chrsz) & mask) << (i%32); - } - return bin; - }; - - /* - * Convert an array of little-endian words to a string - */ - var binl2str = function (bin) { - var str = ""; - var mask = (1 << chrsz) - 1; - for(var i = 0; i < bin.length * 32; i += chrsz) - { - str += String.fromCharCode((bin[i>>5] >>> (i % 32)) & mask); - } - return str; - }; - - /* - * Convert an array of little-endian words to a hex string. - */ - var binl2hex = function (binarray) { - var hex_tab = hexcase ? "0123456789ABCDEF" : "0123456789abcdef"; - var str = ""; - for(var i = 0; i < binarray.length * 4; i++) - { - str += hex_tab.charAt((binarray[i>>2] >> ((i%4)*8+4)) & 0xF) + - hex_tab.charAt((binarray[i>>2] >> ((i%4)*8 )) & 0xF); - } - return str; - }; - - /* - * Convert an array of little-endian words to a base-64 string - */ - var binl2b64 = function (binarray) { - var tab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; - var str = ""; - var triplet, j; - for(var i = 0; i < binarray.length * 4; i += 3) - { - triplet = (((binarray[i >> 2] >> 8 * ( i %4)) & 0xFF) << 16) | - (((binarray[i+1 >> 2] >> 8 * ((i+1)%4)) & 0xFF) << 8 ) | - ((binarray[i+2 >> 2] >> 8 * ((i+2)%4)) & 0xFF); - for(j = 0; j < 4; j++) - { - if(i * 8 + j * 6 > binarray.length * 32) { str += b64pad; } - else { str += tab.charAt((triplet >> 6*(3-j)) & 0x3F); } - } - } - return str; - }; - - /* - * These functions implement the four basic operations the algorithm uses. 
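// For reference (not from the original file): the helpers defined below are
// the four boolean round functions of RFC 1321, wrapped in the shared
// add-and-rotate step md5_cmn. In the RFC's notation:
//   F(X,Y,Z) = (X & Y) | (~X & Z)   -> md5_ff
//   G(X,Y,Z) = (X & Z) | (Y & ~Z)   -> md5_gg
//   H(X,Y,Z) = X ^ Y ^ Z            -> md5_hh
//   I(X,Y,Z) = Y ^ (X | ~Z)         -> md5_ii
// so md5_ff(a, b, c, d, x, s, t) computes b + ((a + F(b,c,d) + x + t) <<< s).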
- */ - var md5_cmn = function (q, a, b, x, s, t) { - return safe_add(bit_rol(safe_add(safe_add(a, q),safe_add(x, t)), s),b); - }; - - var md5_ff = function (a, b, c, d, x, s, t) { - return md5_cmn((b & c) | ((~b) & d), a, b, x, s, t); - }; - - var md5_gg = function (a, b, c, d, x, s, t) { - return md5_cmn((b & d) | (c & (~d)), a, b, x, s, t); - }; - - var md5_hh = function (a, b, c, d, x, s, t) { - return md5_cmn(b ^ c ^ d, a, b, x, s, t); - }; - - var md5_ii = function (a, b, c, d, x, s, t) { - return md5_cmn(c ^ (b | (~d)), a, b, x, s, t); - }; - - /* - * Calculate the MD5 of an array of little-endian words, and a bit length - */ - var core_md5 = function (x, len) { - /* append padding */ - x[len >> 5] |= 0x80 << ((len) % 32); - x[(((len + 64) >>> 9) << 4) + 14] = len; - - var a = 1732584193; - var b = -271733879; - var c = -1732584194; - var d = 271733878; - - var olda, oldb, oldc, oldd; - for (var i = 0; i < x.length; i += 16) - { - olda = a; - oldb = b; - oldc = c; - oldd = d; - - a = md5_ff(a, b, c, d, x[i+ 0], 7 , -680876936); - d = md5_ff(d, a, b, c, x[i+ 1], 12, -389564586); - c = md5_ff(c, d, a, b, x[i+ 2], 17, 606105819); - b = md5_ff(b, c, d, a, x[i+ 3], 22, -1044525330); - a = md5_ff(a, b, c, d, x[i+ 4], 7 , -176418897); - d = md5_ff(d, a, b, c, x[i+ 5], 12, 1200080426); - c = md5_ff(c, d, a, b, x[i+ 6], 17, -1473231341); - b = md5_ff(b, c, d, a, x[i+ 7], 22, -45705983); - a = md5_ff(a, b, c, d, x[i+ 8], 7 , 1770035416); - d = md5_ff(d, a, b, c, x[i+ 9], 12, -1958414417); - c = md5_ff(c, d, a, b, x[i+10], 17, -42063); - b = md5_ff(b, c, d, a, x[i+11], 22, -1990404162); - a = md5_ff(a, b, c, d, x[i+12], 7 , 1804603682); - d = md5_ff(d, a, b, c, x[i+13], 12, -40341101); - c = md5_ff(c, d, a, b, x[i+14], 17, -1502002290); - b = md5_ff(b, c, d, a, x[i+15], 22, 1236535329); - - a = md5_gg(a, b, c, d, x[i+ 1], 5 , -165796510); - d = md5_gg(d, a, b, c, x[i+ 6], 9 , -1069501632); - c = md5_gg(c, d, a, b, x[i+11], 14, 643717713); - b = md5_gg(b, c, d, a, x[i+ 0], 20, -373897302); - a = md5_gg(a, b, c, d, x[i+ 5], 5 , -701558691); - d = md5_gg(d, a, b, c, x[i+10], 9 , 38016083); - c = md5_gg(c, d, a, b, x[i+15], 14, -660478335); - b = md5_gg(b, c, d, a, x[i+ 4], 20, -405537848); - a = md5_gg(a, b, c, d, x[i+ 9], 5 , 568446438); - d = md5_gg(d, a, b, c, x[i+14], 9 , -1019803690); - c = md5_gg(c, d, a, b, x[i+ 3], 14, -187363961); - b = md5_gg(b, c, d, a, x[i+ 8], 20, 1163531501); - a = md5_gg(a, b, c, d, x[i+13], 5 , -1444681467); - d = md5_gg(d, a, b, c, x[i+ 2], 9 , -51403784); - c = md5_gg(c, d, a, b, x[i+ 7], 14, 1735328473); - b = md5_gg(b, c, d, a, x[i+12], 20, -1926607734); - - a = md5_hh(a, b, c, d, x[i+ 5], 4 , -378558); - d = md5_hh(d, a, b, c, x[i+ 8], 11, -2022574463); - c = md5_hh(c, d, a, b, x[i+11], 16, 1839030562); - b = md5_hh(b, c, d, a, x[i+14], 23, -35309556); - a = md5_hh(a, b, c, d, x[i+ 1], 4 , -1530992060); - d = md5_hh(d, a, b, c, x[i+ 4], 11, 1272893353); - c = md5_hh(c, d, a, b, x[i+ 7], 16, -155497632); - b = md5_hh(b, c, d, a, x[i+10], 23, -1094730640); - a = md5_hh(a, b, c, d, x[i+13], 4 , 681279174); - d = md5_hh(d, a, b, c, x[i+ 0], 11, -358537222); - c = md5_hh(c, d, a, b, x[i+ 3], 16, -722521979); - b = md5_hh(b, c, d, a, x[i+ 6], 23, 76029189); - a = md5_hh(a, b, c, d, x[i+ 9], 4 , -640364487); - d = md5_hh(d, a, b, c, x[i+12], 11, -421815835); - c = md5_hh(c, d, a, b, x[i+15], 16, 530742520); - b = md5_hh(b, c, d, a, x[i+ 2], 23, -995338651); - - a = md5_ii(a, b, c, d, x[i+ 0], 6 , -198630844); - d = md5_ii(d, a, b, c, x[i+ 7], 10, 1126891415); - c = 
md5_ii(c, d, a, b, x[i+14], 15, -1416354905); - b = md5_ii(b, c, d, a, x[i+ 5], 21, -57434055); - a = md5_ii(a, b, c, d, x[i+12], 6 , 1700485571); - d = md5_ii(d, a, b, c, x[i+ 3], 10, -1894986606); - c = md5_ii(c, d, a, b, x[i+10], 15, -1051523); - b = md5_ii(b, c, d, a, x[i+ 1], 21, -2054922799); - a = md5_ii(a, b, c, d, x[i+ 8], 6 , 1873313359); - d = md5_ii(d, a, b, c, x[i+15], 10, -30611744); - c = md5_ii(c, d, a, b, x[i+ 6], 15, -1560198380); - b = md5_ii(b, c, d, a, x[i+13], 21, 1309151649); - a = md5_ii(a, b, c, d, x[i+ 4], 6 , -145523070); - d = md5_ii(d, a, b, c, x[i+11], 10, -1120210379); - c = md5_ii(c, d, a, b, x[i+ 2], 15, 718787259); - b = md5_ii(b, c, d, a, x[i+ 9], 21, -343485551); - - a = safe_add(a, olda); - b = safe_add(b, oldb); - c = safe_add(c, oldc); - d = safe_add(d, oldd); - } - return [a, b, c, d]; - }; - - - /* - * Calculate the HMAC-MD5, of a key and some data - */ - var core_hmac_md5 = function (key, data) { - var bkey = str2binl(key); - if(bkey.length > 16) { bkey = core_md5(bkey, key.length * chrsz); } - - var ipad = new Array(16), opad = new Array(16); - for(var i = 0; i < 16; i++) - { - ipad[i] = bkey[i] ^ 0x36363636; - opad[i] = bkey[i] ^ 0x5C5C5C5C; - } - - var hash = core_md5(ipad.concat(str2binl(data)), 512 + data.length * chrsz); - return core_md5(opad.concat(hash), 512 + 128); - }; - - var obj = { - /* - * These are the functions you'll usually want to call. - * They take string arguments and return either hex or base-64 encoded - * strings. - */ - hexdigest: function (s) { - return binl2hex(core_md5(str2binl(s), s.length * chrsz)); - }, - - b64digest: function (s) { - return binl2b64(core_md5(str2binl(s), s.length * chrsz)); - }, - - hash: function (s) { - return binl2str(core_md5(str2binl(s), s.length * chrsz)); - }, - - hmac_hexdigest: function (key, data) { - return binl2hex(core_hmac_md5(key, data)); - }, - - hmac_b64digest: function (key, data) { - return binl2b64(core_hmac_md5(key, data)); - }, - - hmac_hash: function (key, data) { - return binl2str(core_hmac_md5(key, data)); - }, - - /* - * Perform a simple self-test to see if the VM is working - */ - test: function () { - return MD5.hexdigest("abc") === "900150983cd24fb0d6963f7d28e17f72"; - } - }; - - return obj; -})(); - -// Nodify -exports.MD5 = MD5; diff --git a/contrib/jitsimeetbridge/unjingle/strophe/strophe.js b/contrib/jitsimeetbridge/unjingle/strophe/strophe.js deleted file mode 100644 index 06d426cdec..0000000000 --- a/contrib/jitsimeetbridge/unjingle/strophe/strophe.js +++ /dev/null @@ -1,3256 +0,0 @@ -/* - This program is distributed under the terms of the MIT license. - Please see the LICENSE file for details. - - Copyright 2006-2008, OGG, LLC -*/ - -/* jslint configuration: */ -/*global document, window, setTimeout, clearTimeout, console, - XMLHttpRequest, ActiveXObject, - Base64, MD5, - Strophe, $build, $msg, $iq, $pres */ - -/** File: strophe.js - * A JavaScript library for XMPP BOSH. - * - * This is the JavaScript version of the Strophe library. Since JavaScript - * has no facilities for persistent TCP connections, this library uses - * Bidirectional-streams Over Synchronous HTTP (BOSH) to emulate - * a persistent, stateful, two-way connection to an XMPP server. More - * information on BOSH can be found in XEP 124. - */ - -/** PrivateFunction: Function.prototype.bind - * Bind a function to an instance. - * - * This Function object extension method creates a bound method similar - * to those in Python. This means that the 'this' object will point - * to the instance you want. 
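// A minimal sketch of the behaviour described here, not from the original
// file: 'this' inside the bound function is fixed to the given object, and
// arguments passed to bind() are prepended to those of the eventual call.
var counter = { n: 0 };
function addTo(amount, label) { this.n += amount; return label; }
var addOneTo = addTo.bind(counter, 1); // prepends the argument 1
addOneTo('tick');                      // runs with this === counter, amount === 1
console.log(counter.n);                // 1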
See - * MDC's bind() documentation and - * Bound Functions and Function Imports in JavaScript - * for a complete explanation. - * - * This extension already exists in some browsers (namely, Firefox 3), but - * we provide it to support those that don't. - * - * Parameters: - * (Object) obj - The object that will become 'this' in the bound function. - * (Object) argN - An option argument that will be prepended to the - * arguments given for the function call - * - * Returns: - * The bound function. - */ - -/* Make it work on node.js: Nodify - * - * Steps: - * 1. Create the global objects: window, document, Base64, MD5 and XMLHttpRequest - * 2. Use the node-XMLHttpRequest module. - * 3. Use jsdom for the document object - since it supports DOM functions. - * 4. Replace all calls to childNodes with _childNodes (since the former doesn't - * seem to work on jsdom). - * 5. While getting the response from XMLHttpRequest, manually convert the text - * data to XML. - * 6. All calls to nodeName should replaced by nodeName.toLowerCase() since jsdom - * seems to always convert node names to upper case. - * - */ -var XMLHttpRequest = require('./XMLHttpRequest.js').XMLHttpRequest; -var Base64 = require('./base64.js').Base64; -var MD5 = require('./md5.js').MD5; -var jsdom = require("jsdom").jsdom; - -document = jsdom(""), - -window = { - XMLHttpRequest: XMLHttpRequest, - Base64: Base64, - MD5: MD5 -}; - -exports.Strophe = window; - -if (!Function.prototype.bind) { - Function.prototype.bind = function (obj /*, arg1, arg2, ... */) - { - var func = this; - var _slice = Array.prototype.slice; - var _concat = Array.prototype.concat; - var _args = _slice.call(arguments, 1); - - return function () { - return func.apply(obj ? obj : this, - _concat.call(_args, - _slice.call(arguments, 0))); - }; - }; -} - -/** PrivateFunction: Array.prototype.indexOf - * Return the index of an object in an array. - * - * This function is not supplied by some JavaScript implementations, so - * we provide it if it is missing. This code is from: - * http://developer.mozilla.org/En/Core_JavaScript_1.5_Reference:Objects:Array:indexOf - * - * Parameters: - * (Object) elt - The object to look for. - * (Integer) from - The index from which to start looking. (optional). - * - * Returns: - * The index of elt in the array or -1 if not found. - */ -if (!Array.prototype.indexOf) -{ - Array.prototype.indexOf = function(elt /*, from*/) - { - var len = this.length; - - var from = Number(arguments[1]) || 0; - from = (from < 0) ? Math.ceil(from) : Math.floor(from); - if (from < 0) { - from += len; - } - - for (; from < len; from++) { - if (from in this && this[from] === elt) { - return from; - } - } - - return -1; - }; -} - -/* All of the Strophe globals are defined in this special function below so - * that references to the globals become closures. This will ensure that - * on page reload, these references will still be available to callbacks - * that are still executing. - */ - -(function (callback) { -var Strophe; - -/** Function: $build - * Create a Strophe.Builder. - * This is an alias for 'new Strophe.Builder(name, attrs)'. - * - * Parameters: - * (String) name - The root element name. - * (Object) attrs - The attributes for the root element in object notation. - * - * Returns: - * A new Strophe.Builder object. - */ -function $build(name, attrs) { return new Strophe.Builder(name, attrs); } -/** Function: $msg - * Create a Strophe.Builder with a element as the root. 
- * - * Parmaeters: - * (Object) attrs - The element attributes in object notation. - * - * Returns: - * A new Strophe.Builder object. - */ -function $msg(attrs) { return new Strophe.Builder("message", attrs); } -/** Function: $iq - * Create a Strophe.Builder with an element as the root. - * - * Parameters: - * (Object) attrs - The element attributes in object notation. - * - * Returns: - * A new Strophe.Builder object. - */ -function $iq(attrs) { return new Strophe.Builder("iq", attrs); } -/** Function: $pres - * Create a Strophe.Builder with a element as the root. - * - * Parameters: - * (Object) attrs - The element attributes in object notation. - * - * Returns: - * A new Strophe.Builder object. - */ -function $pres(attrs) { return new Strophe.Builder("presence", attrs); } - -/** Class: Strophe - * An object container for all Strophe library functions. - * - * This class is just a container for all the objects and constants - * used in the library. It is not meant to be instantiated, but to - * provide a namespace for library objects, constants, and functions. - */ -Strophe = { - /** Constant: VERSION - * The version of the Strophe library. Unreleased builds will have - * a version of head-HASH where HASH is a partial revision. - */ - VERSION: "@VERSION@", - - /** Constants: XMPP Namespace Constants - * Common namespace constants from the XMPP RFCs and XEPs. - * - * NS.HTTPBIND - HTTP BIND namespace from XEP 124. - * NS.BOSH - BOSH namespace from XEP 206. - * NS.CLIENT - Main XMPP client namespace. - * NS.AUTH - Legacy authentication namespace. - * NS.ROSTER - Roster operations namespace. - * NS.PROFILE - Profile namespace. - * NS.DISCO_INFO - Service discovery info namespace from XEP 30. - * NS.DISCO_ITEMS - Service discovery items namespace from XEP 30. - * NS.MUC - Multi-User Chat namespace from XEP 45. - * NS.SASL - XMPP SASL namespace from RFC 3920. - * NS.STREAM - XMPP Streams namespace from RFC 3920. - * NS.BIND - XMPP Binding namespace from RFC 3920. - * NS.SESSION - XMPP Session namespace from RFC 3920. - */ - NS: { - HTTPBIND: "http://jabber.org/protocol/httpbind", - BOSH: "urn:xmpp:xbosh", - CLIENT: "jabber:client", - AUTH: "jabber:iq:auth", - ROSTER: "jabber:iq:roster", - PROFILE: "jabber:iq:profile", - DISCO_INFO: "http://jabber.org/protocol/disco#info", - DISCO_ITEMS: "http://jabber.org/protocol/disco#items", - MUC: "http://jabber.org/protocol/muc", - SASL: "urn:ietf:params:xml:ns:xmpp-sasl", - STREAM: "http://etherx.jabber.org/streams", - BIND: "urn:ietf:params:xml:ns:xmpp-bind", - SESSION: "urn:ietf:params:xml:ns:xmpp-session", - VERSION: "jabber:iq:version", - STANZAS: "urn:ietf:params:xml:ns:xmpp-stanzas" - }, - - /** Function: addNamespace - * This function is used to extend the current namespaces in - * Strophe.NS. It takes a key and a value with the key being the - * name of the new namespace, with its actual value. - * For example: - * Strophe.addNamespace('PUBSUB', "http://jabber.org/protocol/pubsub"); - * - * Parameters: - * (String) name - The name under which the namespace will be - * referenced under Strophe.NS - * (String) value - The actual namespace. - */ - addNamespace: function (name, value) - { - Strophe.NS[name] = value; - }, - - /** Constants: Connection Status Constants - * Connection status constants for use by the connection handler - * callback. 
- * - * Status.ERROR - An error has occurred - * Status.CONNECTING - The connection is currently being made - * Status.CONNFAIL - The connection attempt failed - * Status.AUTHENTICATING - The connection is authenticating - * Status.AUTHFAIL - The authentication attempt failed - * Status.CONNECTED - The connection has succeeded - * Status.DISCONNECTED - The connection has been terminated - * Status.DISCONNECTING - The connection is currently being terminated - * Status.ATTACHED - The connection has been attached - */ - Status: { - ERROR: 0, - CONNECTING: 1, - CONNFAIL: 2, - AUTHENTICATING: 3, - AUTHFAIL: 4, - CONNECTED: 5, - DISCONNECTED: 6, - DISCONNECTING: 7, - ATTACHED: 8 - }, - - /** Constants: Log Level Constants - * Logging level indicators. - * - * LogLevel.DEBUG - Debug output - * LogLevel.INFO - Informational output - * LogLevel.WARN - Warnings - * LogLevel.ERROR - Errors - * LogLevel.FATAL - Fatal errors - */ - LogLevel: { - DEBUG: 0, - INFO: 1, - WARN: 2, - ERROR: 3, - FATAL: 4 - }, - - /** PrivateConstants: DOM Element Type Constants - * DOM element types. - * - * ElementType.NORMAL - Normal element. - * ElementType.TEXT - Text data element. - */ - ElementType: { - NORMAL: 1, - TEXT: 3 - }, - - /** PrivateConstants: Timeout Values - * Timeout values for error states. These values are in seconds. - * These should not be changed unless you know exactly what you are - * doing. - * - * TIMEOUT - Timeout multiplier. A waiting request will be considered - * failed after Math.floor(TIMEOUT * wait) seconds have elapsed. - * This defaults to 1.1, and with default wait, 66 seconds. - * SECONDARY_TIMEOUT - Secondary timeout multiplier. In cases where - * Strophe can detect early failure, it will consider the request - * failed if it doesn't return after - * Math.floor(SECONDARY_TIMEOUT * wait) seconds have elapsed. - * This defaults to 0.1, and with default wait, 6 seconds. - */ - TIMEOUT: 1.1, - SECONDARY_TIMEOUT: 0.1, - - /** Function: forEachChild - * Map a function over some or all child elements of a given element. - * - * This is a small convenience function for mapping a function over - * some or all of the children of an element. If elemName is null, all - * children will be passed to the function, otherwise only children - * whose tag names match elemName will be passed. - * - * Parameters: - * (XMLElement) elem - The element to operate on. - * (String) elemName - The child element tag name filter. - * (Function) func - The function to apply to each child. This - * function should take a single argument, a DOM element. - */ - forEachChild: function (elem, elemName, func) - { - var i, childNode; - - for (i = 0; i < elem._childNodes.length; i++) { - childNode = elem._childNodes[i]; - if (childNode.nodeType == Strophe.ElementType.NORMAL && - (!elemName || this.isTagEqual(childNode, elemName))) { - func(childNode); - } - } - }, - - /** Function: isTagEqual - * Compare an element's tag name with a string. - * - * This function is case insensitive. - * - * Parameters: - * (XMLElement) el - A DOM element. - * (String) name - The element name. - * - * Returns: - * true if the element's tag name matches _el_, and false - * otherwise. - */ - isTagEqual: function (el, name) - { - return el.tagName.toLowerCase() == name.toLowerCase(); - }, - - /** PrivateVariable: _xmlGenerator - * _Private_ variable that caches a DOM document to - * generate elements. 
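// A minimal sketch (not from the original file) of forEachChild and
// isTagEqual from above, together with xmlElement (defined further down),
// assuming the jsdom-based environment set up in the Nodify preamble, where
// _childNodes stands in for childNodes. forEachChild() visits only element
// children whose tag name passes the isTagEqual() filter.
var stanza = Strophe.xmlElement('message');
stanza.appendChild(Strophe.xmlElement('body'));
stanza.appendChild(Strophe.xmlElement('body'));
var count = 0;
Strophe.forEachChild(stanza, 'body', function (child) {
    count++; // called once per matching body child
});
// count is now 2.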
- */
-    _xmlGenerator: null,
-
-    /** PrivateFunction: _makeGenerator
-     * _Private_ function that creates a dummy XML DOM document to serve as
-     * an element and text node generator.
-     */
-    _makeGenerator: function () {
-        var doc;
-
-        if (window.ActiveXObject) {
-            doc = this._getIEXmlDom();
-            doc.appendChild(doc.createElement('strophe'));
-        } else {
-            doc = document.implementation
-                .createDocument('jabber:client', 'strophe', null);
-        }
-
-        return doc;
-    },
-
-    /** Function: xmlGenerator
-     * Get the DOM document to generate elements.
-     *
-     * Returns:
-     * The currently used DOM document.
-     */
-    xmlGenerator: function () {
-        if (!Strophe._xmlGenerator) {
-            Strophe._xmlGenerator = Strophe._makeGenerator();
-        }
-        return Strophe._xmlGenerator;
-    },
-
-    /** PrivateFunction: _getIEXmlDom
-     * Gets IE xml doc object
-     *
-     * Returns:
-     * A Microsoft XML DOM Object
-     * See Also:
-     * http://msdn.microsoft.com/en-us/library/ms757837%28VS.85%29.aspx
-     */
-    _getIEXmlDom : function() {
-        var doc = null;
-        var docStrings = [
-            "Msxml2.DOMDocument.6.0",
-            "Msxml2.DOMDocument.5.0",
-            "Msxml2.DOMDocument.4.0",
-            "MSXML2.DOMDocument.3.0",
-            "MSXML2.DOMDocument",
-            "MSXML.DOMDocument",
-            "Microsoft.XMLDOM"
-        ];
-
-        for (var d = 0; d < docStrings.length; d++) {
-            if (doc === null) {
-                try {
-                    doc = new ActiveXObject(docStrings[d]);
-                } catch (e) {
-                    doc = null;
-                }
-            } else {
-                break;
-            }
-        }
-
-        return doc;
-    },
-
-    /** Function: xmlElement
-     * Create an XML DOM element.
-     *
-     * This function creates an XML DOM element correctly across all
-     * implementations. Note that these are not HTML DOM elements, which
-     * aren't appropriate for XMPP stanzas.
-     *
-     * Parameters:
-     * (String) name - The name for the element.
-     * (Array|Object) attrs - An optional array or object containing
-     * key/value pairs to use as element attributes. The object should
-     * be in the format {'key': 'value'} or {key: 'value'}. The array
-     * should have the format [['key1', 'value1'], ['key2', 'value2']].
-     * (String) text - The text child data for the element.
-     *
-     * Returns:
-     * A new XML DOM element.
-     */
-    xmlElement: function (name)
-    {
-        if (!name) { return null; }
-
-        var node = Strophe.xmlGenerator().createElement(name);
-
-        // FIXME: this should throw errors if args are the wrong type or
-        // there are more than two optional args
-        var a, i, k;
-        for (a = 1; a < arguments.length; a++) {
-            if (!arguments[a]) { continue; }
-            if (typeof(arguments[a]) == "string" ||
-                typeof(arguments[a]) == "number") {
-                node.appendChild(Strophe.xmlTextNode(arguments[a]));
-            } else if (typeof(arguments[a]) == "object" &&
-                       typeof(arguments[a].sort) == "function") {
-                for (i = 0; i < arguments[a].length; i++) {
-                    if (typeof(arguments[a][i]) == "object" &&
-                        typeof(arguments[a][i].sort) == "function") {
-                        node.setAttribute(arguments[a][i][0],
-                                          arguments[a][i][1]);
-                    }
-                }
-            } else if (typeof(arguments[a]) == "object") {
-                for (k in arguments[a]) {
-                    if (arguments[a].hasOwnProperty(k)) {
-                        node.setAttribute(k, arguments[a][k]);
-                    }
-                }
-            }
-        }
-
-        return node;
-    },
-
-    /*  Function: xmlescape
-     * Excapes invalid xml characters.
-     *
-     * Parameters:
-     * (String) text - text to escape.
-     *
-     * Returns:
-     * Escaped text.
-     */
-    xmlescape: function(text)
-    {
-        text = text.replace(/\&/g, "&amp;");
-        text = text.replace(/</g, "&lt;");
-        text = text.replace(/>/g, "&gt;");
-        return text;
-    },
-
-    /** Function: xmlTextNode
-     * Creates an XML DOM text node.
-     *
-     * Provides a cross implementation version of document.createTextNode.
-     *
-     * Parameters:
-     * (String) text - The content of the text node.
-     *
-     * Returns:
-     * A new XML DOM text node.
-     */
-    xmlTextNode: function (text)
-    {
-        //ensure text is escaped
-        text = Strophe.xmlescape(text);
-
-        return Strophe.xmlGenerator().createTextNode(text);
-    },
-
-    /** Function: getText
-     * Get the concatenation of all text children of an element.
-     *
-     * Parameters:
-     * (XMLElement) elem - A DOM element.
-     *
-     * Returns:
-     * A String with the concatenated text of all text element children.
-     */
-    getText: function (elem)
-    {
-        if (!elem) { return null; }
-
-        var str = "";
-        if (elem._childNodes.length === 0 && elem.nodeType ==
-            Strophe.ElementType.TEXT) {
-            str += elem.nodeValue;
-        }
-
-        for (var i = 0; i < elem._childNodes.length; i++) {
-            if (elem._childNodes[i].nodeType == Strophe.ElementType.TEXT) {
-                str += elem._childNodes[i].nodeValue;
-            }
-        }
-
-        return str;
-    },
-
-    /** Function: copyElement
-     * Copy an XML DOM element.
-     *
-     * This function copies a DOM element and all its descendants and returns
-     * the new copy.
-     *
-     * Parameters:
-     * (XMLElement) elem - A DOM element.
-     *
-     * Returns:
-     * A new, copied DOM element tree.
-     */
-    copyElement: function (elem)
-    {
-        var i, el;
-        if (elem.nodeType == Strophe.ElementType.NORMAL) {
-            el = Strophe.xmlElement(elem.tagName);
-
-            for (i = 0; i < elem.attributes.length; i++) {
-                el.setAttribute(elem.attributes[i].nodeName.toLowerCase(),
-                                elem.attributes[i].value);
-            }
-
-            for (i = 0; i < elem._childNodes.length; i++) {
-                el.appendChild(Strophe.copyElement(elem._childNodes[i]));
-            }
-        } else if (elem.nodeType == Strophe.ElementType.TEXT) {
-            el = Strophe.xmlTextNode(elem.nodeValue);
-        }
-
-        return el;
-    },
-
-    /** Function: escapeNode
-     * Escape the node part (also called local part) of a JID.
-     *
-     * Parameters:
-     * (String) node - A node (or local part).
-     *
-     * Returns:
-     * An escaped node (or local part).
-     */
-    escapeNode: function (node)
-    {
-        return node.replace(/^\s+|\s+$/g, '')
-            .replace(/\\/g, "\\5c")
-            .replace(/ /g, "\\20")
-            .replace(/\"/g, "\\22")
-            .replace(/\&/g, "\\26")
-            .replace(/\'/g, "\\27")
-            .replace(/\//g, "\\2f")
-            .replace(/:/g, "\\3a")
-            .replace(/</g, "\\3c")
-            .replace(/>/g, "\\3e")
-            .replace(/@/g, "\\40");
-    },
-
-    /** Function: unescapeNode
-     * Unescape a node part (also called local part) of a JID.
-     *
-     * Parameters:
-     * (String) node - A node (or local part).
-     *
-     * Returns:
-     * An unescaped node (or local part).
-     */
-    unescapeNode: function (node)
-    {
-        return node.replace(/\\20/g, " ")
-            .replace(/\\22/g, '"')
-            .replace(/\\26/g, "&")
-            .replace(/\\27/g, "'")
-            .replace(/\\2f/g, "/")
-            .replace(/\\3a/g, ":")
-            .replace(/\\3c/g, "<")
-            .replace(/\\3e/g, ">")
-            .replace(/\\40/g, "@")
-            .replace(/\\5c/g, "\\");
-    },
-
-    /** Function: getNodeFromJid
-     * Get the node portion of a JID String.
-     *
-     * Parameters:
-     * (String) jid - A JID.
-     *
-     * Returns:
-     * A String containing the node.
-     */
-    getNodeFromJid: function (jid)
-    {
-        if (jid.indexOf("@") < 0) { return null; }
-        return jid.split("@")[0];
-    },
-
-    /** Function: getDomainFromJid
-     * Get the domain portion of a JID String.
-     *
-     * Parameters:
-     * (String) jid - A JID.
-     *
-     * Returns:
-     * A String containing the domain.
-     */
-    getDomainFromJid: function (jid)
-    {
-        var bare = Strophe.getBareJidFromJid(jid);
-        if (bare.indexOf("@") < 0) {
-            return bare;
-        } else {
-            var parts = bare.split("@");
-            parts.splice(0, 1);
-            return parts.join('@');
-        }
-    },
-
-    /** Function: getResourceFromJid
-     * Get the resource portion of a JID String.
-     *
-     * Parameters:
-     * (String) jid - A JID.
- * - * Returns: - * A String containing the resource. - */ - getResourceFromJid: function (jid) - { - var s = jid.split("/"); - if (s.length < 2) { return null; } - s.splice(0, 1); - return s.join('/'); - }, - - /** Function: getBareJidFromJid - * Get the bare JID from a JID String. - * - * Parameters: - * (String) jid - A JID. - * - * Returns: - * A String containing the bare JID. - */ - getBareJidFromJid: function (jid) - { - return jid ? jid.split("/")[0] : null; - }, - - /** Function: log - * User overrideable logging function. - * - * This function is called whenever the Strophe library calls any - * of the logging functions. The default implementation of this - * function does nothing. If client code wishes to handle the logging - * messages, it should override this with - * > Strophe.log = function (level, msg) { - * > (user code here) - * > }; - * - * Please note that data sent and received over the wire is logged - * via Strophe.Connection.rawInput() and Strophe.Connection.rawOutput(). - * - * The different levels and their meanings are - * - * DEBUG - Messages useful for debugging purposes. - * INFO - Informational messages. This is mostly information like - * 'disconnect was called' or 'SASL auth succeeded'. - * WARN - Warnings about potential problems. This is mostly used - * to report transient connection errors like request timeouts. - * ERROR - Some error occurred. - * FATAL - A non-recoverable fatal error occurred. - * - * Parameters: - * (Integer) level - The log level of the log message. This will - * be one of the values in Strophe.LogLevel. - * (String) msg - The log message. - */ - log: function (level, msg) - { - return; - }, - - /** Function: debug - * Log a message at the Strophe.LogLevel.DEBUG level. - * - * Parameters: - * (String) msg - The log message. - */ - debug: function(msg) - { - this.log(this.LogLevel.DEBUG, msg); - }, - - /** Function: info - * Log a message at the Strophe.LogLevel.INFO level. - * - * Parameters: - * (String) msg - The log message. - */ - info: function (msg) - { - this.log(this.LogLevel.INFO, msg); - }, - - /** Function: warn - * Log a message at the Strophe.LogLevel.WARN level. - * - * Parameters: - * (String) msg - The log message. - */ - warn: function (msg) - { - this.log(this.LogLevel.WARN, msg); - }, - - /** Function: error - * Log a message at the Strophe.LogLevel.ERROR level. - * - * Parameters: - * (String) msg - The log message. - */ - error: function (msg) - { - this.log(this.LogLevel.ERROR, msg); - }, - - /** Function: fatal - * Log a message at the Strophe.LogLevel.FATAL level. - * - * Parameters: - * (String) msg - The log message. - */ - fatal: function (msg) - { - this.log(this.LogLevel.FATAL, msg); - }, - - /** Function: serialize - * Render a DOM element and all descendants to a String. - * - * Parameters: - * (XMLElement) elem - A DOM element. - * - * Returns: - * The serialized element tree as a String. 
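// A usage sketch for serialize(), not from the original file; the exact
// attribute ordering may vary. Builders are unwrapped via their tree()
// method, and attribute values are XML-escaped by the .replace() calls in
// the implementation below.
var stanza = $msg({to: 'you@example.com'}).c('body').t('hi');
console.log(Strophe.serialize(stanza));
// -> roughly: <message to='you@example.com' xmlns='jabber:client'><body>hi</body></message>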
- */
-    serialize: function (elem)
-    {
-        var result;
-
-        if (!elem) { return null; }
-
-        if (typeof(elem.tree) === "function") {
-            elem = elem.tree();
-        }
-
-        var nodeName = elem.nodeName.toLowerCase();
-        var i, child;
-
-        if (elem.getAttribute("_realname")) {
-            nodeName = elem.getAttribute("_realname").toLowerCase();
-        }
-
-        result = "<" + nodeName.toLowerCase();
-        for (i = 0; i < elem.attributes.length; i++) {
-            if(elem.attributes[i].nodeName.toLowerCase() != "_realname") {
-                result += " " + elem.attributes[i].nodeName.toLowerCase() +
-                    "='" + elem.attributes[i].value
-                    .replace(/&/g, "&amp;")
-                    .replace(/\'/g, "&apos;")
-                    .replace(/</g, "&lt;") + "'";
-            }
-        }
-
-        if (elem._childNodes.length > 0) {
-            result += ">";
-            for (i = 0; i < elem._childNodes.length; i++) {
-                child = elem._childNodes[i];
-                if (child.nodeType == Strophe.ElementType.NORMAL) {
-                    // normal element, so recurse
-                    result += Strophe.serialize(child);
-                } else if (child.nodeType == Strophe.ElementType.TEXT) {
-                    // text element
-                    result += child.nodeValue;
-                }
-            }
-            result += "</" + nodeName + ">";
-        } else {
-            result += "/>";
-        }
-
-        return result;
-    },
-
-    /** PrivateVariable: _requestId
-     * _Private_ variable that keeps track of the request ids for
-     * connections.
-     */
-    _requestId: 0,
-
-    /** PrivateVariable: Strophe.connectionPlugins
-     * _Private_ variable Used to store plugin names that need
-     * initialization on Strophe.Connection construction.
-     */
-    _connectionPlugins: {},
-
-    /** Function: addConnectionPlugin
-     * Extends the Strophe.Connection object with the given plugin.
-     *
-     * Paramaters:
-     * (String) name - The name of the extension.
-     * (Object) ptype - The plugin's prototype.
-     */
-    addConnectionPlugin: function (name, ptype)
-    {
-        Strophe._connectionPlugins[name] = ptype;
-    }
-};
-
-/** Class: Strophe.Builder
- * XML DOM builder.
- *
- * This object provides an interface similar to JQuery but for building
- * DOM element easily and rapidly. All the functions except for toString()
- * and tree() return the object, so calls can be chained. Here's an
- * example using the $iq() builder helper.
- * > $iq({to: 'you', from: 'me', type: 'get', id: '1'})
- * >     .c('query', {xmlns: 'strophe:example'})
- * >     .c('example')
- * >     .toString()
- * The above generates this XML fragment
- * > <iq to='you' from='me' type='get' id='1'>
- * >   <query xmlns='strophe:example'>
- * >     <example/>
- * >   </query>
- * > </iq>
- * The corresponding DOM manipulations to get a similar fragment would be
- * a lot more tedious and probably involve several helper variables.
- *
- * Since adding children makes new operations operate on the child, up()
- * is provided to traverse up the tree. To add two children, do
- * > builder.c('child1', ...).up().c('child2', ...)
- * The next operation on the Builder will be relative to the second child.
- */
-
-/** Constructor: Strophe.Builder
- * Create a Strophe.Builder object.
- *
- * The attributes should be passed in object notation. For example
- * > var b = new Builder('message', {to: 'you', from: 'me'});
- * or
- * > var b = new Builder('messsage', {'xml:lang': 'en'});
- *
- * Parameters:
- * (String) name - The name of the root element.
- * (Object) attrs - The attributes for the root element in object notation.
- *
- * Returns:
- * A new Strophe.Builder.
- */
-Strophe.Builder = function (name, attrs)
-{
-    // Set correct namespace for jabber:client elements
-    if (name == "presence" || name == "message" || name == "iq") {
-        if (attrs && !attrs.xmlns) {
-            attrs.xmlns = Strophe.NS.CLIENT;
-        } else if (!attrs) {
-            attrs = {xmlns: Strophe.NS.CLIENT};
-        }
-    }
-
-    // Holds the tree being built.
- this.nodeTree = Strophe.xmlElement(name, attrs); - - // Points to the current operation node. - this.node = this.nodeTree; -}; - -Strophe.Builder.prototype = { - /** Function: tree - * Return the DOM tree. - * - * This function returns the current DOM tree as an element object. This - * is suitable for passing to functions like Strophe.Connection.send(). - * - * Returns: - * The DOM tree as a element object. - */ - tree: function () - { - return this.nodeTree; - }, - - /** Function: toString - * Serialize the DOM tree to a String. - * - * This function returns a string serialization of the current DOM - * tree. It is often used internally to pass data to a - * Strophe.Request object. - * - * Returns: - * The serialized DOM tree in a String. - */ - toString: function () - { - return Strophe.serialize(this.nodeTree); - }, - - /** Function: up - * Make the current parent element the new current element. - * - * This function is often used after c() to traverse back up the tree. - * For example, to add two children to the same element - * > builder.c('child1', {}).up().c('child2', {}); - * - * Returns: - * The Stophe.Builder object. - */ - up: function () - { - this.node = this.node.parentNode; - return this; - }, - - /** Function: attrs - * Add or modify attributes of the current element. - * - * The attributes should be passed in object notation. This function - * does not move the current element pointer. - * - * Parameters: - * (Object) moreattrs - The attributes to add/modify in object notation. - * - * Returns: - * The Strophe.Builder object. - */ - attrs: function (moreattrs) - { - for (var k in moreattrs) { - if (moreattrs.hasOwnProperty(k)) { - this.node.setAttribute(k, moreattrs[k]); - } - } - return this; - }, - - /** Function: c - * Add a child to the current element and make it the new current - * element. - * - * This function moves the current element pointer to the child. If you - * need to add another child, it is necessary to use up() to go back - * to the parent in the tree. - * - * Parameters: - * (String) name - The name of the child. - * (Object) attrs - The attributes of the child in object notation. - * - * Returns: - * The Strophe.Builder object. - */ - c: function (name, attrs) - { - var child = Strophe.xmlElement(name, attrs); - this.node.appendChild(child); - this.node = child; - return this; - }, - - /** Function: cnode - * Add a child to the current element and make it the new current - * element. - * - * This function is the same as c() except that instead of using a - * name and an attributes object to create the child it uses an - * existing DOM element object. - * - * Parameters: - * (XMLElement) elem - A DOM element. - * - * Returns: - * The Strophe.Builder object. - */ - cnode: function (elem) - { - var xmlGen = Strophe.xmlGenerator(); - var newElem = xmlGen.importNode ? xmlGen.importNode(elem, true) : Strophe.copyElement(elem); - this.node.appendChild(newElem); - this.node = newElem; - return this; - }, - - /** Function: t - * Add a child text element. - * - * This *does not* make the child the new current element since there - * are no children of text elements. - * - * Parameters: - * (String) text - The text data to append to the current element. - * - * Returns: - * The Strophe.Builder object. - */ - t: function (text) - { - var child = Strophe.xmlTextNode(text); - this.node.appendChild(child); - return this; - } -}; - - -/** PrivateClass: Strophe.Handler - * _Private_ helper class for managing stanza handlers. 
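// A minimal chaining sketch for the Builder methods defined just above, not
// from the original file: c() descends into the new child, up() climbs back
// to the parent, and t() stays on the current element.
var iq = $iq({to: 'you', type: 'get', id: '1'})
    .c('query', {xmlns: 'strophe:example'}) // current node: <query/>
    .c('example').up()                      // back to <query/>
    .c('example2');                         // sibling of <example/>
console.log(iq.toString());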
- * - * A Strophe.Handler encapsulates a user provided callback function to be - * executed when matching stanzas are received by the connection. - * Handlers can be either one-off or persistant depending on their - * return value. Returning true will cause a Handler to remain active, and - * returning false will remove the Handler. - * - * Users will not use Strophe.Handler objects directly, but instead they - * will use Strophe.Connection.addHandler() and - * Strophe.Connection.deleteHandler(). - */ - -/** PrivateConstructor: Strophe.Handler - * Create and initialize a new Strophe.Handler. - * - * Parameters: - * (Function) handler - A function to be executed when the handler is run. - * (String) ns - The namespace to match. - * (String) name - The element name to match. - * (String) type - The element type to match. - * (String) id - The element id attribute to match. - * (String) from - The element from attribute to match. - * (Object) options - Handler options - * - * Returns: - * A new Strophe.Handler object. - */ -Strophe.Handler = function (handler, ns, name, type, id, from, options) -{ - this.handler = handler; - this.ns = ns; - this.name = name; - this.type = type; - this.id = id; - this.options = options || {matchbare: false}; - - // default matchBare to false if undefined - if (!this.options.matchBare) { - this.options.matchBare = false; - } - - if (this.options.matchBare) { - this.from = from ? Strophe.getBareJidFromJid(from) : null; - } else { - this.from = from; - } - - // whether the handler is a user handler or a system handler - this.user = true; -}; - -Strophe.Handler.prototype = { - /** PrivateFunction: isMatch - * Tests if a stanza matches the Strophe.Handler. - * - * Parameters: - * (XMLElement) elem - The XML element to test. - * - * Returns: - * true if the stanza matches and false otherwise. - */ - isMatch: function (elem) - { - var nsMatch; - var from = null; - - if (this.options.matchBare) { - from = Strophe.getBareJidFromJid(elem.getAttribute('from')); - } else { - from = elem.getAttribute('from'); - } - - nsMatch = false; - if (!this.ns) { - nsMatch = true; - } else { - var that = this; - Strophe.forEachChild(elem, null, function (elem) { - if (elem.getAttribute("xmlns") == that.ns) { - nsMatch = true; - } - }); - - nsMatch = nsMatch || elem.getAttribute("xmlns") == this.ns; - } - - if (nsMatch && - (!this.name || Strophe.isTagEqual(elem, this.name)) && - (!this.type || elem.getAttribute("type") == this.type) && - (!this.id || elem.getAttribute("id") == this.id) && - (!this.from || from == this.from)) { - return true; - } - - return false; - }, - - /** PrivateFunction: run - * Run the callback on a matching stanza. - * - * Parameters: - * (XMLElement) elem - The DOM element that triggered the - * Strophe.Handler. - * - * Returns: - * A boolean indicating if the handler should remain active. 
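// A sketch of the user-facing side of this matcher, not from the original
// file and assuming an existing Strophe.Connection named "connection": a
// handler added via addHandler() keeps firing for as long as its callback
// returns true, exactly as described above.
connection.addHandler(function (msg) {
    console.log('chat message from ' + msg.getAttribute('from'));
    return true; // stay registered for the next matching stanza
}, null, 'message', 'chat'); // null namespace matches any; type must be "chat"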
- */ - run: function (elem) - { - var result = null; - try { - result = this.handler(elem); - } catch (e) { - if (e.sourceURL) { - Strophe.fatal("error: " + this.handler + - " " + e.sourceURL + ":" + - e.line + " - " + e.name + ": " + e.message); - } else if (e.fileName) { - if (typeof(console) != "undefined") { - console.trace(); - console.error(this.handler, " - error - ", e, e.message); - } - Strophe.fatal("error: " + this.handler + " " + - e.fileName + ":" + e.lineNumber + " - " + - e.name + ": " + e.message); - } else { - Strophe.fatal("error: " + this.handler); - } - - throw e; - } - - return result; - }, - - /** PrivateFunction: toString - * Get a String representation of the Strophe.Handler object. - * - * Returns: - * A String. - */ - toString: function () - { - return "{Handler: " + this.handler + "(" + this.name + "," + - this.id + "," + this.ns + ")}"; - } -}; - -/** PrivateClass: Strophe.TimedHandler - * _Private_ helper class for managing timed handlers. - * - * A Strophe.TimedHandler encapsulates a user provided callback that - * should be called after a certain period of time or at regular - * intervals. The return value of the callback determines whether the - * Strophe.TimedHandler will continue to fire. - * - * Users will not use Strophe.TimedHandler objects directly, but instead - * they will use Strophe.Connection.addTimedHandler() and - * Strophe.Connection.deleteTimedHandler(). - */ - -/** PrivateConstructor: Strophe.TimedHandler - * Create and initialize a new Strophe.TimedHandler object. - * - * Parameters: - * (Integer) period - The number of milliseconds to wait before the - * handler is called. - * (Function) handler - The callback to run when the handler fires. This - * function should take no arguments. - * - * Returns: - * A new Strophe.TimedHandler object. - */ -Strophe.TimedHandler = function (period, handler) -{ - this.period = period; - this.handler = handler; - - this.lastCalled = new Date().getTime(); - this.user = true; -}; - -Strophe.TimedHandler.prototype = { - /** PrivateFunction: run - * Run the callback for the Strophe.TimedHandler. - * - * Returns: - * true if the Strophe.TimedHandler should be called again, and false - * otherwise. - */ - run: function () - { - this.lastCalled = new Date().getTime(); - return this.handler(); - }, - - /** PrivateFunction: reset - * Reset the last called time for the Strophe.TimedHandler. - */ - reset: function () - { - this.lastCalled = new Date().getTime(); - }, - - /** PrivateFunction: toString - * Get a string representation of the Strophe.TimedHandler object. - * - * Returns: - * The string representation. - */ - toString: function () - { - return "{TimedHandler: " + this.handler + "(" + this.period +")}"; - } -}; - -/** PrivateClass: Strophe.Request - * _Private_ helper class that provides a cross implementation abstraction - * for a BOSH related XMLHttpRequest. - * - * The Strophe.Request class is used internally to encapsulate BOSH request - * information. It is not meant to be used from user's code. - */ - -/** PrivateConstructor: Strophe.Request - * Create and initialize a new Strophe.Request object. - * - * Parameters: - * (XMLElement) elem - The XML data to be sent in the request. - * (Function) func - The function that will be called when the - * XMLHttpRequest readyState changes. - * (Integer) rid - The BOSH rid attribute associated with this request. - * (Integer) sends - The number of times this same request has been - * sent. 
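// A matching sketch for the timed-handler machinery above, again assuming an
// existing "connection" object: the callback re-arms itself by returning
// true and cancels itself by returning false.
var ticks = 0;
connection.addTimedHandler(10000, function () {
    ticks += 1;
    return ticks < 3; // fire roughly every 10 seconds, three times in total
});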
- */ -Strophe.Request = function (elem, func, rid, sends) -{ - this.id = ++Strophe._requestId; - this.xmlData = elem; - this.data = Strophe.serialize(elem); - // save original function in case we need to make a new request - // from this one. - this.origFunc = func; - this.func = func; - this.rid = rid; - this.date = NaN; - this.sends = sends || 0; - this.abort = false; - this.dead = null; - this.age = function () { - if (!this.date) { return 0; } - var now = new Date(); - return (now - this.date) / 1000; - }; - this.timeDead = function () { - if (!this.dead) { return 0; } - var now = new Date(); - return (now - this.dead) / 1000; - }; - this.xhr = this._newXHR(); -}; - -Strophe.Request.prototype = { - /** PrivateFunction: getResponse - * Get a response from the underlying XMLHttpRequest. - * - * This function attempts to get a response from the request and checks - * for errors. - * - * Throws: - * "parsererror" - A parser error occured. - * - * Returns: - * The DOM element tree of the response. - */ - getResponse: function () - { - // console.log("getResponse:", this.xhr.responseXML, ":", this.xhr.responseText); - - var node = null; - if (this.xhr.responseXML && this.xhr.responseXML.documentElement) { - node = this.xhr.responseXML.documentElement; - if (node.tagName == "parsererror") { - Strophe.error("invalid response received"); - Strophe.error("responseText: " + this.xhr.responseText); - Strophe.error("responseXML: " + - Strophe.serialize(this.xhr.responseXML)); - throw "parsererror"; - } - } else if (this.xhr.responseText) { - // Hack for node. - var _div = document.createElement("div"); - _div.innerHTML = this.xhr.responseText; - node = _div._childNodes[0]; - - Strophe.error("invalid response received"); - Strophe.error("responseText: " + this.xhr.responseText); - Strophe.error("responseXML: " + - Strophe.serialize(this.xhr.responseXML)); - } - - return node; - }, - - /** PrivateFunction: _newXHR - * _Private_ helper function to create XMLHttpRequests. - * - * This function creates XMLHttpRequests across all implementations. - * - * Returns: - * A new XMLHttpRequest. - */ - _newXHR: function () - { - var xhr = null; - if (window.XMLHttpRequest) { - xhr = new XMLHttpRequest(); - if (xhr.overrideMimeType) { - xhr.overrideMimeType("text/xml"); - } - } else if (window.ActiveXObject) { - xhr = new ActiveXObject("Microsoft.XMLHTTP"); - } - - // use Function.bind() to prepend ourselves as an argument - xhr.onreadystatechange = this.func.bind(null, this); - - return xhr; - } -}; - -/** Class: Strophe.Connection - * XMPP Connection manager. - * - * Thie class is the main part of Strophe. It manages a BOSH connection - * to an XMPP server and dispatches events to the user callbacks as - * data arrives. It supports SASL PLAIN, SASL DIGEST-MD5, and legacy - * authentication. - * - * After creating a Strophe.Connection object, the user will typically - * call connect() with a user supplied callback to handle connection level - * events like authentication failure, disconnection, or connection - * complete. - * - * The user will also have several event handlers defined by using - * addHandler() and addTimedHandler(). These will allow the user code to - * respond to interesting stanzas or do something periodically with the - * connection. These handlers will be active once authentication is - * finished. - * - * To send data to the connection, use send(). - */ - -/** Constructor: Strophe.Connection - * Create and initialize a Strophe.Connection object. 
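// A minimal end-to-end sketch, not from the original file (the BOSH URL and
// credentials are illustrative): the connect callback receives the Status
// constants defined earlier as the session progresses.
var conn = new Strophe.Connection('http://localhost:5280/http-bind');
conn.connect('me@example.com', 'secret', function (status) {
    if (status === Strophe.Status.CONNECTED) {
        conn.send($pres().tree()); // announce presence once authenticated
    } else if (status === Strophe.Status.DISCONNECTED) {
        console.log('connection closed');
    }
});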
- * - * Parameters: - * (String) service - The BOSH service URL. - * - * Returns: - * A new Strophe.Connection object. - */ -Strophe.Connection = function (service) -{ - /* The path to the httpbind service. */ - this.service = service; - /* The connected JID. */ - this.jid = ""; - /* request id for body tags */ - this.rid = Math.floor(Math.random() * 4294967295); - /* The current session ID. */ - this.sid = null; - this.streamId = null; - /* stream:features */ - this.features = null; - - // SASL - this.do_session = false; - this.do_bind = false; - - // handler lists - this.timedHandlers = []; - this.handlers = []; - this.removeTimeds = []; - this.removeHandlers = []; - this.addTimeds = []; - this.addHandlers = []; - - this._idleTimeout = null; - this._disconnectTimeout = null; - - this.authenticated = false; - this.disconnecting = false; - this.connected = false; - - this.errors = 0; - - this.paused = false; - - // default BOSH values - this.hold = 1; - this.wait = 60; - this.window = 5; - - this._data = []; - this._requests = []; - this._uniqueId = Math.round(Math.random() * 10000); - - this._sasl_success_handler = null; - this._sasl_failure_handler = null; - this._sasl_challenge_handler = null; - - // setup onIdle callback every 1/10th of a second - this._idleTimeout = setTimeout(this._onIdle.bind(this), 100); - - // initialize plugins - for (var k in Strophe._connectionPlugins) { - if (Strophe._connectionPlugins.hasOwnProperty(k)) { - var ptype = Strophe._connectionPlugins[k]; - // jslint complaints about the below line, but this is fine - var F = function () {}; - F.prototype = ptype; - this[k] = new F(); - this[k].init(this); - } - } -}; - -Strophe.Connection.prototype = { - /** Function: reset - * Reset the connection. - * - * This function should be called after a connection is disconnected - * before that connection is reused. - */ - reset: function () - { - this.rid = Math.floor(Math.random() * 4294967295); - - this.sid = null; - this.streamId = null; - - // SASL - this.do_session = false; - this.do_bind = false; - - // handler lists - this.timedHandlers = []; - this.handlers = []; - this.removeTimeds = []; - this.removeHandlers = []; - this.addTimeds = []; - this.addHandlers = []; - - this.authenticated = false; - this.disconnecting = false; - this.connected = false; - - this.errors = 0; - - this._requests = []; - this._uniqueId = Math.round(Math.random()*10000); - }, - - /** Function: pause - * Pause the request manager. - * - * This will prevent Strophe from sending any more requests to the - * server. This is very useful for temporarily pausing while a lot - * of send() calls are happening quickly. This causes Strophe to - * send the data in a single request, saving many request trips. - */ - pause: function () - { - this.paused = true; - }, - - /** Function: resume - * Resume the request manager. - * - * This resumes after pause() has been called. - */ - resume: function () - { - this.paused = false; - }, - - /** Function: getUniqueId - * Generate a unique ID for use in elements. - * - * All stanzas are required to have unique id attributes. This - * function makes creating these easy. Each connection instance has - * a counter which starts from zero, and the value of this counter - * plus a colon followed by the suffix becomes the unique id. If no - * suffix is supplied, the counter is used as the unique id. - * - * Suffixes are used to make debugging easier when reading the stream - * data, and their use is recommended. 
The counter resets to 0 for - * every new connection for the same reason. For connections to the - * same server that authenticate the same way, all the ids should be - * the same, which makes it easy to see changes. This is useful for - * automated testing as well. - * - * Parameters: - * (String) suffix - A optional suffix to append to the id. - * - * Returns: - * A unique string to be used for the id attribute. - */ - getUniqueId: function (suffix) - { - if (typeof(suffix) == "string" || typeof(suffix) == "number") { - return ++this._uniqueId + ":" + suffix; - } else { - return ++this._uniqueId + ""; - } - }, - - /** Function: connect - * Starts the connection process. - * - * As the connection process proceeds, the user supplied callback will - * be triggered multiple times with status updates. The callback - * should take two arguments - the status code and the error condition. - * - * The status code will be one of the values in the Strophe.Status - * constants. The error condition will be one of the conditions - * defined in RFC 3920 or the condition 'strophe-parsererror'. - * - * Please see XEP 124 for a more detailed explanation of the optional - * parameters below. - * - * Parameters: - * (String) jid - The user's JID. This may be a bare JID, - * or a full JID. If a node is not supplied, SASL ANONYMOUS - * authentication will be attempted. - * (String) pass - The user's password. - * (Function) callback The connect callback function. - * (Integer) wait - The optional HTTPBIND wait value. This is the - * time the server will wait before returning an empty result for - * a request. The default setting of 60 seconds is recommended. - * Other settings will require tweaks to the Strophe.TIMEOUT value. - * (Integer) hold - The optional HTTPBIND hold value. This is the - * number of connections the server will hold at one time. This - * should almost always be set to 1 (the default). - */ - connect: function (jid, pass, callback, wait, hold, route) - { - this.jid = jid; - this.pass = pass; - this.connect_callback = callback; - this.disconnecting = false; - this.connected = false; - this.authenticated = false; - this.errors = 0; - - this.wait = wait || this.wait; - this.hold = hold || this.hold; - - // parse jid for domain and resource - this.domain = Strophe.getDomainFromJid(this.jid); - - // build the body tag - var body_attrs = { - to: this.domain, - "xml:lang": "en", - wait: this.wait, - hold: this.hold, - content: "text/xml; charset=utf-8", - ver: "1.6", - "xmpp:version": "1.0", - "xmlns:xmpp": Strophe.NS.BOSH - }; - if (route) { - body_attrs.route = route; - } - - var body = this._buildBody().attrs(body_attrs); - - this._changeConnectStatus(Strophe.Status.CONNECTING, null); - - this._requests.push( - new Strophe.Request(body.tree(), - this._onRequestStateChange.bind( - this, this._connect_cb.bind(this)), - body.tree().getAttribute("rid"))); - this._throttledRequestHandler(); - }, - - /** Function: attach - * Attach to an already created and authenticated BOSH session. - * - * This function is provided to allow Strophe to attach to BOSH - * sessions which have been created externally, perhaps by a Web - * application. This is often used to support auto-login type features - * without putting user credentials into the page. - * - * Parameters: - * (String) jid - The full JID that is bound by the session. - * (String) sid - The SID of the BOSH session. - * (String) rid - The current RID of the BOSH session. This RID - * will be used by the next request. 
- * (Function) callback The connect callback function. - * (Integer) wait - The optional HTTPBIND wait value. This is the - * time the server will wait before returning an empty result for - * a request. The default setting of 60 seconds is recommended. - * Other settings will require tweaks to the Strophe.TIMEOUT value. - * (Integer) hold - The optional HTTPBIND hold value. This is the - * number of connections the server will hold at one time. This - * should almost always be set to 1 (the default). - * (Integer) wind - The optional HTTBIND window value. This is the - * allowed range of request ids that are valid. The default is 5. - */ - attach: function (jid, sid, rid, callback, wait, hold, wind) - { - this.jid = jid; - this.sid = sid; - this.rid = rid; - this.connect_callback = callback; - - this.domain = Strophe.getDomainFromJid(this.jid); - - this.authenticated = true; - this.connected = true; - - this.wait = wait || this.wait; - this.hold = hold || this.hold; - this.window = wind || this.window; - - this._changeConnectStatus(Strophe.Status.ATTACHED, null); - }, - - /** Function: xmlInput - * User overrideable function that receives XML data coming into the - * connection. - * - * The default function does nothing. User code can override this with - * > Strophe.Connection.xmlInput = function (elem) { - * > (user code) - * > }; - * - * Parameters: - * (XMLElement) elem - The XML data received by the connection. - */ - xmlInput: function (elem) - { - return; - }, - - /** Function: xmlOutput - * User overrideable function that receives XML data sent to the - * connection. - * - * The default function does nothing. User code can override this with - * > Strophe.Connection.xmlOutput = function (elem) { - * > (user code) - * > }; - * - * Parameters: - * (XMLElement) elem - The XMLdata sent by the connection. - */ - xmlOutput: function (elem) - { - return; - }, - - /** Function: rawInput - * User overrideable function that receives raw data coming into the - * connection. - * - * The default function does nothing. User code can override this with - * > Strophe.Connection.rawInput = function (data) { - * > (user code) - * > }; - * - * Parameters: - * (String) data - The data received by the connection. - */ - rawInput: function (data) - { - return; - }, - - /** Function: rawOutput - * User overrideable function that receives raw data sent to the - * connection. - * - * The default function does nothing. User code can override this with - * > Strophe.Connection.rawOutput = function (data) { - * > (user code) - * > }; - * - * Parameters: - * (String) data - The data sent by the connection. - */ - rawOutput: function (data) - { - return; - }, - - /** Function: send - * Send a stanza. - * - * This function is called to push data onto the send queue to - * go out over the wire. Whenever a request is sent to the BOSH - * server, all pending data is sent and the queue is flushed. - * - * Parameters: - * (XMLElement | - * [XMLElement] | - * Strophe.Builder) elem - The stanza to send. - */ - send: function (elem) - { - if (elem === null) { return ; } - if (typeof(elem.sort) === "function") { - for (var i = 0; i < elem.length; i++) { - this._queueData(elem[i]); - } - } else if (typeof(elem.tree) === "function") { - this._queueData(elem.tree()); - } else { - this._queueData(elem); - } - - this._throttledRequestHandler(); - clearTimeout(this._idleTimeout); - this._idleTimeout = setTimeout(this._onIdle.bind(this), 100); - }, - - /** Function: flush - * Immediately send any pending outgoing data. 
- * - * Normally send() queues outgoing data until the next idle period - * (100ms), which optimizes network use in the common cases when - * several send()s are called in succession. flush() can be used to - * immediately send all pending data. - */ - flush: function () - { - // cancel the pending idle period and run the idle function - // immediately - clearTimeout(this._idleTimeout); - this._onIdle(); - }, - - /** Function: sendIQ - * Helper function to send IQ stanzas. - * - * Parameters: - * (XMLElement) elem - The stanza to send. - * (Function) callback - The callback function for a successful request. - * (Function) errback - The callback function for a failed or timed - * out request. On timeout, the stanza will be null. - * (Integer) timeout - The time specified in milliseconds for a - * timeout to occur. - * - * Returns: - * The id used to send the IQ. - */ - sendIQ: function(elem, callback, errback, timeout) { - var timeoutHandler = null; - var that = this; - - if (typeof(elem.tree) === "function") { - elem = elem.tree(); - } - var id = elem.getAttribute('id'); - - // inject id if not found - if (!id) { - id = this.getUniqueId("sendIQ"); - elem.setAttribute("id", id); - } - - var handler = this.addHandler(function (stanza) { - // remove timeout handler if there is one - if (timeoutHandler) { - that.deleteTimedHandler(timeoutHandler); - } - - var iqtype = stanza.getAttribute('type'); - if (iqtype == 'result') { - if (callback) { - callback(stanza); - } - } else if (iqtype == 'error') { - if (errback) { - errback(stanza); - } - } else { - throw { - name: "StropheError", - message: "Got bad IQ type of " + iqtype - }; - } - }, null, 'iq', null, id); - - // if timeout specified, setup timeout handler. - if (timeout) { - timeoutHandler = this.addTimedHandler(timeout, function () { - // get rid of normal handler - that.deleteHandler(handler); - - // call errback on timeout with null stanza - if (errback) { - errback(null); - } - return false; - }); - } - - this.send(elem); - - return id; - }, - - /** PrivateFunction: _queueData - * Queue outgoing data for later sending. Also ensures that the data - * is a DOMElement. - */ - _queueData: function (element) { - if (element === null || - !element.tagName || - !element._childNodes) { - throw { - name: "StropheError", - message: "Cannot queue non-DOMElement." - }; - } - - this._data.push(element); - }, - - /** PrivateFunction: _sendRestart - * Send an xmpp:restart stanza. - */ - _sendRestart: function () - { - this._data.push("restart"); - - this._throttledRequestHandler(); - clearTimeout(this._idleTimeout); - this._idleTimeout = setTimeout(this._onIdle.bind(this), 100); - }, - - /** Function: addTimedHandler - * Add a timed handler to the connection. - * - * This function adds a timed handler. The provided handler will - * be called every period milliseconds until it returns false, - * the connection is terminated, or the handler is removed. Handlers - * that wish to continue being invoked should return true. - * - * Because of method binding it is necessary to save the result of - * this function if you wish to remove a handler with - * deleteTimedHandler(). - * - * Note that user handlers are not active until authentication is - * successful. - * - * Parameters: - * (Integer) period - The period of the handler. - * (Function) handler - The callback function. - * - * Returns: - * A reference to the handler that can be used to remove it. 
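-     *
-     * Editor's illustration, not part of the original file: a typical
-     * use (conn stands for an assumed Strophe.Connection) is a
-     * keep-alive ping such as
-     * > var pingRef = conn.addTimedHandler(10000, function () {
-     * >     conn.send($pres().tree());  // re-broadcast presence
-     * >     return true;                // keep the handler active
-     * > });
-     * > // later: conn.deleteTimedHandler(pingRef);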
- */ - addTimedHandler: function (period, handler) - { - var thand = new Strophe.TimedHandler(period, handler); - this.addTimeds.push(thand); - return thand; - }, - - /** Function: deleteTimedHandler - * Delete a timed handler for a connection. - * - * This function removes a timed handler from the connection. The - * handRef parameter is *not* the function passed to addTimedHandler(), - * but is the reference returned from addTimedHandler(). - * - * Parameters: - * (Strophe.TimedHandler) handRef - The handler reference. - */ - deleteTimedHandler: function (handRef) - { - // this must be done in the Idle loop so that we don't change - // the handlers during iteration - this.removeTimeds.push(handRef); - }, - - /** Function: addHandler - * Add a stanza handler for the connection. - * - * This function adds a stanza handler to the connection. The - * handler callback will be called for any stanza that matches - * the parameters. Note that if multiple parameters are supplied, - * they must all match for the handler to be invoked. - * - * The handler will receive the stanza that triggered it as its argument. - * The handler should return true if it is to be invoked again; - * returning false will remove the handler after it returns. - * - * As a convenience, the ns parameters applies to the top level element - * and also any of its immediate children. This is primarily to make - * matching /iq/query elements easy. - * - * The options argument contains handler matching flags that affect how - * matches are determined. Currently the only flag is matchBare (a - * boolean). When matchBare is true, the from parameter and the from - * attribute on the stanza will be matched as bare JIDs instead of - * full JIDs. To use this, pass {matchBare: true} as the value of - * options. The default value for matchBare is false. - * - * The return value should be saved if you wish to remove the handler - * with deleteHandler(). - * - * Parameters: - * (Function) handler - The user callback. - * (String) ns - The namespace to match. - * (String) name - The stanza name to match. - * (String) type - The stanza type attribute to match. - * (String) id - The stanza id attribute to match. - * (String) from - The stanza from attribute to match. - * (String) options - The handler options - * - * Returns: - * A reference to the handler that can be used to remove it. - */ - addHandler: function (handler, ns, name, type, id, from, options) - { - var hand = new Strophe.Handler(handler, ns, name, type, id, from, options); - this.addHandlers.push(hand); - return hand; - }, - - /** Function: deleteHandler - * Delete a stanza handler for a connection. - * - * This function removes a stanza handler from the connection. The - * handRef parameter is *not* the function passed to addHandler(), - * but is the reference returned from addHandler(). - * - * Parameters: - * (Strophe.Handler) handRef - The handler reference. - */ - deleteHandler: function (handRef) - { - // this must be done in the Idle loop so that we don't change - // the handlers during iteration - this.removeHandlers.push(handRef); - }, - - /** Function: disconnect - * Start the graceful disconnection process. - * - * This function starts the disconnection process. This process starts - * by sending unavailable presence and sending BOSH body of type - * terminate. A timeout handler makes sure that disconnection happens - * even if the BOSH server does not respond. 
- * - * The user supplied connection callback will be notified of the - * progress as this process happens. - * - * Parameters: - * (String) reason - The reason the disconnect is occuring. - */ - disconnect: function (reason) - { - this._changeConnectStatus(Strophe.Status.DISCONNECTING, reason); - - Strophe.info("Disconnect was called because: " + reason); - if (this.connected) { - // setup timeout handler - this._disconnectTimeout = this._addSysTimedHandler( - 3000, this._onDisconnectTimeout.bind(this)); - this._sendTerminate(); - } - }, - - /** PrivateFunction: _changeConnectStatus - * _Private_ helper function that makes sure plugins and the user's - * callback are notified of connection status changes. - * - * Parameters: - * (Integer) status - the new connection status, one of the values - * in Strophe.Status - * (String) condition - the error condition or null - */ - _changeConnectStatus: function (status, condition) - { - // notify all plugins listening for status changes - for (var k in Strophe._connectionPlugins) { - if (Strophe._connectionPlugins.hasOwnProperty(k)) { - var plugin = this[k]; - if (plugin.statusChanged) { - try { - plugin.statusChanged(status, condition); - } catch (err) { - Strophe.error("" + k + " plugin caused an exception " + - "changing status: " + err); - } - } - } - } - - // notify the user's callback - if (this.connect_callback) { - try { - this.connect_callback(status, condition); - } catch (e) { - Strophe.error("User connection callback caused an " + - "exception: " + e); - } - } - }, - - /** PrivateFunction: _buildBody - * _Private_ helper function to generate the wrapper for BOSH. - * - * Returns: - * A Strophe.Builder with a element. - */ - _buildBody: function () - { - var bodyWrap = $build('body', { - rid: this.rid++, - xmlns: Strophe.NS.HTTPBIND - }); - - if (this.sid !== null) { - bodyWrap.attrs({sid: this.sid}); - } - - return bodyWrap; - }, - - /** PrivateFunction: _removeRequest - * _Private_ function to remove a request from the queue. - * - * Parameters: - * (Strophe.Request) req - The request to remove. - */ - _removeRequest: function (req) - { - Strophe.debug("removing request"); - - var i; - for (i = this._requests.length - 1; i >= 0; i--) { - if (req == this._requests[i]) { - this._requests.splice(i, 1); - } - } - - // IE6 fails on setting to null, so set to empty function - req.xhr.onreadystatechange = function () {}; - - this._throttledRequestHandler(); - }, - - /** PrivateFunction: _restartRequest - * _Private_ function to restart a request that is presumed dead. - * - * Parameters: - * (Integer) i - The index of the request in the queue. - */ - _restartRequest: function (i) - { - var req = this._requests[i]; - if (req.dead === null) { - req.dead = new Date(); - } - - this._processRequest(i); - }, - - /** PrivateFunction: _processRequest - * _Private_ function to process a request in the queue. - * - * This function takes requests off the queue and sends them and - * restarts dead requests. - * - * Parameters: - * (Integer) i - The index of the request in the queue. 
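-     *
-     * Editor's note, not in the original file: the cubic backoff
-     * applied below delays the Nth resend by N^3 seconds, i.e.
-     * > // req.sends:  2    3     4     5
-     * > // delay:      8s   27s   64s   125s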
- */ - _processRequest: function (i) - { - var req = this._requests[i]; - var reqStatus = -1; - - try { - if (req.xhr.readyState == 4) { - reqStatus = req.xhr.status; - } - } catch (e) { - Strophe.error("caught an error in _requests[" + i + - "], reqStatus: " + reqStatus); - } - - if (typeof(reqStatus) == "undefined") { - reqStatus = -1; - } - - // make sure we limit the number of retries - if (req.sends > 5) { - this._onDisconnectTimeout(); - return; - } - - var time_elapsed = req.age(); - var primaryTimeout = (!isNaN(time_elapsed) && - time_elapsed > Math.floor(Strophe.TIMEOUT * this.wait)); - var secondaryTimeout = (req.dead !== null && - req.timeDead() > Math.floor(Strophe.SECONDARY_TIMEOUT * this.wait)); - var requestCompletedWithServerError = (req.xhr.readyState == 4 && - (reqStatus < 1 || - reqStatus >= 500)); - if (primaryTimeout || secondaryTimeout || - requestCompletedWithServerError) { - if (secondaryTimeout) { - Strophe.error("Request " + - this._requests[i].id + - " timed out (secondary), restarting"); - } - req.abort = true; - req.xhr.abort(); - // setting to null fails on IE6, so set to empty function - req.xhr.onreadystatechange = function () {}; - this._requests[i] = new Strophe.Request(req.xmlData, - req.origFunc, - req.rid, - req.sends); - req = this._requests[i]; - } - - if (req.xhr.readyState === 0) { - Strophe.debug("request id " + req.id + - "." + req.sends + " posting"); - - req.date = new Date(); - try { - req.xhr.open("POST", this.service, true); - } catch (e2) { - Strophe.error("XHR open failed."); - if (!this.connected) { - this._changeConnectStatus(Strophe.Status.CONNFAIL, - "bad-service"); - } - this.disconnect(); - return; - } - - // Fires the XHR request -- may be invoked immediately - // or on a gradually expanding retry window for reconnects - var sendFunc = function () { - req.xhr.send(req.data); - }; - - // Implement progressive backoff for reconnects -- - // First retry (send == 1) should also be instantaneous - if (req.sends > 1) { - // Using a cube of the retry number creats a nicely - // expanding retry window - var backoff = Math.pow(req.sends, 3) * 1000; - setTimeout(sendFunc, backoff); - } else { - sendFunc(); - } - - req.sends++; - - this.xmlOutput(req.xmlData); - this.rawOutput(req.data); - } else { - Strophe.debug("_processRequest: " + - (i === 0 ? "first" : "second") + - " request has readyState of " + - req.xhr.readyState); - } - }, - - /** PrivateFunction: _throttledRequestHandler - * _Private_ function to throttle requests to the connection window. - * - * This function makes sure we don't send requests so fast that the - * request ids overflow the connection window in the case that one - * request died. - */ - _throttledRequestHandler: function () - { - if (!this._requests) { - Strophe.debug("_throttledRequestHandler called with " + - "undefined requests"); - } else { - Strophe.debug("_throttledRequestHandler called with " + - this._requests.length + " requests"); - } - - if (!this._requests || this._requests.length === 0) { - return; - } - - if (this._requests.length > 0) { - this._processRequest(0); - } - - if (this._requests.length > 1 && - Math.abs(this._requests[0].rid - - this._requests[1].rid) < this.window) { - this._processRequest(1); - } - }, - - /** PrivateFunction: _onRequestStateChange - * _Private_ handler for Strophe.Request state changes. - * - * This function is called when the XMLHttpRequest readyState changes. 
- * It contains a lot of error handling logic for the many ways that - * requests can fail, and calls the request callback when requests - * succeed. - * - * Parameters: - * (Function) func - The handler for the request. - * (Strophe.Request) req - The request that is changing readyState. - */ - _onRequestStateChange: function (func, req) - { - Strophe.debug("request id " + req.id + - "." + req.sends + " state changed to " + - req.xhr.readyState); - - if (req.abort) { - req.abort = false; - return; - } - - // request complete - var reqStatus; - if (req.xhr.readyState == 4) { - reqStatus = 0; - try { - reqStatus = req.xhr.status; - } catch (e) { - // ignore errors from undefined status attribute. works - // around a browser bug - } - - if (typeof(reqStatus) == "undefined") { - reqStatus = 0; - } - - if (this.disconnecting) { - if (reqStatus >= 400) { - this._hitError(reqStatus); - return; - } - } - - var reqIs0 = (this._requests[0] == req); - var reqIs1 = (this._requests[1] == req); - - if ((reqStatus > 0 && reqStatus < 500) || req.sends > 5) { - // remove from internal queue - this._removeRequest(req); - Strophe.debug("request id " + - req.id + - " should now be removed"); - } - - // request succeeded - if (reqStatus == 200) { - // if request 1 finished, or request 0 finished and request - // 1 is over Strophe.SECONDARY_TIMEOUT seconds old, we need to - // restart the other - both will be in the first spot, as the - // completed request has been removed from the queue already - if (reqIs1 || - (reqIs0 && this._requests.length > 0 && - this._requests[0].age() > Math.floor(Strophe.SECONDARY_TIMEOUT * this.wait))) { - this._restartRequest(0); - } - // call handler - Strophe.debug("request id " + - req.id + "." + - req.sends + " got 200"); - func(req); - this.errors = 0; - } else { - Strophe.error("request id " + - req.id + "." + - req.sends + " error " + reqStatus + - " happened"); - if (reqStatus === 0 || - (reqStatus >= 400 && reqStatus < 600) || - reqStatus >= 12000) { - this._hitError(reqStatus); - if (reqStatus >= 400 && reqStatus < 500) { - this._changeConnectStatus(Strophe.Status.DISCONNECTING, - null); - this._doDisconnect(); - } - } - } - - if (!((reqStatus > 0 && reqStatus < 500) || - req.sends > 5)) { - this._throttledRequestHandler(); - } - } - }, - - /** PrivateFunction: _hitError - * _Private_ function to handle the error count. - * - * Requests are resent automatically until their error count reaches - * 5. Each time an error is encountered, this function is called to - * increment the count and disconnect if the count is too high. - * - * Parameters: - * (Integer) reqStatus - The request status. - */ - _hitError: function (reqStatus) - { - this.errors++; - Strophe.warn("request errored, status: " + reqStatus + - ", number of errors: " + this.errors); - if (this.errors > 4) { - this._onDisconnectTimeout(); - } - }, - - /** PrivateFunction: _doDisconnect - * _Private_ function to disconnect. - * - * This is the last piece of the disconnection logic. This resets the - * connection and alerts the user's connection callback. 
- */ - _doDisconnect: function () - { - Strophe.info("_doDisconnect was called"); - this.authenticated = false; - this.disconnecting = false; - this.sid = null; - this.streamId = null; - this.rid = Math.floor(Math.random() * 4294967295); - - // tell the parent we disconnected - if (this.connected) { - this._changeConnectStatus(Strophe.Status.DISCONNECTED, null); - this.connected = false; - } - - // delete handlers - this.handlers = []; - this.timedHandlers = []; - this.removeTimeds = []; - this.removeHandlers = []; - this.addTimeds = []; - this.addHandlers = []; - }, - - /** PrivateFunction: _dataRecv - * _Private_ handler to processes incoming data from the the connection. - * - * Except for _connect_cb handling the initial connection request, - * this function handles the incoming data for all requests. This - * function also fires stanza handlers that match each incoming - * stanza. - * - * Parameters: - * (Strophe.Request) req - The request that has data ready. - */ - _dataRecv: function (req) - { - try { - var elem = req.getResponse(); - } catch (e) { - if (e != "parsererror") { throw e; } - this.disconnect("strophe-parsererror"); - } - if (elem === null) { return; } - - this.xmlInput(elem); - this.rawInput(Strophe.serialize(elem)); - - // remove handlers scheduled for deletion - var i, hand; - while (this.removeHandlers.length > 0) { - hand = this.removeHandlers.pop(); - i = this.handlers.indexOf(hand); - if (i >= 0) { - this.handlers.splice(i, 1); - } - } - - // add handlers scheduled for addition - while (this.addHandlers.length > 0) { - this.handlers.push(this.addHandlers.pop()); - } - - // handle graceful disconnect - if (this.disconnecting && this._requests.length === 0) { - this.deleteTimedHandler(this._disconnectTimeout); - this._disconnectTimeout = null; - this._doDisconnect(); - return; - } - - var typ = elem.getAttribute("type"); - var cond, conflict; - if (typ !== null && typ == "terminate") { - // Don't process stanzas that come in after disconnect - if (this.disconnecting) { - return; - } - - // an error occurred - cond = elem.getAttribute("condition"); - conflict = elem.getElementsByTagName("conflict"); - if (cond !== null) { - if (cond == "remote-stream-error" && conflict.length > 0) { - cond = "conflict"; - } - this._changeConnectStatus(Strophe.Status.CONNFAIL, cond); - } else { - this._changeConnectStatus(Strophe.Status.CONNFAIL, "unknown"); - } - this.disconnect(); - return; - } - - // send each incoming stanza through the handler chain - var that = this; - Strophe.forEachChild(elem, null, function (child) { - var i, newList; - // process handlers - newList = that.handlers; - that.handlers = []; - for (i = 0; i < newList.length; i++) { - var hand = newList[i]; - if (hand.isMatch(child) && - (that.authenticated || !hand.user)) { - if (hand.run(child)) { - that.handlers.push(hand); - } - } else { - that.handlers.push(hand); - } - } - }); - }, - - /** PrivateFunction: _sendTerminate - * _Private_ function to send initial disconnect sequence. - * - * This is the first step in a graceful disconnect. It sends - * the BOSH server a terminate body and includes an unavailable - * presence if authentication has completed. 
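-     *
-     * Editor's illustration, not part of the original file: the
-     * terminate body built below looks roughly like
-     * > <body type='terminate' rid='...'
-     * >       xmlns='http://jabber.org/protocol/httpbind'>
-     * >   <presence type='unavailable' xmlns='jabber:client'/>
-     * > </body>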
- */ - _sendTerminate: function () - { - Strophe.info("_sendTerminate was called"); - var body = this._buildBody().attrs({type: "terminate"}); - - if (this.authenticated) { - body.c('presence', { - xmlns: Strophe.NS.CLIENT, - type: 'unavailable' - }); - } - - this.disconnecting = true; - - var req = new Strophe.Request(body.tree(), - this._onRequestStateChange.bind( - this, this._dataRecv.bind(this)), - body.tree().getAttribute("rid")); - - this._requests.push(req); - this._throttledRequestHandler(); - }, - - /** PrivateFunction: _connect_cb - * _Private_ handler for initial connection request. - * - * This handler is used to process the initial connection request - * response from the BOSH server. It is used to set up authentication - * handlers and start the authentication process. - * - * SASL authentication will be attempted if available, otherwise - * the code will fall back to legacy authentication. - * - * Parameters: - * (Strophe.Request) req - The current request. - */ - _connect_cb: function (req) - { - Strophe.info("_connect_cb was called"); - - this.connected = true; - var bodyWrap = req.getResponse(); - if (!bodyWrap) { return; } - - this.xmlInput(bodyWrap); - this.rawInput(Strophe.serialize(bodyWrap)); - - var typ = bodyWrap.getAttribute("type"); - var cond, conflict; - if (typ !== null && typ == "terminate") { - // an error occurred - cond = bodyWrap.getAttribute("condition"); - conflict = bodyWrap.getElementsByTagName("conflict"); - if (cond !== null) { - if (cond == "remote-stream-error" && conflict.length > 0) { - cond = "conflict"; - } - this._changeConnectStatus(Strophe.Status.CONNFAIL, cond); - } else { - this._changeConnectStatus(Strophe.Status.CONNFAIL, "unknown"); - } - return; - } - - // check to make sure we don't overwrite these if _connect_cb is - // called multiple times in the case of missing stream:features - if (!this.sid) { - this.sid = bodyWrap.getAttribute("sid"); - } - if (!this.stream_id) { - this.stream_id = bodyWrap.getAttribute("authid"); - } - var wind = bodyWrap.getAttribute('requests'); - if (wind) { this.window = parseInt(wind, 10); } - var hold = bodyWrap.getAttribute('hold'); - if (hold) { this.hold = parseInt(hold, 10); } - var wait = bodyWrap.getAttribute('wait'); - if (wait) { this.wait = parseInt(wait, 10); } - - - var do_sasl_plain = false; - var do_sasl_digest_md5 = false; - var do_sasl_anonymous = false; - - var mechanisms = bodyWrap.getElementsByTagName("mechanism"); - var i, mech, auth_str, hashed_auth_str; - if (mechanisms.length > 0) { - for (i = 0; i < mechanisms.length; i++) { - mech = Strophe.getText(mechanisms[i]); - if (mech == 'DIGEST-MD5') { - do_sasl_digest_md5 = true; - } else if (mech == 'PLAIN') { - do_sasl_plain = true; - } else if (mech == 'ANONYMOUS') { - do_sasl_anonymous = true; - } - } - } else { - // we didn't get stream:features yet, so we need wait for it - // by sending a blank poll request - var body = this._buildBody(); - this._requests.push( - new Strophe.Request(body.tree(), - this._onRequestStateChange.bind( - this, this._connect_cb.bind(this)), - body.tree().getAttribute("rid"))); - this._throttledRequestHandler(); - return; - } - - if (Strophe.getNodeFromJid(this.jid) === null && - do_sasl_anonymous) { - this._changeConnectStatus(Strophe.Status.AUTHENTICATING, null); - this._sasl_success_handler = this._addSysHandler( - this._sasl_success_cb.bind(this), null, - "success", null, null); - this._sasl_failure_handler = this._addSysHandler( - this._sasl_failure_cb.bind(this), null, - "failure", null, null); - - 
this.send($build("auth", { - xmlns: Strophe.NS.SASL, - mechanism: "ANONYMOUS" - }).tree()); - } else if (Strophe.getNodeFromJid(this.jid) === null) { - // we don't have a node, which is required for non-anonymous - // client connections - this._changeConnectStatus(Strophe.Status.CONNFAIL, - 'x-strophe-bad-non-anon-jid'); - this.disconnect(); - } else if (do_sasl_digest_md5) { - this._changeConnectStatus(Strophe.Status.AUTHENTICATING, null); - this._sasl_challenge_handler = this._addSysHandler( - this._sasl_challenge1_cb.bind(this), null, - "challenge", null, null); - this._sasl_failure_handler = this._addSysHandler( - this._sasl_failure_cb.bind(this), null, - "failure", null, null); - - this.send($build("auth", { - xmlns: Strophe.NS.SASL, - mechanism: "DIGEST-MD5" - }).tree()); - } else if (do_sasl_plain) { - // Build the plain auth string (barejid null - // username null password) and base 64 encoded. - auth_str = Strophe.getBareJidFromJid(this.jid); - auth_str = auth_str + "\u0000"; - auth_str = auth_str + Strophe.getNodeFromJid(this.jid); - auth_str = auth_str + "\u0000"; - auth_str = auth_str + this.pass; - - this._changeConnectStatus(Strophe.Status.AUTHENTICATING, null); - this._sasl_success_handler = this._addSysHandler( - this._sasl_success_cb.bind(this), null, - "success", null, null); - this._sasl_failure_handler = this._addSysHandler( - this._sasl_failure_cb.bind(this), null, - "failure", null, null); - - hashed_auth_str = Base64.encode(auth_str); - this.send($build("auth", { - xmlns: Strophe.NS.SASL, - mechanism: "PLAIN" - }).t(hashed_auth_str).tree()); - } else { - this._changeConnectStatus(Strophe.Status.AUTHENTICATING, null); - this._addSysHandler(this._auth1_cb.bind(this), null, null, - null, "_auth_1"); - - this.send($iq({ - type: "get", - to: this.domain, - id: "_auth_1" - }).c("query", { - xmlns: Strophe.NS.AUTH - }).c("username", {}).t(Strophe.getNodeFromJid(this.jid)).tree()); - } - }, - - /** PrivateFunction: _sasl_challenge1_cb - * _Private_ handler for DIGEST-MD5 SASL authentication. - * - * Parameters: - * (XMLElement) elem - The challenge stanza. - * - * Returns: - * false to remove the handler. 
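-     *
-     * Editor's illustration, not in the original file: a decoded
-     * DIGEST-MD5 challenge, as parsed by the loop below, looks
-     * roughly like (example values from RFC 2831)
-     * > realm="elwood.innosoft.com",nonce="OA6MG9tEQGm2hh",
-     * > qop="auth",algorithm=md5-sess,charset=utf-8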
- */ - _sasl_challenge1_cb: function (elem) - { - var attribMatch = /([a-z]+)=("[^"]+"|[^,"]+)(?:,|$)/; - - var challenge = Base64.decode(Strophe.getText(elem)); - var cnonce = MD5.hexdigest(Math.random() * 1234567890); - var realm = ""; - var host = null; - var nonce = ""; - var qop = ""; - var matches; - - // remove unneeded handlers - this.deleteHandler(this._sasl_failure_handler); - - while (challenge.match(attribMatch)) { - matches = challenge.match(attribMatch); - challenge = challenge.replace(matches[0], ""); - matches[2] = matches[2].replace(/^"(.+)"$/, "$1"); - switch (matches[1]) { - case "realm": - realm = matches[2]; - break; - case "nonce": - nonce = matches[2]; - break; - case "qop": - qop = matches[2]; - break; - case "host": - host = matches[2]; - break; - } - } - - var digest_uri = "xmpp/" + this.domain; - if (host !== null) { - digest_uri = digest_uri + "/" + host; - } - - var A1 = MD5.hash(Strophe.getNodeFromJid(this.jid) + - ":" + realm + ":" + this.pass) + - ":" + nonce + ":" + cnonce; - var A2 = 'AUTHENTICATE:' + digest_uri; - - var responseText = ""; - responseText += 'username=' + - this._quote(Strophe.getNodeFromJid(this.jid)) + ','; - responseText += 'realm=' + this._quote(realm) + ','; - responseText += 'nonce=' + this._quote(nonce) + ','; - responseText += 'cnonce=' + this._quote(cnonce) + ','; - responseText += 'nc="00000001",'; - responseText += 'qop="auth",'; - responseText += 'digest-uri=' + this._quote(digest_uri) + ','; - responseText += 'response=' + this._quote( - MD5.hexdigest(MD5.hexdigest(A1) + ":" + - nonce + ":00000001:" + - cnonce + ":auth:" + - MD5.hexdigest(A2))) + ','; - responseText += 'charset="utf-8"'; - - this._sasl_challenge_handler = this._addSysHandler( - this._sasl_challenge2_cb.bind(this), null, - "challenge", null, null); - this._sasl_success_handler = this._addSysHandler( - this._sasl_success_cb.bind(this), null, - "success", null, null); - this._sasl_failure_handler = this._addSysHandler( - this._sasl_failure_cb.bind(this), null, - "failure", null, null); - - this.send($build('response', { - xmlns: Strophe.NS.SASL - }).t(Base64.encode(responseText)).tree()); - - return false; - }, - - /** PrivateFunction: _quote - * _Private_ utility function to backslash escape and quote strings. - * - * Parameters: - * (String) str - The string to be quoted. - * - * Returns: - * quoted string - */ - _quote: function (str) - { - return '"' + str.replace(/\\/g, "\\\\").replace(/"/g, '\\"') + '"'; - //" end string workaround for emacs - }, - - - /** PrivateFunction: _sasl_challenge2_cb - * _Private_ handler for second step of DIGEST-MD5 SASL authentication. - * - * Parameters: - * (XMLElement) elem - The challenge stanza. - * - * Returns: - * false to remove the handler. - */ - _sasl_challenge2_cb: function (elem) - { - // remove unneeded handlers - this.deleteHandler(this._sasl_success_handler); - this.deleteHandler(this._sasl_failure_handler); - - this._sasl_success_handler = this._addSysHandler( - this._sasl_success_cb.bind(this), null, - "success", null, null); - this._sasl_failure_handler = this._addSysHandler( - this._sasl_failure_cb.bind(this), null, - "failure", null, null); - this.send($build('response', {xmlns: Strophe.NS.SASL}).tree()); - return false; - }, - - /** PrivateFunction: _auth1_cb - * _Private_ handler for legacy authentication. - * - * This handler is called in response to the initial - * for legacy authentication. 
It builds an authentication and - * sends it, creating a handler (calling back to _auth2_cb()) to - * handle the result - * - * Parameters: - * (XMLElement) elem - The stanza that triggered the callback. - * - * Returns: - * false to remove the handler. - */ - _auth1_cb: function (elem) - { - // build plaintext auth iq - var iq = $iq({type: "set", id: "_auth_2"}) - .c('query', {xmlns: Strophe.NS.AUTH}) - .c('username', {}).t(Strophe.getNodeFromJid(this.jid)) - .up() - .c('password').t(this.pass); - - if (!Strophe.getResourceFromJid(this.jid)) { - // since the user has not supplied a resource, we pick - // a default one here. unlike other auth methods, the server - // cannot do this for us. - this.jid = Strophe.getBareJidFromJid(this.jid) + '/strophe'; - } - iq.up().c('resource', {}).t(Strophe.getResourceFromJid(this.jid)); - - this._addSysHandler(this._auth2_cb.bind(this), null, - null, null, "_auth_2"); - - this.send(iq.tree()); - - return false; - }, - - /** PrivateFunction: _sasl_success_cb - * _Private_ handler for succesful SASL authentication. - * - * Parameters: - * (XMLElement) elem - The matching stanza. - * - * Returns: - * false to remove the handler. - */ - _sasl_success_cb: function (elem) - { - Strophe.info("SASL authentication succeeded."); - - // remove old handlers - this.deleteHandler(this._sasl_failure_handler); - this._sasl_failure_handler = null; - if (this._sasl_challenge_handler) { - this.deleteHandler(this._sasl_challenge_handler); - this._sasl_challenge_handler = null; - } - - this._addSysHandler(this._sasl_auth1_cb.bind(this), null, - "stream:features", null, null); - - // we must send an xmpp:restart now - this._sendRestart(); - - return false; - }, - - /** PrivateFunction: _sasl_auth1_cb - * _Private_ handler to start stream binding. - * - * Parameters: - * (XMLElement) elem - The matching stanza. - * - * Returns: - * false to remove the handler. - */ - _sasl_auth1_cb: function (elem) - { - // save stream:features for future usage - this.features = elem; - - var i, child; - - for (i = 0; i < elem._childNodes.length; i++) { - child = elem._childNodes[i]; - if (child.nodeName.toLowerCase() == 'bind') { - this.do_bind = true; - } - - if (child.nodeName.toLowerCase() == 'session') { - this.do_session = true; - } - } - - if (!this.do_bind) { - this._changeConnectStatus(Strophe.Status.AUTHFAIL, null); - return false; - } else { - this._addSysHandler(this._sasl_bind_cb.bind(this), null, null, - null, "_bind_auth_2"); - - var resource = Strophe.getResourceFromJid(this.jid); - if (resource) { - this.send($iq({type: "set", id: "_bind_auth_2"}) - .c('bind', {xmlns: Strophe.NS.BIND}) - .c('resource', {}).t(resource).tree()); - } else { - this.send($iq({type: "set", id: "_bind_auth_2"}) - .c('bind', {xmlns: Strophe.NS.BIND}) - .tree()); - } - } - - return false; - }, - - /** PrivateFunction: _sasl_bind_cb - * _Private_ handler for binding result and session start. - * - * Parameters: - * (XMLElement) elem - The matching stanza. - * - * Returns: - * false to remove the handler. 
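-     *
-     * Editor's illustration, not part of the original file: a
-     * successful bind result handled here looks roughly like
-     * > <iq type='result' id='_bind_auth_2'>
-     * >   <bind xmlns='urn:ietf:params:xml:ns:xmpp-bind'>
-     * >     <jid>user@example.com/strophe</jid>
-     * >   </bind>
-     * > </iq>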
- */ - _sasl_bind_cb: function (elem) - { - if (elem.getAttribute("type") == "error") { - Strophe.info("SASL binding failed."); - this._changeConnectStatus(Strophe.Status.AUTHFAIL, null); - return false; - } - - // TODO - need to grab errors - var bind = elem.getElementsByTagName("bind"); - var jidNode; - if (bind.length > 0) { - // Grab jid - jidNode = bind[0].getElementsByTagName("jid"); - if (jidNode.length > 0) { - this.jid = Strophe.getText(jidNode[0]); - - if (this.do_session) { - this._addSysHandler(this._sasl_session_cb.bind(this), - null, null, null, "_session_auth_2"); - - this.send($iq({type: "set", id: "_session_auth_2"}) - .c('session', {xmlns: Strophe.NS.SESSION}) - .tree()); - } else { - this.authenticated = true; - this._changeConnectStatus(Strophe.Status.CONNECTED, null); - } - } - } else { - Strophe.info("SASL binding failed."); - this._changeConnectStatus(Strophe.Status.AUTHFAIL, null); - return false; - } - }, - - /** PrivateFunction: _sasl_session_cb - * _Private_ handler to finish successful SASL connection. - * - * This sets Connection.authenticated to true on success, which - * starts the processing of user handlers. - * - * Parameters: - * (XMLElement) elem - The matching stanza. - * - * Returns: - * false to remove the handler. - */ - _sasl_session_cb: function (elem) - { - if (elem.getAttribute("type") == "result") { - this.authenticated = true; - this._changeConnectStatus(Strophe.Status.CONNECTED, null); - } else if (elem.getAttribute("type") == "error") { - Strophe.info("Session creation failed."); - this._changeConnectStatus(Strophe.Status.AUTHFAIL, null); - return false; - } - - return false; - }, - - /** PrivateFunction: _sasl_failure_cb - * _Private_ handler for SASL authentication failure. - * - * Parameters: - * (XMLElement) elem - The matching stanza. - * - * Returns: - * false to remove the handler. - */ - _sasl_failure_cb: function (elem) - { - // delete unneeded handlers - if (this._sasl_success_handler) { - this.deleteHandler(this._sasl_success_handler); - this._sasl_success_handler = null; - } - if (this._sasl_challenge_handler) { - this.deleteHandler(this._sasl_challenge_handler); - this._sasl_challenge_handler = null; - } - - this._changeConnectStatus(Strophe.Status.AUTHFAIL, null); - return false; - }, - - /** PrivateFunction: _auth2_cb - * _Private_ handler to finish legacy authentication. - * - * This handler is called when the result from the jabber:iq:auth - * stanza is returned. - * - * Parameters: - * (XMLElement) elem - The stanza that triggered the callback. - * - * Returns: - * false to remove the handler. - */ - _auth2_cb: function (elem) - { - if (elem.getAttribute("type") == "result") { - this.authenticated = true; - this._changeConnectStatus(Strophe.Status.CONNECTED, null); - } else if (elem.getAttribute("type") == "error") { - this._changeConnectStatus(Strophe.Status.AUTHFAIL, null); - this.disconnect(); - } - - return false; - }, - - /** PrivateFunction: _addSysTimedHandler - * _Private_ function to add a system level timed handler. - * - * This function is used to add a Strophe.TimedHandler for the - * library code. System timed handlers are allowed to run before - * authentication is complete. - * - * Parameters: - * (Integer) period - The period of the handler. - * (Function) handler - The callback function. 
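-     *
-     * Editor's illustration, not in the original file: disconnect()
-     * above arms its timeout this way, so the handler can fire even
-     * before authentication completes:
-     * > this._disconnectTimeout = this._addSysTimedHandler(
-     * >     3000, this._onDisconnectTimeout.bind(this));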
- */ - _addSysTimedHandler: function (period, handler) - { - var thand = new Strophe.TimedHandler(period, handler); - thand.user = false; - this.addTimeds.push(thand); - return thand; - }, - - /** PrivateFunction: _addSysHandler - * _Private_ function to add a system level stanza handler. - * - * This function is used to add a Strophe.Handler for the - * library code. System stanza handlers are allowed to run before - * authentication is complete. - * - * Parameters: - * (Function) handler - The callback function. - * (String) ns - The namespace to match. - * (String) name - The stanza name to match. - * (String) type - The stanza type attribute to match. - * (String) id - The stanza id attribute to match. - */ - _addSysHandler: function (handler, ns, name, type, id) - { - var hand = new Strophe.Handler(handler, ns, name, type, id); - hand.user = false; - this.addHandlers.push(hand); - return hand; - }, - - /** PrivateFunction: _onDisconnectTimeout - * _Private_ timeout handler for handling non-graceful disconnection. - * - * If the graceful disconnect process does not complete within the - * time allotted, this handler finishes the disconnect anyway. - * - * Returns: - * false to remove the handler. - */ - _onDisconnectTimeout: function () - { - Strophe.info("_onDisconnectTimeout was called"); - - // cancel all remaining requests and clear the queue - var req; - while (this._requests.length > 0) { - req = this._requests.pop(); - req.abort = true; - req.xhr.abort(); - // jslint complains, but this is fine. setting to empty func - // is necessary for IE6 - req.xhr.onreadystatechange = function () {}; - } - - // actually disconnect - this._doDisconnect(); - - return false; - }, - - /** PrivateFunction: _onIdle - * _Private_ handler to process events during idle cycle. - * - * This handler is called every 100ms to fire timed handlers that - * are ready and keep poll requests going. - */ - _onIdle: function () - { - var i, thand, since, newList; - - // add timed handlers scheduled for addition - // NOTE: we add before remove in the case a timed handler is - // added and then deleted before the next _onIdle() call. 
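-        // (Editor's note, not in the original file.) Concretely: a
-        // handler added and deleted within the same idle period must
-        // first land in timedHandlers via the loop below so that the
-        // removal loop after it can find and splice it out.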
- while (this.addTimeds.length > 0) { - this.timedHandlers.push(this.addTimeds.pop()); - } - - // remove timed handlers that have been scheduled for deletion - while (this.removeTimeds.length > 0) { - thand = this.removeTimeds.pop(); - i = this.timedHandlers.indexOf(thand); - if (i >= 0) { - this.timedHandlers.splice(i, 1); - } - } - - // call ready timed handlers - var now = new Date().getTime(); - newList = []; - for (i = 0; i < this.timedHandlers.length; i++) { - thand = this.timedHandlers[i]; - if (this.authenticated || !thand.user) { - since = thand.lastCalled + thand.period; - if (since - now <= 0) { - if (thand.run()) { - newList.push(thand); - } - } else { - newList.push(thand); - } - } - } - this.timedHandlers = newList; - - var body, time_elapsed; - - // if no requests are in progress, poll - if (this.authenticated && this._requests.length === 0 && - this._data.length === 0 && !this.disconnecting) { - Strophe.info("no requests during idle cycle, sending " + - "blank request"); - this._data.push(null); - } - - if (this._requests.length < 2 && this._data.length > 0 && - !this.paused) { - body = this._buildBody(); - for (i = 0; i < this._data.length; i++) { - if (this._data[i] !== null) { - if (this._data[i] === "restart") { - body.attrs({ - to: this.domain, - "xml:lang": "en", - "xmpp:restart": "true", - "xmlns:xmpp": Strophe.NS.BOSH - }); - } else { - body.cnode(this._data[i]).up(); - } - } - } - delete this._data; - this._data = []; - this._requests.push( - new Strophe.Request(body.tree(), - this._onRequestStateChange.bind( - this, this._dataRecv.bind(this)), - body.tree().getAttribute("rid"))); - this._processRequest(this._requests.length - 1); - } - - if (this._requests.length > 0) { - time_elapsed = this._requests[0].age(); - if (this._requests[0].dead !== null) { - if (this._requests[0].timeDead() > - Math.floor(Strophe.SECONDARY_TIMEOUT * this.wait)) { - this._throttledRequestHandler(); - } - } - - if (time_elapsed > Math.floor(Strophe.TIMEOUT * this.wait)) { - Strophe.warn("Request " + - this._requests[0].id + - " timed out, over " + Math.floor(Strophe.TIMEOUT * this.wait) + - " seconds since last activity"); - this._throttledRequestHandler(); - } - } - - // reactivate the timer - clearTimeout(this._idleTimeout); - this._idleTimeout = setTimeout(this._onIdle.bind(this), 100); - } -}; - -if (callback) { - callback(Strophe, $build, $msg, $iq, $pres); -} - -})(function () { - window.Strophe = arguments[0]; - window.$build = arguments[1]; - window.$msg = arguments[2]; - window.$iq = arguments[3]; - window.$pres = arguments[4]; -}); diff --git a/contrib/jitsimeetbridge/unjingle/unjingle.js b/contrib/jitsimeetbridge/unjingle/unjingle.js deleted file mode 100644 index 3dfe759914..0000000000 --- a/contrib/jitsimeetbridge/unjingle/unjingle.js +++ /dev/null @@ -1,48 +0,0 @@ -var strophe = require("./strophe/strophe.js").Strophe; - -var Strophe = strophe.Strophe; -var $iq = strophe.$iq; -var $msg = strophe.$msg; -var $build = strophe.$build; -var $pres = strophe.$pres; - -var jsdom = require("jsdom"); -var window = jsdom.jsdom().parentWindow; -var $ = require('jquery')(window); - -var stropheJingle = require("./strophe.jingle.sdp.js"); - - -var input = ''; - -process.stdin.on('readable', function() { - var chunk = process.stdin.read(); - if (chunk !== null) { - input += chunk; - } -}); - -process.stdin.on('end', function() { - if (process.argv[2] == '--jingle') { - var elem = $(input); - // app does: - // sess.setRemoteDescription($(iq).find('>jingle'), 'offer'); - 
//console.log(elem.find('>content')); - var sdp = new stropheJingle.SDP(''); - sdp.fromJingle(elem); - console.log(sdp.raw); - } else if (process.argv[2] == '--sdp') { - var sdp = new stropheJingle.SDP(input); - var accept = $iq({to: '%(tojid)s', - type: 'set'}) - .c('jingle', {xmlns: 'urn:xmpp:jingle:1', - //action: 'session-accept', - action: '%(action)s', - initiator: '%(initiator)s', - responder: '%(responder)s', - sid: '%(sid)s' }); - sdp.toJingle(accept, 'responder'); - console.log(Strophe.serialize(accept)); - } -}); - diff --git a/contrib/scripts/kick_users.py b/contrib/scripts/kick_users.py deleted file mode 100755 index f8e0c732fb..0000000000 --- a/contrib/scripts/kick_users.py +++ /dev/null @@ -1,88 +0,0 @@ -#!/usr/bin/env python - -import json -import sys -import urllib -from argparse import ArgumentParser - -import requests - - -def _mkurl(template, kws): - for key in kws: - template = template.replace(key, kws[key]) - return template - - -def main(hs, room_id, access_token, user_id_prefix, why): - if not why: - why = "Automated kick." - print( - "Kicking members on %s in room %s matching %s" % (hs, room_id, user_id_prefix) - ) - room_state_url = _mkurl( - "$HS/_matrix/client/api/v1/rooms/$ROOM/state?access_token=$TOKEN", - {"$HS": hs, "$ROOM": room_id, "$TOKEN": access_token}, - ) - print("Getting room state => %s" % room_state_url) - res = requests.get(room_state_url) - print("HTTP %s" % res.status_code) - state_events = res.json() - if "error" in state_events: - print("FATAL") - print(state_events) - return - - kick_list = [] - room_name = room_id - for event in state_events: - if not event["type"] == "m.room.member": - if event["type"] == "m.room.name": - room_name = event["content"].get("name") - continue - if not event["content"].get("membership") == "join": - continue - if event["state_key"].startswith(user_id_prefix): - kick_list.append(event["state_key"]) - - if len(kick_list) == 0: - print("No user IDs match the prefix '%s'" % user_id_prefix) - return - - print("The following user IDs will be kicked from %s" % room_name) - for uid in kick_list: - print(uid) - doit = input("Continue? [Y]es\n") - if len(doit) > 0 and doit.lower() == "y": - print("Kicking members...") - # encode them all - kick_list = [urllib.quote(uid) for uid in kick_list] - for uid in kick_list: - kick_url = _mkurl( - "$HS/_matrix/client/api/v1/rooms/$ROOM/state/m.room.member/$UID?access_token=$TOKEN", - {"$HS": hs, "$UID": uid, "$ROOM": room_id, "$TOKEN": access_token}, - ) - kick_body = {"membership": "leave", "reason": why} - print("Kicking %s" % uid) - res = requests.put(kick_url, data=json.dumps(kick_body)) - if res.status_code != 200: - print("ERROR: HTTP %s" % res.status_code) - if res.json().get("error"): - print("ERROR: JSON %s" % res.json()) - - -if __name__ == "__main__": - parser = ArgumentParser("Kick members in a room matching a certain user ID prefix.") - parser.add_argument("-u", "--user-id", help="The user ID prefix e.g. '@irc_'") - parser.add_argument("-t", "--token", help="Your access_token") - parser.add_argument("-r", "--room", help="The room ID to kick members in") - parser.add_argument( - "-s", "--homeserver", help="The base HS url e.g. http://matrix.org" - ) - parser.add_argument("-w", "--why", help="Reason for the kick. 
Optional.") - args = parser.parse_args() - if not args.room or not args.token or not args.user_id or not args.homeserver: - parser.print_help() - sys.exit(1) - else: - main(args.homeserver, args.room, args.token, args.user_id, args.why) diff --git a/debian/changelog b/debian/changelog index dda342a630..8f08972743 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,28 @@ +matrix-synapse-py3 (1.61.0~rc1+nmu1) UNRELEASED; urgency=medium + + * Non-maintainer upload. + * Remove unused `jitsimeetbridge` experiment from `contrib` directory. + + -- Synapse Packaging team Sun, 29 May 2022 14:44:45 +0100 + +matrix-synapse-py3 (1.60.0) stable; urgency=medium + + * New Synapse release 1.60.0. + + -- Synapse Packaging team Tue, 31 May 2022 13:41:22 +0100 + +matrix-synapse-py3 (1.60.0~rc2) stable; urgency=medium + + * New Synapse release 1.60.0rc2. + + -- Synapse Packaging team Fri, 27 May 2022 11:04:55 +0100 + +matrix-synapse-py3 (1.60.0~rc1) stable; urgency=medium + + * New Synapse release 1.60.0rc1. + + -- Synapse Packaging team Tue, 24 May 2022 12:05:01 +0100 + matrix-synapse-py3 (1.59.1) stable; urgency=medium * New Synapse release 1.59.1. diff --git a/debian/copyright b/debian/copyright index 95c21ea12a..902b18fa41 100644 --- a/debian/copyright +++ b/debian/copyright @@ -22,29 +22,6 @@ Files: synapse/config/repository.py Copyright: 2014-2015, matrix.org License: Apache-2.0 -Files: contrib/jitsimeetbridge/unjingle/strophe/base64.js -Copyright: Public Domain (Tyler Akins http://rumkin.com) -License: public-domain - This code was written by Tyler Akins and has been placed in the - public domain. It would be nice if you left this header intact. - Base64 code from Tyler Akins -- http://rumkin.com - -Files: contrib/jitsimeetbridge/unjingle/strophe/md5.js -Copyright: 1999-2002, Paul Johnston & Contributors -License: BSD-3-clause - -Files: contrib/jitsimeetbridge/unjingle/strophe/strophe.js -Copyright: 2006-2008, OGG, LLC -License: Expat - -Files: contrib/jitsimeetbridge/unjingle/strophe/XMLHttpRequest.js -Copyright: 2010 passive.ly LLC -License: Expat - -Files: contrib/jitsimeetbridge/unjingle/*.js -Copyright: 2014 Jitsi -License: Apache-2.0 - Files: debian/* Copyright: 2016-2017, Erik Johnston 2017, Rahul De diff --git a/demo/start.sh b/demo/start.sh index 96b3a2ceab..fdd75816fb 100755 --- a/demo/start.sh +++ b/demo/start.sh @@ -6,11 +6,12 @@ CWD=$(pwd) cd "$DIR/.." || exit -PYTHONPATH=$(readlink -f "$(pwd)") -export PYTHONPATH - - -echo "$PYTHONPATH" +# Do not override PYTHONPATH if we are in a virtual env +if [ "$VIRTUAL_ENV" = "" ]; then + PYTHONPATH=$(readlink -f "$(pwd)") + export PYTHONPATH + echo "$PYTHONPATH" +fi # Create servers which listen on HTTP at 808x and HTTPS at 848x. for port in 8080 8081 8082; do diff --git a/docker/Dockerfile b/docker/Dockerfile index ccc6a9f778..7af0e51f97 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -55,7 +55,7 @@ RUN \ # NB: In poetry 1.2 `poetry export` will be moved into a plugin; we'll need to also # pip install poetry-plugin-export (https://github.com/python-poetry/poetry-plugin-export). 
RUN --mount=type=cache,target=/root/.cache/pip \ - pip install --user git+https://github.com/python-poetry/poetry.git@fb13b3a676f476177f7937ffa480ee5cff9a90a5 + pip install --user "poetry-core==1.1.0a7" "git+https://github.com/python-poetry/poetry.git@fb13b3a676f476177f7937ffa480ee5cff9a90a5" WORKDIR /synapse diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index b6ad141173..f7dac90222 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -158,6 +158,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = { "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$", "^/_matrix/client/(api/v1|r0|v3|unstable)/join/", "^/_matrix/client/(api/v1|r0|v3|unstable)/profile/", + "^/_matrix/client/(v1|unstable/org.matrix.msc2716)/rooms/.*/batch_send", ], "shared_extra_conf": {}, "worker_extra_conf": "", diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index f55a1fbb90..2b3714df66 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -422,8 +422,8 @@ same lightweight approach that the Linux Kernel [submitting patches process]( https://www.kernel.org/doc/html/latest/process/submitting-patches.html#sign-your-work-the-developer-s-certificate-of-origin>), [Docker](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), and many other -projects use: the DCO (Developer Certificate of Origin: -http://developercertificate.org/). This is a simple declaration that you wrote +projects use: the DCO ([Developer Certificate of Origin](http://developercertificate.org/)). +This is a simple declaration that you wrote the contribution or otherwise have the right to contribute it to Matrix: ``` diff --git a/docs/message_retention_policies.md b/docs/message_retention_policies.md index 9214d6d7e9..b52c4aaa24 100644 --- a/docs/message_retention_policies.md +++ b/docs/message_retention_policies.md @@ -117,7 +117,7 @@ In this example, we define three jobs: Note that this example is tailored to show different configurations and features slightly more jobs than it's probably necessary (in practice, a server admin would probably consider it better to replace the two last -jobs with one that runs once a day and handles rooms which which +jobs with one that runs once a day and handles rooms whose policy's `max_lifetime` is greater than 3 days). Keep in mind, when configuring these jobs, that a purge job can become diff --git a/docs/modules/spam_checker_callbacks.md b/docs/modules/spam_checker_callbacks.md index 71f6f9f0ab..ad35e667ed 100644 --- a/docs/modules/spam_checker_callbacks.md +++ b/docs/modules/spam_checker_callbacks.md @@ -11,29 +11,28 @@ The available spam checker callbacks are: ### `check_event_for_spam` _First introduced in Synapse v1.37.0_ -_Signature extended to support Allow and Code in Synapse v1.60.0_ -_Boolean and string return value types deprecated in Synapse v1.60.0_ + +_Changed in Synapse v1.60.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback.
Returning a boolean or a string is now deprecated._ ```python -async def check_event_for_spam(event: "synapse.module_api.EventBase") -> Union["synapse.module_api.ALLOW", "synapse.module_api.error.Codes", str, bool] +async def check_event_for_spam(event: "synapse.module_api.EventBase") -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", str, bool] ``` -Called when receiving an event from a client or via federation. The callback must return either: - - `synapse.module_api.ALLOW`, to allow the operation. Other callbacks - may still decide to reject it. - - `synapse.api.Codes` to reject the operation with an error code. In case - of doubt, `synapse.api.error.Codes.FORBIDDEN` is a good error code. - - (deprecated) a `str` to reject the operation and specify an error message. Note that clients +Called when receiving an event from a client or via federation. The callback must return one of: + - `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still + decide to reject it. + - `synapse.module_api.errors.Codes` to reject the operation with an error code. In case + of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code. + - (deprecated) a non-`Codes` `str` to reject the operation and specify an error message. Note that clients typically will not localize the error message to the user's preferred locale. - - (deprecated) on `False`, behave as `ALLOW`. Deprecated as confusing, as some - callbacks in expect `True` to allow and others `True` to reject. - - (deprecated) on `True`, behave as `synapse.api.error.Codes.FORBIDDEN`. Deprecated as confusing, as - some callbacks in expect `True` to allow and others `True` to reject. + - (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`. + - (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`. If multiple modules implement this callback, they will be considered in order. If a -callback returns `synapse.module_api.ALLOW`, Synapse falls through to the next one. The value of the -first callback that does not return `synapse.module_api.ALLOW` will be used. If this happens, Synapse -will not call any of the subsequent implementations of this callback. +callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one. +The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will +be used. If this happens, Synapse will not call any of the subsequent implementations of +this callback. ### `user_may_join_room` diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index ee98d193cb..56a25c534f 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -2216,7 +2216,9 @@ sso: password_config: - # Uncomment to disable password login + # Uncomment to disable password login. + # Set to `only_for_reauth` to permit reauthentication for users that + # have passwords and are already logged in. 
# #enabled: false @@ -2521,16 +2523,6 @@ push: # "events_default": 1 -# Uncomment to allow non-server-admin users to create groups on this server -# -#enable_group_creation: true - -# If enabled, non server admins can only create groups with local parts -# starting with this prefix -# -#group_creation_prefix: "unofficial_" - - # User Directory configuration # diff --git a/docs/structured_logging.md b/docs/structured_logging.md index a6667e1a11..d43dc9eb6e 100644 --- a/docs/structured_logging.md +++ b/docs/structured_logging.md @@ -43,7 +43,7 @@ loggers: The above logging config will set Synapse as 'INFO' logging level by default, with the SQL layer at 'WARNING', and will log to a file, stored as JSON. -It is also possible to figure Synapse to log to a remote endpoint by using the +It is also possible to configure Synapse to log to a remote endpoint by using the `synapse.logging.RemoteHandler` class included with Synapse. It takes the following arguments: diff --git a/docs/upgrade.md b/docs/upgrade.md index e7eadadb64..e3c64da17f 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -177,11 +177,11 @@ has queries that can be used to check a database for this problem in advance. -## SpamChecker API's `check_event_for_spam` has a new signature. +## New signature for the spam checker callback `check_event_for_spam` The previous signature has been deprecated. -Whereas `check_event_for_spam` callbacks used to return `Union[str, bool]`, they should now return `Union["synapse.module_api.Allow", "synapse.module_api.errors.Codes"]`. +Whereas `check_event_for_spam` callbacks used to return `Union[str, bool]`, they should now return `Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes"]`. This is part of an ongoing refactoring of the SpamChecker API to make it less ambiguous and more powerful. @@ -204,8 +204,8 @@ async def check_event_for_spam(event): # Event is spam, mark it as forbidden (you may use some more precise error # code if it is useful). return synapse.module_api.errors.Codes.FORBIDDEN - # Event is not spam, mark it as `ALLOW`. - return synapse.module_api.ALLOW + # Event is not spam, mark it as such. + return synapse.module_api.NOT_SPAM ``` # Upgrading to v1.59.0 diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 0f5bda32b9..1c75a23a36 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -575,6 +575,18 @@ Example configuration: dummy_events_threshold: 5 ``` --- +Config option: `delete_stale_devices_after` + +An optional duration. If set, Synapse will run a daily background task to log out and +delete any device that hasn't been accessed for more than the specified amount of time. + +Defaults to no duration, which means devices are never pruned. + +Example configuration: +```yaml +delete_stale_devices_after: 1y +``` + ## Homeserver blocking ## Useful options for Synapse admins. @@ -1447,7 +1459,7 @@ federation_rr_transactions_per_room_per_second: 40 ``` --- ## Media Store ## -Config options relating to Synapse media store. +Config options related to Synapse's media store. --- Config option: `enable_media_repo` @@ -1551,6 +1563,33 @@ thumbnail_sizes: height: 600 method: scale ``` +--- +Config option: `media_retention` + +Controls whether local media and entries in the remote media cache +(media that is downloaded from other homeservers) should be removed +under certain conditions, typically for the purpose of saving space.
+ +Purging media files will be carried out by the media worker +(that is, the worker that has the `enable_media_repo` homeserver config +option set to 'true'). This may be the main process. + +The `media_retention.local_media_lifetime` and +`media_retention.remote_media_lifetime` config options control whether +media will be purged if it has not been accessed in a given amount of +time. Note that media is 'accessed' when loaded in a room in a client, or +otherwise downloaded by a local or remote user. If the media has never +been accessed, the media's creation time is used instead. Both thumbnails +and the original media will be removed. If either of these options is unset, +then media of that type will not be purged. + +Example configuration: +```yaml +media_retention: + local_media_lifetime: 90d + remote_media_lifetime: 14d +``` +--- Config option: `url_preview_enabled` This setting determines whether the preview URL API is enabled. @@ -2930,6 +2969,9 @@ Use this setting to enable password-based logins. This setting has the following sub-options: * `enabled`: Defaults to true. + Set to false to disable password authentication. + Set to `only_for_reauth` to allow users with existing passwords to use them + to log in and reauthenticate, whilst preventing new users from setting passwords. * `localdb_enabled`: Set to false to disable authentication against the local password database. This is ignored if `enabled` is false, and is only useful if you have other `password_providers`. Defaults to true. @@ -3145,25 +3187,6 @@ Example configuration: encryption_enabled_by_default_for_room_type: invite ``` --- -Config option: `enable_group_creation` - -Set to true to allow non-server-admin users to create groups on this server - -Example configuration: -```yaml -enable_group_creation: true -``` ---- -Config option: `group_creation_prefix` - -If enabled/present, non-server admins can only create groups with local parts -starting with this prefix. - -Example configuration: -```yaml -group_creation_prefix: "unofficial_" -``` ---- Config option: `user_directory` This setting defines options related to the user directory. diff --git a/docs/workers.md b/docs/workers.md index 779069b817..78973a498c 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -1,6 +1,6 @@ # Scaling synapse via workers -For small instances it recommended to run Synapse in the default monolith mode. +For small instances it is recommended to run Synapse in the default monolith mode. For larger instances where performance is a concern it can be helpful to split out functionality into multiple separate python processes. These processes are called 'workers', and are (eventually) intended to scale horizontally @@ -193,7 +193,7 @@ information. ^/_matrix/federation/v1/user/devices/ ^/_matrix/federation/v1/get_groups_publicised$ ^/_matrix/key/v2/query - ^/_matrix/federation/(v1|unstable/org.matrix.msc2946)/hierarchy/ + ^/_matrix/federation/v1/hierarchy/ # Inbound federation transaction request ^/_matrix/federation/v1/send/ @@ -205,9 +205,11 @@ information.
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/context/.*$ ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$ ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$ - ^/_matrix/client/(v1|unstable/org.matrix.msc2946)/rooms/.*/hierarchy$ + ^/_matrix/client/v1/rooms/.*/hierarchy$ + ^/_matrix/client/unstable/org.matrix.msc2716/rooms/.*/batch_send$ ^/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$ ^/_matrix/client/(r0|v3|unstable)/account/3pid$ + ^/_matrix/client/(r0|v3|unstable)/account/whoami$ ^/_matrix/client/(r0|v3|unstable)/devices$ ^/_matrix/client/versions$ ^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$ @@ -237,9 +239,6 @@ information. ^/_matrix/client/(api/v1|r0|v3|unstable)/join/ ^/_matrix/client/(api/v1|r0|v3|unstable)/profile/ - # Device requests - ^/_matrix/client/(r0|v3|unstable)/sendToDevice/ - # Account data requests ^/_matrix/client/(r0|v3|unstable)/.*/tags ^/_matrix/client/(r0|v3|unstable)/.*/account_data diff --git a/poetry.lock b/poetry.lock index 23ee668aa4..efbdc7d2f9 100644 --- a/poetry.lock +++ b/poetry.lock @@ -824,7 +824,7 @@ python-versions = ">=3.5" [[package]] name = "pyjwt" -version = "2.3.0" +version = "2.4.0" description = "JSON Web Token implementation in Python" category = "main" optional = false @@ -1366,7 +1366,7 @@ python-versions = "*" [[package]] name = "types-jsonschema" -version = "4.4.1" +version = "4.4.6" description = "Typing stubs for jsonschema" category = "dev" optional = false @@ -2318,8 +2318,8 @@ pygments = [ {file = "Pygments-2.11.2.tar.gz", hash = "sha256:4e426f72023d88d03b2fa258de560726ce890ff3b630f88c21cbb8b2503b8c6a"}, ] pyjwt = [ - {file = "PyJWT-2.3.0-py3-none-any.whl", hash = "sha256:e0c4bb8d9f0af0c7f5b1ec4c5036309617d03d56932877f2f7a0beeb5318322f"}, - {file = "PyJWT-2.3.0.tar.gz", hash = "sha256:b888b4d56f06f6dcd777210c334e69c737be74755d3e5e9ee3fe67dc18a0ee41"}, + {file = "PyJWT-2.4.0-py3-none-any.whl", hash = "sha256:72d1d253f32dbd4f5c88eaf1fdc62f3a19f676ccbadb9dbc5d07e951b2b26daf"}, + {file = "PyJWT-2.4.0.tar.gz", hash = "sha256:d42908208c699b3b973cbeb01a969ba6a96c821eefb1c5bfe4c390c01d67abba"}, ] pymacaroons = [ {file = "pymacaroons-0.13.0-py2.py3-none-any.whl", hash = "sha256:3e14dff6a262fdbf1a15e769ce635a8aea72e6f8f91e408f9a97166c53b91907"}, @@ -2672,8 +2672,8 @@ types-ipaddress = [ {file = "types_ipaddress-1.0.8-py3-none-any.whl", hash = "sha256:4933b74da157ba877b1a705d64f6fa7742745e9ffd65e51011f370c11ebedb55"}, ] types-jsonschema = [ - {file = "types-jsonschema-4.4.1.tar.gz", hash = "sha256:bd68b75217ebbb33b0242db10047581dad3b061a963a46ee80d4a9044080663e"}, - {file = "types_jsonschema-4.4.1-py3-none-any.whl", hash = "sha256:ab3ecfdc912d6091cc82f4b7556cfbf1a7cbabc26da0ceaa1cbbc232d1d09971"}, + {file = "types-jsonschema-4.4.6.tar.gz", hash = "sha256:7f2a804618756768c7c0616f8c794b61fcfe3077c7ee1ad47dcf01c5e5f692bb"}, + {file = "types_jsonschema-4.4.6-py3-none-any.whl", hash = "sha256:1db9031ca49a8444d01bd2ce8cf2f89318382b04610953b108321e6f8fb03390"}, ] types-opentracing = [ {file = "types-opentracing-2.4.7.tar.gz", hash = "sha256:be60e9618355aa892571ace002e6b353702538b1c0dc4fbc1c921219d6658830"}, diff --git a/pyproject.toml b/pyproject.toml index fd29cb455f..f483db0fce 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,7 +54,7 @@ skip_gitignore = true [tool.poetry] name = "matrix-synapse" -version = "1.59.1" +version = "1.60.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" diff --git 
a/scripts-dev/complement.sh b/scripts-dev/complement.sh index ca476d9a5e..3c472c576e 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -45,7 +45,7 @@ docker build -t matrixdotorg/synapse -f "docker/Dockerfile" . extra_test_args=() -test_tags="synapse_blacklist,msc2716,msc3030" +test_tags="synapse_blacklist,msc2716,msc3030,msc3787" # If we're using workers, modify the docker files slightly. if [[ -n "$WORKERS" ]]; then diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index 12ff79f6e2..d7dfa92bd1 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -62,7 +62,7 @@ from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyBackground from synapse.storage.databases.main.events_bg_updates import ( EventsBackgroundUpdatesStore, ) -from synapse.storage.databases.main.group_server import GroupServerWorkerStore +from synapse.storage.databases.main.group_server import GroupServerStore from synapse.storage.databases.main.media_repository import ( MediaRepositoryBackgroundUpdateStore, ) @@ -211,7 +211,7 @@ class Store( PushRuleStore, PusherWorkerStore, PresenceBackgroundUpdateStore, - GroupServerWorkerStore, + GroupServerStore, ): def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]: return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs) diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 330de21f6b..f03fdd6dae 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -31,11 +31,6 @@ MAX_ALIAS_LENGTH = 255 # the maximum length for a user id is 255 characters MAX_USERID_LENGTH = 255 -# The maximum length for a group id is 255 characters -MAX_GROUPID_LENGTH = 255 -MAX_GROUP_CATEGORYID_LENGTH = 255 -MAX_GROUP_ROLEID_LENGTH = 255 - class Membership: @@ -142,7 +137,13 @@ class DeviceKeyAlgorithms: class EduTypes: - Presence: Final = "m.presence" + PRESENCE: Final = "m.presence" + TYPING: Final = "m.typing" + RECEIPT: Final = "m.receipt" + DEVICE_LIST_UPDATE: Final = "m.device_list_update" + SIGNING_KEY_UPDATE: Final = "m.signing_key_update" + UNSTABLE_SIGNING_KEY_UPDATE: Final = "org.matrix.signing_key_update" + DIRECT_TO_DEVICE: Final = "m.direct_to_device" class RejectedReason: diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 6650e826d5..cc7b785472 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -79,6 +79,13 @@ class Codes(str, Enum): WEAK_PASSWORD = "M_WEAK_PASSWORD" INVALID_SIGNATURE = "M_INVALID_SIGNATURE" USER_DEACTIVATED = "M_USER_DEACTIVATED" + + # The account has been suspended on the server. + # In contrast to `USER_DEACTIVATED`, this is a reversible measure + # that can possibly be appealed and reverted. + # Part of MSC3823. + USER_ACCOUNT_SUSPENDED = "ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED" + BAD_ALIAS = "M_BAD_ALIAS" # For restricted join rules. UNABLE_AUTHORISE_JOIN = "M_UNABLE_TO_AUTHORISE_JOIN" @@ -139,7 +146,13 @@ class SynapseError(CodeMessageException): errcode: Matrix error code e.g 'M_FORBIDDEN' """ - def __init__(self, code: int, msg: str, errcode: str = Codes.UNKNOWN): + def __init__( + self, + code: int, + msg: str, + errcode: str = Codes.UNKNOWN, + additional_fields: Optional[Dict] = None, + ): """Constructs a synapse error.
Args: @@ -149,9 +162,13 @@ class SynapseError(CodeMessageException): """ super().__init__(code, msg) self.errcode = errcode + if additional_fields is None: + self._additional_fields: Dict = {} + else: + self._additional_fields = dict(additional_fields) def error_dict(self) -> "JsonDict": - return cs_error(self.msg, self.errcode) + return cs_error(self.msg, self.errcode, **self._additional_fields) class InvalidAPICallError(SynapseError): @@ -176,14 +193,7 @@ class ProxiedRequestError(SynapseError): errcode: str = Codes.UNKNOWN, additional_fields: Optional[Dict] = None, ): - super().__init__(code, msg, errcode) - if additional_fields is None: - self._additional_fields: Dict = {} - else: - self._additional_fields = dict(additional_fields) - - def error_dict(self) -> "JsonDict": - return cs_error(self.msg, self.errcode, **self._additional_fields) + super().__init__(code, msg, errcode, additional_fields) class ConsentNotGivenError(SynapseError): diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index b91ce06de7..b007147519 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -33,7 +33,7 @@ from typing import ( import jsonschema from jsonschema import FormatChecker -from synapse.api.constants import EventContentFields +from synapse.api.constants import EduTypes, EventContentFields from synapse.api.errors import SynapseError from synapse.api.presence import UserPresenceState from synapse.events import EventBase @@ -347,7 +347,7 @@ class Filter: user_id = event.user_id field_matchers = { "senders": lambda v: user_id == v, - "types": lambda v: "m.presence" == v, + "types": lambda v: EduTypes.PRESENCE == v, } return self._check_fields(field_matchers) else: diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index 2a4c2e59cd..6fedf681f8 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -37,7 +37,6 @@ from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore from synapse.replication.slave.storage.devices import SlavedDeviceStore from synapse.replication.slave.storage.events import SlavedEventStore from synapse.replication.slave.storage.filtering import SlavedFilteringStore -from synapse.replication.slave.storage.groups import SlavedGroupServerStore from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore from synapse.replication.slave.storage.receipts import SlavedReceiptsStore from synapse.replication.slave.storage.registration import SlavedRegistrationStore @@ -55,7 +54,6 @@ class AdminCmdSlavedStore( SlavedApplicationServiceStore, SlavedRegistrationStore, SlavedFilteringStore, - SlavedGroupServerStore, SlavedDeviceInboxStore, SlavedDeviceStore, SlavedPushRuleStore, diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 2a9480a5c1..89f8998f0e 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -58,7 +58,6 @@ from synapse.replication.slave.storage.devices import SlavedDeviceStore from synapse.replication.slave.storage.directory import DirectoryStore from synapse.replication.slave.storage.events import SlavedEventStore from synapse.replication.slave.storage.filtering import SlavedFilteringStore -from synapse.replication.slave.storage.groups import SlavedGroupServerStore from synapse.replication.slave.storage.keys import SlavedKeyStore from synapse.replication.slave.storage.profile import SlavedProfileStore from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore @@ -69,7 +68,6 @@ from synapse.rest.admin import 
register_servlets_for_media_repo from synapse.rest.client import ( account_data, events, - groups, initial_sync, login, presence, @@ -78,6 +76,7 @@ from synapse.rest.client import ( read_marker, receipts, room, + room_batch, room_keys, sendtodevice, sync, @@ -87,7 +86,7 @@ from synapse.rest.client import ( voip, ) from synapse.rest.client._base import client_patterns -from synapse.rest.client.account import ThreepidRestServlet +from synapse.rest.client.account import ThreepidRestServlet, WhoamiRestServlet from synapse.rest.client.devices import DevicesRestServlet from synapse.rest.client.keys import ( KeyChangesServlet, @@ -233,7 +232,6 @@ class GenericWorkerSlavedStore( SlavedDeviceStore, SlavedReceiptsStore, SlavedPushRuleStore, - SlavedGroupServerStore, SlavedAccountDataStore, SlavedPusherStore, CensorEventsStore, @@ -289,6 +287,7 @@ class GenericWorkerServer(HomeServer): RegistrationTokenValidityRestServlet(self).register(resource) login.register_servlets(self, resource) ThreepidRestServlet(self).register(resource) + WhoamiRestServlet(self).register(resource) DevicesRestServlet(self).register(resource) # Read-only @@ -308,6 +307,7 @@ class GenericWorkerServer(HomeServer): room.register_servlets(self, resource, is_worker=True) room.register_deprecated_servlets(self, resource) initial_sync.register_servlets(self, resource) + room_batch.register_servlets(self, resource) room_keys.register_servlets(self, resource) tags.register_servlets(self, resource) account_data.register_servlets(self, resource) @@ -320,9 +320,6 @@ class GenericWorkerServer(HomeServer): presence.register_servlets(self, resource) - if self.config.experimental.groups_enabled: - groups.register_servlets(self, resource) - resources.update({CLIENT_API_PREFIX: resource}) resources.update(build_synapse_client_resource_tree(self)) diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index a610fb785d..ed92c2e910 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py @@ -23,13 +23,7 @@ from netaddr import IPSet from synapse.api.constants import EventTypes from synapse.events import EventBase -from synapse.types import ( - DeviceListUpdates, - GroupID, - JsonDict, - UserID, - get_domain_from_id, -) +from synapse.types import DeviceListUpdates, JsonDict, UserID from synapse.util.caches.descriptors import _CacheContext, cached if TYPE_CHECKING: @@ -55,7 +49,6 @@ class ApplicationServiceState(Enum): @attr.s(slots=True, frozen=True, auto_attribs=True) class Namespace: exclusive: bool - group_id: Optional[str] regex: Pattern[str] @@ -141,30 +134,13 @@ class ApplicationService: exclusive = regex_obj.get("exclusive") if not isinstance(exclusive, bool): raise ValueError("Expected bool for 'exclusive' in ns '%s'" % ns) - group_id = regex_obj.get("group_id") - if group_id: - if not isinstance(group_id, str): - raise ValueError( - "Expected string for 'group_id' in ns '%s'" % ns - ) - try: - GroupID.from_string(group_id) - except Exception: - raise ValueError( - "Expected valid group ID for 'group_id' in ns '%s'" % ns - ) - - if get_domain_from_id(group_id) != self.server_name: - raise ValueError( - "Expected 'group_id' to be this host in ns '%s'" % ns - ) regex = regex_obj.get("regex") if not isinstance(regex, str): raise ValueError("Expected string for 'regex' in ns '%s'" % ns) # Pre-compile regex. 
- result[ns].append(Namespace(exclusive, group_id, re.compile(regex))) + result[ns].append(Namespace(exclusive, re.compile(regex))) return result @@ -369,21 +345,6 @@ class ApplicationService: if namespace.exclusive ] - def get_groups_for_user(self, user_id: str) -> Iterable[str]: - """Get the groups that this user is associated with by this AS - - Args: - user_id: The ID of the user. - - Returns: - An iterable that yields group_id strings. - """ - return ( - namespace.group_id - for namespace in self.namespaces[ApplicationService.NS_USERS] - if namespace.group_id and namespace.regex.match(user_id) - ) - def is_rate_limited(self) -> bool: return self.rate_limited diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index d19f8dd996..df1c214462 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -14,7 +14,7 @@ # limitations under the License. import logging import urllib.parse -from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple +from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Mapping, Optional, Tuple from prometheus_client import Counter from typing_extensions import TypeGuard @@ -155,6 +155,9 @@ class ApplicationServiceApi(SimpleHttpClient): if service.url is None: return [] + # This is required by the configuration. + assert service.hs_token is not None + uri = "%s%s/thirdparty/%s/%s" % ( service.url, APP_SERVICE_PREFIX, @@ -162,7 +165,11 @@ class ApplicationServiceApi(SimpleHttpClient): urllib.parse.quote(protocol), ) try: - response = await self.get_json(uri, fields) + args: Mapping[Any, Any] = { + **fields, + b"access_token": service.hs_token, + } + response = await self.get_json(uri, args=args) if not isinstance(response, list): logger.warning( "query_3pe to %s returned an invalid response %r", uri, response @@ -190,13 +197,15 @@ class ApplicationServiceApi(SimpleHttpClient): return {} async def _get() -> Optional[JsonDict]: + # This is required by the configuration. + assert service.hs_token is not None uri = "%s%s/thirdparty/protocol/%s" % ( service.url, APP_SERVICE_PREFIX, urllib.parse.quote(protocol), ) try: - info = await self.get_json(uri) + info = await self.get_json(uri, {"access_token": service.hs_token}) if not _is_valid_3pe_metadata(info): logger.warning( diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index 3b49e60716..de5e5216c2 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -384,6 +384,11 @@ class _TransactionController: device_list_summary: The device list summary to include in the transaction. 
""" try: + service_is_up = await self._is_service_up(service) + # Don't create empty txns when in recovery mode (ephemeral events are dropped) + if not service_is_up and not events: + return + txn = await self.store.create_appservice_txn( service=service, events=events, @@ -393,7 +398,6 @@ class _TransactionController: unused_fallback_keys=unused_fallback_keys or {}, device_list_summary=device_list_summary or DeviceListUpdates(), ) - service_is_up = await self._is_service_up(service) if service_is_up: sent = await txn.send(self.as_api) if sent: diff --git a/synapse/config/_base.pyi b/synapse/config/_base.pyi index 71d6655fda..01ea2b4dab 100644 --- a/synapse/config/_base.pyi +++ b/synapse/config/_base.pyi @@ -32,7 +32,6 @@ from synapse.config import ( emailconfig, experimental, federation, - groups, jwt, key, logger, @@ -107,7 +106,6 @@ class RootConfig: push: push.PushConfig spamchecker: spam_checker.SpamCheckerConfig room: room.RoomConfig - groups: groups.GroupsConfig userdirectory: user_directory.UserDirectoryConfig consent: consent.ConsentConfig stats: stats.StatsConfig diff --git a/synapse/config/auth.py b/synapse/config/auth.py index bb417a2359..265a554a5d 100644 --- a/synapse/config/auth.py +++ b/synapse/config/auth.py @@ -29,7 +29,18 @@ class AuthConfig(Config): if password_config is None: password_config = {} - self.password_enabled = password_config.get("enabled", True) + passwords_enabled = password_config.get("enabled", True) + # 'only_for_reauth' allows users who have previously set a password to use it, + # even though passwords would otherwise be disabled. + passwords_for_reauth_only = passwords_enabled == "only_for_reauth" + + self.password_enabled_for_login = ( + passwords_enabled and not passwords_for_reauth_only + ) + self.password_enabled_for_reauth = ( + passwords_for_reauth_only or passwords_enabled + ) + self.password_localdb_enabled = password_config.get("localdb_enabled", True) self.password_pepper = password_config.get("pepper", "") @@ -46,7 +57,9 @@ class AuthConfig(Config): def generate_config_section(self, **kwargs: Any) -> str: return """\ password_config: - # Uncomment to disable password login + # Uncomment to disable password login. + # Set to `only_for_reauth` to permit reauthentication for users that + # have passwords and are already logged in. # #enabled: false diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index b20d949689..f2dfd49b07 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -73,9 +73,6 @@ class ExperimentalConfig(Config): # MSC3720 (Account status endpoint) self.msc3720_enabled: bool = experimental.get("msc3720_enabled", False) - # The deprecated groups feature. - self.groups_enabled: bool = experimental.get("groups_enabled", False) - # MSC2654: Unread counts self.msc2654_enabled: bool = experimental.get("msc2654_enabled", False) @@ -84,3 +81,6 @@ class ExperimentalConfig(Config): # MSC3786 (Add a default push rule to ignore m.room.server_acl events) self.msc3786_enabled: bool = experimental.get("msc3786_enabled", False) + + # MSC3772: A push rule for mutual relations. 
+ self.msc3772_enabled: bool = experimental.get("msc3772_enabled", False) diff --git a/synapse/config/groups.py b/synapse/config/groups.py deleted file mode 100644 index c9b9c6daad..0000000000 --- a/synapse/config/groups.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2017 New Vector Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import Any - -from synapse.types import JsonDict - -from ._base import Config - - -class GroupsConfig(Config): - section = "groups" - - def read_config(self, config: JsonDict, **kwargs: Any) -> None: - self.enable_group_creation = config.get("enable_group_creation", False) - self.group_creation_prefix = config.get("group_creation_prefix", "") - - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - # Uncomment to allow non-server-admin users to create groups on this server - # - #enable_group_creation: true - - # If enabled, non server admins can only create groups with local parts - # starting with this prefix - # - #group_creation_prefix: "unofficial_" - """ diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index a4ec706908..4d2b298a70 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -25,7 +25,6 @@ from .database import DatabaseConfig from .emailconfig import EmailConfig from .experimental import ExperimentalConfig from .federation import FederationConfig -from .groups import GroupsConfig from .jwt import JWTConfig from .key import KeyConfig from .logger import LoggingConfig @@ -89,7 +88,6 @@ class HomeServerConfig(RootConfig): PushConfig, SpamCheckerConfig, RoomConfig, - GroupsConfig, UserDirectoryConfig, ConsentConfig, StatsConfig, diff --git a/synapse/config/repository.py b/synapse/config/repository.py index 98d8a16621..f9c55143c3 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -223,6 +223,22 @@ class ContentRepositoryConfig(Config): "url_preview_accept_language" ) or ["en"] + media_retention = config.get("media_retention") or {} + + self.media_retention_local_media_lifetime_ms = None + local_media_lifetime = media_retention.get("local_media_lifetime") + if local_media_lifetime is not None: + self.media_retention_local_media_lifetime_ms = self.parse_duration( + local_media_lifetime + ) + + self.media_retention_remote_media_lifetime_ms = None + remote_media_lifetime = media_retention.get("remote_media_lifetime") + if remote_media_lifetime is not None: + self.media_retention_remote_media_lifetime_ms = self.parse_duration( + remote_media_lifetime + ) + def generate_config_section(self, data_dir_path: str, **kwargs: Any) -> str: assert data_dir_path is not None media_store = os.path.join(data_dir_path, "media_store") diff --git a/synapse/config/server.py b/synapse/config/server.py index f73d5e1f66..657322cb1f 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -679,6 +679,17 @@ class ServerConfig(Config): config.get("exclude_rooms_from_sync") or [] ) + delete_stale_devices_after: Optional[str] = ( + 
config.get("delete_stale_devices_after") or None + ) + + if delete_stale_devices_after is not None: + self.delete_stale_devices_after: Optional[int] = self.parse_duration( + delete_stale_devices_after + ) + else: + self.delete_stale_devices_after = None + def has_tls_listener(self) -> bool: return any(listener.tls for listener in self.listeners) diff --git a/synapse/config/tracer.py b/synapse/config/tracer.py index 3472a9a01b..ae68a3dd1a 100644 --- a/synapse/config/tracer.py +++ b/synapse/config/tracer.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Set +from typing import Any, List, Set from synapse.types import JsonDict from synapse.util.check_dependencies import DependencyException, check_requirements @@ -49,7 +49,9 @@ class TracerConfig(Config): # The tracer is enabled so sanitize the config - self.opentracer_whitelist = opentracing_config.get("homeserver_whitelist", []) + self.opentracer_whitelist: List[str] = opentracing_config.get( + "homeserver_whitelist", [] + ) if not isinstance(self.opentracer_whitelist, list): raise ConfigError("Tracer homeserver_whitelist config is malformed") diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index 7a91544119..b700cbbfa1 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -22,7 +22,7 @@ from synapse.events import EventBase from synapse.types import JsonDict, StateMap if TYPE_CHECKING: - from synapse.storage import Storage + from synapse.storage.controllers import StorageControllers from synapse.storage.databases.main import DataStore from synapse.storage.state import StateFilter @@ -84,7 +84,7 @@ class EventContext: incomplete state. """ - _storage: "Storage" + _storage: "StorageControllers" rejected: Union[Literal[False], str] = False _state_group: Optional[int] = None state_group_before_event: Optional[int] = None @@ -97,7 +97,7 @@ class EventContext: @staticmethod def with_state( - storage: "Storage", + storage: "StorageControllers", state_group: Optional[int], state_group_before_event: Optional[int], state_delta_due_to_event: Optional[StateMap[str]], @@ -117,7 +117,7 @@ class EventContext: @staticmethod def for_outlier( - storage: "Storage", + storage: "StorageControllers", ) -> "EventContext": """Return an EventContext instance suitable for persisting an outlier event""" return EventContext(storage=storage) @@ -147,7 +147,7 @@ class EventContext: } @staticmethod - def deserialize(storage: "Storage", input: JsonDict) -> "EventContext": + def deserialize(storage: "StorageControllers", input: JsonDict) -> "EventContext": """Converts a dict that was produced by `serialize` back into a EventContext. 
diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index 7984874e21..d2e06c754e 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -21,6 +21,7 @@ from typing import ( Awaitable, Callable, Collection, + Dict, List, Optional, Tuple, @@ -30,7 +31,7 @@ from typing import ( from synapse.api.errors import Codes from synapse.rest.media.v1._base import FileInfo from synapse.rest.media.v1.media_storage import ReadableFileWrapper -from synapse.spam_checker_api import Allow, Decision, RegistrationBehaviour +from synapse.spam_checker_api import RegistrationBehaviour from synapse.types import RoomAlias, UserProfile from synapse.util.async_helpers import delay_cancellation, maybe_awaitable from synapse.util.metrics import Measure @@ -41,17 +42,19 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) - CHECK_EVENT_FOR_SPAM_CALLBACK = Callable[ ["synapse.events.EventBase"], Awaitable[ Union[ - Allow, + str, Codes, + # Highly experimental, not officially part of the spamchecker API, may + # disappear without warning depending on the results of ongoing + # experiments. + # Use this to return additional information as part of an error. + Tuple[Codes, Dict], # Deprecated bool, - # Deprecated - str, ] ], ] @@ -178,6 +181,8 @@ def load_legacy_spam_checkers(hs: "synapse.server.HomeServer") -> None: class SpamChecker: + NOT_SPAM = "NOT_SPAM" + def __init__(self, hs: "synapse.server.HomeServer") -> None: self.hs = hs self.clock = hs.get_clock() @@ -270,7 +275,7 @@ class SpamChecker: async def check_event_for_spam( self, event: "synapse.events.EventBase" - ) -> Union[Decision, str]: + ) -> Union[Tuple[Codes, Dict], str]: """Checks if a given event is considered "spammy" by this server. If the server considers an event spammy, then it will be rejected if @@ -281,22 +286,20 @@ class SpamChecker: event: the event to be checked Returns: - - on `ALLOW`, the event is considered good (non-spammy) and should - be let through. Other spamcheck filters may still reject it. - - on `Code`, the event is considered spammy and is rejected with a specific + - `NOT_SPAM` if the event is considered good (non-spammy) and should be let + through. Other spamcheck filters may still reject it. + - A `Code` if the event is considered spammy and is rejected with a specific error message/code. - - on `str`, the event is considered spammy and the string is used as error - message. This usage is generally discouraged as it doesn't support - internationalization. + - A string that isn't `NOT_SPAM` if the event is considered spammy and the + string should be used as the client-facing error message. This usage is + generally discouraged as it doesn't support internationalization. """ for callback in self._check_event_for_spam_callbacks: with Measure( self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) ): - res: Union[Decision, str, bool] = await delay_cancellation( - callback(event) - ) - if res is False or res is Allow.ALLOW: + res = await delay_cancellation(callback(event)) + if res is False or res == self.NOT_SPAM: # This spam-checker accepts the event. # Other spam-checkers may reject it, though. continue @@ -304,13 +307,23 @@ class SpamChecker: # This spam-checker rejects the event with deprecated # return value `True` return Codes.FORBIDDEN + elif not isinstance(res, str): + # mypy complains that we can't reach this code because of the + # return type in CHECK_EVENT_FOR_SPAM_CALLBACK, but we don't know + # for sure that the module actually returns it. 
+ logger.warning( + "Module returned invalid value, rejecting message as spam" + ) + res = "This message has been rejected as probable spam" else: - # This spam-checker rejects the event either with a `str` - # or with a `Codes`. In either case, we stop here. - return res + # The module rejected the event either with a `Codes` + # or some other `str`. In either case, we stop here. + pass + + return res # No spam-checker has rejected the event, let it pass. - return Allow.ALLOW + return self.NOT_SPAM async def should_drop_federated_event( self, event: "synapse.events.EventBase" diff --git a/synapse/events/validator.py b/synapse/events/validator.py index 360d24274a..29fa9b3880 100644 --- a/synapse/events/validator.py +++ b/synapse/events/validator.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import collections.abc -from typing import Iterable, Type, Union +from typing import Iterable, Type, Union, cast import jsonschema @@ -103,7 +103,12 @@ class EventValidator: except jsonschema.ValidationError as e: if e.path: # example: "users_default": '0' is not of type 'integer' - message = '"' + e.path[-1] + '": ' + e.message # noqa: B306 + # cast safety: path entries can be integers, if we fail to validate + # items in an array. However the POWER_LEVELS_SCHEMA doesn't expect + # to see any arrays. + message = ( + '"' + cast(str, e.path[-1]) + '": ' + e.message # noqa: B306 + ) # jsonschema.ValidationError.message is a valid attribute else: # example: '0' is not of type 'integer' diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 1e866b19d8..a6232e048b 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -15,7 +15,6 @@ import logging from typing import TYPE_CHECKING -import synapse from synapse.api.constants import MAX_DEPTH, EventContentFields, EventTypes, Membership from synapse.api.errors import Codes, SynapseError from synapse.api.room_versions import EventFormatVersions, RoomVersion @@ -33,6 +32,18 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) +class InvalidEventSignatureError(RuntimeError): + """Raised when the signature on an event is invalid. + + The stringification of this exception is just the error message without reference + to the event id. The event id is available as a property. + """ + + def __init__(self, message: str, event_id: str): + super().__init__(message) + self.event_id = event_id + + class FederationBase: def __init__(self, hs: "HomeServer"): self.hs = hs @@ -60,20 +71,13 @@ class FederationBase: Returns: * the original event if the checks pass * a redacted version of the event (if the signature - matched but the hash did not) + matched but the hash did not). In this case a warning will be logged. Raises: - SynapseError if the signature check failed. + InvalidEventSignatureError if the signature check failed. Nothing + will be logged in this case. 
""" - try: - await _check_sigs_on_pdu(self.keyring, room_version, pdu) - except SynapseError as e: - logger.warning( - "Signature check failed for %s: %s", - pdu.event_id, - e, - ) - raise + await _check_sigs_on_pdu(self.keyring, room_version, pdu) if not check_event_content_hash(pdu): # let's try to distinguish between failures because the event was @@ -88,7 +92,7 @@ class FederationBase: if set(redacted_event.keys()) == set(pdu.keys()) and set( redacted_event.content.keys() ) == set(pdu.content.keys()): - logger.info( + logger.debug( "Event %s seems to have been redacted; using our redacted copy", pdu.event_id, ) @@ -101,7 +105,7 @@ class FederationBase: spam_check = await self.spam_checker.check_event_for_spam(pdu) - if spam_check is not synapse.spam_checker_api.Allow.ALLOW: + if spam_check != self.spam_checker.NOT_SPAM: logger.warning("Event contains spam, soft-failing %s", pdu.event_id) # we redact (to save disk space) as well as soft-failing (to stop # using the event in prev_events). @@ -117,12 +121,13 @@ async def _check_sigs_on_pdu( ) -> None: """Check that the given events are correctly signed - Raise a SynapseError if the event wasn't correctly signed. - Args: keyring: keyring object to do the checks room_version: the room version of the PDUs pdus: the events to be checked + + Raises: + InvalidEventSignatureError if the event wasn't correctly signed. """ # we want to check that the event is signed by: @@ -148,44 +153,38 @@ async def _check_sigs_on_pdu( # First we check that the sender event is signed by the sender's domain # (except if its a 3pid invite, in which case it may be sent by any server) + sender_domain = get_domain_from_id(pdu.sender) if not _is_invite_via_3pid(pdu): try: await keyring.verify_event_for_server( - get_domain_from_id(pdu.sender), + sender_domain, pdu, pdu.origin_server_ts if room_version.enforce_key_validity else 0, ) except Exception as e: - errmsg = "event id %s: unable to verify signature for sender %s: %s" % ( + raise InvalidEventSignatureError( + f"unable to verify signature for sender domain {sender_domain}: {e}", pdu.event_id, - get_domain_from_id(pdu.sender), - e, - ) - raise SynapseError(403, errmsg, Codes.FORBIDDEN) + ) from None # now let's look for events where the sender's domain is different to the # event id's domain (normally only the case for joins/leaves), and add additional # checks. Only do this if the room version has a concept of event ID domain # (ie, the room version uses old-style non-hash event IDs). 
- if room_version.event_format == EventFormatVersions.V1 and get_domain_from_id( - pdu.event_id - ) != get_domain_from_id(pdu.sender): - try: - await keyring.verify_event_for_server( - get_domain_from_id(pdu.event_id), - pdu, - pdu.origin_server_ts if room_version.enforce_key_validity else 0, - ) - except Exception as e: - errmsg = ( - "event id %s: unable to verify signature for event id domain %s: %s" - % ( - pdu.event_id, - get_domain_from_id(pdu.event_id), - e, + if room_version.event_format == EventFormatVersions.V1: + event_domain = get_domain_from_id(pdu.event_id) + if event_domain != sender_domain: + try: + await keyring.verify_event_for_server( + event_domain, + pdu, + pdu.origin_server_ts if room_version.enforce_key_validity else 0, ) - ) - raise SynapseError(403, errmsg, Codes.FORBIDDEN) + except Exception as e: + raise InvalidEventSignatureError( + f"unable to verify signature for event domain {event_domain}: {e}", + pdu.event_id, + ) from None # If this is a join event for a restricted room it may have been authorised # via a different server from the sending server. Check those signatures. @@ -205,15 +204,10 @@ async def _check_sigs_on_pdu( pdu.origin_server_ts if room_version.enforce_key_validity else 0, ) except Exception as e: - errmsg = ( - "event id %s: unable to verify signature for authorising server %s: %s" - % ( - pdu.event_id, - authorising_server, - e, - ) - ) - raise SynapseError(403, errmsg, Codes.FORBIDDEN) + raise InvalidEventSignatureError( + f"unable to verify signature for authorising server {authorising_server}: {e}", + pdu.event_id, + ) from None def _is_invite_via_3pid(event: EventBase) -> bool: diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 17eff60909..ad475a913b 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -54,7 +54,11 @@ from synapse.api.room_versions import ( RoomVersions, ) from synapse.events import EventBase, builder -from synapse.federation.federation_base import FederationBase, event_from_pdu_json +from synapse.federation.federation_base import ( + FederationBase, + InvalidEventSignatureError, + event_from_pdu_json, +) from synapse.federation.transport.client import SendJoinResponse from synapse.http.types import QueryParams from synapse.types import JsonDict, UserID, get_domain_from_id @@ -319,7 +323,13 @@ class FederationClient(FederationBase): pdu = pdu_list[0] # Check signatures are correct. - signed_pdu = await self._check_sigs_and_hash(room_version, pdu) + try: + signed_pdu = await self._check_sigs_and_hash(room_version, pdu) + except InvalidEventSignatureError as e: + errmsg = f"event id {pdu.event_id}: {e}" + logger.warning("%s", errmsg) + raise SynapseError(403, errmsg, Codes.FORBIDDEN) + return signed_pdu return None @@ -405,6 +415,9 @@ class FederationClient(FederationBase): Returns: a tuple of (state event_ids, auth event_ids) + + Raises: + InvalidResponseError: if fields in the response have the wrong type.
""" result = await self.transport_layer.get_room_state_ids( destination, room_id, event_id=event_id @@ -416,7 +429,7 @@ class FederationClient(FederationBase): if not isinstance(state_event_ids, list) or not isinstance( auth_event_ids, list ): - raise Exception("invalid response from /state_ids") + raise InvalidResponseError("invalid response from /state_ids") return state_event_ids, auth_event_ids @@ -552,20 +565,24 @@ class FederationClient(FederationBase): Returns: The PDU (possibly redacted) if it has valid signatures and hashes. + None if no valid copy could be found. """ - res = None try: - res = await self._check_sigs_and_hash(room_version, pdu) - except SynapseError: - pass - - if not res: - # Check local db. - res = await self.store.get_event( - pdu.event_id, allow_rejected=True, allow_none=True + return await self._check_sigs_and_hash(room_version, pdu) + except InvalidEventSignatureError as e: + logger.warning( + "Signature on retrieved event %s was invalid (%s). " + "Checking local store/orgin server", + pdu.event_id, + e, ) + # Check local db. + res = await self.store.get_event( + pdu.event_id, allow_rejected=True, allow_none=True + ) + pdu_origin = get_domain_from_id(pdu.sender) if not res and pdu_origin != origin: try: @@ -1040,9 +1057,14 @@ class FederationClient(FederationBase): pdu = event_from_pdu_json(pdu_dict, room_version) # Check signatures are correct. - pdu = await self._check_sigs_and_hash(room_version, pdu) + try: + pdu = await self._check_sigs_and_hash(room_version, pdu) + except InvalidEventSignatureError as e: + errmsg = f"event id {pdu.event_id}: {e}" + logger.warning("%s", errmsg) + raise SynapseError(403, errmsg, Codes.FORBIDDEN) - # FIXME: We should handle signature failures more gracefully. + # FIXME: We should handle signature failures more gracefully. 
return pdu diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index b8232e5257..12591dc8db 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -48,7 +48,11 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion from synapse.crypto.event_signing import compute_event_signature from synapse.events import EventBase from synapse.events.snapshot import EventContext -from synapse.federation.federation_base import FederationBase, event_from_pdu_json +from synapse.federation.federation_base import ( + FederationBase, + InvalidEventSignatureError, + event_from_pdu_json, +) from synapse.federation.persistence import TransactionActions from synapse.federation.units import Edu, Transaction from synapse.http.servlet import assert_params_in_dict @@ -109,7 +113,6 @@ class FederationServer(FederationBase): super().__init__(hs) self.handler = hs.get_federation_handler() - self.storage = hs.get_storage() self._spam_checker = hs.get_spam_checker() self._federation_event_handler = hs.get_federation_event_handler() self.state = hs.get_state_handler() @@ -632,7 +635,12 @@ class FederationServer(FederationBase): pdu = event_from_pdu_json(content, room_version) origin_host, _ = parse_server_name(origin) await self.check_server_matches_acl(origin_host, pdu.room_id) - pdu = await self._check_sigs_and_hash(room_version, pdu) + try: + pdu = await self._check_sigs_and_hash(room_version, pdu) + except InvalidEventSignatureError as e: + errmsg = f"event id {pdu.event_id}: {e}" + logger.warning("%s", errmsg) + raise SynapseError(403, errmsg, Codes.FORBIDDEN) ret_pdu = await self.handler.on_invite_request(origin, pdu, room_version) time_now = self._clock.time_msec() return {"event": ret_pdu.get_pdu_json(time_now)} @@ -865,7 +873,12 @@ class FederationServer(FederationBase): ) ) - event = await self._check_sigs_and_hash(room_version, event) + try: + event = await self._check_sigs_and_hash(room_version, event) + except InvalidEventSignatureError as e: + errmsg = f"event id {event.event_id}: {e}" + logger.warning("%s", errmsg) + raise SynapseError(403, errmsg, Codes.FORBIDDEN) return await self._federation_event_handler.on_send_membership_event( origin, event @@ -1017,8 +1030,9 @@ class FederationServer(FederationBase): # Check signature. 
try: pdu = await self._check_sigs_and_hash(room_version, pdu) - except SynapseError as e: - raise FederationError("ERROR", e.code, e.msg, affected=pdu.event_id) + except InvalidEventSignatureError as e: + logger.warning("event id %s: %s", pdu.event_id, e) + raise FederationError("ERROR", 403, str(e), affected=pdu.event_id) if await self._spam_checker.should_drop_federated_event(pdu): logger.warning( @@ -1353,7 +1367,7 @@ class FederationHandlerRegistry: self._edu_type_to_instance[edu_type] = instance_names async def on_edu(self, edu_type: str, origin: str, content: dict) -> None: - if not self.config.server.use_presence and edu_type == EduTypes.Presence: + if not self.config.server.use_presence and edu_type == EduTypes.PRESENCE: return # Check if we have a handler on this instance diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index d80f0ac5e8..333ca9a97f 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -21,6 +21,7 @@ from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Tupl import attr from prometheus_client import Counter +from synapse.api.constants import EduTypes from synapse.api.errors import ( FederationDeniedError, HttpResponseException, @@ -223,7 +224,7 @@ class PerDestinationQueue: """Marks that the destination has new data to send, without starting a new transaction. - If a transaction loop is already in progress then a new transcation will + If a transaction loop is already in progress then a new transaction will be attempted when the current one finishes. """ @@ -542,7 +543,7 @@ class PerDestinationQueue: edu = Edu( origin=self._server_name, destination=self._destination, - edu_type="m.receipt", + edu_type=EduTypes.RECEIPT, content=self._pending_rrs, ) self._pending_rrs = {} @@ -592,7 +593,7 @@ class PerDestinationQueue: Edu( origin=self._server_name, destination=self._destination, - edu_type="m.direct_to_device", + edu_type=EduTypes.DIRECT_TO_DEVICE, content=content, ) for content in contents @@ -670,7 +671,7 @@ class _TransactionQueueManager: Edu( origin=self.queue._server_name, destination=self.queue._destination, - edu_type="m.presence", + edu_type=EduTypes.PRESENCE, content={ "push": [ format_user_presence_state( diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py index 0c1cad86ab..75081810fd 100644 --- a/synapse/federation/sender/transaction_manager.py +++ b/synapse/federation/sender/transaction_manager.py @@ -16,6 +16,7 @@ from typing import TYPE_CHECKING, List from prometheus_client import Gauge +from synapse.api.constants import EduTypes from synapse.api.errors import HttpResponseException from synapse.events import EventBase from synapse.federation.persistence import TransactionActions @@ -126,7 +127,10 @@ class TransactionManager: len(edus), ) if issue_8631_logger.isEnabledFor(logging.DEBUG): - DEVICE_UPDATE_EDUS = {"m.device_list_update", "m.signing_key_update"} + DEVICE_UPDATE_EDUS = { + EduTypes.DEVICE_LIST_UPDATE, + EduTypes.SIGNING_KEY_UPDATE, + } device_list_updates = [ edu.content for edu in edus if edu.edu_type in DEVICE_UPDATE_EDUS ] diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 9ce06dfa28..9e84bd677e 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -17,7 +17,6 @@ import logging import urllib from typing import ( Any, - Awaitable, 
Callable, Collection, Dict, @@ -49,11 +48,6 @@ from synapse.types import JsonDict logger = logging.getLogger(__name__) -# Send join responses can be huge, so we set a separate limit here. The response -# is parsed in a streaming manner, which helps alleviate the issue of memory -# usage a bit. -MAX_RESPONSE_SIZE_SEND_JOIN = 500 * 1024 * 1024 - class TransportLayerClient: """Sends federation HTTP requests to other servers""" @@ -349,7 +343,6 @@ class TransportLayerClient: path=path, data=content, parser=SendJoinParser(room_version, v1_api=True), - max_response_size=MAX_RESPONSE_SIZE_SEND_JOIN, ) async def send_join_v2( @@ -372,7 +365,6 @@ class TransportLayerClient: args=query_params, data=content, parser=SendJoinParser(room_version, v1_api=False), - max_response_size=MAX_RESPONSE_SIZE_SEND_JOIN, ) async def send_leave_v1( @@ -688,488 +680,6 @@ class TransportLayerClient: timeout=timeout, ) - async def get_group_profile( - self, destination: str, group_id: str, requester_user_id: str - ) -> JsonDict: - """Get a group profile""" - path = _create_v1_path("/groups/%s/profile", group_id) - - return await self.client.get_json( - destination=destination, - path=path, - args={"requester_user_id": requester_user_id}, - ignore_backoff=True, - ) - - async def update_group_profile( - self, destination: str, group_id: str, requester_user_id: str, content: JsonDict - ) -> JsonDict: - """Update a remote group profile - - Args: - destination - group_id - requester_user_id - content: The new profile of the group - """ - path = _create_v1_path("/groups/%s/profile", group_id) - - return self.client.post_json( - destination=destination, - path=path, - args={"requester_user_id": requester_user_id}, - data=content, - ignore_backoff=True, - ) - - async def get_group_summary( - self, destination: str, group_id: str, requester_user_id: str - ) -> JsonDict: - """Get a group summary""" - path = _create_v1_path("/groups/%s/summary", group_id) - - return await self.client.get_json( - destination=destination, - path=path, - args={"requester_user_id": requester_user_id}, - ignore_backoff=True, - ) - - async def get_rooms_in_group( - self, destination: str, group_id: str, requester_user_id: str - ) -> JsonDict: - """Get all rooms in a group""" - path = _create_v1_path("/groups/%s/rooms", group_id) - - return await self.client.get_json( - destination=destination, - path=path, - args={"requester_user_id": requester_user_id}, - ignore_backoff=True, - ) - - async def add_room_to_group( - self, - destination: str, - group_id: str, - requester_user_id: str, - room_id: str, - content: JsonDict, - ) -> JsonDict: - """Add a room to a group""" - path = _create_v1_path("/groups/%s/room/%s", group_id, room_id) - - return await self.client.post_json( - destination=destination, - path=path, - args={"requester_user_id": requester_user_id}, - data=content, - ignore_backoff=True, - ) - - async def update_room_in_group( - self, - destination: str, - group_id: str, - requester_user_id: str, - room_id: str, - config_key: str, - content: JsonDict, - ) -> JsonDict: - """Update room in group""" - path = _create_v1_path( - "/groups/%s/room/%s/config/%s", group_id, room_id, config_key - ) - - return await self.client.post_json( - destination=destination, - path=path, - args={"requester_user_id": requester_user_id}, - data=content, - ignore_backoff=True, - ) - - async def remove_room_from_group( - self, destination: str, group_id: str, requester_user_id: str, room_id: str - ) -> JsonDict: - """Remove a room from a group""" - path = 
_create_v1_path("/groups/%s/room/%s", group_id, room_id) - - return await self.client.delete_json( - destination=destination, - path=path, - args={"requester_user_id": requester_user_id}, - ignore_backoff=True, - ) - - async def get_users_in_group( - self, destination: str, group_id: str, requester_user_id: str - ) -> JsonDict: - """Get users in a group""" - path = _create_v1_path("/groups/%s/users", group_id) - - return await self.client.get_json( - destination=destination, - path=path, - args={"requester_user_id": requester_user_id}, - ignore_backoff=True, - ) - - async def get_invited_users_in_group( - self, destination: str, group_id: str, requester_user_id: str - ) -> JsonDict: - """Get users that have been invited to a group""" - path = _create_v1_path("/groups/%s/invited_users", group_id) - - return await self.client.get_json( - destination=destination, - path=path, - args={"requester_user_id": requester_user_id}, - ignore_backoff=True, - ) - - async def accept_group_invite( - self, destination: str, group_id: str, user_id: str, content: JsonDict - ) -> JsonDict: - """Accept a group invite""" - path = _create_v1_path("/groups/%s/users/%s/accept_invite", group_id, user_id) - - return await self.client.post_json( - destination=destination, path=path, data=content, ignore_backoff=True - ) - - def join_group( - self, destination: str, group_id: str, user_id: str, content: JsonDict - ) -> Awaitable[JsonDict]: - """Attempts to join a group""" - path = _create_v1_path("/groups/%s/users/%s/join", group_id, user_id) - - return self.client.post_json( - destination=destination, path=path, data=content, ignore_backoff=True - ) - - async def invite_to_group( - self, - destination: str, - group_id: str, - user_id: str, - requester_user_id: str, - content: JsonDict, - ) -> JsonDict: - """Invite a user to a group""" - path = _create_v1_path("/groups/%s/users/%s/invite", group_id, user_id) - - return await self.client.post_json( - destination=destination, - path=path, - args={"requester_user_id": requester_user_id}, - data=content, - ignore_backoff=True, - ) - - async def invite_to_group_notification( - self, destination: str, group_id: str, user_id: str, content: JsonDict - ) -> JsonDict: - """Sent by group server to inform a user's server that they have been - invited. - """ - - path = _create_v1_path("/groups/local/%s/users/%s/invite", group_id, user_id) - - return await self.client.post_json( - destination=destination, path=path, data=content, ignore_backoff=True - ) - - async def remove_user_from_group( - self, - destination: str, - group_id: str, - requester_user_id: str, - user_id: str, - content: JsonDict, - ) -> JsonDict: - """Remove a user from a group""" - path = _create_v1_path("/groups/%s/users/%s/remove", group_id, user_id) - - return await self.client.post_json( - destination=destination, - path=path, - args={"requester_user_id": requester_user_id}, - data=content, - ignore_backoff=True, - ) - - async def remove_user_from_group_notification( - self, destination: str, group_id: str, user_id: str, content: JsonDict - ) -> JsonDict: - """Sent by group server to inform a user's server that they have been - kicked from the group. 
- """ - - path = _create_v1_path("/groups/local/%s/users/%s/remove", group_id, user_id) - - return await self.client.post_json( - destination=destination, path=path, data=content, ignore_backoff=True - ) - - async def renew_group_attestation( - self, destination: str, group_id: str, user_id: str, content: JsonDict - ) -> JsonDict: - """Sent by either a group server or a user's server to periodically update - the attestations - """ - - path = _create_v1_path("/groups/%s/renew_attestation/%s", group_id, user_id) - - return await self.client.post_json( - destination=destination, path=path, data=content, ignore_backoff=True - ) - - async def update_group_summary_room( - self, - destination: str, - group_id: str, - user_id: str, - room_id: str, - category_id: str, - content: JsonDict, - ) -> JsonDict: - """Update a room entry in a group summary""" - if category_id: - path = _create_v1_path( - "/groups/%s/summary/categories/%s/rooms/%s", - group_id, - category_id, - room_id, - ) - else: - path = _create_v1_path("/groups/%s/summary/rooms/%s", group_id, room_id) - - return await self.client.post_json( - destination=destination, - path=path, - args={"requester_user_id": user_id}, - data=content, - ignore_backoff=True, - ) - - async def delete_group_summary_room( - self, - destination: str, - group_id: str, - user_id: str, - room_id: str, - category_id: str, - ) -> JsonDict: - """Delete a room entry in a group summary""" - if category_id: - path = _create_v1_path( - "/groups/%s/summary/categories/%s/rooms/%s", - group_id, - category_id, - room_id, - ) - else: - path = _create_v1_path("/groups/%s/summary/rooms/%s", group_id, room_id) - - return await self.client.delete_json( - destination=destination, - path=path, - args={"requester_user_id": user_id}, - ignore_backoff=True, - ) - - async def get_group_categories( - self, destination: str, group_id: str, requester_user_id: str - ) -> JsonDict: - """Get all categories in a group""" - path = _create_v1_path("/groups/%s/categories", group_id) - - return await self.client.get_json( - destination=destination, - path=path, - args={"requester_user_id": requester_user_id}, - ignore_backoff=True, - ) - - async def get_group_category( - self, destination: str, group_id: str, requester_user_id: str, category_id: str - ) -> JsonDict: - """Get category info in a group""" - path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id) - - return await self.client.get_json( - destination=destination, - path=path, - args={"requester_user_id": requester_user_id}, - ignore_backoff=True, - ) - - async def update_group_category( - self, - destination: str, - group_id: str, - requester_user_id: str, - category_id: str, - content: JsonDict, - ) -> JsonDict: - """Update a category in a group""" - path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id) - - return await self.client.post_json( - destination=destination, - path=path, - args={"requester_user_id": requester_user_id}, - data=content, - ignore_backoff=True, - ) - - async def delete_group_category( - self, destination: str, group_id: str, requester_user_id: str, category_id: str - ) -> JsonDict: - """Delete a category in a group""" - path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id) - - return await self.client.delete_json( - destination=destination, - path=path, - args={"requester_user_id": requester_user_id}, - ignore_backoff=True, - ) - - async def get_group_roles( - self, destination: str, group_id: str, requester_user_id: str - ) -> JsonDict: - """Get all 
roles in a group""" - path = _create_v1_path("/groups/%s/roles", group_id) - - return await self.client.get_json( - destination=destination, - path=path, - args={"requester_user_id": requester_user_id}, - ignore_backoff=True, - ) - - async def get_group_role( - self, destination: str, group_id: str, requester_user_id: str, role_id: str - ) -> JsonDict: - """Get a roles info""" - path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id) - - return await self.client.get_json( - destination=destination, - path=path, - args={"requester_user_id": requester_user_id}, - ignore_backoff=True, - ) - - async def update_group_role( - self, - destination: str, - group_id: str, - requester_user_id: str, - role_id: str, - content: JsonDict, - ) -> JsonDict: - """Update a role in a group""" - path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id) - - return await self.client.post_json( - destination=destination, - path=path, - args={"requester_user_id": requester_user_id}, - data=content, - ignore_backoff=True, - ) - - async def delete_group_role( - self, destination: str, group_id: str, requester_user_id: str, role_id: str - ) -> JsonDict: - """Delete a role in a group""" - path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id) - - return await self.client.delete_json( - destination=destination, - path=path, - args={"requester_user_id": requester_user_id}, - ignore_backoff=True, - ) - - async def update_group_summary_user( - self, - destination: str, - group_id: str, - requester_user_id: str, - user_id: str, - role_id: str, - content: JsonDict, - ) -> JsonDict: - """Update a users entry in a group""" - if role_id: - path = _create_v1_path( - "/groups/%s/summary/roles/%s/users/%s", group_id, role_id, user_id - ) - else: - path = _create_v1_path("/groups/%s/summary/users/%s", group_id, user_id) - - return await self.client.post_json( - destination=destination, - path=path, - args={"requester_user_id": requester_user_id}, - data=content, - ignore_backoff=True, - ) - - async def set_group_join_policy( - self, destination: str, group_id: str, requester_user_id: str, content: JsonDict - ) -> JsonDict: - """Sets the join policy for a group""" - path = _create_v1_path("/groups/%s/settings/m.join_policy", group_id) - - return await self.client.put_json( - destination=destination, - path=path, - args={"requester_user_id": requester_user_id}, - data=content, - ignore_backoff=True, - ) - - async def delete_group_summary_user( - self, - destination: str, - group_id: str, - requester_user_id: str, - user_id: str, - role_id: str, - ) -> JsonDict: - """Delete a users entry in a group""" - if role_id: - path = _create_v1_path( - "/groups/%s/summary/roles/%s/users/%s", group_id, role_id, user_id - ) - else: - path = _create_v1_path("/groups/%s/summary/users/%s", group_id, user_id) - - return await self.client.delete_json( - destination=destination, - path=path, - args={"requester_user_id": requester_user_id}, - ignore_backoff=True, - ) - - async def bulk_get_publicised_groups( - self, destination: str, user_ids: Iterable[str] - ) -> JsonDict: - """Get the groups a list of users are publicising""" - - path = _create_v1_path("/get_groups_publicised") - - content = {"user_ids": user_ids} - - return await self.client.post_json( - destination=destination, path=path, data=content, ignore_backoff=True - ) - async def get_room_complexity(self, destination: str, room_id: str) -> JsonDict: """ Args: @@ -1360,10 +870,15 @@ class SendJoinParser(ByteParser[SendJoinResponse]): CONTENT_TYPE = 
"application/json" + # /send_join responses can be huge, so we override the size limit here. The response + # is parsed in a streaming manner, which helps alleviate the issue of memory + # usage a bit. + MAX_RESPONSE_SIZE = 500 * 1024 * 1024 + def __init__(self, room_version: RoomVersion, v1_api: bool): self._response = SendJoinResponse([], [], event_dict={}) self._room_version = room_version - self._coros = [] + self._coros: List[Generator[None, bytes, None]] = [] # The V1 API has the shape of `[200, {...}]`, which we handle by # prefixing with `item.*`. @@ -1411,6 +926,9 @@ class SendJoinParser(ByteParser[SendJoinResponse]): return len(data) def finish(self) -> SendJoinResponse: + for c in self._coros: + c.close() + if self._response.event_dict: self._response.event = make_event_from_dict( self._response.event_dict, self._room_version @@ -1427,10 +945,13 @@ class _StateParser(ByteParser[StateRequestResponse]): CONTENT_TYPE = "application/json" + # As with /send_join, /state responses can be huge. + MAX_RESPONSE_SIZE = 500 * 1024 * 1024 + def __init__(self, room_version: RoomVersion): self._response = StateRequestResponse([], []) self._room_version = room_version - self._coros = [ + self._coros: List[Generator[None, bytes, None]] = [ ijson.items_coro( _event_list_parser(room_version, self._response.state), "pdus.item", @@ -1449,4 +970,6 @@ class _StateParser(ByteParser[StateRequestResponse]): return len(data) def finish(self) -> StateRequestResponse: + for c in self._coros: + c.close() return self._response diff --git a/synapse/federation/transport/server/__init__.py b/synapse/federation/transport/server/__init__.py index 71b2f90eb9..50623cd385 100644 --- a/synapse/federation/transport/server/__init__.py +++ b/synapse/federation/transport/server/__init__.py @@ -27,10 +27,6 @@ from synapse.federation.transport.server.federation import ( FederationAccountStatusServlet, FederationTimestampLookupServlet, ) -from synapse.federation.transport.server.groups_local import GROUP_LOCAL_SERVLET_CLASSES -from synapse.federation.transport.server.groups_server import ( - GROUP_SERVER_SERVLET_CLASSES, -) from synapse.http.server import HttpServer, JsonResource from synapse.http.servlet import ( parse_boolean_from_args, @@ -199,38 +195,6 @@ class PublicRoomList(BaseFederationServlet): return 200, data -class FederationGroupsRenewAttestaionServlet(BaseFederationServlet): - """A group or user's server renews their attestation""" - - PATH = "/groups/(?P[^/]*)/renew_attestation/(?P[^/]*)" - - def __init__( - self, - hs: "HomeServer", - authenticator: Authenticator, - ratelimiter: FederationRateLimiter, - server_name: str, - ): - super().__init__(hs, authenticator, ratelimiter, server_name) - self.handler = hs.get_groups_attestation_renewer() - - async def on_POST( - self, - origin: str, - content: JsonDict, - query: Dict[bytes, List[bytes]], - group_id: str, - user_id: str, - ) -> Tuple[int, JsonDict]: - # We don't need to check auth here as we check the attestation signatures - - new_content = await self.handler.on_renew_attestation( - group_id, user_id, content - ) - - return 200, new_content - - class OpenIdUserInfo(BaseFederationServlet): """ Exchange a bearer token for information about a user. 
@@ -292,16 +256,9 @@ class OpenIdUserInfo(BaseFederationServlet): SERVLET_GROUPS: Dict[str, Iterable[Type[BaseFederationServlet]]] = { "federation": FEDERATION_SERVLET_CLASSES, "room_list": (PublicRoomList,), - "group_server": GROUP_SERVER_SERVLET_CLASSES, - "group_local": GROUP_LOCAL_SERVLET_CLASSES, - "group_attestation": (FederationGroupsRenewAttestaionServlet,), "openid": (OpenIdUserInfo,), } -DEFAULT_SERVLET_GROUPS = ("federation", "room_list", "openid") - -GROUP_SERVLET_GROUPS = ("group_server", "group_local", "group_attestation") - def register_servlets( hs: "HomeServer", @@ -324,10 +281,7 @@ def register_servlets( Defaults to ``DEFAULT_SERVLET_GROUPS``. """ if not servlet_groups: - servlet_groups = DEFAULT_SERVLET_GROUPS - # Only allow the groups servlets if the deprecated groups feature is enabled. - if hs.config.experimental.groups_enabled: - servlet_groups = servlet_groups + GROUP_SERVLET_GROUPS + servlet_groups = SERVLET_GROUPS.keys() for servlet_group in servlet_groups: # Skip unknown servlet groups. diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py index 6fbc7b5f15..7dfb890661 100644 --- a/synapse/federation/transport/server/federation.py +++ b/synapse/federation/transport/server/federation.py @@ -27,6 +27,7 @@ from typing import ( from matrix_common.versionstring import get_distribution_version_string from typing_extensions import Literal +from synapse.api.constants import EduTypes from synapse.api.errors import Codes, SynapseError from synapse.api.room_versions import RoomVersions from synapse.api.urls import FEDERATION_UNSTABLE_PREFIX, FEDERATION_V2_PREFIX @@ -108,7 +109,10 @@ class FederationSendServlet(BaseFederationServerServlet): ) if issue_8631_logger.isEnabledFor(logging.DEBUG): - DEVICE_UPDATE_EDUS = ["m.device_list_update", "m.signing_key_update"] + DEVICE_UPDATE_EDUS = [ + EduTypes.DEVICE_LIST_UPDATE, + EduTypes.SIGNING_KEY_UPDATE, + ] device_list_updates = [ edu.get("content", {}) for edu in transaction_data.get("edus", []) @@ -650,10 +654,6 @@ class FederationRoomHierarchyServlet(BaseFederationServlet): ) -class FederationRoomHierarchyUnstableServlet(FederationRoomHierarchyServlet): - PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc2946" - - class RoomComplexityServlet(BaseFederationServlet): """ Indicates to other servers how complex (and therefore likely @@ -752,7 +752,6 @@ FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = ( FederationVersionServlet, RoomComplexityServlet, FederationRoomHierarchyServlet, - FederationRoomHierarchyUnstableServlet, FederationV1SendKnockServlet, FederationMakeKnockServlet, FederationAccountStatusServlet, diff --git a/synapse/federation/transport/server/groups_local.py b/synapse/federation/transport/server/groups_local.py deleted file mode 100644 index 496472e1dc..0000000000 --- a/synapse/federation/transport/server/groups_local.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright 2021 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -from typing import TYPE_CHECKING, Dict, List, Tuple, Type - -from synapse.api.errors import SynapseError -from synapse.federation.transport.server._base import ( - Authenticator, - BaseFederationServlet, -) -from synapse.handlers.groups_local import GroupsLocalHandler -from synapse.types import JsonDict, get_domain_from_id -from synapse.util.ratelimitutils import FederationRateLimiter - -if TYPE_CHECKING: - from synapse.server import HomeServer - - -class BaseGroupsLocalServlet(BaseFederationServlet): - """Abstract base class for federation servlet classes which provides a groups local handler. - - See BaseFederationServlet for more information. - """ - - def __init__( - self, - hs: "HomeServer", - authenticator: Authenticator, - ratelimiter: FederationRateLimiter, - server_name: str, - ): - super().__init__(hs, authenticator, ratelimiter, server_name) - self.handler = hs.get_groups_local_handler() - - -class FederationGroupsLocalInviteServlet(BaseGroupsLocalServlet): - """A group server has invited a local user""" - - PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite" - - async def on_POST( - self, - origin: str, - content: JsonDict, - query: Dict[bytes, List[bytes]], - group_id: str, - user_id: str, - ) -> Tuple[int, JsonDict]: - if get_domain_from_id(group_id) != origin: - raise SynapseError(403, "group_id doesn't match origin") - - assert isinstance( - self.handler, GroupsLocalHandler - ), "Workers cannot handle group invites." - - new_content = await self.handler.on_invite(group_id, user_id, content) - - return 200, new_content - - -class FederationGroupsRemoveLocalUserServlet(BaseGroupsLocalServlet): - """A group server has removed a local user""" - - PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove" - - async def on_POST( - self, - origin: str, - content: JsonDict, - query: Dict[bytes, List[bytes]], - group_id: str, - user_id: str, - ) -> Tuple[int, None]: - if get_domain_from_id(group_id) != origin: - raise SynapseError(403, "user_id doesn't match origin") - - assert isinstance( - self.handler, GroupsLocalHandler - ), "Workers cannot handle group removals." - - await self.handler.user_removed_from_group(group_id, user_id, content) - - return 200, None - - -class FederationGroupsBulkPublicisedServlet(BaseGroupsLocalServlet): - """Get roles in a group""" - - PATH = "/get_groups_publicised" - - async def on_POST( - self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]] - ) -> Tuple[int, JsonDict]: - resp = await self.handler.bulk_get_publicised_groups( - content["user_ids"], proxy=False - ) - - return 200, resp - - -GROUP_LOCAL_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = ( - FederationGroupsLocalInviteServlet, - FederationGroupsRemoveLocalUserServlet, - FederationGroupsBulkPublicisedServlet, -) diff --git a/synapse/federation/transport/server/groups_server.py b/synapse/federation/transport/server/groups_server.py deleted file mode 100644 index 851b50152e..0000000000 --- a/synapse/federation/transport/server/groups_server.py +++ /dev/null @@ -1,755 +0,0 @@ -# Copyright 2021 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import TYPE_CHECKING, Dict, List, Tuple, Type - -from typing_extensions import Literal - -from synapse.api.constants import MAX_GROUP_CATEGORYID_LENGTH, MAX_GROUP_ROLEID_LENGTH -from synapse.api.errors import Codes, SynapseError -from synapse.federation.transport.server._base import ( - Authenticator, - BaseFederationServlet, -) -from synapse.http.servlet import parse_string_from_args -from synapse.types import JsonDict, get_domain_from_id -from synapse.util.ratelimitutils import FederationRateLimiter - -if TYPE_CHECKING: - from synapse.server import HomeServer - - -class BaseGroupsServerServlet(BaseFederationServlet): - """Abstract base class for federation servlet classes which provides a groups server handler. - - See BaseFederationServlet for more information. - """ - - def __init__( - self, - hs: "HomeServer", - authenticator: Authenticator, - ratelimiter: FederationRateLimiter, - server_name: str, - ): - super().__init__(hs, authenticator, ratelimiter, server_name) - self.handler = hs.get_groups_server_handler() - - -class FederationGroupsProfileServlet(BaseGroupsServerServlet): - """Get/set the basic profile of a group on behalf of a user""" - - PATH = "/groups/(?P<group_id>[^/]*)/profile" - - async def on_GET( - self, - origin: str, - content: Literal[None], - query: Dict[bytes, List[bytes]], - group_id: str, - ) -> Tuple[int, JsonDict]: - requester_user_id = parse_string_from_args( - query, "requester_user_id", required=True - ) - if get_domain_from_id(requester_user_id) != origin: - raise SynapseError(403, "requester_user_id doesn't match origin") - - new_content = await self.handler.get_group_profile(group_id, requester_user_id) - - return 200, new_content - - async def on_POST( - self, - origin: str, - content: JsonDict, - query: Dict[bytes, List[bytes]], - group_id: str, - ) -> Tuple[int, JsonDict]: - requester_user_id = parse_string_from_args( - query, "requester_user_id", required=True - ) - if get_domain_from_id(requester_user_id) != origin: - raise SynapseError(403, "requester_user_id doesn't match origin") - - new_content = await self.handler.update_group_profile( - group_id, requester_user_id, content - ) - - return 200, new_content - - -class FederationGroupsSummaryServlet(BaseGroupsServerServlet): - PATH = "/groups/(?P<group_id>[^/]*)/summary" - - async def on_GET( - self, - origin: str, - content: Literal[None], - query: Dict[bytes, List[bytes]], - group_id: str, - ) -> Tuple[int, JsonDict]: - requester_user_id = parse_string_from_args( - query, "requester_user_id", required=True - ) - if get_domain_from_id(requester_user_id) != origin: - raise SynapseError(403, "requester_user_id doesn't match origin") - - new_content = await self.handler.get_group_summary(group_id, requester_user_id) - - return 200, new_content - - -class FederationGroupsRoomsServlet(BaseGroupsServerServlet): - """Get the rooms in a group on behalf of a user""" - - PATH = "/groups/(?P<group_id>[^/]*)/rooms" - - async def on_GET( - self, - origin: str, - content: Literal[None], - query: Dict[bytes, List[bytes]], - group_id: str, - ) -> Tuple[int, JsonDict]: - requester_user_id = parse_string_from_args( -
query, "requester_user_id", required=True - ) - if get_domain_from_id(requester_user_id) != origin: - raise SynapseError(403, "requester_user_id doesn't match origin") - - new_content = await self.handler.get_rooms_in_group(group_id, requester_user_id) - - return 200, new_content - - -class FederationGroupsAddRoomsServlet(BaseGroupsServerServlet): - """Add/remove room from group""" - - PATH = "/groups/(?P[^/]*)/room/(?P[^/]*)" - - async def on_POST( - self, - origin: str, - content: JsonDict, - query: Dict[bytes, List[bytes]], - group_id: str, - room_id: str, - ) -> Tuple[int, JsonDict]: - requester_user_id = parse_string_from_args( - query, "requester_user_id", required=True - ) - if get_domain_from_id(requester_user_id) != origin: - raise SynapseError(403, "requester_user_id doesn't match origin") - - new_content = await self.handler.add_room_to_group( - group_id, requester_user_id, room_id, content - ) - - return 200, new_content - - async def on_DELETE( - self, - origin: str, - content: Literal[None], - query: Dict[bytes, List[bytes]], - group_id: str, - room_id: str, - ) -> Tuple[int, JsonDict]: - requester_user_id = parse_string_from_args( - query, "requester_user_id", required=True - ) - if get_domain_from_id(requester_user_id) != origin: - raise SynapseError(403, "requester_user_id doesn't match origin") - - new_content = await self.handler.remove_room_from_group( - group_id, requester_user_id, room_id - ) - - return 200, new_content - - -class FederationGroupsAddRoomsConfigServlet(BaseGroupsServerServlet): - """Update room config in group""" - - PATH = ( - "/groups/(?P[^/]*)/room/(?P[^/]*)" - "/config/(?P[^/]*)" - ) - - async def on_POST( - self, - origin: str, - content: JsonDict, - query: Dict[bytes, List[bytes]], - group_id: str, - room_id: str, - config_key: str, - ) -> Tuple[int, JsonDict]: - requester_user_id = parse_string_from_args( - query, "requester_user_id", required=True - ) - if get_domain_from_id(requester_user_id) != origin: - raise SynapseError(403, "requester_user_id doesn't match origin") - - result = await self.handler.update_room_in_group( - group_id, requester_user_id, room_id, config_key, content - ) - - return 200, result - - -class FederationGroupsUsersServlet(BaseGroupsServerServlet): - """Get the users in a group on behalf of a user""" - - PATH = "/groups/(?P[^/]*)/users" - - async def on_GET( - self, - origin: str, - content: Literal[None], - query: Dict[bytes, List[bytes]], - group_id: str, - ) -> Tuple[int, JsonDict]: - requester_user_id = parse_string_from_args( - query, "requester_user_id", required=True - ) - if get_domain_from_id(requester_user_id) != origin: - raise SynapseError(403, "requester_user_id doesn't match origin") - - new_content = await self.handler.get_users_in_group(group_id, requester_user_id) - - return 200, new_content - - -class FederationGroupsInvitedUsersServlet(BaseGroupsServerServlet): - """Get the users that have been invited to a group""" - - PATH = "/groups/(?P[^/]*)/invited_users" - - async def on_GET( - self, - origin: str, - content: Literal[None], - query: Dict[bytes, List[bytes]], - group_id: str, - ) -> Tuple[int, JsonDict]: - requester_user_id = parse_string_from_args( - query, "requester_user_id", required=True - ) - if get_domain_from_id(requester_user_id) != origin: - raise SynapseError(403, "requester_user_id doesn't match origin") - - new_content = await self.handler.get_invited_users_in_group( - group_id, requester_user_id - ) - - return 200, new_content - - -class 
FederationGroupsInviteServlet(BaseGroupsServerServlet): - """Ask a group server to invite someone to the group""" - - PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite" - - async def on_POST( - self, - origin: str, - content: JsonDict, - query: Dict[bytes, List[bytes]], - group_id: str, - user_id: str, - ) -> Tuple[int, JsonDict]: - requester_user_id = parse_string_from_args( - query, "requester_user_id", required=True - ) - if get_domain_from_id(requester_user_id) != origin: - raise SynapseError(403, "requester_user_id doesn't match origin") - - new_content = await self.handler.invite_to_group( - group_id, user_id, requester_user_id, content - ) - - return 200, new_content - - -class FederationGroupsAcceptInviteServlet(BaseGroupsServerServlet): - """Accept an invitation from the group server""" - - PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/accept_invite" - - async def on_POST( - self, - origin: str, - content: JsonDict, - query: Dict[bytes, List[bytes]], - group_id: str, - user_id: str, - ) -> Tuple[int, JsonDict]: - if get_domain_from_id(user_id) != origin: - raise SynapseError(403, "user_id doesn't match origin") - - new_content = await self.handler.accept_invite(group_id, user_id, content) - - return 200, new_content - - -class FederationGroupsJoinServlet(BaseGroupsServerServlet): - """Attempt to join a group""" - - PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/join" - - async def on_POST( - self, - origin: str, - content: JsonDict, - query: Dict[bytes, List[bytes]], - group_id: str, - user_id: str, - ) -> Tuple[int, JsonDict]: - if get_domain_from_id(user_id) != origin: - raise SynapseError(403, "user_id doesn't match origin") - - new_content = await self.handler.join_group(group_id, user_id, content) - - return 200, new_content - - -class FederationGroupsRemoveUserServlet(BaseGroupsServerServlet): - """Leave or kick a user from the group""" - - PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove" - - async def on_POST( - self, - origin: str, - content: JsonDict, - query: Dict[bytes, List[bytes]], - group_id: str, - user_id: str, - ) -> Tuple[int, JsonDict]: - requester_user_id = parse_string_from_args( - query, "requester_user_id", required=True - ) - if get_domain_from_id(requester_user_id) != origin: - raise SynapseError(403, "requester_user_id doesn't match origin") - - new_content = await self.handler.remove_user_from_group( - group_id, user_id, requester_user_id, content - ) - - return 200, new_content - - -class FederationGroupsSummaryRoomsServlet(BaseGroupsServerServlet): - """Add/remove a room from the group summary, with optional category. - - Matches both: - - /groups/:group/summary/rooms/:room_id - - /groups/:group/summary/categories/:category/rooms/:room_id - """ - - PATH = ( - "/groups/(?P<group_id>[^/]*)/summary" - "(/categories/(?P<category_id>[^/]+))?"
- "/rooms/(?P[^/]*)" - ) - - async def on_POST( - self, - origin: str, - content: JsonDict, - query: Dict[bytes, List[bytes]], - group_id: str, - category_id: str, - room_id: str, - ) -> Tuple[int, JsonDict]: - requester_user_id = parse_string_from_args( - query, "requester_user_id", required=True - ) - if get_domain_from_id(requester_user_id) != origin: - raise SynapseError(403, "requester_user_id doesn't match origin") - - if category_id == "": - raise SynapseError( - 400, "category_id cannot be empty string", Codes.INVALID_PARAM - ) - - if len(category_id) > MAX_GROUP_CATEGORYID_LENGTH: - raise SynapseError( - 400, - "category_id may not be longer than %s characters" - % (MAX_GROUP_CATEGORYID_LENGTH,), - Codes.INVALID_PARAM, - ) - - resp = await self.handler.update_group_summary_room( - group_id, - requester_user_id, - room_id=room_id, - category_id=category_id, - content=content, - ) - - return 200, resp - - async def on_DELETE( - self, - origin: str, - content: Literal[None], - query: Dict[bytes, List[bytes]], - group_id: str, - category_id: str, - room_id: str, - ) -> Tuple[int, JsonDict]: - requester_user_id = parse_string_from_args( - query, "requester_user_id", required=True - ) - if get_domain_from_id(requester_user_id) != origin: - raise SynapseError(403, "requester_user_id doesn't match origin") - - if category_id == "": - raise SynapseError(400, "category_id cannot be empty string") - - resp = await self.handler.delete_group_summary_room( - group_id, requester_user_id, room_id=room_id, category_id=category_id - ) - - return 200, resp - - -class FederationGroupsCategoriesServlet(BaseGroupsServerServlet): - """Get all categories for a group""" - - PATH = "/groups/(?P[^/]*)/categories/?" - - async def on_GET( - self, - origin: str, - content: Literal[None], - query: Dict[bytes, List[bytes]], - group_id: str, - ) -> Tuple[int, JsonDict]: - requester_user_id = parse_string_from_args( - query, "requester_user_id", required=True - ) - if get_domain_from_id(requester_user_id) != origin: - raise SynapseError(403, "requester_user_id doesn't match origin") - - resp = await self.handler.get_group_categories(group_id, requester_user_id) - - return 200, resp - - -class FederationGroupsCategoryServlet(BaseGroupsServerServlet): - """Add/remove/get a category in a group""" - - PATH = "/groups/(?P[^/]*)/categories/(?P[^/]+)" - - async def on_GET( - self, - origin: str, - content: Literal[None], - query: Dict[bytes, List[bytes]], - group_id: str, - category_id: str, - ) -> Tuple[int, JsonDict]: - requester_user_id = parse_string_from_args( - query, "requester_user_id", required=True - ) - if get_domain_from_id(requester_user_id) != origin: - raise SynapseError(403, "requester_user_id doesn't match origin") - - resp = await self.handler.get_group_category( - group_id, requester_user_id, category_id - ) - - return 200, resp - - async def on_POST( - self, - origin: str, - content: JsonDict, - query: Dict[bytes, List[bytes]], - group_id: str, - category_id: str, - ) -> Tuple[int, JsonDict]: - requester_user_id = parse_string_from_args( - query, "requester_user_id", required=True - ) - if get_domain_from_id(requester_user_id) != origin: - raise SynapseError(403, "requester_user_id doesn't match origin") - - if category_id == "": - raise SynapseError(400, "category_id cannot be empty string") - - if len(category_id) > MAX_GROUP_CATEGORYID_LENGTH: - raise SynapseError( - 400, - "category_id may not be longer than %s characters" - % (MAX_GROUP_CATEGORYID_LENGTH,), - Codes.INVALID_PARAM, - ) - - resp = 
await self.handler.upsert_group_category( - group_id, requester_user_id, category_id, content - ) - - return 200, resp - - async def on_DELETE( - self, - origin: str, - content: Literal[None], - query: Dict[bytes, List[bytes]], - group_id: str, - category_id: str, - ) -> Tuple[int, JsonDict]: - requester_user_id = parse_string_from_args( - query, "requester_user_id", required=True - ) - if get_domain_from_id(requester_user_id) != origin: - raise SynapseError(403, "requester_user_id doesn't match origin") - - if category_id == "": - raise SynapseError(400, "category_id cannot be empty string") - - resp = await self.handler.delete_group_category( - group_id, requester_user_id, category_id - ) - - return 200, resp - - -class FederationGroupsRolesServlet(BaseGroupsServerServlet): - """Get roles in a group""" - - PATH = "/groups/(?P<group_id>[^/]*)/roles/?" - - async def on_GET( - self, - origin: str, - content: Literal[None], - query: Dict[bytes, List[bytes]], - group_id: str, - ) -> Tuple[int, JsonDict]: - requester_user_id = parse_string_from_args( - query, "requester_user_id", required=True - ) - if get_domain_from_id(requester_user_id) != origin: - raise SynapseError(403, "requester_user_id doesn't match origin") - - resp = await self.handler.get_group_roles(group_id, requester_user_id) - - return 200, resp - - -class FederationGroupsRoleServlet(BaseGroupsServerServlet): - """Add/remove/get a role in a group""" - - PATH = "/groups/(?P<group_id>[^/]*)/roles/(?P<role_id>[^/]+)" - - async def on_GET( - self, - origin: str, - content: Literal[None], - query: Dict[bytes, List[bytes]], - group_id: str, - role_id: str, - ) -> Tuple[int, JsonDict]: - requester_user_id = parse_string_from_args( - query, "requester_user_id", required=True - ) - if get_domain_from_id(requester_user_id) != origin: - raise SynapseError(403, "requester_user_id doesn't match origin") - - resp = await self.handler.get_group_role(group_id, requester_user_id, role_id) - - return 200, resp - - async def on_POST( - self, - origin: str, - content: JsonDict, - query: Dict[bytes, List[bytes]], - group_id: str, - role_id: str, - ) -> Tuple[int, JsonDict]: - requester_user_id = parse_string_from_args( - query, "requester_user_id", required=True - ) - if get_domain_from_id(requester_user_id) != origin: - raise SynapseError(403, "requester_user_id doesn't match origin") - - if role_id == "": - raise SynapseError( - 400, "role_id cannot be empty string", Codes.INVALID_PARAM - ) - - if len(role_id) > MAX_GROUP_ROLEID_LENGTH: - raise SynapseError( - 400, - "role_id may not be longer than %s characters" - % (MAX_GROUP_ROLEID_LENGTH,), - Codes.INVALID_PARAM, - ) - - resp = await self.handler.update_group_role( - group_id, requester_user_id, role_id, content - ) - - return 200, resp - - async def on_DELETE( - self, - origin: str, - content: Literal[None], - query: Dict[bytes, List[bytes]], - group_id: str, - role_id: str, - ) -> Tuple[int, JsonDict]: - requester_user_id = parse_string_from_args( - query, "requester_user_id", required=True - ) - if get_domain_from_id(requester_user_id) != origin: - raise SynapseError(403, "requester_user_id doesn't match origin") - - if role_id == "": - raise SynapseError(400, "role_id cannot be empty string") - - resp = await self.handler.delete_group_role( - group_id, requester_user_id, role_id - ) - - return 200, resp - - -class FederationGroupsSummaryUsersServlet(BaseGroupsServerServlet): - """Add/remove a user from the group summary, with optional role.
- - Matches both: - - /groups/:group/summary/users/:user_id - - /groups/:group/summary/roles/:role/users/:user_id - """ - - PATH = ( - "/groups/(?P<group_id>[^/]*)/summary" - "(/roles/(?P<role_id>[^/]+))?" - "/users/(?P<user_id>[^/]*)" - ) - - async def on_POST( - self, - origin: str, - content: JsonDict, - query: Dict[bytes, List[bytes]], - group_id: str, - role_id: str, - user_id: str, - ) -> Tuple[int, JsonDict]: - requester_user_id = parse_string_from_args( - query, "requester_user_id", required=True - ) - if get_domain_from_id(requester_user_id) != origin: - raise SynapseError(403, "requester_user_id doesn't match origin") - - if role_id == "": - raise SynapseError(400, "role_id cannot be empty string") - - if len(role_id) > MAX_GROUP_ROLEID_LENGTH: - raise SynapseError( - 400, - "role_id may not be longer than %s characters" - % (MAX_GROUP_ROLEID_LENGTH,), - Codes.INVALID_PARAM, - ) - - resp = await self.handler.update_group_summary_user( - group_id, - requester_user_id, - user_id=user_id, - role_id=role_id, - content=content, - ) - - return 200, resp - - async def on_DELETE( - self, - origin: str, - content: Literal[None], - query: Dict[bytes, List[bytes]], - group_id: str, - role_id: str, - user_id: str, - ) -> Tuple[int, JsonDict]: - requester_user_id = parse_string_from_args( - query, "requester_user_id", required=True - ) - if get_domain_from_id(requester_user_id) != origin: - raise SynapseError(403, "requester_user_id doesn't match origin") - - if role_id == "": - raise SynapseError(400, "role_id cannot be empty string") - - resp = await self.handler.delete_group_summary_user( - group_id, requester_user_id, user_id=user_id, role_id=role_id - ) - - return 200, resp - - -class FederationGroupsSettingJoinPolicyServlet(BaseGroupsServerServlet): - """Sets whether a group is joinable without an invite or knock""" - - PATH = "/groups/(?P<group_id>[^/]*)/settings/m.join_policy" - - async def on_PUT( - self, - origin: str, - content: JsonDict, - query: Dict[bytes, List[bytes]], - group_id: str, - ) -> Tuple[int, JsonDict]: - requester_user_id = parse_string_from_args( - query, "requester_user_id", required=True - ) - if get_domain_from_id(requester_user_id) != origin: - raise SynapseError(403, "requester_user_id doesn't match origin") - - new_content = await self.handler.set_group_join_policy( - group_id, requester_user_id, content - ) - - return 200, new_content - - -GROUP_SERVER_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...]
= ( - FederationGroupsProfileServlet, - FederationGroupsSummaryServlet, - FederationGroupsRoomsServlet, - FederationGroupsUsersServlet, - FederationGroupsInvitedUsersServlet, - FederationGroupsInviteServlet, - FederationGroupsAcceptInviteServlet, - FederationGroupsJoinServlet, - FederationGroupsRemoveUserServlet, - FederationGroupsSummaryRoomsServlet, - FederationGroupsCategoriesServlet, - FederationGroupsCategoryServlet, - FederationGroupsRolesServlet, - FederationGroupsRoleServlet, - FederationGroupsSummaryUsersServlet, - FederationGroupsAddRoomsServlet, - FederationGroupsAddRoomsConfigServlet, - FederationGroupsSettingJoinPolicyServlet, -) diff --git a/synapse/groups/__init__.py b/synapse/groups/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py deleted file mode 100644 index ed26d6a6ce..0000000000 --- a/synapse/groups/attestations.py +++ /dev/null @@ -1,218 +0,0 @@ -# Copyright 2017 Vector Creations Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Attestations ensure that users and groups can't lie about their memberships. - -When a user joins a group the HS and GS swap attestations, which allow them -both to independently prove to third parties their membership.These -attestations have a validity period so need to be periodically renewed. - -If a user leaves (or gets kicked out of) a group, either side can still use -their attestation to "prove" their membership, until the attestation expires. -Therefore attestations shouldn't be relied on to prove membership in important -cases, but can for less important situations, e.g. showing a users membership -of groups on their profile, showing flairs, etc. - -An attestation is a signed blob of json that looks like: - - { - "user_id": "@foo:a.example.com", - "group_id": "+bar:b.example.com", - "valid_until_ms": 1507994728530, - "signatures":{"matrix.org":{"ed25519:auto":"..."}} - } -""" - -import logging -import random -from typing import TYPE_CHECKING, Optional, Tuple - -from signedjson.sign import sign_json - -from twisted.internet.defer import Deferred - -from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError -from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.types import JsonDict, get_domain_from_id - -if TYPE_CHECKING: - from synapse.server import HomeServer - -logger = logging.getLogger(__name__) - - -# Default validity duration for new attestations we create -DEFAULT_ATTESTATION_LENGTH_MS = 3 * 24 * 60 * 60 * 1000 - -# We add some jitter to the validity duration of attestations so that if we -# add lots of users at once we don't need to renew them all at once. 
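The module docstring above spells out the attestation wire format. For concreteness, a sketch of producing such a blob with `signedjson` (the same `sign_json` call the deleted `create_attestation` below relies on); the throwaway key is purely illustrative:

```python
from signedjson.key import generate_signing_key
from signedjson.sign import sign_json

# Illustrative throwaway key; a real homeserver signs with its configured
# ed25519 signing key (hs.signing_key).
signing_key = generate_signing_key("auto")

attestation = sign_json(
    {
        "user_id": "@foo:a.example.com",
        "group_id": "+bar:b.example.com",
        "valid_until_ms": 1507994728530,
    },
    "a.example.com",  # the server name the signature is published under
    signing_key,
)
# attestation now carries:
#   "signatures": {"a.example.com": {"ed25519:auto": "..."}}
```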
-# The jitter is a multiplier picked randomly between the first and second number -DEFAULT_ATTESTATION_JITTER = (0.9, 1.3) - -# Start trying to update our attestations when they come this close to expiring -UPDATE_ATTESTATION_TIME_MS = 1 * 24 * 60 * 60 * 1000 - - -class GroupAttestationSigning: - """Creates and verifies group attestations.""" - - def __init__(self, hs: "HomeServer"): - self.keyring = hs.get_keyring() - self.clock = hs.get_clock() - self.server_name = hs.hostname - self.signing_key = hs.signing_key - - async def verify_attestation( - self, - attestation: JsonDict, - group_id: str, - user_id: str, - server_name: Optional[str] = None, - ) -> None: - """Verifies that the given attestation matches the given parameters. - - An optional server_name can be supplied to explicitly set which server's - signature is expected. Otherwise assumes that either the group_id or user_id - is local and uses the other's server as the one to check. - """ - - if not server_name: - if get_domain_from_id(group_id) == self.server_name: - server_name = get_domain_from_id(user_id) - elif get_domain_from_id(user_id) == self.server_name: - server_name = get_domain_from_id(group_id) - else: - raise Exception("Expected either group_id or user_id to be local") - - if user_id != attestation["user_id"]: - raise SynapseError(400, "Attestation has incorrect user_id") - - if group_id != attestation["group_id"]: - raise SynapseError(400, "Attestation has incorrect group_id") - valid_until_ms = attestation["valid_until_ms"] - - # TODO: We also want to check that *new* attestations that people give - # us to store are valid for at least a little while. - now = self.clock.time_msec() - if valid_until_ms < now: - raise SynapseError(400, "Attestation expired") - - assert server_name is not None - await self.keyring.verify_json_for_server( - server_name, - attestation, - now, - ) - - def create_attestation(self, group_id: str, user_id: str) -> JsonDict: - """Create an attestation for the group_id and user_id with default - validity length. 
- """ - validity_period = DEFAULT_ATTESTATION_LENGTH_MS * random.uniform( - *DEFAULT_ATTESTATION_JITTER - ) - valid_until_ms = int(self.clock.time_msec() + validity_period) - - return sign_json( - { - "group_id": group_id, - "user_id": user_id, - "valid_until_ms": valid_until_ms, - }, - self.server_name, - self.signing_key, - ) - - -class GroupAttestionRenewer: - """Responsible for sending and receiving attestation updates.""" - - def __init__(self, hs: "HomeServer"): - self.clock = hs.get_clock() - self.store = hs.get_datastores().main - self.assestations = hs.get_groups_attestation_signing() - self.transport_client = hs.get_federation_transport_client() - self.is_mine_id = hs.is_mine_id - self.attestations = hs.get_groups_attestation_signing() - - if not hs.config.worker.worker_app: - self._renew_attestations_loop = self.clock.looping_call( - self._start_renew_attestations, 30 * 60 * 1000 - ) - - async def on_renew_attestation( - self, group_id: str, user_id: str, content: JsonDict - ) -> JsonDict: - """When a remote updates an attestation""" - attestation = content["attestation"] - - if not self.is_mine_id(group_id) and not self.is_mine_id(user_id): - raise SynapseError(400, "Neither user not group are on this server") - - await self.attestations.verify_attestation( - attestation, user_id=user_id, group_id=group_id - ) - - await self.store.update_remote_attestion(group_id, user_id, attestation) - - return {} - - def _start_renew_attestations(self) -> "Deferred[None]": - return run_as_background_process("renew_attestations", self._renew_attestations) - - async def _renew_attestations(self) -> None: - """Called periodically to check if we need to update any of our attestations""" - - now = self.clock.time_msec() - - rows = await self.store.get_attestations_need_renewals( - now + UPDATE_ATTESTATION_TIME_MS - ) - - async def _renew_attestation(group_user: Tuple[str, str]) -> None: - group_id, user_id = group_user - try: - if not self.is_mine_id(group_id): - destination = get_domain_from_id(group_id) - elif not self.is_mine_id(user_id): - destination = get_domain_from_id(user_id) - else: - logger.warning( - "Incorrectly trying to do attestations for user: %r in %r", - user_id, - group_id, - ) - await self.store.remove_attestation_renewal(group_id, user_id) - return - - attestation = self.attestations.create_attestation(group_id, user_id) - - await self.transport_client.renew_group_attestation( - destination, group_id, user_id, content={"attestation": attestation} - ) - - await self.store.update_attestation_renewal( - group_id, user_id, attestation - ) - except (RequestSendFailed, HttpResponseException) as e: - logger.warning( - "Failed to renew attestation of %r in %r: %s", user_id, group_id, e - ) - except Exception: - logger.exception( - "Error renewing attestation of %r in %r", user_id, group_id - ) - - for row in rows: - await _renew_attestation((row["group_id"], row["user_id"])) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py deleted file mode 100644 index dfd24af695..0000000000 --- a/synapse/groups/groups_server.py +++ /dev/null @@ -1,1019 +0,0 @@ -# Copyright 2017 Vector Creations Ltd -# Copyright 2018 New Vector Ltd -# Copyright 2019 Michael Telatynski <7t3chguy@gmail.com> -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -from typing import TYPE_CHECKING, Optional - -from synapse.api.errors import Codes, SynapseError -from synapse.handlers.groups_local import GroupsLocalHandler -from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN -from synapse.types import GroupID, JsonDict, RoomID, UserID, get_domain_from_id -from synapse.util.async_helpers import concurrently_execute - -if TYPE_CHECKING: - from synapse.server import HomeServer - -logger = logging.getLogger(__name__) - - -# TODO: Allow users to "knock" or simply join depending on rules -# TODO: Federation admin APIs -# TODO: is_privileged flag to users and is_public to users and rooms -# TODO: Audit log for admins (profile updates, membership changes, users who tried -# to join but were rejected, etc) -# TODO: Flairs - - -# Note that the maximum lengths are somewhat arbitrary. -MAX_SHORT_DESC_LEN = 1000 -MAX_LONG_DESC_LEN = 10000 - - -class GroupsServerWorkerHandler: - def __init__(self, hs: "HomeServer"): - self.hs = hs - self.store = hs.get_datastores().main - self.room_list_handler = hs.get_room_list_handler() - self.auth = hs.get_auth() - self.clock = hs.get_clock() - self.keyring = hs.get_keyring() - self.is_mine_id = hs.is_mine_id - self.signing_key = hs.signing_key - self.server_name = hs.hostname - self.attestations = hs.get_groups_attestation_signing() - self.transport_client = hs.get_federation_transport_client() - self.profile_handler = hs.get_profile_handler() - - async def check_group_is_ours( - self, - group_id: str, - requester_user_id: str, - and_exists: bool = False, - and_is_admin: Optional[str] = None, - ) -> Optional[dict]: - """Check that the group is ours, and optionally if it exists. - - If group does exist then return group. - - Args: - group_id: The group ID to check. - requester_user_id: The user ID of the requester. - and_exists: whether to also check if group exists - and_is_admin: whether to also check if given str is a user_id - that is an admin - """ - if not self.is_mine_id(group_id): - raise SynapseError(400, "Group not on this server") - - group = await self.store.get_group(group_id) - if and_exists and not group: - raise SynapseError(404, "Unknown group") - - is_user_in_group = await self.store.is_user_in_group( - requester_user_id, group_id - ) - if group and not is_user_in_group and not group["is_public"]: - raise SynapseError(404, "Unknown group") - - if and_is_admin: - is_admin = await self.store.is_user_admin_in_group(group_id, and_is_admin) - if not is_admin: - raise SynapseError(403, "User is not admin in group") - - return group - - async def get_group_summary( - self, group_id: str, requester_user_id: str - ) -> JsonDict: - """Get the summary for a group as seen by requester_user_id. - - The group summary consists of the profile of the room, and a curated - list of users and rooms. These list *may* be organised by role/category. - The roles/categories are ordered, and so are the users/rooms within them. - - A user/room may appear in multiple roles/categories. 
- """ - await self.check_group_is_ours(group_id, requester_user_id, and_exists=True) - - is_user_in_group = await self.store.is_user_in_group( - requester_user_id, group_id - ) - - profile = await self.get_group_profile(group_id, requester_user_id) - - users, roles = await self.store.get_users_for_summary_by_role( - group_id, include_private=is_user_in_group - ) - - # TODO: Add profiles to users - - rooms, categories = await self.store.get_rooms_for_summary_by_category( - group_id, include_private=is_user_in_group - ) - - for room_entry in rooms: - room_id = room_entry["room_id"] - joined_users = await self.store.get_users_in_room(room_id) - entry = await self.room_list_handler.generate_room_entry( - room_id, len(joined_users), with_alias=False, allow_private=True - ) - if entry is None: - continue - entry = dict(entry) # so we don't change what's cached - entry.pop("room_id", None) - - room_entry["profile"] = entry - - rooms.sort(key=lambda e: e.get("order", 0)) - - for user in users: - user_id = user["user_id"] - - if not self.is_mine_id(requester_user_id): - attestation = await self.store.get_remote_attestation(group_id, user_id) - if not attestation: - continue - - user["attestation"] = attestation - else: - user["attestation"] = self.attestations.create_attestation( - group_id, user_id - ) - - user_profile = await self.profile_handler.get_profile_from_cache(user_id) - user.update(user_profile) - - users.sort(key=lambda e: e.get("order", 0)) - - membership_info = await self.store.get_users_membership_info_in_group( - group_id, requester_user_id - ) - - return { - "profile": profile, - "users_section": { - "users": users, - "roles": roles, - "total_user_count_estimate": 0, # TODO - }, - "rooms_section": { - "rooms": rooms, - "categories": categories, - "total_room_count_estimate": 0, # TODO - }, - "user": membership_info, - } - - async def get_group_categories( - self, group_id: str, requester_user_id: str - ) -> JsonDict: - """Get all categories in a group (as seen by user)""" - await self.check_group_is_ours(group_id, requester_user_id, and_exists=True) - - categories = await self.store.get_group_categories(group_id=group_id) - return {"categories": categories} - - async def get_group_category( - self, group_id: str, requester_user_id: str, category_id: str - ) -> JsonDict: - """Get a specific category in a group (as seen by user)""" - await self.check_group_is_ours(group_id, requester_user_id, and_exists=True) - - return await self.store.get_group_category( - group_id=group_id, category_id=category_id - ) - - async def get_group_roles(self, group_id: str, requester_user_id: str) -> JsonDict: - """Get all roles in a group (as seen by user)""" - await self.check_group_is_ours(group_id, requester_user_id, and_exists=True) - - roles = await self.store.get_group_roles(group_id=group_id) - return {"roles": roles} - - async def get_group_role( - self, group_id: str, requester_user_id: str, role_id: str - ) -> JsonDict: - """Get a specific role in a group (as seen by user)""" - await self.check_group_is_ours(group_id, requester_user_id, and_exists=True) - - return await self.store.get_group_role(group_id=group_id, role_id=role_id) - - async def get_group_profile( - self, group_id: str, requester_user_id: str - ) -> JsonDict: - """Get the group profile as seen by requester_user_id""" - - await self.check_group_is_ours(group_id, requester_user_id) - - group = await self.store.get_group(group_id) - - if group: - cols = [ - "name", - "short_description", - "long_description", - "avatar_url", - 
"is_public", - ] - group_description = {key: group[key] for key in cols} - group_description["is_openly_joinable"] = group["join_policy"] == "open" - - return group_description - else: - raise SynapseError(404, "Unknown group") - - async def get_users_in_group( - self, group_id: str, requester_user_id: str - ) -> JsonDict: - """Get the users in group as seen by requester_user_id. - - The ordering is arbitrary at the moment - """ - - await self.check_group_is_ours(group_id, requester_user_id, and_exists=True) - - is_user_in_group = await self.store.is_user_in_group( - requester_user_id, group_id - ) - - user_results = await self.store.get_users_in_group( - group_id, include_private=is_user_in_group - ) - - chunk = [] - for user_result in user_results: - g_user_id = user_result["user_id"] - is_public = user_result["is_public"] - is_privileged = user_result["is_admin"] - - entry = {"user_id": g_user_id} - - profile = await self.profile_handler.get_profile_from_cache(g_user_id) - entry.update(profile) - - entry["is_public"] = bool(is_public) - entry["is_privileged"] = bool(is_privileged) - - if not self.is_mine_id(g_user_id): - attestation = await self.store.get_remote_attestation( - group_id, g_user_id - ) - if not attestation: - continue - - entry["attestation"] = attestation - else: - entry["attestation"] = self.attestations.create_attestation( - group_id, g_user_id - ) - - chunk.append(entry) - - # TODO: If admin add lists of users whose attestations have timed out - - return {"chunk": chunk, "total_user_count_estimate": len(user_results)} - - async def get_invited_users_in_group( - self, group_id: str, requester_user_id: str - ) -> JsonDict: - """Get the users that have been invited to a group as seen by requester_user_id. - - The ordering is arbitrary at the moment - """ - - await self.check_group_is_ours(group_id, requester_user_id, and_exists=True) - - is_user_in_group = await self.store.is_user_in_group( - requester_user_id, group_id - ) - - if not is_user_in_group: - raise SynapseError(403, "User not in group") - - invited_users = await self.store.get_invited_users_in_group(group_id) - - user_profiles = [] - - for user_id in invited_users: - user_profile = {"user_id": user_id} - try: - profile = await self.profile_handler.get_profile_from_cache(user_id) - user_profile.update(profile) - except Exception as e: - logger.warning("Error getting profile for %s: %s", user_id, e) - user_profiles.append(user_profile) - - return {"chunk": user_profiles, "total_user_count_estimate": len(invited_users)} - - async def get_rooms_in_group( - self, group_id: str, requester_user_id: str - ) -> JsonDict: - """Get the rooms in group as seen by requester_user_id - - This returns rooms in order of decreasing number of joined users - """ - - await self.check_group_is_ours(group_id, requester_user_id, and_exists=True) - - is_user_in_group = await self.store.is_user_in_group( - requester_user_id, group_id - ) - - # Note! room_results["is_public"] is about whether the room is considered - # public from the group's point of view. (i.e. whether non-group members - # should be able to see the room is in the group). - # This is not the same as whether the room itself is public (in the sense - # of being visible in the room directory). - # As such, room_results["is_public"] itself is not sufficient to determine - # whether any given user is permitted to see the room's metadata. 
- room_results = await self.store.get_rooms_in_group( - group_id, include_private=is_user_in_group - ) - - chunk = [] - for room_result in room_results: - room_id = room_result["room_id"] - - joined_users = await self.store.get_users_in_room(room_id) - - # check the user is actually allowed to see the room before showing it to them - allow_private = requester_user_id in joined_users - - entry = await self.room_list_handler.generate_room_entry( - room_id, - len(joined_users), - with_alias=False, - allow_private=allow_private, - ) - - if not entry: - continue - - entry["is_public"] = bool(room_result["is_public"]) - - chunk.append(entry) - - chunk.sort(key=lambda e: -e["num_joined_members"]) - - return {"chunk": chunk, "total_room_count_estimate": len(chunk)} - - -class GroupsServerHandler(GroupsServerWorkerHandler): - def __init__(self, hs: "HomeServer"): - super().__init__(hs) - - # Ensure attestations get renewed - hs.get_groups_attestation_renewer() - - async def update_group_summary_room( - self, - group_id: str, - requester_user_id: str, - room_id: str, - category_id: str, - content: JsonDict, - ) -> JsonDict: - """Add/update a room to the group summary""" - await self.check_group_is_ours( - group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id - ) - - RoomID.from_string(room_id) # Ensure valid room id - - order = content.get("order", None) - - is_public = _parse_visibility_from_contents(content) - - await self.store.add_room_to_summary( - group_id=group_id, - room_id=room_id, - category_id=category_id, - order=order, - is_public=is_public, - ) - - return {} - - async def delete_group_summary_room( - self, group_id: str, requester_user_id: str, room_id: str, category_id: str - ) -> JsonDict: - """Remove a room from the summary""" - await self.check_group_is_ours( - group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id - ) - - await self.store.remove_room_from_summary( - group_id=group_id, room_id=room_id, category_id=category_id - ) - - return {} - - async def set_group_join_policy( - self, group_id: str, requester_user_id: str, content: JsonDict - ) -> JsonDict: - """Sets the group join policy. - - Currently supported policies are: - - "invite": an invite must be received and accepted in order to join. - - "open": anyone can join. 
- """ - await self.check_group_is_ours( - group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id - ) - - join_policy = _parse_join_policy_from_contents(content) - if join_policy is None: - raise SynapseError(400, "No value specified for 'm.join_policy'") - - await self.store.set_group_join_policy(group_id, join_policy=join_policy) - - return {} - - async def update_group_category( - self, group_id: str, requester_user_id: str, category_id: str, content: JsonDict - ) -> JsonDict: - """Add/Update a group category""" - await self.check_group_is_ours( - group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id - ) - - is_public = _parse_visibility_from_contents(content) - profile = content.get("profile") - - await self.store.upsert_group_category( - group_id=group_id, - category_id=category_id, - is_public=is_public, - profile=profile, - ) - - return {} - - async def delete_group_category( - self, group_id: str, requester_user_id: str, category_id: str - ) -> JsonDict: - """Delete a group category""" - await self.check_group_is_ours( - group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id - ) - - await self.store.remove_group_category( - group_id=group_id, category_id=category_id - ) - - return {} - - async def update_group_role( - self, group_id: str, requester_user_id: str, role_id: str, content: JsonDict - ) -> JsonDict: - """Add/update a role in a group""" - await self.check_group_is_ours( - group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id - ) - - is_public = _parse_visibility_from_contents(content) - - profile = content.get("profile") - - await self.store.upsert_group_role( - group_id=group_id, role_id=role_id, is_public=is_public, profile=profile - ) - - return {} - - async def delete_group_role( - self, group_id: str, requester_user_id: str, role_id: str - ) -> JsonDict: - """Remove role from group""" - await self.check_group_is_ours( - group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id - ) - - await self.store.remove_group_role(group_id=group_id, role_id=role_id) - - return {} - - async def update_group_summary_user( - self, - group_id: str, - requester_user_id: str, - user_id: str, - role_id: str, - content: JsonDict, - ) -> JsonDict: - """Add/update a users entry in the group summary""" - await self.check_group_is_ours( - group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id - ) - - order = content.get("order", None) - - is_public = _parse_visibility_from_contents(content) - - await self.store.add_user_to_summary( - group_id=group_id, - user_id=user_id, - role_id=role_id, - order=order, - is_public=is_public, - ) - - return {} - - async def delete_group_summary_user( - self, group_id: str, requester_user_id: str, user_id: str, role_id: str - ) -> JsonDict: - """Remove a user from the group summary""" - await self.check_group_is_ours( - group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id - ) - - await self.store.remove_user_from_summary( - group_id=group_id, user_id=user_id, role_id=role_id - ) - - return {} - - async def update_group_profile( - self, group_id: str, requester_user_id: str, content: JsonDict - ) -> None: - """Update the group profile""" - await self.check_group_is_ours( - group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id - ) - - profile = {} - for keyname, max_length in ( - ("name", MAX_DISPLAYNAME_LEN), - ("avatar_url", MAX_AVATAR_URL_LEN), - ("short_description", 
MAX_SHORT_DESC_LEN), - ("long_description", MAX_LONG_DESC_LEN), - ): - if keyname in content: - value = content[keyname] - if not isinstance(value, str): - raise SynapseError( - 400, - "%r value is not a string" % (keyname,), - errcode=Codes.INVALID_PARAM, - ) - if len(value) > max_length: - raise SynapseError( - 400, - "Invalid %s parameter" % (keyname,), - errcode=Codes.INVALID_PARAM, - ) - profile[keyname] = value - - await self.store.update_group_profile(group_id, profile) - - async def add_room_to_group( - self, group_id: str, requester_user_id: str, room_id: str, content: JsonDict - ) -> JsonDict: - """Add room to group""" - RoomID.from_string(room_id) # Ensure valid room id - - await self.check_group_is_ours( - group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id - ) - - is_public = _parse_visibility_from_contents(content) - - await self.store.add_room_to_group(group_id, room_id, is_public=is_public) - - return {} - - async def update_room_in_group( - self, - group_id: str, - requester_user_id: str, - room_id: str, - config_key: str, - content: JsonDict, - ) -> JsonDict: - """Update room in group""" - RoomID.from_string(room_id) # Ensure valid room id - - await self.check_group_is_ours( - group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id - ) - - if config_key == "m.visibility": - is_public = _parse_visibility_dict(content) - - await self.store.update_room_in_group_visibility( - group_id, room_id, is_public=is_public - ) - else: - raise SynapseError(400, "Unknown config option") - - return {} - - async def remove_room_from_group( - self, group_id: str, requester_user_id: str, room_id: str - ) -> JsonDict: - """Remove room from group""" - await self.check_group_is_ours( - group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id - ) - - await self.store.remove_room_from_group(group_id, room_id) - - return {} - - async def invite_to_group( - self, group_id: str, user_id: str, requester_user_id: str, content: JsonDict - ) -> JsonDict: - """Invite user to group""" - - group = await self.check_group_is_ours( - group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id - ) - if not group: - raise SynapseError(400, "Group does not exist", errcode=Codes.BAD_STATE) - - # TODO: Check if user knocked - - invited_users = await self.store.get_invited_users_in_group(group_id) - if user_id in invited_users: - raise SynapseError( - 400, "User already invited to group", errcode=Codes.BAD_STATE - ) - - user_results = await self.store.get_users_in_group( - group_id, include_private=True - ) - if user_id in (user_result["user_id"] for user_result in user_results): - raise SynapseError(400, "User already in group") - - content = { - "profile": {"name": group["name"], "avatar_url": group["avatar_url"]}, - "inviter": requester_user_id, - } - - if self.hs.is_mine_id(user_id): - groups_local = self.hs.get_groups_local_handler() - assert isinstance( - groups_local, GroupsLocalHandler - ), "Workers cannot invites users to groups." 
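The field validation in `update_group_profile` above follows a reusable pattern: whitelist the keys, require strings, and bound their lengths. A standalone reduction (illustrative only; the `name` and `avatar_url` bounds are assumptions for the example, the description bounds mirror the constants defined above):

from typing import Any, Dict, Mapping

FIELD_LIMITS: Mapping[str, int] = {
    "name": 256,  # assumed, standing in for MAX_DISPLAYNAME_LEN
    "avatar_url": 1000,  # assumed, standing in for MAX_AVATAR_URL_LEN
    "short_description": 1000,  # MAX_SHORT_DESC_LEN
    "long_description": 10000,  # MAX_LONG_DESC_LEN
}


def validate_profile_fields(content: Mapping[str, Any]) -> Dict[str, str]:
    profile: Dict[str, str] = {}
    for key, max_len in FIELD_LIMITS.items():
        if key not in content:
            continue
        value = content[key]
        if not isinstance(value, str):
            raise ValueError(f"{key!r} value is not a string")
        if len(value) > max_len:
            raise ValueError(f"Invalid {key} parameter")
        profile[key] = value
    return profile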
- res = await groups_local.on_invite(group_id, user_id, content) - local_attestation = None - else: - local_attestation = self.attestations.create_attestation(group_id, user_id) - content.update({"attestation": local_attestation}) - - res = await self.transport_client.invite_to_group_notification( - get_domain_from_id(user_id), group_id, user_id, content - ) - - user_profile = res.get("user_profile", {}) - await self.store.add_remote_profile_cache( - user_id, - displayname=user_profile.get("displayname"), - avatar_url=user_profile.get("avatar_url"), - ) - - if res["state"] == "join": - if not self.hs.is_mine_id(user_id): - remote_attestation = res["attestation"] - - await self.attestations.verify_attestation( - remote_attestation, user_id=user_id, group_id=group_id - ) - else: - remote_attestation = None - - await self.store.add_user_to_group( - group_id, - user_id, - is_admin=False, - is_public=False, # TODO - local_attestation=local_attestation, - remote_attestation=remote_attestation, - ) - return {"state": "join"} - elif res["state"] == "invite": - await self.store.add_group_invite(group_id, user_id) - return {"state": "invite"} - elif res["state"] == "reject": - return {"state": "reject"} - else: - raise SynapseError(502, "Unknown state returned by HS") - - async def _add_user( - self, group_id: str, user_id: str, content: JsonDict - ) -> Optional[JsonDict]: - """Add a user to a group based on a content dict. - - See accept_invite, join_group. - """ - if not self.hs.is_mine_id(user_id): - local_attestation: Optional[ - JsonDict - ] = self.attestations.create_attestation(group_id, user_id) - - remote_attestation = content["attestation"] - - await self.attestations.verify_attestation( - remote_attestation, user_id=user_id, group_id=group_id - ) - else: - local_attestation = None - remote_attestation = None - - is_public = _parse_visibility_from_contents(content) - - await self.store.add_user_to_group( - group_id, - user_id, - is_admin=False, - is_public=is_public, - local_attestation=local_attestation, - remote_attestation=remote_attestation, - ) - - return local_attestation - - async def accept_invite( - self, group_id: str, requester_user_id: str, content: JsonDict - ) -> JsonDict: - """User tries to accept an invite to the group. - - This is different from them asking to join, and so should error if no - invite exists (and they're not a member of the group) - """ - - await self.check_group_is_ours(group_id, requester_user_id, and_exists=True) - - is_invited = await self.store.is_user_invited_to_local_group( - group_id, requester_user_id - ) - if not is_invited: - raise SynapseError(403, "User not invited to group") - - local_attestation = await self._add_user(group_id, requester_user_id, content) - - return {"state": "join", "attestation": local_attestation} - - async def join_group( - self, group_id: str, requester_user_id: str, content: JsonDict - ) -> JsonDict: - """User tries to join the group. 
- - This will error if the group requires an invite/knock to join - """ - - group_info = await self.check_group_is_ours( - group_id, requester_user_id, and_exists=True - ) - if not group_info: - raise SynapseError(404, "Group does not exist", errcode=Codes.NOT_FOUND) - if group_info["join_policy"] != "open": - raise SynapseError(403, "Group is not publicly joinable") - - local_attestation = await self._add_user(group_id, requester_user_id, content) - - return {"state": "join", "attestation": local_attestation} - - async def remove_user_from_group( - self, group_id: str, user_id: str, requester_user_id: str, content: JsonDict - ) -> JsonDict: - """Remove a user from the group; either a user is leaving or an admin - kicked them. - """ - - await self.check_group_is_ours(group_id, requester_user_id, and_exists=True) - - is_kick = False - if requester_user_id != user_id: - is_admin = await self.store.is_user_admin_in_group( - group_id, requester_user_id - ) - if not is_admin: - raise SynapseError(403, "User is not admin in group") - - is_kick = True - - await self.store.remove_user_from_group(group_id, user_id) - - if is_kick: - if self.hs.is_mine_id(user_id): - groups_local = self.hs.get_groups_local_handler() - assert isinstance( - groups_local, GroupsLocalHandler - ), "Workers cannot remove users from groups." - await groups_local.user_removed_from_group(group_id, user_id, {}) - else: - await self.transport_client.remove_user_from_group_notification( - get_domain_from_id(user_id), group_id, user_id, {} - ) - - if not self.hs.is_mine_id(user_id): - await self.store.maybe_delete_remote_profile_cache(user_id) - - # Delete group if the last user has left - users = await self.store.get_users_in_group(group_id, include_private=True) - if not users: - await self.store.delete_group(group_id) - - return {} - - async def create_group( - self, group_id: str, requester_user_id: str, content: JsonDict - ) -> JsonDict: - logger.info("Attempting to create group with ID: %r", group_id) - - # parsing the id into a GroupID validates it. 
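As an aside on the validation mentioned in the comment above: group IDs follow the usual Matrix sigil + localpart:domain grammar, so a successful parse is itself the validity check. Illustrative values:

from synapse.types import GroupID

group_id_obj = GroupID.from_string("+example:example.com")
assert group_id_obj.localpart == "example"
assert group_id_obj.domain == "example.com"
# Malformed IDs (wrong sigil, missing domain, ...) raise SynapseError,
# so parsing doubles as validation.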
- group_id_obj = GroupID.from_string(group_id) - - group = await self.check_group_is_ours(group_id, requester_user_id) - if group: - raise SynapseError(400, "Group already exists") - - is_admin = await self.auth.is_server_admin( - UserID.from_string(requester_user_id) - ) - if not is_admin: - if not self.hs.config.groups.enable_group_creation: - raise SynapseError( - 403, "Only a server admin can create groups on this server" - ) - localpart = group_id_obj.localpart - if not localpart.startswith(self.hs.config.groups.group_creation_prefix): - raise SynapseError( - 400, - "Can only create groups with prefix %r on this server" - % (self.hs.config.groups.group_creation_prefix,), - ) - - profile = content.get("profile", {}) - name = profile.get("name") - avatar_url = profile.get("avatar_url") - short_description = profile.get("short_description") - long_description = profile.get("long_description") - user_profile = content.get("user_profile", {}) - - await self.store.create_group( - group_id, - requester_user_id, - name=name, - avatar_url=avatar_url, - short_description=short_description, - long_description=long_description, - ) - - if not self.hs.is_mine_id(requester_user_id): - remote_attestation = content["attestation"] - - await self.attestations.verify_attestation( - remote_attestation, user_id=requester_user_id, group_id=group_id - ) - - local_attestation: Optional[ - JsonDict - ] = self.attestations.create_attestation(group_id, requester_user_id) - else: - local_attestation = None - remote_attestation = None - - await self.store.add_user_to_group( - group_id, - requester_user_id, - is_admin=True, - is_public=True, # TODO - local_attestation=local_attestation, - remote_attestation=remote_attestation, - ) - - if not self.hs.is_mine_id(requester_user_id): - await self.store.add_remote_profile_cache( - requester_user_id, - displayname=user_profile.get("displayname"), - avatar_url=user_profile.get("avatar_url"), - ) - - return {"group_id": group_id} - - async def delete_group(self, group_id: str, requester_user_id: str) -> None: - """Deletes a group, kicking out all current members. - - Only group admins or server admins can call this request - - Args: - group_id: The group ID to delete. - requester_user_id: The user requesting to delete the group. - """ - - await self.check_group_is_ours(group_id, requester_user_id, and_exists=True) - - # Only server admins or group admins can delete groups. - - is_admin = await self.store.is_user_admin_in_group(group_id, requester_user_id) - - if not is_admin: - is_admin = await self.auth.is_server_admin( - UserID.from_string(requester_user_id) - ) - - if not is_admin: - raise SynapseError(403, "User is not an admin") - - # Before deleting the group lets kick everyone out of it - users = await self.store.get_users_in_group(group_id, include_private=True) - - async def _kick_user_from_group(user_id: str) -> None: - if self.hs.is_mine_id(user_id): - groups_local = self.hs.get_groups_local_handler() - assert isinstance( - groups_local, GroupsLocalHandler - ), "Workers cannot kick users from groups." - await groups_local.user_removed_from_group(group_id, user_id, {}) - else: - await self.transport_client.remove_user_from_group_notification( - get_domain_from_id(user_id), group_id, user_id, {} - ) - await self.store.maybe_delete_remote_profile_cache(user_id) - - # We kick users out in the order of: - # 1. Non-admins - # 2. Other admins - # 3. 
The requester - # - # This is so that if the deletion fails for some reason other admins or - # the requester still has auth to retry. - non_admins = [] - admins = [] - for u in users: - if u["user_id"] == requester_user_id: - continue - if u["is_admin"]: - admins.append(u["user_id"]) - else: - non_admins.append(u["user_id"]) - - await concurrently_execute(_kick_user_from_group, non_admins, 10) - await concurrently_execute(_kick_user_from_group, admins, 10) - await _kick_user_from_group(requester_user_id) - - await self.store.delete_group(group_id) - - -def _parse_join_policy_from_contents(content: JsonDict) -> Optional[str]: - """Given a content for a request, return the specified join policy or None""" - - join_policy_dict = content.get("m.join_policy") - if join_policy_dict: - return _parse_join_policy_dict(join_policy_dict) - else: - return None - - -def _parse_join_policy_dict(join_policy_dict: JsonDict) -> str: - """Given a dict for the "m.join_policy" config return the join policy specified""" - join_policy_type = join_policy_dict.get("type") - if not join_policy_type: - return "invite" - - if join_policy_type not in ("invite", "open"): - raise SynapseError(400, "Synapse only supports 'invite'/'open' join rule") - return join_policy_type - - -def _parse_visibility_from_contents(content: JsonDict) -> bool: - """Given a content for a request parse out whether the entity should be - public or not - """ - - visibility = content.get("m.visibility") - if visibility: - return _parse_visibility_dict(visibility) - else: - is_public = True - - return is_public - - -def _parse_visibility_dict(visibility: JsonDict) -> bool: - """Given a dict for the "m.visibility" config return if the entity should - be public or not - """ - vis_type = visibility.get("type") - if not vis_type: - return True - - if vis_type not in ("public", "private"): - raise SynapseError(400, "Synapse only supports 'public'/'private' visibility") - return vis_type == "public" diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 96376963f2..d4fe7df533 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -30,8 +30,8 @@ logger = logging.getLogger(__name__) class AdminHandler: def __init__(self, hs: "HomeServer"): self.store = hs.get_datastores().main - self.storage = hs.get_storage() - self.state_store = self.storage.state + self._storage_controllers = hs.get_storage_controllers() + self._state_storage_controller = self._storage_controllers.state async def get_whois(self, user: UserID) -> JsonDict: connections = [] @@ -197,7 +197,9 @@ class AdminHandler: from_key = events[-1].internal_metadata.after - events = await filter_events_for_client(self.storage, user_id, events) + events = await filter_events_for_client( + self._storage_controllers, user_id, events + ) writer.write_events(room_id, events) @@ -233,7 +235,9 @@ class AdminHandler: for event_id in extremities: if not event_to_unseen_prevs[event_id]: continue - state = await self.state_store.get_state_for_event(event_id) + state = await self._state_storage_controller.get_state_for_event( + event_id + ) writer.write_state(room_id, event_id, state) return writer.finished() diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 1da7bcc85b..814553e098 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -19,7 +19,7 @@ from prometheus_client import Counter from twisted.internet import defer import synapse -from synapse.api.constants import EventTypes +from synapse.api.constants 
import EduTypes, EventTypes
 from synapse.appservice import ApplicationService
 from synapse.events import EventBase
 from synapse.handlers.presence import format_user_presence_state
@@ -503,7 +503,7 @@ class ApplicationServicesHandler:
                 time_now = self.clock.time_msec()
                 events.extend(
                     {
-                        "type": "m.presence",
+                        "type": EduTypes.PRESENCE,
                         "sender": event.user_id,
                         "content": format_user_presence_state(
                             event, time_now, include_user_id=False
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 1b9050ea96..fbafbbee6b 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -210,7 +210,8 @@ class AuthHandler:
         self.hs = hs
         # FIXME better possibility to access registrationHandler later?
         self.macaroon_gen = hs.get_macaroon_generator()
-        self._password_enabled = hs.config.auth.password_enabled
+        self._password_enabled_for_login = hs.config.auth.password_enabled_for_login
+        self._password_enabled_for_reauth = hs.config.auth.password_enabled_for_reauth
         self._password_localdb_enabled = hs.config.auth.password_localdb_enabled
         self._third_party_rules = hs.get_third_party_event_rules()

@@ -387,13 +388,13 @@ class AuthHandler:
         return params, session_id

     async def _get_available_ui_auth_types(self, user: UserID) -> Iterable[str]:
-        """Get a list of the authentication types this user can use"""
+        """Get a list of the user-interactive authentication types this user can use."""

         ui_auth_types = set()

         # if the HS supports password auth, and the user has a non-null password, we
         # support password auth
-        if self._password_localdb_enabled and self._password_enabled:
+        if self._password_localdb_enabled and self._password_enabled_for_reauth:
             lookupres = await self._find_user_id_and_pwd_hash(user.to_string())
             if lookupres:
                 _, password_hash = lookupres
@@ -402,7 +403,7 @@ class AuthHandler:

         # also allow auth from password providers
         for t in self.password_auth_provider.get_supported_login_types().keys():
-            if t == LoginType.PASSWORD and not self._password_enabled:
+            if t == LoginType.PASSWORD and not self._password_enabled_for_reauth:
                 continue

             ui_auth_types.add(t)
@@ -710,7 +711,7 @@ class AuthHandler:
             return res

         # fall back to the v1 login flow
-        canonical_id, _ = await self.validate_login(authdict)
+        canonical_id, _ = await self.validate_login(authdict, is_reauth=True)
         return canonical_id

     def _get_params_recaptcha(self) -> dict:
@@ -1064,7 +1065,7 @@ class AuthHandler:
         Returns:
             Whether users on this server are allowed to change or set a password
         """
-        return self._password_enabled and self._password_localdb_enabled
+        return self._password_enabled_for_login and self._password_localdb_enabled

     def get_supported_login_types(self) -> Iterable[str]:
         """Get the login types supported for the /login API

@@ -1089,9 +1090,9 @@ class AuthHandler:
         # that comes first, where it's present.
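The change above splits the old single `password_enabled` flag into separate switches for login and for re-authentication; the hunk continues below, but in isolation the new gating amounts to the following sketch (function and argument names are assumptions for the example):

def password_allowed_for_attempt(
    is_reauth: bool, enabled_for_login: bool, enabled_for_reauth: bool
) -> bool:
    # During user-interactive auth (reauthenticating for a privileged
    # action), a password may be accepted even when password *login* is
    # disabled -- e.g. for accounts that have been migrated to SSO.
    return enabled_for_reauth if is_reauth else enabled_for_login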
if LoginType.PASSWORD in types: types.remove(LoginType.PASSWORD) - if self._password_enabled: + if self._password_enabled_for_login: types.insert(0, LoginType.PASSWORD) - elif self._password_localdb_enabled and self._password_enabled: + elif self._password_localdb_enabled and self._password_enabled_for_login: types.insert(0, LoginType.PASSWORD) return types @@ -1100,6 +1101,7 @@ class AuthHandler: self, login_submission: Dict[str, Any], ratelimit: bool = False, + is_reauth: bool = False, ) -> Tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]: """Authenticates the user for the /login API @@ -1110,6 +1112,9 @@ class AuthHandler: login_submission: the whole of the login submission (including 'type' and other relevant fields) ratelimit: whether to apply the failed_login_attempt ratelimiter + is_reauth: whether this is part of a User-Interactive Authorisation + flow to reauthenticate for a privileged action (rather than a + new login) Returns: A tuple of the canonical user id, and optional callback to be called once the access token and device id are issued @@ -1132,8 +1137,14 @@ class AuthHandler: # special case to check for "password" for the check_password interface # for the auth providers password = login_submission.get("password") + if login_type == LoginType.PASSWORD: - if not self._password_enabled: + if is_reauth: + passwords_allowed_here = self._password_enabled_for_reauth + else: + passwords_allowed_here = self._password_enabled_for_login + + if not passwords_allowed_here: raise SynapseError(400, "Password login has been disabled.") if not isinstance(password, str): raise SynapseError(400, "Bad parameter: password", Codes.INVALID_PARAM) diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 1d6d1f8a92..72faf2ee38 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -28,7 +28,7 @@ from typing import ( ) from synapse.api import errors -from synapse.api.constants import EventTypes +from synapse.api.constants import EduTypes, EventTypes from synapse.api.errors import ( Codes, FederationDeniedError, @@ -61,6 +61,7 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) MAX_DEVICE_DISPLAY_NAME_LEN = 100 +DELETE_STALE_DEVICES_INTERVAL_MS = 24 * 60 * 60 * 1000 class DeviceWorkerHandler: @@ -70,7 +71,7 @@ class DeviceWorkerHandler: self.store = hs.get_datastores().main self.notifier = hs.get_notifier() self.state = hs.get_state_handler() - self.state_store = hs.get_storage().state + self._state_storage = hs.get_storage_controllers().state self._auth_handler = hs.get_auth_handler() self.server_name = hs.hostname @@ -203,7 +204,9 @@ class DeviceWorkerHandler: continue # mapping from event_id -> state_dict - prev_state_ids = await self.state_store.get_state_ids_for_events(event_ids) + prev_state_ids = await self._state_storage.get_state_ids_for_events( + event_ids + ) # Check if we've joined the room? If so we just blindly add all the users to # the "possibly changed" users. @@ -277,7 +280,8 @@ class DeviceHandler(DeviceWorkerHandler): federation_registry = hs.get_federation_registry() federation_registry.register_edu_handler( - "m.device_list_update", self.device_list_updater.incoming_device_list_update + EduTypes.DEVICE_LIST_UPDATE, + self.device_list_updater.incoming_device_list_update, ) hs.get_distributor().observe("user_left_room", self.user_left_room) @@ -292,6 +296,19 @@ class DeviceHandler(DeviceWorkerHandler): # On start up check if there are any updates pending. 
hs.get_reactor().callWhenRunning(self._handle_new_device_update_async) + self._delete_stale_devices_after = hs.config.server.delete_stale_devices_after + + # Ideally we would run this on a worker and condition this on the + # "run_background_tasks_on" setting, but this would mean making the notification + # of device list changes over federation work on workers, which is nontrivial. + if self._delete_stale_devices_after is not None: + self.clock.looping_call( + run_as_background_process, + DELETE_STALE_DEVICES_INTERVAL_MS, + "delete_stale_devices", + self._delete_stale_devices, + ) + def _check_device_name_length(self, name: Optional[str]) -> None: """ Checks whether a device name is longer than the maximum allowed length. @@ -367,6 +384,19 @@ class DeviceHandler(DeviceWorkerHandler): raise errors.StoreError(500, "Couldn't generate a device ID.") + async def _delete_stale_devices(self) -> None: + """Background task that deletes devices which haven't been accessed for more than + a configured time period. + """ + # We should only be running this job if the config option is defined. + assert self._delete_stale_devices_after is not None + now_ms = self.clock.time_msec() + since_ms = now_ms - self._delete_stale_devices_after + devices = await self.store.get_local_devices_not_accessed_since(since_ms) + + for user_id, user_devices in devices.items(): + await self.delete_devices(user_id, user_devices) + @trace async def delete_device(self, user_id: str, device_id: str) -> None: """Delete the given device @@ -689,7 +719,8 @@ class DeviceHandler(DeviceWorkerHandler): ) # TODO: when called, this isn't in a logging context. # This leads to log spam, sentry event spam, and massive - # memory usage. See #12552. + # memory usage. + # See https://github.com/matrix-org/synapse/issues/12552. # log_kv( # {"message": "sent device update to host", "host": host} # ) @@ -763,6 +794,10 @@ class DeviceListUpdater: device_id = edu_content.pop("device_id") stream_id = str(edu_content.pop("stream_id")) # They may come as ints prev_ids = edu_content.pop("prev_id", []) + if not isinstance(prev_ids, list): + raise SynapseError( + 400, "Device list update had an invalid 'prev_ids' field" + ) prev_ids = [str(p) for p in prev_ids] # They may come as ints if get_domain_from_id(user_id) != origin: diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py index 53668cce3b..444c08bc2e 100644 --- a/synapse/handlers/devicemessage.py +++ b/synapse/handlers/devicemessage.py @@ -15,7 +15,7 @@ import logging from typing import TYPE_CHECKING, Any, Dict -from synapse.api.constants import ToDeviceEventTypes +from synapse.api.constants import EduTypes, ToDeviceEventTypes from synapse.api.errors import SynapseError from synapse.api.ratelimiting import Ratelimiter from synapse.logging.context import run_in_background @@ -59,11 +59,11 @@ class DeviceMessageHandler: # to the appropriate worker. 
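The stale-device deletion added to the device handler above boils down to a daily background job computing a cutoff timestamp. A reduced, self-contained sketch (the helper name and the 90-day retention value are invented for the example):

import time

DELETE_STALE_DEVICES_INTERVAL_MS = 24 * 60 * 60 * 1000  # the job runs daily


def stale_device_cutoff(delete_stale_devices_after_ms: int) -> int:
    # Devices not accessed since this timestamp get deleted on the next run.
    now_ms = int(time.time() * 1000)
    return now_ms - delete_stale_devices_after_ms


# e.g. with a 90-day retention window:
cutoff_ms = stale_device_cutoff(90 * 24 * 60 * 60 * 1000)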
if hs.get_instance_name() in hs.config.worker.writers.to_device: hs.get_federation_registry().register_edu_handler( - "m.direct_to_device", self.on_direct_to_device_edu + EduTypes.DIRECT_TO_DEVICE, self.on_direct_to_device_edu ) else: hs.get_federation_registry().register_instances_for_edu( - "m.direct_to_device", + EduTypes.DIRECT_TO_DEVICE, hs.config.worker.writers.to_device, ) diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index e6c2cfb8c8..52bb5c9c55 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -25,6 +25,7 @@ from unpaddedbase64 import decode_base64 from twisted.internet import defer +from synapse.api.constants import EduTypes from synapse.api.errors import CodeMessageException, Codes, NotFoundError, SynapseError from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.logging.opentracing import log_kv, set_tag, tag_args, trace @@ -66,13 +67,13 @@ class E2eKeysHandler: # Only register this edu handler on master as it requires writing # device updates to the db federation_registry.register_edu_handler( - "m.signing_key_update", + EduTypes.SIGNING_KEY_UPDATE, self._edu_updater.incoming_signing_key_update, ) # also handle the unstable version # FIXME: remove this when enough servers have upgraded federation_registry.register_edu_handler( - "org.matrix.signing_key_update", + EduTypes.UNSTABLE_SIGNING_KEY_UPDATE, self._edu_updater.incoming_signing_key_update, ) diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 82a5aac3dd..ac13340d3a 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -113,7 +113,7 @@ class EventStreamHandler: states = await presence_handler.get_states(users) to_add.extend( { - "type": EduTypes.Presence, + "type": EduTypes.PRESENCE, "content": format_user_presence_state(state, time_now), } for state in states @@ -139,7 +139,7 @@ class EventStreamHandler: class EventHandler: def __init__(self, hs: "HomeServer"): self.store = hs.get_datastores().main - self.storage = hs.get_storage() + self._storage_controllers = hs.get_storage_controllers() async def get_event( self, @@ -177,7 +177,7 @@ class EventHandler: is_peeking = user.to_string() not in users filtered = await filter_events_for_client( - self.storage, user.to_string(), [event], is_peeking=is_peeking + self._storage_controllers, user.to_string(), [event], is_peeking=is_peeking ) if not filtered: diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 0386d0a07b..659f279441 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -20,7 +20,16 @@ import itertools import logging from enum import Enum from http import HTTPStatus -from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Union +from typing import ( + TYPE_CHECKING, + Collection, + Dict, + Iterable, + List, + Optional, + Tuple, + Union, +) import attr from signedjson.key import decode_verify_key_bytes @@ -34,6 +43,7 @@ from synapse.api.errors import ( CodeMessageException, Codes, FederationDeniedError, + FederationError, HttpResponseException, NotFoundError, RequestSendFailed, @@ -125,8 +135,8 @@ class FederationHandler: self.hs = hs self.store = hs.get_datastores().main - self.storage = hs.get_storage() - self.state_store = self.storage.state + self._storage_controllers = hs.get_storage_controllers() + self._state_storage_controller = self._storage_controllers.state self.federation_client = hs.get_federation_client() self.state_handler = 
hs.get_state_handler() self.server_name = hs.hostname @@ -159,6 +169,14 @@ class FederationHandler: self.third_party_event_rules = hs.get_third_party_event_rules() + # if this is the main process, fire off a background process to resume + # any partial-state-resync operations which were in flight when we + # were shut down. + if not hs.config.worker.worker_app: + run_as_background_process( + "resume_sync_partial_state_room", self._resume_sync_partial_state_room + ) + async def maybe_backfill( self, room_id: str, current_depth: int, limit: int ) -> bool: @@ -324,7 +342,7 @@ class FederationHandler: # We set `check_history_visibility_only` as we might otherwise get false # positives from users having been erased. filtered_extremities = await filter_events_for_server( - self.storage, + self._storage_controllers, self.server_name, events_to_check, redact=False, @@ -460,6 +478,8 @@ class FederationHandler: """ # TODO: We should be able to call this on workers, but the upgrading of # room stuff after join currently doesn't work on workers. + # TODO: Before we relax this condition, we need to allow re-syncing of + # partial room state to happen on workers. assert self.config.worker.worker_app is None logger.debug("Joining %s to %s", joinee, room_id) @@ -540,12 +560,11 @@ class FederationHandler: if ret.partial_state: # Kick off the process of asynchronously fetching the state for this # room. - # - # TODO(faster_joins): pick this up again on restart run_as_background_process( desc="sync_partial_state_room", func=self._sync_partial_state_room, - destination=origin, + initial_destination=origin, + other_destinations=ret.servers_in_room, room_id=room_id, ) @@ -660,7 +679,7 @@ class FederationHandler: # in the invitee's sync stream. It is stripped out for all other local users. 
event.unsigned["knock_room_state"] = stripped_room_state["knock_state_events"] - context = EventContext.for_outlier(self.storage) + context = EventContext.for_outlier(self._storage_controllers) stream_id = await self._federation_event_handler.persist_events_and_notify( event.room_id, [(event, context)] ) @@ -849,7 +868,7 @@ class FederationHandler: ) ) - context = EventContext.for_outlier(self.storage) + context = EventContext.for_outlier(self._storage_controllers) await self._federation_event_handler.persist_events_and_notify( event.room_id, [(event, context)] ) @@ -878,7 +897,7 @@ class FederationHandler: await self.federation_client.send_leave(host_list, event) - context = EventContext.for_outlier(self.storage) + context = EventContext.for_outlier(self._storage_controllers) stream_id = await self._federation_event_handler.persist_events_and_notify( event.room_id, [(event, context)] ) @@ -1027,7 +1046,9 @@ class FederationHandler: if event.internal_metadata.outlier: raise NotFoundError("State not known at event %s" % (event_id,)) - state_groups = await self.state_store.get_state_groups_ids(room_id, [event_id]) + state_groups = await self._state_storage_controller.get_state_groups_ids( + room_id, [event_id] + ) # get_state_groups_ids should return exactly one result assert len(state_groups) == 1 @@ -1076,7 +1097,9 @@ class FederationHandler: ], ) - events = await filter_events_for_server(self.storage, origin, events) + events = await filter_events_for_server( + self._storage_controllers, origin, events + ) return events @@ -1107,7 +1130,9 @@ class FederationHandler: if not in_room: raise AuthError(403, "Host not in room.") - events = await filter_events_for_server(self.storage, origin, [event]) + events = await filter_events_for_server( + self._storage_controllers, origin, [event] + ) event = events[0] return event else: @@ -1136,7 +1161,7 @@ class FederationHandler: ) missing_events = await filter_events_for_server( - self.storage, origin, missing_events + self._storage_controllers, origin, missing_events ) return missing_events @@ -1446,17 +1471,35 @@ class FederationHandler: # well. return None + async def _resume_sync_partial_state_room(self) -> None: + """Resumes resyncing of all partial-state rooms after a restart.""" + assert not self.config.worker.worker_app + + partial_state_rooms = await self.store.get_partial_state_rooms_and_servers() + for room_id, servers_in_room in partial_state_rooms.items(): + run_as_background_process( + desc="sync_partial_state_room", + func=self._sync_partial_state_room, + initial_destination=None, + other_destinations=servers_in_room, + room_id=room_id, + ) + async def _sync_partial_state_room( self, - destination: str, + initial_destination: Optional[str], + other_destinations: Collection[str], room_id: str, ) -> None: """Background process to resync the state of a partial-state room Args: - destination: homeserver to pull the state from + initial_destination: the initial homeserver to pull the state from + other_destinations: other homeservers to try to pull the state from, if + `initial_destination` is unavailable room_id: room to be resynced """ + assert not self.config.worker.worker_app # TODO(faster_joins): do we need to lock to avoid races? What happens if other # worker processes kick off a resync in parallel? Perhaps we should just elect @@ -1466,8 +1509,29 @@ class FederationHandler: # really leave, that might mean we have difficulty getting the room state over # federation. 
#
-        # TODO(faster_joins): try other destinations if the one we have fails
+        # TODO(faster_joins): we need some way of prioritising which homeservers in
+        #   `other_destinations` to try first, otherwise we'll spend ages trying dead
+        #   homeservers for large rooms.
+        if initial_destination is None and len(other_destinations) == 0:
+            raise ValueError(
+                f"Cannot resync state of {room_id}: no destinations provided"
+            )
+
+        # Make an infinite iterator of destinations to try. Once we find a working
+        # destination, we'll stick with it until it flakes.
+        if initial_destination is not None:
+            # Move `initial_destination` to the front of the list.
+            destinations = list(other_destinations)
+            if initial_destination in destinations:
+                destinations.remove(initial_destination)
+            destinations = [initial_destination] + destinations
+        else:
+            destinations = list(other_destinations)
+        destination_iter = itertools.cycle(destinations)
+
+        # `destination` is the current remote homeserver we're pulling from.
+        destination = next(destination_iter)
         logger.info("Syncing state for room %s via %s", room_id, destination)

         # we work through the queue in order of increasing stream ordering.
@@ -1478,9 +1542,11 @@
                 # clear the lazy-loading flag.
                 logger.info("Updating current state for %s", room_id)
                 assert (
-                    self.storage.persistence is not None
+                    self._storage_controllers.persistence is not None
                 ), "TODO(faster_joins): support for workers"
-                await self.storage.persistence.update_current_state(room_id)
+                await self._storage_controllers.persistence.update_current_state(
+                    room_id
+                )

                 logger.info("Clearing partial-state flag for %s", room_id)
                 success = await self.store.clear_partial_state_room(room_id)
@@ -1503,6 +1569,41 @@
                 allow_rejected=True,
             )
             for event in events:
-                await self._federation_event_handler.update_state_for_partial_state_event(
-                    destination, event
-                )
+                for attempt in itertools.count():
+                    try:
+                        await self._federation_event_handler.update_state_for_partial_state_event(
+                            destination, event
+                        )
+                        break
+                    except FederationError as e:
+                        if attempt == len(destinations) - 1:
+                            # We have tried every remote server for this event. Give up.
+                            # TODO(faster_joins) giving up isn't the right thing to do
+                            #   if there's a temporary network outage. retrying
+                            #   indefinitely is also not the right thing to do if we can
+                            #   reach all homeservers and they all claim they don't have
+                            #   the state we want.
+                            logger.error(
+                                "Failed to get state for %s at %s from %s because %s, "
+                                "giving up!",
+                                room_id,
+                                event,
+                                destination,
+                                e,
+                            )
+                            raise
+
+                        # Try the next remote server.
+ logger.info( + "Failed to get state for %s at %s from %s because %s", + room_id, + event, + destination, + e, + ) + destination = next(destination_iter) + logger.info( + "Syncing state for room %s via %s instead", + room_id, + destination, + ) diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 05c122f224..549b066dd9 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -98,8 +98,8 @@ class FederationEventHandler: def __init__(self, hs: "HomeServer"): self._store = hs.get_datastores().main - self._storage = hs.get_storage() - self._state_store = self._storage.state + self._storage_controllers = hs.get_storage_controllers() + self._state_storage_controller = self._storage_controllers.state self._state_handler = hs.get_state_handler() self._event_creation_handler = hs.get_event_creation_handler() @@ -274,7 +274,7 @@ class FederationEventHandler: affected=pdu.event_id, ) - await self._process_received_pdu(origin, pdu, state=None) + await self._process_received_pdu(origin, pdu, state_ids=None) async def on_send_membership_event( self, origin: str, event: EventBase @@ -463,7 +463,9 @@ class FederationEventHandler: with nested_logging_context(suffix=event.event_id): context = await self._state_handler.compute_event_context( event, - old_state=state, + state_ids_before_event={ + (e.type, e.state_key): e.event_id for e in state + }, partial_state=partial_state, ) @@ -477,7 +479,23 @@ class FederationEventHandler: # and discover that we do not have it. event.internal_metadata.proactively_send = False - return await self.persist_events_and_notify(room_id, [(event, context)]) + stream_id_after_persist = await self.persist_events_and_notify( + room_id, [(event, context)] + ) + + # If we're joining the room again, check if there is new marker + # state indicating that there is new history imported somewhere in + # the DAG. Multiple markers can exist in the current state with + # unique state_keys. + # + # Do this after the state from the remote join was persisted (via + # `persist_events_and_notify`). Otherwise we can run into a + # situation where the create event doesn't exist yet in the + # `current_state_events` + for e in state: + await self._handle_marker_event(origin, e) + + return stream_id_after_persist async def update_state_for_partial_state_event( self, destination: str, event: EventBase @@ -487,6 +505,9 @@ class FederationEventHandler: Args: destination: server to request full state from event: partial-state event to be de-partial-stated + + Raises: + FederationError if we fail to request state from the remote server. """ logger.info("Updating state for %s", event.event_id) with nested_logging_context(suffix=event.event_id): @@ -496,12 +517,12 @@ class FederationEventHandler: # # This is the same operation as we do when we receive a regular event # over federation. 
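The destination-cycling retry strategy added to `_sync_partial_state_room` above can be summarised in isolation. A self-contained sketch (all names invented; `fetch` stands in for the federation request):

import itertools
from typing import Callable, Iterator, List, Optional, Sequence


def fetch_with_failover(
    initial_destination: Optional[str],
    other_destinations: Sequence[str],
    fetch: Callable[[str], bool],
) -> str:
    # Put the preferred destination first, then cycle, so a destination
    # that works keeps being used until it flakes.
    destinations: List[str] = [
        d for d in other_destinations if d != initial_destination
    ]
    if initial_destination is not None:
        destinations.insert(0, initial_destination)
    if not destinations:
        raise ValueError("no destinations provided")

    destination_iter: Iterator[str] = itertools.cycle(destinations)
    destination = next(destination_iter)
    for _attempt in range(len(destinations)):
        if fetch(destination):
            return destination  # stick with this server for later requests
        destination = next(destination_iter)
    raise RuntimeError("every destination failed")


# Example (invented servers): only the second candidate responds.
assert (
    fetch_with_failover("a.example", ["b.example"], lambda d: d == "b.example")
    == "b.example"
)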
- state = await self._resolve_state_at_missing_prevs(destination, event) + state_ids = await self._resolve_state_at_missing_prevs(destination, event) # build a new state group for it if need be context = await self._state_handler.compute_event_context( event, - old_state=state, + state_ids_before_event=state_ids, ) if context.partial_state: # this can happen if some or all of the event's prev_events still have @@ -517,7 +538,9 @@ class FederationEventHandler: ) return await self._store.update_state_for_partial_state_event(event, context) - self._state_store.notify_event_un_partial_stated(event.event_id) + self._state_storage_controller.notify_event_un_partial_stated( + event.event_id + ) async def backfill( self, dest: str, room_id: str, limit: int, extremities: Collection[str] @@ -751,11 +774,12 @@ class FederationEventHandler: return try: - state = await self._resolve_state_at_missing_prevs(origin, event) + state_ids = await self._resolve_state_at_missing_prevs(origin, event) # TODO(faster_joins): make sure that _resolve_state_at_missing_prevs does # not return partial state + await self._process_received_pdu( - origin, event, state=state, backfilled=backfilled + origin, event, state_ids=state_ids, backfilled=backfilled ) except FederationError as e: if e.code == 403: @@ -765,7 +789,7 @@ class FederationEventHandler: async def _resolve_state_at_missing_prevs( self, dest: str, event: EventBase - ) -> Optional[Iterable[EventBase]]: + ) -> Optional[StateMap[str]]: """Calculate the state at an event with missing prev_events. This is used when we have pulled a batch of events from a remote server, and @@ -792,8 +816,12 @@ class FederationEventHandler: event: an event to check for missing prevs. Returns: - if we already had all the prev events, `None`. Otherwise, returns a list of - the events in the state at `event`. + if we already had all the prev events, `None`. Otherwise, returns + the event ids of the state at `event`. + + Raises: + FederationError if we fail to get the state from the remote server after any + missing `prev_event`s. """ room_id = event.room_id event_id = event.event_id @@ -813,10 +841,12 @@ class FederationEventHandler: ) # Calculate the state after each of the previous events, and # resolve them to find the correct state at the current event. - event_map = {event_id: event} + try: # Get the state of the events we know about - ours = await self._state_store.get_state_groups_ids(room_id, seen) + ours = await self._state_storage_controller.get_state_groups_ids( + room_id, seen + ) # state_maps is a list of mappings from (type, state_key) to event_id state_maps: List[StateMap[str]] = list(ours.values()) @@ -833,40 +863,23 @@ class FederationEventHandler: # note that if any of the missing prevs share missing state or # auth events, the requests to fetch those events are deduped # by the get_pdu_cache in federation_client. 
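Note the type change here: `_resolve_state_at_missing_prevs` now returns event IDs keyed by (event type, state key) instead of full events. An illustration of the shape (values invented; in Synapse, `StateMap` is a `Mapping` over exactly this key type):

from typing import Dict, Tuple

# StateMap[str]: (event type, state key) -> event ID
state_ids: Dict[Tuple[str, str], str] = {
    ("m.room.create", ""): "$create-event-id",
    ("m.room.power_levels", ""): "$power-levels-event-id",
    ("m.room.member", "@alice:example.org"): "$alice-join-event-id",
}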
- remote_state = await self._get_state_after_missing_prev_event( - dest, room_id, p + remote_state_map = ( + await self._get_state_ids_after_missing_prev_event( + dest, room_id, p + ) ) - remote_state_map = { - (x.type, x.state_key): x.event_id for x in remote_state - } state_maps.append(remote_state_map) - for x in remote_state: - event_map[x.event_id] = x - room_version = await self._store.get_room_version_id(room_id) state_map = await self._state_resolution_handler.resolve_events_with_store( room_id, room_version, state_maps, - event_map, + event_map={event_id: event}, state_res_store=StateResolutionStore(self._store), ) - # We need to give _process_received_pdu the actual state events - # rather than event ids, so generate that now. - - # First though we need to fetch all the events that are in - # state_map, so we can build up the state below. - evs = await self._store.get_events( - list(state_map.values()), - get_prev_content=False, - redact_behaviour=EventRedactBehaviour.as_is, - ) - event_map.update(evs) - - state = [event_map[e] for e in state_map.values()] except Exception: logger.warning( "Error attempting to resolve state at missing prev_events", @@ -878,14 +891,14 @@ class FederationEventHandler: "We can't get valid state history.", affected=event_id, ) - return state + return state_map - async def _get_state_after_missing_prev_event( + async def _get_state_ids_after_missing_prev_event( self, destination: str, room_id: str, event_id: str, - ) -> List[EventBase]: + ) -> StateMap[str]: """Requests all of the room state at a given event from a remote homeserver. Args: @@ -894,7 +907,11 @@ class FederationEventHandler: event_id: The id of the event we want the state at. Returns: - A list of events in the state, including the event itself + The event ids of the state *after* the given event. + + Raises: + InvalidResponseError: if the remote homeserver's response contains fields + of the wrong type. """ ( state_event_ids, @@ -909,19 +926,17 @@ class FederationEventHandler: len(auth_event_ids), ) - # start by just trying to fetch the events from the store + # Start by checking events we already have in the DB desired_events = set(state_event_ids) desired_events.add(event_id) logger.debug("Fetching %i events from cache/store", len(desired_events)) - fetched_events = await self._store.get_events( - desired_events, allow_rejected=True - ) + have_events = await self._store.have_seen_events(room_id, desired_events) - missing_desired_events = desired_events - fetched_events.keys() + missing_desired_events = desired_events - have_events logger.debug( "We are missing %i events (got %i)", len(missing_desired_events), - len(fetched_events), + len(have_events), ) # We probably won't need most of the auth events, so let's just check which @@ -932,7 +947,7 @@ class FederationEventHandler: # already have a bunch of the state events. It would be nice if the # federation api gave us a way of finding out which we actually need. - missing_auth_events = set(auth_event_ids) - fetched_events.keys() + missing_auth_events = set(auth_event_ids) - have_events missing_auth_events.difference_update( await self._store.have_seen_events(room_id, missing_auth_events) ) @@ -958,47 +973,51 @@ class FederationEventHandler: destination=destination, room_id=room_id, event_ids=missing_events ) - # we need to make sure we re-load from the database to get the rejected - # state correct. 
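The replacement hunk that follows rebuilds the state map from lightweight event metadata rather than fully-fetched events, dropping anything that claims to be from another room or is not a state event. A reduced sketch under assumed shapes:

from dataclasses import dataclass
from typing import Dict, Optional, Tuple


@dataclass
class EventMetadata:
    room_id: str
    event_type: str
    state_key: Optional[str]  # None for non-state events


def build_state_map(
    expected_room_id: str, metadata: Dict[str, EventMetadata]
) -> Dict[Tuple[str, str], str]:
    state_map: Dict[Tuple[str, str], str] = {}
    for event_id, m in metadata.items():
        if m.room_id != expected_room_id:
            # A remote server claimed a state/auth event from another room;
            # omit it rather than poison the state set.
            continue
        if m.state_key is None:
            continue  # a non-state event offered to us as "state"
        state_map[(m.event_type, m.state_key)] = event_id
    return state_map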
- fetched_events.update( - await self._store.get_events(missing_desired_events, allow_rejected=True) - ) + # We now need to fill out the state map, which involves fetching the + # type and state key for each event ID in the state. + state_map = {} - # check for events which were in the wrong room. - # - # this can happen if a remote server claims that the state or - # auth_events at an event in room A are actually events in room B + event_metadata = await self._store.get_metadata_for_events(state_event_ids) + for state_event_id, metadata in event_metadata.items(): + if metadata.room_id != room_id: + # This is a bogus situation, but since we may only discover it a long time + # after it happened, we try our best to carry on, by just omitting the + # bad events from the returned state set. + # + # This can happen if a remote server claims that the state or + # auth_events at an event in room A are actually events in room B + logger.warning( + "Remote server %s claims event %s in room %s is an auth/state " + "event in room %s", + destination, + state_event_id, + metadata.room_id, + room_id, + ) + continue - bad_events = [ - (event_id, event.room_id) - for event_id, event in fetched_events.items() - if event.room_id != room_id - ] + if metadata.state_key is None: + logger.warning( + "Remote server gave us non-state event in state: %s", state_event_id + ) + continue - for bad_event_id, bad_room_id in bad_events: - # This is a bogus situation, but since we may only discover it a long time - # after it happened, we try our best to carry on, by just omitting the - # bad events from the returned state set. - logger.warning( - "Remote server %s claims event %s in room %s is an auth/state " - "event in room %s", - destination, - bad_event_id, - bad_room_id, - room_id, - ) - - del fetched_events[bad_event_id] + state_map[(metadata.event_type, metadata.state_key)] = state_event_id # if we couldn't get the prev event in question, that's a problem. - remote_event = fetched_events.get(event_id) + remote_event = await self._store.get_event( + event_id, + allow_none=True, + allow_rejected=True, + redact_behaviour=EventRedactBehaviour.as_is, + ) if not remote_event: raise Exception("Unable to get missing prev_event %s" % (event_id,)) # missing state at that event is a warning, not a blocker # XXX: this doesn't sound right? it means that we'll end up with incomplete # state. - failed_to_fetch = desired_events - fetched_events.keys() + failed_to_fetch = desired_events - event_metadata.keys() if failed_to_fetch: logger.warning( "Failed to fetch missing state events for %s %s", @@ -1006,14 +1025,12 @@ class FederationEventHandler: failed_to_fetch, ) - remote_state = [ - fetched_events[e_id] for e_id in state_event_ids if e_id in fetched_events - ] - if remote_event.is_state() and remote_event.rejected_reason is None: - remote_state.append(remote_event) + state_map[ + (remote_event.type, remote_event.state_key) + ] = remote_event.event_id - return remote_state + return state_map async def _get_state_and_persist( self, destination: str, room_id: str, event_id: str @@ -1040,7 +1057,7 @@ class FederationEventHandler: self, origin: str, event: EventBase, - state: Optional[Iterable[EventBase]], + state_ids: Optional[StateMap[str]], backfilled: bool = False, ) -> None: """Called when we have a new non-outlier event. 
@@ -1062,7 +1079,7 @@ class FederationEventHandler: event: event to be persisted - state: Normally None, but if we are handling a gap in the graph + state_ids: Normally None, but if we are handling a gap in the graph (ie, we are missing one or more prev_events), the resolved state at the event @@ -1074,7 +1091,8 @@ class FederationEventHandler: try: context = await self._state_handler.compute_event_context( - event, old_state=state + event, + state_ids_before_event=state_ids, ) context = await self._check_event_auth( origin, @@ -1091,7 +1109,7 @@ class FederationEventHandler: # For new (non-backfilled and non-outlier) events we check if the event # passes auth based on the current state. If it doesn't then we # "soft-fail" the event. - await self._check_for_soft_fail(event, state, origin=origin) + await self._check_for_soft_fail(event, state_ids, origin=origin) await self._run_push_actions_and_persist_event(event, context, backfilled) @@ -1230,6 +1248,14 @@ class FederationEventHandler: # Nothing to retrieve then (invalid marker) return + already_seen_insertion_event = await self._store.have_seen_event( + marker_event.room_id, insertion_event_id + ) + if already_seen_insertion_event: + # No need to process a marker again if we have already seen the + # insertion event that it was pointing to + return + logger.debug( "_handle_marker_event: backfilling insertion event %s", insertion_event_id ) @@ -1425,7 +1451,7 @@ class FederationEventHandler: # we're not bothering about room state, so flag the event as an outlier. event.internal_metadata.outlier = True - context = EventContext.for_outlier(self._storage) + context = EventContext.for_outlier(self._storage_controllers) try: validate_event_for_room_version(room_version_obj, event) check_auth_rules_for_event(room_version_obj, event, auth) @@ -1565,7 +1591,7 @@ class FederationEventHandler: async def _check_for_soft_fail( self, event: EventBase, - state: Optional[Iterable[EventBase]], + state_ids: Optional[StateMap[str]], origin: str, ) -> None: """Checks if we should soft fail the event; if so, marks the event as @@ -1573,7 +1599,7 @@ class FederationEventHandler: Args: event - state: The state at the event if we don't have all the event's prev events + state_ids: The state at the event if we don't have all the event's prev events origin: The host the event originates from. """ extrem_ids_list = await self._store.get_latest_event_ids_in_room(event.room_id) @@ -1589,7 +1615,7 @@ class FederationEventHandler: room_version_obj = KNOWN_ROOM_VERSIONS[room_version] # Calculate the "current state". - if state is not None: + if state_ids is not None: # If we're explicitly given the state then we won't have all the # prev events, and so we have a gap in the graph. In this case # we want to be a little careful as we might have been down for @@ -1602,17 +1628,20 @@ class FederationEventHandler: # given state at the event. This should correctly handle cases # like bans, especially with state res v2. 
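The marker-event change in this hunk short-circuits reprocessing: if the insertion event a marker points at has already been seen, there is nothing left to backfill. A minimal sketch of that guard, with `store` left abstract:

```python
from typing import Any


async def process_marker_event(
    store: Any, room_id: str, insertion_event_id: str
) -> bool:
    """Return True if the marker still needs processing.
    `store.have_seen_event` mirrors the storage call used above."""
    if await store.have_seen_event(room_id, insertion_event_id):
        # We already processed the insertion event this marker points to.
        return False
    # ...otherwise go and backfill the insertion event as before...
    return True
```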
- state_sets_d = await self._state_store.get_state_groups( + state_sets_d = await self._state_storage_controller.get_state_groups_ids( event.room_id, extrem_ids ) - state_sets: List[Iterable[EventBase]] = list(state_sets_d.values()) - state_sets.append(state) - current_states = await self._state_handler.resolve_events( - room_version, state_sets, event + state_sets: List[StateMap[str]] = list(state_sets_d.values()) + state_sets.append(state_ids) + current_state_ids = ( + await self._state_resolution_handler.resolve_events_with_store( + event.room_id, + room_version, + state_sets, + event_map=None, + state_res_store=StateResolutionStore(self._store), + ) ) - current_state_ids: StateMap[str] = { - k: e.event_id for k, e in current_states.items() - } else: current_state_ids = await self._state_handler.get_current_state_ids( event.room_id, latest_event_ids=extrem_ids @@ -1871,7 +1900,7 @@ class FederationEventHandler: # create a new state group as a delta from the existing one. prev_group = context.state_group - state_group = await self._state_store.store_state_group( + state_group = await self._state_storage_controller.store_state_group( event.event_id, event.room_id, prev_group=prev_group, @@ -1880,7 +1909,7 @@ class FederationEventHandler: ) return EventContext.with_state( - storage=self._storage, + storage=self._storage_controllers, state_group=state_group, state_group_before_event=context.state_group_before_event, state_delta_due_to_event=state_updates, @@ -1970,11 +1999,14 @@ class FederationEventHandler: ) return result["max_stream_id"] else: - assert self._storage.persistence + assert self._storage_controllers.persistence # Note that this returns the events that were persisted, which may not be # the same as were passed in if some were deduplicated due to transaction IDs. - events, max_stream_token = await self._storage.persistence.persist_events( + ( + events, + max_stream_token, + ) = await self._storage_controllers.persistence.persist_events( event_and_contexts, backfilled=backfilled ) diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py deleted file mode 100644 index e7a399787b..0000000000 --- a/synapse/handlers/groups_local.py +++ /dev/null @@ -1,503 +0,0 @@ -# Copyright 2017 Vector Creations Ltd -# Copyright 2018 New Vector Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import logging -from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, Iterable, List, Set - -from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError -from synapse.types import GroupID, JsonDict, get_domain_from_id - -if TYPE_CHECKING: - from synapse.server import HomeServer - -logger = logging.getLogger(__name__) - - -def _create_rerouter(func_name: str) -> Callable[..., Awaitable[JsonDict]]: - """Returns an async function that looks at the group id and calls the function - on federation or the local group server if the group is local - """ - - async def f( - self: "GroupsLocalWorkerHandler", group_id: str, *args: Any, **kwargs: Any - ) -> JsonDict: - if not GroupID.is_valid(group_id): - raise SynapseError(400, "%s is not a legal group ID" % (group_id,)) - - if self.is_mine_id(group_id): - return await getattr(self.groups_server_handler, func_name)( - group_id, *args, **kwargs - ) - else: - destination = get_domain_from_id(group_id) - - try: - return await getattr(self.transport_client, func_name)( - destination, group_id, *args, **kwargs - ) - except HttpResponseException as e: - # Capture errors returned by the remote homeserver and - # re-throw specific errors as SynapseErrors. This is so - # when the remote end responds with things like 403 Not - # In Group, we can communicate that to the client instead - # of a 500. - raise e.to_synapse_error() - except RequestSendFailed: - raise SynapseError(502, "Failed to contact group server") - - return f - - -class GroupsLocalWorkerHandler: - def __init__(self, hs: "HomeServer"): - self.hs = hs - self.store = hs.get_datastores().main - self.room_list_handler = hs.get_room_list_handler() - self.groups_server_handler = hs.get_groups_server_handler() - self.transport_client = hs.get_federation_transport_client() - self.auth = hs.get_auth() - self.clock = hs.get_clock() - self.keyring = hs.get_keyring() - self.is_mine_id = hs.is_mine_id - self.signing_key = hs.signing_key - self.server_name = hs.hostname - self.notifier = hs.get_notifier() - self.attestations = hs.get_groups_attestation_signing() - - self.profile_handler = hs.get_profile_handler() - - # The following functions merely route the query to the local groups server - # or federation depending on if the group is local or remote - - get_group_profile = _create_rerouter("get_group_profile") - get_rooms_in_group = _create_rerouter("get_rooms_in_group") - get_invited_users_in_group = _create_rerouter("get_invited_users_in_group") - get_group_category = _create_rerouter("get_group_category") - get_group_categories = _create_rerouter("get_group_categories") - get_group_role = _create_rerouter("get_group_role") - get_group_roles = _create_rerouter("get_group_roles") - - async def get_group_summary( - self, group_id: str, requester_user_id: str - ) -> JsonDict: - """Get the group summary for a group. - - If the group is remote we check that the users have valid attestations. - """ - if self.is_mine_id(group_id): - res = await self.groups_server_handler.get_group_summary( - group_id, requester_user_id - ) - else: - try: - res = await self.transport_client.get_group_summary( - get_domain_from_id(group_id), group_id, requester_user_id - ) - except HttpResponseException as e: - raise e.to_synapse_error() - except RequestSendFailed: - raise SynapseError(502, "Failed to contact group server") - - group_server_name = get_domain_from_id(group_id) - - # Loop through the users and validate the attestations. 
- chunk = res["users_section"]["users"] - valid_users = [] - for entry in chunk: - g_user_id = entry["user_id"] - attestation = entry.pop("attestation", {}) - try: - if get_domain_from_id(g_user_id) != group_server_name: - await self.attestations.verify_attestation( - attestation, - group_id=group_id, - user_id=g_user_id, - server_name=get_domain_from_id(g_user_id), - ) - valid_users.append(entry) - except Exception as e: - logger.info("Failed to verify user is in group: %s", e) - - res["users_section"]["users"] = valid_users - - res["users_section"]["users"].sort(key=lambda e: e.get("order", 0)) - res["rooms_section"]["rooms"].sort(key=lambda e: e.get("order", 0)) - - # Add `is_publicised` flag to indicate whether the user has publicised their - # membership of the group on their profile - result = await self.store.get_publicised_groups_for_user(requester_user_id) - is_publicised = group_id in result - - res.setdefault("user", {})["is_publicised"] = is_publicised - - return res - - async def get_users_in_group( - self, group_id: str, requester_user_id: str - ) -> JsonDict: - """Get users in a group""" - if self.is_mine_id(group_id): - return await self.groups_server_handler.get_users_in_group( - group_id, requester_user_id - ) - - group_server_name = get_domain_from_id(group_id) - - try: - res = await self.transport_client.get_users_in_group( - get_domain_from_id(group_id), group_id, requester_user_id - ) - except HttpResponseException as e: - raise e.to_synapse_error() - except RequestSendFailed: - raise SynapseError(502, "Failed to contact group server") - - chunk = res["chunk"] - valid_entries = [] - for entry in chunk: - g_user_id = entry["user_id"] - attestation = entry.pop("attestation", {}) - try: - if get_domain_from_id(g_user_id) != group_server_name: - await self.attestations.verify_attestation( - attestation, - group_id=group_id, - user_id=g_user_id, - server_name=get_domain_from_id(g_user_id), - ) - valid_entries.append(entry) - except Exception as e: - logger.info("Failed to verify user is in group: %s", e) - - res["chunk"] = valid_entries - - return res - - async def get_joined_groups(self, user_id: str) -> JsonDict: - group_ids = await self.store.get_joined_groups(user_id) - return {"groups": group_ids} - - async def get_publicised_groups_for_user(self, user_id: str) -> JsonDict: - if self.hs.is_mine_id(user_id): - result = await self.store.get_publicised_groups_for_user(user_id) - - # Check AS associated groups for this user - this depends on the - # RegExps in the AS registration file (under `users`) - for app_service in self.store.get_app_services(): - result.extend(app_service.get_groups_for_user(user_id)) - - return {"groups": result} - else: - try: - bulk_result = await self.transport_client.bulk_get_publicised_groups( - get_domain_from_id(user_id), [user_id] - ) - except HttpResponseException as e: - raise e.to_synapse_error() - except RequestSendFailed: - raise SynapseError(502, "Failed to contact group server") - - result = bulk_result.get("users", {}).get(user_id) - # TODO: Verify attestations - return {"groups": result} - - async def bulk_get_publicised_groups( - self, user_ids: Iterable[str], proxy: bool = True - ) -> JsonDict: - destinations: Dict[str, Set[str]] = {} - local_users = set() - - for user_id in user_ids: - if self.hs.is_mine_id(user_id): - local_users.add(user_id) - else: - destinations.setdefault(get_domain_from_id(user_id), set()).add(user_id) - - if not proxy and destinations: - raise SynapseError(400, "Some user_ids are not local") - - results 
= {} - failed_results: List[str] = [] - for destination, dest_user_ids in destinations.items(): - try: - r = await self.transport_client.bulk_get_publicised_groups( - destination, list(dest_user_ids) - ) - results.update(r["users"]) - except Exception: - failed_results.extend(dest_user_ids) - - for uid in local_users: - results[uid] = await self.store.get_publicised_groups_for_user(uid) - - # Check AS associated groups for this user - this depends on the - # RegExps in the AS registration file (under `users`) - for app_service in self.store.get_app_services(): - results[uid].extend(app_service.get_groups_for_user(uid)) - - return {"users": results} - - -class GroupsLocalHandler(GroupsLocalWorkerHandler): - def __init__(self, hs: "HomeServer"): - super().__init__(hs) - - # Ensure attestations get renewed - hs.get_groups_attestation_renewer() - - # The following functions merely route the query to the local groups server - # or federation depending on if the group is local or remote - - update_group_profile = _create_rerouter("update_group_profile") - - add_room_to_group = _create_rerouter("add_room_to_group") - update_room_in_group = _create_rerouter("update_room_in_group") - remove_room_from_group = _create_rerouter("remove_room_from_group") - - update_group_summary_room = _create_rerouter("update_group_summary_room") - delete_group_summary_room = _create_rerouter("delete_group_summary_room") - - update_group_category = _create_rerouter("update_group_category") - delete_group_category = _create_rerouter("delete_group_category") - - update_group_summary_user = _create_rerouter("update_group_summary_user") - delete_group_summary_user = _create_rerouter("delete_group_summary_user") - - update_group_role = _create_rerouter("update_group_role") - delete_group_role = _create_rerouter("delete_group_role") - - set_group_join_policy = _create_rerouter("set_group_join_policy") - - async def create_group( - self, group_id: str, user_id: str, content: JsonDict - ) -> JsonDict: - """Create a group""" - - logger.info("Asking to create group with ID: %r", group_id) - - if self.is_mine_id(group_id): - res = await self.groups_server_handler.create_group( - group_id, user_id, content - ) - local_attestation = None - remote_attestation = None - else: - raise SynapseError(400, "Unable to create remote groups") - - is_publicised = content.get("publicise", False) - token = await self.store.register_user_group_membership( - group_id, - user_id, - membership="join", - is_admin=True, - local_attestation=local_attestation, - remote_attestation=remote_attestation, - is_publicised=is_publicised, - ) - self.notifier.on_new_event("groups_key", token, users=[user_id]) - - return res - - async def join_group( - self, group_id: str, user_id: str, content: JsonDict - ) -> JsonDict: - """Request to join a group""" - if self.is_mine_id(group_id): - await self.groups_server_handler.join_group(group_id, user_id, content) - local_attestation = None - remote_attestation = None - else: - local_attestation = self.attestations.create_attestation(group_id, user_id) - content["attestation"] = local_attestation - - try: - res = await self.transport_client.join_group( - get_domain_from_id(group_id), group_id, user_id, content - ) - except HttpResponseException as e: - raise e.to_synapse_error() - except RequestSendFailed: - raise SynapseError(502, "Failed to contact group server") - - remote_attestation = res["attestation"] - - await self.attestations.verify_attestation( - remote_attestation, - group_id=group_id, - user_id=user_id, - 
server_name=get_domain_from_id(group_id), - ) - - # TODO: Check that the group is public and we're being added publicly - is_publicised = content.get("publicise", False) - - token = await self.store.register_user_group_membership( - group_id, - user_id, - membership="join", - is_admin=False, - local_attestation=local_attestation, - remote_attestation=remote_attestation, - is_publicised=is_publicised, - ) - self.notifier.on_new_event("groups_key", token, users=[user_id]) - - return {} - - async def accept_invite( - self, group_id: str, user_id: str, content: JsonDict - ) -> JsonDict: - """Accept an invite to a group""" - if self.is_mine_id(group_id): - await self.groups_server_handler.accept_invite(group_id, user_id, content) - local_attestation = None - remote_attestation = None - else: - local_attestation = self.attestations.create_attestation(group_id, user_id) - content["attestation"] = local_attestation - - try: - res = await self.transport_client.accept_group_invite( - get_domain_from_id(group_id), group_id, user_id, content - ) - except HttpResponseException as e: - raise e.to_synapse_error() - except RequestSendFailed: - raise SynapseError(502, "Failed to contact group server") - - remote_attestation = res["attestation"] - - await self.attestations.verify_attestation( - remote_attestation, - group_id=group_id, - user_id=user_id, - server_name=get_domain_from_id(group_id), - ) - - # TODO: Check that the group is public and we're being added publicly - is_publicised = content.get("publicise", False) - - token = await self.store.register_user_group_membership( - group_id, - user_id, - membership="join", - is_admin=False, - local_attestation=local_attestation, - remote_attestation=remote_attestation, - is_publicised=is_publicised, - ) - self.notifier.on_new_event("groups_key", token, users=[user_id]) - - return {} - - async def invite( - self, group_id: str, user_id: str, requester_user_id: str, config: JsonDict - ) -> JsonDict: - """Invite a user to a group""" - content = {"requester_user_id": requester_user_id, "config": config} - if self.is_mine_id(group_id): - res = await self.groups_server_handler.invite_to_group( - group_id, user_id, requester_user_id, content - ) - else: - try: - res = await self.transport_client.invite_to_group( - get_domain_from_id(group_id), - group_id, - user_id, - requester_user_id, - content, - ) - except HttpResponseException as e: - raise e.to_synapse_error() - except RequestSendFailed: - raise SynapseError(502, "Failed to contact group server") - - return res - - async def on_invite( - self, group_id: str, user_id: str, content: JsonDict - ) -> JsonDict: - """One of our users were invited to a group""" - # TODO: Support auto join and rejection - - if not self.is_mine_id(user_id): - raise SynapseError(400, "User not on this server") - - local_profile = {} - if "profile" in content: - if "name" in content["profile"]: - local_profile["name"] = content["profile"]["name"] - if "avatar_url" in content["profile"]: - local_profile["avatar_url"] = content["profile"]["avatar_url"] - - token = await self.store.register_user_group_membership( - group_id, - user_id, - membership="invite", - content={"profile": local_profile, "inviter": content["inviter"]}, - ) - self.notifier.on_new_event("groups_key", token, users=[user_id]) - try: - user_profile = await self.profile_handler.get_profile(user_id) - except Exception as e: - logger.warning("No profile for user %s: %s", user_id, e) - user_profile = {} - - return {"state": "invite", "user_profile": user_profile} - - 
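The handler being deleted here leaned on one recurring pattern: route a call either to the local groups server or out over federation, and translate transport failures into client-visible errors. A minimal sketch of that pattern (`call_local` and `call_remote` are hypothetical stand-ins for the concrete handler and transport methods, and `ConnectionError` stands in for `RequestSendFailed`):

```python
from typing import Any, Awaitable, Callable


class SynapseError(Exception):
    """Simplified stand-in for synapse.api.errors.SynapseError."""

    def __init__(self, code: int, msg: str) -> None:
        super().__init__(msg)
        self.code = code


async def route_group_call(
    is_local: bool,
    call_local: Callable[[], Awaitable[Any]],
    call_remote: Callable[[], Awaitable[Any]],
) -> Any:
    """Serve local groups directly; translate remote transport failures
    into errors the client can understand, as the removed code did."""
    if is_local:
        return await call_local()
    try:
        return await call_remote()
    except ConnectionError:
        # The removed code raised 502 on RequestSendFailed; ConnectionError
        # stands in for that transport failure here.
        raise SynapseError(502, "Failed to contact group server")
```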
async def remove_user_from_group( - self, group_id: str, user_id: str, requester_user_id: str, content: JsonDict - ) -> JsonDict: - """Remove a user from a group""" - if user_id == requester_user_id: - token = await self.store.register_user_group_membership( - group_id, user_id, membership="leave" - ) - self.notifier.on_new_event("groups_key", token, users=[user_id]) - - # TODO: Should probably remember that we tried to leave so that we can - # retry if the group server is currently down. - - if self.is_mine_id(group_id): - res = await self.groups_server_handler.remove_user_from_group( - group_id, user_id, requester_user_id, content - ) - else: - content["requester_user_id"] = requester_user_id - try: - res = await self.transport_client.remove_user_from_group( - get_domain_from_id(group_id), - group_id, - requester_user_id, - user_id, - content, - ) - except HttpResponseException as e: - raise e.to_synapse_error() - except RequestSendFailed: - raise SynapseError(502, "Failed to contact group server") - - return res - - async def user_removed_from_group( - self, group_id: str, user_id: str, content: JsonDict - ) -> None: - """One of our users was removed/kicked from a group""" - # TODO: Check if user in group - token = await self.store.register_user_group_membership( - group_id, user_id, membership="leave" - ) - self.notifier.on_new_event("groups_key", token, users=[user_id]) diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index d79248ad90..d2b489e816 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -67,8 +67,8 @@ class InitialSyncHandler: ] ] = ResponseCache(hs.get_clock(), "initial_sync_cache") self._event_serializer = hs.get_event_client_serializer() - self.storage = hs.get_storage() - self.state_store = self.storage.state + self._storage_controllers = hs.get_storage_controllers() + self._state_storage_controller = self._storage_controllers.state async def snapshot_all_rooms( self, @@ -198,7 +198,8 @@ class InitialSyncHandler: event.stream_ordering, ) deferred_room_state = run_in_background( - self.state_store.get_state_for_events, [event.event_id] + self._state_storage_controller.get_state_for_events, + [event.event_id], ).addCallback( lambda states: cast(StateMap[EventBase], states[event.event_id]) ) @@ -218,7 +219,7 @@ class InitialSyncHandler: ).addErrback(unwrapFirstError) messages = await filter_events_for_client( - self.storage, user_id, messages + self._storage_controllers, user_id, messages ) start_token = now_token.copy_and_replace(StreamKeyType.ROOM, token) @@ -274,7 +275,7 @@ class InitialSyncHandler: "rooms": rooms_ret, "presence": [ { - "type": "m.presence", + "type": EduTypes.PRESENCE, "content": format_user_presence_state(event, now), } for event in presence @@ -355,7 +356,9 @@ class InitialSyncHandler: member_event_id: str, is_peeking: bool, ) -> JsonDict: - room_state = await self.state_store.get_state_for_event(member_event_id) + room_state = await self._state_storage_controller.get_state_for_event( + member_event_id + ) limit = pagin_config.limit if pagin_config else None if limit is None: @@ -369,7 +372,7 @@ class InitialSyncHandler: ) messages = await filter_events_for_client( - self.storage, user_id, messages, is_peeking=is_peeking + self._storage_controllers, user_id, messages, is_peeking=is_peeking ) start_token = StreamToken.START.copy_and_replace(StreamKeyType.ROOM, token) @@ -439,7 +442,7 @@ class InitialSyncHandler: return [ { - "type": EduTypes.Presence, + "type": EduTypes.PRESENCE, 
"content": format_user_presence_state(s, time_now), } for s in states @@ -474,7 +477,7 @@ class InitialSyncHandler: ) messages = await filter_events_for_client( - self.storage, user_id, messages, is_peeking=is_peeking + self._storage_controllers, user_id, messages, is_peeking=is_peeking ) start_token = now_token.copy_and_replace(StreamKeyType.ROOM, token) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index cb1bc4c06f..cf7c2d1979 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -23,7 +23,6 @@ from canonicaljson import encode_canonical_json from twisted.internet.interfaces import IDelayedCall -import synapse from synapse import event_auth from synapse.api.constants import ( EventContentFields, @@ -55,7 +54,14 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http.send_event import ReplicationSendEventRestServlet from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.storage.state import StateFilter -from synapse.types import Requester, RoomAlias, StreamToken, UserID, create_requester +from synapse.types import ( + MutableStateMap, + Requester, + RoomAlias, + StreamToken, + UserID, + create_requester, +) from synapse.util import json_decoder, json_encoder, log_failure, unwrapFirstError from synapse.util.async_helpers import Linearizer, gather_results from synapse.util.caches.expiringcache import ExpiringCache @@ -77,8 +83,8 @@ class MessageHandler: self.clock = hs.get_clock() self.state = hs.get_state_handler() self.store = hs.get_datastores().main - self.storage = hs.get_storage() - self.state_store = self.storage.state + self._storage_controllers = hs.get_storage_controllers() + self._state_storage_controller = self._storage_controllers.state self._event_serializer = hs.get_event_client_serializer() self._ephemeral_events_enabled = hs.config.server.enable_ephemeral_messages @@ -125,7 +131,7 @@ class MessageHandler: assert ( membership_event_id is not None ), "check_user_in_room_or_world_readable returned invalid data" - room_state = await self.state_store.get_state_for_events( + room_state = await self._state_storage_controller.get_state_for_events( [membership_event_id], StateFilter.from_types([key]) ) data = room_state[membership_event_id].get(key) @@ -186,7 +192,7 @@ class MessageHandler: # check whether the user is in the room at that time to determine # whether they should be treated as peeking. 
- state_map = await self.state_store.get_state_for_event( + state_map = await self._state_storage_controller.get_state_for_event( last_event.event_id, StateFilter.from_types([(EventTypes.Member, user_id)]), ) @@ -199,7 +205,7 @@ class MessageHandler: is_peeking = not joined visible_events = await filter_events_for_client( - self.storage, + self._storage_controllers, user_id, [last_event], filter_send_to_client=False, @@ -207,8 +213,10 @@ class MessageHandler: ) if visible_events: - room_state_events = await self.state_store.get_state_for_events( - [last_event.event_id], state_filter=state_filter + room_state_events = ( + await self._state_storage_controller.get_state_for_events( + [last_event.event_id], state_filter=state_filter + ) ) room_state: Mapping[Any, EventBase] = room_state_events[ last_event.event_id @@ -237,8 +245,10 @@ class MessageHandler: assert ( membership_event_id is not None ), "check_user_in_room_or_world_readable returned invalid data" - room_state_events = await self.state_store.get_state_for_events( - [membership_event_id], state_filter=state_filter + room_state_events = ( + await self._state_storage_controller.get_state_for_events( + [membership_event_id], state_filter=state_filter + ) ) room_state = room_state_events[membership_event_id] @@ -395,7 +405,7 @@ class EventCreationHandler: self.auth = hs.get_auth() self._event_auth_handler = hs.get_event_auth_handler() self.store = hs.get_datastores().main - self.storage = hs.get_storage() + self._storage_controllers = hs.get_storage_controllers() self.state = hs.get_state_handler() self.clock = hs.get_clock() self.validator = EventValidator() @@ -886,10 +896,38 @@ class EventCreationHandler: event.sender, ) - spam_check = await self.spam_checker.check_event_for_spam(event) - if spam_check is not synapse.spam_checker_api.Allow.ALLOW: + spam_check_result = await self.spam_checker.check_event_for_spam(event) + if spam_check_result != self.spam_checker.NOT_SPAM: + if isinstance(spam_check_result, tuple): + try: + [code, dict] = spam_check_result + raise SynapseError( + 403, + "This message had been rejected as probable spam", + code, + dict, + ) + except ValueError: + logger.error( + "Spam-check module returned invalid error value. Expecting [code, dict], got %s", + spam_check_result, + ) + spam_check_result = Codes.FORBIDDEN + + if isinstance(spam_check_result, Codes): + raise SynapseError( + 403, + "This message has been rejected as probable spam", + spam_check_result, + ) + + # Backwards compatibility: if the return value is not an error code, it + # means the module returned an error message to be included in the + # SynapseError (which is now deprecated). raise SynapseError( - 403, "This message had been rejected as probable spam", spam_check + 403, + spam_check_result, + Codes.FORBIDDEN, ) ev = await self.handle_new_client_event( @@ -1010,7 +1048,7 @@ class EventCreationHandler: # after it is created if builder.internal_metadata.outlier: event.internal_metadata.outlier = True - context = EventContext.for_outlier(self.storage) + context = EventContext.for_outlier(self._storage_controllers) elif ( event.type == EventTypes.MSC2716_INSERTION and state_event_ids @@ -1022,8 +1060,35 @@ class EventCreationHandler: # # TODO(faster_joins): figure out how this works, and make sure that the # old state is complete. 
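The reworked spam check above has to cope with several return shapes from modules: the `NOT_SPAM` sentinel, a `(code, dict)` tuple, a bare `Codes` value, and (now deprecated) a plain error-message string. A minimal sketch of that dispatch, with `Codes` reduced to a small string enum and the sentinel simplified:

```python
import enum
from typing import Dict, Tuple, Union


class Codes(str, enum.Enum):
    FORBIDDEN = "M_FORBIDDEN"


NOT_SPAM = "NOT_SPAM"  # simplified sentinel meaning "allow the event"
SpamCheckResult = Union[str, Codes, Tuple[Codes, Dict]]


def interpret_spam_check(result: SpamCheckResult) -> Tuple[bool, Codes, Dict]:
    """Return (allowed, error code, extra response fields)."""
    if result == NOT_SPAM:
        return True, Codes.FORBIDDEN, {}
    if isinstance(result, tuple):
        # New-style rejection: an error code plus extra response data.
        code, extra = result
        return False, code, extra
    if isinstance(result, Codes):
        return False, result, {}
    # Deprecated: the module returned a bare error-message string.
    return False, Codes.FORBIDDEN, {}
```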
- old_state = await self.store.get_events_as_list(state_event_ids) - context = await self.state.compute_event_context(event, old_state=old_state) + metadata = await self.store.get_metadata_for_events(state_event_ids) + + state_map_for_event: MutableStateMap[str] = {} + for state_id in state_event_ids: + data = metadata.get(state_id) + if data is None: + # We're trying to persist a new historical batch of events + # with the given state, e.g. via + # `RoomBatchSendEventRestServlet`. The state can be inferred + # by Synapse or set directly by the client. + # + # Either way, we should have persisted all the state before + # getting here. + raise Exception( + f"State event {state_id} not found in DB," + " Synapse should have persisted it before using it." + ) + + if data.state_key is None: + raise Exception( + f"Trying to set non-state event {state_id} as state" + ) + + state_map_for_event[(data.event_type, data.state_key)] = state_id + + context = await self.state.compute_event_context( + event, + state_ids_before_event=state_map_for_event, + ) else: context = await self.state.compute_event_context(event) @@ -1396,7 +1461,7 @@ class EventCreationHandler: """ extra_users = extra_users or [] - assert self.storage.persistence is not None + assert self._storage_controllers.persistence is not None assert self._events_shard_config.should_handle( self._instance_name, event.room_id ) @@ -1630,7 +1695,7 @@ class EventCreationHandler: event, event_pos, max_stream_token, - ) = await self.storage.persistence.persist_event( + ) = await self._storage_controllers.persistence.persist_event( event, context=context, backfilled=backfilled ) diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 19a4407050..6262a35822 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -129,8 +129,8 @@ class PaginationHandler: self.hs = hs self.auth = hs.get_auth() self.store = hs.get_datastores().main - self.storage = hs.get_storage() - self.state_store = self.storage.state + self._storage_controllers = hs.get_storage_controllers() + self._state_storage_controller = self._storage_controllers.state self.clock = hs.get_clock() self._server_name = hs.hostname self._room_shutdown_handler = hs.get_room_shutdown_handler() @@ -352,7 +352,7 @@ class PaginationHandler: self._purges_in_progress_by_room.add(room_id) try: async with self.pagination_lock.write(room_id): - await self.storage.purge_events.purge_history( + await self._storage_controllers.purge_events.purge_history( room_id, token, delete_local_events ) logger.info("[purge] complete") @@ -414,7 +414,7 @@ class PaginationHandler: if joined: raise SynapseError(400, "Users are still joined to this room") - await self.storage.purge_events.purge_room(room_id) + await self._storage_controllers.purge_events.purge_room(room_id) async def get_messages( self, @@ -515,14 +515,28 @@ class PaginationHandler: next_token = from_token.copy_and_replace(StreamKeyType.ROOM, next_key) - if events: - if event_filter: - events = await event_filter.filter(events) + # if no events are returned from pagination, that implies + # we have reached the end of the available events. + # In that case we do not return end, to tell the client + # there is no need for further queries. 
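The comment above introduces the early return that follows: an empty page means we have reached the end of history, so `end` is deliberately omitted from the response. A minimal sketch of that control flow, with tokens reduced to strings:

```python
from typing import Dict, List, Optional


def paginate_response(
    events: List[dict], start_token: str, next_token: Optional[str]
) -> Dict[str, object]:
    """Omit `end` when there is nothing further to paginate, so clients
    know to stop issuing /messages requests."""
    if not events:
        return {"chunk": [], "start": start_token}
    response: Dict[str, object] = {"chunk": events, "start": start_token}
    if next_token is not None:
        response["end"] = next_token
    return response
```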
+ if not events: + return { + "chunk": [], + "start": await from_token.to_string(self.store), + } - events = await filter_events_for_client( - self.storage, user_id, events, is_peeking=(member_event_id is None) - ) + if event_filter: + events = await event_filter.filter(events) + events = await filter_events_for_client( + self._storage_controllers, + user_id, + events, + is_peeking=(member_event_id is None), + ) + + # if after the filter applied there are no more events + # return immediately - but there might be more in next_token batch if not events: return { "chunk": [], @@ -539,7 +553,7 @@ class PaginationHandler: (EventTypes.Member, event.sender) for event in events ) - state_ids = await self.state_store.get_state_ids_for_event( + state_ids = await self._state_storage_controller.get_state_ids_for_event( events[0].event_id, state_filter=state_filter ) @@ -653,7 +667,7 @@ class PaginationHandler: 400, "Users are still joined to this room" ) - await self.storage.purge_events.purge_room(room_id) + await self._storage_controllers.purge_events.purge_room(room_id) logger.info("complete") self._delete_by_id[delete_id].status = DeleteStatus.STATUS_COMPLETE diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index dd84e6c88b..bf112b9e1e 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -49,7 +49,7 @@ from prometheus_client import Counter from typing_extensions import ContextManager import synapse.metrics -from synapse.api.constants import EventTypes, Membership, PresenceState +from synapse.api.constants import EduTypes, EventTypes, Membership, PresenceState from synapse.api.errors import SynapseError from synapse.api.presence import UserPresenceState from synapse.appservice import ApplicationService @@ -394,7 +394,7 @@ class WorkerPresenceHandler(BasePresenceHandler): # Route presence EDUs to the right worker hs.get_federation_registry().register_instances_for_edu( - "m.presence", + EduTypes.PRESENCE, hs.config.worker.writers.presence, ) @@ -649,7 +649,9 @@ class PresenceHandler(BasePresenceHandler): federation_registry = hs.get_federation_registry() - federation_registry.register_edu_handler("m.presence", self.incoming_presence) + federation_registry.register_edu_handler( + EduTypes.PRESENCE, self.incoming_presence + ) LaterGauge( "synapse_handlers_presence_user_to_current_state_size", diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index e6a35f1d09..43d2882b0a 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -14,7 +14,7 @@ import logging from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple -from synapse.api.constants import ReceiptTypes +from synapse.api.constants import EduTypes, ReceiptTypes from synapse.appservice import ApplicationService from synapse.streams import EventSource from synapse.types import ( @@ -52,11 +52,11 @@ class ReceiptsHandler: # to the appropriate worker. 
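Several files in this patch swap hard-coded EDU type literals for `EduTypes` constants, including the receipts registration that follows. A minimal sketch of why the constants help (the string values match the literals being replaced):

```python
class EduTypes:
    """The values below match the literals removed in this patch."""

    PRESENCE = "m.presence"
    RECEIPT = "m.receipt"
    TYPING = "m.typing"


# Registering against a constant means a typo surfaces as an AttributeError
# at import time, rather than a silently unroutable EDU at runtime.
handlers = {EduTypes.RECEIPT: lambda edu: None}
assert EduTypes.RECEIPT in handlers
```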
if hs.get_instance_name() in hs.config.worker.writers.receipts: hs.get_federation_registry().register_edu_handler( - "m.receipt", self._received_remote_receipt + EduTypes.RECEIPT, self._received_remote_receipt ) else: hs.get_federation_registry().register_instances_for_edu( - "m.receipt", + EduTypes.RECEIPT, hs.config.worker.writers.receipts, ) diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py index ab7e54857d..9a1cc11bb3 100644 --- a/synapse/handlers/relations.py +++ b/synapse/handlers/relations.py @@ -69,7 +69,7 @@ class BundledAggregations: class RelationsHandler: def __init__(self, hs: "HomeServer"): self._main_store = hs.get_datastores().main - self._storage = hs.get_storage() + self._storage_controllers = hs.get_storage_controllers() self._auth = hs.get_auth() self._clock = hs.get_clock() self._event_handler = hs.get_event_handler() @@ -143,7 +143,10 @@ class RelationsHandler: ) events = await filter_events_for_client( - self._storage, user_id, events, is_peeking=(member_event_id is None) + self._storage_controllers, + user_id, + events, + is_peeking=(member_event_id is None), ) now = self._clock.time_msec() diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 92e1de0500..5c91d33f58 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1192,8 +1192,8 @@ class RoomContextHandler: self.hs = hs self.auth = hs.get_auth() self.store = hs.get_datastores().main - self.storage = hs.get_storage() - self.state_store = self.storage.state + self._storage_controllers = hs.get_storage_controllers() + self._state_storage_controller = self._storage_controllers.state self._relations_handler = hs.get_relations_handler() async def get_event_context( @@ -1236,7 +1236,10 @@ class RoomContextHandler: if use_admin_priviledge: return events return await filter_events_for_client( - self.storage, user.to_string(), events, is_peeking=is_peeking + self._storage_controllers, + user.to_string(), + events, + is_peeking=is_peeking, ) event = await self.store.get_event( @@ -1293,7 +1296,7 @@ class RoomContextHandler: # first? Shouldn't we be consistent with /sync? 
# https://github.com/matrix-org/matrix-doc/issues/687 - state = await self.state_store.get_state_for_events( + state = await self._state_storage_controller.get_state_for_events( [last_event_id], state_filter=state_filter ) diff --git a/synapse/handlers/room_batch.py b/synapse/handlers/room_batch.py index fbfd748406..1414e575d6 100644 --- a/synapse/handlers/room_batch.py +++ b/synapse/handlers/room_batch.py @@ -17,7 +17,7 @@ class RoomBatchHandler: def __init__(self, hs: "HomeServer"): self.hs = hs self.store = hs.get_datastores().main - self.state_store = hs.get_storage().state + self._state_storage_controller = hs.get_storage_controllers().state self.event_creation_handler = hs.get_event_creation_handler() self.room_member_handler = hs.get_room_member_handler() self.auth = hs.get_auth() @@ -141,7 +141,7 @@ class RoomBatchHandler: ) = await self.store.get_max_depth_of(event_ids) # mapping from (type, state_key) -> state_event_id assert most_recent_event_id is not None - prev_state_map = await self.state_store.get_state_ids_for_event( + prev_state_map = await self._state_storage_controller.get_state_ids_for_event( most_recent_event_id ) # List of state event ID's diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index ea876c168d..00662dc961 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -1081,17 +1081,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): # Transfer alias mappings in the room directory await self.store.update_aliases_for_room(old_room_id, room_id) - # Check if any groups we own contain the predecessor room - local_group_ids = await self.store.get_local_groups_for_room(old_room_id) - for group_id in local_group_ids: - # Add new the new room to those groups - await self.store.add_room_to_group( - group_id, room_id, old_room is not None and old_room["is_public"] - ) - - # Remove the old room from those groups - await self.store.remove_room_from_group(group_id, old_room_id) - async def copy_user_state_on_room_upgrade( self, old_room_id: str, new_room_id: str, user_ids: Iterable[str] ) -> None: diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py index af83de3193..75aee6a111 100644 --- a/synapse/handlers/room_summary.py +++ b/synapse/handlers/room_summary.py @@ -662,7 +662,8 @@ class RoomSummaryHandler: # The API doesn't return the room version so assume that a # join rule of knock is valid. if ( - room.get("join_rules") in (JoinRules.PUBLIC, JoinRules.KNOCK) + room.get("join_rule") + in (JoinRules.PUBLIC, JoinRules.KNOCK, JoinRules.KNOCK_RESTRICTED) or room.get("world_readable") is True ): return True @@ -713,9 +714,6 @@ class RoomSummaryHandler: "canonical_alias": stats["canonical_alias"], "num_joined_members": stats["joined_members"], "avatar_url": stats["avatar"], - # plural join_rules is a documentation error but kept for historical - # purposes. Should match /publicRooms. 
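The room-summary changes here fix the accessibility check to read the singular `join_rule` field (also admitting `knock_restricted` rooms), and, just below, drop the erroneously plural `join_rules` key from responses. A minimal sketch of the corrected predicate:

```python
from typing import Mapping

# Join rule values admitted by the corrected check.
PUBLIC = "public"
KNOCK = "knock"
KNOCK_RESTRICTED = "knock_restricted"


def is_remote_room_accessible(room: Mapping[str, object]) -> bool:
    """A room in a remote summary response is accessible if it is joinable
    or knockable, or if its history is world readable. Note the singular
    `join_rule` key."""
    return (
        room.get("join_rule") in (PUBLIC, KNOCK, KNOCK_RESTRICTED)
        or room.get("world_readable") is True
    )
```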
- "join_rules": stats["join_rules"], "join_rule": stats["join_rules"], "world_readable": ( stats["history_visibility"] == HistoryVisibility.WORLD_READABLE diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index cd1c47dae8..659f99f7e2 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -55,8 +55,8 @@ class SearchHandler: self.hs = hs self._event_serializer = hs.get_event_client_serializer() self._relations_handler = hs.get_relations_handler() - self.storage = hs.get_storage() - self.state_store = self.storage.state + self._storage_controllers = hs.get_storage_controllers() + self._state_storage_controller = self._storage_controllers.state self.auth = hs.get_auth() async def get_old_rooms_from_upgraded_room(self, room_id: str) -> Iterable[str]: @@ -460,7 +460,7 @@ class SearchHandler: filtered_events = await search_filter.filter([r["event"] for r in results]) events = await filter_events_for_client( - self.storage, user.to_string(), filtered_events + self._storage_controllers, user.to_string(), filtered_events ) events.sort(key=lambda e: -rank_map[e.event_id]) @@ -559,7 +559,7 @@ class SearchHandler: filtered_events = await search_filter.filter([r["event"] for r in results]) events = await filter_events_for_client( - self.storage, user.to_string(), filtered_events + self._storage_controllers, user.to_string(), filtered_events ) room_events.extend(events) @@ -644,11 +644,11 @@ class SearchHandler: ) events_before = await filter_events_for_client( - self.storage, user.to_string(), res.events_before + self._storage_controllers, user.to_string(), res.events_before ) events_after = await filter_events_for_client( - self.storage, user.to_string(), res.events_after + self._storage_controllers, user.to_string(), res.events_after ) context: JsonDict = { @@ -677,7 +677,7 @@ class SearchHandler: [(EventTypes.Member, sender) for sender in senders] ) - state = await self.state_store.get_state_for_event( + state = await self._state_storage_controller.get_state_for_event( last_event_id, state_filter ) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 59b5d497be..b5859dcb28 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -166,16 +166,6 @@ class KnockedSyncResult: return True -@attr.s(slots=True, frozen=True, auto_attribs=True) -class GroupsSyncResult: - join: JsonDict - invite: JsonDict - leave: JsonDict - - def __bool__(self) -> bool: - return bool(self.join or self.invite or self.leave) - - @attr.s(slots=True, auto_attribs=True) class _RoomChanges: """The set of room entries to include in the sync, plus the set of joined @@ -206,7 +196,6 @@ class SyncResult: for this device device_unused_fallback_key_types: List of key types that have an unused fallback key - groups: Group updates, if any """ next_batch: StreamToken @@ -220,7 +209,6 @@ class SyncResult: device_lists: DeviceListUpdates device_one_time_keys_count: JsonDict device_unused_fallback_key_types: List[str] - groups: Optional[GroupsSyncResult] def __bool__(self) -> bool: """Make the result appear empty if there are no updates. 
This is used @@ -236,7 +224,6 @@ class SyncResult: or self.account_data or self.to_device or self.device_lists - or self.groups ) @@ -251,8 +238,8 @@ class SyncHandler: self.clock = hs.get_clock() self.state = hs.get_state_handler() self.auth = hs.get_auth() - self.storage = hs.get_storage() - self.state_store = self.storage.state + self._storage_controllers = hs.get_storage_controllers() + self._state_storage_controller = self._storage_controllers.state # TODO: flush cache entries on subsequent sync request. # Once we get the next /sync request (ie, one with the same access token @@ -525,7 +512,7 @@ class SyncHandler: current_state_ids = frozenset(current_state_ids_map.values()) recents = await filter_events_for_client( - self.storage, + self._storage_controllers, sync_config.user.to_string(), recents, always_include_ids=current_state_ids, @@ -593,7 +580,7 @@ class SyncHandler: current_state_ids = frozenset(current_state_ids_map.values()) loaded_recents = await filter_events_for_client( - self.storage, + self._storage_controllers, sync_config.user.to_string(), loaded_recents, always_include_ids=current_state_ids, @@ -643,7 +630,7 @@ class SyncHandler: event: event of interest state_filter: The state filter used to fetch state from the database. """ - state_ids = await self.state_store.get_state_ids_for_event( + state_ids = await self._state_storage_controller.get_state_ids_for_event( event.event_id, state_filter=state_filter or StateFilter.all() ) if event.is_state(): @@ -723,7 +710,7 @@ class SyncHandler: return None last_event = last_events[-1] - state_ids = await self.state_store.get_state_ids_for_event( + state_ids = await self._state_storage_controller.get_state_ids_for_event( last_event.event_id, state_filter=StateFilter.from_types( [(EventTypes.Name, ""), (EventTypes.CanonicalAlias, "")] @@ -901,12 +888,16 @@ class SyncHandler: if full_state: if batch: - current_state_ids = await self.state_store.get_state_ids_for_event( - batch.events[-1].event_id, state_filter=state_filter + current_state_ids = ( + await self._state_storage_controller.get_state_ids_for_event( + batch.events[-1].event_id, state_filter=state_filter + ) ) - state_ids = await self.state_store.get_state_ids_for_event( - batch.events[0].event_id, state_filter=state_filter + state_ids = ( + await self._state_storage_controller.get_state_ids_for_event( + batch.events[0].event_id, state_filter=state_filter + ) ) else: @@ -926,7 +917,7 @@ class SyncHandler: elif batch.limited: if batch: state_at_timeline_start = ( - await self.state_store.get_state_ids_for_event( + await self._state_storage_controller.get_state_ids_for_event( batch.events[0].event_id, state_filter=state_filter ) ) @@ -960,8 +951,10 @@ class SyncHandler: ) if batch: - current_state_ids = await self.state_store.get_state_ids_for_event( - batch.events[-1].event_id, state_filter=state_filter + current_state_ids = ( + await self._state_storage_controller.get_state_ids_for_event( + batch.events[-1].event_id, state_filter=state_filter + ) ) else: # Its not clear how we get here, but empirically we do @@ -991,7 +984,7 @@ class SyncHandler: # So we fish out all the member events corresponding to the # timeline here, and then dedupe any redundant ones below. - state_ids = await self.state_store.get_state_ids_for_event( + state_ids = await self._state_storage_controller.get_state_ids_for_event( batch.events[0].event_id, # we only want members! 
state_filter=StateFilter.from_types( @@ -1157,10 +1150,6 @@ class SyncHandler: await self.store.get_e2e_unused_fallback_key_types(user_id, device_id) ) - if self.hs_config.experimental.groups_enabled: - logger.debug("Fetching group data") - await self._generate_sync_entry_for_groups(sync_result_builder) - num_events = 0 # debug for https://github.com/matrix-org/synapse/issues/9424 @@ -1184,57 +1173,11 @@ class SyncHandler: archived=sync_result_builder.archived, to_device=sync_result_builder.to_device, device_lists=device_lists, - groups=sync_result_builder.groups, device_one_time_keys_count=one_time_key_counts, device_unused_fallback_key_types=unused_fallback_key_types, next_batch=sync_result_builder.now_token, ) - @measure_func("_generate_sync_entry_for_groups") - async def _generate_sync_entry_for_groups( - self, sync_result_builder: "SyncResultBuilder" - ) -> None: - user_id = sync_result_builder.sync_config.user.to_string() - since_token = sync_result_builder.since_token - now_token = sync_result_builder.now_token - - if since_token and since_token.groups_key: - results = await self.store.get_groups_changes_for_user( - user_id, since_token.groups_key, now_token.groups_key - ) - else: - results = await self.store.get_all_groups_for_user( - user_id, now_token.groups_key - ) - - invited = {} - joined = {} - left = {} - for result in results: - membership = result["membership"] - group_id = result["group_id"] - gtype = result["type"] - content = result["content"] - - if membership == "join": - if gtype == "membership": - # TODO: Add profile - content.pop("membership", None) - joined[group_id] = content["content"] - else: - joined.setdefault(group_id, {})[gtype] = content - elif membership == "invite": - if gtype == "membership": - content.pop("membership", None) - invited[group_id] = content["content"] - else: - if gtype == "membership": - left[group_id] = content["content"] - - sync_result_builder.groups = GroupsSyncResult( - join=joined, invite=invited, leave=left - ) - @measure_func("_generate_sync_entry_for_device_list") async def _generate_sync_entry_for_device_list( self, @@ -2333,7 +2276,6 @@ class SyncResultBuilder: invited knocked archived - groups to_device """ @@ -2349,7 +2291,6 @@ class SyncResultBuilder: invited: List[InvitedSyncResult] = attr.Factory(list) knocked: List[KnockedSyncResult] = attr.Factory(list) archived: List[ArchivedSyncResult] = attr.Factory(list) - groups: Optional[GroupsSyncResult] = None to_device: List[JsonDict] = attr.Factory(list) def calculate_user_changes(self) -> Tuple[Set[str], Set[str]]: diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index bb00750bfd..0aeab86bbb 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -17,6 +17,7 @@ from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple import attr +from synapse.api.constants import EduTypes from synapse.api.errors import AuthError, ShadowBanError, SynapseError from synapse.appservice import ApplicationService from synapse.metrics.background_process_metrics import ( @@ -68,7 +69,7 @@ class FollowerTypingHandler: if hs.get_instance_name() not in hs.config.worker.writers.typing: hs.get_federation_registry().register_instances_for_edu( - "m.typing", + EduTypes.TYPING, hs.config.worker.writers.typing, ) @@ -143,7 +144,7 @@ class FollowerTypingHandler: logger.debug("sending typing update to %s", domain) self.federation.build_and_send_edu( destination=domain, - edu_type="m.typing", + edu_type=EduTypes.TYPING, content={ "room_id": 
member.room_id, "user_id": member.user_id, @@ -218,7 +219,9 @@ class TypingWriterHandler(FollowerTypingHandler): self.hs = hs - hs.get_federation_registry().register_edu_handler("m.typing", self._recv_edu) + hs.get_federation_registry().register_edu_handler( + EduTypes.TYPING, self._recv_edu + ) hs.get_distributor().observe("user_left_room", self.user_left_room) @@ -458,7 +461,7 @@ class TypingNotificationEventSource(EventSource[int, JsonDict]): def _make_event_for(self, room_id: str) -> JsonDict: typing = self.get_typing_handler()._room_typing[room_id] return { - "type": "m.typing", + "type": EduTypes.TYPING, "room_id": room_id, "content": {"user_ids": list(typing)}, } diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 0b9475debd..776ed43f03 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -92,9 +92,6 @@ incoming_responses_counter = Counter( "synapse_http_matrixfederationclient_responses", "", ["method", "code"] ) -# a federation response can be rather large (eg a big state_ids is 50M or so), so we -# need a generous limit here. -MAX_RESPONSE_SIZE = 100 * 1024 * 1024 MAX_LONG_RETRIES = 10 MAX_SHORT_RETRIES = 3 @@ -116,6 +113,11 @@ class ByteParser(ByteWriteable, Generic[T], abc.ABC): the content type doesn't match we fail the request. """ + # a federation response can be rather large (eg a big state_ids is 50M or so), so we + # need a generous limit here. + MAX_RESPONSE_SIZE: int = 100 * 1024 * 1024 + """The largest response this parser will accept.""" + @abc.abstractmethod def finish(self) -> T: """Called when response has finished streaming and the parser should @@ -203,7 +205,6 @@ async def _handle_response( response: IResponse, start_ms: int, parser: ByteParser[T], - max_response_size: Optional[int] = None, ) -> T: """ Reads the body of a response with a timeout and sends it to a parser @@ -215,16 +216,14 @@ async def _handle_response( response: response to the request start_ms: Timestamp when request was made parser: The parser for the response - max_response_size: The maximum size to read from the response, if None - uses the default. Returns: The parsed response """ - if max_response_size is None: - max_response_size = MAX_RESPONSE_SIZE + max_response_size = parser.MAX_RESPONSE_SIZE + finished = False try: check_content_type_is(response.headers, parser.CONTENT_TYPE) @@ -233,6 +232,7 @@ async def _handle_response( length = await make_deferred_yieldable(d) + finished = True value = parser.finish() except BodyExceededMaxSize as e: # The response was too big. @@ -240,7 +240,7 @@ async def _handle_response( "{%s} [%s] JSON response exceeded max size %i - %s %s", request.txn_id, request.destination, - MAX_RESPONSE_SIZE, + max_response_size, request.method, request.uri.decode("ascii"), ) @@ -283,6 +283,15 @@ async def _handle_response( e, ) raise + finally: + if not finished: + # There was an exception and we didn't `finish()` the parse. + # Let the parser know that it can free up any resources. + try: + parser.finish() + except Exception: + # Ignore any additional exceptions. + pass time_taken_secs = reactor.seconds() - start_ms / 1000 @@ -772,7 +781,6 @@ class MatrixFederationHttpClient: backoff_on_404: bool = False, try_trailing_slash_on_400: bool = False, parser: Literal[None] = None, - max_response_size: Optional[int] = None, ) -> Union[JsonDict, list]: ... 
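Above, the response-size cap moves from a per-request `max_response_size` argument onto the parser itself, so each `ByteParser` subclass declares the largest body it will accept. A simplified sketch of that shape (the real class also implements Twisted's `ByteWriteable` and is generic in its result type):

```python
import abc
import json


class ByteParser(abc.ABC):
    # A federation response can be rather large (a big state_ids response
    # is ~50MB), hence the generous default.
    MAX_RESPONSE_SIZE: int = 100 * 1024 * 1024

    @abc.abstractmethod
    def write(self, data: bytes) -> int:
        ...

    @abc.abstractmethod
    def finish(self) -> object:
        ...


class SmallJsonParser(ByteParser):
    """A hypothetical parser that opts into a much tighter limit."""

    MAX_RESPONSE_SIZE = 1024 * 1024

    def __init__(self) -> None:
        self._buf = bytearray()

    def write(self, data: bytes) -> int:
        self._buf.extend(data)
        return len(data)

    def finish(self) -> object:
        return json.loads(bytes(self._buf))
```

Code reading the response then consults `parser.MAX_RESPONSE_SIZE`, which is what the reworked `_handle_response` above does.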
@@ -790,7 +798,6 @@ class MatrixFederationHttpClient: backoff_on_404: bool = False, try_trailing_slash_on_400: bool = False, parser: Optional[ByteParser[T]] = None, - max_response_size: Optional[int] = None, ) -> T: ... @@ -807,7 +814,6 @@ class MatrixFederationHttpClient: backoff_on_404: bool = False, try_trailing_slash_on_400: bool = False, parser: Optional[ByteParser] = None, - max_response_size: Optional[int] = None, ): """Sends the specified json data using PUT @@ -843,8 +849,6 @@ class MatrixFederationHttpClient: enabled. parser: The parser to use to decode the response. Defaults to parsing as JSON. - max_response_size: The maximum size to read from the response, if None - uses the default. Returns: Succeeds when we get a 2xx HTTP response. The @@ -895,7 +899,6 @@ class MatrixFederationHttpClient: response, start_ms, parser=parser, - max_response_size=max_response_size, ) return body @@ -984,7 +987,6 @@ class MatrixFederationHttpClient: ignore_backoff: bool = False, try_trailing_slash_on_400: bool = False, parser: Literal[None] = None, - max_response_size: Optional[int] = None, ) -> Union[JsonDict, list]: ... @@ -999,7 +1001,6 @@ class MatrixFederationHttpClient: ignore_backoff: bool = ..., try_trailing_slash_on_400: bool = ..., parser: ByteParser[T] = ..., - max_response_size: Optional[int] = ..., ) -> T: ... @@ -1013,7 +1014,6 @@ class MatrixFederationHttpClient: ignore_backoff: bool = False, try_trailing_slash_on_400: bool = False, parser: Optional[ByteParser] = None, - max_response_size: Optional[int] = None, ): """GETs some json from the given host homeserver and path @@ -1043,9 +1043,6 @@ class MatrixFederationHttpClient: parser: The parser to use to decode the response. Defaults to parsing as JSON. - max_response_size: The maximum size to read from the response. If None, - uses the default. - Returns: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. @@ -1090,7 +1087,6 @@ class MatrixFederationHttpClient: response, start_ms, parser=parser, - max_response_size=max_response_size, ) return body diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index a02b5bf6bd..903ec40c86 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -168,9 +168,24 @@ import inspect import logging import re from functools import wraps -from typing import TYPE_CHECKING, Collection, Dict, List, Optional, Pattern, Type +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Collection, + Dict, + Generator, + Iterable, + List, + Optional, + Pattern, + Type, + TypeVar, + Union, +) import attr +from typing_extensions import ParamSpec from twisted.internet import defer from twisted.web.http import Request @@ -256,7 +271,7 @@ try: def set_process(self, *args, **kwargs): return self._reporter.set_process(*args, **kwargs) - def report_span(self, span): + def report_span(self, span: "opentracing.Span") -> None: try: return self._reporter.report_span(span) except Exception: @@ -307,15 +322,19 @@ _homeserver_whitelist: Optional[Pattern[str]] = None Sentinel = object() -def only_if_tracing(func): +P = ParamSpec("P") +R = TypeVar("R") + + +def only_if_tracing(func: Callable[P, R]) -> Callable[P, Optional[R]]: """Executes the function only if we're tracing. 
Otherwise returns None.""" @wraps(func) - def _only_if_tracing_inner(*args, **kwargs): + def _only_if_tracing_inner(*args: P.args, **kwargs: P.kwargs) -> Optional[R]: if opentracing: return func(*args, **kwargs) else: - return + return None return _only_if_tracing_inner @@ -356,17 +375,10 @@ def ensure_active_span(message, ret=None): return ensure_active_span_inner_1 -@contextlib.contextmanager -def noop_context_manager(*args, **kwargs): - """Does exactly what it says on the tin""" - # TODO: replace with contextlib.nullcontext once we drop support for Python 3.6 - yield - - # Setup -def init_tracer(hs: "HomeServer"): +def init_tracer(hs: "HomeServer") -> None: """Set the whitelists and initialise the JaegerClient tracer""" global opentracing if not hs.config.tracing.opentracer_enabled: @@ -408,11 +420,11 @@ def init_tracer(hs: "HomeServer"): @only_if_tracing -def set_homeserver_whitelist(homeserver_whitelist): +def set_homeserver_whitelist(homeserver_whitelist: Iterable[str]) -> None: """Sets the homeserver whitelist Args: - homeserver_whitelist (Iterable[str]): regex of whitelisted homeservers + homeserver_whitelist: regexes specifying whitelisted homeservers """ global _homeserver_whitelist if homeserver_whitelist: @@ -423,15 +435,15 @@ def set_homeserver_whitelist(homeserver_whitelist): @only_if_tracing -def whitelisted_homeserver(destination): +def whitelisted_homeserver(destination: str) -> bool: """Checks if a destination matches the whitelist Args: - destination (str) + destination """ if _homeserver_whitelist: - return _homeserver_whitelist.match(destination) + return _homeserver_whitelist.match(destination) is not None return False @@ -457,11 +469,11 @@ def start_active_span( Args: See opentracing.tracer Returns: - scope (Scope) or noop_context_manager + scope (Scope) or contextlib.nullcontext """ if opentracing is None: - return noop_context_manager() # type: ignore[unreachable] + return contextlib.nullcontext() # type: ignore[unreachable] if tracer is None: # use the global tracer by default @@ -505,7 +517,7 @@ def start_active_span_follows_from( tracer: override the opentracing tracer. By default the global tracer is used. """ if opentracing is None: - return noop_context_manager() # type: ignore[unreachable] + return contextlib.nullcontext() # type: ignore[unreachable] references = [opentracing.follows_from(context) for context in contexts] scope = start_active_span( @@ -525,19 +537,19 @@ def start_active_span_follows_from( def start_active_span_from_edu( - edu_content, - operation_name, - references: Optional[list] = None, - tags=None, - start_time=None, - ignore_active_span=False, - finish_on_close=True, -): + edu_content: Dict[str, Any], + operation_name: str, + references: Optional[List["opentracing.Reference"]] = None, + tags: Optional[Dict] = None, + start_time: Optional[float] = None, + ignore_active_span: bool = False, + finish_on_close: bool = True, +) -> "opentracing.Scope": """ Extracts a span context from an edu and uses it to start a new active span Args: - edu_content (dict): and edu_content with a `context` field whose value is + edu_content: an edu_content with a `context` field whose value is canonical json for a dict which contains opentracing information. 
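+            e.g. (shape illustrative; the exact carrier keys depend on the
+            configured opentracing client -- Jaeger uses "uber-trace-id"):
+
+                {"context": '{"opentracing": {"uber-trace-id": "..."}}'}
+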
For the other args see opentracing.tracer @@ -545,7 +557,7 @@ def start_active_span_from_edu( references = references or [] if opentracing is None: - return noop_context_manager() # type: ignore[unreachable] + return contextlib.nullcontext() # type: ignore[unreachable] carrier = json_decoder.decode(edu_content.get("context", "{}")).get( "opentracing", {} @@ -578,27 +590,27 @@ def start_active_span_from_edu( # Opentracing setters for tags, logs, etc @only_if_tracing -def active_span(): +def active_span() -> Optional["opentracing.Span"]: """Get the currently active span, if any""" return opentracing.tracer.active_span @ensure_active_span("set a tag") -def set_tag(key, value): +def set_tag(key: str, value: Union[str, bool, int, float]) -> None: """Sets a tag on the active span""" assert opentracing.tracer.active_span is not None opentracing.tracer.active_span.set_tag(key, value) @ensure_active_span("log") -def log_kv(key_values, timestamp=None): +def log_kv(key_values: Dict[str, Any], timestamp: Optional[float] = None) -> None: """Log to the active span""" assert opentracing.tracer.active_span is not None opentracing.tracer.active_span.log_kv(key_values, timestamp) @ensure_active_span("set the traces operation name") -def set_operation_name(operation_name): +def set_operation_name(operation_name: str) -> None: """Sets the operation name of the active span""" assert opentracing.tracer.active_span is not None opentracing.tracer.active_span.set_operation_name(operation_name) @@ -624,7 +636,9 @@ def force_tracing(span=Sentinel) -> None: span.set_baggage_item(SynapseBaggage.FORCE_TRACING, "1") -def is_context_forced_tracing(span_context) -> bool: +def is_context_forced_tracing( + span_context: Optional["opentracing.SpanContext"], +) -> bool: """Check if sampling has been force for the given span context.""" if span_context is None: return False @@ -696,13 +710,13 @@ def inject_response_headers(response_headers: Headers) -> None: @ensure_active_span("get the active span context as a dict", ret={}) -def get_active_span_text_map(destination=None): +def get_active_span_text_map(destination: Optional[str] = None) -> Dict[str, str]: """ Gets a span context as a dict. This can be used instead of manually injecting a span into an empty carrier. Args: - destination (str): the name of the remote server. + destination: the name of the remote server. Returns: dict: the active span's context if opentracing is enabled, otherwise empty. @@ -721,7 +735,7 @@ def get_active_span_text_map(destination=None): @ensure_active_span("get the span context as a string.", ret={}) -def active_span_context_as_string(): +def active_span_context_as_string() -> str: """ Returns: The active span context encoded as a string. @@ -750,21 +764,21 @@ def span_context_from_request(request: Request) -> "Optional[opentracing.SpanCon @only_if_tracing -def span_context_from_string(carrier): +def span_context_from_string(carrier: str) -> Optional["opentracing.SpanContext"]: """ Returns: The active span context decoded from a string. """ - carrier = json_decoder.decode(carrier) - return opentracing.tracer.extract(opentracing.Format.TEXT_MAP, carrier) + payload: Dict[str, str] = json_decoder.decode(carrier) + return opentracing.tracer.extract(opentracing.Format.TEXT_MAP, payload) @only_if_tracing -def extract_text_map(carrier): +def extract_text_map(carrier: Dict[str, str]) -> Optional["opentracing.SpanContext"]: """ Wrapper method for opentracing's tracer.extract for TEXT_MAP. Args: - carrier (dict): a dict possibly containing a span context. 
+ carrier: a dict possibly containing a span context. Returns: The active span context extracted from carrier. @@ -843,7 +857,7 @@ def trace(func=None, opname=None): return decorator -def tag_args(func): +def tag_args(func: Callable[P, R]) -> Callable[P, R]: """ Tags all of the args to the active span. """ @@ -852,11 +866,11 @@ def tag_args(func): return func @wraps(func) - def _tag_args_inner(*args, **kwargs): + def _tag_args_inner(*args: P.args, **kwargs: P.kwargs) -> R: argspec = inspect.getfullargspec(func) for i, arg in enumerate(argspec.args[1:]): - set_tag("ARG_" + arg, args[i]) - set_tag("args", args[len(argspec.args) :]) + set_tag("ARG_" + arg, args[i]) # type: ignore[index] + set_tag("args", args[len(argspec.args) :]) # type: ignore[index] set_tag("kwargs", kwargs) return func(*args, **kwargs) @@ -864,7 +878,9 @@ def tag_args(func): @contextlib.contextmanager -def trace_servlet(request: "SynapseRequest", extract_context: bool = False): +def trace_servlet( + request: "SynapseRequest", extract_context: bool = False +) -> Generator[None, None, None]: """Returns a context manager which traces a request. It starts a span with some servlet specific tags such as the request metrics name and request information. diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index 298809742a..eef3462e10 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -14,6 +14,7 @@ import logging import threading +from contextlib import nullcontext from functools import wraps from types import TracebackType from typing import ( @@ -41,11 +42,7 @@ from synapse.logging.context import ( LoggingContext, PreserveLoggingContext, ) -from synapse.logging.opentracing import ( - SynapseTags, - noop_context_manager, - start_active_span, -) +from synapse.logging.opentracing import SynapseTags, start_active_span from synapse.metrics._types import Collector if TYPE_CHECKING: @@ -238,7 +235,7 @@ def run_as_background_process( f"bgproc.{desc}", tags={SynapseTags.REQUEST_ID: str(context)} ) else: - ctx = noop_context_manager() + ctx = nullcontext() with ctx: return await func(*args, **kwargs) except Exception: diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 95f3b27927..b7451fc870 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -35,7 +35,6 @@ from typing_extensions import ParamSpec from twisted.internet import defer from twisted.web.resource import Resource -from synapse import spam_checker_api from synapse.api.errors import SynapseError from synapse.events import EventBase from synapse.events.presence_router import ( @@ -55,6 +54,7 @@ from synapse.events.spamcheck import ( USER_MAY_JOIN_ROOM_CALLBACK, USER_MAY_PUBLISH_ROOM_CALLBACK, USER_MAY_SEND_3PID_INVITE_CALLBACK, + SpamChecker, ) from synapse.events.third_party_rules import ( CHECK_CAN_DEACTIVATE_USER_CALLBACK, @@ -140,9 +140,7 @@ are loaded into Synapse. """ PRESENCE_ALL_USERS = PresenceRouter.ALL_USERS - -ALLOW = spam_checker_api.Allow.ALLOW -# Singleton value used to mark a message as permitted. +NOT_SPAM = SpamChecker.NOT_SPAM __all__ = [ "errors", @@ -151,7 +149,7 @@ __all__ = [ "respond_with_html", "run_in_background", "cached", - "Allow", + "NOT_SPAM", "UserID", "DatabasePool", "LoggingTransaction", @@ -1149,7 +1147,10 @@ class ModuleApi: ) async def sleep(self, seconds: float) -> None: - """Sleeps for the given number of seconds.""" + """Sleeps for the given number of seconds. 
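+
+        Example (illustrative; ``module_api`` is the ``ModuleApi`` instance
+        handed to the module)::
+
+            await module_api.sleep(30.0)  # yields to the reactor, unlike time.sleep()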
+
+        Added in Synapse v1.49.0.
+        """
         await self._clock.sleep(seconds)
 
@@ -1429,6 +1430,28 @@ class ModuleApi:
             user_id, spec, {"actions": actions}
         )
 
+    async def get_monthly_active_users_by_service(
+        self, start_timestamp: Optional[int] = None, end_timestamp: Optional[int] = None
+    ) -> List[Tuple[str, str]]:
+        """Generates a list of monthly active users and their services.
+        Please see the corresponding storage docstring for more details.
+
+        Added in Synapse v1.61.0.
+
+        Args:
+            start_timestamp: If specified, only include users that were first active
+                at or after this point.
+            end_timestamp: If specified, only include users that were first active
+                at or before this point.
+
+        Returns:
+            A list of tuples (appservice_id, user_id).
+
+        """
+        return await self._store.get_monthly_active_users_by_service(
+            start_timestamp, end_timestamp
+        )
+
 
 class PublicRoomListManager:
     """Contains methods for adding to, removing from and querying whether a room
diff --git a/synapse/notifier.py b/synapse/notifier.py
index ba23257f54..1100434b3f 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -33,7 +33,7 @@ from prometheus_client import Counter
 
 from twisted.internet import defer
 
-from synapse.api.constants import EventTypes, HistoryVisibility, Membership
+from synapse.api.constants import EduTypes, EventTypes, HistoryVisibility, Membership
 from synapse.api.errors import AuthError
 from synapse.events import EventBase
 from synapse.handlers.presence import format_user_presence_state
@@ -221,7 +221,7 @@ class Notifier:
         self.room_to_user_streams: Dict[str, Set[_NotifierUserStream]] = {}
 
         self.hs = hs
-        self.storage = hs.get_storage()
+        self._storage_controllers = hs.get_storage_controllers()
         self.event_sources = hs.get_event_sources()
         self.store = hs.get_datastores().main
         self.pending_new_room_events: List[_PendingRoomEventEntry] = []
@@ -623,7 +623,7 @@ class Notifier:
 
             if name == "room":
                 new_events = await filter_events_for_client(
-                    self.storage,
+                    self._storage_controllers,
                     user.to_string(),
                     new_events,
                     is_peeking=is_peeking,
@@ -632,7 +632,7 @@ class Notifier:
                 now = self.clock.time_msec()
                 new_events[:] = [
                     {
-                        "type": "m.presence",
+                        "type": EduTypes.PRESENCE,
                         "content": format_user_presence_state(event, now),
                     }
                     for event in new_events
diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py
index a17b35a605..819bc9e9b6 100644
--- a/synapse/push/baserules.py
+++ b/synapse/push/baserules.py
@@ -139,6 +139,7 @@ BASE_APPEND_CONTENT_RULES: List[Dict[str, Any]] = [
                 {
                     "kind": "event_match",
                     "key": "content.body",
+                    # Match the localpart of the requester's MXID.
                     "pattern_type": "user_localpart",
                 }
             ],
@@ -191,6 +192,7 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [
                 "pattern": "invite",
                 "_cache_key": "_invite_member",
             },
+            # Match the requester's MXID.
             {"kind": "event_match", "key": "state_key", "pattern_type": "user_id"},
         ],
         "actions": [
@@ -290,7 +292,7 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [
                 "_cache_key": "_room_server_acl",
             }
         ],
-        "actions": ["dont_notify"],
+        "actions": [],
     },
 ]
 
@@ -350,6 +352,18 @@ BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [
             {"set_tweak": "highlight", "value": False},
         ],
     },
+    {
+        "rule_id": "global/underride/.org.matrix.msc3772.thread_reply",
+        "conditions": [
+            {
+                "kind": "org.matrix.msc3772.relation_match",
+                "rel_type": "m.thread",
+                # Match the requester's MXID.
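+                # ("sender_type" is a server-side placeholder: when the rules
+                # are rendered for clients it is replaced by a concrete
+                # "sender" field containing the user's MXID -- see the
+                # synapse/push/clientformat.py hunk below.)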
+ "sender_type": "user_id", + } + ], + "actions": ["notify", {"set_tweak": "highlight", "value": False}], + }, { "rule_id": "global/underride/.m.rule.message", "conditions": [ diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 4cc8a2ecca..7791b289e2 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -13,8 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +import itertools import logging -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple, Union +from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple, Union import attr from prometheus_client import Counter @@ -121,6 +122,9 @@ class BulkPushRuleEvaluator: resizable=False, ) + # Whether to support MSC3772 is supported. + self._relations_match_enabled = self.hs.config.experimental.msc3772_enabled + async def _get_rules_for_event( self, event: EventBase, context: EventContext ) -> Dict[str, List[Dict[str, Any]]]: @@ -149,12 +153,10 @@ class BulkPushRuleEvaluator: if event.type == "m.room.member" and event.content["membership"] == "invite": invited = event.state_key if invited and self.hs.is_mine_id(invited): - has_pusher = await self.store.user_has_pusher(invited) - if has_pusher: - rules_by_user = dict(rules_by_user) - rules_by_user[invited] = await self.store.get_push_rules_for_user( - invited - ) + rules_by_user = dict(rules_by_user) + rules_by_user[invited] = await self.store.get_push_rules_for_user( + invited + ) return rules_by_user @@ -192,6 +194,60 @@ class BulkPushRuleEvaluator: return pl_event.content if pl_event else {}, sender_level + async def _get_mutual_relations( + self, event: EventBase, rules: Iterable[Dict[str, Any]] + ) -> Dict[str, Set[Tuple[str, str]]]: + """ + Fetch event metadata for events which related to the same event as the given event. + + If the given event has no relation information, returns an empty dictionary. + + Args: + event_id: The event ID which is targeted by relations. + rules: The push rules which will be processed for this event. + + Returns: + A dictionary of relation type to: + A set of tuples of: + The sender + The event type + """ + + # If the experimental feature is not enabled, skip fetching relations. + if not self._relations_match_enabled: + return {} + + # If the event does not have a relation, then cannot have any mutual + # relations. + relation = relation_from_event(event) + if not relation: + return {} + + # Pre-filter to figure out which relation types are interesting. + rel_types = set() + for rule in rules: + # Skip disabled rules. + if "enabled" in rule and not rule["enabled"]: + continue + + for condition in rule["conditions"]: + if condition["kind"] != "org.matrix.msc3772.relation_match": + continue + + # rel_type is required. + rel_type = condition.get("rel_type") + if rel_type: + rel_types.add(rel_type) + + # If no valid rules were found, no mutual relations. + if not rel_types: + return {} + + # If any valid rules were found, fetch the mutual relations. 
+ return await self.store.get_mutual_event_relations( + relation.parent_id, rel_types + ) + @measure_func("action_for_event_by_user") async def action_for_event_by_user( self, event: EventBase, context: EventContext @@ -216,8 +272,17 @@ class BulkPushRuleEvaluator: sender_power_level, ) = await self._get_power_levels_and_sender_level(event, context) + relations = await self._get_mutual_relations( + event, itertools.chain(*rules_by_user.values()) + ) + evaluator = PushRuleEvaluatorForEvent( - event, len(room_members), sender_power_level, power_levels + event, + len(room_members), + sender_power_level, + power_levels, + relations, + self._relations_match_enabled, ) # If the event is not a state event check if any users ignore the sender. diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py index 63b22d50ae..5117ef6854 100644 --- a/synapse/push/clientformat.py +++ b/synapse/push/clientformat.py @@ -48,6 +48,10 @@ def format_push_rules_for_user( elif pattern_type == "user_localpart": c["pattern"] = user.localpart + sender_type = c.pop("sender_type", None) + if sender_type == "user_id": + c["sender"] = user.to_string() + rulearray = rules["global"][template_name] template_rule = _rule_to_template(r) diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index d5603596c0..e96fb45e9f 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -65,7 +65,7 @@ class HttpPusher(Pusher): def __init__(self, hs: "HomeServer", pusher_config: PusherConfig): super().__init__(hs, pusher_config) - self.storage = self.hs.get_storage() + self._storage_controllers = self.hs.get_storage_controllers() self.app_display_name = pusher_config.app_display_name self.device_display_name = pusher_config.device_display_name self.pushkey_ts = pusher_config.ts @@ -343,7 +343,9 @@ class HttpPusher(Pusher): } return d - ctx = await push_tools.get_context_for_event(self.storage, event, self.user_id) + ctx = await push_tools.get_context_for_event( + self._storage_controllers, event, self.user_id + ) d = { "notification": { diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 5ccdd88364..63aefd07f5 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -114,10 +114,10 @@ class Mailer: self.send_email_handler = hs.get_send_email_handler() self.store = self.hs.get_datastores().main - self.state_store = self.hs.get_storage().state + self._state_storage_controller = self.hs.get_storage_controllers().state self.macaroon_gen = self.hs.get_macaroon_generator() self.state_handler = self.hs.get_state_handler() - self.storage = hs.get_storage() + self._storage_controllers = hs.get_storage_controllers() self.app_name = app_name self.email_subjects: EmailSubjectConfig = hs.config.email.email_subjects @@ -456,7 +456,7 @@ class Mailer: } the_events = await filter_events_for_client( - self.storage, user_id, results.events_before + self._storage_controllers, user_id, results.events_before ) the_events.append(notif_event) @@ -494,7 +494,7 @@ class Mailer: ) else: # Attempt to check the historical state for the room. - historical_state = await self.state_store.get_state_for_event( + historical_state = await self._state_storage_controller.get_state_for_event( event.event_id, StateFilter.from_types((type_state_key,)) ) sender_state_event = historical_state.get(type_state_key) @@ -767,8 +767,10 @@ class Mailer: member_event_ids.append(sender_state_event_id) else: # Attempt to check the historical state for the room. 
-                historical_state = await self.state_store.get_state_for_event(
-                    event_id, StateFilter.from_types((type_state_key,))
+                historical_state = (
+                    await self._state_storage_controller.get_state_for_event(
+                        event_id, StateFilter.from_types((type_state_key,))
+                    )
                 )
                 sender_state_event = historical_state.get(type_state_key)
             if sender_state_event:
diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py
index 54db6b5612..2e8a017add 100644
--- a/synapse/push/push_rule_evaluator.py
+++ b/synapse/push/push_rule_evaluator.py
@@ -15,7 +15,7 @@
 
 import logging
 import re
-from typing import Any, Dict, List, Mapping, Optional, Pattern, Tuple, Union
+from typing import Any, Dict, List, Mapping, Optional, Pattern, Set, Tuple, Union
 
 from matrix_common.regex import glob_to_regex, to_word_pattern
 
@@ -120,11 +120,15 @@ class PushRuleEvaluatorForEvent:
         room_member_count: int,
         sender_power_level: int,
         power_levels: Dict[str, Union[int, Dict[str, int]]],
+        relations: Dict[str, Set[Tuple[str, str]]],
+        relations_match_enabled: bool,
     ):
         self._event = event
         self._room_member_count = room_member_count
         self._sender_power_level = sender_power_level
         self._power_levels = power_levels
+        self._relations = relations
+        self._relations_match_enabled = relations_match_enabled
 
         # Maps strings of e.g. 'content.body' -> event["content"]["body"]
         self._value_cache = _flatten_dict(event)
@@ -188,7 +192,16 @@ class PushRuleEvaluatorForEvent:
             return _sender_notification_permission(
                 self._event, condition, self._sender_power_level, self._power_levels
             )
+        elif (
+            condition["kind"] == "org.matrix.msc3772.relation_match"
+            and self._relations_match_enabled
+        ):
+            return self._relation_match(condition, user_id)
         else:
+            # XXX This looks incorrect -- we have reached an unknown condition
+            # kind and are unconditionally returning that it matches. Note
+            # that it seems possible to provide a condition to the /pushrules
+            # endpoint with an unknown kind, see _rule_tuple_from_request_object.
             return True
 
     def _event_match(self, condition: dict, user_id: str) -> bool:
@@ -256,6 +269,41 @@ class PushRuleEvaluatorForEvent:
 
         return bool(r.search(body))
 
+    def _relation_match(self, condition: dict, user_id: str) -> bool:
+        """
+        Check a "relation_match" push rule condition.
+
+        Args:
+            condition: The "relation_match" push rule condition to match.
+            user_id: The user's MXID.
+
+        Returns:
+            True if the condition matches the event, False otherwise.
+        """
+        rel_type = condition.get("rel_type")
+        if not rel_type:
+            logger.warning("relation_match condition missing rel_type")
+            return False
+
+        sender_pattern = condition.get("sender")
+        if sender_pattern is None:
+            sender_type = condition.get("sender_type")
+            if sender_type == "user_id":
+                sender_pattern = user_id
+        type_pattern = condition.get("type")
+
+        # If any of the mutual relations match, return True.
+        for sender, event_type in self._relations.get(rel_type, ()):
+            if sender_pattern and not _glob_matches(sender_pattern, sender):
+                continue
+            if type_pattern and not _glob_matches(type_pattern, event_type):
+                continue
+            # All values must have matched.
+            return True
+
+        # No relations matched.
+        return False
+
 
 # Caches (string, is_glob, word_boundary) -> regex for push.
See _glob_matches regex_cache: LruCache[Tuple[str, bool, bool], Pattern] = LruCache( diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py index a1bf5b20dd..8397229ccb 100644 --- a/synapse/push/push_tools.py +++ b/synapse/push/push_tools.py @@ -16,7 +16,7 @@ from typing import Dict from synapse.api.constants import ReceiptTypes from synapse.events import EventBase from synapse.push.presentable_names import calculate_room_name, name_from_member_event -from synapse.storage import Storage +from synapse.storage.controllers import StorageControllers from synapse.storage.databases.main import DataStore @@ -52,7 +52,7 @@ async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) - async def get_context_for_event( - storage: Storage, ev: EventBase, user_id: str + storage: StorageControllers, ev: EventBase, user_id: str ) -> Dict[str, str]: ctx = {} diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py index 3e7300b4a1..eed29cd597 100644 --- a/synapse/replication/http/federation.py +++ b/synapse/replication/http/federation.py @@ -69,7 +69,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint): super().__init__(hs) self.store = hs.get_datastores().main - self.storage = hs.get_storage() + self._storage_controllers = hs.get_storage_controllers() self.clock = hs.get_clock() self.federation_event_handler = hs.get_federation_event_handler() @@ -133,7 +133,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint): event.internal_metadata.outlier = event_payload["outlier"] context = EventContext.deserialize( - self.storage, event_payload["context"] + self._storage_controllers, event_payload["context"] ) event_and_contexts.append((event, context)) diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py index ce78176836..c2b2588ea5 100644 --- a/synapse/replication/http/send_event.py +++ b/synapse/replication/http/send_event.py @@ -70,7 +70,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint): self.event_creation_handler = hs.get_event_creation_handler() self.store = hs.get_datastores().main - self.storage = hs.get_storage() + self._storage_controllers = hs.get_storage_controllers() self.clock = hs.get_clock() @staticmethod @@ -127,7 +127,9 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint): event.internal_metadata.outlier = content["outlier"] requester = Requester.deserialize(self.store, content["requester"]) - context = EventContext.deserialize(self.storage, content["context"]) + context = EventContext.deserialize( + self._storage_controllers, content["context"] + ) ratelimit = content["ratelimit"] extra_users = [UserID.from_string(u) for u in content["extra_users"]] diff --git a/synapse/replication/slave/storage/groups.py b/synapse/replication/slave/storage/groups.py deleted file mode 100644 index d6f37d7479..0000000000 --- a/synapse/replication/slave/storage/groups.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import TYPE_CHECKING, Any, Iterable - -from synapse.replication.slave.storage._base import BaseSlavedStore -from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker -from synapse.replication.tcp.streams import GroupServerStream -from synapse.storage.database import DatabasePool, LoggingDatabaseConnection -from synapse.storage.databases.main.group_server import GroupServerWorkerStore -from synapse.util.caches.stream_change_cache import StreamChangeCache - -if TYPE_CHECKING: - from synapse.server import HomeServer - - -class SlavedGroupServerStore(GroupServerWorkerStore, BaseSlavedStore): - def __init__( - self, - database: DatabasePool, - db_conn: LoggingDatabaseConnection, - hs: "HomeServer", - ): - super().__init__(database, db_conn, hs) - - self.hs = hs - - self._group_updates_id_gen = SlavedIdTracker( - db_conn, "local_group_updates", "stream_id" - ) - self._group_updates_stream_cache = StreamChangeCache( - "_group_updates_stream_cache", - self._group_updates_id_gen.get_current_token(), - ) - - def get_group_stream_token(self) -> int: - return self._group_updates_id_gen.get_current_token() - - def process_replication_rows( - self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any] - ) -> None: - if stream_name == GroupServerStream.NAME: - self._group_updates_id_gen.advance(instance_name, token) - for row in rows: - self._group_updates_stream_cache.entity_has_changed(row.user_id, token) - - return super().process_replication_rows(stream_name, instance_name, token, rows) diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index a52e25c1af..2f59245058 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -30,7 +30,6 @@ from synapse.replication.tcp.protocol import ClientReplicationStreamProtocol from synapse.replication.tcp.streams import ( AccountDataStream, DeviceListsStream, - GroupServerStream, PushersStream, PushRulesStream, ReceiptsStream, @@ -185,10 +184,6 @@ class ReplicationDataHandler: self.notifier.on_new_event( StreamKeyType.DEVICE_LIST, token, rooms=all_room_ids ) - elif stream_name == GroupServerStream.NAME: - self.notifier.on_new_event( - "groups_key", token, users=[row.user_id for row in rows] - ) elif stream_name == PushersStream.NAME: for row in rows: if row.deleted: diff --git a/synapse/replication/tcp/streams/__init__.py b/synapse/replication/tcp/streams/__init__.py index f41eabd85e..b1cd55bf6f 100644 --- a/synapse/replication/tcp/streams/__init__.py +++ b/synapse/replication/tcp/streams/__init__.py @@ -29,7 +29,6 @@ from synapse.replication.tcp.streams._base import ( BackfillStream, CachesStream, DeviceListsStream, - GroupServerStream, PresenceFederationStream, PresenceStream, PushersStream, @@ -61,7 +60,6 @@ STREAMS_MAP = { FederationStream, TagAccountDataStream, AccountDataStream, - GroupServerStream, UserSignatureStream, ) } @@ -81,6 +79,5 @@ __all__ = [ "ToDeviceStream", "TagAccountDataStream", "AccountDataStream", - "GroupServerStream", "UserSignatureStream", ] diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index 495f2f0285..398bebeaa6 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -585,26 +585,6 @@ class AccountDataStream(Stream): return updates, to_token, limited -class GroupServerStream(Stream): - @attr.s(slots=True, frozen=True, 
auto_attribs=True) - class GroupsStreamRow: - group_id: str - user_id: str - type: str - content: JsonDict - - NAME = "groups" - ROW_TYPE = GroupsStreamRow - - def __init__(self, hs: "HomeServer"): - store = hs.get_datastores().main - super().__init__( - hs.get_instance_name(), - current_token_without_instance(store.get_group_stream_token), - store.get_all_groups_changes, - ) - - class UserSignatureStream(Stream): """A user has signed their own device with their user-signing key""" diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index 57c4773edc..b712215112 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -26,7 +26,6 @@ from synapse.rest.client import ( directory, events, filter, - groups, initial_sync, keys, knock, @@ -118,8 +117,6 @@ class ClientRestResource(JsonResource): thirdparty.register_servlets(hs, client_resource) sendtodevice.register_servlets(hs, client_resource) user_directory.register_servlets(hs, client_resource) - if hs.config.experimental.groups_enabled: - groups.register_servlets(hs, client_resource) room_upgrade_rest_servlet.register_servlets(hs, client_resource) room_batch.register_servlets(hs, client_resource) capabilities.register_servlets(hs, client_resource) diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index cb4d55c89d..1aa08f8d95 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -47,7 +47,6 @@ from synapse.rest.admin.federation import ( DestinationRestServlet, ListDestinationsRestServlet, ) -from synapse.rest.admin.groups import DeleteGroupAdminRestServlet from synapse.rest.admin.media import ListMediaInRoom, register_servlets_for_media_repo from synapse.rest.admin.registration_tokens import ( ListRegistrationTokensRestServlet, @@ -293,8 +292,6 @@ def register_servlets_for_client_rest_resource( ResetPasswordRestServlet(hs).register(http_server) SearchUsersRestServlet(hs).register(http_server) UserRegisterServlet(hs).register(http_server) - if hs.config.experimental.groups_enabled: - DeleteGroupAdminRestServlet(hs).register(http_server) AccountValidityRenewServlet(hs).register(http_server) # Load the media repo ones if we're using them. Otherwise load the servlets which diff --git a/synapse/rest/admin/groups.py b/synapse/rest/admin/groups.py deleted file mode 100644 index cd697e180e..0000000000 --- a/synapse/rest/admin/groups.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2019 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import logging
-from http import HTTPStatus
-from typing import TYPE_CHECKING, Tuple
-
-from synapse.api.errors import SynapseError
-from synapse.http.servlet import RestServlet
-from synapse.http.site import SynapseRequest
-from synapse.rest.admin._base import admin_patterns, assert_user_is_admin
-from synapse.types import JsonDict
-
-if TYPE_CHECKING:
-    from synapse.server import HomeServer
-
-logger = logging.getLogger(__name__)
-
-
-class DeleteGroupAdminRestServlet(RestServlet):
-    """Allows deleting of local groups"""
-
-    PATTERNS = admin_patterns("/delete_group/(?P<group_id>[^/]*)$")
-
-    def __init__(self, hs: "HomeServer"):
-        self.group_server = hs.get_groups_server_handler()
-        self.is_mine_id = hs.is_mine_id
-        self.auth = hs.get_auth()
-
-    async def on_POST(
-        self, request: SynapseRequest, group_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        await assert_user_is_admin(self.auth, requester.user)
-
-        if not self.is_mine_id(group_id):
-            raise SynapseError(HTTPStatus.BAD_REQUEST, "Can only delete local groups")
-
-        await self.group_server.delete_group(group_id, requester.user.to_string())
-        return HTTPStatus.OK, {}
diff --git a/synapse/rest/client/groups.py b/synapse/rest/client/groups.py
deleted file mode 100644
index 7e1149c7f4..0000000000
--- a/synapse/rest/client/groups.py
+++ /dev/null
@@ -1,962 +0,0 @@
-# Copyright 2017 Vector Creations Ltd
-# Copyright 2018 New Vector Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-from functools import wraps
-from typing import TYPE_CHECKING, Any, Awaitable, Callable, Optional, Tuple
-
-from twisted.web.server import Request
-
-from synapse.api.constants import (
-    MAX_GROUP_CATEGORYID_LENGTH,
-    MAX_GROUP_ROLEID_LENGTH,
-    MAX_GROUPID_LENGTH,
-)
-from synapse.api.errors import Codes, SynapseError
-from synapse.handlers.groups_local import GroupsLocalHandler
-from synapse.http.server import HttpServer
-from synapse.http.servlet import (
-    RestServlet,
-    assert_params_in_dict,
-    parse_json_object_from_request,
-)
-from synapse.http.site import SynapseRequest
-from synapse.types import GroupID, JsonDict
-
-from ._base import client_patterns
-
-if TYPE_CHECKING:
-    from synapse.server import HomeServer
-
-logger = logging.getLogger(__name__)
-
-
-def _validate_group_id(
-    f: Callable[..., Awaitable[Tuple[int, JsonDict]]]
-) -> Callable[..., Awaitable[Tuple[int, JsonDict]]]:
-    """Wrapper to validate the form of the group ID.
-
-    Can be applied to any on_FOO methods that accepts a group ID as a URL parameter.
- """ - - @wraps(f) - def wrapper( - self: RestServlet, request: Request, group_id: str, *args: Any, **kwargs: Any - ) -> Awaitable[Tuple[int, JsonDict]]: - if not GroupID.is_valid(group_id): - raise SynapseError(400, "%s is not a legal group ID" % (group_id,)) - - return f(self, request, group_id, *args, **kwargs) - - return wrapper - - -class GroupServlet(RestServlet): - """Get the group profile""" - - PATTERNS = client_patterns("/groups/(?P[^/]*)/profile$") - - def __init__(self, hs: "HomeServer"): - super().__init__() - self.auth = hs.get_auth() - self.clock = hs.get_clock() - self.groups_handler = hs.get_groups_local_handler() - - @_validate_group_id - async def on_GET( - self, request: SynapseRequest, group_id: str - ) -> Tuple[int, JsonDict]: - requester = await self.auth.get_user_by_req(request, allow_guest=True) - requester_user_id = requester.user.to_string() - - group_description = await self.groups_handler.get_group_profile( - group_id, requester_user_id - ) - - return 200, group_description - - @_validate_group_id - async def on_POST( - self, request: SynapseRequest, group_id: str - ) -> Tuple[int, JsonDict]: - requester = await self.auth.get_user_by_req(request) - requester_user_id = requester.user.to_string() - - content = parse_json_object_from_request(request) - assert_params_in_dict( - content, ("name", "avatar_url", "short_description", "long_description") - ) - assert isinstance( - self.groups_handler, GroupsLocalHandler - ), "Workers cannot create group profiles." - await self.groups_handler.update_group_profile( - group_id, requester_user_id, content - ) - - return 200, {} - - -class GroupSummaryServlet(RestServlet): - """Get the full group summary""" - - PATTERNS = client_patterns("/groups/(?P[^/]*)/summary$") - - def __init__(self, hs: "HomeServer"): - super().__init__() - self.auth = hs.get_auth() - self.clock = hs.get_clock() - self.groups_handler = hs.get_groups_local_handler() - - @_validate_group_id - async def on_GET( - self, request: SynapseRequest, group_id: str - ) -> Tuple[int, JsonDict]: - requester = await self.auth.get_user_by_req(request, allow_guest=True) - requester_user_id = requester.user.to_string() - - get_group_summary = await self.groups_handler.get_group_summary( - group_id, requester_user_id - ) - - return 200, get_group_summary - - -class GroupSummaryRoomsCatServlet(RestServlet): - """Update/delete a rooms entry in the summary. - - Matches both: - - /groups/:group/summary/rooms/:room_id - - /groups/:group/summary/categories/:category/rooms/:room_id - """ - - PATTERNS = client_patterns( - "/groups/(?P[^/]*)/summary" - "(/categories/(?P[^/]+))?" 
- "/rooms/(?P[^/]*)$" - ) - - def __init__(self, hs: "HomeServer"): - super().__init__() - self.auth = hs.get_auth() - self.clock = hs.get_clock() - self.groups_handler = hs.get_groups_local_handler() - - @_validate_group_id - async def on_PUT( - self, - request: SynapseRequest, - group_id: str, - category_id: Optional[str], - room_id: str, - ) -> Tuple[int, JsonDict]: - requester = await self.auth.get_user_by_req(request) - requester_user_id = requester.user.to_string() - - if category_id == "": - raise SynapseError(400, "category_id cannot be empty", Codes.INVALID_PARAM) - - if category_id and len(category_id) > MAX_GROUP_CATEGORYID_LENGTH: - raise SynapseError( - 400, - "category_id may not be longer than %s characters" - % (MAX_GROUP_CATEGORYID_LENGTH,), - Codes.INVALID_PARAM, - ) - - content = parse_json_object_from_request(request) - assert isinstance( - self.groups_handler, GroupsLocalHandler - ), "Workers cannot modify group summaries." - resp = await self.groups_handler.update_group_summary_room( - group_id, - requester_user_id, - room_id=room_id, - category_id=category_id, - content=content, - ) - - return 200, resp - - @_validate_group_id - async def on_DELETE( - self, request: SynapseRequest, group_id: str, category_id: str, room_id: str - ) -> Tuple[int, JsonDict]: - requester = await self.auth.get_user_by_req(request) - requester_user_id = requester.user.to_string() - - assert isinstance( - self.groups_handler, GroupsLocalHandler - ), "Workers cannot modify group profiles." - resp = await self.groups_handler.delete_group_summary_room( - group_id, requester_user_id, room_id=room_id, category_id=category_id - ) - - return 200, resp - - -class GroupCategoryServlet(RestServlet): - """Get/add/update/delete a group category""" - - PATTERNS = client_patterns( - "/groups/(?P[^/]*)/categories/(?P[^/]+)$" - ) - - def __init__(self, hs: "HomeServer"): - super().__init__() - self.auth = hs.get_auth() - self.clock = hs.get_clock() - self.groups_handler = hs.get_groups_local_handler() - - @_validate_group_id - async def on_GET( - self, request: SynapseRequest, group_id: str, category_id: str - ) -> Tuple[int, JsonDict]: - requester = await self.auth.get_user_by_req(request, allow_guest=True) - requester_user_id = requester.user.to_string() - - category = await self.groups_handler.get_group_category( - group_id, requester_user_id, category_id=category_id - ) - - return 200, category - - @_validate_group_id - async def on_PUT( - self, request: SynapseRequest, group_id: str, category_id: str - ) -> Tuple[int, JsonDict]: - requester = await self.auth.get_user_by_req(request) - requester_user_id = requester.user.to_string() - - if not category_id: - raise SynapseError(400, "category_id cannot be empty", Codes.INVALID_PARAM) - - if len(category_id) > MAX_GROUP_CATEGORYID_LENGTH: - raise SynapseError( - 400, - "category_id may not be longer than %s characters" - % (MAX_GROUP_CATEGORYID_LENGTH,), - Codes.INVALID_PARAM, - ) - - content = parse_json_object_from_request(request) - assert isinstance( - self.groups_handler, GroupsLocalHandler - ), "Workers cannot modify group categories." 
-        resp = await self.groups_handler.update_group_category(
-            group_id, requester_user_id, category_id=category_id, content=content
-        )
-
-        return 200, resp
-
-    @_validate_group_id
-    async def on_DELETE(
-        self, request: SynapseRequest, group_id: str, category_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot modify group categories."
-        resp = await self.groups_handler.delete_group_category(
-            group_id, requester_user_id, category_id=category_id
-        )
-
-        return 200, resp
-
-
-class GroupCategoriesServlet(RestServlet):
-    """Get all group categories"""
-
-    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/categories/$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_GET(
-        self, request: SynapseRequest, group_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request, allow_guest=True)
-        requester_user_id = requester.user.to_string()
-
-        category = await self.groups_handler.get_group_categories(
-            group_id, requester_user_id
-        )
-
-        return 200, category
-
-
-class GroupRoleServlet(RestServlet):
-    """Get/add/update/delete a group role"""
-
-    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/roles/(?P<role_id>[^/]+)$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_GET(
-        self, request: SynapseRequest, group_id: str, role_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request, allow_guest=True)
-        requester_user_id = requester.user.to_string()
-
-        category = await self.groups_handler.get_group_role(
-            group_id, requester_user_id, role_id=role_id
-        )
-
-        return 200, category
-
-    @_validate_group_id
-    async def on_PUT(
-        self, request: SynapseRequest, group_id: str, role_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        if not role_id:
-            raise SynapseError(400, "role_id cannot be empty", Codes.INVALID_PARAM)
-
-        if len(role_id) > MAX_GROUP_ROLEID_LENGTH:
-            raise SynapseError(
-                400,
-                "role_id may not be longer than %s characters"
-                % (MAX_GROUP_ROLEID_LENGTH,),
-                Codes.INVALID_PARAM,
-            )
-
-        content = parse_json_object_from_request(request)
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot modify group roles."
-        resp = await self.groups_handler.update_group_role(
-            group_id, requester_user_id, role_id=role_id, content=content
-        )
-
-        return 200, resp
-
-    @_validate_group_id
-    async def on_DELETE(
-        self, request: SynapseRequest, group_id: str, role_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot modify group roles."
-        resp = await self.groups_handler.delete_group_role(
-            group_id, requester_user_id, role_id=role_id
-        )
-
-        return 200, resp
-
-
-class GroupRolesServlet(RestServlet):
-    """Get all group roles"""
-
-    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/roles/$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_GET(
-        self, request: SynapseRequest, group_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request, allow_guest=True)
-        requester_user_id = requester.user.to_string()
-
-        category = await self.groups_handler.get_group_roles(
-            group_id, requester_user_id
-        )
-
-        return 200, category
-
-
-class GroupSummaryUsersRoleServlet(RestServlet):
-    """Update/delete a user's entry in the summary.
-
-    Matches both:
-        - /groups/:group/summary/users/:room_id
-        - /groups/:group/summary/roles/:role/users/:user_id
-    """
-
-    PATTERNS = client_patterns(
-        "/groups/(?P<group_id>[^/]*)/summary"
-        "(/roles/(?P<role_id>[^/]+))?"
-        "/users/(?P<user_id>[^/]*)$"
-    )
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_PUT(
-        self,
-        request: SynapseRequest,
-        group_id: str,
-        role_id: Optional[str],
-        user_id: str,
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        if role_id == "":
-            raise SynapseError(400, "role_id cannot be empty", Codes.INVALID_PARAM)
-
-        if role_id and len(role_id) > MAX_GROUP_ROLEID_LENGTH:
-            raise SynapseError(
-                400,
-                "role_id may not be longer than %s characters"
-                % (MAX_GROUP_ROLEID_LENGTH,),
-                Codes.INVALID_PARAM,
-            )
-
-        content = parse_json_object_from_request(request)
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot modify group summaries."
-        resp = await self.groups_handler.update_group_summary_user(
-            group_id,
-            requester_user_id,
-            user_id=user_id,
-            role_id=role_id,
-            content=content,
-        )
-
-        return 200, resp
-
-    @_validate_group_id
-    async def on_DELETE(
-        self, request: SynapseRequest, group_id: str, role_id: str, user_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot modify group summaries."
-        resp = await self.groups_handler.delete_group_summary_user(
-            group_id, requester_user_id, user_id=user_id, role_id=role_id
-        )
-
-        return 200, resp
-
-
-class GroupRoomServlet(RestServlet):
-    """Get all rooms in a group"""
-
-    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/rooms$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_GET(
-        self, request: SynapseRequest, group_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request, allow_guest=True)
-        requester_user_id = requester.user.to_string()
-
-        result = await self.groups_handler.get_rooms_in_group(
-            group_id, requester_user_id
-        )
-
-        return 200, result
-
-
-class GroupUsersServlet(RestServlet):
-    """Get all users in a group"""
-
-    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/users$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_GET(
-        self, request: SynapseRequest, group_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request, allow_guest=True)
-        requester_user_id = requester.user.to_string()
-
-        result = await self.groups_handler.get_users_in_group(
-            group_id, requester_user_id
-        )
-
-        return 200, result
-
-
-class GroupInvitedUsersServlet(RestServlet):
-    """Get users invited to a group"""
-
-    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/invited_users$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_GET(
-        self, request: SynapseRequest, group_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        result = await self.groups_handler.get_invited_users_in_group(
-            group_id, requester_user_id
-        )
-
-        return 200, result
-
-
-class GroupSettingJoinPolicyServlet(RestServlet):
-    """Set group join policy"""
-
-    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/settings/m.join_policy$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_PUT(
-        self, request: SynapseRequest, group_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        content = parse_json_object_from_request(request)
-
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot modify group join policy."
-        result = await self.groups_handler.set_group_join_policy(
-            group_id, requester_user_id, content
-        )
-
-        return 200, result
-
-
-class GroupCreateServlet(RestServlet):
-    """Create a group"""
-
-    PATTERNS = client_patterns("/create_group$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-        self.server_name = hs.hostname
-
-    async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        # TODO: Create group on remote server
-        content = parse_json_object_from_request(request)
-        localpart = content.pop("localpart")
-        group_id = GroupID(localpart, self.server_name).to_string()
-
-        if not localpart:
-            raise SynapseError(400, "Group ID cannot be empty", Codes.INVALID_PARAM)
-
-        if len(group_id) > MAX_GROUPID_LENGTH:
-            raise SynapseError(
-                400,
-                "Group ID may not be longer than %s characters" % (MAX_GROUPID_LENGTH,),
-                Codes.INVALID_PARAM,
-            )
-
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot create groups."
-        result = await self.groups_handler.create_group(
-            group_id, requester_user_id, content
-        )
-
-        return 200, result
-
-
-class GroupAdminRoomsServlet(RestServlet):
-    """Add a room to the group"""
-
-    PATTERNS = client_patterns(
-        "/groups/(?P<group_id>[^/]*)/admin/rooms/(?P<room_id>[^/]*)$"
-    )
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_PUT(
-        self, request: SynapseRequest, group_id: str, room_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        content = parse_json_object_from_request(request)
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot modify rooms in a group."
-        result = await self.groups_handler.add_room_to_group(
-            group_id, requester_user_id, room_id, content
-        )
-
-        return 200, result
-
-    @_validate_group_id
-    async def on_DELETE(
-        self, request: SynapseRequest, group_id: str, room_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot modify group categories."
-        result = await self.groups_handler.remove_room_from_group(
-            group_id, requester_user_id, room_id
-        )
-
-        return 200, result
-
-
-class GroupAdminRoomsConfigServlet(RestServlet):
-    """Update the config of a room in a group"""
-
-    PATTERNS = client_patterns(
-        "/groups/(?P<group_id>[^/]*)/admin/rooms/(?P<room_id>[^/]*)"
-        "/config/(?P<config_key>[^/]*)$"
-    )
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_PUT(
-        self, request: SynapseRequest, group_id: str, room_id: str, config_key: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        content = parse_json_object_from_request(request)
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot modify group categories."
-        result = await self.groups_handler.update_room_in_group(
-            group_id, requester_user_id, room_id, config_key, content
-        )
-
-        return 200, result
-
-
-class GroupAdminUsersInviteServlet(RestServlet):
-    """Invite a user to the group"""
-
-    PATTERNS = client_patterns(
-        "/groups/(?P<group_id>[^/]*)/admin/users/invite/(?P<user_id>[^/]*)$"
-    )
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-        self.store = hs.get_datastores().main
-        self.is_mine_id = hs.is_mine_id
-
-    @_validate_group_id
-    async def on_PUT(
-        self, request: SynapseRequest, group_id: str, user_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        content = parse_json_object_from_request(request)
-        config = content.get("config", {})
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot invite users to a group."
-        result = await self.groups_handler.invite(
-            group_id, user_id, requester_user_id, config
-        )
-
-        return 200, result
-
-
-class GroupAdminUsersKickServlet(RestServlet):
-    """Kick a user from the group"""
-
-    PATTERNS = client_patterns(
-        "/groups/(?P<group_id>[^/]*)/admin/users/remove/(?P<user_id>[^/]*)$"
-    )
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_PUT(
-        self, request: SynapseRequest, group_id: str, user_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        content = parse_json_object_from_request(request)
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot kick users from a group."
-        result = await self.groups_handler.remove_user_from_group(
-            group_id, user_id, requester_user_id, content
-        )
-
-        return 200, result
-
-
-class GroupSelfLeaveServlet(RestServlet):
-    """Leave a joined group"""
-
-    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/self/leave$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_PUT(
-        self, request: SynapseRequest, group_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        content = parse_json_object_from_request(request)
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot leave a group for a users."
-        result = await self.groups_handler.remove_user_from_group(
-            group_id, requester_user_id, requester_user_id, content
-        )
-
-        return 200, result
-
-
-class GroupSelfJoinServlet(RestServlet):
-    """Attempt to join a group, or knock"""
-
-    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/self/join$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_PUT(
-        self, request: SynapseRequest, group_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        content = parse_json_object_from_request(request)
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot join a user to a group."
-        result = await self.groups_handler.join_group(
-            group_id, requester_user_id, content
-        )
-
-        return 200, result
-
-
-class GroupSelfAcceptInviteServlet(RestServlet):
-    """Accept a group invite"""
-
-    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/self/accept_invite$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    @_validate_group_id
-    async def on_PUT(
-        self, request: SynapseRequest, group_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        content = parse_json_object_from_request(request)
-        assert isinstance(
-            self.groups_handler, GroupsLocalHandler
-        ), "Workers cannot accept an invite to a group."
-        result = await self.groups_handler.accept_invite(
-            group_id, requester_user_id, content
-        )
-
-        return 200, result
-
-
-class GroupSelfUpdatePublicityServlet(RestServlet):
-    """Update whether we publicise a users membership of a group"""
-
-    PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/self/update_publicity$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.store = hs.get_datastores().main
-
-    @_validate_group_id
-    async def on_PUT(
-        self, request: SynapseRequest, group_id: str
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request)
-        requester_user_id = requester.user.to_string()
-
-        content = parse_json_object_from_request(request)
-        publicise = content["publicise"]
-        await self.store.update_group_publicity(group_id, requester_user_id, publicise)
-
-        return 200, {}
-
-
-class PublicisedGroupsForUserServlet(RestServlet):
-    """Get the list of groups a user is advertising"""
-
-    PATTERNS = client_patterns("/publicised_groups/(?P<user_id>[^/]*)$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.store = hs.get_datastores().main
-        self.groups_handler = hs.get_groups_local_handler()
-
-    async def on_GET(
-        self, request: SynapseRequest, user_id: str
-    ) -> Tuple[int, JsonDict]:
-        await self.auth.get_user_by_req(request, allow_guest=True)
-
-        result = await self.groups_handler.get_publicised_groups_for_user(user_id)
-
-        return 200, result
-
-
-class PublicisedGroupsForUsersServlet(RestServlet):
-    """Get the list of groups a user is advertising"""
-
-    PATTERNS = client_patterns("/publicised_groups$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.store = hs.get_datastores().main
-        self.groups_handler = hs.get_groups_local_handler()
-
-    async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
-        await self.auth.get_user_by_req(request, allow_guest=True)
-
-        content = parse_json_object_from_request(request)
-        user_ids = content["user_ids"]
-
-        result = await self.groups_handler.bulk_get_publicised_groups(user_ids)
-
-        return 200, result
-
-
-class GroupsForUserServlet(RestServlet):
-    """Get all groups the logged in user is joined to"""
-
-    PATTERNS = client_patterns("/joined_groups$")
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.clock = hs.get_clock()
-        self.groups_handler = hs.get_groups_local_handler()
-
-    async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request, allow_guest=True)
-        requester_user_id = requester.user.to_string()
-
-        result = await self.groups_handler.get_joined_groups(requester_user_id)
-
-        return 200, result
-
-
-def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
-    GroupServlet(hs).register(http_server)
-    GroupSummaryServlet(hs).register(http_server)
-    GroupInvitedUsersServlet(hs).register(http_server)
-    GroupUsersServlet(hs).register(http_server)
-    GroupRoomServlet(hs).register(http_server)
-    GroupSettingJoinPolicyServlet(hs).register(http_server)
-    GroupCreateServlet(hs).register(http_server)
-    GroupAdminRoomsServlet(hs).register(http_server)
-    GroupAdminRoomsConfigServlet(hs).register(http_server)
-    GroupAdminUsersInviteServlet(hs).register(http_server)
-    GroupAdminUsersKickServlet(hs).register(http_server)
-    GroupSelfLeaveServlet(hs).register(http_server)
-    GroupSelfJoinServlet(hs).register(http_server)
-    GroupSelfAcceptInviteServlet(hs).register(http_server)
-    GroupsForUserServlet(hs).register(http_server)
-    GroupCategoryServlet(hs).register(http_server)
-    GroupCategoriesServlet(hs).register(http_server)
-    GroupSummaryRoomsCatServlet(hs).register(http_server)
-    GroupRoleServlet(hs).register(http_server)
-    GroupRolesServlet(hs).register(http_server)
-    GroupSelfUpdatePublicityServlet(hs).register(http_server)
-    GroupSummaryUsersRoleServlet(hs).register(http_server)
-    PublicisedGroupsForUserServlet(hs).register(http_server)
-    PublicisedGroupsForUsersServlet(hs).register(http_server)
diff --git a/synapse/rest/client/mutual_rooms.py b/synapse/rest/client/mutual_rooms.py
index 27bfaf0b29..38ef4e459f 100644
--- a/synapse/rest/client/mutual_rooms.py
+++ b/synapse/rest/client/mutual_rooms.py
@@ -42,21 +42,10 @@ class UserMutualRoomsServlet(RestServlet):
         super().__init__()
         self.auth = hs.get_auth()
         self.store = hs.get_datastores().main
-        self.user_directory_search_enabled = (
-            hs.config.userdirectory.user_directory_search_enabled
-        )
 
     async def on_GET(
         self, request: SynapseRequest, user_id: str
     ) -> Tuple[int, JsonDict]:
-
-        if not self.user_directory_search_enabled:
-            raise SynapseError(
-                code=400,
-                msg="User directory searching is disabled. Cannot determine shared rooms.",
-                errcode=Codes.UNKNOWN,
-            )
-
         UserID.from_string(user_id)
 
         requester = await self.auth.get_user_by_req(request)
@@ -67,8 +56,8 @@ class UserMutualRoomsServlet(RestServlet):
             errcode=Codes.FORBIDDEN,
         )
 
-        rooms = await self.store.get_mutual_rooms_for_users(
-            requester.user.to_string(), user_id
+        rooms = await self.store.get_mutual_rooms_between_users(
+            frozenset((requester.user.to_string(), user_id))
         )
 
         return 200, {"joined": list(rooms)}
diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py
index 5a2361a2e6..7a5ce8ad0e 100644
--- a/synapse/rest/client/room.py
+++ b/synapse/rest/client/room.py
@@ -1193,12 +1193,7 @@ class TimestampLookupRestServlet(RestServlet):
 
 
 class RoomHierarchyRestServlet(RestServlet):
-    PATTERNS = (
-        re.compile(
-            "^/_matrix/client/(v1|unstable/org.matrix.msc2946)"
-            "/rooms/(?P<room_id>[^/]*)/hierarchy$"
-        ),
-    )
+    PATTERNS = (re.compile("^/_matrix/client/v1/rooms/(?P<room_id>[^/]*)/hierarchy$"),)
 
     def __init__(self, hs: "HomeServer"):
         super().__init__()
diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py
index e8772f86e7..8bbf35148d 100644
--- a/synapse/rest/client/sync.py
+++ b/synapse/rest/client/sync.py
@@ -16,7 +16,7 @@ import logging
 from collections import defaultdict
 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
 
-from synapse.api.constants import Membership, PresenceState
+from synapse.api.constants import EduTypes, Membership, PresenceState
 from synapse.api.errors import Codes, StoreError, SynapseError
 from synapse.api.filtering import FilterCollection
 from synapse.api.presence import UserPresenceState
@@ -298,14 +298,6 @@ class SyncRestServlet(RestServlet):
         if archived:
             response["rooms"][Membership.LEAVE] = archived
 
-        if sync_result.groups is not None:
-            if sync_result.groups.join:
-                response["groups"][Membership.JOIN] = sync_result.groups.join
-            if sync_result.groups.invite:
-                response["groups"][Membership.INVITE] = sync_result.groups.invite
-            if sync_result.groups.leave:
-                response["groups"][Membership.LEAVE] = sync_result.groups.leave
-
         return response
 
     @staticmethod
@@ -313,7 +305,7 @@
         return {
             "events": [
                 {
-                    "type": "m.presence",
+                    "type": EduTypes.PRESENCE,
                     "sender": event.user_id,
                     "content": format_user_presence_state(
                         event, time_now, include_user_id=False
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index 3e5d6c6294..20af366538 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -65,7 +65,12 @@ if TYPE_CHECKING:
 
 logger = logging.getLogger(__name__)
 
-UPDATE_RECENTLY_ACCESSED_TS = 60 * 1000
+# How often to run the background job to update the "recently accessed"
+# attribute of local and remote media.
+UPDATE_RECENTLY_ACCESSED_TS = 60 * 1000  # 1 minute
+# How often to run the background job to check for local and remote media
+# that should be purged according to the configured media retention settings.
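+# Note that this job is only scheduled at all when at least one of the
+# local/remote media lifetime options is set in the homeserver config; see
+# the check in MediaRepository's constructor below.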
+MEDIA_RETENTION_CHECK_PERIOD_MS = 60 * 60 * 1000  # 1 hour
 
 
 class MediaRepository:
@@ -122,11 +127,36 @@ class MediaRepository:
             self._start_update_recently_accessed, UPDATE_RECENTLY_ACCESSED_TS
         )
 
+        # Media retention configuration options
+        self._media_retention_local_media_lifetime_ms = (
+            hs.config.media.media_retention_local_media_lifetime_ms
+        )
+        self._media_retention_remote_media_lifetime_ms = (
+            hs.config.media.media_retention_remote_media_lifetime_ms
+        )
+
+        # Check whether local or remote media retention is configured
+        if (
+            hs.config.media.media_retention_local_media_lifetime_ms is not None
+            or hs.config.media.media_retention_remote_media_lifetime_ms is not None
+        ):
+            # Run the background job to apply media retention rules routinely,
+            # with the duration between runs dictated by the homeserver config.
+            self.clock.looping_call(
+                self._start_apply_media_retention_rules,
+                MEDIA_RETENTION_CHECK_PERIOD_MS,
+            )
+
     def _start_update_recently_accessed(self) -> Deferred:
         return run_as_background_process(
             "update_recently_accessed_media", self._update_recently_accessed
         )
 
+    def _start_apply_media_retention_rules(self) -> Deferred:
+        return run_as_background_process(
+            "apply_media_retention_rules", self._apply_media_retention_rules
+        )
+
     async def _update_recently_accessed(self) -> None:
         remote_media = self.recently_accessed_remotes
         self.recently_accessed_remotes = set()
@@ -835,6 +865,45 @@ class MediaRepository:
 
         return {"width": m_width, "height": m_height}
 
+    async def _apply_media_retention_rules(self) -> None:
+        """
+        Purge old local and remote media according to the media retention rules
+        defined in the homeserver config.
+        """
+        # Purge remote media
+        if self._media_retention_remote_media_lifetime_ms is not None:
+            # Calculate a threshold timestamp derived from the configured lifetime. Any
+            # media that has not been accessed since this timestamp will be removed.
+            remote_media_threshold_timestamp_ms = (
+                self.clock.time_msec() - self._media_retention_remote_media_lifetime_ms
+            )
+
+            logger.info(
+                "Purging remote media last accessed before"
+                f" {remote_media_threshold_timestamp_ms}"
+            )
+
+            await self.delete_old_remote_media(
+                before_ts=remote_media_threshold_timestamp_ms
+            )
+
+        # And now do the same for local media
+        if self._media_retention_local_media_lifetime_ms is not None:
+            # This works the same as the remote media threshold
+            local_media_threshold_timestamp_ms = (
+                self.clock.time_msec() - self._media_retention_local_media_lifetime_ms
+            )
+
+            logger.info(
+                "Purging local media last accessed before"
+                f" {local_media_threshold_timestamp_ms}"
+            )
+
+            await self.delete_old_local_media(
+                before_ts=local_media_threshold_timestamp_ms,
+                keep_profiles=True,
+            )
+
     async def delete_old_remote_media(self, before_ts: int) -> Dict[str, int]:
         old_media = await self.store.get_remote_media_before(before_ts)
 
diff --git a/synapse/rest/media/v1/preview_html.py b/synapse/rest/media/v1/preview_html.py
index ca73965fc2..13ec7ab533 100644
--- a/synapse/rest/media/v1/preview_html.py
+++ b/synapse/rest/media/v1/preview_html.py
@@ -246,7 +246,9 @@ def parse_html_description(tree: "etree.Element") -> Optional[str]:
     Grabs any text nodes which are inside the <body/> tag, unless they are
     within an HTML5 semantic markup tag (<header/>,
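
For reference, the media retention rules added to `MediaRepository` above reduce to
simple timestamp arithmetic: media whose last access falls before `now - lifetime`
is purged on the next check. A minimal sketch of that calculation, using
hypothetical lifetimes (the real values come from the homeserver's media config,
surfaced as `media_retention_local_media_lifetime_ms` and
`media_retention_remote_media_lifetime_ms`; nothing here is a Synapse default):

    # Sketch of the retention threshold arithmetic used by
    # _apply_media_retention_rules. All concrete values are illustrative.
    ONE_DAY_MS = 24 * 60 * 60 * 1000

    def retention_threshold_ms(now_ms: int, lifetime_ms: int) -> int:
        """Media last accessed before the returned timestamp is purged."""
        return now_ms - lifetime_ms

    now_ms = 1_654_041_600_000            # e.g. what clock.time_msec() returns
    remote_lifetime_ms = 14 * ONE_DAY_MS  # hypothetical 14-day remote cache
    before_ts = retention_threshold_ms(now_ms, remote_lifetime_ms)
    # delete_old_remote_media(before_ts=before_ts) would then remove any
    # cached remote media not accessed since that timestamp.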