Compare commits

2 commits: squah/test ... dmr/cut-sc

| Author | SHA1 | Date |
|---|---|---|
| | 5452fdc269 | |
| | 5f11d0cd7e | |
@@ -4,12 +4,8 @@
# things to include
!docker
!synapse
!rust
!README.rst
!pyproject.toml
!poetry.lock
!build_rust.py

rust/target

**/__pycache__
34  .github/workflows/latest_deps.yml  vendored

@@ -5,7 +5,7 @@
#
# As an overview this workflow:
# - checks out develop,
# - installs from source, pulling in the dependencies like a fresh `pip install` would, and
# - installs from source, pulling in the dependencies like a fresh `pip install` would, and
# - runs mypy and test suites in that checkout.
#
# Based on the twisted trunk CI job.
@@ -26,19 +26,12 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- uses: Swatinem/rust-cache@v2

# The dev dependencies aren't exposed in the wheel metadata (at least with current
# poetry-core versions), so we install with poetry.
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
poetry-version: "1.2.0"
poetry-version: "1.2.0b1"
extras: "all"
# Dump installed versions for debugging.
- run: poetry run pip list > before.txt
@@ -60,14 +53,6 @@ jobs:

steps:
- uses: actions/checkout@v2

- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- uses: Swatinem/rust-cache@v2

- run: sudo apt-get -qq install xmlsec1
- name: Set up PostgreSQL ${{ matrix.postgres-version }}
if: ${{ matrix.postgres-version }}
@@ -84,12 +69,6 @@ jobs:
if: ${{ matrix.postgres-version }}
timeout-minutes: 2
run: until pg_isready -h localhost; do sleep 1; done

# We nuke the local copy, as we've installed synapse into the virtualenv
# (rather than use an editable install, which we no longer support). If we
# don't do this then python can't find the native lib.
- run: rm -rf synapse/

- run: python -m twisted.trial --jobs=2 tests
env:
SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
@@ -134,14 +113,6 @@ jobs:

steps:
- uses: actions/checkout@v2

- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- uses: Swatinem/rust-cache@v2

- name: Ensure sytest runs `pip install`
# Delete the lockfile so sytest will `pip install` rather than `poetry install`
run: rm /src/poetry.lock
@@ -216,3 +187,4 @@ jobs:
with:
update_existing: true
filename: .ci/latest_deps_build_failed_issue_template.md
65  .github/workflows/release-artifacts.yml  vendored

@@ -15,7 +15,7 @@ on:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true

permissions:
contents: write

@@ -89,67 +89,9 @@ jobs:
name: debs
path: debs/*

build-wheels:
name: Build wheels on ${{ matrix.os }}
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-20.04, macos-10.15]
is_pr:
- ${{ startsWith(github.ref, 'refs/pull/') }}

exclude:
# Don't build macos wheels on PR CI.
- is_pr: true
os: "macos-10.15"

steps:
- uses: actions/checkout@v3

- uses: actions/setup-python@v3

- name: Install cibuildwheel
run: python -m pip install cibuildwheel==2.9.0 poetry==1.2.0

# Only build a single wheel in CI.
- name: Set env vars.
run: |
echo "CIBW_BUILD="cp37-manylinux_x86_64"" >> $GITHUB_ENV
if: startsWith(github.ref, 'refs/pull/')

- name: Build wheels
run: python -m cibuildwheel --output-dir wheelhouse
env:
# Skip testing for platforms which various libraries don't have wheels
# for, and so need extra build deps.
CIBW_TEST_SKIP: pp39-* *i686* *musl* pp37-macosx*

- uses: actions/upload-artifact@v3
with:
name: Wheel
path: ./wheelhouse/*.whl

build-sdist:
name: Build sdist
runs-on: ubuntu-latest
if: ${{ !startsWith(github.ref, 'refs/pull/') }}

steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: '3.10'

- run: pip install build

- name: Build sdist
run: python -m build --sdist

- uses: actions/upload-artifact@v2
with:
name: Sdist
path: dist/*.tar.gz

name: "Build pypi distribution files"
uses: "matrix-org/backend-meta/.github/workflows/packaging.yml@v1"

# if it's a tag, create a release and attach the artifacts to it
attach-assets:
@@ -157,7 +99,6 @@ jobs:
if: ${{ !failure() && !cancelled() && startsWith(github.ref, 'refs/tags/') }}
needs:
- build-debs
- build-wheels
- build-sdist
runs-on: ubuntu-latest
steps:
107  .github/workflows/tests.yml  vendored

@@ -10,23 +10,6 @@ concurrency:
cancel-in-progress: true

jobs:
# Job to detect what has changed so we don't run e.g. Rust checks on PRs that
# don't modify Rust code.
changes:
runs-on: ubuntu-latest
outputs:
rust: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.rust }}
steps:
- uses: dorny/paths-filter@v2
id: filter
# We only check on PRs
if: startsWith(github.ref, 'refs/pull/')
with:
filters: |
rust:
- 'rust/**'
- 'Cargo.toml'

check-sampleconfig:
runs-on: ubuntu-latest
steps:
@@ -82,54 +65,10 @@ jobs:
extras: "all"
- run: poetry run scripts-dev/check_pydantic_models.py

lint-clippy:
runs-on: ubuntu-latest
needs: changes
if: ${{ needs.changes.outputs.rust == 'true' }}

steps:
- uses: actions/checkout@v2

- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: 1.61.0
override: true
components: clippy
- uses: Swatinem/rust-cache@v2

- run: cargo clippy

lint-rustfmt:
runs-on: ubuntu-latest
needs: changes
if: ${{ needs.changes.outputs.rust == 'true' }}

steps:
- uses: actions/checkout@v2

- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: 1.61.0
override: true
components: rustfmt
- uses: Swatinem/rust-cache@v2

- run: cargo fmt --check

# Dummy step to gate other tests on without repeating the whole list
linting-done:
if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
needs:
- lint
- lint-crlf
- lint-newsfile
- lint-pydantic
- check-sampleconfig
- check-schema-delta
- lint-clippy
- lint-rustfmt
needs: [lint, lint-crlf, lint-newsfile, lint-pydantic, check-sampleconfig, check-schema-delta]
runs-on: ubuntu-latest
steps:
- run: "true"
@@ -200,13 +139,6 @@ jobs:
steps:
- uses: actions/checkout@v2

- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: 1.61.0
override: true
- uses: Swatinem/rust-cache@v2

# There aren't wheels for some of the older deps, so we need to install
# their build dependencies
- run: |
@@ -243,7 +175,7 @@ jobs:
python-version: '3.7'
extras: "all test"

- run: poetry run trial -j2 tests
- run: poetry run trial -j 2 tests
- name: Dump logs
# Logs are most useful when the command fails, always include them.
if: ${{ always() }}
@@ -315,14 +247,6 @@ jobs:
- uses: actions/checkout@v2
- name: Prepare test blacklist
run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers

- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: 1.61.0
override: true
- uses: Swatinem/rust-cache@v2

- name: Run SyTest
run: /bootstrap.sh synapse
working-directory: /src
@@ -429,13 +353,6 @@ jobs:
with:
path: synapse

- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: 1.61.0
override: true
- uses: Swatinem/rust-cache@v2

- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh

@@ -445,25 +362,6 @@ jobs:
shell: bash
name: Run Complement Tests

cargo-test:
if: ${{ needs.changes.outputs.rust == 'true' }}
runs-on: ubuntu-latest
needs:
- linting-done
- changes

steps:
- uses: actions/checkout@v2

- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: 1.61.0
override: true
- uses: Swatinem/rust-cache@v2

- run: cargo test

# a job which marks all the other jobs as complete, thus allowing PRs to be merged.
tests-done:
if: ${{ always() }}
@@ -478,7 +376,6 @@ jobs:
- export-data
- portdb
- complement
- cargo-test
runs-on: ubuntu-latest
steps:
- uses: matrix-org/done-action@v2
24  .github/workflows/twisted_trunk.yml  vendored

@@ -16,14 +16,6 @@ jobs:

steps:
- uses: actions/checkout@v2

- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- uses: Swatinem/rust-cache@v2

- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
@@ -42,14 +34,6 @@ jobs:
steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1

- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- uses: Swatinem/rust-cache@v2

- uses: matrix-org/setup-python-poetry@v1
with:
python-version: "3.x"
@@ -82,14 +66,6 @@ jobs:

steps:
- uses: actions/checkout@v2

- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- uses: Swatinem/rust-cache@v2

- name: Patch dependencies
# Note: The poetry commands want to create a virtualenv in /src/.venv/,
# but the sytest-synapse container expects it to be in /venv/.
7  .gitignore  vendored

@@ -60,10 +60,3 @@ book/
# complement
/complement-*
/master.tar.gz

# rust
/target/
/synapse/*.so

# Poetry will create a setup.py, which we don't want to include.
/setup.py
@@ -1,5 +0,0 @@
# We make the whole Synapse folder a workspace so that we can run `cargo`
# commands from the root (rather than having to cd into rust/).

[workspace]
members = ["rust"]
@@ -1,20 +0,0 @@
# A build script for poetry that adds the rust extension.

import os
from typing import Any, Dict

from setuptools_rust import Binding, RustExtension


def build(setup_kwargs: Dict[str, Any]) -> None:
    original_project_dir = os.path.dirname(os.path.realpath(__file__))
    cargo_toml_path = os.path.join(original_project_dir, "rust", "Cargo.toml")

    extension = RustExtension(
        target="synapse.synapse_rust",
        path=cargo_toml_path,
        binding=Binding.PyO3,
        py_limited_api=True,
    )
    setup_kwargs.setdefault("rust_extensions", []).append(extension)
    setup_kwargs["zip_safe"] = False
@@ -1 +0,0 @@
Add a stub Rust crate.
@@ -1 +0,0 @@
Fix a bug introduced in Synapse v1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`).
@@ -1 +0,0 @@
Add admin APIs to fetch messages within a particular window of time.
@@ -1 +0,0 @@
Cancel the processing of key query requests when they time out.
@@ -1 +0,0 @@
Improve validation of request bodies for the following client-server API endpoints: [`/account/3pid/msisdn/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidmsisdnrequesttoken) and [`/org.matrix.msc3720/account_status`](https://github.com/matrix-org/matrix-spec-proposals/blob/babolivier/user_status/proposals/3720-account-status.md#post-_matrixclientv1account_status).
@@ -1 +0,0 @@
Rename the `EventFormatVersions` enum values so that they line up with room version numbers.
@@ -1 +0,0 @@
Add experimental configuration option to allow disabling legacy Prometheus metric names.
@@ -1 +0,0 @@
Add experimental configuration option to allow disabling legacy Prometheus metric names.
@@ -1 +0,0 @@
Update trial old deps CI to use poetry 1.2.0.
@@ -1 +0,0 @@
Fix a mistake in the config manual: the `event_cache_size` _is_ scaled by `caches.global_factor`. The documentation was incorrect since Synapse 1.22.
@@ -1 +0,0 @@
Define Synapse's compatibility policy for SQLite versions.
@@ -1 +0,0 @@
Instrument `get_metadata_for_events` for understandable traces in Jaeger.
@@ -1 +0,0 @@
Add a stub Rust crate.
@@ -1 +0,0 @@
Add a stub Rust crate.
@@ -1 +0,0 @@
Fix a bug where Synapse fails to start if a signing key file contains an empty line.
@@ -1 +0,0 @@
Document the timestamp when a user accepts the consent, if [consent tracking](https://matrix-org.github.io/synapse/latest/consent_tracking.html) is used.
@@ -1 +0,0 @@
Add a stub Rust crate.
@@ -1 +0,0 @@
Fix a long standing bug where Synapse would fail to handle malformed user IDs or room aliases gracefully in certain cases.
@@ -1 +0,0 @@
Avoid raising an error due to malformed user IDs in `get_current_hosts_in_room`. Malformed user IDs cannot currently join a room, so this error would not be hit.
@@ -1 +0,0 @@
Fix a long standing bug where device lists would remain cached when remote users left and rejoined the last room shared with the local homeserver.
@@ -1 +0,0 @@
Update the docstrings for `get_users_in_room` and `get_current_hosts_in_room` to explain the impact of partial state.
@@ -1 +0,0 @@
Use an additional database query when persisting receipts.
@@ -1 +0,0 @@
Re-type hint some collections as read-only.
@@ -1 +0,0 @@
Remove unused Prometheus recording rules from `synapse-v2.rules` and add comments describing where the rest are used.
@@ -1 +0,0 @@
Synapse will now refuse to start if configured to use SQLite < 3.27.
@@ -1 +0,0 @@
Add a stub Rust crate.
21  contrib/prometheus/synapse-v1.rules  Normal file

@@ -0,0 +1,21 @@
synapse_federation_transaction_queue_pendingEdus:total = sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)
synapse_federation_transaction_queue_pendingPdus:total = sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)

synapse_http_server_request_count:method{servlet=""} = sum(synapse_http_server_request_count) by (method)
synapse_http_server_request_count:servlet{method=""} = sum(synapse_http_server_request_count) by (servlet)

synapse_http_server_request_count:total{servlet=""} = sum(synapse_http_server_request_count:by_method) by (servlet)

synapse_cache:hit_ratio_5m = rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])
synapse_cache:hit_ratio_30s = rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])

synapse_federation_client_sent{type="EDU"} = synapse_federation_client_sent_edus + 0
synapse_federation_client_sent{type="PDU"} = synapse_federation_client_sent_pdu_destinations:count + 0
synapse_federation_client_sent{type="Query"} = sum(synapse_federation_client_sent_queries) by (job)

synapse_federation_server_received{type="EDU"} = synapse_federation_server_received_edus + 0
synapse_federation_server_received{type="PDU"} = synapse_federation_server_received_pdus + 0
synapse_federation_server_received{type="Query"} = sum(synapse_federation_server_received_queries) by (job)

synapse_federation_transaction_queue_pending{type="EDU"} = synapse_federation_transaction_queue_pending_edus + 0
synapse_federation_transaction_queue_pending{type="PDU"} = synapse_federation_transaction_queue_pending_pdus + 0
@@ -1,35 +1,55 @@
groups:
- name: synapse
rules:
# These 3 rules are used in the included Prometheus console
- record: "synapse_federation_transaction_queue_pendingEdus:total"
expr: "sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)"
- record: "synapse_federation_transaction_queue_pendingPdus:total"
expr: "sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)"
- record: 'synapse_http_server_request_count:method'
labels:
servlet: ""
expr: "sum(synapse_http_server_request_count) by (method)"
- record: 'synapse_http_server_request_count:servlet'
labels:
method: ""
expr: 'sum(synapse_http_server_request_count) by (servlet)'

- record: 'synapse_http_server_request_count:total'
labels:
servlet: ""
expr: 'sum(synapse_http_server_request_count:by_method) by (servlet)'

- record: 'synapse_cache:hit_ratio_5m'
expr: 'rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])'
- record: 'synapse_cache:hit_ratio_30s'
expr: 'rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])'

- record: 'synapse_federation_client_sent'
labels:
type: "EDU"
expr: 'synapse_federation_client_sent_edus_total + 0'
expr: 'synapse_federation_client_sent_edus + 0'
- record: 'synapse_federation_client_sent'
labels:
type: "PDU"
expr: 'synapse_federation_client_sent_pdu_destinations_count_total + 0'
expr: 'synapse_federation_client_sent_pdu_destinations:count + 0'
- record: 'synapse_federation_client_sent'
labels:
type: "Query"
expr: 'sum(synapse_federation_client_sent_queries) by (job)'

# These 3 rules are used in the included Prometheus console
- record: 'synapse_federation_server_received'
labels:
type: "EDU"
expr: 'synapse_federation_server_received_edus_total + 0'
expr: 'synapse_federation_server_received_edus + 0'
- record: 'synapse_federation_server_received'
labels:
type: "PDU"
expr: 'synapse_federation_server_received_pdus_total + 0'
expr: 'synapse_federation_server_received_pdus + 0'
- record: 'synapse_federation_server_received'
labels:
type: "Query"
expr: 'sum(synapse_federation_server_received_queries) by (job)'

# These 2 rules are used in the included Prometheus console
- record: 'synapse_federation_transaction_queue_pending'
labels:
type: "EDU"
@@ -39,25 +59,20 @@ groups:
type: "PDU"
expr: 'synapse_federation_transaction_queue_pending_pdus + 0'

# These 3 rules are used in the included Grafana dashboard
- record: synapse_storage_events_persisted_by_source_type
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_type="remote"})
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_type="remote"})
labels:
type: remote
- record: synapse_storage_events_persisted_by_source_type
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_entity="*client*",origin_type="local"})
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_entity="*client*",origin_type="local"})
labels:
type: local
- record: synapse_storage_events_persisted_by_source_type
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_entity!="*client*",origin_type="local"})
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep{origin_entity!="*client*",origin_type="local"})
labels:
type: bridges

# This rule is used in the included Grafana dashboard
- record: synapse_storage_events_persisted_by_event_type
expr: sum without(origin_entity, origin_type) (synapse_storage_events_persisted_events_sep_total)

# This rule is used in the included Grafana dashboard
expr: sum without(origin_entity, origin_type) (synapse_storage_events_persisted_events_sep)
- record: synapse_storage_events_persisted_by_origin
expr: sum without(type) (synapse_storage_events_persisted_events_sep_total)
expr: sum without(type) (synapse_storage_events_persisted_events_sep)
7  debian/build_virtualenv  vendored

@@ -61,7 +61,7 @@ dh_virtualenv \
--extras="all,systemd,test" \
--requirements="exported_requirements.txt"

PACKAGE_BUILD_DIR="$(pwd)/debian/matrix-synapse-py3"
PACKAGE_BUILD_DIR="debian/matrix-synapse-py3"
VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"
TARGET_PYTHON="${VIRTUALENV_DIR}/bin/python"

@@ -78,14 +78,9 @@ case "$DEB_BUILD_OPTIONS" in

cp -r tests "$tmpdir"

# To avoid pulling in the unbuilt Synapse in the local directory
pushd /

PYTHONPATH="$tmpdir" \
"${TARGET_PYTHON}" -m twisted.trial --reporter=text -j2 tests

popd

;;
esac
4  debian/changelog  vendored

@@ -12,15 +12,11 @@ matrix-synapse-py3 (1.66.0) stable; urgency=medium

matrix-synapse-py3 (1.66.0~rc2+nmu1) UNRELEASED; urgency=medium

[ Jörg Behrmann ]
* Update debhelper to compatibility level 12.
* Drop the preinst script stopping synapse.
* Allocate a group for the system user.
* Change dpkg-statoverride to --force-statoverride-add.

[ Erik Johnston ]
* Disable `dh_auto_configure` as it broke during Rust build.

-- Jörg Behrmann <behrmann@physik.fu-berlin.de>  Tue, 23 Aug 2022 17:17:00 +0100

matrix-synapse-py3 (1.66.0~rc2) stable; urgency=medium
2  debian/rules  vendored

@@ -12,8 +12,6 @@ override_dh_installsystemd:
# we don't really want to strip the symbols from our object files.
override_dh_strip:

override_dh_auto_configure:

# many libraries pulled from PyPI have allocatable sections after
# non-allocatable ones on which dwz errors out. For those without the issue the
# gains are only marginal
@@ -92,20 +92,11 @@ RUN \
libxml++2.6-dev \
libxslt1-dev \
openssl \
rustc \
zlib1g-dev \
git \
curl \
&& rm -rf /var/lib/apt/lists/*

# Install rust and ensure it's in the PATH
ENV RUSTUP_HOME=/rust
ENV CARGO_HOME=/cargo
ENV PATH=/cargo/bin:/rust/bin:$PATH
RUN mkdir /rust /cargo

RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable

# To speed up rebuilds, install all of the dependencies before we copy over
# the whole synapse project, so that this layer in the Docker cache can be
# used while you develop on the source
@@ -117,9 +108,8 @@ RUN --mount=type=cache,target=/root/.cache/pip \

# Copy over the rest of the synapse source code.
COPY synapse /synapse/synapse/
COPY rust /synapse/rust/
# ... and what we need to `pip install`.
COPY pyproject.toml README.rst build_rust.py /synapse/
COPY pyproject.toml README.rst /synapse/

# Repeat of earlier build argument declaration, as this is a new build stage.
ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE

@@ -72,7 +72,6 @@ RUN apt-get update -qq -o Acquire::Languages=none \
&& env DEBIAN_FRONTEND=noninteractive apt-get install \
-yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
build-essential \
curl \
debhelper \
devscripts \
libsystemd-dev \
@@ -86,15 +85,6 @@ RUN apt-get update -qq -o Acquire::Languages=none \
libpq-dev \
xmlsec1

# Install rust and ensure it's in the PATH
ENV RUSTUP_HOME=/rust
ENV CARGO_HOME=/cargo
ENV PATH=/cargo/bin:/rust/bin:$PATH
RUN mkdir /rust /cargo

RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable

COPY --from=builder /dh-virtualenv_1.2.2-1_all.deb /

# install dhvirtualenv. Update the apt cache again first, in case we got a
@@ -393,151 +393,6 @@ A response body like the following is returned:
}
```

# Room Messages API

The Room Messages admin API allows server admins to get all messages
sent to a room in a given timeframe. There are various parameters available
that allow for filtering and ordering the returned list. This API supports pagination.

To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api).

This endpoint mirrors the [Matrix Spec defined Messages API](https://spec.matrix.org/v1.1/client-server-api/#get_matrixclientv3roomsroomidmessages).

The API is:
```
GET /_synapse/admin/v1/rooms/<room_id>/messages
```

**Parameters**

The following path parameters are required:

* `room_id` - The ID of the room you wish to fetch messages from.

The following query parameters are available:

* `from` (required) - The token to start returning events from. This token can be obtained from a prev_batch
  or next_batch token returned by the /sync endpoint, or from an end token returned by a previous request to this endpoint.
* `to` - The token to stop returning events at.
* `limit` - The maximum number of events to return. Defaults to `10`.
* `filter` - A JSON RoomEventFilter to filter returned events with.
* `dir` - The direction to return events from. Either `f` for forwards or `b` for backwards. Setting
  this value to `b` will reverse the above sort order. Defaults to `f`.

**Response**

The following fields are possible in the JSON response body:

* `chunk` - A list of room events. The order depends on the dir parameter.
  Note that an empty chunk does not necessarily imply that no more events are available. Clients should continue to paginate until no end property is returned.
* `end` - A token corresponding to the end of chunk. This token can be passed back to this endpoint to request further events.
  If no further events are available, this property is omitted from the response.
* `start` - A token corresponding to the start of chunk.
* `state` - A list of state events relevant to showing the chunk.

**Example**

For more details on each chunk, read [the Matrix specification](https://spec.matrix.org/v1.1/client-server-api/#get_matrixclientv3roomsroomidmessages).

```json
{
  "chunk": [
    {
      "content": {
        "body": "This is an example text message",
        "format": "org.matrix.custom.html",
        "formatted_body": "<b>This is an example text message</b>",
        "msgtype": "m.text"
      },
      "event_id": "$143273582443PhrSn:example.org",
      "origin_server_ts": 1432735824653,
      "room_id": "!636q39766251:example.com",
      "sender": "@example:example.org",
      "type": "m.room.message",
      "unsigned": {
        "age": 1234
      }
    },
    {
      "content": {
        "name": "The room name"
      },
      "event_id": "$143273582443PhrSn:example.org",
      "origin_server_ts": 1432735824653,
      "room_id": "!636q39766251:example.com",
      "sender": "@example:example.org",
      "state_key": "",
      "type": "m.room.name",
      "unsigned": {
        "age": 1234
      }
    },
    {
      "content": {
        "body": "Gangnam Style",
        "info": {
          "duration": 2140786,
          "h": 320,
          "mimetype": "video/mp4",
          "size": 1563685,
          "thumbnail_info": {
            "h": 300,
            "mimetype": "image/jpeg",
            "size": 46144,
            "w": 300
          },
          "thumbnail_url": "mxc://example.org/FHyPlCeYUSFFxlgbQYZmoEoe",
          "w": 480
        },
        "msgtype": "m.video",
        "url": "mxc://example.org/a526eYUSFFxlgbQYZmo442"
      },
      "event_id": "$143273582443PhrSn:example.org",
      "origin_server_ts": 1432735824653,
      "room_id": "!636q39766251:example.com",
      "sender": "@example:example.org",
      "type": "m.room.message",
      "unsigned": {
        "age": 1234
      }
    }
  ],
  "end": "t47409-4357353_219380_26003_2265",
  "start": "t47429-4392820_219380_26003_2265"
}
```
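
As an illustration only (not part of the upstream documentation), the following Python sketch pages through this endpoint until the `end` token disappears from the response. It assumes the `requests` library and a hypothetical homeserver URL, admin access token and room ID:

```python
import requests  # assumed available; any HTTP client would do

BASE = "https://synapse.example.com"   # hypothetical homeserver URL
TOKEN = "<admin access token>"         # a server admin's access_token, as described above
ROOM_ID = "!636q39766251:example.com"  # room to read from


def fetch_all_messages(from_token: str):
    """Yield every event by paginating /messages until no `end` token is returned."""
    headers = {"Authorization": f"Bearer {TOKEN}"}
    token = from_token
    while token is not None:
        resp = requests.get(
            f"{BASE}/_synapse/admin/v1/rooms/{ROOM_ID}/messages",
            headers=headers,
            params={"from": token, "limit": 10, "dir": "f"},
        )
        resp.raise_for_status()
        body = resp.json()
        yield from body["chunk"]
        # `end` is omitted when there is nothing further to fetch.
        token = body.get("end")
```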

# Room Timestamp to Event API

The Room Timestamp to Event API endpoint fetches the `event_id` of the closest event to the given
timestamp (`ts` query parameter) in the given direction (`dir` query parameter).

Useful for cases like jump to date so you can start paginating messages from
a given date in the archive.

The API is:
```
GET /_synapse/admin/v1/rooms/<room_id>/timestamp_to_event
```

**Parameters**

The following path parameters are required:

* `room_id` - The ID of the room you wish to check.

The following query parameters are available:

* `ts` - a timestamp in milliseconds where we will find the closest event in
  the given direction.
* `dir` - can be `f` or `b` to indicate forwards and backwards in time from the
  given timestamp. Defaults to `f`.

**Response**

* `event_id` - converted from timestamp
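
Again purely illustrative, reusing the hypothetical `BASE`, `TOKEN` and `ROOM_ID` from the sketch above:

```python
import datetime
import requests  # assumed available

# Find the first event at or after 2022-09-01 00:00 UTC.
ts_ms = int(datetime.datetime(2022, 9, 1, tzinfo=datetime.timezone.utc).timestamp() * 1000)

resp = requests.get(
    f"{BASE}/_synapse/admin/v1/rooms/{ROOM_ID}/timestamp_to_event",
    headers={"Authorization": f"Bearer {TOKEN}"},
    params={"ts": ts_ms, "dir": "f"},
)
resp.raise_for_status()
print(resp.json()["event_id"])
```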

# Block Room API
The Block Room admin API allows server admins to block and unblock rooms,
and query to see if a given room is blocked.

@@ -42,7 +42,6 @@ It returns a JSON body like the following:
"appservice_id": null,
"consent_server_notice_sent": null,
"consent_version": null,
"consent_ts": null,
"external_ids": [
{
"auth_provider": "<provider1>",
@@ -365,7 +364,6 @@ The following actions are **NOT** performed. The list may be incomplete.
- Remove the user's creation (registration) timestamp
- [Remove rate limit overrides](#override-ratelimiting-for-users)
- Remove from monthly active users
- Remove user's consent information (consent version and timestamp)

## Reset password
@@ -1,9 +1,9 @@
Deprecation Policy for Platform Dependencies
============================================

Synapse has a number of platform dependencies, including Python, Rust,
PostgreSQL and SQLite. This document outlines the policy towards which versions
we support, and when we drop support for versions in the future.
Synapse has a number of platform dependencies, including Python and PostgreSQL.
This document outlines the policy towards which versions we support, and when we
drop support for versions in the future.

Policy
@@ -17,14 +17,6 @@ Details on the upstream support life cycles for Python and PostgreSQL are
documented at [https://endoflife.date/python](https://endoflife.date/python) and
[https://endoflife.date/postgresql](https://endoflife.date/postgresql).

A Rust compiler is required to build Synapse from source. For any given release
the minimum required version may be bumped up to a recent Rust version, and so
people building from source should ensure they can fetch recent versions of Rust
(e.g. by using [rustup](https://rustup.rs/)).

The oldest supported version of SQLite is the version
[provided](https://packages.debian.org/buster/libsqlite3-0) by
[Debian oldstable](https://wiki.debian.org/DebianOldStable).

Context
-------
@@ -39,15 +31,3 @@ long process.

By following the upstream support life cycles Synapse can ensure that its
dependencies continue to get security patches, while not requiring system admins
to constantly update their platform dependencies to the latest versions.

For Rust, the situation is a bit different given that a) the Rust foundation
does not generally support older Rust versions, and b) the library ecosystem
generally bumps their minimum supported Rust versions frequently. In general, the
Synapse team will try to avoid updating the dependency on Rust to the absolute
latest version, but introducing a formal policy is hard given the constraints of
the ecosystem.

On a similar note, SQLite does not generally have a concept of "supported
release"; bugfixes are published for the latest minor release only. We chose to
track Debian's oldstable as this is relatively conservative, predictably updated
and is consistent with the `.deb` packages released by Matrix.org.
@@ -28,9 +28,6 @@ The source code of Synapse is hosted on GitHub. You will also need [a recent ver

For some tests, you will need [a recent version of Docker](https://docs.docker.com/get-docker/).

A recent version of the Rust compiler is needed to build the native modules. The
easiest way of installing the latest version is to use [rustup](https://rustup.rs/).

# 3. Get the source.

@@ -117,11 +114,6 @@ Some documentation also exists in [Synapse's GitHub
Wiki](https://github.com/matrix-org/synapse/wiki), although this is primarily
contributed to by community authors.

When changes are made to any Rust code then you must call either `poetry install`
or `maturin develop` (if installed) to rebuild the Rust code. Using [`maturin`](https://github.com/PyO3/maturin)
is quicker than `poetry install`, so is recommended when making frequent
changes to the Rust code.

# 8. Test, test, test!
<a name="test-test-test"></a>
@@ -203,7 +195,7 @@ The database file can then be inspected with:
sqlite3 _trial_temp/test.db
```

Note that the database file is cleared at the beginning of each test run. Thus it
Note that the database file is cleared at the beginning of each test run. Thus it
will always only contain the data generated by the *last run test*. Though generally
when debugging, one is only running a single test anyway.
@@ -196,10 +196,6 @@ System requirements:
- Python 3.7 or later, up to Python 3.10.
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org

If building on an uncommon architecture for which pre-built wheels are
unavailable, you will need to have a recent Rust compiler installed. The easiest
way of installing the latest version is to use [rustup](https://rustup.rs/).

To install the Synapse homeserver run:

```sh

@@ -1069,10 +1069,8 @@ Options related to caching.
---
### `event_cache_size`

The number of events to cache in memory. Defaults to 10K. Like other caches,
this is affected by `caches.global_factor` (see below).

Note that this option is not part of the `caches` section.
The number of events to cache in memory. Not affected by
`caches.global_factor` and is not part of the `caches` section. Defaults to 10K.

Example configuration:
```yaml
6  mypy.ini

@@ -16,8 +16,7 @@ files =
docker/,
scripts-dev/,
synapse/,
tests/,
build_rust.py
tests/

# Note: Better exclusion syntax coming in mypy > 0.910
# https://github.com/python/mypy/pull/11329
@@ -182,6 +181,3 @@ ignore_missing_imports = True

[mypy-incremental.*]
ignore_missing_imports = True

[mypy-setuptools_rust.*]
ignore_missing_imports = True
35  poetry.lock  generated

@@ -1035,18 +1035,6 @@ python-versions = ">=3.6"
cryptography = ">=2.0"
jeepney = ">=0.6"

[[package]]
name = "semantic-version"
version = "2.10.0"
description = "A library implementing the 'SemVer' scheme."
category = "main"
optional = false
python-versions = ">=2.7"

[package.extras]
dev = ["Django (>=1.11)", "check-manifest", "colorama (<=0.4.1)", "coverage", "flake8", "nose2", "readme-renderer (<25.0)", "tox", "wheel", "zest.releaser[recommended]"]
doc = ["Sphinx", "sphinx-rtd-theme"]

[[package]]
name = "sentry-sdk"
version = "1.5.11"
@@ -1111,19 +1099,6 @@ docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-g
testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mock", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"]

[[package]]
name = "setuptools-rust"
version = "1.5.1"
description = "Setuptools Rust extension plugin"
category = "main"
optional = false
python-versions = ">=3.7"

[package.dependencies]
semantic-version = ">=2.8.2,<3"
setuptools = ">=62.4"
typing-extensions = ">=3.7.4.3"

[[package]]
name = "signedjson"
version = "1.1.4"
@@ -1625,7 +1600,7 @@ url_preview = ["lxml"]
[metadata]
lock-version = "1.1"
python-versions = "^3.7.1"
content-hash = "79cfa09d59f9f8b5ef24318fb860df1915f54328692aa56d04331ecbdd92a8cb"
content-hash = "7de518bf27967b3547eab8574342cfb67f87d6b47b4145c13de11112141dbf2d"

[metadata.files]
attrs = [
@@ -2497,10 +2472,6 @@ secretstorage = [
{file = "SecretStorage-3.3.1-py3-none-any.whl", hash = "sha256:422d82c36172d88d6a0ed5afdec956514b189ddbfb72fefab0c8a1cee4eaf71f"},
{file = "SecretStorage-3.3.1.tar.gz", hash = "sha256:fd666c51a6bf200643495a04abb261f83229dcb6fd8472ec393df7ffc8b6f195"},
]
semantic-version = [
{file = "semantic_version-2.10.0-py2.py3-none-any.whl", hash = "sha256:de78a3b8e0feda74cabc54aab2da702113e33ac9d9eb9d2389bcf1f58b7d9177"},
{file = "semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c"},
]
sentry-sdk = [
{file = "sentry-sdk-1.5.11.tar.gz", hash = "sha256:6c01d9d0b65935fd275adc120194737d1df317dce811e642cbf0394d0d37a007"},
{file = "sentry_sdk-1.5.11-py2.py3-none-any.whl", hash = "sha256:c17179183cac614e900cbd048dab03f49a48e2820182ec686c25e7ce46f8548f"},
@@ -2513,10 +2484,6 @@ setuptools = [
{file = "setuptools-65.3.0-py3-none-any.whl", hash = "sha256:2e24e0bec025f035a2e72cdd1961119f557d78ad331bb00ff82efb2ab8da8e82"},
{file = "setuptools-65.3.0.tar.gz", hash = "sha256:7732871f4f7fa58fb6bdcaeadb0161b2bd046c85905dbaa066bdcbcc81953b57"},
]
setuptools-rust = [
{file = "setuptools-rust-1.5.1.tar.gz", hash = "sha256:0e05e456645d59429cb1021370aede73c0760e9360bbfdaaefb5bced530eb9d7"},
{file = "setuptools_rust-1.5.1-py3-none-any.whl", hash = "sha256:306b236ff3aa5229180e58292610d0c2c51bb488191122d2fc559ae4caeb7d5e"},
]
signedjson = [
{file = "signedjson-1.1.4-py3-none-any.whl", hash = "sha256:45569ec54241c65d2403fe3faf7169be5322547706a231e884ca2b427f23d228"},
{file = "signedjson-1.1.4.tar.gz", hash = "sha256:cd91c56af53f169ef032c62e9c4a3292dc158866933318d0592e3462db3d6492"},
@@ -52,9 +52,6 @@ include_trailing_comma = true
combine_as_imports = true
skip_gitignore = true

[tool.maturin]
manifest-path = "rust/Cargo.toml"

[tool.poetry]
name = "matrix-synapse"
version = "1.66.0"
@@ -85,16 +82,7 @@ include = [
{ path = "sytest-blacklist", format = "sdist" },
{ path = "tests", format = "sdist" },
{ path = "UPGRADE.rst", format = "sdist" },
{ path = "Cargo.toml", format = "sdist" },
{ path = "rust/Cargo.toml", format = "sdist" },
{ path = "rust/Cargo.lock", format = "sdist" },
{ path = "rust/src/**", format = "sdist" },
]
exclude = [
{ path = "synapse/*.so", format = "sdist"}
]

build = "build_rust.py"

[tool.poetry.scripts]
synapse_homeserver = "synapse.app.homeserver:main"
@@ -138,7 +126,7 @@ pyOpenSSL = ">=16.0.0"
PyYAML = ">=3.11"
pyasn1 = ">=0.1.9"
pyasn1-modules = ">=0.0.7"
bcrypt = ">=3.1.7"
bcrypt = ">=3.1.0"
Pillow = ">=5.4.0"
sortedcontainers = ">=1.4.4"
pymacaroons = ">=0.13.0"
@@ -173,15 +161,6 @@ importlib_metadata = { version = ">=1.4", python = "<3.8" }
# This is the most recent version of Pydantic available on common distros.
pydantic = ">=1.7.4"

# This is for building the rust components during "poetry install", which
# currently ignores the `build-system.requires` directive (c.f.
# https://github.com/python-poetry/poetry/issues/6154). Both `pip install` and
# `poetry build` do the right thing without this explicit dependency.
#
# This isn't really a dev-dependency, as `poetry install --no-dev` will fail,
# but the alternative is to add it to the main list of deps where it isn't
# needed.
setuptools_rust = ">=1.3"

# Optional Dependencies
@@ -306,21 +285,5 @@ twine = "*"
towncrier = ">=18.6.0rc1"

[build-system]
requires = ["poetry-core>=1.0.0", "setuptools_rust>=1.3"]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"

[tool.cibuildwheel]
# Skip unsupported platforms (by us or by Rust).
skip = "cp36* *-musllinux_i686"

# We need a rust compiler
before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y"
environment= { PATH = "$PATH:$HOME/.cargo/bin" }

# For some reason if we don't manually clean the build directory we
# can end up polluting the next build with a .so that is for the wrong
# Python version.
before-build = "rm -rf {project}/build"
build-frontend = "build"
test-command = "python -c 'from synapse.synapse_rust import sum_as_string; print(sum_as_string(1, 2))'"
@@ -1,21 +0,0 @@
[package]
# We name the package `synapse` so that things like logging have the right
# logging target.
name = "synapse"

# dummy version. See pyproject.toml for Synapse's version number.
version = "0.1.0"

edition = "2021"
rust-version = "1.61.0"

[lib]
name = "synapse"
crate-type = ["cdylib"]

[package.metadata.maturin]
# This is where we tell maturin where to place the built library.
name = "synapse.synapse_rust"

[dependencies]
pyo3 = { version = "0.16.5", features = ["extension-module", "macros", "abi3", "abi3-py37"] }
@@ -1,16 +0,0 @@
use pyo3::prelude::*;

/// Formats the sum of two numbers as string.
#[pyfunction]
#[pyo3(text_signature = "(a, b, /)")]
fn sum_as_string(a: usize, b: usize) -> PyResult<String> {
    Ok((a + b).to_string())
}

/// The entry point for defining the Python module.
#[pymodule]
fn synapse_rust(_py: Python<'_>, m: &PyModule) -> PyResult<()> {
    m.add_function(wrap_pyfunction!(sum_as_string, m)?)?;

    Ok(())
}
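
For orientation only: once this extension is built (for example via `poetry install` or `maturin develop` in a checkout that still contains the `rust/` crate), the stub function is callable from Python. This mirrors the `test-command` in the `[tool.cibuildwheel]` section above.

```python
# Assumes the compiled extension module is importable, i.e. the Rust crate was
# built into the synapse package rather than removed as in this comparison.
from synapse.synapse_rust import sum_as_string

print(sum_as_string(1, 2))  # prints "3"
```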
@@ -147,7 +147,7 @@ python -m synapse.app.homeserver --generate-keys -c "$SQLITE_CONFIG"

# Make sure the SQLite3 database is using the latest schema and has no pending background update.
echo "Running db background jobs..."
synapse/_scripts/update_synapse_database.py --database-config --run-background-updates "$SQLITE_CONFIG"
synapse/_scripts/update_synapse_database.py --database-config "$SQLITE_CONFIG" --run-background-updates

# Create the PostgreSQL database.
echo "Creating postgres database..."
@@ -185,7 +185,7 @@ sqlite3 "$SQLITE_DB" <<< "$SQL"
psql "$POSTGRES_DB_NAME" -w <<< "$SQL"

echo "Dumping SQLite3 schema to '$OUTPUT_DIR/$SQLITE_FULL_SCHEMA_OUTPUT_FILE'..."
sqlite3 "$SQLITE_DB" ".dump" > "$OUTPUT_DIR/$SQLITE_FULL_SCHEMA_OUTPUT_FILE"
sqlite3 "$SQLITE_DB" ".schema --indent" > "$OUTPUT_DIR/$SQLITE_FULL_SCHEMA_OUTPUT_FILE"

echo "Dumping Postgres schema to '$OUTPUT_DIR/$POSTGRES_FULL_SCHEMA_OUTPUT_FILE'..."
pg_dump --format=plain --no-tablespaces --no-acl --no-owner $POSTGRES_DB_NAME | sed -e '/^--/d' -e 's/public\.//g' -e '/^SET /d' -e '/^SELECT /d' > "$OUTPUT_DIR/$POSTGRES_FULL_SCHEMA_OUTPUT_FILE"
@@ -1 +0,0 @@
def sum_as_string(a: int, b: int) -> str: ...
@@ -38,7 +38,6 @@ from synapse.logging.opentracing import (
trace,
)
from synapse.types import Requester, create_requester
from synapse.util.cancellation import cancellable

if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -119,7 +118,6 @@ class Auth:
errcode=Codes.NOT_JOINED,
)

@cancellable
async def get_user_by_req(
self,
request: SynapseRequest,
@@ -168,7 +166,6 @@ class Auth:
parent_span.set_tag("appservice_id", requester.app_service.id)
return requester

@cancellable
async def _wrapped_get_user_by_req(
self,
request: SynapseRequest,
@@ -284,7 +281,6 @@ class Auth:
403, "Application service has not registered this user (%s)" % user_id
)

@cancellable
async def _get_appservice_user(self, request: Request) -> Optional[Requester]:
"""
Given a request, reads the request parameters to determine:
@@ -527,7 +523,6 @@ class Auth:
return bool(query_params) or bool(auth_headers)

@staticmethod
@cancellable
def get_access_token_from_request(request: Request) -> str:
"""Extracts the access_token from the request.
@@ -19,23 +19,18 @@ import attr

class EventFormatVersions:
"""This is an internal enum for tracking the version of the event format,
independently of the room version.

To reduce confusion, the event format versions are named after the room
versions that they were used or introduced in.
The concept of an 'event format version' is specific to Synapse (the
specification does not mention this term.)
independently from the room version.
"""

ROOM_V1_V2 = 1  # $id:server event id format: used for room v1 and v2
ROOM_V3 = 2  # MSC1659-style $hash event id format: used for room v3
ROOM_V4_PLUS = 3  # MSC1884-style $hash format: introduced for room v4
V1 = 1  # $id:server event id format
V2 = 2  # MSC1659-style $hash event id format: introduced for room v3
V3 = 3  # MSC1884-style $hash format: introduced for room v4


KNOWN_EVENT_FORMAT_VERSIONS = {
EventFormatVersions.ROOM_V1_V2,
EventFormatVersions.ROOM_V3,
EventFormatVersions.ROOM_V4_PLUS,
EventFormatVersions.V1,
EventFormatVersions.V2,
EventFormatVersions.V3,
}
@@ -97,7 +92,7 @@ class RoomVersions:
V1 = RoomVersion(
"1",
RoomDisposition.STABLE,
EventFormatVersions.ROOM_V1_V2,
EventFormatVersions.V1,
StateResolutionVersions.V1,
enforce_key_validity=False,
special_case_aliases_auth=True,
@@ -115,7 +110,7 @@ class RoomVersions:
V2 = RoomVersion(
"2",
RoomDisposition.STABLE,
EventFormatVersions.ROOM_V1_V2,
EventFormatVersions.V1,
StateResolutionVersions.V2,
enforce_key_validity=False,
special_case_aliases_auth=True,
@@ -133,7 +128,7 @@ class RoomVersions:
V3 = RoomVersion(
"3",
RoomDisposition.STABLE,
EventFormatVersions.ROOM_V3,
EventFormatVersions.V2,
StateResolutionVersions.V2,
enforce_key_validity=False,
special_case_aliases_auth=True,
@@ -151,7 +146,7 @@ class RoomVersions:
V4 = RoomVersion(
"4",
RoomDisposition.STABLE,
EventFormatVersions.ROOM_V4_PLUS,
EventFormatVersions.V3,
StateResolutionVersions.V2,
enforce_key_validity=False,
special_case_aliases_auth=True,
@@ -169,7 +164,7 @@ class RoomVersions:
V5 = RoomVersion(
"5",
RoomDisposition.STABLE,
EventFormatVersions.ROOM_V4_PLUS,
EventFormatVersions.V3,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=True,
@@ -187,7 +182,7 @@ class RoomVersions:
V6 = RoomVersion(
"6",
RoomDisposition.STABLE,
EventFormatVersions.ROOM_V4_PLUS,
EventFormatVersions.V3,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
@@ -205,7 +200,7 @@ class RoomVersions:
MSC2176 = RoomVersion(
"org.matrix.msc2176",
RoomDisposition.UNSTABLE,
EventFormatVersions.ROOM_V4_PLUS,
EventFormatVersions.V3,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
@@ -223,7 +218,7 @@ class RoomVersions:
V7 = RoomVersion(
"7",
RoomDisposition.STABLE,
EventFormatVersions.ROOM_V4_PLUS,
EventFormatVersions.V3,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
@@ -241,7 +236,7 @@ class RoomVersions:
V8 = RoomVersion(
"8",
RoomDisposition.STABLE,
EventFormatVersions.ROOM_V4_PLUS,
EventFormatVersions.V3,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
@@ -259,7 +254,7 @@ class RoomVersions:
V9 = RoomVersion(
"9",
RoomDisposition.STABLE,
EventFormatVersions.ROOM_V4_PLUS,
EventFormatVersions.V3,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
@@ -277,7 +272,7 @@ class RoomVersions:
MSC3787 = RoomVersion(
"org.matrix.msc3787",
RoomDisposition.UNSTABLE,
EventFormatVersions.ROOM_V4_PLUS,
EventFormatVersions.V3,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
@@ -295,7 +290,7 @@ class RoomVersions:
V10 = RoomVersion(
"10",
RoomDisposition.STABLE,
EventFormatVersions.ROOM_V4_PLUS,
EventFormatVersions.V3,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
@@ -313,7 +308,7 @@ class RoomVersions:
MSC2716v4 = RoomVersion(
"org.matrix.msc2716v4",
RoomDisposition.UNSTABLE,
EventFormatVersions.ROOM_V4_PLUS,
EventFormatVersions.V3,
StateResolutionVersions.V2,
enforce_key_validity=True,
special_case_aliases_auth=False,
@@ -32,15 +32,15 @@ logger = logging.getLogger("synapse.app.homeserver")
_stats_process: List[Tuple[int, "resource.struct_rusage"]] = []

# Gauges to expose monthly active user control metrics
current_mau_gauge = Gauge("synapse_admin_mau_current", "Current MAU")
current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU")
current_mau_by_service_gauge = Gauge(
"synapse_admin_mau_current_mau_by_service",
"Current MAU by service",
["app_service"],
)
max_mau_gauge = Gauge("synapse_admin_mau_max", "MAU Limit")
max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit")
registered_reserved_users_mau_gauge = Gauge(
"synapse_admin_mau_registered_reserved_users",
"synapse_admin_mau:registered_reserved_users",
"Registered users with reserved threepids",
)
@@ -217,18 +217,7 @@ class KeyConfig(Config):

signing_keys = self.read_file(signing_key_path, name)
try:
loaded_signing_keys = read_signing_keys(
[
signing_key_line
for signing_key_line in signing_keys.splitlines(keepends=False)
if signing_key_line.strip()
]
)

if not loaded_signing_keys:
raise ConfigError(f"No signing keys in file {signing_key_path}")

return loaded_signing_keys
return read_signing_keys(signing_keys.splitlines(True))
except Exception as e:
raise ConfigError("Error reading %s: %s" % (name, str(e)))
@@ -109,7 +109,7 @@ def validate_event_for_room_version(event: "EventBase") -> None:
if not is_invite_via_3pid:
raise AuthError(403, "Event not signed by sender's server")

if event.format_version in (EventFormatVersions.ROOM_V1_V2,):
if event.format_version in (EventFormatVersions.V1,):
# Only older room versions have event IDs to check.
event_id_domain = get_domain_from_id(event.event_id)

@@ -716,7 +716,7 @@ def check_redaction(
if user_level >= redact_level:
return False

if room_version_obj.event_format == EventFormatVersions.ROOM_V1_V2:
if room_version_obj.event_format == EventFormatVersions.V1:
redacter_domain = get_domain_from_id(event.event_id)
if not isinstance(event.redacts, str):
return False

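Note: for context (a standalone sketch, not from the diff), the domain checks above rely on old-style event IDs carrying a server name after the first colon:

    def get_domain_from_id(string: str) -> str:
        # Mirrors the helper used above: everything after the first colon is the
        # server name, e.g. "$abc123:example.com" -> "example.com".
        idx = string.find(":")
        if idx == -1:
            raise ValueError(f"Invalid ID: {string!r}")
        return string[idx + 1:]

    assert get_domain_from_id("$abc123:example.com") == "example.com"
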
@@ -442,7 +442,7 @@ class EventBase(metaclass=abc.ABCMeta):


class FrozenEvent(EventBase):
format_version = EventFormatVersions.ROOM_V1_V2 # All events of this type are V1
format_version = EventFormatVersions.V1 # All events of this type are V1

def __init__(
self,
@@ -490,7 +490,7 @@ class FrozenEvent(EventBase):


class FrozenEventV2(EventBase):
format_version = EventFormatVersions.ROOM_V3 # All events of this type are V2
format_version = EventFormatVersions.V2 # All events of this type are V2

def __init__(
self,
@@ -567,7 +567,7 @@ class FrozenEventV2(EventBase):
class FrozenEventV3(FrozenEventV2):
"""FrozenEventV3, which differs from FrozenEventV2 only in the event_id format"""

format_version = EventFormatVersions.ROOM_V4_PLUS # All events of this type are V3
format_version = EventFormatVersions.V3 # All events of this type are V3

@property
def event_id(self) -> str:
@@ -597,11 +597,11 @@ def _event_type_from_format_version(
`FrozenEvent`
"""

if format_version == EventFormatVersions.ROOM_V1_V2:
if format_version == EventFormatVersions.V1:
return FrozenEvent
elif format_version == EventFormatVersions.ROOM_V3:
elif format_version == EventFormatVersions.V2:
return FrozenEventV2
elif format_version == EventFormatVersions.ROOM_V4_PLUS:
elif format_version == EventFormatVersions.V3:
return FrozenEventV3
else:
raise Exception("No event format %r" % (format_version,))

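Note: a hedged usage sketch, not part of the diff. make_event_from_dict is assumed here to be the public wrapper around the dispatch above; the event dict is a minimal stub:

    from synapse.api.room_versions import RoomVersions
    from synapse.events import make_event_from_dict

    # Picks FrozenEvent / FrozenEventV2 / FrozenEventV3 based on the room
    # version's event format, via _event_type_from_format_version.
    event = make_event_from_dict(
        {"type": "m.room.message", "content": {"body": "hi"}, "event_id": "$x:example.com"},
        RoomVersions.V1,
    )
    # For a v1 room this yields a FrozenEvent; v4+ rooms yield FrozenEventV3.
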
@@ -137,7 +137,7 @@ class EventBuilder:
|
||||
# The types of auth/prev events changes between event versions.
|
||||
prev_events: Union[List[str], List[Tuple[str, Dict[str, str]]]]
|
||||
auth_events: Union[List[str], List[Tuple[str, Dict[str, str]]]]
|
||||
if format_version == EventFormatVersions.ROOM_V1_V2:
|
||||
if format_version == EventFormatVersions.V1:
|
||||
auth_events = await self._store.add_event_hashes(auth_event_ids)
|
||||
prev_events = await self._store.add_event_hashes(prev_event_ids)
|
||||
else:
|
||||
@@ -253,7 +253,7 @@ def create_local_event_from_event_dict(
|
||||
|
||||
time_now = int(clock.time_msec())
|
||||
|
||||
if format_version == EventFormatVersions.ROOM_V1_V2:
|
||||
if format_version == EventFormatVersions.V1:
|
||||
event_dict["event_id"] = _create_event_id(clock, hostname)
|
||||
|
||||
event_dict["origin"] = hostname
|
||||
|
||||
@@ -45,7 +45,7 @@ class EventValidator:
|
||||
"""
|
||||
self.validate_builder(event)
|
||||
|
||||
if event.format_version == EventFormatVersions.ROOM_V1_V2:
|
||||
if event.format_version == EventFormatVersions.V1:
|
||||
EventID.from_string(event.event_id)
|
||||
|
||||
required = [
|
||||
|
||||
@@ -194,7 +194,7 @@ async def _check_sigs_on_pdu(
|
||||
# event id's domain (normally only the case for joins/leaves), and add additional
|
||||
# checks. Only do this if the room version has a concept of event ID domain
|
||||
# (ie, the room version uses old-style non-hash event IDs).
|
||||
if room_version.event_format == EventFormatVersions.ROOM_V1_V2:
|
||||
if room_version.event_format == EventFormatVersions.V1:
|
||||
event_domain = get_domain_from_id(pdu.event_id)
|
||||
if event_domain != sender_domain:
|
||||
try:
|
||||
|
||||
@@ -1190,7 +1190,7 @@ class FederationClient(FederationBase):
|
||||
# Otherwise, consider it a legitimate error and raise.
|
||||
err = e.to_synapse_error()
|
||||
if self._is_unknown_endpoint(e, err):
|
||||
if room_version.event_format != EventFormatVersions.ROOM_V1_V2:
|
||||
if room_version.event_format != EventFormatVersions.V1:
|
||||
raise SynapseError(
|
||||
400,
|
||||
"User's homeserver does not support this room version",
|
||||
|
||||
@@ -62,12 +62,12 @@ if TYPE_CHECKING:
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
sent_pdus_destination_dist_count = Counter(
|
||||
"synapse_federation_client_sent_pdu_destinations_count",
|
||||
"synapse_federation_client_sent_pdu_destinations:count",
|
||||
"Number of PDUs queued for sending to one or more destinations",
|
||||
)
|
||||
|
||||
sent_pdus_destination_dist_total = Counter(
|
||||
"synapse_federation_client_sent_pdu_destinations",
|
||||
"synapse_federation_client_sent_pdu_destinations:total",
|
||||
"Total number of PDUs queued for sending across all destinations",
|
||||
)
|
||||
|
||||
|
||||
@@ -70,7 +70,6 @@ class AdminHandler:
|
||||
"appservice_id",
|
||||
"consent_server_notice_sent",
|
||||
"consent_version",
|
||||
"consent_ts",
|
||||
"user_type",
|
||||
"is_guest",
|
||||
}
|
||||
|
||||
@@ -45,13 +45,13 @@ from synapse.types import (
|
||||
JsonDict,
|
||||
StreamKeyType,
|
||||
StreamToken,
|
||||
UserID,
|
||||
get_domain_from_id,
|
||||
get_verify_key_from_cross_signing_key,
|
||||
)
|
||||
from synapse.util import stringutils
|
||||
from synapse.util.async_helpers import Linearizer
|
||||
from synapse.util.caches.expiringcache import ExpiringCache
|
||||
from synapse.util.cancellation import cancellable
|
||||
from synapse.util.metrics import measure_func
|
||||
from synapse.util.retryutils import NotRetryingDestination
|
||||
|
||||
@@ -124,7 +124,6 @@ class DeviceWorkerHandler:
|
||||
|
||||
return device
|
||||
|
||||
@cancellable
|
||||
async def get_device_changes_in_shared_rooms(
|
||||
self, user_id: str, room_ids: Collection[str], from_token: StreamToken
|
||||
) -> Collection[str]:
|
||||
@@ -164,7 +163,6 @@ class DeviceWorkerHandler:
|
||||
|
||||
@trace
|
||||
@measure_func("device.get_user_ids_changed")
|
||||
@cancellable
|
||||
async def get_user_ids_changed(
|
||||
self, user_id: str, from_token: StreamToken
|
||||
) -> JsonDict:
|
||||
@@ -323,6 +321,8 @@ class DeviceHandler(DeviceWorkerHandler):
|
||||
self.device_list_updater.incoming_device_list_update,
|
||||
)
|
||||
|
||||
hs.get_distributor().observe("user_left_room", self.user_left_room)
|
||||
|
||||
# Whether `_handle_new_device_update_async` is currently processing.
|
||||
self._handle_new_device_update_is_processing = False
|
||||
|
||||
@@ -566,6 +566,14 @@ class DeviceHandler(DeviceWorkerHandler):
|
||||
StreamKeyType.DEVICE_LIST, position, users=[from_user_id]
|
||||
)
|
||||
|
||||
async def user_left_room(self, user: UserID, room_id: str) -> None:
|
||||
user_id = user.to_string()
|
||||
room_ids = await self.store.get_rooms_for_user(user_id)
|
||||
if not room_ids:
|
||||
# We no longer share rooms with this user, so we'll no longer
|
||||
# receive device updates. Mark this in DB.
|
||||
await self.store.mark_remote_user_device_list_as_unsubscribed(user_id)
|
||||
|
||||
async def store_dehydrated_device(
|
||||
self,
|
||||
user_id: str,
|
||||
|
||||
@@ -37,8 +37,7 @@ from synapse.types import (
|
||||
get_verify_key_from_cross_signing_key,
|
||||
)
|
||||
from synapse.util import json_decoder, unwrapFirstError
|
||||
from synapse.util.async_helpers import Linearizer, delay_cancellation
|
||||
from synapse.util.cancellation import cancellable
|
||||
from synapse.util.async_helpers import Linearizer
|
||||
from synapse.util.retryutils import NotRetryingDestination
|
||||
|
||||
if TYPE_CHECKING:
|
||||
@@ -92,7 +91,6 @@ class E2eKeysHandler:
|
||||
)
|
||||
|
||||
@trace
|
||||
@cancellable
|
||||
async def query_devices(
|
||||
self,
|
||||
query_body: JsonDict,
|
||||
@@ -175,32 +173,6 @@ class E2eKeysHandler:
|
||||
user_ids_not_in_cache,
|
||||
remote_results,
|
||||
) = await self.store.get_user_devices_from_cache(query_list)
|
||||
|
||||
# Check that the homeserver still shares a room with all cached users.
|
||||
# Note that this check may be slightly racy when a remote user leaves a
|
||||
# room after we have fetched their cached device list. In the worst case
|
||||
# we will do extra federation queries for devices that we had cached.
|
||||
cached_users = set(remote_results.keys())
|
||||
valid_cached_users = (
|
||||
await self.store.get_users_server_still_shares_room_with(
|
||||
remote_results.keys()
|
||||
)
|
||||
)
|
||||
invalid_cached_users = cached_users - valid_cached_users
|
||||
if invalid_cached_users:
|
||||
# Fix up results. If we get here, there is either a bug in device
|
||||
# list tracking, or we hit the race mentioned above.
|
||||
user_ids_not_in_cache.update(invalid_cached_users)
|
||||
for invalid_user_id in invalid_cached_users:
|
||||
remote_results.pop(invalid_user_id)
|
||||
# This log message may be removed if it turns out it's almost
|
||||
# entirely triggered by races.
|
||||
logger.error(
|
||||
"Devices for %s were cached, but the server no longer shares "
|
||||
"any rooms with them. The cached device lists are stale.",
|
||||
invalid_cached_users,
|
||||
)
|
||||
|
||||
for user_id, devices in remote_results.items():
|
||||
user_devices = results.setdefault(user_id, {})
|
||||
for device_id, device in devices.items():
|
||||
@@ -236,26 +208,22 @@ class E2eKeysHandler:
|
||||
r[user_id] = remote_queries[user_id]
|
||||
|
||||
# Now fetch any devices that we don't have in our cache
|
||||
# TODO It might make sense to propagate cancellations into the
|
||||
# deferreds which are querying remote homeservers.
|
||||
await make_deferred_yieldable(
|
||||
delay_cancellation(
|
||||
defer.gatherResults(
|
||||
[
|
||||
run_in_background(
|
||||
self._query_devices_for_destination,
|
||||
results,
|
||||
cross_signing_keys,
|
||||
failures,
|
||||
destination,
|
||||
queries,
|
||||
timeout,
|
||||
)
|
||||
for destination, queries in remote_queries_not_in_cache.items()
|
||||
],
|
||||
consumeErrors=True,
|
||||
).addErrback(unwrapFirstError)
|
||||
)
|
||||
defer.gatherResults(
|
||||
[
|
||||
run_in_background(
|
||||
self._query_devices_for_destination,
|
||||
results,
|
||||
cross_signing_keys,
|
||||
failures,
|
||||
destination,
|
||||
queries,
|
||||
timeout,
|
||||
)
|
||||
for destination, queries in remote_queries_not_in_cache.items()
|
||||
],
|
||||
consumeErrors=True,
|
||||
).addErrback(unwrapFirstError)
|
||||
)
|
||||
|
||||
ret = {"device_keys": results, "failures": failures}
|
||||
@@ -379,7 +347,6 @@ class E2eKeysHandler:
|
||||
|
||||
return
|
||||
|
||||
@cancellable
|
||||
async def get_cross_signing_keys_from_cache(
|
||||
self, query: Iterable[str], from_user_id: Optional[str]
|
||||
) -> Dict[str, Dict[str, dict]]:
|
||||
@@ -426,7 +393,6 @@ class E2eKeysHandler:
|
||||
}
|
||||
|
||||
@trace
|
||||
@cancellable
|
||||
async def query_local_devices(
|
||||
self, query: Mapping[str, Optional[List[str]]]
|
||||
) -> Dict[str, Dict[str, dict]]:
|
||||
|
||||
@@ -26,7 +26,6 @@ from synapse.events.utils import SerializeEventConfig
|
||||
from synapse.handlers.room import ShutdownRoomResponse
|
||||
from synapse.logging.opentracing import trace
|
||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||
from synapse.rest.admin._base import assert_user_is_admin
|
||||
from synapse.storage.state import StateFilter
|
||||
from synapse.streams.config import PaginationConfig
|
||||
from synapse.types import JsonDict, Requester, StreamKeyType
|
||||
@@ -424,7 +423,6 @@ class PaginationHandler:
|
||||
pagin_config: PaginationConfig,
|
||||
as_client_event: bool = True,
|
||||
event_filter: Optional[Filter] = None,
|
||||
use_admin_priviledge: bool = False,
|
||||
) -> JsonDict:
|
||||
"""Get messages in a room.
|
||||
|
||||
@@ -434,16 +432,10 @@ class PaginationHandler:
|
||||
pagin_config: The pagination config rules to apply, if any.
|
||||
as_client_event: True to get events in client-server format.
|
||||
event_filter: Filter to apply to results or None
|
||||
use_admin_priviledge: if `True`, return all events, regardless
|
||||
of whether `user` has access to them. To be used **ONLY**
|
||||
from the admin API.
|
||||
|
||||
Returns:
|
||||
Pagination API results
|
||||
"""
|
||||
if use_admin_priviledge:
|
||||
await assert_user_is_admin(self.auth, requester)
|
||||
|
||||
user_id = requester.user.to_string()
|
||||
|
||||
if pagin_config.from_token:
|
||||
@@ -466,14 +458,12 @@ class PaginationHandler:
|
||||
room_token = from_token.room_key
|
||||
|
||||
async with self.pagination_lock.read(room_id):
|
||||
(membership, member_event_id) = (None, None)
|
||||
if not use_admin_priviledge:
|
||||
(
|
||||
membership,
|
||||
member_event_id,
|
||||
) = await self.auth.check_user_in_room_or_world_readable(
|
||||
room_id, requester, allow_departed_users=True
|
||||
)
|
||||
(
|
||||
membership,
|
||||
member_event_id,
|
||||
) = await self.auth.check_user_in_room_or_world_readable(
|
||||
room_id, requester, allow_departed_users=True
|
||||
)
|
||||
|
||||
if pagin_config.direction == "b":
|
||||
# if we're going backwards, we might need to backfill. This
|
||||
@@ -485,7 +475,7 @@ class PaginationHandler:
|
||||
room_id, room_token.stream
|
||||
)
|
||||
|
||||
if not use_admin_priviledge and membership == Membership.LEAVE:
|
||||
if membership == Membership.LEAVE:
|
||||
# If they have left the room then clamp the token to be before
|
||||
# they left the room, to save the effort of loading from the
|
||||
# database.
|
||||
@@ -538,13 +528,12 @@ class PaginationHandler:
|
||||
if event_filter:
|
||||
events = await event_filter.filter(events)
|
||||
|
||||
if not use_admin_priviledge:
|
||||
events = await filter_events_for_client(
|
||||
self._storage_controllers,
|
||||
user_id,
|
||||
events,
|
||||
is_peeking=(member_event_id is None),
|
||||
)
|
||||
events = await filter_events_for_client(
|
||||
self._storage_controllers,
|
||||
user_id,
|
||||
events,
|
||||
is_peeking=(member_event_id is None),
|
||||
)
|
||||
|
||||
# if after the filter applied there are no more events
|
||||
# return immediately - but there might be more in next_token batch
|
||||
|
||||
@@ -453,6 +453,7 @@ class RoomSummaryHandler:
|
||||
"type": e.type,
|
||||
"state_key": e.state_key,
|
||||
"content": e.content,
|
||||
"room_id": e.room_id,
|
||||
"sender": e.sender,
|
||||
"origin_server_ts": e.origin_server_ts,
|
||||
}
|
||||
|
||||
@@ -15,7 +15,6 @@ import itertools
|
||||
import logging
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
AbstractSet,
|
||||
Any,
|
||||
Collection,
|
||||
Dict,
|
||||
@@ -1414,10 +1413,10 @@ class SyncHandler:
|
||||
async def _generate_sync_entry_for_device_list(
|
||||
self,
|
||||
sync_result_builder: "SyncResultBuilder",
|
||||
newly_joined_rooms: AbstractSet[str],
|
||||
newly_joined_or_invited_or_knocked_users: AbstractSet[str],
|
||||
newly_left_rooms: AbstractSet[str],
|
||||
newly_left_users: AbstractSet[str],
|
||||
newly_joined_rooms: Set[str],
|
||||
newly_joined_or_invited_or_knocked_users: Set[str],
|
||||
newly_left_rooms: Set[str],
|
||||
newly_left_users: Set[str],
|
||||
) -> DeviceListUpdates:
|
||||
"""Generate the DeviceListUpdates section of sync
|
||||
|
||||
@@ -1435,7 +1434,8 @@ class SyncHandler:
|
||||
user_id = sync_result_builder.sync_config.user.to_string()
|
||||
since_token = sync_result_builder.since_token
|
||||
|
||||
# Take a copy since these fields will be mutated later.
|
||||
# We're going to mutate these fields, so lets copy them rather than
|
||||
# assume they won't get used later.
|
||||
newly_joined_or_invited_or_knocked_users = set(
|
||||
newly_joined_or_invited_or_knocked_users
|
||||
)
|
||||
@@ -1635,8 +1635,8 @@ class SyncHandler:
|
||||
async def _generate_sync_entry_for_presence(
|
||||
self,
|
||||
sync_result_builder: "SyncResultBuilder",
|
||||
newly_joined_rooms: AbstractSet[str],
|
||||
newly_joined_or_invited_users: AbstractSet[str],
|
||||
newly_joined_rooms: Set[str],
|
||||
newly_joined_or_invited_users: Set[str],
|
||||
) -> None:
|
||||
"""Generates the presence portion of the sync response. Populates the
|
||||
`sync_result_builder` with the result.
|
||||
@@ -1694,7 +1694,7 @@ class SyncHandler:
|
||||
self,
|
||||
sync_result_builder: "SyncResultBuilder",
|
||||
account_data_by_room: Dict[str, Dict[str, JsonDict]],
|
||||
) -> Tuple[AbstractSet[str], AbstractSet[str], AbstractSet[str], AbstractSet[str]]:
|
||||
) -> Tuple[Set[str], Set[str], Set[str], Set[str]]:
|
||||
"""Generates the rooms portion of the sync response. Populates the
|
||||
`sync_result_builder` with the result.
|
||||
|
||||
@@ -2534,7 +2534,7 @@ class SyncResultBuilder:
archived: List[ArchivedSyncResult] = attr.Factory(list)
to_device: List[JsonDict] = attr.Factory(list)

def calculate_user_changes(self) -> Tuple[AbstractSet[str], AbstractSet[str]]:
def calculate_user_changes(self) -> Tuple[Set[str], Set[str]]:
"""Work out which other users have joined or left rooms we are joined to.

This data is only useful for an incremental sync.

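Note: an aside on the typing change above (a sketch, not from the diff). Accepting AbstractSet documents that a parameter is only read, so callers may pass frozensets; anything the function needs to mutate is copied first:

    from typing import AbstractSet, Set

    def drop_ignored(users: AbstractSet[str], ignored: AbstractSet[str]) -> Set[str]:
        # The parameters are treated as read-only; mutate a copy instead.
        remaining = set(users)
        remaining.difference_update(ignored)
        return remaining

    print(drop_ignored(frozenset({"@a:example.com", "@b:example.com"}), {"@b:example.com"}))
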
@@ -28,8 +28,7 @@ from typing import (
overload,
)

from pydantic import BaseModel, MissingError, PydanticValueError, ValidationError
from pydantic.error_wrappers import ErrorWrapper
from pydantic import BaseModel, ValidationError
from typing_extensions import Literal

from twisted.web.server import Request
@@ -715,21 +714,7 @@ def parse_and_validate_json_object_from_request(
try:
instance = model_type.parse_obj(content)
except ValidationError as e:
# Choose a matrix error code. The catch-all is BAD_JSON, but we try to find a
# more specific error if possible (which occasionally helps us to be spec-
# compliant) This is a bit awkward because the spec's error codes aren't very
# clear-cut: BAD_JSON arguably overlaps with MISSING_PARAM and INVALID_PARAM.
errcode = Codes.BAD_JSON

raw_errors = e.raw_errors
if len(raw_errors) == 1 and isinstance(raw_errors[0], ErrorWrapper):
raw_error = raw_errors[0].exc
if isinstance(raw_error, MissingError):
errcode = Codes.MISSING_PARAM
elif isinstance(raw_error, PydanticValueError):
errcode = Codes.INVALID_PARAM

raise SynapseError(HTTPStatus.BAD_REQUEST, str(e), errcode=errcode)
raise SynapseError(HTTPStatus.BAD_REQUEST, str(e), errcode=Codes.BAD_JSON)

return instance

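Note: for context, a standalone sketch of the error-code selection shown above, using pydantic v1's error wrappers (the Matrix error-code strings are written out literally; the SynapseError plumbing is omitted):

    from pydantic import BaseModel, MissingError, PydanticValueError, StrictStr, ValidationError
    from pydantic.error_wrappers import ErrorWrapper

    class ExampleBody(BaseModel):
        user_id: StrictStr

    def errcode_for(content: dict) -> str:
        try:
            ExampleBody.parse_obj(content)
            return "OK"
        except ValidationError as e:
            errcode = "M_BAD_JSON"  # catch-all
            raw_errors = e.raw_errors
            if len(raw_errors) == 1 and isinstance(raw_errors[0], ErrorWrapper):
                exc = raw_errors[0].exc
                if isinstance(exc, MissingError):
                    errcode = "M_MISSING_PARAM"
                elif isinstance(exc, PydanticValueError):
                    errcode = "M_INVALID_PARAM"
            return errcode

    print(errcode_for({}))  # a single missing field maps to M_MISSING_PARAM
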
@@ -34,6 +34,8 @@ from prometheus_client.core import Sample
from twisted.web.resource import Resource
from twisted.web.server import Request

from synapse.util import caches

CONTENT_TYPE_LATEST = "text/plain; version=0.0.4; charset=utf-8"


@@ -91,11 +93,6 @@ LEGACY_METRIC_NAMES = {
"synapse_util_caches_response_cache_hits": "synapse_util_caches_response_cache:hits",
"synapse_util_caches_response_cache_evicted_size": "synapse_util_caches_response_cache:evicted_size",
"synapse_util_caches_response_cache": "synapse_util_caches_response_cache:total",
"synapse_federation_client_sent_pdu_destinations": "synapse_federation_client_sent_pdu_destinations:total",
"synapse_federation_client_sent_pdu_destinations_count": "synapse_federation_client_sent_pdu_destinations:count",
"synapse_admin_mau_current": "synapse_admin_mau:current",
"synapse_admin_mau_max": "synapse_admin_mau:max",
"synapse_admin_mau_registered_reserved_users": "synapse_admin_mau:registered_reserved_users",
}


@@ -105,6 +102,11 @@ def generate_latest(registry: CollectorRegistry, emit_help: bool = False) -> byt
by prometheus-client.
"""

# Trigger the cache metrics to be rescraped, which update the common
# metrics but do not produce metrics themselves
for collector in caches.collectors_by_name.values():
collector.collect()

output = []

for metric in registry.collect():

@@ -61,11 +61,9 @@ from synapse.rest.admin.rooms import (
|
||||
MakeRoomAdminRestServlet,
|
||||
RoomEventContextServlet,
|
||||
RoomMembersRestServlet,
|
||||
RoomMessagesRestServlet,
|
||||
RoomRestServlet,
|
||||
RoomRestV2Servlet,
|
||||
RoomStateRestServlet,
|
||||
RoomTimestampToEventRestServlet,
|
||||
)
|
||||
from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet
|
||||
from synapse.rest.admin.statistics import UserMediaStatisticsRestServlet
|
||||
@@ -273,8 +271,6 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
|
||||
DestinationResetConnectionRestServlet(hs).register(http_server)
|
||||
DestinationRestServlet(hs).register(http_server)
|
||||
ListDestinationsRestServlet(hs).register(http_server)
|
||||
RoomMessagesRestServlet(hs).register(http_server)
|
||||
RoomTimestampToEventRestServlet(hs).register(http_server)
|
||||
|
||||
# Some servlets only get registered for the main process.
|
||||
if hs.config.worker.worker_app is None:
|
||||
|
||||
@@ -35,7 +35,6 @@ from synapse.rest.admin._base import (
|
||||
)
|
||||
from synapse.storage.databases.main.room import RoomSortOrder
|
||||
from synapse.storage.state import StateFilter
|
||||
from synapse.streams.config import PaginationConfig
|
||||
from synapse.types import JsonDict, RoomID, UserID, create_requester
|
||||
from synapse.util import json_decoder
|
||||
|
||||
@@ -859,106 +858,3 @@ class BlockRoomRestServlet(RestServlet):
|
||||
await self._store.unblock_room(room_id)
|
||||
|
||||
return HTTPStatus.OK, {"block": block}
|
||||
|
||||
|
||||
class RoomMessagesRestServlet(RestServlet):
|
||||
"""
|
||||
Get messages list of a room.
|
||||
"""
|
||||
|
||||
PATTERNS = admin_patterns("/rooms/(?P<room_id>[^/]*)/messages$")
|
||||
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
self._hs = hs
|
||||
self._clock = hs.get_clock()
|
||||
self._pagination_handler = hs.get_pagination_handler()
|
||||
self._auth = hs.get_auth()
|
||||
self._store = hs.get_datastores().main
|
||||
|
||||
async def on_GET(
|
||||
self, request: SynapseRequest, room_id: str
|
||||
) -> Tuple[int, JsonDict]:
|
||||
requester = await self._auth.get_user_by_req(request)
|
||||
await assert_user_is_admin(self._auth, requester)
|
||||
|
||||
pagination_config = await PaginationConfig.from_request(
|
||||
self._store, request, default_limit=10
|
||||
)
|
||||
# Twisted will have processed the args by now.
|
||||
assert request.args is not None
|
||||
as_client_event = b"raw" not in request.args
|
||||
filter_str = parse_string(request, "filter", encoding="utf-8")
|
||||
if filter_str:
|
||||
filter_json = urlparse.unquote(filter_str)
|
||||
event_filter: Optional[Filter] = Filter(
|
||||
self._hs, json_decoder.decode(filter_json)
|
||||
)
|
||||
if (
|
||||
event_filter
|
||||
and event_filter.filter_json.get("event_format", "client")
|
||||
== "federation"
|
||||
):
|
||||
as_client_event = False
|
||||
else:
|
||||
event_filter = None
|
||||
|
||||
msgs = await self._pagination_handler.get_messages(
|
||||
room_id=room_id,
|
||||
requester=requester,
|
||||
pagin_config=pagination_config,
|
||||
as_client_event=as_client_event,
|
||||
event_filter=event_filter,
|
||||
use_admin_priviledge=True,
|
||||
)
|
||||
|
||||
return HTTPStatus.OK, msgs
|
||||
|
||||
|
||||
class RoomTimestampToEventRestServlet(RestServlet):
|
||||
"""
|
||||
API endpoint to fetch the `event_id` of the closest event to the given
|
||||
timestamp (`ts` query parameter) in the given direction (`dir` query
|
||||
parameter).
|
||||
|
||||
Useful for cases like jump to date so you can start paginating messages from
|
||||
a given date in the archive.
|
||||
|
||||
`ts` is a timestamp in milliseconds where we will find the closest event in
|
||||
the given direction.
|
||||
|
||||
`dir` can be `f` or `b` to indicate forwards and backwards in time from the
|
||||
given timestamp.
|
||||
|
||||
GET /_synapse/admin/v1/rooms/<roomID>/timestamp_to_event?ts=<timestamp>&dir=<direction>
|
||||
{
|
||||
"event_id": ...
|
||||
}
|
||||
"""
|
||||
|
||||
PATTERNS = admin_patterns("/rooms/(?P<room_id>[^/]*)/timestamp_to_event$")
|
||||
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
self._auth = hs.get_auth()
|
||||
self._store = hs.get_datastores().main
|
||||
self._timestamp_lookup_handler = hs.get_timestamp_lookup_handler()
|
||||
|
||||
async def on_GET(
|
||||
self, request: SynapseRequest, room_id: str
|
||||
) -> Tuple[int, JsonDict]:
|
||||
requester = await self._auth.get_user_by_req(request)
|
||||
await assert_user_is_admin(self._auth, requester)
|
||||
|
||||
timestamp = parse_integer(request, "ts", required=True)
|
||||
direction = parse_string(request, "dir", default="f", allowed_values=["f", "b"])
|
||||
|
||||
(
|
||||
event_id,
|
||||
origin_server_ts,
|
||||
) = await self._timestamp_lookup_handler.get_event_for_timestamp(
|
||||
requester, room_id, timestamp, direction
|
||||
)
|
||||
|
||||
return HTTPStatus.OK, {
|
||||
"event_id": event_id,
|
||||
"origin_server_ts": origin_server_ts,
|
||||
}
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
# limitations under the License.
|
||||
import logging
|
||||
import random
|
||||
from typing import TYPE_CHECKING, List, Optional, Tuple
|
||||
from typing import TYPE_CHECKING, Optional, Tuple
|
||||
from urllib.parse import urlparse
|
||||
|
||||
from pydantic import StrictBool, StrictStr, constr
|
||||
@@ -41,11 +41,7 @@ from synapse.http.servlet import (
|
||||
from synapse.http.site import SynapseRequest
|
||||
from synapse.metrics import threepid_send_requests
|
||||
from synapse.push.mailer import Mailer
|
||||
from synapse.rest.client.models import (
|
||||
AuthenticationData,
|
||||
EmailRequestTokenBody,
|
||||
MsisdnRequestTokenBody,
|
||||
)
|
||||
from synapse.rest.client.models import AuthenticationData, EmailRequestTokenBody
|
||||
from synapse.rest.models import RequestBodyModel
|
||||
from synapse.types import JsonDict
|
||||
from synapse.util.msisdn import phone_number_to_msisdn
|
||||
@@ -404,16 +400,23 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):
|
||||
self.identity_handler = hs.get_identity_handler()
|
||||
|
||||
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
|
||||
body = parse_and_validate_json_object_from_request(
|
||||
request, MsisdnRequestTokenBody
|
||||
body = parse_json_object_from_request(request)
|
||||
assert_params_in_dict(
|
||||
body, ["client_secret", "country", "phone_number", "send_attempt"]
|
||||
)
|
||||
msisdn = phone_number_to_msisdn(body.country, body.phone_number)
|
||||
client_secret = body["client_secret"]
|
||||
assert_valid_client_secret(client_secret)
|
||||
|
||||
country = body["country"]
|
||||
phone_number = body["phone_number"]
|
||||
send_attempt = body["send_attempt"]
|
||||
next_link = body.get("next_link") # Optional param
|
||||
|
||||
msisdn = phone_number_to_msisdn(country, phone_number)
|
||||
|
||||
if not await check_3pid_allowed(self.hs, "msisdn", msisdn):
|
||||
raise SynapseError(
|
||||
403,
|
||||
# TODO: is this error message accurate? Looks like we've only rejected
|
||||
# this phone number, not necessarily all phone numbers
|
||||
"Account phone numbers are not authorized on this server",
|
||||
Codes.THREEPID_DENIED,
|
||||
)
|
||||
@@ -422,9 +425,9 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):
|
||||
request, "msisdn", msisdn
|
||||
)
|
||||
|
||||
if body.next_link:
|
||||
if next_link:
|
||||
# Raise if the provided next_link value isn't valid
|
||||
assert_valid_next_link(self.hs, body.next_link)
|
||||
assert_valid_next_link(self.hs, next_link)
|
||||
|
||||
existing_user_id = await self.store.get_user_id_by_threepid("msisdn", msisdn)
|
||||
|
||||
@@ -451,15 +454,15 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):
|
||||
|
||||
ret = await self.identity_handler.requestMsisdnToken(
|
||||
self.hs.config.registration.account_threepid_delegate_msisdn,
|
||||
body.country,
|
||||
body.phone_number,
|
||||
body.client_secret,
|
||||
body.send_attempt,
|
||||
body.next_link,
|
||||
country,
|
||||
phone_number,
|
||||
client_secret,
|
||||
send_attempt,
|
||||
next_link,
|
||||
)
|
||||
|
||||
threepid_send_requests.labels(type="msisdn", reason="add_threepid").observe(
|
||||
body.send_attempt
|
||||
send_attempt
|
||||
)
|
||||
|
||||
return 200, ret
|
||||
@@ -842,18 +845,17 @@ class AccountStatusRestServlet(RestServlet):
|
||||
self._auth = hs.get_auth()
|
||||
self._account_handler = hs.get_account_handler()
|
||||
|
||||
class PostBody(RequestBodyModel):
|
||||
# TODO: we could validate that each user id is an mxid here, and/or parse it
|
||||
# as a UserID
|
||||
user_ids: List[StrictStr]
|
||||
|
||||
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
|
||||
await self._auth.get_user_by_req(request)
|
||||
|
||||
body = parse_and_validate_json_object_from_request(request, self.PostBody)
|
||||
body = parse_json_object_from_request(request)
|
||||
if "user_ids" not in body:
|
||||
raise SynapseError(
|
||||
400, "Required parameter 'user_ids' is missing", Codes.MISSING_PARAM
|
||||
)
|
||||
|
||||
statuses, failures = await self._account_handler.get_account_statuses(
|
||||
body.user_ids,
|
||||
body["user_ids"],
|
||||
allow_remote=True,
|
||||
)
|
||||
|
||||
|
||||
@@ -27,9 +27,9 @@ from synapse.http.servlet import (
|
||||
)
|
||||
from synapse.http.site import SynapseRequest
|
||||
from synapse.logging.opentracing import log_kv, set_tag
|
||||
from synapse.rest.client._base import client_patterns, interactive_auth_handler
|
||||
from synapse.types import JsonDict, StreamToken
|
||||
from synapse.util.cancellation import cancellable
|
||||
|
||||
from ._base import client_patterns, interactive_auth_handler
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.server import HomeServer
|
||||
@@ -156,7 +156,6 @@ class KeyQueryServlet(RestServlet):
|
||||
self.auth = hs.get_auth()
|
||||
self.e2e_keys_handler = hs.get_e2e_keys_handler()
|
||||
|
||||
@cancellable
|
||||
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
|
||||
requester = await self.auth.get_user_by_req(request, allow_guest=True)
|
||||
user_id = requester.user.to_string()
|
||||
@@ -200,7 +199,6 @@ class KeyChangesServlet(RestServlet):
|
||||
self.device_handler = hs.get_device_handler()
|
||||
self.store = hs.get_datastores().main
|
||||
|
||||
@cancellable
|
||||
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
|
||||
requester = await self.auth.get_user_by_req(request, allow_guest=True)
|
||||
|
||||
|
||||
@@ -25,8 +25,8 @@ class AuthenticationData(RequestBodyModel):

(The name "Authentication Data" is taken directly from the spec.)

Additional keys will be present, depending on the `type` field. Use
`.dict(exclude_unset=True)` to access them.
Additional keys will be present, depending on the `type` field. Use `.dict()` to
access them.
"""

class Config:
@@ -36,7 +36,7 @@ class AuthenticationData(RequestBodyModel):
type: Optional[StrictStr] = None


class ThreePidRequestTokenBody(RequestBodyModel):
class EmailRequestTokenBody(RequestBodyModel):
if TYPE_CHECKING:
client_secret: StrictStr
else:
@@ -47,7 +47,7 @@ class ThreePidRequestTokenBody(RequestBodyModel):
max_length=255,
strict=True,
)

email: StrictStr
id_server: Optional[StrictStr]
id_access_token: Optional[StrictStr]
next_link: Optional[StrictStr]
@@ -61,25 +61,9 @@ class ThreePidRequestTokenBody(RequestBodyModel):
raise ValueError("id_access_token is required if an id_server is supplied.")
return token


class EmailRequestTokenBody(ThreePidRequestTokenBody):
email: StrictStr

# Canonicalise the email address. The addresses are all stored canonicalised
# in the database. This allows the user to reset his password without having to
# know the exact spelling (eg. upper and lower case) of address in the database.
# Without this, an email stored in the database as "foo@bar.com" would cause
# user requests for "FOO@bar.com" to raise a Not Found error.
_email_validator = validator("email", allow_reuse=True)(validate_email)


if TYPE_CHECKING:
ISO3116_1_Alpha_2 = StrictStr
else:
# Per spec: two-letter uppercase ISO-3166-1-alpha-2
ISO3116_1_Alpha_2 = constr(regex="[A-Z]{2}", strict=True)


class MsisdnRequestTokenBody(ThreePidRequestTokenBody):
country: ISO3116_1_Alpha_2
phone_number: StrictStr

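Note: a minimal standalone sketch of the request-body models above (pydantic v1 style; class and field names follow the diff, the rest is illustrative). The TYPE_CHECKING branches in the diff exist so that static type checkers see a plain StrictStr, since constr() is awkward to use as a static annotation:

    from typing import Optional
    from pydantic import BaseModel, StrictInt, StrictStr, constr

    # Per spec: two-letter uppercase ISO-3166-1-alpha-2 country code.
    ISO3116_1_Alpha_2 = constr(regex="[A-Z]{2}", strict=True)

    class MsisdnRequestTokenBody(BaseModel):
        client_secret: StrictStr
        country: ISO3116_1_Alpha_2
        phone_number: StrictStr
        send_attempt: StrictInt
        next_link: Optional[StrictStr] = None

    body = MsisdnRequestTokenBody.parse_obj(
        {
            "client_secret": "s3cret",
            "country": "GB",
            "phone_number": "07700 900123",
            "send_attempt": 1,
        }
    )
    print(body.country, body.phone_number)
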
@@ -341,17 +341,7 @@ class HomeServer(metaclass=abc.ABCMeta):
return domain_specific_string.domain == self.hostname

def is_mine_id(self, string: str) -> bool:
"""Determines whether a user ID or room alias originates from this homeserver.

Returns:
`True` if the hostname part of the user ID or room alias matches this
homeserver.
`False` otherwise, or if the user ID or room alias is malformed.
"""
localpart_hostname = string.split(":", 1)
if len(localpart_hostname) < 2:
return False
return localpart_hostname[1] == self.hostname
return string.split(":", 1)[1] == self.hostname

@cache_in_self
def get_clock(self) -> Clock:

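Note: for illustration (not part of the diff), the malformed-ID guard above in isolation; the hostname is a placeholder:

    def is_mine_id(string: str, hostname: str = "example.com") -> bool:
        # A user ID or room alias is "ours" if the part after the first colon
        # matches our server name; IDs with no colon are treated as not ours
        # rather than raising IndexError.
        localpart_hostname = string.split(":", 1)
        if len(localpart_hostname) < 2:
            return False
        return localpart_hostname[1] == hostname

    assert is_mine_id("@alice:example.com") is True
    assert is_mine_id("malformed-id") is False
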
@@ -598,9 +598,9 @@ class EventsPersistenceStorageController:
|
||||
# room
|
||||
state_delta_for_room: Dict[str, DeltaState] = {}
|
||||
|
||||
# Set of remote users which were in rooms the server has left or who may
|
||||
# have left rooms the server is in. We should check if we still share any
|
||||
# rooms and if not we mark their device lists as stale.
|
||||
# Set of remote users which were in rooms the server has left. We
|
||||
# should check if we still share any rooms and if not we mark their
|
||||
# device lists as stale.
|
||||
potentially_left_users: Set[str] = set()
|
||||
|
||||
if not backfilled:
|
||||
@@ -725,20 +725,6 @@ class EventsPersistenceStorageController:
|
||||
current_state = {}
|
||||
delta.no_longer_in_room = True
|
||||
|
||||
# Add all remote users that might have left rooms.
|
||||
potentially_left_users.update(
|
||||
user_id
|
||||
for event_type, user_id in delta.to_delete
|
||||
if event_type == EventTypes.Member
|
||||
and not self.is_mine_id(user_id)
|
||||
)
|
||||
potentially_left_users.update(
|
||||
user_id
|
||||
for event_type, user_id in delta.to_insert.keys()
|
||||
if event_type == EventTypes.Member
|
||||
and not self.is_mine_id(user_id)
|
||||
)
|
||||
|
||||
state_delta_for_room[room_id] = delta
|
||||
|
||||
await self.persist_events_store._persist_events_and_state_updates(
|
||||
|
||||
@@ -36,7 +36,6 @@ from synapse.storage.util.partial_state_events_tracker import (
|
||||
PartialStateEventsTracker,
|
||||
)
|
||||
from synapse.types import MutableStateMap, StateMap
|
||||
from synapse.util.cancellation import cancellable
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from synapse.server import HomeServer
|
||||
@@ -230,7 +229,6 @@ class StateStorageController:
|
||||
|
||||
@trace
|
||||
@tag_args
|
||||
@cancellable
|
||||
async def get_state_ids_for_events(
|
||||
self,
|
||||
event_ids: Collection[str],
|
||||
@@ -352,7 +350,6 @@ class StateStorageController:
|
||||
|
||||
@trace
|
||||
@tag_args
|
||||
@cancellable
|
||||
async def get_state_group_for_events(
|
||||
self,
|
||||
event_ids: Collection[str],
|
||||
@@ -401,7 +398,6 @@ class StateStorageController:
|
||||
event_id, room_id, prev_group, delta_ids, current_state_ids
|
||||
)
|
||||
|
||||
@cancellable
|
||||
async def get_current_state_ids(
|
||||
self,
|
||||
room_id: str,
|
||||
|
||||
@@ -533,14 +533,15 @@ class DatabasePool:
if isinstance(self.engine, Sqlite3Engine):
self._unsafe_to_upsert_tables.add("user_directory_search")

# Check ASAP (and then later, every 1s) to see if we have finished
# background updates of tables that aren't safe to update.
self._clock.call_later(
0.0,
run_as_background_process,
"upsert_safety_check",
self._check_safe_to_upsert,
)
if self.engine.can_native_upsert:
# Check ASAP (and then later, every 1s) to see if we have finished
# background updates of tables that aren't safe to update.
self._clock.call_later(
0.0,
run_as_background_process,
"upsert_safety_check",
self._check_safe_to_upsert,
)

def name(self) -> str:
"Return the name of this database"
@@ -1159,8 +1160,11 @@ class DatabasePool:
attempts = 0
while True:
try:
# We can autocommit if it is safe to upsert
autocommit = table not in self._unsafe_to_upsert_tables
# We can autocommit if we are going to use native upserts
autocommit = (
self.engine.can_native_upsert
and table not in self._unsafe_to_upsert_tables
)

return await self.runInteraction(
desc,
@@ -1195,7 +1199,7 @@ class DatabasePool:
|
||||
) -> bool:
|
||||
"""
|
||||
Pick the UPSERT method which works best on the platform. Either the
|
||||
native one (Pg9.5+, SQLite >= 3.24), or fall back to an emulated method.
|
||||
native one (Pg9.5+, recent SQLites), or fall back to an emulated method.
|
||||
|
||||
Args:
|
||||
txn: The transaction to use.
|
||||
@@ -1203,15 +1207,14 @@ class DatabasePool:
|
||||
keyvalues: The unique key tables and their new values
|
||||
values: The nonunique columns and their new values
|
||||
insertion_values: additional key/values to use only when inserting
|
||||
lock: True to lock the table when doing the upsert. Unused when performing
|
||||
a native upsert.
|
||||
lock: True to lock the table when doing the upsert.
|
||||
Returns:
|
||||
Returns True if a row was inserted or updated (i.e. if `values` is
|
||||
not empty then this always returns True)
|
||||
"""
|
||||
insertion_values = insertion_values or {}
|
||||
|
||||
if table not in self._unsafe_to_upsert_tables:
|
||||
if self.engine.can_native_upsert and table not in self._unsafe_to_upsert_tables:
|
||||
return self.simple_upsert_txn_native_upsert(
|
||||
txn, table, keyvalues, values, insertion_values=insertion_values
|
||||
)
|
||||
@@ -1362,12 +1365,14 @@ class DatabasePool:
value_names: The value column names
value_values: A list of each row's value column values.
Ignored if value_names is empty.
lock: True to lock the table when doing the upsert. Unused when performing
a native upsert.
lock: True to lock the table when doing the upsert. Unused if the database engine
supports native upserts.
"""

# We can autocommit if it is safe to upsert
autocommit = table not in self._unsafe_to_upsert_tables
# We can autocommit if we are going to use native upserts
autocommit = (
self.engine.can_native_upsert and table not in self._unsafe_to_upsert_tables
)

await self.runInteraction(
desc,
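Note: for context, a self-contained sketch (not from the diff) of the two upsert strategies the code above chooses between; the table and keys are made up, and the real implementation additionally locks the table on the emulated path:

    import sqlite3

    conn = sqlite3.connect(":memory:")  # needs SQLite 3.24+ for ON CONFLICT upserts
    conn.execute(
        "CREATE TABLE receipts (room_id TEXT, user_id TEXT, event_id TEXT, "
        "UNIQUE(room_id, user_id))"
    )

    def native_upsert(room_id: str, user_id: str, event_id: str) -> None:
        # Native upsert: one statement, safe to run with autocommit.
        conn.execute(
            "INSERT INTO receipts (room_id, user_id, event_id) VALUES (?, ?, ?) "
            "ON CONFLICT (room_id, user_id) DO UPDATE SET event_id = excluded.event_id",
            (room_id, user_id, event_id),
        )

    def emulated_upsert(room_id: str, user_id: str, event_id: str) -> None:
        # Emulated upsert: UPDATE first, INSERT only if nothing matched.
        cur = conn.execute(
            "UPDATE receipts SET event_id = ? WHERE room_id = ? AND user_id = ?",
            (event_id, room_id, user_id),
        )
        if cur.rowcount == 0:
            conn.execute(
                "INSERT INTO receipts (room_id, user_id, event_id) VALUES (?, ?, ?)",
                (room_id, user_id, event_id),
            )

    native_upsert("!room:example.com", "@alice:example.com", "$event1")
    native_upsert("!room:example.com", "@alice:example.com", "$event2")
    print(conn.execute("SELECT event_id FROM receipts").fetchall())  # [('$event2',)]
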
@@ -1401,10 +1406,10 @@ class DatabasePool:
|
||||
value_names: The value column names
|
||||
value_values: A list of each row's value column values.
|
||||
Ignored if value_names is empty.
|
||||
lock: True to lock the table when doing the upsert. Unused when performing
|
||||
a native upsert.
|
||||
lock: True to lock the table when doing the upsert. Unused if the database engine
|
||||
supports native upserts.
|
||||
"""
|
||||
if table not in self._unsafe_to_upsert_tables:
|
||||
if self.engine.can_native_upsert and table not in self._unsafe_to_upsert_tables:
|
||||
return self.simple_upsert_many_txn_native_upsert(
|
||||
txn, table, key_names, key_values, value_names, value_values
|
||||
)
|
||||
|
||||
@@ -53,7 +53,6 @@ from synapse.util import json_decoder, json_encoder
|
||||
from synapse.util.caches.descriptors import cached, cachedList
|
||||
from synapse.util.caches.lrucache import LruCache
|
||||
from synapse.util.caches.stream_change_cache import StreamChangeCache
|
||||
from synapse.util.cancellation import cancellable
|
||||
from synapse.util.iterutils import batch_iter
|
||||
from synapse.util.stringutils import shortstr
|
||||
|
||||
@@ -669,7 +668,6 @@ class DeviceWorkerStore(EndToEndKeyWorkerStore):
|
||||
...
|
||||
|
||||
@trace
|
||||
@cancellable
|
||||
async def get_user_devices_from_cache(
|
||||
self, query_list: List[Tuple[str, Optional[str]]]
|
||||
) -> Tuple[Set[str], Dict[str, Dict[str, JsonDict]]]:
|
||||
@@ -745,7 +743,6 @@ class DeviceWorkerStore(EndToEndKeyWorkerStore):
|
||||
|
||||
return self._device_list_stream_cache.get_all_entities_changed(from_key)
|
||||
|
||||
@cancellable
|
||||
async def get_users_whose_devices_changed(
|
||||
self,
|
||||
from_key: int,
|
||||
@@ -1224,7 +1221,6 @@ class DeviceWorkerStore(EndToEndKeyWorkerStore):
|
||||
desc="get_min_device_lists_changes_in_room",
|
||||
)
|
||||
|
||||
@cancellable
|
||||
async def get_device_list_changes_in_rooms(
|
||||
self, room_ids: Collection[str], from_id: int
|
||||
) -> Optional[Set[str]]:
|
||||
|
||||
@@ -50,7 +50,6 @@ from synapse.storage.util.id_generators import StreamIdGenerator
|
||||
from synapse.types import JsonDict
|
||||
from synapse.util import json_encoder
|
||||
from synapse.util.caches.descriptors import cached, cachedList
|
||||
from synapse.util.cancellation import cancellable
|
||||
from synapse.util.iterutils import batch_iter
|
||||
|
||||
if TYPE_CHECKING:
|
||||
@@ -136,7 +135,6 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
|
||||
return now_stream_id, []
|
||||
|
||||
@trace
|
||||
@cancellable
|
||||
async def get_e2e_device_keys_for_cs_api(
|
||||
self, query_list: List[Tuple[str, Optional[str]]]
|
||||
) -> Dict[str, Dict[str, JsonDict]]:
|
||||
@@ -199,7 +197,6 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
|
||||
...
|
||||
|
||||
@trace
|
||||
@cancellable
|
||||
async def get_e2e_device_keys_and_signatures(
|
||||
self,
|
||||
query_list: Collection[Tuple[str, Optional[str]]],
|
||||
@@ -890,7 +887,6 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
|
||||
|
||||
return keys
|
||||
|
||||
@cancellable
|
||||
async def get_e2e_cross_signing_keys_bulk(
|
||||
self, user_ids: List[str], from_user_id: Optional[str] = None
|
||||
) -> Dict[str, Optional[Dict[str, JsonDict]]]:
|
||||
@@ -906,6 +902,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
|
||||
keys were not found, either their user ID will not be in the dict,
|
||||
or their user ID will map to None.
|
||||
"""
|
||||
|
||||
result = await self._get_bare_e2e_cross_signing_keys_bulk(user_ids)
|
||||
|
||||
if from_user_id:
|
||||
|
||||
@@ -48,7 +48,6 @@ from synapse.types import JsonDict
|
||||
from synapse.util import json_encoder
|
||||
from synapse.util.caches.descriptors import cached
|
||||
from synapse.util.caches.lrucache import LruCache
|
||||
from synapse.util.cancellation import cancellable
|
||||
from synapse.util.iterutils import batch_iter
|
||||
|
||||
if TYPE_CHECKING:
|
||||
@@ -977,7 +976,6 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
|
||||
|
||||
return int(min_depth) if min_depth is not None else None
|
||||
|
||||
@cancellable
|
||||
async def get_forward_extremities_for_room_at_stream_ordering(
|
||||
self, room_id: str, stream_ordering: int
|
||||
) -> List[str]:
|
||||
@@ -1608,7 +1606,7 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
|
||||
logger.info("Invalid prev_events for %s", event_id)
|
||||
continue
|
||||
|
||||
if room_version.event_format == EventFormatVersions.ROOM_V1_V2:
|
||||
if room_version.event_format == EventFormatVersions.V1:
|
||||
for prev_event_tuple in prev_events:
|
||||
if (
|
||||
not isinstance(prev_event_tuple, list)
|
||||
|
||||
@@ -81,7 +81,6 @@ from synapse.util import unwrapFirstError
|
||||
from synapse.util.async_helpers import ObservableDeferred, delay_cancellation
|
||||
from synapse.util.caches.descriptors import cached, cachedList
|
||||
from synapse.util.caches.lrucache import AsyncLruCache
|
||||
from synapse.util.cancellation import cancellable
|
||||
from synapse.util.iterutils import batch_iter
|
||||
from synapse.util.metrics import Measure
|
||||
|
||||
@@ -340,7 +339,6 @@ class EventsWorkerStore(SQLBaseStore):
|
||||
) -> Optional[EventBase]:
|
||||
...
|
||||
|
||||
@cancellable
|
||||
async def get_event(
|
||||
self,
|
||||
event_id: str,
|
||||
@@ -435,7 +433,6 @@ class EventsWorkerStore(SQLBaseStore):
|
||||
|
||||
@trace
|
||||
@tag_args
|
||||
@cancellable
|
||||
async def get_events_as_list(
|
||||
self,
|
||||
event_ids: Collection[str],
|
||||
@@ -587,7 +584,6 @@ class EventsWorkerStore(SQLBaseStore):
|
||||
|
||||
return events
|
||||
|
||||
@cancellable
|
||||
async def _get_events_from_cache_or_db(
|
||||
self, event_ids: Iterable[str], allow_rejected: bool = False
|
||||
) -> Dict[str, EventCacheEntry]:
|
||||
@@ -1160,7 +1156,7 @@ class EventsWorkerStore(SQLBaseStore):
if format_version is None:
# This means that we stored the event before we had the concept
# of an event format version, so it must be a V1 event.
format_version = EventFormatVersions.ROOM_V1_V2
format_version = EventFormatVersions.V1

room_version_id = row.room_version_id

@@ -1190,10 +1186,10 @@ class EventsWorkerStore(SQLBaseStore):
|
||||
#
|
||||
# So, the following approximations should be adequate.
|
||||
|
||||
if format_version == EventFormatVersions.ROOM_V1_V2:
|
||||
if format_version == EventFormatVersions.V1:
|
||||
# if it's event format v1 then it must be room v1 or v2
|
||||
room_version = RoomVersions.V1
|
||||
elif format_version == EventFormatVersions.ROOM_V3:
|
||||
elif format_version == EventFormatVersions.V2:
|
||||
# if it's event format v2 then it must be room v3
|
||||
room_version = RoomVersions.V3
|
||||
else:
|
||||
|
||||
@@ -129,48 +129,91 @@ class LockStore(SQLBaseStore):
|
||||
now = self._clock.time_msec()
|
||||
token = random_string(6)
|
||||
|
||||
def _try_acquire_lock_txn(txn: LoggingTransaction) -> bool:
|
||||
# We take out the lock if either a) there is no row for the lock
|
||||
# already, b) the existing row has timed out, or c) the row is
|
||||
# for this instance (which means the process got killed and
|
||||
# restarted)
|
||||
sql = """
|
||||
INSERT INTO worker_locks (lock_name, lock_key, instance_name, token, last_renewed_ts)
|
||||
VALUES (?, ?, ?, ?, ?)
|
||||
ON CONFLICT (lock_name, lock_key)
|
||||
DO UPDATE
|
||||
SET
|
||||
token = EXCLUDED.token,
|
||||
instance_name = EXCLUDED.instance_name,
|
||||
last_renewed_ts = EXCLUDED.last_renewed_ts
|
||||
WHERE
|
||||
worker_locks.last_renewed_ts < ?
|
||||
OR worker_locks.instance_name = EXCLUDED.instance_name
|
||||
"""
|
||||
txn.execute(
|
||||
sql,
|
||||
(
|
||||
lock_name,
|
||||
lock_key,
|
||||
self._instance_name,
|
||||
token,
|
||||
now,
|
||||
now - _LOCK_TIMEOUT_MS,
|
||||
),
|
||||
if self.db_pool.engine.can_native_upsert:
|
||||
|
||||
def _try_acquire_lock_txn(txn: LoggingTransaction) -> bool:
|
||||
# We take out the lock if either a) there is no row for the lock
|
||||
# already, b) the existing row has timed out, or c) the row is
|
||||
# for this instance (which means the process got killed and
|
||||
# restarted)
|
||||
sql = """
|
||||
INSERT INTO worker_locks (lock_name, lock_key, instance_name, token, last_renewed_ts)
|
||||
VALUES (?, ?, ?, ?, ?)
|
||||
ON CONFLICT (lock_name, lock_key)
|
||||
DO UPDATE
|
||||
SET
|
||||
token = EXCLUDED.token,
|
||||
instance_name = EXCLUDED.instance_name,
|
||||
last_renewed_ts = EXCLUDED.last_renewed_ts
|
||||
WHERE
|
||||
worker_locks.last_renewed_ts < ?
|
||||
OR worker_locks.instance_name = EXCLUDED.instance_name
|
||||
"""
|
||||
txn.execute(
|
||||
sql,
|
||||
(
|
||||
lock_name,
|
||||
lock_key,
|
||||
self._instance_name,
|
||||
token,
|
||||
now,
|
||||
now - _LOCK_TIMEOUT_MS,
|
||||
),
|
||||
)
|
||||
|
||||
# We only acquired the lock if we inserted or updated the table.
|
||||
return bool(txn.rowcount)
|
||||
|
||||
did_lock = await self.db_pool.runInteraction(
|
||||
"try_acquire_lock",
|
||||
_try_acquire_lock_txn,
|
||||
# We can autocommit here as we're executing a single query, this
|
||||
# will avoid serialization errors.
|
||||
db_autocommit=True,
|
||||
)
|
||||
if not did_lock:
|
||||
return None
|
||||
|
||||
else:
|
||||
# If we're on an old SQLite we emulate the above logic by first
|
||||
# clearing out any existing stale locks and then upserting.
|
||||
|
||||
def _try_acquire_lock_emulated_txn(txn: LoggingTransaction) -> bool:
|
||||
sql = """
|
||||
DELETE FROM worker_locks
|
||||
WHERE
|
||||
lock_name = ?
|
||||
AND lock_key = ?
|
||||
AND (last_renewed_ts < ? OR instance_name = ?)
|
||||
"""
|
||||
txn.execute(
|
||||
sql,
|
||||
(lock_name, lock_key, now - _LOCK_TIMEOUT_MS, self._instance_name),
|
||||
)
|
||||
|
||||
inserted = self.db_pool.simple_upsert_txn_emulated(
|
||||
txn,
|
||||
table="worker_locks",
|
||||
keyvalues={
|
||||
"lock_name": lock_name,
|
||||
"lock_key": lock_key,
|
||||
},
|
||||
values={},
|
||||
insertion_values={
|
||||
"token": token,
|
||||
"last_renewed_ts": self._clock.time_msec(),
|
||||
"instance_name": self._instance_name,
|
||||
},
|
||||
)
|
||||
|
||||
return inserted
|
||||
|
||||
did_lock = await self.db_pool.runInteraction(
|
||||
"try_acquire_lock_emulated", _try_acquire_lock_emulated_txn
|
||||
)
|
||||
|
||||
# We only acquired the lock if we inserted or updated the table.
|
||||
return bool(txn.rowcount)
|
||||
|
||||
did_lock = await self.db_pool.runInteraction(
|
||||
"try_acquire_lock",
|
||||
_try_acquire_lock_txn,
|
||||
# We can autocommit here as we're executing a single query, this
|
||||
# will avoid serialization errors.
|
||||
db_autocommit=True,
|
||||
)
|
||||
if not did_lock:
|
||||
return None
|
||||
if not did_lock:
|
||||
return None
|
||||
|
||||
lock = Lock(
|
||||
self._reactor,
|
||||
|
||||
@@ -812,7 +812,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
# FIXME: This shouldn't invalidate the whole cache
txn.call_after(self._get_linearized_receipts_for_room.invalidate, (room_id,))

self.db_pool.simple_upsert_txn(
self.db_pool.simple_delete_txn(
txn,
table="receipts_graph",
keyvalues={
@@ -820,13 +820,17 @@ class ReceiptsWorkerStore(SQLBaseStore):
"receipt_type": receipt_type,
"user_id": user_id,
},
)
self.db_pool.simple_insert_txn(
txn,
table="receipts_graph",
values={
"room_id": room_id,
"receipt_type": receipt_type,
"user_id": user_id,
"event_ids": json_encoder.encode(event_ids),
"data": json_encoder.encode(data),
},
# receipts_graph has a unique constraint on
# (user_id, room_id, receipt_type), so no need to lock
lock=False,
)

@@ -175,7 +175,6 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
|
||||
"is_guest",
|
||||
"admin",
|
||||
"consent_version",
|
||||
"consent_ts",
|
||||
"consent_server_notice_sent",
|
||||
"appservice_id",
|
||||
"creation_ts",
|
||||
@@ -2228,10 +2227,7 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
|
||||
txn,
|
||||
table="users",
|
||||
keyvalues={"name": user_id},
|
||||
updatevalues={
|
||||
"consent_version": consent_version,
|
||||
"consent_ts": self._clock.time_msec(),
|
||||
},
|
||||
updatevalues={"consent_version": consent_version},
|
||||
)
|
||||
self._invalidate_cache_and_stream(txn, self.get_user_by_id, (user_id,))
|
||||
|
||||
|
||||
@@ -55,7 +55,6 @@ from synapse.types import JsonDict, PersistedEventPosition, StateMap, get_domain
|
||||
from synapse.util.async_helpers import Linearizer
|
||||
from synapse.util.caches import intern_string
|
||||
from synapse.util.caches.descriptors import _CacheContext, cached, cachedList
|
||||
from synapse.util.cancellation import cancellable
|
||||
from synapse.util.iterutils import batch_iter
|
||||
from synapse.util.metrics import Measure
|
||||
|
||||
@@ -192,15 +191,8 @@ class RoomMemberWorkerStore(EventsWorkerStore):
|
||||
(aka. with the lowest depth). This is done to match the sort in
|
||||
`get_current_hosts_in_room()` and so we can re-use the cache but it's
|
||||
not horrible to have here either.
|
||||
|
||||
Uses `m.room.member`s in the room state at the current forward extremities to
|
||||
determine which users are in the room.
|
||||
|
||||
Will return inaccurate results for rooms with partial state, since the state for
|
||||
the forward extremities of those rooms will exclude most members. We may also
|
||||
calculate room state incorrectly for such rooms and believe that a member is or
|
||||
is not in the room when the opposite is true.
|
||||
"""
|
||||
|
||||
return await self.db_pool.runInteraction(
|
||||
"get_users_in_room", self.get_users_in_room_txn, room_id
|
||||
)
|
||||
@@ -778,7 +770,6 @@ class RoomMemberWorkerStore(EventsWorkerStore):
|
||||
_get_users_server_still_shares_room_with_txn,
|
||||
)
|
||||
|
||||
@cancellable
|
||||
async def get_rooms_for_user(
|
||||
self, user_id: str, on_invalidate: Optional[Callable[[], None]] = None
|
||||
) -> FrozenSet[str]:
|
||||
@@ -1029,14 +1020,6 @@ class RoomMemberWorkerStore(EventsWorkerStore):
|
||||
longest is good because they're most likely to have anything we ask
|
||||
about.
|
||||
|
||||
Uses `m.room.member`s in the room state at the current forward extremities to
|
||||
determine which hosts are in the room.
|
||||
|
||||
Will return inaccurate results for rooms with partial state, since the state for
|
||||
the forward extremities of those rooms will exclude most members. We may also
|
||||
calculate room state incorrectly for such rooms and believe that a host is or
|
||||
is not in the room when the opposite is true.
|
||||
|
||||
Returns:
|
||||
Returns a list of servers sorted by longest in the room first. (aka.
|
||||
sorted by join with the lowest depth first).
|
||||
@@ -1059,8 +1042,6 @@ class RoomMemberWorkerStore(EventsWorkerStore):
|
||||
# We use a `Set` just for fast lookups
|
||||
domain_set: Set[str] = set()
|
||||
for u in users:
|
||||
if ":" not in u:
|
||||
continue
|
||||
domain = get_domain_from_id(u)
|
||||
if domain not in domain_set:
|
||||
domain_set.add(domain)
|
||||
@@ -1094,8 +1075,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
|
||||
ORDER BY min(e.depth) ASC;
|
||||
"""
|
||||
txn.execute(sql, (room_id,))
|
||||
# `server_domain` will be `NULL` for malformed MXIDs with no colons.
|
||||
return [d for d, in txn if d is not None]
|
||||
return [d for d, in txn]
|
||||
|
||||
return await self.db_pool.runInteraction(
|
||||
"get_current_hosts_in_room", get_current_hosts_in_room_txn
|
||||
|
||||
@@ -23,7 +23,6 @@ from synapse.api.errors import NotFoundError, UnsupportedRoomVersionError
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.logging.opentracing import trace
from synapse.storage._base import SQLBaseStore
from synapse.storage.database import (
DatabasePool,
@@ -37,7 +36,6 @@ from synapse.storage.state import StateFilter
from synapse.types import JsonDict, JsonMapping, StateMap
from synapse.util.caches import intern_string
from synapse.util.caches.descriptors import cached, cachedList
from synapse.util.cancellation import cancellable
from synapse.util.iterutils import batch_iter

if TYPE_CHECKING:
@@ -144,7 +142,6 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):

return room_version

@trace
async def get_metadata_for_events(
self, event_ids: Collection[str]
) -> Dict[str, EventMetadata]:
@@ -284,7 +281,6 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
)

# FIXME: how should this be cached?
@cancellable
async def get_partial_filtered_current_state_ids(
self, room_id: str, state_filter: Optional[StateFilter] = None
) -> StateMap[str]:
@@ -446,41 +446,59 @@ class StatsStore(StateDeltasStore):
absolutes: Absolute (set) fields
additive_relatives: Fields that will be added onto if existing row present.
"""
absolute_updates = [
"%(field)s = EXCLUDED.%(field)s" % {"field": field}
for field in absolutes.keys()
]
if self.database_engine.can_native_upsert:
absolute_updates = [
"%(field)s = EXCLUDED.%(field)s" % {"field": field}
for field in absolutes.keys()
]

relative_updates = [
"%(field)s = EXCLUDED.%(field)s + COALESCE(%(table)s.%(field)s, 0)"
% {"table": table, "field": field}
for field in additive_relatives.keys()
]
relative_updates = [
"%(field)s = EXCLUDED.%(field)s + COALESCE(%(table)s.%(field)s, 0)"
% {"table": table, "field": field}
for field in additive_relatives.keys()
]

insert_cols = []
qargs = []
insert_cols = []
qargs = []

for (key, val) in chain(
keyvalues.items(), absolutes.items(), additive_relatives.items()
):
insert_cols.append(key)
qargs.append(val)
for (key, val) in chain(
keyvalues.items(), absolutes.items(), additive_relatives.items()
):
insert_cols.append(key)
qargs.append(val)

sql = """
INSERT INTO %(table)s (%(insert_cols_cs)s)
VALUES (%(insert_vals_qs)s)
ON CONFLICT (%(key_columns)s) DO UPDATE SET %(updates)s
""" % {
"table": table,
"insert_cols_cs": ", ".join(insert_cols),
"insert_vals_qs": ", ".join(
["?"] * (len(keyvalues) + len(absolutes) + len(additive_relatives))
),
"key_columns": ", ".join(keyvalues),
"updates": ", ".join(chain(absolute_updates, relative_updates)),
}
sql = """
INSERT INTO %(table)s (%(insert_cols_cs)s)
VALUES (%(insert_vals_qs)s)
ON CONFLICT (%(key_columns)s) DO UPDATE SET %(updates)s
""" % {
"table": table,
"insert_cols_cs": ", ".join(insert_cols),
"insert_vals_qs": ", ".join(
["?"] * (len(keyvalues) + len(absolutes) + len(additive_relatives))
),
"key_columns": ", ".join(keyvalues),
"updates": ", ".join(chain(absolute_updates, relative_updates)),
}

txn.execute(sql, qargs)
txn.execute(sql, qargs)
else:
self.database_engine.lock_table(txn, table)
retcols = list(chain(absolutes.keys(), additive_relatives.keys()))
current_row = self.db_pool.simple_select_one_txn(
txn, table, keyvalues, retcols, allow_none=True
)
if current_row is None:
merged_dict = {**keyvalues, **absolutes, **additive_relatives}
self.db_pool.simple_insert_txn(txn, table, merged_dict)
else:
for (key, val) in additive_relatives.items():
if current_row[key] is None:
current_row[key] = val
else:
current_row[key] += val
current_row.update(absolutes)
self.db_pool.simple_update_one_txn(txn, table, keyvalues, current_row)

async def _calculate_and_set_initial_state_for_room(self, room_id: str) -> None:
"""Calculate and insert an entry into room_stats_current.
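Editor's note: as an illustration of the upsert the native branch above constructs, here is a hedged, self-contained sketch; the table and field names are only illustrative. Absolute fields are overwritten from the incoming row, additive fields are accumulated on top of any existing value. The emulated branch instead locks the table and does a select-then-insert-or-update.

table = "room_stats_current"
keyvalues = {"room_id": "!room:example.com"}
absolutes = {"current_state_events": 42}
additive_relatives = {"total_events": 5}

absolute_updates = [f"{f} = EXCLUDED.{f}" for f in absolutes]
relative_updates = [
    f"{f} = EXCLUDED.{f} + COALESCE({table}.{f}, 0)" for f in additive_relatives
]
insert_cols = list(keyvalues) + list(absolutes) + list(additive_relatives)

sql = (
    f"INSERT INTO {table} ({', '.join(insert_cols)}) "
    f"VALUES ({', '.join('?' * len(insert_cols))}) "
    f"ON CONFLICT ({', '.join(keyvalues)}) DO UPDATE SET "
    + ", ".join(absolute_updates + relative_updates)
)
print(sql)
# Output (wrapped here for readability):
#   INSERT INTO room_stats_current (room_id, current_state_events, total_events)
#   VALUES (?, ?, ?) ON CONFLICT (room_id) DO UPDATE SET
#   current_state_events = EXCLUDED.current_state_events,
#   total_events = EXCLUDED.total_events + COALESCE(room_stats_current.total_events, 0)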
@@ -72,7 +72,6 @@ from synapse.storage.util.id_generators import MultiWriterIdGenerator
from synapse.types import PersistedEventPosition, RoomStreamToken
from synapse.util.caches.descriptors import cached
from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.cancellation import cancellable

if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -598,7 +597,6 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):

return ret, key

@cancellable
async def get_membership_changes_for_user(
self,
user_id: str,
@@ -221,15 +221,25 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore):
retry_interval: how long until next retry in ms
"""

await self.db_pool.runInteraction(
"set_destination_retry_timings",
self._set_destination_retry_timings_native,
destination,
failure_ts,
retry_last_ts,
retry_interval,
db_autocommit=True, # Safe as it's a single upsert
)
if self.database_engine.can_native_upsert:
await self.db_pool.runInteraction(
"set_destination_retry_timings",
self._set_destination_retry_timings_native,
destination,
failure_ts,
retry_last_ts,
retry_interval,
db_autocommit=True, # Safe as its a single upsert
)
else:
await self.db_pool.runInteraction(
"set_destination_retry_timings",
self._set_destination_retry_timings_emulated,
destination,
failure_ts,
retry_last_ts,
retry_interval,
)

def _set_destination_retry_timings_native(
self,
@@ -239,6 +249,8 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore):
retry_last_ts: int,
retry_interval: int,
) -> None:
assert self.database_engine.can_native_upsert

# Upsert retry time interval if retry_interval is zero (i.e. we're
# resetting it) or greater than the existing retry interval.
#
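Editor's note: the comment at the end of the hunk above describes when the stored retry interval should actually be overwritten. Purely as a plain-Python restatement of that rule (not the actual SQL, and with an invented helper name), assuming a zero interval means "reset the backoff":

from typing import Optional

def merge_retry_interval(existing: Optional[int], new: int) -> int:
    """Which retry interval (ms) should end up stored for a destination."""
    if new == 0:
        return 0  # zero means we are resetting the backoff
    if existing is None:
        return new  # no previous record for this destination
    return max(existing, new)  # otherwise only ever grow the interval

assert merge_retry_interval(None, 5_000) == 5_000
assert merge_retry_interval(60_000, 5_000) == 60_000  # shorter interval is ignored
assert merge_retry_interval(60_000, 0) == 0  # explicit reset wins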
@@ -31,7 +31,6 @@ from synapse.storage.util.sequence import build_sequence_generator
from synapse.types import MutableStateMap, StateKey, StateMap
from synapse.util.caches.descriptors import cached
from synapse.util.caches.dictionary_cache import DictionaryCache
from synapse.util.cancellation import cancellable

if TYPE_CHECKING:
from synapse.server import HomeServer
@@ -157,7 +156,6 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
"get_state_group_delta", _get_state_group_delta_txn
)

@cancellable
async def _get_state_groups_from_groups(
self, groups: List[int], state_filter: StateFilter
) -> Dict[int, StateMap[str]]:
@@ -237,7 +235,6 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):

return state_filter.filter_state(state_dict_ids), not missing_types

@cancellable
async def _get_state_for_groups(
self, groups: Iterable[int], state_filter: Optional[StateFilter] = None
) -> Dict[int, MutableStateMap[str]]:
@@ -43,6 +43,14 @@ class BaseDatabaseEngine(Generic[ConnectionType], metaclass=abc.ABCMeta):
def single_threaded(self) -> bool:
...

@property
@abc.abstractmethod
def can_native_upsert(self) -> bool:
"""
Do we support native UPSERTs?
"""
...

@property
@abc.abstractmethod
def supports_using_any_list(self) -> bool:
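Editor's note: the abstract property above is part of a capability-flag pattern, where each engine advertises what it supports and the storage layer branches on it. A toy, self-contained sketch of that pattern (class and function names are invented for illustration):

import abc

class Engine(abc.ABC):
    @property
    @abc.abstractmethod
    def can_native_upsert(self) -> bool:
        """Do we support native UPSERTs?"""

class Postgres(Engine):
    @property
    def can_native_upsert(self) -> bool:
        return True  # PostgreSQL has supported ON CONFLICT since 9.5

def upsert(engine: Engine) -> str:
    # The storage layer only asks the capability flag; it never sniffs the engine type.
    return "native ON CONFLICT" if engine.can_native_upsert else "emulated lock + update"

print(upsert(Postgres()))  # native ON CONFLICT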
@@ -158,6 +158,13 @@ class PostgresEngine(BaseDatabaseEngine[psycopg2.extensions.connection]):
cursor.close()
db_conn.commit()

@property
def can_native_upsert(self) -> bool:
"""
Can we use native UPSERTs?
"""
return True

@property
def supports_using_any_list(self) -> bool:
"""Do we support using `a = ANY(?)` and passing a list"""
@@ -48,6 +48,14 @@ class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection]):
def single_threaded(self) -> bool:
return True

@property
def can_native_upsert(self) -> bool:
"""
Do we support native UPSERTs? This requires SQLite3 3.24+, plus some
more work we haven't done yet to tell what was inserted vs updated.
"""
return sqlite3.sqlite_version_info >= (3, 24, 0)

@property
def supports_using_any_list(self) -> bool:
"""Do we support using `a = ANY(?)` and passing a list"""
@@ -62,11 +70,12 @@ class Sqlite3Engine(BaseDatabaseEngine[sqlite3.Connection]):
self, db_conn: sqlite3.Connection, allow_outdated_version: bool = False
) -> None:
if not allow_outdated_version:
version = sqlite3.sqlite_version_info
# Synapse is untested against older SQLite versions, and we don't want
# to let users upgrade to a version of Synapse with broken support for their
# sqlite version, because it risks leaving them with a half-upgraded db.
if sqlite3.sqlite_version_info < (3, 27, 0):
raise RuntimeError("Synapse requires sqlite 3.27 or above.")
if version < (3, 22, 0):
raise RuntimeError("Synapse requires sqlite 3.22 or above.")

def check_new_database(self, txn: Cursor) -> None:
"""Gets called when setting up a brand new database. This allows us to
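Editor's note: the two thresholds in the hunks above are easy to conflate. One side requires SQLite 3.27 outright; the other accepts 3.22 as the minimum and treats 3.24 (where SQLite gained INSERT ... ON CONFLICT) as the cut-off for native upserts. A small standalone check, assuming those same version cut-offs:

import sqlite3

MINIMUM_SQLITE = (3, 22, 0)
NATIVE_UPSERT_SQLITE = (3, 24, 0)  # INSERT ... ON CONFLICT landed in SQLite 3.24

version = sqlite3.sqlite_version_info
if version < MINIMUM_SQLITE:
    raise RuntimeError("Synapse requires sqlite 3.22 or above.")

can_native_upsert = version >= NATIVE_UPSERT_SQLITE
print(f"sqlite {sqlite3.sqlite_version}: native upsert supported = {can_native_upsert}")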
@@ -1,16 +0,0 @@
/* Copyright 2022 The Matrix.org Foundation C.I.C
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

ALTER TABLE users ADD consent_ts bigint;
Some files were not shown because too many files have changed in this diff.