Compare commits
2 Commits
release-v1 ... erikj/fede

| Author | SHA1 | Date |
|---|---|---|
|  | 0c2ab9c90a |  |
|  | a77aab60ae |  |

68 .github/workflows/docs.yaml (vendored)
@@ -7,8 +7,6 @@ on:
      - develop
      # For documentation specific to a release
      - 'release-v*'
      # stable docs
      - master

  workflow_dispatch:
@@ -25,42 +23,42 @@ jobs:
          mdbook-version: '0.4.9'

      - name: Build the documentation
        # mdbook will only create an index.html if we're including docs/README.md in SUMMARY.md.
        # However, we're using docs/README.md for other purposes and need to pick a new page
        # as the default. Let's opt for the welcome page instead.
        run: |
          mdbook build
          cp book/welcome_and_overview.html book/index.html
        run: mdbook build

      # Figure out the target directory.
      #
      # The target directory depends on the name of the branch
      #
      - name: Get the target directory name
        id: vars
        run: |
          # first strip the 'refs/heads/' prefix with some shell foo
          branch="${GITHUB_REF#refs/heads/}"

          case $branch in
              release-*)
                  # strip 'release-' from the name for release branches.
                  branch="${branch#release-}"
                  ;;
              master)
                  # deploy to "latest" for the master branch.
                  branch="latest"
                  ;;
          esac

          # finally, set the 'branch-version' var.
          echo "::set-output name=branch-version::$branch"

      # Deploy to the target directory.
      - name: Deploy to gh pages
      # Deploy to the latest documentation directories
      - name: Deploy latest documentation
        uses: peaceiris/actions-gh-pages@068dc23d9710f1ba62e86896f84735d869951305 # v3.8.0
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          keep_files: true
          publish_dir: ./book
          destination_dir: ./${{ steps.vars.outputs.branch-version }}
          destination_dir: ./develop

      - name: Get the current Synapse version
        id: vars
        # The $GITHUB_REF value for a branch looks like `refs/heads/release-v1.2`. We do some
        # shell magic to remove the "refs/heads/release-v" bit from this, to end up with "1.2",
        # our major/minor version number, and set this to a var called `branch-version`.
        #
        # We then use some python to get Synapse's full version string, which may look
        # like "1.2.3rc4". We set this to a var called `synapse-version`. We use this
        # to determine if this release is still an RC, and if so block deployment.
        run: |
          echo ::set-output name=branch-version::${GITHUB_REF#refs/heads/release-v}
          echo ::set-output name=synapse-version::`python3 -c 'import synapse; print(synapse.__version__)'`

      # Deploy to the version-specific directory
      - name: Deploy release-specific documentation
        # We only carry out this step if we're running on a release branch,
        # and the current Synapse version does not have "rc" in the name.
        #
        # The result is that only full releases are deployed, but can be
        # updated if the release branch gets retroactive fixes.
        if: ${{ startsWith( github.ref, 'refs/heads/release-v' ) && !contains( steps.vars.outputs.synapse-version, 'rc') }}
        uses: peaceiris/actions-gh-pages@v3
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          keep_files: true
          publish_dir: ./book
          # The resulting documentation will end up in a directory named `vX.Y`.
          destination_dir: ./v${{ steps.vars.outputs.branch-version }}
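The directory-name logic above leans on POSIX shell parameter expansion. As a quick illustrative sketch (the ref value below is hypothetical), `${var#pattern}` strips the shortest leading match of `pattern`:

```sh
GITHUB_REF="refs/heads/release-v1.38"    # hypothetical ref, for illustration
branch="${GITHUB_REF#refs/heads/}"       # -> "release-v1.38"
branch="${branch#release-}"              # -> "v1.38" (the release-* case above)
echo "::set-output name=branch-version::$branch"
```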
90 .github/workflows/release-artifacts.yml (vendored)
@@ -1,90 +0,0 @@
# GitHub actions workflow which builds the release artifacts.

name: Build release artifacts

on:
  push:
    # we build on develop and release branches to (hopefully) get early warning
    # of things breaking
    branches: ["develop", "release-*"]

    # we also rebuild on tags, so that we can be sure of picking the artifacts
    # from the right tag.
    tags: ["v*"]

permissions:
  contents: write

jobs:
  # first get the list of distros to build for.
  get-distros:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
      - id: set-distros
        run: |
          echo "::set-output name=distros::$(scripts-dev/build_debian_packages --show-dists-json)"
    # map the step outputs to job outputs
    outputs:
      distros: ${{ steps.set-distros.outputs.distros }}

  # now build the packages with a matrix build.
  build-debs:
    needs: get-distros
    name: "Build .deb packages"
    runs-on: ubuntu-latest
    strategy:
      matrix:
        distro: ${{ fromJson(needs.get-distros.outputs.distros) }}

    steps:
      - uses: actions/checkout@v2
        with:
          path: src
      - uses: actions/setup-python@v2
      - run: ./src/scripts-dev/build_debian_packages "${{ matrix.distro }}"
      - uses: actions/upload-artifact@v2
        with:
          name: debs
          path: debs/*

  build-sdist:
    name: "Build pypi distribution files"
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
      - run: pip install wheel
      - run: |
          python setup.py sdist bdist_wheel
      - uses: actions/upload-artifact@v2
        with:
          name: python-dist
          path: dist/*

  # if it's a tag, create a release and attach the artifacts to it
  attach-assets:
    name: "Attach assets to release"
    if: startsWith(github.ref, 'refs/tags/')
    needs:
      - build-debs
      - build-sdist
    runs-on: ubuntu-latest
    steps:
      - name: Download all workflow run artifacts
        uses: actions/download-artifact@v2
      - name: Build a tarball for the debs
        run: tar -cvJf debs.tar.xz debs
      - name: Attach to release
        uses: softprops/action-gh-release@a929a66f232c1b11af63782948aa2210f981808a # PR#109
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          files: |
            python-dist/*
            debs.tar.xz
          # if it's not already published, keep the release as a draft.
          draft: true
          # mark it as a prerelease if the tag contains 'rc'.
          prerelease: ${{ contains(github.ref, 'rc') }}
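For context, the handoff between `get-distros` and `build-debs` above works because the step output is a JSON array that `fromJson()` re-parses to drive the matrix. A sketch of that contract (the distro names here are illustrative assumptions, not the script's actual output):

```sh
# The set-distros step must emit a single-line JSON array, e.g.:
echo "::set-output name=distros::[\"debian:buster\", \"ubuntu:focal\"]"
# fromJson() then expands this into one build-debs job per entry.
```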
129 CHANGES.md
@@ -1,120 +1,10 @@
Synapse 1.38.1 (2021-07-22)
===========================

Bugfixes
--------

- Always include `device_one_time_keys_count` key in `/sync` response to work around a bug in Element Android that broke encryption for new devices. ([\#10457](https://github.com/matrix-org/synapse/issues/10457))


Synapse 1.38.0 (2021-07-13)
===========================

This release includes a database schema update which could result in elevated disk usage. See the [upgrade notes](https://matrix-org.github.io/synapse/develop/upgrade#upgrading-to-v1380) for more information.

No significant changes since 1.38.0rc3.


Synapse 1.38.0rc3 (2021-07-13)
Synapse 1.37.0rc1 (2021-06-24)
==============================

Internal Changes
----------------

- Build the Debian packages in CI. ([\#10247](https://github.com/matrix-org/synapse/issues/10247), [\#10379](https://github.com/matrix-org/synapse/issues/10379))


Synapse 1.38.0rc2 (2021-07-09)
==============================

Bugfixes
--------

- Fix bug where inbound federation in a room could be delayed due to not correctly dropping a lock. Introduced in v1.37.1. ([\#10336](https://github.com/matrix-org/synapse/issues/10336))


Improved Documentation
----------------------

- Update links to documentation in the sample config. Contributed by @dklimpel. ([\#10287](https://github.com/matrix-org/synapse/issues/10287))
- Fix broken links in [INSTALL.md](INSTALL.md). Contributed by @dklimpel. ([\#10331](https://github.com/matrix-org/synapse/issues/10331))


Synapse 1.38.0rc1 (2021-07-06)
==============================

Features
--------

- Implement refresh tokens as specified by [MSC2918](https://github.com/matrix-org/matrix-doc/pull/2918). ([\#9450](https://github.com/matrix-org/synapse/issues/9450))
- Add support for evicting cache entries based on last access time. ([\#10205](https://github.com/matrix-org/synapse/issues/10205))
- Omit empty fields from the `/sync` response. Contributed by @deepbluev7. ([\#10214](https://github.com/matrix-org/synapse/issues/10214))
- Improve validation on federation `send_{join,leave,knock}` endpoints. ([\#10225](https://github.com/matrix-org/synapse/issues/10225), [\#10243](https://github.com/matrix-org/synapse/issues/10243))
- Add SSO `external_ids` to the Query User Account admin API. ([\#10261](https://github.com/matrix-org/synapse/issues/10261))
- Mark events received over federation which fail a spam check as "soft-failed". ([\#10263](https://github.com/matrix-org/synapse/issues/10263))
- Add metrics for new inbound federation staging area. ([\#10284](https://github.com/matrix-org/synapse/issues/10284))
- Add script to print information about recently registered users. ([\#10290](https://github.com/matrix-org/synapse/issues/10290))


Bugfixes
--------

- Fix a long-standing bug which meant that invite rejections and knocks were not sent out over federation in a timely manner. ([\#10223](https://github.com/matrix-org/synapse/issues/10223))
- Fix a bug introduced in v1.26.0 where only users who have set profile information could be deactivated with erasure enabled. ([\#10252](https://github.com/matrix-org/synapse/issues/10252))
- Fix a long-standing bug where Synapse would return errors after 2<sup>31</sup> events were handled by the server. ([\#10264](https://github.com/matrix-org/synapse/issues/10264), [\#10267](https://github.com/matrix-org/synapse/issues/10267), [\#10282](https://github.com/matrix-org/synapse/issues/10282), [\#10286](https://github.com/matrix-org/synapse/issues/10286), [\#10291](https://github.com/matrix-org/synapse/issues/10291), [\#10314](https://github.com/matrix-org/synapse/issues/10314), [\#10326](https://github.com/matrix-org/synapse/issues/10326))
- Fix the prometheus `synapse_federation_server_pdu_process_time` metric. Broke in v1.37.1. ([\#10279](https://github.com/matrix-org/synapse/issues/10279))
- Ensure that inbound events from federation that were being processed when Synapse was restarted get promptly processed on start up. ([\#10303](https://github.com/matrix-org/synapse/issues/10303))


Improved Documentation
----------------------

- Move the upgrade notes to [docs/upgrade.md](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md) and convert them to markdown. ([\#10166](https://github.com/matrix-org/synapse/issues/10166))
- Choose Welcome & Overview as the default page for synapse documentation website. ([\#10242](https://github.com/matrix-org/synapse/issues/10242))
- Adjust the URL in the README.rst file to point to irc.libera.chat. ([\#10258](https://github.com/matrix-org/synapse/issues/10258))
- Fix homeserver config option name in presence router documentation. ([\#10288](https://github.com/matrix-org/synapse/issues/10288))
- Fix link pointing at the wrong section in the modules documentation page. ([\#10302](https://github.com/matrix-org/synapse/issues/10302))


Internal Changes
----------------

- Drop `Origin` and `Accept` from the value of the `Access-Control-Allow-Headers` response header. ([\#10114](https://github.com/matrix-org/synapse/issues/10114))
- Add type hints to the federation servlets. ([\#10213](https://github.com/matrix-org/synapse/issues/10213))
- Improve the reliability of auto-joining remote rooms. ([\#10237](https://github.com/matrix-org/synapse/issues/10237))
- Update the release script to use the semver terminology and determine the release branch based on the next version. ([\#10239](https://github.com/matrix-org/synapse/issues/10239))
- Fix type hints for computing auth events. ([\#10253](https://github.com/matrix-org/synapse/issues/10253))
- Improve the performance of the spaces summary endpoint by only recursing into spaces (and not rooms in general). ([\#10256](https://github.com/matrix-org/synapse/issues/10256))
- Move event authentication methods from `Auth` to `EventAuthHandler`. ([\#10268](https://github.com/matrix-org/synapse/issues/10268))
- Re-enable a SyTest after it has been fixed. ([\#10292](https://github.com/matrix-org/synapse/issues/10292))


Synapse 1.37.1 (2021-06-30)
===========================

This release resolves issues (such as [#9490](https://github.com/matrix-org/synapse/issues/9490)) where one busy room could cause head-of-line blocking, starving Synapse from processing events in other rooms, and causing all federated traffic to fall behind. Synapse 1.37.1 processes inbound federation traffic asynchronously, ensuring that one busy room won't impact others. Please upgrade to Synapse 1.37.1 as soon as possible, in order to increase resilience to other traffic spikes.

No significant changes since v1.37.1rc1.


Synapse 1.37.1rc1 (2021-06-29)
==============================

Features
--------

- Handle inbound events from federation asynchronously. ([\#10269](https://github.com/matrix-org/synapse/issues/10269), [\#10272](https://github.com/matrix-org/synapse/issues/10272))


Synapse 1.37.0 (2021-06-29)
===========================

This release deprecates the current spam checker interface. See the [upgrade notes](https://matrix-org.github.io/synapse/develop/upgrade#deprecation-of-the-current-spam-checker-interface) for more information on how to update to the new generic module interface.

This release also removes support for fetching and renewing TLS certificates using the ACME v1 protocol, which has been fully decommissioned by Let's Encrypt on June 1st 2021. Admins previously using this feature should use a [reverse proxy](https://matrix-org.github.io/synapse/develop/reverse_proxy.html) to handle TLS termination, or use an external ACME client (such as [certbot](https://certbot.eff.org/)) to retrieve a certificate and key and provide them to Synapse using the `tls_certificate_path` and `tls_private_key_path` configuration settings.

Synapse 1.37.0rc1 (2021-06-24)
==============================

Features
--------

@@ -1266,10 +1156,7 @@ Crucially, this means __we will not produce .deb packages for Debian 9 (Stretch)

The website https://endoflife.date/ has convenient summaries of the support schedules for projects like [Python](https://endoflife.date/python) and [PostgreSQL](https://endoflife.date/postgresql).

If you are unable to upgrade your environment to a supported version of Python or
Postgres, we encourage you to consider using the
[Synapse Docker images](https://matrix-org.github.io/synapse/latest/setup/installation.html#docker-images-and-ansible-playbooks)
instead.
If you are unable to upgrade your environment to a supported version of Python or Postgres, we encourage you to consider using the [Synapse Docker images](./INSTALL.md#docker-images-and-ansible-playbooks) instead.

### Transition Period

@@ -1412,11 +1299,11 @@ To upgrade Synapse along with the cryptography package:
* Administrators using the [`matrix.org` Docker
  image](https://hub.docker.com/r/matrixdotorg/synapse/) or the [Debian/Ubuntu
  packages from
  `matrix.org`](https://matrix-org.github.io/synapse/latest/setup/installation.html#matrixorg-packages)
  `matrix.org`](https://github.com/matrix-org/synapse/blob/master/INSTALL.md#matrixorg-packages)
  should ensure that they have version 1.24.0 or 1.23.1 installed: these images include
  the updated packages.
* Administrators who have [installed Synapse from
  source](https://matrix-org.github.io/synapse/latest/setup/installation.html#installing-from-source)
  source](https://github.com/matrix-org/synapse/blob/master/INSTALL.md#installing-from-source)
  should upgrade the cryptography package within their virtualenv by running:
  ```sh
  <path_to_virtualenv>/bin/pip install 'cryptography>=3.3'
  ```
@@ -1458,11 +1345,11 @@ To upgrade Synapse along with the cryptography package:
* Administrators using the [`matrix.org` Docker
  image](https://hub.docker.com/r/matrixdotorg/synapse/) or the [Debian/Ubuntu
  packages from
  `matrix.org`](https://matrix-org.github.io/synapse/latest/setup/installation.html#matrixorg-packages)
  `matrix.org`](https://github.com/matrix-org/synapse/blob/master/INSTALL.md#matrixorg-packages)
  should ensure that they have version 1.24.0 or 1.23.1 installed: these images include
  the updated packages.
* Administrators who have [installed Synapse from
  source](https://matrix-org.github.io/synapse/latest/setup/installation.html#installing-from-source)
  source](https://github.com/matrix-org/synapse/blob/master/INSTALL.md#installing-from-source)
  should upgrade the cryptography package within their virtualenv by running:
  ```sh
  <path_to_virtualenv>/bin/pip install 'cryptography>=3.3'
  ```
@@ -3041,11 +2928,11 @@ installation remains secure.
* Administrators using the [`matrix.org` Docker
  image](https://hub.docker.com/r/matrixdotorg/synapse/) or the [Debian/Ubuntu
  packages from
  `matrix.org`](https://matrix-org.github.io/synapse/latest/setup/installation.html#matrixorg-packages)
  `matrix.org`](https://github.com/matrix-org/synapse/blob/master/INSTALL.md#matrixorg-packages)
  should ensure that they have version 1.12.0 installed: these images include
  Twisted 20.3.0.
* Administrators who have [installed Synapse from
  source](https://matrix-org.github.io/synapse/latest/setup/installation.html#installing-from-source)
  source](https://github.com/matrix-org/synapse/blob/master/INSTALL.md#installing-from-source)
  should upgrade Twisted within their virtualenv by running:
  ```sh
  <path_to_virtualenv>/bin/pip install 'Twisted>=20.3.0'
  ```
594 INSTALL.md
@@ -1,7 +1,593 @@
# Installation Instructions

This document has moved to the
[Synapse documentation website](https://matrix-org.github.io/synapse/latest/setup/installation.html).
Please update your links.
There are 3 steps to follow under **Installation Instructions**.

The markdown source is available in [docs/setup/installation.md](docs/setup/installation.md).
- [Installation Instructions](#installation-instructions)
  - [Choosing your server name](#choosing-your-server-name)
  - [Installing Synapse](#installing-synapse)
    - [Installing from source](#installing-from-source)
      - [Platform-specific prerequisites](#platform-specific-prerequisites)
        - [Debian/Ubuntu/Raspbian](#debianubunturaspbian)
        - [ArchLinux](#archlinux)
        - [CentOS/Fedora](#centosfedora)
        - [macOS](#macos)
        - [OpenSUSE](#opensuse)
        - [OpenBSD](#openbsd)
        - [Windows](#windows)
    - [Prebuilt packages](#prebuilt-packages)
      - [Docker images and Ansible playbooks](#docker-images-and-ansible-playbooks)
      - [Debian/Ubuntu](#debianubuntu)
        - [Matrix.org packages](#matrixorg-packages)
        - [Downstream Debian packages](#downstream-debian-packages)
        - [Downstream Ubuntu packages](#downstream-ubuntu-packages)
      - [Fedora](#fedora)
      - [OpenSUSE](#opensuse-1)
      - [SUSE Linux Enterprise Server](#suse-linux-enterprise-server)
      - [ArchLinux](#archlinux-1)
      - [Void Linux](#void-linux)
      - [FreeBSD](#freebsd)
      - [OpenBSD](#openbsd-1)
      - [NixOS](#nixos)
  - [Setting up Synapse](#setting-up-synapse)
    - [Using PostgreSQL](#using-postgresql)
    - [TLS certificates](#tls-certificates)
    - [Client Well-Known URI](#client-well-known-uri)
    - [Email](#email)
    - [Registering a user](#registering-a-user)
    - [Setting up a TURN server](#setting-up-a-turn-server)
    - [URL previews](#url-previews)
    - [Troubleshooting Installation](#troubleshooting-installation)


## Choosing your server name

It is important to choose the name for your server before you install Synapse,
because it cannot be changed later.

The server name determines the "domain" part of user-ids for users on your
server: these will all be of the format `@user:my.domain.name`. It also
determines how other matrix servers will reach yours for federation.

For a test configuration, set this to the hostname of your server. For a more
production-ready setup, you will probably want to specify your domain
(`example.com`) rather than a matrix-specific hostname here (in the same way
that your email address is probably `user@example.com` rather than
`user@email.example.com`) - but doing so may require more advanced setup: see
[Setting up Federation](docs/federate.md).

## Installing Synapse

### Installing from source

(Prebuilt packages are available for some platforms - see [Prebuilt packages](#prebuilt-packages).)

When installing from source please make sure that the [Platform-specific prerequisites](#platform-specific-prerequisites) are already installed.

System requirements:

- POSIX-compliant system (tested on Linux & OS X)
- Python 3.5.2 or later, up to Python 3.9.
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org

To install the Synapse homeserver run:

```sh
mkdir -p ~/synapse
virtualenv -p python3 ~/synapse/env
source ~/synapse/env/bin/activate
pip install --upgrade pip
pip install --upgrade setuptools
pip install matrix-synapse
```

This will download Synapse from [PyPI](https://pypi.org/project/matrix-synapse)
and install it, along with the python libraries it uses, into a virtual environment
under `~/synapse/env`. Feel free to pick a different directory if you
prefer.

This Synapse installation can then be later upgraded by using pip again with the
update flag:

```sh
source ~/synapse/env/bin/activate
pip install -U matrix-synapse
```

Before you can start Synapse, you will need to generate a configuration
file. To do this, run (in your virtualenv, as before):

```sh
cd ~/synapse
python -m synapse.app.homeserver \
    --server-name my.domain.name \
    --config-path homeserver.yaml \
    --generate-config \
    --report-stats=[yes|no]
```

... substituting an appropriate value for `--server-name`.

This command will generate you a config file that you can then customise, but it will
also generate a set of keys for you. These keys will allow your homeserver to
identify itself to other homeservers, so don't lose or delete them. It would be
wise to back them up somewhere safe. (If, for whatever reason, you do need to
change your homeserver's keys, you may find that other homeservers have the
old key cached. If you update the signing key, you should change the name of the
key in the `<server name>.signing.key` file (the second word) to something
different. See the [spec](https://matrix.org/docs/spec/server_server/latest.html#retrieving-server-keys) for more information on key management).

To actually run your new homeserver, pick a working directory for Synapse to
run (e.g. `~/synapse`), and:

```sh
cd ~/synapse
source env/bin/activate
synctl start
```

#### Platform-specific prerequisites

Synapse is written in Python but some of the libraries it uses are written in
C. So before we can install Synapse itself we need a working C compiler and the
header files for Python C extensions.

##### Debian/Ubuntu/Raspbian

Installing prerequisites on Ubuntu or Debian:

```sh
sudo apt install build-essential python3-dev libffi-dev \
    python3-pip python3-setuptools sqlite3 \
    libssl-dev virtualenv libjpeg-dev libxslt1-dev
```

##### ArchLinux

Installing prerequisites on ArchLinux:

```sh
sudo pacman -S base-devel python python-pip \
    python-setuptools python-virtualenv sqlite3
```

##### CentOS/Fedora

Installing prerequisites on CentOS or Fedora Linux:

```sh
sudo dnf install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
    libwebp-devel libxml2-devel libxslt-devel libpq-devel \
    python3-virtualenv libffi-devel openssl-devel python3-devel
sudo dnf groupinstall "Development Tools"
```

##### macOS

Installing prerequisites on macOS:

```sh
xcode-select --install
sudo easy_install pip
sudo pip install virtualenv
brew install pkg-config libffi
```

On macOS Catalina (10.15) you may need to explicitly install OpenSSL
via brew and inform `pip` about it so that `psycopg2` builds:

```sh
brew install openssl@1.1
export LDFLAGS="-L/usr/local/opt/openssl/lib"
export CPPFLAGS="-I/usr/local/opt/openssl/include"
```

##### OpenSUSE

Installing prerequisites on openSUSE:

```sh
sudo zypper in -t pattern devel_basis
sudo zypper in python-pip python-setuptools sqlite3 python-virtualenv \
    python-devel libffi-devel libopenssl-devel libjpeg62-devel
```

##### OpenBSD

A port of Synapse is available under `net/synapse`. The filesystem
underlying the homeserver directory (defaults to `/var/synapse`) has to be
mounted with `wxallowed` (cf. `mount(8)`), so creating a separate filesystem
and mounting it to `/var/synapse` should be taken into consideration.

To be able to build Synapse's dependency on python the `WRKOBJDIR`
(cf. `bsd.port.mk(5)`) for building python, too, needs to be on a filesystem
mounted with `wxallowed` (cf. `mount(8)`).

Creating a `WRKOBJDIR` for building python under `/usr/local` (which on a
default OpenBSD installation is mounted with `wxallowed`):

```sh
doas mkdir /usr/local/pobj_wxallowed
```

Assuming `PORTS_PRIVSEP=Yes` (cf. `bsd.port.mk(5)`) and `SUDO=doas` are
configured in `/etc/mk.conf`:

```sh
doas chown _pbuild:_pbuild /usr/local/pobj_wxallowed
```

Setting the `WRKOBJDIR` for building python:

```sh
echo WRKOBJDIR_lang/python/3.7=/usr/local/pobj_wxallowed \\nWRKOBJDIR_lang/python/2.7=/usr/local/pobj_wxallowed >> /etc/mk.conf
```

Building Synapse:

```sh
cd /usr/ports/net/synapse
make install
```

##### Windows

If you wish to run or develop Synapse on Windows, the Windows Subsystem For
Linux provides a Linux environment on Windows 10 which is capable of using the
Debian, Fedora, or source installation methods. More information about WSL can
be found at <https://docs.microsoft.com/en-us/windows/wsl/install-win10> for
Windows 10 and <https://docs.microsoft.com/en-us/windows/wsl/install-on-server>
for Windows Server.

### Prebuilt packages

As an alternative to installing from source, prebuilt packages are available
for a number of platforms.

#### Docker images and Ansible playbooks

There is an official synapse image available at
<https://hub.docker.com/r/matrixdotorg/synapse> which can be used with
the docker-compose file available at [contrib/docker](contrib/docker). Further
information on this including configuration options is available in the README
on hub.docker.com.

Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
Dockerfile to automate a synapse server in a single Docker image, at
<https://hub.docker.com/r/avhost/docker-matrix/tags/>

Slavi Pantaleev has created an Ansible playbook,
which installs the official Docker image of Matrix Synapse
along with many other Matrix-related services (Postgres database, Element, coturn,
ma1sd, SSL support, etc.).
For more details, see
<https://github.com/spantaleev/matrix-docker-ansible-deploy>

#### Debian/Ubuntu

##### Matrix.org packages

Matrix.org provides Debian/Ubuntu packages of the latest stable version of
Synapse via <https://packages.matrix.org/debian/>. They are available for Debian
9 (Stretch), Ubuntu 16.04 (Xenial), and later. To use them:

```sh
sudo apt install -y lsb-release wget apt-transport-https
sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg
echo "deb [signed-by=/usr/share/keyrings/matrix-org-archive-keyring.gpg] https://packages.matrix.org/debian/ $(lsb_release -cs) main" |
    sudo tee /etc/apt/sources.list.d/matrix-org.list
sudo apt update
sudo apt install matrix-synapse-py3
```

**Note**: if you followed a previous version of these instructions which
recommended using `apt-key add` to add an old key from
`https://matrix.org/packages/debian/`, you should note that this key has been
revoked. You should remove the old key with `sudo apt-key remove
C35EB17E1EAE708E6603A9B3AD0592FE47F0DF61`, and follow the above instructions to
update your configuration.

The fingerprint of the repository signing key (as shown by `gpg
/usr/share/keyrings/matrix-org-archive-keyring.gpg`) is
`AAF9AE843A7584B5A3E4CD2BCF45A512DE2DA058`.

##### Downstream Debian packages

We do not recommend using the packages from the default Debian `buster`
repository at this time, as they are old and suffer from known security
vulnerabilities. You can install the latest version of Synapse from
[our repository](#matrixorg-packages) or from `buster-backports`. Please
see the [Debian documentation](https://backports.debian.org/Instructions/)
for information on how to use backports.

If you are using Debian `sid` or testing, Synapse is available in the default
repositories and it should be possible to install it simply with:

```sh
sudo apt install matrix-synapse
```

##### Downstream Ubuntu packages

We do not recommend using the packages in the default Ubuntu repository
at this time, as they are old and suffer from known security vulnerabilities.
The latest version of Synapse can be installed from [our repository](#matrixorg-packages).

#### Fedora

Synapse is in the Fedora repositories as `matrix-synapse`:

```sh
sudo dnf install matrix-synapse
```

Oleg Girko provides Fedora RPMs at
<https://obs.infoserver.lv/project/monitor/matrix-synapse>

#### OpenSUSE

Synapse is in the OpenSUSE repositories as `matrix-synapse`:

```sh
sudo zypper install matrix-synapse
```

#### SUSE Linux Enterprise Server

Unofficial packages are built for SLES 15 in the openSUSE:Backports:SLE-15 repository at
<https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15/standard/>

#### ArchLinux

The quickest way to get up and running with ArchLinux is probably with the community package
<https://www.archlinux.org/packages/community/any/matrix-synapse/>, which should pull in most of
the necessary dependencies.

pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1):

```sh
sudo pip install --upgrade pip
```

If you encounter an error with lib bcrypt causing a Wrong ELF Class:
ELFCLASS32 (x64 Systems), you may need to reinstall py-bcrypt to correctly
compile it under the right architecture. (This should not be needed if
installing under virtualenv):

```sh
sudo pip uninstall py-bcrypt
sudo pip install py-bcrypt
```

#### Void Linux

Synapse can be found in the void repositories as 'synapse':

```sh
xbps-install -Su
xbps-install -S synapse
```

#### FreeBSD

Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from:

- Ports: `cd /usr/ports/net-im/py-matrix-synapse && make install clean`
- Packages: `pkg install py37-matrix-synapse`

#### OpenBSD

As of OpenBSD 6.7 Synapse is available as a pre-compiled binary. The filesystem
underlying the homeserver directory (defaults to `/var/synapse`) has to be
mounted with `wxallowed` (cf. `mount(8)`), so creating a separate filesystem
and mounting it to `/var/synapse` should be taken into consideration.

Installing Synapse:

```sh
doas pkg_add synapse
```

#### NixOS

Robin Lambertz has packaged Synapse for NixOS at:
<https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-synapse.nix>

## Setting up Synapse

Once you have installed synapse as above, you will need to configure it.

### Using PostgreSQL

By default Synapse uses an [SQLite](https://sqlite.org/) database and in doing so trades
performance for convenience. Almost all installations should opt to use [PostgreSQL](https://www.postgresql.org)
instead. Advantages include:

- significant performance improvements due to the superior threading and
  caching model, smarter query optimiser
- allowing the DB to be run on separate hardware

For information on how to install and use PostgreSQL in Synapse, please see
[docs/postgres.md](docs/postgres.md)

SQLite is only acceptable for testing purposes. SQLite should not be used in
a production server. Synapse will perform poorly when using
SQLite, especially when participating in large rooms.
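For orientation, a minimal PostgreSQL `database` section in `homeserver.yaml` might look like the sketch below; the host, user, and password are placeholder assumptions, and [docs/postgres.md](docs/postgres.md) remains the authoritative reference:

```yaml
database:
  name: psycopg2            # use the PostgreSQL driver
  args:
    user: synapse_user      # assumption: a role created for Synapse
    password: secretpassword
    database: synapse
    host: localhost
    cp_min: 5               # connection-pool bounds
    cp_max: 10
```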
### TLS certificates

The default configuration exposes a single HTTP port on the local
interface: `http://localhost:8008`. It is suitable for local testing,
but for any practical use, you will need Synapse's APIs to be served
over HTTPS.

The recommended way to do so is to set up a reverse proxy on port
`8448`. You can find documentation on doing so in
[docs/reverse_proxy.md](docs/reverse_proxy.md).

Alternatively, you can configure Synapse to expose an HTTPS port. To do
so, you will need to edit `homeserver.yaml`, as follows:

- First, under the `listeners` section, uncomment the configuration for the
  TLS-enabled listener. (Remove the hash sign (`#`) at the start of
  each line). The relevant lines are like this:

  ```yaml
  - port: 8448
    type: http
    tls: true
    resources:
      - names: [client, federation]
  ```

- You will also need to uncomment the `tls_certificate_path` and
  `tls_private_key_path` lines under the `TLS` section. You will need to manage
  provisioning of these certificates yourself.

If you are using your own certificate, be sure to use a `.pem` file that
includes the full certificate chain including any intermediate certificates
(for instance, if using certbot, use `fullchain.pem` as your certificate, not
`cert.pem`).

For a more detailed guide to configuring your server for federation, see
[federate.md](docs/federate.md).

### Client Well-Known URI

Setting up the client Well-Known URI is optional but if you set it up, it will
allow users to enter their full username (e.g. `@user:<server_name>`) into clients
which support well-known lookup to automatically configure the homeserver and
identity server URLs. This is useful so that users don't have to memorize or think
about the actual homeserver URL you are using.

The URL `https://<server_name>/.well-known/matrix/client` should return JSON in
the following format.

```json
{
  "m.homeserver": {
    "base_url": "https://<matrix.example.com>"
  }
}
```

It can optionally contain identity server information as well.

```json
{
  "m.homeserver": {
    "base_url": "https://<matrix.example.com>"
  },
  "m.identity_server": {
    "base_url": "https://<identity.example.com>"
  }
}
```

To work in browser based clients, the file must be served with the appropriate
Cross-Origin Resource Sharing (CORS) headers. A recommended value would be
`Access-Control-Allow-Origin: *` which would allow all browser based clients to
view it.

In nginx this would be something like:

```nginx
location /.well-known/matrix/client {
    return 200 '{"m.homeserver": {"base_url": "https://<matrix.example.com>"}}';
    default_type application/json;
    add_header Access-Control-Allow-Origin *;
}
```

You should also ensure the `public_baseurl` option in `homeserver.yaml` is set
correctly. `public_baseurl` should be set to the URL that clients will use to
connect to your server. This is the same URL you put for the `m.homeserver`
`base_url` above.

```yaml
public_baseurl: "https://<matrix.example.com>"
```
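As a quick sanity check that the file and its CORS header are being served (hypothetical server name below):

```sh
# Expect a 200 response, a JSON body, and an Access-Control-Allow-Origin header.
curl -i https://example.com/.well-known/matrix/client
```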
### Email

It is desirable for Synapse to have the capability to send email. This allows
Synapse to send password reset emails, send verifications when an email address
is added to a user's account, and send email notifications to users when they
receive new messages.

To configure an SMTP server for Synapse, modify the configuration section
headed `email`, and be sure to have at least the `smtp_host`, `smtp_port`
and `notif_from` fields filled out. You may also need to set `smtp_user`,
`smtp_pass`, and `require_transport_security`.

If email is not configured, password reset, registration and notifications via
email will be disabled.
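A minimal sketch of such an `email` section, using the fields named above; the host and credentials are placeholder assumptions, and the sample config documents the full set of options:

```yaml
email:
  smtp_host: mail.example.com   # assumption: your outbound SMTP relay
  smtp_port: 587
  smtp_user: "synapse"          # assumption: relay credentials
  smtp_pass: "changeme"
  require_transport_security: true
  notif_from: "Your %(app)s homeserver <noreply@example.com>"
```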
### Registering a user

The easiest way to create a new user is to do so from a client like [Element](https://element.io/).

Alternatively, you can do so from the command line. This can be done as follows:

1. If synapse was installed via pip, activate the virtualenv as follows (if Synapse was
   installed via a prebuilt package, `register_new_matrix_user` should already be
   on the search path):
   ```sh
   cd ~/synapse
   source env/bin/activate
   synctl start # if not already running
   ```
2. Run the following command:
   ```sh
   register_new_matrix_user -c homeserver.yaml http://localhost:8008
   ```

This will prompt you to add details for the new user, and will then connect to
the running Synapse to create the new user. For example:
```
New user localpart: erikj
Password:
Confirm password:
Make admin [no]:
Success!
```

This process uses a setting `registration_shared_secret` in
`homeserver.yaml`, which is shared between Synapse itself and the
`register_new_matrix_user` script. It doesn't matter what it is (a random
value is generated by `--generate-config`), but it should be kept secret, as
anyone with knowledge of it can register users, including admin accounts,
on your server even if `enable_registration` is `false`.

### Setting up a TURN server

For reliable VoIP calls to be routed via this homeserver, you MUST configure
a TURN server. See [docs/turn-howto.md](docs/turn-howto.md) for details.

### URL previews

Synapse includes support for previewing URLs, which is disabled by default. To
turn it on you must enable the `url_preview_enabled: True` config parameter
and explicitly specify the IP ranges that Synapse is not allowed to spider for
previewing in the `url_preview_ip_range_blacklist` configuration parameter.
This is critical from a security perspective to stop arbitrary Matrix users
spidering 'internal' URLs on your network. At the very least we recommend that
your loopback and RFC1918 IP addresses are blacklisted.
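A minimal sketch of those two settings in `homeserver.yaml`; the list below covers loopback and the RFC1918 private ranges, which is the floor recommended above (extend it for anything else internal to your network):

```yaml
url_preview_enabled: true
url_preview_ip_range_blacklist:
  - '127.0.0.0/8'      # loopback
  - '10.0.0.0/8'       # RFC1918
  - '172.16.0.0/12'
  - '192.168.0.0/16'
```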
This also requires the optional `lxml` python dependency to be installed. This
in turn requires the `libxml2` library to be available - on Debian/Ubuntu this
means `apt-get install libxml2-dev`, or equivalent for your OS.

### Troubleshooting Installation

`pip` seems to leak *lots* of memory during installation. For instance, a Linux
host with 512MB of RAM may run out of memory whilst installing Twisted. If this
happens, you will have to individually install the dependencies which are
failing, e.g.:

```sh
pip install twisted
```

If you have any other problems, feel free to ask in
[#synapse:matrix.org](https://matrix.to/#/#synapse:matrix.org).
14 README.rst
@@ -25,7 +25,7 @@ The overall architecture is::

``#matrix:matrix.org`` is the official support room for Matrix, and can be
accessed by any client from https://matrix.org/docs/projects/try-matrix-now.html or
via IRC bridge at irc://irc.libera.chat/matrix.
via IRC bridge at irc://irc.freenode.net/matrix.

Synapse is currently in rapid development, but as of version 0.5 we believe it
is sufficiently stable to be run as an internet-facing service for real usage!

@@ -94,8 +94,7 @@ Synapse Installation

.. _federation:

* For details on how to install synapse, see
  `Installation Instructions <https://matrix-org.github.io/synapse/latest/setup/installation.html>`_.
* For details on how to install synapse, see `<INSTALL.md>`_.
* For specific details on how to configure Synapse for federation see `docs/federate.md <docs/federate.md>`_


@@ -107,8 +106,7 @@ from a web client.

Unless you are running a test instance of Synapse on your local machine, in
general, you will need to enable TLS support before you can successfully
connect from a client: see
`TLS certificates <https://matrix-org.github.io/synapse/latest/setup/installation.html#tls-certificates>`_.
connect from a client: see `<INSTALL.md#tls-certificates>`_.

An easy way to get started is to login or register via Element at
https://app.element.io/#/login or https://app.element.io/#/register respectively.

@@ -267,7 +265,7 @@ Join our developer community on Matrix: `#synapse-dev:matrix.org <https://matrix

Before setting up a development environment for synapse, make sure you have the
system dependencies (such as the python header files) installed - see
`Installing from source <https://matrix-org.github.io/synapse/latest/setup/installation.html#installing-from-source>`_.
`Installing from source <INSTALL.md#installing-from-source>`_.

To check out a synapse for development, clone the git repo into a working
directory of your choice::

@@ -335,8 +333,8 @@ access the API as a Matrix client would. It is able to run Synapse directly from
the source tree, so installation of the server is not required.

Testing with SyTest is recommended for verifying that changes related to the
Client-Server API are functioning correctly. See the `SyTest installation
instructions <https://github.com/matrix-org/sytest#installing>`_ for details.
Client-Server API are functioning correctly. See the `installation instructions
<https://github.com/matrix-org/sytest#installing>`_ for details.


Platform dependencies

@@ -1,7 +1,7 @@
Upgrading Synapse
=================

This document has moved to the `Synapse documentation website <https://matrix-org.github.io/synapse/latest/upgrading>`_.
This document has moved to the `Synapse documentation website <https://matrix-org.github.io/synapse/develop/upgrading>`_.
Please update your links.

The markdown source is available in `docs/upgrade.md <docs/upgrade.md>`_.

12 book.toml
@@ -34,14 +34,6 @@ additional-css = [
    "docs/website_files/table-of-contents.css",
    "docs/website_files/remove-nav-buttons.css",
    "docs/website_files/indent-section-headers.css",
    "docs/website_files/version-picker.css",
]
additional-js = [
    "docs/website_files/table-of-contents.js",
    "docs/website_files/version-picker.js",
    "docs/website_files/version.js",
]
theme = "docs/website_files/theme"

[preprocessor.schema_versions]
command = "./scripts-dev/schema_versions.py"
additional-js = ["docs/website_files/table-of-contents.js"]
theme = "docs/website_files/theme"
1 changelog.d/10114.misc (new file)
@@ -0,0 +1 @@
Drop Origin and Accept from the value of the Access-Control-Allow-Headers response header.

1 changelog.d/10166.doc (new file)
@@ -0,0 +1 @@
Move the upgrade notes to [docs/upgrade.md](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md) and convert them to markdown.

1 changelog.d/10214.feature (new file)
@@ -0,0 +1 @@
Omit empty fields from the `/sync` response. Contributed by @deepbluev7.

1 changelog.d/10223.bugfix (new file)
@@ -0,0 +1 @@
Fix a long-standing bug which meant that invite rejections and knocks were not sent out over federation in a timely manner.

1 changelog.d/10237.misc (new file)
@@ -0,0 +1 @@
Improve the reliability of auto-joining remote rooms.

1 changelog.d/10239.misc (new file)
@@ -0,0 +1 @@
Update the release script to use the semver terminology and determine the release branch based on the next version.
@@ -2,8 +2,7 @@
This is a setup for managing synapse with a user contributed systemd unit
file. It provides a `matrix-synapse` systemd unit file that should be tailored
to accommodate your installation in accordance with the installation
instructions provided in
[installation instructions](https://matrix-org.github.io/synapse/latest/setup/installation.html).
instructions provided in [installation instructions](../../INSTALL.md).

## Setup
1. Under the service section, ensure the `User` variable matches which user

34 debian/changelog (vendored)
@@ -1,37 +1,3 @@
matrix-synapse-py3 (1.38.1) stable; urgency=medium

  * New synapse release 1.38.1.

 -- Synapse Packaging team <packages@matrix.org>  Thu, 22 Jul 2021 15:37:06 +0100

matrix-synapse-py3 (1.38.0) stable; urgency=medium

  * New synapse release 1.38.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 13 Jul 2021 13:20:56 +0100

matrix-synapse-py3 (1.38.0rc3) prerelease; urgency=medium

  [ Erik Johnston ]
  * Add synapse_review_recent_signups script

  [ Synapse Packaging team ]
  * New synapse release 1.38.0rc3.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 13 Jul 2021 11:53:56 +0100

matrix-synapse-py3 (1.37.1) stable; urgency=medium

  * New synapse release 1.37.1.

 -- Synapse Packaging team <packages@matrix.org>  Wed, 30 Jun 2021 12:24:06 +0100

matrix-synapse-py3 (1.37.0) stable; urgency=medium

  * New synapse release 1.37.0.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 29 Jun 2021 10:15:25 +0100

matrix-synapse-py3 (1.36.0) stable; urgency=medium

  * New synapse release 1.36.0.

42 debian/hash_password.1 (vendored)
@@ -1,58 +1,90 @@
.\" generated with Ronn-NG/v0.8.0
.\" http://github.com/apjanke/ronn-ng/tree/0.8.0
.TH "HASH_PASSWORD" "1" "July 2021" "" ""
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "HASH_PASSWORD" "1" "February 2017" "" ""
.
.SH "NAME"
\fBhash_password\fR \- Calculate the hash of a new password, so that passwords can be reset
.
.SH "SYNOPSIS"
\fBhash_password\fR [\fB\-p\fR|\fB\-\-password\fR [password]] [\fB\-c\fR|\fB\-\-config\fR \fIfile\fR]
.
.SH "DESCRIPTION"
\fBhash_password\fR calculates the hash of a supplied password using bcrypt\.
.
.P
\fBhash_password\fR takes a password as a parameter, either on the command line or from \fBSTDIN\fR if not supplied\.
.
.P
It accepts a YAML file which can be used to specify parameters like the number of rounds for bcrypt, and a password_config section having the pepper value used for the hashing\. By default \fBbcrypt_rounds\fR is set to \fB10\fR\.
.
.P
The hashed password is written on \fBSTDOUT\fR\.
.
.SH "FILES"
A sample YAML file accepted by \fBhash_password\fR is described below:
.
.P
bcrypt_rounds: 17
password_config:
   pepper: "random hashing pepper"
.
.SH "OPTIONS"
.
.TP
\fB\-p\fR, \fB\-\-password\fR
Read the password from the command line if [password] is supplied\. If not, prompt the user and read the password from \fBSTDIN\fR\. It is not recommended to type the password on the command line directly\. Use STDIN instead\.
.
.TP
\fB\-c\fR, \fB\-\-config\fR
Read the supplied YAML \fIfile\fR containing the options \fBbcrypt_rounds\fR and the \fBpassword_config\fR section containing the \fBpepper\fR value\.
.
.SH "EXAMPLES"
Hash from the command line:
.
.IP "" 4
.
.nf

$ hash_password \-p "p@ssw0rd"
$2b$12$VJNqWQYfsWTEwcELfoSi4Oa8eA17movHqqi8\.X8fWFpum7SxZ9MFe
.
.fi
.
.IP "" 0
.
.P
Hash from STDIN:
.
.IP "" 4
.
.nf

$ hash_password
Password:
Confirm password:
$2b$12$AszlvfmJl2esnyhmn8m/kuR2tdXgROWtWxnX\.rcuAbM8ErLoUhybG
.
.fi
.
.IP "" 0
.
.P
Using a config file:
.
.IP "" 4
.
.nf

$ hash_password \-c config\.yml
Password:
Confirm password:
$2b$12$CwI\.wBNr\.w3kmiUlV3T5s\.GT2wH7uebDCovDrCOh18dFedlANK99O
.
.fi
.
.IP "" 0
.
.SH "COPYRIGHT"
This man page was written by Rahul De <\fI\%mailto:rahulde@swecha\.net\fR> for Debian GNU/Linux distribution\.
This man page was written by Rahul De <\fIrahulde@swecha\.net\fR> for Debian GNU/Linux distribution\.
.
.SH "SEE ALSO"
synctl(1), synapse_port_db(1), register_new_matrix_user(1), synapse_review_recent_signups(1)
synctl(1), synapse_port_db(1), register_new_matrix_user(1)

2 debian/hash_password.ronn (vendored)
@@ -66,4 +66,4 @@ for Debian GNU/Linux distribution.

## SEE ALSO

synctl(1), synapse_port_db(1), register_new_matrix_user(1), synapse_review_recent_signups(1)
synctl(1), synapse_port_db(1), register_new_matrix_user(1)

1 debian/manpages (vendored)
@@ -1,5 +1,4 @@
debian/hash_password.1
debian/register_new_matrix_user.1
debian/synapse_port_db.1
debian/synapse_review_recent_signups.1
debian/synctl.1

1 debian/matrix-synapse-py3.links (vendored)
@@ -1,5 +1,4 @@
opt/venvs/matrix-synapse/bin/hash_password usr/bin/hash_password
opt/venvs/matrix-synapse/bin/register_new_matrix_user usr/bin/register_new_matrix_user
opt/venvs/matrix-synapse/bin/synapse_port_db usr/bin/synapse_port_db
opt/venvs/matrix-synapse/bin/synapse_review_recent_signups usr/bin/synapse_review_recent_signups
opt/venvs/matrix-synapse/bin/synctl usr/bin/synctl

37 debian/register_new_matrix_user.1 (vendored)
@@ -1,47 +1,72 @@
.\" generated with Ronn-NG/v0.8.0
.\" http://github.com/apjanke/ronn-ng/tree/0.8.0
.TH "REGISTER_NEW_MATRIX_USER" "1" "July 2021" "" ""
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "REGISTER_NEW_MATRIX_USER" "1" "February 2017" "" ""
.
.SH "NAME"
\fBregister_new_matrix_user\fR \- Used to register new users with a given home server when registration has been disabled
.
.SH "SYNOPSIS"
\fBregister_new_matrix_user\fR options\|\.\|\.\|\.
\fBregister_new_matrix_user\fR options\.\.\.
.
.SH "DESCRIPTION"
\fBregister_new_matrix_user\fR registers new users with a given home server when registration has been disabled\. For this to work, the home server must be configured with the \'registration_shared_secret\' option set\.
.
.P
This accepts the user credentials like the username, password, and whether the user is an admin or not, and registers the user onto the homeserver database\. Also, a YAML file containing the shared secret can be provided\. If not, the shared secret can be provided via the command line\.
.
.P
By default it assumes the home server URL to be \fBhttps://localhost:8448\fR\. This can be changed via the \fBserver_url\fR command line option\.
.
.SH "FILES"
A sample YAML file accepted by \fBregister_new_matrix_user\fR is described below:
.
.IP "" 4
.
.nf

registration_shared_secret: "s3cr3t"
.
.fi
.
.IP "" 0
.
.SH "OPTIONS"
.
.TP
\fB\-u\fR, \fB\-\-user\fR
Local part of the new user\. Will prompt if omitted\.
.
.TP
\fB\-p\fR, \fB\-\-password\fR
New password for user\. Will prompt if omitted\. Supplying the password on the command line is not recommended\. Use STDIN instead\.
.
.TP
\fB\-a\fR, \fB\-\-admin\fR
Register new user as an admin\. Will prompt if omitted\.
.
.TP
\fB\-c\fR, \fB\-\-config\fR
Path to server config file containing the shared secret\.
.
.TP
\fB\-k\fR, \fB\-\-shared\-secret\fR
Shared secret as defined in server config file\. This is an optional parameter as it can also be supplied via the YAML file\.
.
.TP
\fBserver_url\fR
URL of the home server\. Defaults to \'https://localhost:8448\'\.
.
.SH "EXAMPLES"
.
.nf

$ register_new_matrix_user \-u user1 \-p p@ssword \-a \-c config\.yaml
.
.fi
.
.SH "COPYRIGHT"
This man page was written by Rahul De <\fI\%mailto:rahulde@swecha\.net\fR> for Debian GNU/Linux distribution\.
This man page was written by Rahul De <\fIrahulde@swecha\.net\fR> for Debian GNU/Linux distribution\.
.
.SH "SEE ALSO"
synctl(1), synapse_port_db(1), hash_password(1), synapse_review_recent_signups(1)
synctl(1), synapse_port_db(1), hash_password(1)

2 debian/register_new_matrix_user.ronn vendored
@@ -58,4 +58,4 @@ for Debian GNU/Linux distribution.

## SEE ALSO

synctl(1), synapse_port_db(1), hash_password(1), synapse_review_recent_signups(1)
synctl(1), synapse_port_db(1), hash_password(1)

59 debian/synapse_port_db.1 vendored
@@ -1,56 +1,83 @@
.\" generated with Ronn-NG/v0.8.0
.\" http://github.com/apjanke/ronn-ng/tree/0.8.0
.TH "SYNAPSE_PORT_DB" "1" "July 2021" "" ""
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "SYNAPSE_PORT_DB" "1" "February 2017" "" ""
.
.SH "NAME"
\fBsynapse_port_db\fR \- A script to port an existing synapse SQLite database to a new PostgreSQL database\.
.
.SH "SYNOPSIS"
\fBsynapse_port_db\fR [\-v] \-\-sqlite\-database=\fIdbfile\fR \-\-postgres\-config=\fIyamlconfig\fR [\-\-curses] [\-\-batch\-size=\fIbatch\-size\fR]
.
.SH "DESCRIPTION"
\fBsynapse_port_db\fR ports an existing synapse SQLite database to a new PostgreSQL database\.
.
.P
The SQLite database is specified with the \fB\-\-sqlite\-database\fR option, and the configuration required to connect to the PostgreSQL database is provided using the \fB\-\-postgres\-config\fR option\. The configuration is specified in YAML format\.
.
.SH "OPTIONS"
.
.TP
\fB\-v\fR
Print log messages in \fBdebug\fR level instead of \fBinfo\fR level\.
.
.TP
\fB\-\-sqlite\-database\fR
The snapshot of the SQLite database file\. This must not be currently used by a running synapse server\.
.
.TP
\fB\-\-postgres\-config\fR
The database config file for the PostgreSQL database\.
.
.TP
\fB\-\-curses\fR
Display a curses based progress UI\.
.
.SH "CONFIG FILE"
The postgres configuration file must be a valid YAML file with the following options\.
.IP "\[ci]" 4
.
.IP "\(bu" 4
\fBdatabase\fR: Database configuration section\. This section header can be ignored and the options below may be specified as top level keys\.
.IP "\[ci]" 4
.
.IP "\(bu" 4
\fBname\fR: Connector to use when connecting to the database\. This value must be \fBpsycopg2\fR\.
.IP "\[ci]" 4
.
.IP "\(bu" 4
\fBargs\fR: DB API 2\.0 compatible arguments to send to the \fBpsycopg2\fR module\.
.IP "\[ci]" 4
.
.IP "\(bu" 4
\fBdbname\fR \- the database name
.IP "\[ci]" 4
.
.IP "\(bu" 4
\fBuser\fR \- user name used to authenticate
.IP "\[ci]" 4
.
.IP "\(bu" 4
\fBpassword\fR \- password used to authenticate
.IP "\[ci]" 4
.
.IP "\(bu" 4
\fBhost\fR \- database host address (defaults to UNIX socket if not provided)
.IP "\[ci]" 4
.
.IP "\(bu" 4
\fBport\fR \- connection port number (defaults to 5432 if not provided)
.
.IP "" 0

.IP "\[ci]" 4
.
.IP "\(bu" 4
\fBsynchronous_commit\fR: Optional\. Default is True\. If the value is \fBFalse\fR, enable asynchronous commit and don\'t wait for the server to call fsync before ending the transaction\. See: https://www\.postgresql\.org/docs/current/static/wal\-async\-commit\.html
.
.IP "" 0

.
.IP "" 0
.
.P
The following example illustrates the configuration file format\.
.
.IP "" 4
.
.nf

database:
  name: psycopg2
  args:
@@ -59,9 +86,13 @@ database:
    password: ORohmi9Eet=ohphi
    host: localhost
  synchronous_commit: false
.
.fi
.
.IP "" 0
.
.SH "COPYRIGHT"
This man page was written by Sunil Mohan Adapa <\fI\%mailto:sunil@medhas\.org\fR> for Debian GNU/Linux distribution\.
This man page was written by Sunil Mohan Adapa <\fIsunil@medhas\.org\fR> for Debian GNU/Linux distribution\.
.
.SH "SEE ALSO"
synctl(1), hash_password(1), register_new_matrix_user(1), synapse_review_recent_signups(1)
synctl(1), hash_password(1), register_new_matrix_user(1)

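As a worked example of the synopsis above (not part of the packaged man page; the file names are placeholders, and the snapshot step reflects the requirement that the SQLite file not be in use by a running server):

```sh
# Stop Synapse, snapshot the SQLite database, then port the snapshot
# into the PostgreSQL database described in the YAML config.
synctl stop
cp homeserver.db homeserver.db.snapshot
synapse_port_db --sqlite-database homeserver.db.snapshot \
    --postgres-config homeserver-postgres.yaml --curses
```
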
8 debian/synapse_port_db.ronn vendored
@@ -47,7 +47,7 @@ following options.
* `args`:
  DB API 2.0 compatible arguments to send to the `psycopg2` module.

  * `dbname` - the database name
  * `dbname` - the database name

  * `user` - user name used to authenticate

@@ -58,7 +58,7 @@ following options.

  * `port` - connection port number (defaults to 5432 if not
    provided)

* `synchronous_commit`:
  Optional. Default is True. If the value is `False`, enable
@@ -76,7 +76,7 @@ Following example illustrates the configuration file format.
    password: ORohmi9Eet=ohphi
    host: localhost
  synchronous_commit: false

## COPYRIGHT

This man page was written by Sunil Mohan Adapa <<sunil@medhas.org>> for
@@ -84,4 +84,4 @@ Debian GNU/Linux distribution.

## SEE ALSO

synctl(1), hash_password(1), register_new_matrix_user(1), synapse_review_recent_signups(1)
synctl(1), hash_password(1), register_new_matrix_user(1)

26 debian/synapse_review_recent_signups.1 vendored
@@ -1,26 +0,0 @@
.\" generated with Ronn-NG/v0.8.0
.\" http://github.com/apjanke/ronn-ng/tree/0.8.0
.TH "SYNAPSE_REVIEW_RECENT_SIGNUPS" "1" "July 2021" "" ""
.SH "NAME"
\fBsynapse_review_recent_signups\fR \- Print users that have recently registered on Synapse
.SH "SYNOPSIS"
\fBsynapse_review_recent_signups\fR \fB\-c\fR|\fB\-\-config\fR \fIfile\fR [\fB\-s\fR|\fB\-\-since\fR \fIperiod\fR] [\fB\-e\fR|\fB\-\-exclude\-emails\fR] [\fB\-u\fR|\fB\-\-only\-users\fR]
.SH "DESCRIPTION"
\fBsynapse_review_recent_signups\fR prints out recently registered users on a Synapse server, as well as some basic information about the user\.
.P
\fBsynapse_review_recent_signups\fR must be supplied with the config of the Synapse server, so that it can fetch the database config and connect to the database\.
.SH "OPTIONS"
.TP
\fB\-c\fR, \fB\-\-config\fR
The config file(s) used by the Synapse server\.
.TP
\fB\-s\fR, \fB\-\-since\fR
How far back to search for newly registered users\. Defaults to 7d, i\.e\. up to seven days in the past\. Valid units are \'s\', \'m\', \'h\', \'d\', \'w\', or \'y\'\.
.TP
\fB\-e\fR, \fB\-\-exclude\-emails\fR
Do not print out users that have validated emails associated with their account\.
.TP
\fB\-u\fR, \fB\-\-only\-users\fR
Only print out the user IDs of recently registered users, without any additional information
.SH "SEE ALSO"
synctl(1), synapse_port_db(1), register_new_matrix_user(1), hash_password(1)
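For context, a sketch combining the options documented above (not part of the man page itself; the config path is a placeholder):

```sh
# List users registered in the last two weeks, skipping accounts
# that already have a validated email address.
synapse_review_recent_signups -c homeserver.yaml -s 2w -e
```
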
37 debian/synapse_review_recent_signups.ronn vendored
@@ -1,37 +0,0 @@
synapse_review_recent_signups(1) -- Print users that have recently registered on Synapse
========================================================================================

## SYNOPSIS

`synapse_review_recent_signups` `-c`|`--config` <file> [`-s`|`--since` <period>] [`-e`|`--exclude-emails`] [`-u`|`--only-users`]

## DESCRIPTION

**synapse_review_recent_signups** prints out recently registered users on a
Synapse server, as well as some basic information about the user.

`synapse_review_recent_signups` must be supplied with the config of the Synapse
server, so that it can fetch the database config and connect to the database.

## OPTIONS

* `-c`, `--config`:
  The config file(s) used by the Synapse server.

* `-s`, `--since`:
  How far back to search for newly registered users. Defaults to 7d, i.e. up
  to seven days in the past. Valid units are 's', 'm', 'h', 'd', 'w', or 'y'.

* `-e`, `--exclude-emails`:
  Do not print out users that have validated emails associated with their
  account.

* `-u`, `--only-users`:
  Only print out the user IDs of recently registered users, without any
  additional information

## SEE ALSO

synctl(1), synapse_port_db(1), register_new_matrix_user(1), hash_password(1)
42 debian/synctl.1 vendored
@@ -1,41 +1,63 @@
.\" generated with Ronn-NG/v0.8.0
.\" http://github.com/apjanke/ronn-ng/tree/0.8.0
.TH "SYNCTL" "1" "July 2021" "" ""
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "SYNCTL" "1" "February 2017" "" ""
.
.SH "NAME"
\fBsynctl\fR \- Synapse server control interface
.
.SH "SYNOPSIS"
Start, stop or restart synapse server\.
.
.P
\fBsynctl\fR {start|stop|restart} [configfile] [\-w|\-\-worker=\fIWORKERCONFIG\fR] [\-a|\-\-all\-processes=\fIWORKERCONFIGDIR\fR]
.
.SH "DESCRIPTION"
\fBsynctl\fR can be used to start, stop or restart Synapse server\. The control operation can be done on all processes or a single worker process\.
.
.SH "OPTIONS"
.
.TP
\fBaction\fR
The value of action should be one of \fBstart\fR, \fBstop\fR or \fBrestart\fR\.
.
.TP
\fBconfigfile\fR
Optional path of the configuration file to use\. Default value is \fBhomeserver\.yaml\fR\. The configuration file must exist for the operation to succeed\.
.
.TP
\fB\-w\fR, \fB\-\-worker\fR:

.
.IP
Perform start, stop or restart operations on a single worker\. Incompatible with \fB\-a\fR|\fB\-\-all\-processes\fR\. Value passed must be a valid worker\'s configuration file\.
.
.TP
\fB\-a\fR, \fB\-\-all\-processes\fR:

.
.IP
Perform start, stop or restart operations on all the workers in the given directory and the main synapse process\. Incompatible with \fB\-w\fR|\fB\-\-worker\fR\. Value passed must be a directory containing valid worker configuration files\. All files ending with \fB\.yaml\fR extension shall be considered as configuration files and all other files in the directory are ignored\.
.
.SH "CONFIGURATION FILE"
The configuration file may be generated as follows:
.
.IP "" 4
.
.nf

$ python \-m synapse\.app\.homeserver \-c config\.yaml \-\-generate\-config \-\-server\-name=<server name>
.
.fi
.
.IP "" 0
.
.SH "ENVIRONMENT"
.
.TP
\fBSYNAPSE_CACHE_FACTOR\fR
Synapse\'s architecture is quite RAM hungry currently \- we deliberately cache a lot of recent room data and metadata in RAM in order to speed up common requests\. We\'ll improve this in the future, but for now the easiest way to reduce the RAM usage (at the risk of slowing things down) is to set the almost\-undocumented \fBSYNAPSE_CACHE_FACTOR\fR environment variable\. The default is 0\.5, which can be decreased to reduce RAM usage in memory constrained environments, or increased if performance starts to degrade\.
.IP
However, degraded performance due to a low cache factor, common on machines with slow disks, often leads to explosions in memory use due to backlogged requests\. In this case, reducing the cache factor will make things worse\. Instead, try increasing it drastically\. 2\.0 is a good starting value\.
Synapse\'s architecture is quite RAM hungry currently \- a lot of recent room data and metadata is deliberately cached in RAM in order to speed up common requests\. This will be improved in future, but for now the easiest way to reduce the RAM usage (at the risk of slowing things down) is to set the SYNAPSE_CACHE_FACTOR environment variable\. Roughly speaking, a SYNAPSE_CACHE_FACTOR of 1\.0 will max out at around 3\-4GB of resident memory \- this is what we currently run matrix\.org on\. The default setting is currently 0\.1, which is probably around a ~700MB footprint\. You can dial it down further to 0\.02 if desired, which targets roughly ~512MB\. Conversely you can dial it up if you need performance for lots of users and have a box with a lot of RAM\.
.
.SH "COPYRIGHT"
This man page was written by Sunil Mohan Adapa <\fI\%mailto:sunil@medhas\.org\fR> for Debian GNU/Linux distribution\.
This man page was written by Sunil Mohan Adapa <\fIsunil@medhas\.org\fR> for Debian GNU/Linux distribution\.
.
.SH "SEE ALSO"
synapse_port_db(1), hash_password(1), register_new_matrix_user(1), synapse_review_recent_signups(1)
synapse_port_db(1), hash_password(1), register_new_matrix_user(1)

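To illustrate the environment variable described above (not part of the man page; the value follows the text's suggestion of 2.0 as a starting point when increasing the factor):

```sh
# Restart Synapse with double the default cache sizes.
SYNAPSE_CACHE_FACTOR=2.0 synctl restart homeserver.yaml
```
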
2 debian/synctl.ronn vendored
@@ -68,4 +68,4 @@ Debian GNU/Linux distribution.

## SEE ALSO

synapse_port_db(1), hash_password(1), register_new_matrix_user(1), synapse_review_recent_signups(1)
synapse_port_db(1), hash_password(1), register_new_matrix_user(1)

@@ -45,7 +45,7 @@ docker run -it --rm \
```

For information on picking a suitable server name, see
https://matrix-org.github.io/synapse/latest/setup/installation.html.
https://github.com/matrix-org/synapse/blob/master/INSTALL.md.

The above command will generate a `homeserver.yaml` in (typically)
`/var/lib/docker/volumes/synapse-data/_data`. You should check this file, and
@@ -139,7 +139,7 @@ For documentation on using a reverse proxy, see
https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.md.

For more information on enabling TLS support in synapse itself, see
https://matrix-org.github.io/synapse/latest/setup/installation.html#tls-certificates. Of
https://github.com/matrix-org/synapse/blob/master/INSTALL.md#tls-certificates. Of
course, you will need to expose the TLS port from the container with a `-p`
argument to `docker run`.

@@ -8,8 +8,7 @@
#
# It is *not* intended to be copied and used as the basis for a real
# homeserver.yaml. Instead, if you are starting from scratch, please generate
# a fresh config using Synapse by following the instructions in
# https://matrix-org.github.io/synapse/latest/setup/installation.html.
# a fresh config using Synapse by following the instructions in INSTALL.md.

# Configuration options that take a time period can be set using a number
# followed by a letter. Letters have the following meanings:

@@ -14,7 +14,7 @@ upgraded, however it may be of use to those with old installs returning to the
project.

If you are setting up a server from scratch you almost certainly should look at
the [installation guide](setup/installation.md) instead.
the [installation guide](../INSTALL.md) instead.

## Introduction
The goal of Synapse 0.99.0 is to act as a stepping stone to Synapse 1.0.0. It

@@ -36,17 +36,7 @@ It returns a JSON body like the following:
    "creation_ts": 1560432506,
    "appservice_id": null,
    "consent_server_notice_sent": null,
    "consent_version": null,
    "external_ids": [
        {
            "auth_provider": "<provider1>",
            "external_id": "<user_id_provider_1>"
        },
        {
            "auth_provider": "<provider2>",
            "external_id": "<user_id_provider_2>"
        }
    ]
    "consent_version": null
}
```

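For reference, the response in this hunk comes from Synapse's user query admin API; a request against it generally looks like the sketch below (the URL, user ID and access token are placeholders, not values from this diff):

```sh
curl --header "Authorization: Bearer <admin_access_token>" \
    'http://localhost:8008/_synapse/admin/v2/users/@user:example.com'
```
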
@@ -194,7 +194,7 @@ In order to port a module that uses Synapse's old module interface, its author n

* ensure the module's callbacks are all asynchronous.
* register their callbacks using one or more of the `register_[...]_callbacks` methods
  from the `ModuleApi` class in the module's `__init__` method (see [this section](#registering-a-callback)
  from the `ModuleApi` class in the module's `__init__` method (see [this section](#registering-a-web-resource)
  for more info).

Additionally, if the module is packaged with an additional web resource, the module

@@ -8,14 +8,14 @@ Synapse will require the python postgres client library in order to
connect to a postgres database.

- If you are using the [matrix.org debian/ubuntu
  packages](setup/installation.md#matrixorg-packages), the necessary python
  packages](../INSTALL.md#matrixorg-packages), the necessary python
  library will already be installed, but you will need to ensure the
  low-level postgres library is installed, which you can do with
  `apt install libpq5`.
- For other pre-built packages, please consult the documentation from
  the relevant package.
- If you installed synapse [in a
  virtualenv](setup/installation.md#installing-from-source), you can install
  virtualenv](../INSTALL.md#installing-from-source), you can install
  the library with:

      ~/synapse/env/bin/pip install "matrix-synapse[postgres]"

@@ -222,9 +222,7 @@ Synapse, amend your homeserver config file with the following.

```yaml
presence:
  enabled: true

  presence_router:
    routing_module:
      module: my_module.ExamplePresenceRouter
      config:
        # Any configuration options for your module. The below is an example.

@@ -8,8 +8,7 @@
#
# It is *not* intended to be copied and used as the basis for a real
# homeserver.yaml. Instead, if you are starting from scratch, please generate
# a fresh config using Synapse by following the instructions in
# https://matrix-org.github.io/synapse/latest/setup/installation.html.
# a fresh config using Synapse by following the instructions in INSTALL.md.

# Configuration options that take a time period can be set using a number
# followed by a letter. Letters have the following meanings:
@@ -37,7 +36,7 @@

# Server admins can expand Synapse's functionality with external modules.
#
# See https://matrix-org.github.io/synapse/latest/modules.html for more
# See https://matrix-org.github.io/synapse/develop/modules.html for more
# documentation on how to configure or create custom modules for Synapse.
#
modules:
@@ -59,7 +58,7 @@ modules:
# In most cases you should avoid using a matrix specific subdomain such as
# matrix.example.com or synapse.example.com as the server_name for the same
# reasons you wouldn't use user@email.example.com as your email address.
# See https://matrix-org.github.io/synapse/latest/delegate.html
# See https://github.com/matrix-org/synapse/blob/master/docs/delegate.md
# for information on how to host Synapse on a subdomain while preserving
# a clean server_name.
#
@@ -254,9 +253,9 @@ presence:
#   'all local interfaces'.
#
#   type: the type of listener. Normally 'http', but other valid options are:
#       'manhole' (see https://matrix-org.github.io/synapse/latest/manhole.html),
#       'metrics' (see https://matrix-org.github.io/synapse/latest/metrics-howto.html),
#       'replication' (see https://matrix-org.github.io/synapse/latest/workers.html).
#       'manhole' (see docs/manhole.md),
#       'metrics' (see docs/metrics-howto.md),
#       'replication' (see docs/workers.md).
#
#   tls: set to true to enable TLS for this listener. Will use the TLS
#       key/cert specified in tls_private_key_path / tls_certificate_path.
@@ -281,8 +280,8 @@ presence:
#   client: the client-server API (/_matrix/client), and the synapse admin
#       API (/_synapse/admin). Also implies 'media' and 'static'.
#
#   consent: user consent forms (/_matrix/consent).
#       See https://matrix-org.github.io/synapse/latest/consent_tracking.html.
#   consent: user consent forms (/_matrix/consent). See
#       docs/consent_tracking.md.
#
#   federation: the server-server API (/_matrix/federation). Also implies
#       'media', 'keys', 'openid'
@@ -291,13 +290,12 @@ presence:
#
#   media: the media API (/_matrix/media).
#
#   metrics: the metrics interface.
#       See https://matrix-org.github.io/synapse/latest/metrics-howto.html.
#   metrics: the metrics interface. See docs/metrics-howto.md.
#
#   openid: OpenID authentication.
#
#   replication: the HTTP replication API (/_synapse/replication).
#       See https://matrix-org.github.io/synapse/latest/workers.html.
#   replication: the HTTP replication API (/_synapse/replication). See
#       docs/workers.md.
#
#   static: static resources under synapse/static (/_matrix/static). (Mostly
#       useful for 'fallback authentication'.)
@@ -321,7 +319,7 @@ listeners:
  # that unwraps TLS.
  #
  # If you plan to use a reverse proxy, please see
  # https://matrix-org.github.io/synapse/latest/reverse_proxy.html.
  # https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.md.
  #
  - port: 8008
    tls: false
@@ -675,41 +673,35 @@ retention:
#event_cache_size: 10K

caches:
  # Controls the global cache factor, which is the default cache factor
  # for all caches if a specific factor for that cache is not otherwise
  # set.
  #
  # This can also be set by the "SYNAPSE_CACHE_FACTOR" environment
  # variable. Setting by environment variable takes priority over
  # setting through the config file.
  #
  # Defaults to 0.5, which will halve the size of all caches.
  #
  #global_factor: 1.0

  # A dictionary of cache name to cache factor for that individual
  # cache. Overrides the global cache factor for a given cache.
  #
  # These can also be set through environment variables comprised
  # of "SYNAPSE_CACHE_FACTOR_" + the name of the cache in capital
  # letters and underscores. Setting by environment variable
  # takes priority over setting through the config file.
  # Ex. SYNAPSE_CACHE_FACTOR_GET_USERS_WHO_SHARE_ROOM_WITH_USER=2.0
  #
  # Some caches have '*' and other characters that are not
  # alphanumeric or underscores. These caches can be named with or
  # without the special characters stripped. For example, to specify
  # the cache factor for `*stateGroupCache*` via an environment
  # variable would be `SYNAPSE_CACHE_FACTOR_STATEGROUPCACHE=2.0`.
  #
  per_cache_factors:
    #get_users_who_share_room_with_user: 2.0

  # Controls how long an entry can be in a cache without having been
  # accessed before being evicted. Defaults to None, which means
  # entries are never evicted based on time.
  #
  #expiry_time: 30m

## Database ##
@@ -749,8 +741,7 @@ caches:
#  cp_min: 5
#  cp_max: 10
#
# For more information on using Synapse with Postgres,
# see https://matrix-org.github.io/synapse/latest/postgres.html.
# For more information on using Synapse with Postgres, see `docs/postgres.md`.
#
database:
  name: sqlite3
@@ -903,7 +894,7 @@ media_store_path: "DATADIR/media_store"
#
# If you are using a reverse proxy you may also need to set this value in
# your reverse proxy's config. Notably Nginx has a small max body size by default.
# See https://matrix-org.github.io/synapse/latest/reverse_proxy.html.
# See https://matrix-org.github.io/synapse/develop/reverse_proxy.html.
#
#max_upload_size: 50M

@@ -1843,7 +1834,7 @@ saml2_config:
#
#   module: The class name of a custom mapping module. Default is
#       'synapse.handlers.oidc.JinjaOidcMappingProvider'.
#       See https://matrix-org.github.io/synapse/latest/sso_mapping_providers.html#openid-mapping-providers
#       See https://github.com/matrix-org/synapse/blob/master/docs/sso_mapping_providers.md#openid-mapping-providers
#       for information on implementing a custom mapping provider.
#
#   config: Configuration for the mapping provider module. This section will
@@ -1894,7 +1885,7 @@ saml2_config:
#      - attribute: groups
#        value: "admin"
#
# See https://matrix-org.github.io/synapse/latest/openid.html
# See https://github.com/matrix-org/synapse/blob/master/docs/openid.md
# for information on how to configure these options.
#
# For backwards compatibility, it is also possible to configure a single OIDC
@@ -2172,7 +2163,7 @@ sso:
# Note that this is a non-standard login type and client support is
# expected to be non-existent.
#
# See https://matrix-org.github.io/synapse/latest/jwt.html.
# See https://github.com/matrix-org/synapse/blob/master/docs/jwt.md.
#
#jwt_config:
  # Uncomment the following to enable authorization using JSON web
@@ -2472,7 +2463,7 @@ email:
# ex. LDAP, external tokens, etc.
#
# For more information and known implementations, please see
# https://matrix-org.github.io/synapse/latest/password_auth_providers.html
# https://github.com/matrix-org/synapse/blob/master/docs/password_auth_providers.md
#
# Note: instances wishing to use SAML or CAS authentication should
# instead use the `saml2_config` or `cas_config` options,
@@ -2574,7 +2565,7 @@ user_directory:
  #
  # If you set it true, you'll have to rebuild the user_directory search
  # indexes, see:
  # https://matrix-org.github.io/synapse/latest/user_directory.html
  # https://github.com/matrix-org/synapse/blob/master/docs/user_directory.md
  #
  # Uncomment to return search results containing all known users, even if that
  # user does not share a room with the requester.
@@ -2594,7 +2585,7 @@ user_directory:
# User Consent configuration
#
# for detailed instructions, see
# https://matrix-org.github.io/synapse/latest/consent_tracking.html
# https://github.com/matrix-org/synapse/blob/master/docs/consent_tracking.md
#
# Parts of this section are required if enabling the 'consent' resource under
# 'listeners', in particular 'template_dir' and 'version'.
@@ -2644,7 +2635,7 @@ user_directory:

# Settings for local room and user statistics collection. See
# https://matrix-org.github.io/synapse/latest/room_and_user_statistics.html.
# docs/room_and_user_statistics.md.
#
stats:
  # Uncomment the following to disable room and user statistics. Note that doing
@@ -2771,7 +2762,7 @@ opentracing:
  #enabled: true

  # The list of homeservers we wish to send and receive span contexts and span baggage.
  # See https://matrix-org.github.io/synapse/latest/opentracing.html.
  # See docs/opentracing.rst.
  #
  # This is a list of regexes which are matched against the server_name of the
  # homeserver.

@@ -7,7 +7,7 @@
# be ingested by ELK stacks. See [2] for details.
#
# [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
# [2]: https://matrix-org.github.io/synapse/latest/structured_logging.html
# [2]: https://github.com/matrix-org/synapse/blob/master/docs/structured_logging.md

version: 1

@@ -1,596 +1,7 @@
# Installation Instructions

There are 3 steps to follow under **Installation Instructions**.

- [Installation Instructions](#installation-instructions)
  - [Choosing your server name](#choosing-your-server-name)
  - [Installing Synapse](#installing-synapse)
    - [Installing from source](#installing-from-source)
      - [Platform-specific prerequisites](#platform-specific-prerequisites)
        - [Debian/Ubuntu/Raspbian](#debianubunturaspbian)
        - [ArchLinux](#archlinux)
        - [CentOS/Fedora](#centosfedora)
        - [macOS](#macos)
        - [OpenSUSE](#opensuse)
        - [OpenBSD](#openbsd)
        - [Windows](#windows)
    - [Prebuilt packages](#prebuilt-packages)
      - [Docker images and Ansible playbooks](#docker-images-and-ansible-playbooks)
      - [Debian/Ubuntu](#debianubuntu)
        - [Matrix.org packages](#matrixorg-packages)
        - [Downstream Debian packages](#downstream-debian-packages)
        - [Downstream Ubuntu packages](#downstream-ubuntu-packages)
      - [Fedora](#fedora)
      - [OpenSUSE](#opensuse-1)
      - [SUSE Linux Enterprise Server](#suse-linux-enterprise-server)
      - [ArchLinux](#archlinux-1)
      - [Void Linux](#void-linux)
      - [FreeBSD](#freebsd)
      - [OpenBSD](#openbsd-1)
      - [NixOS](#nixos)
  - [Setting up Synapse](#setting-up-synapse)
    - [Using PostgreSQL](#using-postgresql)
    - [TLS certificates](#tls-certificates)
    - [Client Well-Known URI](#client-well-known-uri)
    - [Email](#email)
    - [Registering a user](#registering-a-user)
    - [Setting up a TURN server](#setting-up-a-turn-server)
    - [URL previews](#url-previews)
  - [Troubleshooting Installation](#troubleshooting-installation)

## Choosing your server name

It is important to choose the name for your server before you install Synapse,
because it cannot be changed later.

The server name determines the "domain" part of user-ids for users on your
server: these will all be of the format `@user:my.domain.name`. It also
determines how other matrix servers will reach yours for federation.

For a test configuration, set this to the hostname of your server. For a more
production-ready setup, you will probably want to specify your domain
(`example.com`) rather than a matrix-specific hostname here (in the same way
that your email address is probably `user@example.com` rather than
`user@email.example.com`) - but doing so may require more advanced setup: see
[Setting up Federation](../federate.md).

## Installing Synapse

### Installing from source

(Prebuilt packages are available for some platforms - see [Prebuilt packages](#prebuilt-packages).)

When installing from source please make sure that the [Platform-specific prerequisites](#platform-specific-prerequisites) are already installed.

System requirements:

- POSIX-compliant system (tested on Linux & OS X)
- Python 3.5.2 or later, up to Python 3.9.
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org

To install the Synapse homeserver run:

```sh
mkdir -p ~/synapse
virtualenv -p python3 ~/synapse/env
source ~/synapse/env/bin/activate
pip install --upgrade pip
pip install --upgrade setuptools
pip install matrix-synapse
```

This will download Synapse from [PyPI](https://pypi.org/project/matrix-synapse)
and install it, along with the python libraries it uses, into a virtual environment
under `~/synapse/env`. Feel free to pick a different directory if you
prefer.

This Synapse installation can then be later upgraded by using pip again with the
update flag:

```sh
source ~/synapse/env/bin/activate
pip install -U matrix-synapse
```

Before you can start Synapse, you will need to generate a configuration
file. To do this, run (in your virtualenv, as before):

```sh
cd ~/synapse
python -m synapse.app.homeserver \
    --server-name my.domain.name \
    --config-path homeserver.yaml \
    --generate-config \
    --report-stats=[yes|no]
```

... substituting an appropriate value for `--server-name`.

This command will generate you a config file that you can then customise, but it will
also generate a set of keys for you. These keys will allow your homeserver to
identify itself to other homeservers, so don't lose or delete them. It would be
wise to back them up somewhere safe. (If, for whatever reason, you do need to
change your homeserver's keys, you may find that other homeservers have the
old key cached. If you update the signing key, you should change the name of the
key in the `<server name>.signing.key` file (the second word) to something
different. See the [spec](https://matrix.org/docs/spec/server_server/latest.html#retrieving-server-keys) for more information on key management.)

To actually run your new homeserver, pick a working directory for Synapse to
run (e.g. `~/synapse`), and:

```sh
cd ~/synapse
source env/bin/activate
synctl start
```

#### Platform-specific prerequisites

Synapse is written in Python but some of the libraries it uses are written in
C. So before we can install Synapse itself we need a working C compiler and the
header files for Python C extensions.

##### Debian/Ubuntu/Raspbian

Installing prerequisites on Ubuntu or Debian:

```sh
sudo apt install build-essential python3-dev libffi-dev \
    python3-pip python3-setuptools sqlite3 \
    libssl-dev virtualenv libjpeg-dev libxslt1-dev
```

##### ArchLinux

Installing prerequisites on ArchLinux:

```sh
sudo pacman -S base-devel python python-pip \
    python-setuptools python-virtualenv sqlite3
```

##### CentOS/Fedora

Installing prerequisites on CentOS or Fedora Linux:

```sh
sudo dnf install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
    libwebp-devel libxml2-devel libxslt-devel libpq-devel \
    python3-virtualenv libffi-devel openssl-devel python3-devel
sudo dnf groupinstall "Development Tools"
```

##### macOS

Installing prerequisites on macOS:

```sh
xcode-select --install
sudo easy_install pip
sudo pip install virtualenv
brew install pkg-config libffi
```

On macOS Catalina (10.15) you may need to explicitly install OpenSSL
via brew and inform `pip` about it so that `psycopg2` builds:

```sh
brew install openssl@1.1
export LDFLAGS="-L/usr/local/opt/openssl/lib"
export CPPFLAGS="-I/usr/local/opt/openssl/include"
```

##### OpenSUSE

Installing prerequisites on openSUSE:

```sh
sudo zypper in -t pattern devel_basis
sudo zypper in python-pip python-setuptools sqlite3 python-virtualenv \
    python-devel libffi-devel libopenssl-devel libjpeg62-devel
```

##### OpenBSD

A port of Synapse is available under `net/synapse`. The filesystem
underlying the homeserver directory (defaults to `/var/synapse`) has to be
mounted with `wxallowed` (cf. `mount(8)`), so creating a separate filesystem
and mounting it to `/var/synapse` should be taken into consideration.

To be able to build Synapse's dependency on python the `WRKOBJDIR`
(cf. `bsd.port.mk(5)`) for building python, too, needs to be on a filesystem
mounted with `wxallowed` (cf. `mount(8)`).

Creating a `WRKOBJDIR` for building python under `/usr/local` (which on a
default OpenBSD installation is mounted with `wxallowed`):

```sh
doas mkdir /usr/local/pobj_wxallowed
```

Assuming `PORTS_PRIVSEP=Yes` (cf. `bsd.port.mk(5)`) and `SUDO=doas` are
configured in `/etc/mk.conf`:

```sh
doas chown _pbuild:_pbuild /usr/local/pobj_wxallowed
```

Setting the `WRKOBJDIR` for building python:

```sh
echo WRKOBJDIR_lang/python/3.7=/usr/local/pobj_wxallowed \\nWRKOBJDIR_lang/python/2.7=/usr/local/pobj_wxallowed >> /etc/mk.conf
```

Building Synapse:

```sh
cd /usr/ports/net/synapse
make install
```

##### Windows

If you wish to run or develop Synapse on Windows, the Windows Subsystem For
Linux provides a Linux environment on Windows 10 which is capable of using the
Debian, Fedora, or source installation methods. More information about WSL can
be found at <https://docs.microsoft.com/en-us/windows/wsl/install-win10> for
Windows 10 and <https://docs.microsoft.com/en-us/windows/wsl/install-on-server>
for Windows Server.

### Prebuilt packages

As an alternative to installing from source, prebuilt packages are available
for a number of platforms.

#### Docker images and Ansible playbooks

There is an official synapse image available at
<https://hub.docker.com/r/matrixdotorg/synapse> which can be used with
the docker-compose file available at
[contrib/docker](https://github.com/matrix-org/synapse/tree/develop/contrib/docker).
Further information on this including configuration options is available in the README
on hub.docker.com.

Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
Dockerfile to automate a synapse server in a single Docker image, at
<https://hub.docker.com/r/avhost/docker-matrix/tags/>

Slavi Pantaleev has created an Ansible playbook,
which installs the official Docker image of Matrix Synapse
along with many other Matrix-related services (Postgres database, Element, coturn,
ma1sd, SSL support, etc.).
For more details, see
<https://github.com/spantaleev/matrix-docker-ansible-deploy>

#### Debian/Ubuntu

##### Matrix.org packages

Matrix.org provides Debian/Ubuntu packages of the latest stable version of
Synapse via <https://packages.matrix.org/debian/>. They are available for Debian
9 (Stretch), Ubuntu 16.04 (Xenial), and later. To use them:

```sh
sudo apt install -y lsb-release wget apt-transport-https
sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg
echo "deb [signed-by=/usr/share/keyrings/matrix-org-archive-keyring.gpg] https://packages.matrix.org/debian/ $(lsb_release -cs) main" |
    sudo tee /etc/apt/sources.list.d/matrix-org.list
sudo apt update
sudo apt install matrix-synapse-py3
```

**Note**: if you followed a previous version of these instructions which
recommended using `apt-key add` to add an old key from
`https://matrix.org/packages/debian/`, you should note that this key has been
revoked. You should remove the old key with `sudo apt-key remove
C35EB17E1EAE708E6603A9B3AD0592FE47F0DF61`, and follow the above instructions to
update your configuration.

The fingerprint of the repository signing key (as shown by `gpg
/usr/share/keyrings/matrix-org-archive-keyring.gpg`) is
`AAF9AE843A7584B5A3E4CD2BCF45A512DE2DA058`.

##### Downstream Debian packages

We do not recommend using the packages from the default Debian `buster`
repository at this time, as they are old and suffer from known security
vulnerabilities. You can install the latest version of Synapse from
[our repository](#matrixorg-packages) or from `buster-backports`. Please
see the [Debian documentation](https://backports.debian.org/Instructions/)
for information on how to use backports.

If you are using Debian `sid` or testing, Synapse is available in the default
repositories and it should be possible to install it simply with:

```sh
sudo apt install matrix-synapse
```

##### Downstream Ubuntu packages

We do not recommend using the packages in the default Ubuntu repository
at this time, as they are old and suffer from known security vulnerabilities.
The latest version of Synapse can be installed from [our repository](#matrixorg-packages).

#### Fedora

Synapse is in the Fedora repositories as `matrix-synapse`:

```sh
sudo dnf install matrix-synapse
```

Oleg Girko provides Fedora RPMs at
<https://obs.infoserver.lv/project/monitor/matrix-synapse>

#### OpenSUSE

Synapse is in the OpenSUSE repositories as `matrix-synapse`:

```sh
sudo zypper install matrix-synapse
```

#### SUSE Linux Enterprise Server

Unofficial packages are built for SLES 15 in the openSUSE:Backports:SLE-15 repository at
<https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15/standard/>

#### ArchLinux

The quickest way to get up and running with ArchLinux is probably with the community package
<https://www.archlinux.org/packages/community/any/matrix-synapse/>, which should pull in most of
the necessary dependencies.

pip may be outdated (6.0.7-1 needs to be upgraded to 6.0.8-1):

```sh
sudo pip install --upgrade pip
```

If you encounter an error with lib bcrypt causing a "Wrong ELF Class:
ELFCLASS32" error (x64 systems), you may need to reinstall py-bcrypt to correctly
compile it under the right architecture. (This should not be needed if
installing under virtualenv):

```sh
sudo pip uninstall py-bcrypt
sudo pip install py-bcrypt
```

#### Void Linux

Synapse can be found in the void repositories as 'synapse':

```sh
xbps-install -Su
xbps-install -S synapse
```

#### FreeBSD

Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from:

- Ports: `cd /usr/ports/net-im/py-matrix-synapse && make install clean`
- Packages: `pkg install py37-matrix-synapse`

#### OpenBSD

As of OpenBSD 6.7 Synapse is available as a pre-compiled binary. The filesystem
underlying the homeserver directory (defaults to `/var/synapse`) has to be
mounted with `wxallowed` (cf. `mount(8)`), so creating a separate filesystem
and mounting it to `/var/synapse` should be taken into consideration.

Installing Synapse:

```sh
doas pkg_add synapse
```

#### NixOS

Robin Lambertz has packaged Synapse for NixOS at:
<https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-synapse.nix>

## Setting up Synapse

Once you have installed synapse as above, you will need to configure it.

### Using PostgreSQL

By default Synapse uses an [SQLite](https://sqlite.org/) database and in doing so trades
performance for convenience. Almost all installations should opt to use [PostgreSQL](https://www.postgresql.org)
instead. Advantages include:

- significant performance improvements due to the superior threading and
  caching model, smarter query optimiser
- allowing the DB to be run on separate hardware

For information on how to install and use PostgreSQL in Synapse, please see
[docs/postgres.md](../postgres.md)

SQLite is only acceptable for testing purposes. SQLite should not be used in
a production server. Synapse will perform poorly when using
SQLite, especially when participating in large rooms.

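A minimal sketch of the corresponding `database` section in `homeserver.yaml`, assuming a local PostgreSQL server (all names and credentials below are placeholders; `docs/postgres.md` remains the authoritative reference):

```yaml
database:
  name: psycopg2
  args:
    user: synapse_user
    password: <password>
    database: synapse
    host: localhost
    cp_min: 5
    cp_max: 10
```
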
### TLS certificates

The default configuration exposes a single HTTP port on the local
interface: `http://localhost:8008`. It is suitable for local testing,
but for any practical use, you will need Synapse's APIs to be served
over HTTPS.

The recommended way to do so is to set up a reverse proxy on port
`8448`. You can find documentation on doing so in
[docs/reverse_proxy.md](../reverse_proxy.md).

Alternatively, you can configure Synapse to expose an HTTPS port. To do
so, you will need to edit `homeserver.yaml`, as follows:

- First, under the `listeners` section, uncomment the configuration for the
  TLS-enabled listener. (Remove the hash sign (`#`) at the start of
  each line). The relevant lines are like this:

  ```yaml
  - port: 8448
    type: http
    tls: true
    resources:
      - names: [client, federation]
  ```

- You will also need to uncomment the `tls_certificate_path` and
  `tls_private_key_path` lines under the `TLS` section. You will need to manage
  provisioning of these certificates yourself.

If you are using your own certificate, be sure to use a `.pem` file that
includes the full certificate chain including any intermediate certificates
(for instance, if using certbot, use `fullchain.pem` as your certificate, not
`cert.pem`).

For a more detailed guide to configuring your server for federation, see
[federate.md](../federate.md).

### Client Well-Known URI

Setting up the client Well-Known URI is optional but if you set it up, it will
allow users to enter their full username (e.g. `@user:<server_name>`) into clients
which support well-known lookup to automatically configure the homeserver and
identity server URLs. This is useful so that users don't have to memorize or think
about the actual homeserver URL you are using.

The URL `https://<server_name>/.well-known/matrix/client` should return JSON in
the following format.

```json
{
  "m.homeserver": {
    "base_url": "https://<matrix.example.com>"
  }
}
```

It can optionally contain identity server information as well.

```json
{
  "m.homeserver": {
    "base_url": "https://<matrix.example.com>"
  },
  "m.identity_server": {
    "base_url": "https://<identity.example.com>"
  }
}
```

To work in browser based clients, the file must be served with the appropriate
Cross-Origin Resource Sharing (CORS) headers. A recommended value would be
`Access-Control-Allow-Origin: *` which would allow all browser based clients to
view it.

In nginx this would be something like:

```nginx
location /.well-known/matrix/client {
    return 200 '{"m.homeserver": {"base_url": "https://<matrix.example.com>"}}';
    default_type application/json;
    add_header Access-Control-Allow-Origin *;
}
```

You should also ensure the `public_baseurl` option in `homeserver.yaml` is set
correctly. `public_baseurl` should be set to the URL that clients will use to
connect to your server. This is the same URL you put for the `m.homeserver`
`base_url` above.

```yaml
public_baseurl: "https://<matrix.example.com>"
```

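Once that is in place, a quick sanity check from the command line (the hostname is a placeholder):

```sh
curl https://example.com/.well-known/matrix/client
# Should print the JSON document shown above, e.g.:
# {"m.homeserver": {"base_url": "https://matrix.example.com"}}
```
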
### Email

It is desirable for Synapse to have the capability to send email. This allows
Synapse to send password reset emails, send verifications when an email address
is added to a user's account, and send email notifications to users when they
receive new messages.

To configure an SMTP server for Synapse, modify the configuration section
headed `email`, and be sure to have at least the `smtp_host`, `smtp_port`
and `notif_from` fields filled out. You may also need to set `smtp_user`,
`smtp_pass`, and `require_transport_security`.

If email is not configured, password reset, registration and notifications via
email will be disabled.

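As a minimal sketch of such a section, using only the options named above (the host, credentials and addresses are placeholders, not defaults):

```yaml
email:
  smtp_host: mail.example.com
  smtp_port: 587
  smtp_user: "synapse"
  smtp_pass: "<smtp password>"
  require_transport_security: true
  notif_from: "Your %(app)s homeserver <noreply@example.com>"
```
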
### Registering a user

The easiest way to create a new user is to do so from a client like [Element](https://element.io/).

Alternatively, you can do so from the command line. This can be done as follows:

1. If synapse was installed via pip, activate the virtualenv as follows (if Synapse was
   installed via a prebuilt package, `register_new_matrix_user` should already be
   on the search path):
   ```sh
   cd ~/synapse
   source env/bin/activate
   synctl start # if not already running
   ```
2. Run the following command:
   ```sh
   register_new_matrix_user -c homeserver.yaml http://localhost:8008
   ```

This will prompt you to add details for the new user, and will then connect to
the running Synapse to create the new user. For example:

```
New user localpart: erikj
Password:
Confirm password:
Make admin [no]:
Success!
```

This process uses a setting `registration_shared_secret` in
`homeserver.yaml`, which is shared between Synapse itself and the
`register_new_matrix_user` script. It doesn't matter what it is (a random
value is generated by `--generate-config`), but it should be kept secret, as
anyone with knowledge of it can register users, including admin accounts,
on your server even if `enable_registration` is `false`.

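If you ever need to set `registration_shared_secret` by hand rather than relying on `--generate-config`, any long random string works; one common way to generate one:

```sh
# Generate a random value suitable for use as the shared secret.
openssl rand -base64 32
```
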
### Setting up a TURN server

For reliable VoIP calls to be routed via this homeserver, you MUST configure
a TURN server. See
[docs/turn-howto.md](../turn-howto.md)
for details.

### URL previews

Synapse includes support for previewing URLs, which is disabled by default. To
turn it on you must enable the `url_preview_enabled: True` config parameter
and explicitly specify the IP ranges that Synapse is not allowed to spider for
previewing in the `url_preview_ip_range_blacklist` configuration parameter.
This is critical from a security perspective to stop arbitrary Matrix users
spidering 'internal' URLs on your network. At the very least we recommend that
your loopback and RFC1918 IP addresses are blacklisted.

This also requires the optional `lxml` python dependency to be installed. This
in turn requires the `libxml2` library to be available - on Debian/Ubuntu this
means `apt-get install libxml2-dev`, or equivalent for your OS.

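Putting the two options described above together, a sketch of the relevant `homeserver.yaml` snippet (the blacklist shown covers only loopback and the RFC1918 ranges the text recommends; extend it as needed):

```yaml
url_preview_enabled: true
url_preview_ip_range_blacklist:
  - '127.0.0.0/8'
  - '10.0.0.0/8'
  - '172.16.0.0/12'
  - '192.168.0.0/16'
```
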
### Troubleshooting Installation

`pip` seems to leak *lots* of memory during installation. For instance, a Linux
host with 512MB of RAM may run out of memory whilst installing Twisted. If this
happens, you will have to individually install the dependencies which are
failing, e.g.:

```sh
pip install twisted
```

If you have any other problems, feel free to ask in
[#synapse:matrix.org](https://matrix.to/#/#synapse:matrix.org).
<!--
Include the contents of INSTALL.md from the project root without moving it, which may
break links around the internet. Additionally, note that SUMMARY.md is unable to
directly link to content outside of the docs/ directory. So we use this file as a
redirection.
-->
{{#include ../../INSTALL.md}}

@@ -16,7 +16,7 @@ this document.
  summaries.

- If Synapse was installed using [prebuilt
  packages](setup/installation.md#prebuilt-packages), you will need to follow the
  packages](../setup/INSTALL.md#prebuilt-packages), you will need to follow the
  normal process for upgrading those packages.

- If Synapse was installed from source, then:
@@ -84,45 +84,7 @@ process, for example:
wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```


# Upgrading to v1.38.0

## Re-indexing of `events` table on Postgres databases

This release includes a database schema update which requires re-indexing one of
the larger tables in the database, `events`. This could result in increased
disk I/O for several hours or days after upgrading while the migration
completes. Furthermore, because we have to keep the old indexes until the new
indexes are ready, it could result in a significant, temporary, increase in
disk space.

To get a rough idea of the disk space required, check the current size of one
of the indexes. For example, from a `psql` shell, run the following sql:

```sql
SELECT pg_size_pretty(pg_relation_size('events_order_room'));
```

We need to rebuild **four** indexes, so you will need to multiply this result
by four to give an estimate of the disk space required. For example, on one
particular server:

```
synapse=# select pg_size_pretty(pg_relation_size('events_order_room'));
 pg_size_pretty
----------------
 288 MB
(1 row)
```

On this server, it would be wise to ensure that at least 1152MB are free.
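
If you prefer, the multiplication can be done directly in Postgres; the
following is a sketch using the same index as above:

```sql
-- Rough disk-space estimate for rebuilding all four indexes.
SELECT pg_size_pretty(4 * pg_relation_size('events_order_room'));
```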

The additional disk space will be freed once the migration completes.

SQLite databases are unaffected by this change.


# Upgrading to v1.37.0

## Deprecation of the current spam checker interface

@@ -24,11 +24,6 @@ Finally, we also stylise the chapter titles in the left sidebar by indenting them
slightly so that they are more visually distinguishable from the section headers
(the bold titles). This is done through the `indent-section-headers.css` file.

In addition to these modifications, we have added a version picker to the documentation.
Users can switch between documentations for different versions of Synapse.
This functionality was implemented through the `version-picker.js` and
`version-picker.css` files.
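
In `book.toml` terms, wiring these files in presumably looks something like the
following (the keys are mdbook's documented ones; the paths are illustrative,
not taken from the repository):

```toml
[output.html]
additional-css = ["website_files/version-picker.css"]
additional-js = ["website_files/version-picker.js"]
```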

More information can be found in mdbook's official documentation for
[injecting page JS/CSS](https://rust-lang.github.io/mdBook/format/config.html)
and

@@ -131,18 +131,6 @@
                    <i class="fa fa-search"></i>
                </button>
            {{/if}}
            <div class="version-picker">
                <div class="dropdown">
                    <div class="select">
                        <span></span>
                        <i class="fa fa-chevron-down"></i>
                    </div>
                    <input type="hidden" name="version">
                    <ul class="dropdown-menu">
                        <!-- Versions will be added dynamically in version-picker.js -->
                    </ul>
                </div>
            </div>
        </div>

        <h1 class="menu-title">{{ book_title }}</h1>
@@ -321,4 +309,4 @@
{{/if}}

</body>
</html>
</html>
@@ -1,78 +0,0 @@
.version-picker {
    display: flex;
    align-items: center;
}

.version-picker .dropdown {
    width: 130px;
    max-height: 29px;
    margin-left: 10px;
    display: inline-block;
    border-radius: 4px;
    border: 1px solid var(--theme-popup-border);
    position: relative;
    font-size: 13px;
    color: var(--fg);
    height: 100%;
    text-align: left;
}
.version-picker .dropdown .select {
    cursor: pointer;
    display: block;
    padding: 5px 2px 5px 15px;
}
.version-picker .dropdown .select > i {
    font-size: 10px;
    color: var(--fg);
    cursor: pointer;
    float: right;
    line-height: 20px !important;
}
.version-picker .dropdown:hover {
    border: 1px solid var(--theme-popup-border);
}
.version-picker .dropdown:active {
    background-color: var(--theme-popup-bg);
}
.version-picker .dropdown.active:hover,
.version-picker .dropdown.active {
    border: 1px solid var(--theme-popup-border);
    border-radius: 2px 2px 0 0;
    background-color: var(--theme-popup-bg);
}
.version-picker .dropdown.active .select > i {
    transform: rotate(-180deg);
}
.version-picker .dropdown .dropdown-menu {
    position: absolute;
    background-color: var(--theme-popup-bg);
    width: 100%;
    left: -1px;
    right: 1px;
    margin-top: 1px;
    border: 1px solid var(--theme-popup-border);
    border-radius: 0 0 4px 4px;
    overflow: hidden;
    display: none;
    max-height: 300px;
    overflow-y: auto;
    z-index: 9;
}
.version-picker .dropdown .dropdown-menu li {
    font-size: 12px;
    padding: 6px 20px;
    cursor: pointer;
}
.version-picker .dropdown .dropdown-menu {
    padding: 0;
    list-style: none;
}
.version-picker .dropdown .dropdown-menu li:hover {
    background-color: var(--theme-hover);
}
.version-picker .dropdown .dropdown-menu li.active::before {
    display: inline-block;
    content: "✓";
    margin-inline-start: -14px;
    width: 14px;
}
@@ -1,127 +0,0 @@

const dropdown = document.querySelector('.version-picker .dropdown');
const dropdownMenu = dropdown.querySelector('.dropdown-menu');

fetchVersions(dropdown, dropdownMenu).then(() => {
    initializeVersionDropdown(dropdown, dropdownMenu);
});

/**
 * Initialize the dropdown functionality for version selection.
 *
 * @param {Element} dropdown - The dropdown element.
 * @param {Element} dropdownMenu - The dropdown menu element.
 */
function initializeVersionDropdown(dropdown, dropdownMenu) {
    // Toggle the dropdown menu on click
    dropdown.addEventListener('click', function () {
        this.setAttribute('tabindex', 1);
        this.classList.toggle('active');
        dropdownMenu.style.display = (dropdownMenu.style.display === 'block') ? 'none' : 'block';
    });

    // Remove the 'active' class and hide the dropdown menu on focusout
    dropdown.addEventListener('focusout', function () {
        this.classList.remove('active');
        dropdownMenu.style.display = 'none';
    });

    // Handle item selection within the dropdown menu
    const dropdownMenuItems = dropdownMenu.querySelectorAll('li');
    dropdownMenuItems.forEach(function (item) {
        item.addEventListener('click', function () {
            dropdownMenuItems.forEach(function (item) {
                item.classList.remove('active');
            });
            this.classList.add('active');
            dropdown.querySelector('span').textContent = this.textContent;
            dropdown.querySelector('input').value = this.getAttribute('id');

            window.location.href = changeVersion(window.location.href, this.textContent);
        });
    });
};

/**
 * This function fetches the available versions from a GitHub repository
 * and inserts them into the version picker.
 *
 * @param {Element} dropdown - The dropdown element.
 * @param {Element} dropdownMenu - The dropdown menu element.
 * @returns {Promise<Array<string>>} A promise that resolves with an array of available versions.
 */
function fetchVersions(dropdown, dropdownMenu) {
    return new Promise((resolve, reject) => {
        window.addEventListener("load", () => {

            fetch("https://api.github.com/repos/matrix-org/synapse/git/trees/gh-pages", {
                cache: "force-cache",
            }).then(res =>
                res.json()
            ).then(resObject => {
                const excluded = ['dev-docs', 'v1.91.0', 'v1.80.0', 'v1.69.0'];
                const tree = resObject.tree.filter(item => item.type === "tree" && !excluded.includes(item.path));
                const versions = tree.map(item => item.path).sort(sortVersions);

                // Create a list of <li> items for versions
                versions.forEach((version) => {
                    const li = document.createElement("li");
                    li.textContent = version;
                    li.id = version;

                    if (window.SYNAPSE_VERSION === version) {
                        li.classList.add('active');
                        dropdown.querySelector('span').textContent = version;
                        dropdown.querySelector('input').value = version;
                    }

                    dropdownMenu.appendChild(li);
                });

                resolve(versions);

            }).catch(ex => {
                console.error("Failed to fetch version data", ex);
                reject(ex);
            })
        });
    });
}

/**
 * Custom sorting function to sort an array of version strings.
 *
 * @param {string} a - The first version string to compare.
 * @param {string} b - The second version string to compare.
 * @returns {number} - A negative number if a should come before b, a positive number if b should come before a, or 0 if they are equal.
 */
function sortVersions(a, b) {
    // Put 'develop' and 'latest' at the top
    if (a === 'develop' || a === 'latest') return -1;
    if (b === 'develop' || b === 'latest') return 1;

    const versionA = (a.match(/v\d+(\.\d+)+/) || [])[0];
    const versionB = (b.match(/v\d+(\.\d+)+/) || [])[0];

    return versionB.localeCompare(versionA);
}

/**
 * Change the version in a URL path.
 *
 * @param {string} url - The original URL to be modified.
 * @param {string} newVersion - The new version to replace the existing version in the URL.
 * @returns {string} The updated URL with the new version.
 */
function changeVersion(url, newVersion) {
    const parsedURL = new URL(url);
    const pathSegments = parsedURL.pathname.split('/');

    // Modify the version
    pathSegments[2] = newVersion;

    // Reconstruct the URL
    parsedURL.pathname = pathSegments.join('/');

    return parsedURL.href;
}
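
To illustrate `changeVersion` with a hypothetical URL (the function assumes
pages are served under a `/<project>/<version>/` path, so the version is the
second path segment):

```js
// Hypothetical deployment layout: https://<host>/synapse/<version>/<page>
changeVersion("https://example.github.io/synapse/v1.37/setup.html", "v1.38");
// -> "https://example.github.io/synapse/v1.38/setup.html"
```
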
@@ -1 +0,0 @@
window.SYNAPSE_VERSION = 'v1.38';

1
mypy.ini
@@ -75,7 +75,6 @@ files =
  synapse/util/daemonize.py,
  synapse/util/hash.py,
  synapse/util/iterutils.py,
  synapse/util/linked_list.py,
  synapse/util/metrics.py,
  synapse/util/macaroons.py,
  synapse/util/module_loader.py,

@@ -10,7 +10,6 @@
# can be passed on the commandline for debugging.

import argparse
import json
import os
import signal
import subprocess
@@ -35,8 +34,6 @@ By default, builds for all known distributions, but a list of distributions
can be passed on the commandline for debugging.
"""

projdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))


class Builder(object):
    def __init__(self, redirect_stdout=False):
@@ -60,6 +57,9 @@ class Builder(object):
            raise

    def _inner_build(self, dist, skip_tests=False):
        projdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
        os.chdir(projdir)

        tag = dist.split(":", 1)[1]

        # Make the dir where the debs will live.
@@ -93,7 +93,6 @@ class Builder(object):
            ],
            stdout=stdout,
            stderr=subprocess.STDOUT,
            cwd=projdir,
        )

        container_name = "synapse_build_" + tag
@@ -180,11 +179,6 @@ if __name__ == "__main__":
        action="store_true",
        help="skip running tests after building",
    )
    parser.add_argument(
        "--show-dists-json",
        action="store_true",
        help="instead of building the packages, just list the dists to build for, as a json array",
    )
    parser.add_argument(
        "dist",
        nargs="*",
@@ -192,7 +186,4 @@ if __name__ == "__main__":
        help="a list of distributions to build for. Default: %(default)s",
    )
    args = parser.parse_args()
    if args.show_dists_json:
        print(json.dumps(DISTS))
    else:
        run_builds(dists=args.dist, jobs=args.jobs, skip_tests=args.no_check)
    run_builds(dists=args.dist, jobs=args.jobs, skip_tests=args.no_check)

@@ -93,7 +93,6 @@ BOOLEAN_COLUMNS = {
    "local_media_repository": ["safe_from_quarantine"],
    "users": ["shadow_banned"],
    "e2e_fallback_keys_json": ["used"],
    "access_tokens": ["used"],
}


@@ -308,8 +307,7 @@ class Porter(object):
                information_schema.table_constraints AS tc
                INNER JOIN information_schema.constraint_column_usage AS ccu
                USING (table_schema, constraint_name)
            WHERE tc.constraint_type = 'FOREIGN KEY'
              AND tc.table_name != ccu.table_name;
            WHERE tc.constraint_type = 'FOREIGN KEY';
        """
        txn.execute(sql)


@@ -1,19 +0,0 @@
#!/usr/bin/env python
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from synapse._scripts.review_recent_signups import main

if __name__ == "__main__":
    main()

@@ -47,7 +47,7 @@ try:
except ImportError:
    pass

__version__ = "1.38.1"
__version__ = "1.37.0rc1"

if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
    # We import here so that we don't have to install a bunch of deps when

@@ -1,175 +0,0 @@
#!/usr/bin/env python
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import sys
import time
from datetime import datetime
from typing import List

import attr

from synapse.config._base import RootConfig, find_config_files, read_config_files
from synapse.config.database import DatabaseConfig
from synapse.storage.database import DatabasePool, LoggingTransaction, make_conn
from synapse.storage.engines import create_engine


class ReviewConfig(RootConfig):
    "A config class that just pulls out the database config"
    config_classes = [DatabaseConfig]


@attr.s(auto_attribs=True)
class UserInfo:
    user_id: str
    creation_ts: int
    emails: List[str] = attr.Factory(list)
    private_rooms: List[str] = attr.Factory(list)
    public_rooms: List[str] = attr.Factory(list)
    ips: List[str] = attr.Factory(list)


def get_recent_users(txn: LoggingTransaction, since_ms: int) -> List[UserInfo]:
    """Fetches recently registered users and some info on them."""

    sql = """
        SELECT name, creation_ts FROM users
        WHERE
            ? <= creation_ts
            AND deactivated = 0
    """

    txn.execute(sql, (since_ms / 1000,))

    user_infos = [UserInfo(user_id, creation_ts) for user_id, creation_ts in txn]

    for user_info in user_infos:
        user_info.emails = DatabasePool.simple_select_onecol_txn(
            txn,
            table="user_threepids",
            keyvalues={"user_id": user_info.user_id, "medium": "email"},
            retcol="address",
        )

        sql = """
            SELECT room_id, canonical_alias, name, join_rules
            FROM local_current_membership
            INNER JOIN room_stats_state USING (room_id)
            WHERE user_id = ? AND membership = 'join'
        """

        txn.execute(sql, (user_info.user_id,))
        for room_id, canonical_alias, name, join_rules in txn:
            if join_rules == "public":
                user_info.public_rooms.append(canonical_alias or name or room_id)
            else:
                user_info.private_rooms.append(canonical_alias or name or room_id)

        user_info.ips = DatabasePool.simple_select_onecol_txn(
            txn,
            table="user_ips",
            keyvalues={"user_id": user_info.user_id},
            retcol="ip",
        )

    return user_infos


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c",
        "--config-path",
        action="append",
        metavar="CONFIG_FILE",
        help="The config files for Synapse.",
        required=True,
    )
    parser.add_argument(
        "-s",
        "--since",
        metavar="duration",
        help="Specify how far back to review user registrations for, defaults to 7d (i.e. 7 days).",
        default="7d",
    )
    parser.add_argument(
        "-e",
        "--exclude-emails",
        action="store_true",
        help="Exclude users that have validated email addresses",
    )
    parser.add_argument(
        "-u",
        "--only-users",
        action="store_true",
        help="Only print user IDs that match.",
    )

    config = ReviewConfig()

    config_args = parser.parse_args(sys.argv[1:])
    config_files = find_config_files(search_paths=config_args.config_path)
    config_dict = read_config_files(config_files)
    config.parse_config_dict(
        config_dict,
    )

    since_ms = time.time() * 1000 - config.parse_duration(config_args.since)
    exclude_users_with_email = config_args.exclude_emails
    include_context = not config_args.only_users

    for database_config in config.database.databases:
        if "main" in database_config.databases:
            break

    engine = create_engine(database_config.config)

    with make_conn(database_config, engine, "review_recent_signups") as db_conn:
        user_infos = get_recent_users(db_conn.cursor(), since_ms)

    for user_info in user_infos:
        if exclude_users_with_email and user_info.emails:
            continue

        if include_context:
            print_public_rooms = ""
            if user_info.public_rooms:
                print_public_rooms = "(" + ", ".join(user_info.public_rooms[:3])

                if len(user_info.public_rooms) > 3:
                    print_public_rooms += ", ..."

                print_public_rooms += ")"

            print("# Created:", datetime.fromtimestamp(user_info.creation_ts))
            print("# Email:", ", ".join(user_info.emails) or "None")
            print("# IPs:", ", ".join(user_info.ips))
            print(
                "# Number joined public rooms:",
                len(user_info.public_rooms),
                print_public_rooms,
            )
            print("# Number joined private rooms:", len(user_info.private_rooms))
            print("#")

        print(user_info.user_id)

        if include_context:
            print()


if __name__ == "__main__":
    main()
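
Based on the argparse options above, invoking the reviewer looks roughly like
this (the wrapper script's path in the source tree is an assumption):

```sh
# Review accounts registered over the last 3 days, skipping users who
# have a validated email address.
./scripts/review_recent_signups -c homeserver.yaml -s 3d --exclude-emails
```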
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Optional, Tuple
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple

import pymacaroons
from netaddr import IPAddress
@@ -28,6 +28,7 @@ from synapse.api.errors import (
    InvalidClientTokenError,
    MissingClientTokenError,
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.appservice import ApplicationService
from synapse.events import EventBase
from synapse.http import get_request_user_agent
@@ -37,6 +38,7 @@ from synapse.storage.databases.main.registration import TokenLookupResult
from synapse.types import Requester, StateMap, UserID, create_requester
from synapse.util.caches.lrucache import LruCache
from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry
from synapse.util.metrics import Measure

if TYPE_CHECKING:
    from synapse.server import HomeServer
@@ -44,6 +46,15 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)


AuthEventTypes = (
    EventTypes.Create,
    EventTypes.Member,
    EventTypes.PowerLevels,
    EventTypes.JoinRules,
    EventTypes.RoomHistoryVisibility,
    EventTypes.ThirdPartyInvite,
)

# guests always get this device id.
GUEST_DEVICE_ID = "guest_device"

@@ -54,7 +65,9 @@ class _InvalidMacaroonException(Exception):

class Auth:
    """
    This class contains functions for authenticating users of our client-server API.
    FIXME: This class contains a mix of functions for authenticating users
    of our client-server API and authenticating events added to room graphs.
    The latter should be moved to synapse.handlers.event_auth.EventAuthHandler.
    """

    def __init__(self, hs: "HomeServer"):
@@ -76,6 +89,18 @@ class Auth:
        self._macaroon_secret_key = hs.config.macaroon_secret_key
        self._force_tracing_for_users = hs.config.tracing.force_tracing_for_users

    async def check_from_context(
        self, room_version: str, event, context, do_sig_check=True
    ) -> None:
        auth_event_ids = event.auth_event_ids()
        auth_events_by_id = await self.store.get_events(auth_event_ids)
        auth_events = {(e.type, e.state_key): e for e in auth_events_by_id.values()}

        room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
        event_auth.check(
            room_version_obj, event, auth_events=auth_events, do_sig_check=do_sig_check
        )

    async def check_user_in_room(
        self,
        room_id: str,
@@ -126,6 +151,13 @@ class Auth:

        raise AuthError(403, "User %s not in room %s" % (user_id, room_id))

    async def check_host_in_room(self, room_id: str, host: str) -> bool:
        with Measure(self.clock, "check_host_in_room"):
            return await self.store.is_host_joined(room_id, host)

    def get_public_keys(self, invite_event: EventBase) -> List[Dict[str, Any]]:
        return event_auth.get_public_keys(invite_event)

    async def get_user_by_req(
        self,
        request: SynapseRequest,
@@ -213,11 +245,6 @@ class Auth:
                errcode=Codes.GUEST_ACCESS_FORBIDDEN,
            )

        # Mark the token as used. This is used to invalidate old refresh
        # tokens after some time.
        if not user_info.token_used and token_id is not None:
            await self.store.mark_access_token_as_used(token_id)

        requester = create_requester(
            user_info.user_id,
            token_id,
@@ -456,6 +483,44 @@ class Auth:
        """
        return await self.store.is_server_admin(user)

    def compute_auth_events(
        self,
        event,
        current_state_ids: StateMap[str],
        for_verification: bool = False,
    ) -> List[str]:
        """Given an event and current state return the list of event IDs used
        to auth an event.

        If `for_verification` is False then only return auth events that
        should be added to the event's `auth_events`.

        Returns:
            List of event IDs.
        """

        if event.type == EventTypes.Create:
            return []

        # Currently we ignore the `for_verification` flag even though there are
        # some situations where we can drop particular auth events when adding
        # to the event's `auth_events` (e.g. joins pointing to previous joins
        # when room is publicly joinable). Dropping event IDs has the
        # advantage that the auth chain for the room grows slower, but we use
        # the auth chain in state resolution v2 to order events, which means
        # care must be taken if dropping events to ensure that it doesn't
        # introduce undesirable "state reset" behaviour.
        #
        # All of which sounds a bit tricky so we don't bother for now.

        auth_ids = []
        for etype, state_key in event_auth.auth_types_for_event(event):
            auth_ev_id = current_state_ids.get((etype, state_key))
            if auth_ev_id:
                auth_ids.append(auth_ev_id)

        return auth_ids

    async def check_can_change_room_list(self, room_id: str, user: UserID) -> bool:
        """Determine whether the user is allowed to edit the room's entry in the
        published room list.

@@ -201,12 +201,6 @@ class EventContentFields:
    )


class RoomTypes:
    """Understood values of the room_type field of m.room.create events."""

    SPACE = "m.space"


class RoomEncryptionAlgorithms:
    MEGOLM_V1_AES_SHA2 = "m.megolm.v1.aes-sha2"
    DEFAULT = MEGOLM_V1_AES_SHA2

@@ -21,7 +21,7 @@ import socket
import sys
import traceback
import warnings
from typing import TYPE_CHECKING, Awaitable, Callable, Iterable
from typing import Awaitable, Callable, Iterable

from cryptography.utils import CryptographyDeprecationWarning
from typing_extensions import NoReturn
@@ -41,14 +41,10 @@ from synapse.events.spamcheck import load_legacy_spam_checkers
from synapse.logging.context import PreserveLoggingContext
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.metrics.jemalloc import setup_jemalloc_stats
from synapse.util.caches.lrucache import setup_expire_lru_cache_entries
from synapse.util.daemonize import daemonize_process
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

# list of tuples of function, args list, kwargs dict
@@ -316,7 +312,7 @@ def refresh_certificate(hs):
    logger.info("Context factories updated.")


async def start(hs: "HomeServer"):
async def start(hs: "synapse.server.HomeServer"):
    """
    Start a Synapse server or worker.

@@ -369,9 +365,6 @@ async def start(hs: "HomeServer"):

    load_legacy_spam_checkers(hs)

    # If we've configured an expiry time for caches, start the background job now.
    setup_expire_lru_cache_entries(hs)

    # It is now safe to start your Synapse.
    hs.start_listening()
    hs.get_datastore().db_pool.start_profiling()

@@ -108,7 +108,6 @@ from synapse.server import HomeServer
from synapse.storage.databases.main.censor_events import CensorEventsStore
from synapse.storage.databases.main.client_ips import ClientIpWorkerStore
from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyStore
from synapse.storage.databases.main.lock import LockStore
from synapse.storage.databases.main.media_repository import MediaRepositoryStore
from synapse.storage.databases.main.metrics import ServerMetricsStore
from synapse.storage.databases.main.monthly_active_users import (
@@ -250,7 +249,6 @@ class GenericWorkerSlavedStore(
    ServerMetricsStore,
    SearchStore,
    TransactionWorkerStore,
    LockStore,
    BaseSlavedStore,
):
    pass

@@ -5,7 +5,6 @@ from synapse.config import (
    api,
    appservice,
    auth,
    cache,
    captcha,
    cas,
    consent,
@@ -89,7 +88,6 @@ class RootConfig:
    tracer: tracer.TracerConfig
    redis: redis.RedisConfig
    modules: modules.ModulesConfig
    caches: cache.CacheConfig
    federation: federation.FederationConfig

    config_classes: List = ...

@@ -116,41 +116,35 @@ class CacheConfig(Config):
        #event_cache_size: 10K

        caches:
           # Controls the global cache factor, which is the default cache factor
           # for all caches if a specific factor for that cache is not otherwise
           # set.
           #
           # This can also be set by the "SYNAPSE_CACHE_FACTOR" environment
           # variable. Setting by environment variable takes priority over
           # setting through the config file.
           #
           # Defaults to 0.5, which will half the size of all caches.
           #
           #global_factor: 1.0
          # Controls the global cache factor, which is the default cache factor
          # for all caches if a specific factor for that cache is not otherwise
          # set.
          #
          # This can also be set by the "SYNAPSE_CACHE_FACTOR" environment
          # variable. Setting by environment variable takes priority over
          # setting through the config file.
          #
          # Defaults to 0.5, which will half the size of all caches.
          #
          #global_factor: 1.0

           # A dictionary of cache name to cache factor for that individual
           # cache. Overrides the global cache factor for a given cache.
           #
           # These can also be set through environment variables comprised
           # of "SYNAPSE_CACHE_FACTOR_" + the name of the cache in capital
           # letters and underscores. Setting by environment variable
           # takes priority over setting through the config file.
           # Ex. SYNAPSE_CACHE_FACTOR_GET_USERS_WHO_SHARE_ROOM_WITH_USER=2.0
           #
           # Some caches have '*' and other characters that are not
           # alphanumeric or underscores. These caches can be named with or
           # without the special characters stripped. For example, to specify
           # the cache factor for `*stateGroupCache*` via an environment
           # variable would be `SYNAPSE_CACHE_FACTOR_STATEGROUPCACHE=2.0`.
           #
           per_cache_factors:
             #get_users_who_share_room_with_user: 2.0

           # Controls how long an entry can be in a cache without having been
           # accessed before being evicted. Defaults to None, which means
           # entries are never evicted based on time.
           #
           #expiry_time: 30m
          # A dictionary of cache name to cache factor for that individual
          # cache. Overrides the global cache factor for a given cache.
          #
          # These can also be set through environment variables comprised
          # of "SYNAPSE_CACHE_FACTOR_" + the name of the cache in capital
          # letters and underscores. Setting by environment variable
          # takes priority over setting through the config file.
          # Ex. SYNAPSE_CACHE_FACTOR_GET_USERS_WHO_SHARE_ROOM_WITH_USER=2.0
          #
          # Some caches have '*' and other characters that are not
          # alphanumeric or underscores. These caches can be named with or
          # without the special characters stripped. For example, to specify
          # the cache factor for `*stateGroupCache*` via an environment
          # variable would be `SYNAPSE_CACHE_FACTOR_STATEGROUPCACHE=2.0`.
          #
          per_cache_factors:
            #get_users_who_share_room_with_user: 2.0
        """

    def read_config(self, config, **kwargs):
@@ -206,12 +200,6 @@ class CacheConfig(Config):
            e.message  # noqa: B306, DependencyException.message is a property
        )

        expiry_time = cache_config.get("expiry_time")
        if expiry_time:
            self.expiry_time_msec = self.parse_duration(expiry_time)
        else:
            self.expiry_time_msec = None

        # Resize all caches (if necessary) with the new factors we've loaded
        self.resize_all_caches()

@@ -22,7 +22,7 @@ DEFAULT_CONFIG = """\
|
||||
# User Consent configuration
|
||||
#
|
||||
# for detailed instructions, see
|
||||
# https://matrix-org.github.io/synapse/latest/consent_tracking.html
|
||||
# https://github.com/matrix-org/synapse/blob/master/docs/consent_tracking.md
|
||||
#
|
||||
# Parts of this section are required if enabling the 'consent' resource under
|
||||
# 'listeners', in particular 'template_dir' and 'version'.
|
||||
|
||||
@@ -62,8 +62,7 @@ DEFAULT_CONFIG = """\
|
||||
# cp_min: 5
|
||||
# cp_max: 10
|
||||
#
|
||||
# For more information on using Synapse with Postgres,
|
||||
# see https://matrix-org.github.io/synapse/latest/postgres.html.
|
||||
# For more information on using Synapse with Postgres, see `docs/postgres.md`.
|
||||
#
|
||||
database:
|
||||
name: sqlite3
|
||||
|
||||
@@ -64,7 +64,7 @@ class JWTConfig(Config):
|
||||
# Note that this is a non-standard login type and client support is
|
||||
# expected to be non-existent.
|
||||
#
|
||||
# See https://matrix-org.github.io/synapse/latest/jwt.html.
|
||||
# See https://github.com/matrix-org/synapse/blob/master/docs/jwt.md.
|
||||
#
|
||||
#jwt_config:
|
||||
# Uncomment the following to enable authorization using JSON web
|
||||
|
||||
@@ -49,7 +49,7 @@ DEFAULT_LOG_CONFIG = Template(
|
||||
# be ingested by ELK stacks. See [2] for details.
|
||||
#
|
||||
# [1]: https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema
|
||||
# [2]: https://matrix-org.github.io/synapse/latest/structured_logging.html
|
||||
# [2]: https://github.com/matrix-org/synapse/blob/master/docs/structured_logging.md
|
||||
|
||||
version: 1
|
||||
|
||||
|
||||
@@ -37,7 +37,7 @@ class ModulesConfig(Config):
|
||||
|
||||
# Server admins can expand Synapse's functionality with external modules.
|
||||
#
|
||||
# See https://matrix-org.github.io/synapse/latest/modules.html for more
|
||||
# See https://matrix-org.github.io/synapse/develop/modules.html for more
|
||||
# documentation on how to configure or create custom modules for Synapse.
|
||||
#
|
||||
modules:
|
||||
|
||||
@@ -166,7 +166,7 @@ class OIDCConfig(Config):
|
||||
#
|
||||
# module: The class name of a custom mapping module. Default is
|
||||
# {mapping_provider!r}.
|
||||
# See https://matrix-org.github.io/synapse/latest/sso_mapping_providers.html#openid-mapping-providers
|
||||
# See https://github.com/matrix-org/synapse/blob/master/docs/sso_mapping_providers.md#openid-mapping-providers
|
||||
# for information on implementing a custom mapping provider.
|
||||
#
|
||||
# config: Configuration for the mapping provider module. This section will
|
||||
@@ -217,7 +217,7 @@ class OIDCConfig(Config):
|
||||
# - attribute: groups
|
||||
# value: "admin"
|
||||
#
|
||||
# See https://matrix-org.github.io/synapse/latest/openid.html
|
||||
# See https://github.com/matrix-org/synapse/blob/master/docs/openid.md
|
||||
# for information on how to configure these options.
|
||||
#
|
||||
# For backwards compatibility, it is also possible to configure a single OIDC
|
||||
|
||||
@@ -57,7 +57,7 @@ class PasswordAuthProviderConfig(Config):
|
||||
# ex. LDAP, external tokens, etc.
|
||||
#
|
||||
# For more information and known implementations, please see
|
||||
# https://matrix-org.github.io/synapse/latest/password_auth_providers.html
|
||||
# https://github.com/matrix-org/synapse/blob/master/docs/password_auth_providers.md
|
||||
#
|
||||
# Note: instances wishing to use SAML or CAS authentication should
|
||||
# instead use the `saml2_config` or `cas_config` options,
|
||||
|
||||
@@ -119,27 +119,6 @@ class RegistrationConfig(Config):
|
||||
session_lifetime = self.parse_duration(session_lifetime)
|
||||
self.session_lifetime = session_lifetime
|
||||
|
||||
# The `access_token_lifetime` applies for tokens that can be renewed
|
||||
# using a refresh token, as per MSC2918. If it is `None`, the refresh
|
||||
# token mechanism is disabled.
|
||||
#
|
||||
# Since it is incompatible with the `session_lifetime` mechanism, it is set to
|
||||
# `None` by default if a `session_lifetime` is set.
|
||||
access_token_lifetime = config.get(
|
||||
"access_token_lifetime", "5m" if session_lifetime is None else None
|
||||
)
|
||||
if access_token_lifetime is not None:
|
||||
access_token_lifetime = self.parse_duration(access_token_lifetime)
|
||||
self.access_token_lifetime = access_token_lifetime
|
||||
|
||||
if session_lifetime is not None and access_token_lifetime is not None:
|
||||
raise ConfigError(
|
||||
"The refresh token mechanism is incompatible with the "
|
||||
"`session_lifetime` option. Consider disabling the "
|
||||
"`session_lifetime` option or disabling the refresh token "
|
||||
"mechanism by removing the `access_token_lifetime` option."
|
||||
)
|
||||
|
||||
# The success template used during fallback auth.
|
||||
self.fallback_success_template = self.read_template("auth_success.html")
|
||||
|
||||
|
||||
@@ -250,7 +250,7 @@ class ContentRepositoryConfig(Config):
        #
        # If you are using a reverse proxy you may also need to set this value in
        # your reverse proxy's config. Notably Nginx has a small max body size by default.
        # See https://matrix-org.github.io/synapse/latest/reverse_proxy.html.
        # See https://matrix-org.github.io/synapse/develop/reverse_proxy.html.
        #
        #max_upload_size: 50M


@@ -153,7 +153,7 @@ ROOM_COMPLEXITY_TOO_GREAT = (
METRICS_PORT_WARNING = """\
The metrics_port configuration option is deprecated in Synapse 0.31 in favour of
a listener. Please see
https://matrix-org.github.io/synapse/latest/metrics-howto.html
https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.md
on how to configure the new listener.
--------------------------------------------------------------------------------"""

@@ -811,7 +811,7 @@ class ServerConfig(Config):
        # In most cases you should avoid using a matrix specific subdomain such as
        # matrix.example.com or synapse.example.com as the server_name for the same
        # reasons you wouldn't use user@email.example.com as your email address.
        # See https://matrix-org.github.io/synapse/latest/delegate.html
        # See https://github.com/matrix-org/synapse/blob/master/docs/delegate.md
        # for information on how to host Synapse on a subdomain while preserving
        # a clean server_name.
        #
@@ -988,9 +988,9 @@ class ServerConfig(Config):
        #   'all local interfaces'.
        #
        #   type: the type of listener. Normally 'http', but other valid options are:
        #       'manhole' (see https://matrix-org.github.io/synapse/latest/manhole.html),
        #       'metrics' (see https://matrix-org.github.io/synapse/latest/metrics-howto.html),
        #       'replication' (see https://matrix-org.github.io/synapse/latest/workers.html).
        #       'manhole' (see docs/manhole.md),
        #       'metrics' (see docs/metrics-howto.md),
        #       'replication' (see docs/workers.md).
        #
        #   tls: set to true to enable TLS for this listener. Will use the TLS
        #       key/cert specified in tls_private_key_path / tls_certificate_path.
@@ -1015,8 +1015,8 @@ class ServerConfig(Config):
        #       client: the client-server API (/_matrix/client), and the synapse admin
        #           API (/_synapse/admin). Also implies 'media' and 'static'.
        #
        #       consent: user consent forms (/_matrix/consent).
        #           See https://matrix-org.github.io/synapse/latest/consent_tracking.html.
        #       consent: user consent forms (/_matrix/consent). See
        #           docs/consent_tracking.md.
        #
        #       federation: the server-server API (/_matrix/federation). Also implies
        #           'media', 'keys', 'openid'
@@ -1025,13 +1025,12 @@ class ServerConfig(Config):
        #
        #       media: the media API (/_matrix/media).
        #
        #       metrics: the metrics interface.
        #           See https://matrix-org.github.io/synapse/latest/metrics-howto.html.
        #       metrics: the metrics interface. See docs/metrics-howto.md.
        #
        #       openid: OpenID authentication.
        #
        #       replication: the HTTP replication API (/_synapse/replication).
        #           See https://matrix-org.github.io/synapse/latest/workers.html.
        #       replication: the HTTP replication API (/_synapse/replication). See
        #           docs/workers.md.
        #
        #       static: static resources under synapse/static (/_matrix/static). (Mostly
        #           useful for 'fallback authentication'.)
@@ -1051,7 +1050,7 @@ class ServerConfig(Config):
        #   that unwraps TLS.
        #
        #   If you plan to use a reverse proxy, please see
        #   https://matrix-org.github.io/synapse/latest/reverse_proxy.html.
        #   https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.md.
        #
        %(unsecure_http_bindings)s


@@ -26,7 +26,7 @@ LEGACY_SPAM_CHECKER_WARNING = """
This server is using a spam checker module that is implementing the deprecated spam
checker interface. Please check with the module's maintainer to see if a new version
supporting Synapse's generic modules system is available.
For more information, please see https://matrix-org.github.io/synapse/latest/modules.html
For more information, please see https://matrix-org.github.io/synapse/develop/modules.html
---------------------------------------------------------------------------------------"""


@@ -51,7 +51,7 @@ class StatsConfig(Config):
    def generate_config_section(self, config_dir_path, server_name, **kwargs):
        return """
        # Settings for local room and user statistics collection. See
        # https://matrix-org.github.io/synapse/latest/room_and_user_statistics.html.
        # docs/room_and_user_statistics.md.
        #
        stats:
        # Uncomment the following to disable room and user statistics. Note that doing

@@ -81,7 +81,7 @@ class TracerConfig(Config):
        #enabled: true

        # The list of homeservers we wish to send and receive span contexts and span baggage.
        # See https://matrix-org.github.io/synapse/latest/opentracing.html.
        # See docs/opentracing.rst.
        #
        # This is a list of regexes which are matched against the server_name of the
        # homeserver.

@@ -50,7 +50,7 @@ class UserDirectoryConfig(Config):
        #
        # If you set it true, you'll have to rebuild the user_directory search
        # indexes, see:
        #   https://matrix-org.github.io/synapse/latest/user_directory.html
        #   https://github.com/matrix-org/synapse/blob/master/docs/user_directory.md
        #
        # Uncomment to return search results containing all known users, even if that
        # user does not share a room with the requester.

@@ -14,7 +14,7 @@
# limitations under the License.

import logging
from typing import Any, Dict, List, Optional, Set, Tuple, Union
from typing import Any, Dict, List, Optional, Set, Tuple

from canonicaljson import encode_canonical_json
from signedjson.key import decode_verify_key_bytes
@@ -29,7 +29,6 @@ from synapse.api.room_versions import (
    RoomVersion,
)
from synapse.events import EventBase
from synapse.events.builder import EventBuilder
from synapse.types import StateMap, UserID, get_domain_from_id

logger = logging.getLogger(__name__)
@@ -725,7 +724,7 @@ def get_public_keys(invite_event: EventBase) -> List[Dict[str, Any]]:
    return public_keys


def auth_types_for_event(event: Union[EventBase, EventBuilder]) -> Set[Tuple[str, str]]:
def auth_types_for_event(event: EventBase) -> Set[Tuple[str, str]]:
    """Given an event, return a list of (EventType, StateKey) that may be
    needed to auth the event. The returned list may be a superset of what
    would actually be required depending on the full state of the room.

@@ -118,7 +118,7 @@ class _EventInternalMetadata:
    proactively_send = DictProperty("proactively_send")  # type: bool
    redacted = DictProperty("redacted")  # type: bool
    txn_id = DictProperty("txn_id")  # type: str
    token_id = DictProperty("token_id")  # type: int
    token_id = DictProperty("token_id")  # type: str
    historical = DictProperty("historical")  # type: bool

    # XXX: These are set by StreamWorkerStore._set_before_and_after.

@@ -12,11 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
from typing import Any, Dict, List, Optional, Tuple, Union

import attr
from nacl.signing import SigningKey

from synapse.api.auth import Auth
from synapse.api.constants import MAX_DEPTH
from synapse.api.errors import UnsupportedRoomVersionError
from synapse.api.room_versions import (
@@ -33,14 +34,10 @@ from synapse.types import EventID, JsonDict
from synapse.util import Clock
from synapse.util.stringutils import random_string

if TYPE_CHECKING:
    from synapse.handlers.event_auth import EventAuthHandler
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)


@attr.s(slots=True, cmp=False, frozen=True, auto_attribs=True)
@attr.s(slots=True, cmp=False, frozen=True)
class EventBuilder:
    """A format independent event builder used to build up the event content
    before signing the event.
@@ -65,30 +62,31 @@ class EventBuilder:
        _signing_key: The signing key to use to sign the event as the server
    """

    _state: StateHandler
    _event_auth_handler: "EventAuthHandler"
    _store: DataStore
    _clock: Clock
    _hostname: str
    _signing_key: SigningKey
    _state = attr.ib(type=StateHandler)
    _auth = attr.ib(type=Auth)
    _store = attr.ib(type=DataStore)
    _clock = attr.ib(type=Clock)
    _hostname = attr.ib(type=str)
    _signing_key = attr.ib(type=SigningKey)

    room_version: RoomVersion
    room_version = attr.ib(type=RoomVersion)

    room_id: str
    type: str
    sender: str
    room_id = attr.ib(type=str)
    type = attr.ib(type=str)
    sender = attr.ib(type=str)

    content: JsonDict = attr.Factory(dict)
    unsigned: JsonDict = attr.Factory(dict)
    content = attr.ib(default=attr.Factory(dict), type=JsonDict)
    unsigned = attr.ib(default=attr.Factory(dict), type=JsonDict)

    # These only exist on a subset of events, so they raise AttributeError if
    # someone tries to get them when they don't exist.
    _state_key: Optional[str] = None
    _redacts: Optional[str] = None
    _origin_server_ts: Optional[int] = None
    _state_key = attr.ib(default=None, type=Optional[str])
    _redacts = attr.ib(default=None, type=Optional[str])
    _origin_server_ts = attr.ib(default=None, type=Optional[int])

    internal_metadata: _EventInternalMetadata = attr.Factory(
        lambda: _EventInternalMetadata({})
    internal_metadata = attr.ib(
        default=attr.Factory(lambda: _EventInternalMetadata({})),
        type=_EventInternalMetadata,
    )

    @property
@@ -125,9 +123,7 @@ class EventBuilder:
            state_ids = await self._state.get_current_state_ids(
                self.room_id, prev_event_ids
            )
            auth_event_ids = self._event_auth_handler.compute_auth_events(
                self, state_ids
            )
            auth_event_ids = self._auth.compute_auth_events(self, state_ids)

            format_version = self.room_version.event_format
            if format_version == EventFormatVersions.V1:
@@ -188,23 +184,24 @@ class EventBuilder:


class EventBuilderFactory:
    def __init__(self, hs: "HomeServer"):
    def __init__(self, hs):
        self.clock = hs.get_clock()
        self.hostname = hs.hostname
        self.signing_key = hs.signing_key

        self.store = hs.get_datastore()
        self.state = hs.get_state_handler()
        self._event_auth_handler = hs.get_event_auth_handler()
        self.auth = hs.get_auth()

    def new(self, room_version: str, key_values: dict) -> EventBuilder:
    def new(self, room_version, key_values):
        """Generate an event builder appropriate for the given room version

        Deprecated: use for_room_version with a RoomVersion object instead

        Args:
            room_version: Version of the room that we're creating an event builder for
            key_values: Fields used as the basis of the new event
            room_version (str): Version of the room that we're creating an event builder
                for
            key_values (dict): Fields used as the basis of the new event

        Returns:
            EventBuilder
@@ -215,15 +212,13 @@ class EventBuilderFactory:
            raise UnsupportedRoomVersionError()
        return self.for_room_version(v, key_values)

    def for_room_version(
        self, room_version: RoomVersion, key_values: dict
    ) -> EventBuilder:
    def for_room_version(self, room_version, key_values):
        """Generate an event builder appropriate for the given room version

        Args:
            room_version:
            room_version (synapse.api.room_versions.RoomVersion):
                Version of the room that we're creating an event builder for
            key_values: Fields used as the basis of the new event
            key_values (dict): Fields used as the basis of the new event

        Returns:
            EventBuilder
@@ -231,7 +226,7 @@ class EventBuilderFactory:
        return EventBuilder(
            store=self.store,
            state=self.state,
            event_auth_handler=self._event_auth_handler,
            auth=self.auth,
            clock=self.clock,
            hostname=self.hostname,
            signing_key=self.signing_key,
@@ -291,15 +286,15 @@ def create_local_event_from_event_dict(
_event_id_counter = 0


def _create_event_id(clock: Clock, hostname: str) -> str:
def _create_event_id(clock, hostname):
    """Create a new event ID

    Args:
        clock
        hostname: The server name for the event ID
        clock (Clock)
        hostname (str): The server name for the event ID

    Returns:
        The new event ID
        str
    """

    global _event_id_counter

@@ -89,12 +89,12 @@ class FederationBase:
        result = await self.spam_checker.check_event_for_spam(pdu)

        if result:
            logger.warning("Event contains spam, soft-failing %s", pdu.event_id)
            # we redact (to save disk space) as well as soft-failing (to stop
            # using the event in prev_events).
            redacted_event = prune_event(pdu)
            redacted_event.internal_metadata.soft_failed = True
            return redacted_event
            logger.warning(
                "Event contains spam, redacting %s: %s",
                pdu.event_id,
                pdu.get_pdu_json(),
            )
            return prune_event(pdu)

        return pdu


@@ -34,7 +34,7 @@ from twisted.internet import defer
from twisted.internet.abstract import isIPAddress
from twisted.python import failure

from synapse.api.constants import EduTypes, EventTypes, Membership
from synapse.api.constants import EduTypes, EventTypes
from synapse.api.errors import (
    AuthError,
    Codes,
@@ -44,9 +44,8 @@ from synapse.api.errors import (
    SynapseError,
    UnsupportedRoomVersionError,
)
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Edu, Transaction
@@ -58,12 +57,10 @@ from synapse.logging.context import (
)
from synapse.logging.opentracing import log_kv, start_active_span_from_edu, trace
from synapse.logging.utils import log_function
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.replication.http.federation import (
    ReplicationFederationSendEduRestServlet,
    ReplicationGetQueryRestServlet,
)
from synapse.storage.databases.main.lock import Lock
from synapse.types import JsonDict
from synapse.util import glob_to_regex, json_decoder, unwrapFirstError
from synapse.util.async_helpers import Linearizer, concurrently_execute
@@ -99,18 +96,13 @@ last_pdu_ts_metric = Gauge(
)


# The name of the lock to use when process events in a room received over
# federation.
_INBOUND_EVENT_HANDLING_LOCK_NAME = "federation_inbound_pdu"


class FederationServer(FederationBase):
    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)

        self.auth = hs.get_auth()
        self.handler = hs.get_federation_handler()
        self.state = hs.get_state_handler()
        self._event_auth_handler = hs.get_event_auth_handler()

        self.device_handler = hs.get_device_handler()

@@ -148,41 +140,6 @@ class FederationServer(FederationBase):

        self._room_prejoin_state_types = hs.config.api.room_prejoin_state

        # Whether we have started handling old events in the staging area.
        self._started_handling_of_staged_events = False

    @wrap_as_background_process("_handle_old_staged_events")
    async def _handle_old_staged_events(self) -> None:
        """Handle old staged events by fetching all rooms that have staged
        events and start the processing of each of those rooms.
        """

        # Get all the rooms IDs with staged events.
        room_ids = await self.store.get_all_rooms_with_staged_incoming_events()

        # We then shuffle them so that if there are multiple instances doing
        # this work they're less likely to collide.
        random.shuffle(room_ids)

        for room_id in room_ids:
            room_version = await self.store.get_room_version(room_id)

            # Try and acquire the processing lock for the room, if we get it start a
            # background process for handling the events in the room.
            lock = await self.store.try_acquire_lock(
                _INBOUND_EVENT_HANDLING_LOCK_NAME, room_id
            )
            if lock:
                logger.info("Handling old staged inbound events in %s", room_id)
                self._process_incoming_pdus_in_room_inner(
                    room_id,
                    room_version,
                    lock,
                )

            # We pause a bit so that we don't start handling all rooms at once.
            await self._clock.sleep(random.uniform(0, 0.1))

    async def on_backfill_request(
        self, origin: str, room_id: str, versions: List[str], limit: int
    ) -> Tuple[int, Dict[str, Any]]:
@@ -201,12 +158,6 @@ class FederationServer(FederationBase):
    async def on_incoming_transaction(
        self, origin: str, transaction_data: JsonDict
    ) -> Tuple[int, Dict[str, Any]]:
        # If we receive a transaction we should make sure that kick off handling
        # any old events in the staging area.
        if not self._started_handling_of_staged_events:
            self._started_handling_of_staged_events = True
            self._handle_old_staged_events()

        # keep this as early as possible to make the calculated origin ts as
        # accurate as possible.
        request_time = self._clock.time_msec()
@@ -410,21 +361,22 @@ class FederationServer(FederationBase):

        async def process_pdu(pdu: EventBase) -> JsonDict:
            event_id = pdu.event_id
            with nested_logging_context(event_id):
                try:
                    await self._handle_received_pdu(origin, pdu)
                    return {}
                except FederationError as e:
                    logger.warning("Error handling PDU %s: %s", event_id, e)
                    return {"error": str(e)}
                except Exception as e:
                    f = failure.Failure()
                    logger.error(
                        "Failed to handle PDU %s",
                        event_id,
                        exc_info=(f.type, f.value, f.getTracebackObject()),  # type: ignore
                    )
                    return {"error": str(e)}
            with pdu_process_time.time():
                with nested_logging_context(event_id):
                    try:
                        await self._handle_received_pdu(origin, pdu)
                        return {}
                    except FederationError as e:
                        logger.warning("Error handling PDU %s: %s", event_id, e)
                        return {"error": str(e)}
                    except Exception as e:
                        f = failure.Failure()
                        logger.error(
                            "Failed to handle PDU %s",
                            event_id,
                            exc_info=(f.type, f.value, f.getTracebackObject()),  # type: ignore
                        )
                        return {"error": str(e)}

        await concurrently_execute(
            process_pdus_for_room, pdus_by_room.keys(), TRANSACTION_CONCURRENCY_LIMIT
@@ -461,7 +413,7 @@ class FederationServer(FederationBase):
        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, room_id)

        in_room = await self._event_auth_handler.check_host_in_room(room_id, origin)
        in_room = await self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

@@ -494,7 +446,7 @@ class FederationServer(FederationBase):
        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, room_id)

        in_room = await self._event_auth_handler.check_host_in_room(room_id, origin)
        in_room = await self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

@@ -585,21 +537,26 @@ class FederationServer(FederationBase):
        return {"event": ret_pdu.get_pdu_json(time_now)}

    async def on_send_join_request(
        self, origin: str, content: JsonDict, room_id: str
        self, origin: str, content: JsonDict
    ) -> Dict[str, Any]:
        context = await self._on_send_membership_event(
            origin, content, Membership.JOIN, room_id
        )
        logger.debug("on_send_join_request: content: %s", content)

        prev_state_ids = await context.get_prev_state_ids()
        state_ids = list(prev_state_ids.values())
        auth_chain = await self.store.get_auth_chain(room_id, state_ids)
        state = await self.store.get_events(state_ids)
        assert_params_in_dict(content, ["room_id"])
        room_version = await self.store.get_room_version(content["room_id"])
        pdu = event_from_pdu_json(content, room_version)

        origin_host, _ = parse_server_name(origin)
        await self.check_server_matches_acl(origin_host, pdu.room_id)

        logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)

        pdu = await self._check_sigs_and_hash(room_version, pdu)

        res_pdus = await self.handler.on_send_join_request(origin, pdu)
        time_now = self._clock.time_msec()
        return {
            "state": [p.get_pdu_json(time_now) for p in state.values()],
            "auth_chain": [p.get_pdu_json(time_now) for p in auth_chain],
            "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]],
            "auth_chain": [p.get_pdu_json(time_now) for p in res_pdus["auth_chain"]],
        }

    async def on_make_leave_request(
@@ -614,11 +571,21 @@ class FederationServer(FederationBase):
        time_now = self._clock.time_msec()
        return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}

    async def on_send_leave_request(
        self, origin: str, content: JsonDict, room_id: str
    ) -> dict:
    async def on_send_leave_request(self, origin: str, content: JsonDict) -> dict:
        logger.debug("on_send_leave_request: content: %s", content)
        await self._on_send_membership_event(origin, content, Membership.LEAVE, room_id)
|
||||
|
||||
assert_params_in_dict(content, ["room_id"])
|
||||
room_version = await self.store.get_room_version(content["room_id"])
|
||||
pdu = event_from_pdu_json(content, room_version)
|
||||
|
||||
origin_host, _ = parse_server_name(origin)
|
||||
await self.check_server_matches_acl(origin_host, pdu.room_id)
|
||||
|
||||
logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
|
||||
|
||||
pdu = await self._check_sigs_and_hash(room_version, pdu)
|
||||
|
||||
await self.handler.on_send_leave_request(origin, pdu)
|
||||
return {}
|
||||
|
||||
async def on_make_knock_request(
|
||||
@@ -684,9 +651,29 @@ class FederationServer(FederationBase):
|
||||
Returns:
|
||||
The stripped room state.
|
||||
"""
|
||||
event_context = await self._on_send_membership_event(
|
||||
origin, content, Membership.KNOCK, room_id
|
||||
)
|
||||
logger.debug("on_send_knock_request: content: %s", content)
|
||||
|
||||
room_version = await self.store.get_room_version(room_id)
|
||||
|
||||
# Check that this room supports knocking as defined by its room version
|
||||
if not room_version.msc2403_knocking:
|
||||
raise SynapseError(
|
||||
403,
|
||||
"This room version does not support knocking",
|
||||
errcode=Codes.FORBIDDEN,
|
||||
)
|
||||
|
||||
pdu = event_from_pdu_json(content, room_version)
|
||||
|
||||
origin_host, _ = parse_server_name(origin)
|
||||
await self.check_server_matches_acl(origin_host, pdu.room_id)
|
||||
|
||||
logger.debug("on_send_knock_request: pdu sigs: %s", pdu.signatures)
|
||||
|
||||
pdu = await self._check_sigs_and_hash(room_version, pdu)
|
||||
|
||||
# Handle the event, and retrieve the EventContext
|
||||
event_context = await self.handler.on_send_knock_request(origin, pdu)
|
||||
|
||||
# Retrieve stripped state events from the room and send them back to the remote
|
||||
# server. This will allow the remote server's clients to display information
|
||||
@@ -698,63 +685,6 @@ class FederationServer(FederationBase):
|
||||
)
|
||||
return {"knock_state_events": stripped_room_state}
|
||||
|
||||
async def _on_send_membership_event(
|
||||
self, origin: str, content: JsonDict, membership_type: str, room_id: str
|
||||
) -> EventContext:
|
||||
"""Handle an on_send_{join,leave,knock} request
|
||||
|
||||
Does some preliminary validation before passing the request on to the
|
||||
federation handler.
|
||||
|
||||
Args:
|
||||
origin: The (authenticated) requesting server
|
||||
content: The body of the send_* request - a complete membership event
|
||||
membership_type: The expected membership type (join or leave, depending
|
||||
on the endpoint)
|
||||
room_id: The room_id from the request, to be validated against the room_id
|
||||
in the event
|
||||
|
||||
Returns:
|
||||
The context of the event after inserting it into the room graph.
|
||||
|
||||
Raises:
|
||||
SynapseError if there is a problem with the request, including things like
|
||||
the room_id not matching or the event not being authorized.
|
||||
"""
|
||||
assert_params_in_dict(content, ["room_id"])
|
||||
if content["room_id"] != room_id:
|
||||
raise SynapseError(
|
||||
400,
|
||||
"Room ID in body does not match that in request path",
|
||||
Codes.BAD_JSON,
|
||||
)
|
||||
|
||||
room_version = await self.store.get_room_version(room_id)
|
||||
|
||||
if membership_type == Membership.KNOCK and not room_version.msc2403_knocking:
|
||||
raise SynapseError(
|
||||
403,
|
||||
"This room version does not support knocking",
|
||||
errcode=Codes.FORBIDDEN,
|
||||
)
|
||||
|
||||
event = event_from_pdu_json(content, room_version)
|
||||
|
||||
if event.type != EventTypes.Member or not event.is_state():
|
||||
raise SynapseError(400, "Not an m.room.member event", Codes.BAD_JSON)
|
||||
|
||||
if event.content.get("membership") != membership_type:
|
||||
raise SynapseError(400, "Not a %s event" % membership_type, Codes.BAD_JSON)
|
||||
|
||||
origin_host, _ = parse_server_name(origin)
|
||||
await self.check_server_matches_acl(origin_host, event.room_id)
|
||||
|
||||
logger.debug("_on_send_membership_event: pdu sigs: %s", event.signatures)
|
||||
|
||||
event = await self._check_sigs_and_hash(room_version, event)
|
||||
|
||||
return await self.handler.on_send_membership_event(origin, event)
|
||||
|
||||
async def on_event_auth(
|
||||
self, origin: str, room_id: str, event_id: str
|
||||
) -> Tuple[int, Dict[str, Any]]:
|
||||
@@ -904,105 +834,7 @@ class FederationServer(FederationBase):
|
||||
except SynapseError as e:
|
||||
raise FederationError("ERROR", e.code, e.msg, affected=pdu.event_id)
|
||||
|
||||
# Add the event to our staging area
|
||||
await self.store.insert_received_event_to_staging(origin, pdu)
|
||||
|
||||
# Try and acquire the processing lock for the room, if we get it start a
|
||||
# background process for handling the events in the room.
|
||||
lock = await self.store.try_acquire_lock(
|
||||
_INBOUND_EVENT_HANDLING_LOCK_NAME, pdu.room_id
|
||||
)
|
||||
if lock:
|
||||
self._process_incoming_pdus_in_room_inner(
|
||||
pdu.room_id, room_version, lock, origin, pdu
|
||||
)
|
||||
|
||||
@wrap_as_background_process("_process_incoming_pdus_in_room_inner")
|
||||
async def _process_incoming_pdus_in_room_inner(
|
||||
self,
|
||||
room_id: str,
|
||||
room_version: RoomVersion,
|
||||
lock: Lock,
|
||||
latest_origin: Optional[str] = None,
|
||||
latest_event: Optional[EventBase] = None,
|
||||
) -> None:
|
||||
"""Process events in the staging area for the given room.
|
||||
|
||||
The latest_origin and latest_event args are the latest origin and event
|
||||
received (or None to simply pull the next event from the database).
|
||||
"""
|
||||
|
||||
# The common path is for the event we just received be the only event in
|
||||
# the room, so instead of pulling the event out of the DB and parsing
|
||||
# the event we just pull out the next event ID and check if that matches.
|
||||
if latest_event is not None and latest_origin is not None:
|
||||
(
|
||||
next_origin,
|
||||
next_event_id,
|
||||
) = await self.store.get_next_staged_event_id_for_room(room_id)
|
||||
if next_origin != latest_origin or next_event_id != latest_event.event_id:
|
||||
latest_origin = None
|
||||
latest_event = None
|
||||
|
||||
if latest_origin is None or latest_event is None:
|
||||
next = await self.store.get_next_staged_event_for_room(
|
||||
room_id, room_version
|
||||
)
|
||||
if not next:
|
||||
await lock.release()
|
||||
return
|
||||
|
||||
origin, event = next
|
||||
else:
|
||||
origin = latest_origin
|
||||
event = latest_event
|
||||
|
||||
# We loop round until there are no more events in the room in the
|
||||
# staging area, or we fail to get the lock (which means another process
|
||||
# has started processing).
|
||||
while True:
|
||||
async with lock:
|
||||
try:
|
||||
await self.handler.on_receive_pdu(
|
||||
origin, event, sent_to_us_directly=True
|
||||
)
|
||||
except FederationError as e:
|
||||
# XXX: Ideally we'd inform the remote we failed to process
|
||||
# the event, but we can't return an error in the transaction
|
||||
# response (as we've already responded).
|
||||
logger.warning("Error handling PDU %s: %s", event.event_id, e)
|
||||
except Exception:
|
||||
f = failure.Failure()
|
||||
logger.error(
|
||||
"Failed to handle PDU %s",
|
||||
event.event_id,
|
||||
exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore
|
||||
)
|
||||
|
||||
received_ts = await self.store.remove_received_event_from_staging(
|
||||
origin, event.event_id
|
||||
)
|
||||
if received_ts is not None:
|
||||
pdu_process_time.observe(
|
||||
(self._clock.time_msec() - received_ts) / 1000
|
||||
)
|
||||
|
||||
# We need to do this check outside the lock to avoid a race between
|
||||
# a new event being inserted by another instance and it attempting
|
||||
# to acquire the lock.
|
||||
next = await self.store.get_next_staged_event_for_room(
|
||||
room_id, room_version
|
||||
)
|
||||
if not next:
|
||||
break
|
||||
|
||||
origin, event = next
|
||||
|
||||
lock = await self.store.try_acquire_lock(
|
||||
_INBOUND_EVENT_HANDLING_LOCK_NAME, room_id
|
||||
)
|
||||
if not lock:
|
||||
return
|
||||
await self.handler.on_receive_pdu(origin, pdu, sent_to_us_directly=True)
|
||||
|
||||
def __str__(self) -> str:
|
||||
return "<ReplicationLayer(%s)>" % self.server_name
|
||||
|
||||
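The staging-area code being removed above follows a simple pattern: persist the incoming PDU first, then let whichever worker wins a per-room lock drain that room's queue in the background. A minimal sketch of the same pattern, using in-memory queues and asyncio locks as hypothetical stand-ins for Synapse's database-backed store and Lock:

# Sketch of the per-room staging/lock pattern from the removed code.
# The queues, locks and handle_pdu below are illustrative stand-ins,
# not Synapse's actual storage or locking APIs.
import asyncio
from collections import defaultdict, deque

staged = defaultdict(deque)  # room_id -> queue of (origin, event)
room_locks = {}

async def receive_event(room_id, origin, event):
    staged[room_id].append((origin, event))
    lock = room_locks.setdefault(room_id, asyncio.Lock())
    if not lock.locked():
        # We "won the lock": kick off a background drain for this room.
        asyncio.create_task(drain_room(room_id, lock))

async def drain_room(room_id, lock):
    async with lock:
        # Loop until the room's staging queue is empty, mirroring
        # _process_incoming_pdus_in_room_inner above.
        while staged[room_id]:
            origin, event = staged[room_id].popleft()
            try:
                await handle_pdu(origin, event)
            except Exception as exc:
                # As the original comment notes, the transaction has already
                # been acked, so the failure can only be logged.
                print(f"failed to handle {event} from {origin}: {exc}")

async def handle_pdu(origin, event):
    await asyncio.sleep(0)  # stand-in for the real processing

async def main():
    await asyncio.gather(*(receive_event("!r:a", "a.example", i) for i in range(5)))
    await asyncio.sleep(0.1)

asyncio.run(main())

If two receivers race, both may start a drain task, but the lock serialises them and the loser simply finds an empty queue; the database-backed version gets the same effect across processes via try_acquire_lock.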
File diff suppressed because it is too large
synapse/handlers/admin.py

@@ -62,16 +62,9 @@ class AdminHandler(BaseHandler):
        if ret:
            profile = await self.store.get_profileinfo(user.localpart)
            threepids = await self.store.user_get_threepids(user.to_string())
            external_ids = [
                ({"auth_provider": auth_provider, "external_id": external_id})
                for auth_provider, external_id in await self.store.get_external_ids_by_user(
                    user.to_string()
                )
            ]
            ret["displayname"] = profile.display_name
            ret["avatar_url"] = profile.avatar_url
            ret["threepids"] = threepids
            ret["external_ids"] = external_ids
        return ret

    async def export_user_data(self, user_id: str, writer: "ExfiltrationWriter") -> Any:
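The external_ids lines being dropped here reshape (provider, id) pairs from storage into the JSON objects the admin API returns. A tiny illustration of that reshaping with hypothetical data:

# Sketch of the external_ids reshaping removed above: (provider, id)
# pairs become a list of JSON objects for the admin API response.
pairs = [("oidc", "abc123"), ("saml", "u=42")]
external_ids = [
    {"auth_provider": auth_provider, "external_id": external_id}
    for auth_provider, external_id in pairs
]
print(external_ids)
# [{'auth_provider': 'oidc', 'external_id': 'abc123'},
#  {'auth_provider': 'saml', 'external_id': 'u=42'}]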
synapse/handlers/auth.py

@@ -30,7 +30,6 @@ from typing import (
    Optional,
    Tuple,
    Union,
    cast,
)

import attr
@@ -73,7 +72,6 @@ from synapse.util.stringutils import base62_encode
from synapse.util.threepids import canonicalise_email

if TYPE_CHECKING:
    from synapse.rest.client.v1.login import LoginResponse
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)
@@ -779,108 +777,6 @@ class AuthHandler(BaseHandler):
            "params": params,
        }

    async def refresh_token(
        self,
        refresh_token: str,
        valid_until_ms: Optional[int],
    ) -> Tuple[str, str]:
        """
        Consumes a refresh token and generate both a new access token and a new refresh token from it.

        The consumed refresh token is considered invalid after the first use of the new access token or the new refresh token.

        Args:
            refresh_token: The token to consume.
            valid_until_ms: The expiration timestamp of the new access token.

        Returns:
            A tuple containing the new access token and refresh token
        """

        # Verify the token signature first before looking up the token
        if not self._verify_refresh_token(refresh_token):
            raise SynapseError(401, "invalid refresh token", Codes.UNKNOWN_TOKEN)

        existing_token = await self.store.lookup_refresh_token(refresh_token)
        if existing_token is None:
            raise SynapseError(401, "refresh token does not exist", Codes.UNKNOWN_TOKEN)

        if (
            existing_token.has_next_access_token_been_used
            or existing_token.has_next_refresh_token_been_refreshed
        ):
            raise SynapseError(
                403, "refresh token isn't valid anymore", Codes.FORBIDDEN
            )

        (
            new_refresh_token,
            new_refresh_token_id,
        ) = await self.get_refresh_token_for_user_id(
            user_id=existing_token.user_id, device_id=existing_token.device_id
        )
        access_token = await self.get_access_token_for_user_id(
            user_id=existing_token.user_id,
            device_id=existing_token.device_id,
            valid_until_ms=valid_until_ms,
            refresh_token_id=new_refresh_token_id,
        )
        await self.store.replace_refresh_token(
            existing_token.token_id, new_refresh_token_id
        )
        return access_token, new_refresh_token

    def _verify_refresh_token(self, token: str) -> bool:
        """
        Verifies the shape of a refresh token.

        Args:
            token: The refresh token to verify

        Returns:
            Whether the token has the right shape
        """
        parts = token.split("_", maxsplit=4)
        if len(parts) != 4:
            return False

        type, localpart, rand, crc = parts

        # Refresh tokens are prefixed by "syr_", let's check that
        if type != "syr":
            return False

        # Check the CRC
        base = f"{type}_{localpart}_{rand}"
        expected_crc = base62_encode(crc32(base.encode("ascii")), minwidth=6)
        if crc != expected_crc:
            return False

        return True

    async def get_refresh_token_for_user_id(
        self,
        user_id: str,
        device_id: str,
    ) -> Tuple[str, int]:
        """
        Creates a new refresh token for the user with the given user ID.

        Args:
            user_id: canonical user ID
            device_id: the device ID to associate with the token.

        Returns:
            The newly created refresh token and its ID in the database
        """
        refresh_token = self.generate_refresh_token(UserID.from_string(user_id))
        refresh_token_id = await self.store.add_refresh_token_to_user(
            user_id=user_id,
            token=refresh_token,
            device_id=device_id,
        )
        return refresh_token, refresh_token_id

    async def get_access_token_for_user_id(
        self,
        user_id: str,
@@ -888,7 +784,6 @@ class AuthHandler(BaseHandler):
        valid_until_ms: Optional[int],
        puppets_user_id: Optional[str] = None,
        is_appservice_ghost: bool = False,
        refresh_token_id: Optional[int] = None,
    ) -> str:
        """
        Creates a new access token for the user with the given user ID.
@@ -906,8 +801,6 @@ class AuthHandler(BaseHandler):
            valid_until_ms: when the token is valid until. None for
                no expiry.
            is_appservice_ghost: Whether the user is an application ghost user
            refresh_token_id: the refresh token ID that will be associated with
                this access token.
        Returns:
            The access token for the user's session.
        Raises:
@@ -943,7 +836,6 @@ class AuthHandler(BaseHandler):
            device_id=device_id,
            valid_until_ms=valid_until_ms,
            puppets_user_id=puppets_user_id,
            refresh_token_id=refresh_token_id,
        )

        # the device *should* have been registered before we got here; however,
@@ -1036,7 +928,7 @@ class AuthHandler(BaseHandler):
        self,
        login_submission: Dict[str, Any],
        ratelimit: bool = False,
    ) -> Tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]:
    ) -> Tuple[str, Optional[Callable[[Dict[str, str]], Awaitable[None]]]]:
        """Authenticates the user for the /login API

        Also used by the user-interactive auth flow to validate auth types which don't
@@ -1181,7 +1073,7 @@ class AuthHandler(BaseHandler):
        self,
        username: str,
        login_submission: Dict[str, Any],
    ) -> Tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]:
    ) -> Tuple[str, Optional[Callable[[Dict[str, str]], Awaitable[None]]]]:
        """Helper for validate_login

        Handles login, once we've mapped 3pids onto userids
@@ -1259,7 +1151,7 @@ class AuthHandler(BaseHandler):

    async def check_password_provider_3pid(
        self, medium: str, address: str, password: str
    ) -> Tuple[Optional[str], Optional[Callable[["LoginResponse"], Awaitable[None]]]]:
    ) -> Tuple[Optional[str], Optional[Callable[[Dict[str, str]], Awaitable[None]]]]:
        """Check if a password provider is able to validate a thirdparty login

        Args:
@@ -1323,19 +1215,6 @@ class AuthHandler(BaseHandler):
        crc = base62_encode(crc32(base.encode("ascii")), minwidth=6)
        return f"{base}_{crc}"

    def generate_refresh_token(self, for_user: UserID) -> str:
        """Generates an opaque string, for use as a refresh token"""

        # we use the following format for refresh tokens:
        #    syr_<base64 local part>_<random string>_<base62 crc check>

        b64local = unpaddedbase64.encode_base64(for_user.localpart.encode("utf-8"))
        random_string = stringutils.random_string(20)
        base = f"syr_{b64local}_{random_string}"

        crc = base62_encode(crc32(base.encode("ascii")), minwidth=6)
        return f"{base}_{crc}"

    async def validate_short_term_login_token(
        self, login_token: str
    ) -> LoginTokenAttributes:
@@ -1684,7 +1563,7 @@ class AuthHandler(BaseHandler):
        )
        respond_with_html(request, 200, html)

    async def _sso_login_callback(self, login_result: "LoginResponse") -> None:
    async def _sso_login_callback(self, login_result: JsonDict) -> None:
        """
        A login callback which might add additional attributes to the login response.

@@ -1698,8 +1577,7 @@ class AuthHandler(BaseHandler):

        extra_attributes = self._extra_attributes.get(login_result["user_id"])
        if extra_attributes:
            login_result_dict = cast(Dict[str, Any], login_result)
            login_result_dict.update(extra_attributes.extra_attributes)
            login_result.update(extra_attributes.extra_attributes)

    def _expire_sso_extra_attributes(self) -> None:
        """
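The refresh-token format removed above is self-describing: syr_<unpadded-base64 localpart>_<random string>_<base62 CRC>. A standalone sketch of generation and the shape check, with a hand-rolled base62 helper and random string approximating Synapse's base62_encode and stringutils (the alphabet and padding are assumptions, so treat this as illustrative rather than byte-compatible):

# Standalone sketch of the "syr_..." refresh-token shape shown above.
import base64
import secrets
import string
from zlib import crc32

ALPHABET = string.digits + string.ascii_uppercase + string.ascii_lowercase

def base62_encode(num: int, minwidth: int = 1) -> str:
    # Encode a non-negative int in base62, zero-padded to minwidth.
    digits = []
    while num:
        num, rem = divmod(num, 62)
        digits.append(ALPHABET[rem])
    return ("".join(reversed(digits)) or "0").rjust(minwidth, "0")

def generate_refresh_token(localpart: str) -> str:
    b64local = base64.b64encode(localpart.encode("utf-8")).rstrip(b"=").decode("ascii")
    rand = "".join(secrets.choice(string.ascii_letters) for _ in range(20))
    base = f"syr_{b64local}_{rand}"
    crc = base62_encode(crc32(base.encode("ascii")), minwidth=6)
    return f"{base}_{crc}"

def verify_refresh_token_shape(token: str) -> bool:
    parts = token.split("_", maxsplit=4)
    if len(parts) != 4:
        return False
    prefix, localpart, rand, crc = parts
    if prefix != "syr":
        return False
    base = f"{prefix}_{localpart}_{rand}"
    return crc == base62_encode(crc32(base.encode("ascii")), minwidth=6)

token = generate_refresh_token("alice")
assert verify_refresh_token_shape(token)
# Corrupting the CRC makes the shape check fail.
assert not verify_refresh_token_shape(token[:-1] + ("A" if token[-1] != "A" else "B"))

Note the CRC is only a cheap shape check against typos and truncation; the actual authorisation still comes from the database lookup in refresh_token above.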
synapse/handlers/event_auth.py

@@ -11,9 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Collection, List, Optional, Union
from typing import TYPE_CHECKING, Collection, Optional

from synapse import event_auth
from synapse.api.constants import (
    EventTypes,
    JoinRules,
@@ -21,11 +20,9 @@ from synapse.api.constants import (
    RestrictedJoinRuleTypes,
)
from synapse.api.errors import AuthError
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.api.room_versions import RoomVersion
from synapse.events import EventBase
from synapse.events.builder import EventBuilder
from synapse.types import StateMap
from synapse.util.metrics import Measure

if TYPE_CHECKING:
    from synapse.server import HomeServer
@@ -37,63 +34,8 @@ class EventAuthHandler:
    """

    def __init__(self, hs: "HomeServer"):
        self._clock = hs.get_clock()
        self._store = hs.get_datastore()

    async def check_from_context(
        self, room_version: str, event, context, do_sig_check=True
    ) -> None:
        auth_event_ids = event.auth_event_ids()
        auth_events_by_id = await self._store.get_events(auth_event_ids)
        auth_events = {(e.type, e.state_key): e for e in auth_events_by_id.values()}

        room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
        event_auth.check(
            room_version_obj, event, auth_events=auth_events, do_sig_check=do_sig_check
        )

    def compute_auth_events(
        self,
        event: Union[EventBase, EventBuilder],
        current_state_ids: StateMap[str],
        for_verification: bool = False,
    ) -> List[str]:
        """Given an event and current state return the list of event IDs used
        to auth an event.

        If `for_verification` is False then only return auth events that
        should be added to the event's `auth_events`.

        Returns:
            List of event IDs.
        """

        if event.type == EventTypes.Create:
            return []

        # Currently we ignore the `for_verification` flag even though there are
        # some situations where we can drop particular auth events when adding
        # to the event's `auth_events` (e.g. joins pointing to previous joins
        # when room is publicly joinable). Dropping event IDs has the
        # advantage that the auth chain for the room grows slower, but we use
        # the auth chain in state resolution v2 to order events, which means
        # care must be taken if dropping events to ensure that it doesn't
        # introduce undesirable "state reset" behaviour.
        #
        # All of which sounds a bit tricky so we don't bother for now.

        auth_ids = []
        for etype, state_key in event_auth.auth_types_for_event(event):
            auth_ev_id = current_state_ids.get((etype, state_key))
            if auth_ev_id:
                auth_ids.append(auth_ev_id)

        return auth_ids

    async def check_host_in_room(self, room_id: str, host: str) -> bool:
        with Measure(self._clock, "check_host_in_room"):
            return await self._store.is_host_joined(room_id, host)

    async def check_restricted_join_rules(
        self,
        state_ids: StateMap[str],
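The compute_auth_events method moving here is essentially a keyed lookup: ask event_auth which (type, state_key) pairs can authorise this event, then pull those entries out of the current state map. A toy illustration of that selection, with a deliberately simplified auth_types_for_event standing in for the real rules in synapse.event_auth:

# Toy illustration of selecting auth-event IDs from a state map keyed
# by (event_type, state_key). The auth types listed here are a
# simplification of the real Matrix auth rules.
from typing import Dict, List, Tuple

StateMap = Dict[Tuple[str, str], str]  # (type, state_key) -> event_id

def auth_types_for_event(sender: str, event_type: str) -> List[Tuple[str, str]]:
    if event_type == "m.room.create":
        return []  # the create event has no auth events
    # Every other event is authed against (at least) these bits of state.
    return [
        ("m.room.create", ""),
        ("m.room.power_levels", ""),
        ("m.room.member", sender),
    ]

def compute_auth_events(sender: str, event_type: str, state: StateMap) -> List[str]:
    auth_ids = []
    for key in auth_types_for_event(sender, event_type):
        event_id = state.get(key)
        if event_id:
            auth_ids.append(event_id)
    return auth_ids

state = {
    ("m.room.create", ""): "$create",
    ("m.room.power_levels", ""): "$pl",
    ("m.room.member", "@alice:example.org"): "$alice_join",
}
print(compute_auth_events("@alice:example.org", "m.room.message", state))
# ['$create', '$pl', '$alice_join']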
synapse/handlers/federation.py

@@ -250,9 +250,7 @@ class FederationHandler(BaseHandler):
            #
            # Note that if we were never in the room then we would have already
            # dropped the event, since we wouldn't know the room version.
            is_in_room = await self._event_auth_handler.check_host_in_room(
                room_id, self.server_name
            )
            is_in_room = await self.auth.check_host_in_room(room_id, self.server_name)
            if not is_in_room:
                logger.info(
                    "Ignoring PDU from %s as we're not in the room",
@@ -1676,9 +1674,7 @@ class FederationHandler(BaseHandler):
        room_version = await self.store.get_room_version_id(room_id)

        # now check that we are *still* in the room
        is_in_room = await self._event_auth_handler.check_host_in_room(
            room_id, self.server_name
        )
        is_in_room = await self.auth.check_host_in_room(room_id, self.server_name)
        if not is_in_room:
            logger.info(
                "Got /make_join request for room %s we are no longer in",
@@ -1709,12 +1705,86 @@ class FederationHandler(BaseHandler):

        # The remote hasn't signed it yet, obviously. We'll do the full checks
        # when we get the event back in `on_send_join_request`
        await self._event_auth_handler.check_from_context(
        await self.auth.check_from_context(
            room_version, event, context, do_sig_check=False
        )

        return event

    async def on_send_join_request(self, origin: str, pdu: EventBase) -> JsonDict:
        """We have received a join event for a room. Fully process it and
        respond with the current state and auth chains.
        """
        event = pdu

        logger.debug(
            "on_send_join_request from %s: Got event: %s, signatures: %s",
            origin,
            event.event_id,
            event.signatures,
        )

        if get_domain_from_id(event.sender) != origin:
            logger.info(
                "Got /send_join request for user %r from different origin %s",
                event.sender,
                origin,
            )
            raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)

        event.internal_metadata.outlier = False
        # Send this event on behalf of the origin server.
        #
        # The reasons we have the destination server rather than the origin
        # server send it are slightly mysterious: the origin server should have
        # all the necessary state once it gets the response to the send_join,
        # so it could send the event itself if it wanted to. It may be that
        # doing it this way reduces failure modes, or avoids certain attacks
        # where a new server selectively tells a subset of the federation that
        # it has joined.
        #
        # The fact is that, as of the current writing, Synapse doesn't send out
        # the join event over federation after joining, and changing it now
        # would introduce the danger of backwards-compatibility problems.
        event.internal_metadata.send_on_behalf_of = origin

        # Calculate the event context.
        context = await self.state_handler.compute_event_context(event)

        # Get the state before the new event.
        prev_state_ids = await context.get_prev_state_ids()

        # Check if the user is already in the room or invited to the room.
        user_id = event.state_key
        prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None)
        prev_member_event = None
        if prev_member_event_id:
            prev_member_event = await self.store.get_event(prev_member_event_id)

        # Check if the member should be allowed access via membership in a space.
        await self._event_auth_handler.check_restricted_join_rules(
            prev_state_ids,
            event.room_version,
            user_id,
            prev_member_event,
        )

        # Persist the event.
        await self._auth_and_persist_event(origin, event, context)

        logger.debug(
            "on_send_join_request: After _auth_and_persist_event: %s, sigs: %s",
            event.event_id,
            event.signatures,
        )

        state_ids = list(prev_state_ids.values())
        auth_chain = await self.store.get_auth_chain(event.room_id, state_ids)

        state = await self.store.get_events(list(prev_state_ids.values()))

        return {"state": list(state.values()), "auth_chain": auth_chain}

    async def on_invite_request(
        self, origin: str, event: EventBase, room_version: RoomVersion
    ) -> EventBase:
@@ -1881,7 +1951,7 @@ class FederationHandler(BaseHandler):
        try:
            # The remote hasn't signed it yet, obviously. We'll do the full checks
            # when we get the event back in `on_send_leave_request`
            await self._event_auth_handler.check_from_context(
            await self.auth.check_from_context(
                room_version, event, context, do_sig_check=False
            )
        except AuthError as e:
@@ -1890,6 +1960,44 @@ class FederationHandler(BaseHandler):

        return event

    async def on_send_leave_request(self, origin: str, pdu: EventBase) -> None:
        """We have received a leave event for a room. Fully process it."""
        event = pdu

        logger.debug(
            "on_send_leave_request: Got event: %s, signatures: %s",
            event.event_id,
            event.signatures,
        )

        if get_domain_from_id(event.sender) != origin:
            logger.info(
                "Got /send_leave request for user %r from different origin %s",
                event.sender,
                origin,
            )
            raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)

        event.internal_metadata.outlier = False

        # Send this event on behalf of the other server.
        #
        # The remote server isn't a full participant in the room at this point, so
        # may not have an up-to-date list of the other homeservers participating in
        # the room, so we send it on their behalf.
        event.internal_metadata.send_on_behalf_of = origin

        context = await self.state_handler.compute_event_context(event)
        await self._auth_and_persist_event(origin, event, context)

        logger.debug(
            "on_send_leave_request: After _auth_and_persist_event: %s, sigs: %s",
            event.event_id,
            event.signatures,
        )

        return None

    @log_function
    async def on_make_knock_request(
        self, origin: str, room_id: str, user_id: str
@@ -1943,7 +2051,7 @@ class FederationHandler(BaseHandler):
        try:
            # The remote hasn't signed it yet, obviously. We'll do the full checks
            # when we get the event back in `on_send_knock_request`
            await self._event_auth_handler.check_from_context(
            await self.auth.check_from_context(
                room_version, event, context, do_sig_check=False
            )
        except AuthError as e:
@@ -1953,54 +2061,35 @@ class FederationHandler(BaseHandler):
        return event

    @log_function
    async def on_send_membership_event(
    async def on_send_knock_request(
        self, origin: str, event: EventBase
    ) -> EventContext:
        """
        We have received a join/leave/knock event for a room via send_join/leave/knock.

        Verify that event and send it into the room on the remote homeserver's behalf.

        This is quite similar to on_receive_pdu, with the following principal
        differences:
          * only membership events are permitted (and only events with
            sender==state_key -- ie, no kicks or bans)
          * *We* send out the event on behalf of the remote server.
          * We enforce the membership restrictions of restricted rooms.
          * Rejected events result in an exception rather than being stored.

        There are also other differences, however it is not clear if these are by
        design or omission. In particular, we do not attempt to backfill any missing
        prev_events.
        We have received a knock event for a room. Verify that event and send it into the room
        on the knocking homeserver's behalf.

        Args:
            origin: The homeserver of the remote (joining/invited/knocking) user.
            event: The member event that has been signed by the remote homeserver.
            origin: The remote homeserver of the knocking user.
            event: The knocking member event that has been signed by the remote homeserver.

        Returns:
            The context of the event after inserting it into the room graph.

        Raises:
            SynapseError if the event is not accepted into the room
        """
        logger.debug(
            "on_send_membership_event: Got event: %s, signatures: %s",
            "on_send_knock_request: Got event: %s, signatures: %s",
            event.event_id,
            event.signatures,
        )

        if get_domain_from_id(event.sender) != origin:
            logger.info(
                "Got send_membership request for user %r from different origin %s",
                "Got /send_knock request for user %r from different origin %s",
                event.sender,
                origin,
            )
            raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)

        if event.sender != event.state_key:
            raise SynapseError(400, "state_key and sender must match", Codes.BAD_JSON)

        assert not event.internal_metadata.outlier
        event.internal_metadata.outlier = False

        # Send this event on behalf of the other server.
        #
@@ -2010,57 +2099,19 @@ class FederationHandler(BaseHandler):
        event.internal_metadata.send_on_behalf_of = origin

        context = await self.state_handler.compute_event_context(event)
        context = await self._check_event_auth(origin, event, context)
        if context.rejected:
            raise SynapseError(
                403, f"{event.membership} event was rejected", Codes.FORBIDDEN
            )

        # for joins, we need to check the restrictions of restricted rooms
        if event.membership == Membership.JOIN:
            await self._check_join_restrictions(context, event)

        # for knock events, we run the third-party event rules. It's not entirely clear
        # why we don't do this for other sorts of membership events.
        if event.membership == Membership.KNOCK:
            event_allowed = await self.third_party_event_rules.check_event_allowed(
                event, context
            )
            if not event_allowed:
                logger.info("Sending of knock %s forbidden by third-party rules", event)
                raise SynapseError(
                    403, "This event is not allowed in this context", Codes.FORBIDDEN
                )

        # all looks good, we can persist the event.
        await self._run_push_actions_and_persist_event(event, context)
        return context

    async def _check_join_restrictions(
        self, context: EventContext, event: EventBase
    ) -> None:
        """Check that restrictions in restricted join rules are matched

        Called when we receive a join event via send_join.

        Raises an auth error if the restrictions are not matched.
        """
        prev_state_ids = await context.get_prev_state_ids()

        # Check if the user is already in the room or invited to the room.
        user_id = event.state_key
        prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None)
        prev_member_event = None
        if prev_member_event_id:
            prev_member_event = await self.store.get_event(prev_member_event_id)

        # Check if the member should be allowed access via membership in a space.
        await self._event_auth_handler.check_restricted_join_rules(
            prev_state_ids,
            event.room_version,
            user_id,
            prev_member_event,
        event_allowed = await self.third_party_event_rules.check_event_allowed(
            event, context
        )
        if not event_allowed:
            logger.info("Sending of knock %s forbidden by third-party rules", event)
            raise SynapseError(
                403, "This event is not allowed in this context", Codes.FORBIDDEN
            )

        await self._auth_and_persist_event(origin, event, context)

        return context

    async def get_state_for_pdu(self, room_id: str, event_id: str) -> List[EventBase]:
        """Returns the state at the event. i.e. not including said event."""
@@ -2115,7 +2166,7 @@ class FederationHandler(BaseHandler):
    async def on_backfill_request(
        self, origin: str, room_id: str, pdu_list: List[str], limit: int
    ) -> List[EventBase]:
        in_room = await self._event_auth_handler.check_host_in_room(room_id, origin)
        in_room = await self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

@@ -2150,9 +2201,7 @@ class FederationHandler(BaseHandler):
        )

        if event:
            in_room = await self._event_auth_handler.check_host_in_room(
                event.room_id, origin
            )
            in_room = await self.auth.check_host_in_room(event.room_id, origin)
            if not in_room:
                raise AuthError(403, "Host not in room.")

@@ -2205,18 +2254,6 @@ class FederationHandler(BaseHandler):
            backfilled=backfilled,
        )

        await self._run_push_actions_and_persist_event(event, context, backfilled)

    async def _run_push_actions_and_persist_event(
        self, event: EventBase, context: EventContext, backfilled: bool = False
    ):
        """Run the push actions for a received event, and persist it.

        Args:
            event: The event itself.
            context: The event context.
            backfilled: True if the event was backfilled.
        """
        try:
            if (
                not event.internal_metadata.is_outlier()
@@ -2505,7 +2542,7 @@ class FederationHandler(BaseHandler):
        latest_events: List[str],
        limit: int,
    ) -> List[EventBase]:
        in_room = await self._event_auth_handler.check_host_in_room(room_id, origin)
        in_room = await self.auth.check_host_in_room(room_id, origin)
        if not in_room:
            raise AuthError(403, "Host not in room.")

@@ -2530,9 +2567,9 @@ class FederationHandler(BaseHandler):
        origin: str,
        event: EventBase,
        context: EventContext,
        state: Optional[Iterable[EventBase]] = None,
        auth_events: Optional[MutableStateMap[EventBase]] = None,
        backfilled: bool = False,
        state: Optional[Iterable[EventBase]],
        auth_events: Optional[MutableStateMap[EventBase]],
        backfilled: bool,
    ) -> EventContext:
        """
        Checks whether an event should be rejected (for failing auth checks).
@@ -2568,7 +2605,7 @@ class FederationHandler(BaseHandler):

        if not auth_events:
            prev_state_ids = await context.get_prev_state_ids()
            auth_events_ids = self._event_auth_handler.compute_auth_events(
            auth_events_ids = self.auth.compute_auth_events(
                event, prev_state_ids, for_verification=True
            )
            auth_events_x = await self.store.get_events(auth_events_ids)
@@ -2997,7 +3034,7 @@ class FederationHandler(BaseHandler):
            "state_key": target_user_id,
        }

        if await self._event_auth_handler.check_host_in_room(room_id, self.hs.hostname):
        if await self.auth.check_host_in_room(room_id, self.hs.hostname):
            room_version = await self.store.get_room_version_id(room_id)
            builder = self.event_builder_factory.new(room_version, event_dict)

@@ -3017,9 +3054,7 @@ class FederationHandler(BaseHandler):
            event.internal_metadata.send_on_behalf_of = self.hs.hostname

            try:
                await self._event_auth_handler.check_from_context(
                    room_version, event, context
                )
                await self.auth.check_from_context(room_version, event, context)
            except AuthError as e:
                logger.warning("Denying new third party invite %r because %s", event, e)
                raise e
@@ -3062,9 +3097,7 @@ class FederationHandler(BaseHandler):
        )

        try:
            await self._event_auth_handler.check_from_context(
                room_version, event, context
            )
            await self.auth.check_from_context(room_version, event, context)
        except AuthError as e:
            logger.warning("Denying third party invite %r because %s", event, e)
            raise e
@@ -3152,7 +3185,7 @@ class FederationHandler(BaseHandler):
        last_exception = None  # type: Optional[Exception]

        # for each public key in the 3pid invite event
        for public_key_object in event_auth.get_public_keys(invite_event):
        for public_key_object in self.hs.get_auth().get_public_keys(invite_event):
            try:
                # for each sig on the third_party_invite block of the actual invite
                for server, signature_block in signed["signatures"].items():
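Each of the on_send_* handlers above opens with the same guard: the event's sender must belong to the requesting server, which works because Matrix user IDs embed their homeserver (@user:domain). A small sketch of that check, with a simplified get_domain_from_id (the real parser also validates the sigil and overall ID shape):

# Sketch of the sender-origin guard used by on_send_join/leave/knock.
def get_domain_from_id(user_id: str) -> str:
    try:
        return user_id.split(":", 1)[1]
    except IndexError:
        raise ValueError(f"invalid user ID: {user_id!r}")

def check_sender_matches_origin(sender: str, origin: str) -> None:
    if get_domain_from_id(sender) != origin:
        # Mirrors the 403 "User not from origin" rejection above.
        raise PermissionError(f"{sender} is not from origin {origin}")

check_sender_matches_origin("@bob:remote.example", "remote.example")  # ok
try:
    check_sender_matches_origin("@mallory:evil.example", "remote.example")
except PermissionError as e:
    print(e)

Without this check, a server could submit membership events on behalf of users it does not control, which is why the rejection is a hard 403 rather than a soft failure.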
synapse/handlers/message.py

@@ -385,7 +385,6 @@ class EventCreationHandler:
    def __init__(self, hs: "HomeServer"):
        self.hs = hs
        self.auth = hs.get_auth()
        self._event_auth_handler = hs.get_event_auth_handler()
        self.store = hs.get_datastore()
        self.storage = hs.get_storage()
        self.state = hs.get_state_handler()
@@ -510,8 +509,6 @@ class EventCreationHandler:
            Should normally be left as None, which will cause them to be calculated
            based on the room state at the prev_events.

            If non-None, prev_event_ids must also be provided.

            require_consent: Whether to check if the requester has
                consented to the privacy policy.

@@ -584,9 +581,6 @@ class EventCreationHandler:
        # Strip down the auth_event_ids to only what we need to auth the event.
        # For example, we don't need extra m.room.member that don't match event.sender
        if auth_event_ids is not None:
            # If auth events are provided, prev events must be also.
            assert prev_event_ids is not None

            temp_event = await builder.build(
                prev_event_ids=prev_event_ids,
                auth_event_ids=auth_event_ids,
@@ -598,7 +592,7 @@ class EventCreationHandler:
                (e.type, e.state_key): e.event_id for e in auth_events
            }
            # Actually strip down and use the necessary auth events
            auth_event_ids = self._event_auth_handler.compute_auth_events(
            auth_event_ids = self.auth.compute_auth_events(
                event=temp_event,
                current_state_ids=auth_event_state_map,
                for_verification=False,
@@ -790,8 +784,6 @@ class EventCreationHandler:
            The event ids to use as the auth_events for the new event.
            Should normally be left as None, which will cause them to be calculated
            based on the room state at the prev_events.

            If non-None, prev_event_ids must also be provided.
            ratelimit: Whether to rate limit this send.
            txn_id: The transaction ID.
            ignore_shadow_ban: True if shadow-banned users should be allowed to
@@ -1057,9 +1049,7 @@ class EventCreationHandler:
            assert event.content["membership"] == Membership.LEAVE
        else:
            try:
                await self._event_auth_handler.check_from_context(
                    room_version, event, context
                )
                await self.auth.check_from_context(room_version, event, context)
            except AuthError as err:
                logger.warning("Denying new event %r because %s", event, err)
                raise err
@@ -1384,7 +1374,7 @@ class EventCreationHandler:
            raise AuthError(403, "Redacting server ACL events is not permitted")

        prev_state_ids = await context.get_prev_state_ids()
        auth_events_ids = self._event_auth_handler.compute_auth_events(
        auth_events_ids = self.auth.compute_auth_events(
            event, prev_state_ids, for_verification=True
        )
        auth_events_map = await self.store.get_events(auth_events_ids)
synapse/handlers/register.py

@@ -15,10 +15,9 @@
"""Contains functions for registering clients."""

import logging
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple

from prometheus_client import Counter
from typing_extensions import TypedDict

from synapse import types
from synapse.api.constants import MAX_USERID_LENGTH, EventTypes, JoinRules, LoginType
@@ -55,16 +54,6 @@ login_counter = Counter(
    ["guest", "auth_provider"],
)

LoginDict = TypedDict(
    "LoginDict",
    {
        "device_id": str,
        "access_token": str,
        "valid_until_ms": Optional[int],
        "refresh_token": Optional[str],
    },
)


class RegistrationHandler(BaseHandler):
    def __init__(self, hs: "HomeServer"):
@@ -96,7 +85,6 @@ class RegistrationHandler(BaseHandler):
        self.pusher_pool = hs.get_pusherpool()

        self.session_lifetime = hs.config.session_lifetime
        self.access_token_lifetime = hs.config.access_token_lifetime

    async def check_username(
        self,
@@ -708,8 +696,7 @@ class RegistrationHandler(BaseHandler):
        is_guest: bool = False,
        is_appservice_ghost: bool = False,
        auth_provider_id: Optional[str] = None,
        should_issue_refresh_token: bool = False,
    ) -> Tuple[str, str, Optional[int], Optional[str]]:
    ) -> Tuple[str, str]:
        """Register a device for a user and generate an access token.

        The access token will be limited by the homeserver's session_lifetime config.
@@ -721,9 +708,8 @@ class RegistrationHandler(BaseHandler):
            is_guest: Whether this is a guest account
            auth_provider_id: The SSO IdP the user used, if any (just used for the
                prometheus metrics).
            should_issue_refresh_token: Whether it should also issue a refresh token
        Returns:
            Tuple of device ID, access token, access token expiration time and refresh token
            Tuple of device ID and access token
        """
        res = await self._register_device_client(
            user_id=user_id,
@@ -731,7 +717,6 @@ class RegistrationHandler(BaseHandler):
            initial_display_name=initial_display_name,
            is_guest=is_guest,
            is_appservice_ghost=is_appservice_ghost,
            should_issue_refresh_token=should_issue_refresh_token,
        )

        login_counter.labels(
@@ -739,12 +724,7 @@ class RegistrationHandler(BaseHandler):
            auth_provider=(auth_provider_id or ""),
        ).inc()

        return (
            res["device_id"],
            res["access_token"],
            res["valid_until_ms"],
            res["refresh_token"],
        )
        return res["device_id"], res["access_token"]

    async def register_device_inner(
        self,
@@ -753,8 +733,7 @@ class RegistrationHandler(BaseHandler):
        initial_display_name: Optional[str],
        is_guest: bool = False,
        is_appservice_ghost: bool = False,
        should_issue_refresh_token: bool = False,
    ) -> LoginDict:
    ) -> Dict[str, str]:
        """Helper for register_device

        Does the bits that need doing on the main process. Not for use outside this
@@ -769,9 +748,6 @@ class RegistrationHandler(BaseHandler):
            )
            valid_until_ms = self.clock.time_msec() + self.session_lifetime

        refresh_token = None
        refresh_token_id = None

        registered_device_id = await self.device_handler.check_device_registered(
            user_id, device_id, initial_display_name
        )
@@ -779,30 +755,14 @@ class RegistrationHandler(BaseHandler):
            assert valid_until_ms is None
            access_token = self.macaroon_gen.generate_guest_access_token(user_id)
        else:
            if should_issue_refresh_token:
                (
                    refresh_token,
                    refresh_token_id,
                ) = await self._auth_handler.get_refresh_token_for_user_id(
                    user_id,
                    device_id=registered_device_id,
                )
                valid_until_ms = self.clock.time_msec() + self.access_token_lifetime

            access_token = await self._auth_handler.get_access_token_for_user_id(
                user_id,
                device_id=registered_device_id,
                valid_until_ms=valid_until_ms,
                is_appservice_ghost=is_appservice_ghost,
                refresh_token_id=refresh_token_id,
            )

        return {
            "device_id": registered_device_id,
            "access_token": access_token,
            "valid_until_ms": valid_until_ms,
            "refresh_token": refresh_token,
        }
        return {"device_id": registered_device_id, "access_token": access_token}

    async def post_registration_actions(
        self, user_id: str, auth_result: dict, access_token: Optional[str]
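The LoginDict TypedDict being dropped here is what let the type checker see per-key value types: valid_until_ms is an Optional[int], not a str, which the replacement Dict[str, str] cannot express. A minimal illustration of the difference (the helper function is hypothetical, not Synapse's API):

# Minimal illustration of why the removed TypedDict was useful: each
# key gets its own value type, which Dict[str, str] cannot express.
from typing import Optional
from typing_extensions import TypedDict  # typing.TypedDict on Python 3.8+

LoginDict = TypedDict(
    "LoginDict",
    {
        "device_id": str,
        "access_token": str,
        "valid_until_ms": Optional[int],
    },
)

def make_login(device_id: str, token: str, valid_until_ms: Optional[int]) -> LoginDict:
    return {
        "device_id": device_id,
        "access_token": token,
        "valid_until_ms": valid_until_ms,
    }

login = make_login("DEVICE", "syt_token", None)
# mypy knows login["valid_until_ms"] is Optional[int]; with the
# Dict[str, str] annotation the None above would already be a type error.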
synapse/handlers/room.py

@@ -83,7 +83,6 @@ class RoomCreationHandler(BaseHandler):
        self.spam_checker = hs.get_spam_checker()
        self.event_creation_handler = hs.get_event_creation_handler()
        self.room_member_handler = hs.get_room_member_handler()
        self._event_auth_handler = hs.get_event_auth_handler()
        self.config = hs.config

        # Room state based off defined presets
@@ -227,7 +226,7 @@ class RoomCreationHandler(BaseHandler):
            },
        )
        old_room_version = await self.store.get_room_version_id(old_room_id)
        await self._event_auth_handler.check_from_context(
        await self.auth.check_from_context(
            old_room_version, tombstone_event, tombstone_context
        )
synapse/handlers/space_summary.py

@@ -25,7 +25,6 @@ from synapse.api.constants import (
    EventTypes,
    HistoryVisibility,
    Membership,
    RoomTypes,
)
from synapse.events import EventBase
from synapse.events.utils import format_event_for_client_v2
@@ -319,8 +318,7 @@ class SpaceSummaryHandler:

        Returns:
            A tuple of:
                The room information, if the room should be returned to the
                user. None, otherwise.
                An iterable of a single value of the room.

                An iterable of the sorted children events. This may be limited
                to a maximum size or may include all children.
@@ -330,11 +328,7 @@ class SpaceSummaryHandler:

        room_entry = await self._build_room_entry(room_id)

        # If the room is not a space, return just the room information.
        if room_entry.get("room_type") != RoomTypes.SPACE:
            return room_entry, ()

        # Otherwise, look for child rooms/spaces.
        # look for child rooms/spaces.
        child_events = await self._get_child_events(room_id)

        if suggested_only:
@@ -354,7 +348,6 @@ class SpaceSummaryHandler:
                    event_format=format_event_for_client_v2,
                )
            )

        return room_entry, events_result

    async def _summarize_remote_room(
@@ -472,7 +465,7 @@ class SpaceSummaryHandler:
        # If this is a request over federation, check if the host is in the room or
        # is in one of the spaces specified via the join rules.
        elif origin:
            if await self._event_auth_handler.check_host_in_room(room_id, origin):
            if await self._auth.check_host_in_room(room_id, origin):
                return True

            # Alternately, if the host has a user in any of the spaces specified
@@ -485,9 +478,7 @@ class SpaceSummaryHandler:
                await self._event_auth_handler.get_rooms_that_allow_join(state_ids)
            )
            for space_id in allowed_rooms:
                if await self._event_auth_handler.check_host_in_room(
                    space_id, origin
                ):
                if await self._auth.check_host_in_room(space_id, origin):
                    return True

        # otherwise, check if the room is peekable
synapse/http/federation/matrix_federation_agent.py

@@ -84,8 +84,8 @@ class MatrixFederationAgent:

        self._agent = Agent.usingEndpointFactory(
            self._reactor,
            MatrixHostnameEndpointFactory(
                reactor, tls_client_options_factory, _srv_resolver
            ProxyHostnameEndpointFactory(
                reactor._reactor,
            ),
            pool=self._pool,
        )
@@ -193,6 +193,18 @@ class MatrixFederationAgent:
        return res


@implementer(IAgentEndpointFactory)
class ProxyHostnameEndpointFactory:
    def __init__(
        self,
        reactor: IReactorCore,
    ):
        self._reactor = reactor

    def endpointForURI(self, parsed_uri):
        return HostnameEndpoint(self._reactor, "127.0.0.1", 3000)


@implementer(IAgentEndpointFactory)
class MatrixHostnameEndpointFactory:
    """Factory for MatrixHostnameEndpoint for parsing to an Agent."""
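The new ProxyHostnameEndpointFactory ignores the parsed URI entirely and pins every outbound federation connection to a hardcoded 127.0.0.1:3000, bypassing SRV resolution and TLS. That reads like a local debugging hack rather than production routing: with Agent.usingEndpointFactory, endpointForURI decides where each request's TCP connection actually goes. A minimal Twisted sketch of the same idea, assuming something is listening on port 3000 (as in the diff, no TLS wrapping is applied, so this only works against a plain-TCP listener):

# Minimal Twisted sketch: route all Agent traffic to one fixed local
# endpoint via an IAgentEndpointFactory, as the diff above does.
from twisted.internet import reactor
from twisted.internet.endpoints import HostnameEndpoint
from twisted.web.client import Agent
from twisted.web.iweb import IAgentEndpointFactory
from zope.interface import implementer

@implementer(IAgentEndpointFactory)
class FixedEndpointFactory:
    def __init__(self, reactor, host: str, port: int):
        self._reactor = reactor
        self._host = host
        self._port = port

    def endpointForURI(self, uri):
        # The URI's own host and port are deliberately ignored.
        return HostnameEndpoint(self._reactor, self._host, self._port)

agent = Agent.usingEndpointFactory(
    reactor, FixedEndpointFactory(reactor, "127.0.0.1", 3000)
)
d = agent.request(b"GET", b"http://matrix.example.org/_matrix/federation/v1/version")
d.addBoth(lambda result: (print(result), reactor.stop()))
reactor.run()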
synapse/http/servlet.py

@@ -109,22 +109,12 @@ def parse_boolean_from_args(args, name, default=None, required=False):
    return default


@overload
def parse_bytes_from_args(
    args: Dict[bytes, List[bytes]],
    name: str,
    default: Optional[bytes] = None,
) -> Optional[bytes]:
    ...


@overload
def parse_bytes_from_args(
    args: Dict[bytes, List[bytes]],
    name: str,
    default: Literal[None] = None,
    *,
    required: Literal[True],
    required: Literal[True] = True,
) -> bytes:
    ...

@@ -207,12 +197,7 @@ def parse_string(
    """
    args = request.args  # type: Dict[bytes, List[bytes]]  # type: ignore
    return parse_string_from_args(
        args,
        name,
        default,
        required=required,
        allowed_values=allowed_values,
        encoding=encoding,
        args, name, default, required, allowed_values, encoding
    )


@@ -242,20 +227,7 @@ def parse_strings_from_args(
    args: Dict[bytes, List[bytes]],
    name: str,
    default: Optional[List[str]] = None,
    *,
    allowed_values: Optional[Iterable[str]] = None,
    encoding: str = "ascii",
) -> Optional[List[str]]:
    ...


@overload
def parse_strings_from_args(
    args: Dict[bytes, List[bytes]],
    name: str,
    default: Optional[List[str]] = None,
    *,
    required: Literal[True],
    required: Literal[True] = True,
    allowed_values: Optional[Iterable[str]] = None,
    encoding: str = "ascii",
) -> List[str]:
@@ -267,7 +239,6 @@ def parse_strings_from_args(
    args: Dict[bytes, List[bytes]],
    name: str,
    default: Optional[List[str]] = None,
    *,
    required: bool = False,
    allowed_values: Optional[Iterable[str]] = None,
    encoding: str = "ascii",
@@ -328,20 +299,7 @@ def parse_string_from_args(
    args: Dict[bytes, List[bytes]],
    name: str,
    default: Optional[str] = None,
    *,
    allowed_values: Optional[Iterable[str]] = None,
    encoding: str = "ascii",
) -> Optional[str]:
    ...


@overload
def parse_string_from_args(
    args: Dict[bytes, List[bytes]],
    name: str,
    default: Optional[str] = None,
    *,
    required: Literal[True],
    required: Literal[True] = True,
    allowed_values: Optional[Iterable[str]] = None,
    encoding: str = "ascii",
) -> str:
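The overload changes above are load-bearing for callers: with a keyword-only required: Literal[True] and no default, the type checker selects the non-Optional return type only when the caller writes required=True explicitly, while bare calls fall through to the Optional overload. Giving the parameter a default of True (as the diff does) changes which overload matches a bare call. A reduced sketch of the pattern the original code used, with a hypothetical parse_name in place of the servlet helpers:

# Reduced sketch of the Literal[True] overload pattern from servlet.py:
# mypy narrows the return type based on whether required=True is passed.
from typing import Literal, Optional, overload

@overload
def parse_name(args: dict, default: Optional[str] = None) -> Optional[str]: ...

@overload
def parse_name(
    args: dict, default: Optional[str] = None, *, required: Literal[True]
) -> str: ...

def parse_name(args: dict, default: Optional[str] = None, *, required: bool = False):
    value = args.get("name", default)
    if required and value is None:
        raise KeyError("missing required parameter: name")
    return value

maybe: Optional[str] = parse_name({})                        # Optional[str]
definitely: str = parse_name({"name": "x"}, required=True)   # str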
synapse/module_api/__init__.py

@@ -168,7 +168,7 @@ class ModuleApi:
            "Using deprecated ModuleApi.register which creates a dummy user device."
        )
        user_id = yield self.register_user(localpart, displayname, emails or [])
        _, access_token, _, _ = yield self.register_device(user_id)
        _, access_token = yield self.register_device(user_id)
        return user_id, access_token

    def register_user(
synapse/push/bulk_push_rule_evaluator.py

@@ -104,7 +104,7 @@ class BulkPushRuleEvaluator:
    def __init__(self, hs: "HomeServer"):
        self.hs = hs
        self.store = hs.get_datastore()
        self._event_auth_handler = hs.get_event_auth_handler()
        self.auth = hs.get_auth()

        # Used by `RulesForRoom` to ensure only one thing mutates the cache at a
        # time. Keyed off room_id.
@@ -172,7 +172,7 @@ class BulkPushRuleEvaluator:
            # not having a power level event is an extreme edge case
            auth_events = {POWER_KEY: await self.store.get_event(pl_event_id)}
        else:
            auth_events_ids = self._event_auth_handler.compute_auth_events(
            auth_events_ids = self.auth.compute_auth_events(
                event, prev_state_ids, for_verification=False
            )
            auth_events_dict = await self.store.get_events(auth_events_ids)
@@ -36,29 +36,20 @@ class RegisterDeviceReplicationServlet(ReplicationEndpoint):

    @staticmethod
    async def _serialize_payload(
        user_id,
        device_id,
        initial_display_name,
        is_guest,
        is_appservice_ghost,
        should_issue_refresh_token,
        user_id, device_id, initial_display_name, is_guest, is_appservice_ghost
    ):
        """
        Args:
            user_id (int)
            device_id (str|None): Device ID to use, if None a new one is
                generated.
            initial_display_name (str|None)
            is_guest (bool)
            is_appservice_ghost (bool)
            should_issue_refresh_token (bool)
        """
        return {
            "device_id": device_id,
            "initial_display_name": initial_display_name,
            "is_guest": is_guest,
            "is_appservice_ghost": is_appservice_ghost,
            "should_issue_refresh_token": should_issue_refresh_token,
        }

    async def _handle_request(self, request, user_id):

@@ -68,7 +59,6 @@ class RegisterDeviceReplicationServlet(ReplicationEndpoint):
        initial_display_name = content["initial_display_name"]
        is_guest = content["is_guest"]
        is_appservice_ghost = content["is_appservice_ghost"]
        should_issue_refresh_token = content["should_issue_refresh_token"]

        res = await self.registration_handler.register_device_inner(
            user_id,

@@ -76,7 +66,6 @@ class RegisterDeviceReplicationServlet(ReplicationEndpoint):
            initial_display_name,
            is_guest,
            is_appservice_ghost=is_appservice_ghost,
            should_issue_refresh_token=should_issue_refresh_token,
        )

        return 200, res
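The `_serialize_payload`/`_handle_request` pair above is a straight dict round-trip over HTTP replication; a hedged sketch of the invariant (field names from the code above, values made up):

    payload = {
        "device_id": None,  # None means the handler allocates a new device ID
        "initial_display_name": "My phone",
        "is_guest": False,
        "is_appservice_ghost": False,
        "should_issue_refresh_token": True,  # the field this diff removes
    }
    # The worker handling the replication request reads the same keys back:
    assert payload["should_issue_refresh_token"] is True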
@@ -14,9 +14,7 @@

import logging
import re
from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, List, Optional

from typing_extensions import TypedDict
from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional

from synapse.api.errors import Codes, LoginError, SynapseError
from synapse.api.ratelimiting import Ratelimiter

@@ -27,8 +25,6 @@ from synapse.http import get_request_uri
from synapse.http.server import HttpServer, finish_request
from synapse.http.servlet import (
    RestServlet,
    assert_params_in_dict,
    parse_boolean,
    parse_bytes_from_args,
    parse_json_object_from_request,
    parse_string,

@@ -44,21 +40,6 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)


LoginResponse = TypedDict(
    "LoginResponse",
    {
        "user_id": str,
        "access_token": str,
        "home_server": str,
        "expires_in_ms": Optional[int],
        "refresh_token": Optional[str],
        "device_id": str,
        "well_known": Optional[Dict[str, Any]],
    },
    total=False,
)

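A small sketch of the functional `TypedDict` idiom used for `LoginResponse` above: `total=False` makes every key optional, so the response can be assembled incrementally and serialised straight to JSON. (Illustrative names; assumes `typing_extensions` is installed, as in the imports above.)

    from typing import Optional
    from typing_extensions import TypedDict

    Response = TypedDict(
        "Response",
        {"user_id": str, "expires_in_ms": Optional[int]},
        total=False,
    )

    resp: Response = {"user_id": "@alice:example.org"}  # hypothetical user
    resp["expires_in_ms"] = 60_000  # only set when the token can expire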
class LoginRestServlet(RestServlet):
    PATTERNS = client_patterns("/login$", v1=True)
    CAS_TYPE = "m.login.cas"

@@ -67,7 +48,6 @@ class LoginRestServlet(RestServlet):
    JWT_TYPE = "org.matrix.login.jwt"
    JWT_TYPE_DEPRECATED = "m.login.jwt"
    APPSERVICE_TYPE = "uk.half-shot.msc2778.login.application_service"
    REFRESH_TOKEN_PARAM = "org.matrix.msc2918.refresh_token"

    def __init__(self, hs: "HomeServer"):
        super().__init__()

@@ -85,12 +65,9 @@ class LoginRestServlet(RestServlet):
        self.cas_enabled = hs.config.cas_enabled
        self.oidc_enabled = hs.config.oidc_enabled
        self._msc2858_enabled = hs.config.experimental.msc2858_enabled
        self._msc2918_enabled = hs.config.access_token_lifetime is not None

        self.auth = hs.get_auth()

        self.clock = hs.get_clock()

        self.auth_handler = self.hs.get_auth_handler()
        self.registration_handler = hs.get_registration_handler()
        self._sso_handler = hs.get_sso_handler()

@@ -161,15 +138,6 @@ class LoginRestServlet(RestServlet):
    async def on_POST(self, request: SynapseRequest):
        login_submission = parse_json_object_from_request(request)

        if self._msc2918_enabled:
            # Check if this login should also issue a refresh token, as per
            # MSC2918
            should_issue_refresh_token = parse_boolean(
                request, name=LoginRestServlet.REFRESH_TOKEN_PARAM, default=False
            )
        else:
            should_issue_refresh_token = False

        try:
            if login_submission["type"] == LoginRestServlet.APPSERVICE_TYPE:
                appservice = self.auth.get_appservice_by_req(request)

@@ -179,32 +147,19 @@ class LoginRestServlet(RestServlet):
                    None, request.getClientIP()
                )

                result = await self._do_appservice_login(
                    login_submission,
                    appservice,
                    should_issue_refresh_token=should_issue_refresh_token,
                )
                result = await self._do_appservice_login(login_submission, appservice)
            elif self.jwt_enabled and (
                login_submission["type"] == LoginRestServlet.JWT_TYPE
                or login_submission["type"] == LoginRestServlet.JWT_TYPE_DEPRECATED
            ):
                await self._address_ratelimiter.ratelimit(None, request.getClientIP())
                result = await self._do_jwt_login(
                    login_submission,
                    should_issue_refresh_token=should_issue_refresh_token,
                )
                result = await self._do_jwt_login(login_submission)
            elif login_submission["type"] == LoginRestServlet.TOKEN_TYPE:
                await self._address_ratelimiter.ratelimit(None, request.getClientIP())
                result = await self._do_token_login(
                    login_submission,
                    should_issue_refresh_token=should_issue_refresh_token,
                )
                result = await self._do_token_login(login_submission)
            else:
                await self._address_ratelimiter.ratelimit(None, request.getClientIP())
                result = await self._do_other_login(
                    login_submission,
                    should_issue_refresh_token=should_issue_refresh_token,
                )
                result = await self._do_other_login(login_submission)
        except KeyError:
            raise SynapseError(400, "Missing JSON keys.")

@@ -214,10 +169,7 @@ class LoginRestServlet(RestServlet):
        return 200, result

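For context, a hedged illustration of the MSC2918 opt-in that the `on_POST` hunk removes: the client signals it via the `org.matrix.msc2918.refresh_token` query parameter on `/login` (parameter name from `REFRESH_TOKEN_PARAM` above; the host and credentials are made up).

    import json
    import urllib.request

    body = json.dumps({
        "type": "m.login.password",
        "identifier": {"type": "m.id.user", "user": "alice"},
        "password": "wonderland",
    }).encode()
    req = urllib.request.Request(
        "https://homeserver.example/_matrix/client/r0/login"
        "?org.matrix.msc2918.refresh_token=true",
        data=body,
        headers={"Content-Type": "application/json"},
    )
    # urllib.request.urlopen(req) would then return refresh_token and
    # expires_in_ms alongside access_token when the server supports MSC2918.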
    async def _do_appservice_login(
        self,
        login_submission: JsonDict,
        appservice: ApplicationService,
        should_issue_refresh_token: bool = False,
        self, login_submission: JsonDict, appservice: ApplicationService
    ):
        identifier = login_submission.get("identifier")
        logger.info("Got appservice login request with identifier: %r", identifier)

@@ -246,21 +198,14 @@ class LoginRestServlet(RestServlet):
            raise LoginError(403, "Invalid access_token", errcode=Codes.FORBIDDEN)

        return await self._complete_login(
            qualified_user_id,
            login_submission,
            ratelimit=appservice.is_rate_limited(),
            should_issue_refresh_token=should_issue_refresh_token,
            qualified_user_id, login_submission, ratelimit=appservice.is_rate_limited()
        )

    async def _do_other_login(
        self, login_submission: JsonDict, should_issue_refresh_token: bool = False
    ) -> LoginResponse:
    async def _do_other_login(self, login_submission: JsonDict) -> Dict[str, str]:
        """Handle non-token/saml/jwt logins

        Args:
            login_submission:
            should_issue_refresh_token: True if this login should issue
                a refresh token alongside the access token.

        Returns:
            HTTP response

@@ -279,10 +224,7 @@ class LoginRestServlet(RestServlet):
            login_submission, ratelimit=True
        )
        result = await self._complete_login(
            canonical_user_id,
            login_submission,
            callback,
            should_issue_refresh_token=should_issue_refresh_token,
            canonical_user_id, login_submission, callback
        )
        return result

@@ -290,12 +232,11 @@ class LoginRestServlet(RestServlet):
        self,
        user_id: str,
        login_submission: JsonDict,
        callback: Optional[Callable[[LoginResponse], Awaitable[None]]] = None,
        callback: Optional[Callable[[Dict[str, str]], Awaitable[None]]] = None,
        create_non_existent_users: bool = False,
        ratelimit: bool = True,
        auth_provider_id: Optional[str] = None,
        should_issue_refresh_token: bool = False,
    ) -> LoginResponse:
    ) -> Dict[str, str]:
        """Called when we've successfully authed the user and now need to
        actually login them in (e.g. create devices). This gets called on
        all successful logins.

@@ -312,8 +253,6 @@ class LoginRestServlet(RestServlet):
            ratelimit: Whether to ratelimit the login request.
            auth_provider_id: The SSO IdP the user used, if any (just used for the
                prometheus metrics).
            should_issue_refresh_token: True if this login should issue
                a refresh token alongside the access token.

        Returns:
            result: Dictionary of account information after successful login.

@@ -335,48 +274,28 @@ class LoginRestServlet(RestServlet):

        device_id = login_submission.get("device_id")
        initial_display_name = login_submission.get("initial_device_display_name")
        (
            device_id,
            access_token,
            valid_until_ms,
            refresh_token,
        ) = await self.registration_handler.register_device(
            user_id,
            device_id,
            initial_display_name,
            auth_provider_id=auth_provider_id,
            should_issue_refresh_token=should_issue_refresh_token,
        device_id, access_token = await self.registration_handler.register_device(
            user_id, device_id, initial_display_name, auth_provider_id=auth_provider_id
        )

        result = LoginResponse(
            user_id=user_id,
            access_token=access_token,
            home_server=self.hs.hostname,
            device_id=device_id,
        )

        if valid_until_ms is not None:
            expires_in_ms = valid_until_ms - self.clock.time_msec()
            result["expires_in_ms"] = expires_in_ms

        if refresh_token is not None:
            result["refresh_token"] = refresh_token
        result = {
            "user_id": user_id,
            "access_token": access_token,
            "home_server": self.hs.hostname,
            "device_id": device_id,
        }

        if callback is not None:
            await callback(result)

        return result

    async def _do_token_login(
        self, login_submission: JsonDict, should_issue_refresh_token: bool = False
    ) -> LoginResponse:
    async def _do_token_login(self, login_submission: JsonDict) -> Dict[str, str]:
        """
        Handle the final stage of SSO login.

        Args:
            login_submission: The JSON request body.
            should_issue_refresh_token: True if this login should issue
                a refresh token alongside the access token.
            login_submission: The JSON request body.

        Returns:
            The body of the JSON response.

@@ -390,12 +309,9 @@ class LoginRestServlet(RestServlet):
            login_submission,
            self.auth_handler._sso_login_callback,
            auth_provider_id=res.auth_provider_id,
            should_issue_refresh_token=should_issue_refresh_token,
        )

    async def _do_jwt_login(
        self, login_submission: JsonDict, should_issue_refresh_token: bool = False
    ) -> LoginResponse:
    async def _do_jwt_login(self, login_submission: JsonDict) -> Dict[str, str]:
        token = login_submission.get("token", None)
        if token is None:
            raise LoginError(

@@ -426,10 +342,7 @@ class LoginRestServlet(RestServlet):

        user_id = UserID(user, self.hs.hostname).to_string()
        result = await self._complete_login(
            user_id,
            login_submission,
            create_non_existent_users=True,
            should_issue_refresh_token=should_issue_refresh_token,
            user_id, login_submission, create_non_existent_users=True
        )
        return result
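The expiry handling deleted from `_complete_login` above boils down to converting an absolute `valid_until_ms` timestamp into the relative `expires_in_ms` handed to clients. A stand-alone sketch (times illustrative):

    import time

    def msec() -> int:
        return int(time.time() * 1000)

    valid_until_ms = msec() + 60_000         # token valid for one minute
    expires_in_ms = valid_until_ms - msec()  # what the client receives
    assert 0 < expires_in_ms <= 60_000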
@@ -458,42 +371,6 @@ def _get_auth_flow_dict_for_idp(
        return e


class RefreshTokenServlet(RestServlet):
    PATTERNS = client_patterns(
        "/org.matrix.msc2918.refresh_token/refresh$", releases=(), unstable=True
    )

    def __init__(self, hs: "HomeServer"):
        self._auth_handler = hs.get_auth_handler()
        self._clock = hs.get_clock()
        self.access_token_lifetime = hs.config.access_token_lifetime

    async def on_POST(
        self,
        request: SynapseRequest,
    ):
        refresh_submission = parse_json_object_from_request(request)

        assert_params_in_dict(refresh_submission, ["refresh_token"])
        token = refresh_submission["refresh_token"]
        if not isinstance(token, str):
            raise SynapseError(400, "Invalid param: refresh_token", Codes.INVALID_PARAM)

        valid_until_ms = self._clock.time_msec() + self.access_token_lifetime
        access_token, refresh_token = await self._auth_handler.refresh_token(
            token, valid_until_ms
        )
        expires_in_ms = valid_until_ms - self._clock.time_msec()
        return (
            200,
            {
                "access_token": access_token,
                "refresh_token": refresh_token,
                "expires_in_ms": expires_in_ms,
            },
        )


class SsoRedirectServlet(RestServlet):
    PATTERNS = list(client_patterns("/login/(cas|sso)/redirect$", v1=True)) + [
        re.compile(

@@ -600,8 +477,6 @@ class CasTicketServlet(RestServlet):

def register_servlets(hs, http_server):
    LoginRestServlet(hs).register(http_server)
    if hs.config.access_token_lifetime is not None:
        RefreshTokenServlet(hs).register(http_server)
    SsoRedirectServlet(hs).register(http_server)
    if hs.config.cas_enabled:
        CasTicketServlet(hs).register(http_server)
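A hedged example of exercising the endpoint `RefreshTokenServlet` serves (path assembled from the unstable `PATTERNS` above; host and token are made up):

    import json
    import urllib.request

    req = urllib.request.Request(
        "https://homeserver.example/_matrix/client/unstable"
        "/org.matrix.msc2918.refresh_token/refresh",
        data=json.dumps({"refresh_token": "SOME_OPAQUE_TOKEN"}).encode(),
        headers={"Content-Type": "application/json"},
    )
    # A successful POST returns a new access_token, a new refresh_token and
    # expires_in_ms, computed above as valid_until_ms minus the current time.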
@@ -41,13 +41,11 @@ from synapse.http.server import finish_request, respond_with_html
from synapse.http.servlet import (
    RestServlet,
    assert_params_in_dict,
    parse_boolean,
    parse_json_object_from_request,
    parse_string,
)
from synapse.metrics import threepid_send_requests
from synapse.push.mailer import Mailer
from synapse.types import JsonDict
from synapse.util.msisdn import phone_number_to_msisdn
from synapse.util.ratelimitutils import FederationRateLimiter
from synapse.util.stringutils import assert_valid_client_secret, random_string

@@ -401,7 +399,6 @@ class RegisterRestServlet(RestServlet):
        self.password_policy_handler = hs.get_password_policy_handler()
        self.clock = hs.get_clock()
        self._registration_enabled = self.hs.config.enable_registration
        self._msc2918_enabled = hs.config.access_token_lifetime is not None

        self._registration_flows = _calculate_registration_flows(
            hs.config, self.auth_handler

@@ -427,15 +424,6 @@ class RegisterRestServlet(RestServlet):
                "Do not understand membership kind: %s" % (kind.decode("utf8"),)
            )

        if self._msc2918_enabled:
            # Check if this registration should also issue a refresh token, as
            # per MSC2918
            should_issue_refresh_token = parse_boolean(
                request, name="org.matrix.msc2918.refresh_token", default=False
            )
        else:
            should_issue_refresh_token = False

        # Pull out the provided username and do basic sanity checks early since
        # the auth layer will store these in sessions.
        desired_username = None

@@ -474,10 +462,7 @@ class RegisterRestServlet(RestServlet):
            raise SynapseError(400, "Desired Username is missing or not a string")

            result = await self._do_appservice_registration(
                desired_username,
                access_token,
                body,
                should_issue_refresh_token=should_issue_refresh_token,
                desired_username, access_token, body
            )

            return 200, result

@@ -680,9 +665,7 @@ class RegisterRestServlet(RestServlet):
            registered = True

        return_dict = await self._create_registration_details(
            registered_user_id,
            params,
            should_issue_refresh_token=should_issue_refresh_token,
            registered_user_id, params
        )

        if registered:

@@ -694,9 +677,7 @@ class RegisterRestServlet(RestServlet):

        return 200, return_dict

    async def _do_appservice_registration(
        self, username, as_token, body, should_issue_refresh_token: bool = False
    ):
    async def _do_appservice_registration(self, username, as_token, body):
        user_id = await self.registration_handler.appservice_register(
            username, as_token
        )

@@ -704,27 +685,19 @@ class RegisterRestServlet(RestServlet):
            user_id,
            body,
            is_appservice_ghost=True,
            should_issue_refresh_token=should_issue_refresh_token,
        )

    async def _create_registration_details(
        self,
        user_id: str,
        params: JsonDict,
        is_appservice_ghost: bool = False,
        should_issue_refresh_token: bool = False,
        self, user_id, params, is_appservice_ghost=False
    ):
        """Complete registration of newly-registered user

        Allocates device_id if one was not given; also creates access_token.

        Args:
            user_id: full canonical @user:id
            params: registration parameters, from which we pull device_id,
                initial_device_name and inhibit_login
            is_appservice_ghost
            should_issue_refresh_token: True if this registration should issue
                a refresh token alongside the access token.
            (str) user_id: full canonical @user:id
            (object) params: registration parameters, from which we pull
                device_id, initial_device_name and inhibit_login
        Returns:
            dictionary for response from /register
        """

@@ -732,29 +705,15 @@ class RegisterRestServlet(RestServlet):
        if not params.get("inhibit_login", False):
            device_id = params.get("device_id")
            initial_display_name = params.get("initial_device_display_name")
            (
                device_id,
                access_token,
                valid_until_ms,
                refresh_token,
            ) = await self.registration_handler.register_device(
            device_id, access_token = await self.registration_handler.register_device(
                user_id,
                device_id,
                initial_display_name,
                is_guest=False,
                is_appservice_ghost=is_appservice_ghost,
                should_issue_refresh_token=should_issue_refresh_token,
            )

            result.update({"access_token": access_token, "device_id": device_id})

            if valid_until_ms is not None:
                expires_in_ms = valid_until_ms - self.clock.time_msec()
                result["expires_in_ms"] = expires_in_ms

            if refresh_token is not None:
                result["refresh_token"] = refresh_token

        return result

    async def _do_guest_registration(self, params, address=None):

@@ -768,30 +727,19 @@ class RegisterRestServlet(RestServlet):
        # we have nowhere to store it.
        device_id = synapse.api.auth.GUEST_DEVICE_ID
        initial_display_name = params.get("initial_device_display_name")
        (
            device_id,
            access_token,
            valid_until_ms,
            refresh_token,
        ) = await self.registration_handler.register_device(
        device_id, access_token = await self.registration_handler.register_device(
            user_id, device_id, initial_display_name, is_guest=True
        )

        result = {
            "user_id": user_id,
            "device_id": device_id,
            "access_token": access_token,
            "home_server": self.hs.hostname,
        }

        if valid_until_ms is not None:
            expires_in_ms = valid_until_ms - self.clock.time_msec()
            result["expires_in_ms"] = expires_in_ms

        if refresh_token is not None:
            result["refresh_token"] = refresh_token

        return 200, result
        return (
            200,
            {
                "user_id": user_id,
                "device_id": device_id,
                "access_token": access_token,
                "home_server": self.hs.hostname,
            },
        )


def _calculate_registration_flows(
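Putting the deleted branches together, the `/register` response built above has roughly the following shape (values made up); `expires_in_ms` and `refresh_token` only appear when the token has a lifetime and a refresh token was requested:

    result = {
        "user_id": "@alice:example.org",
        "device_id": "ABCDEFGH",
        "access_token": "opaque-access-token",
        "home_server": "example.org",
    }
    result.update({"expires_in_ms": 60_000, "refresh_token": "opaque-refresh-token"})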
@@ -252,13 +252,10 @@ class SyncRestServlet(RestServlet):
        if sync_result.device_lists.left:
            response["device_lists"]["left"] = list(sync_result.device_lists.left)

        # We always include this because https://github.com/vector-im/element-android/issues/3725
        # The spec isn't terribly clear on when this can be omitted and how a client would tell
        # the difference between "no keys present" and "nothing changed" in terms of whole field
        # absent / individual key type entry absent
        # Corresponding synapse issue: https://github.com/matrix-org/synapse/issues/10456
        response["device_one_time_keys_count"] = sync_result.device_one_time_keys_count

        if sync_result.device_one_time_keys_count:
            response[
                "device_one_time_keys_count"
            ] = sync_result.device_one_time_keys_count
        if sync_result.device_unused_fallback_key_types:
            response[
                "org.matrix.msc2732.device_unused_fallback_key_types"
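The hunk above changes `device_one_time_keys_count` from "present only when non-empty" to "always present", so clients can tell "no keys uploaded" apart from "field omitted". A sketch of the two behaviours (counts made up):

    sync_counts = {}  # no one-time keys uploaded yet

    response = {}
    response["device_one_time_keys_count"] = sync_counts  # new: always set

    legacy = {}
    if sync_counts:  # old: an empty count dict meant the field was omitted
        legacy["device_one_time_keys_count"] = sync_counts
    assert "device_one_time_keys_count" not in legacy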
@@ -111,7 +111,7 @@ def make_conn(
    db_config: DatabaseConnectionConfig,
    engine: BaseDatabaseEngine,
    default_txn_name: str,
) -> "LoggingDatabaseConnection":
) -> Connection:
    """Make a new connection to the database and return it.

    Returns:

@@ -46,7 +46,6 @@ from .events_forward_extremities import EventForwardExtremitiesStore
from .filtering import FilteringStore
from .group_server import GroupServerStore
from .keys import KeyStore
from .lock import LockStore
from .media_repository import MediaRepositoryStore
from .metrics import ServerMetricsStore
from .monthly_active_users import MonthlyActiveUsersStore

@@ -120,7 +119,6 @@ class DataStore(
    CacheInvalidationWorkerStore,
    ServerMetricsStore,
    EventForwardExtremitiesStore,
    LockStore,
):
    def __init__(self, database: DatabasePool, db_conn, hs):
        self.hs = hs
@@ -14,36 +14,22 @@
import itertools
import logging
from queue import Empty, PriorityQueue
from typing import Collection, Dict, Iterable, List, Optional, Set, Tuple

from prometheus_client import Gauge
from typing import Collection, Dict, Iterable, List, Set, Tuple

from synapse.api.constants import MAX_DEPTH
from synapse.api.errors import StoreError
from synapse.api.room_versions import RoomVersion
from synapse.events import EventBase, make_event_from_dict
from synapse.events import EventBase
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause
from synapse.storage.database import DatabasePool, LoggingTransaction
from synapse.storage.databases.main.events_worker import EventsWorkerStore
from synapse.storage.databases.main.signatures import SignatureWorkerStore
from synapse.storage.engines import PostgresEngine
from synapse.storage.types import Cursor
from synapse.util import json_encoder
from synapse.util.caches.descriptors import cached
from synapse.util.caches.lrucache import LruCache
from synapse.util.iterutils import batch_iter

oldest_pdu_in_federation_staging = Gauge(
    "synapse_federation_server_oldest_inbound_pdu_in_staging",
    "The age in seconds since we received the oldest pdu in the federation staging area",
)

number_pdus_in_federation_queue = Gauge(
    "synapse_federation_server_number_inbound_pdu_in_staging",
    "The total number of events in the inbound federation staging",
)

logger = logging.getLogger(__name__)


@@ -66,8 +52,6 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
            500000, "_event_auth_cache", size_callback=len
        )  # type: LruCache[str, List[Tuple[str, int]]]

        self._clock.looping_call(self._get_stats_for_federation_staging, 30 * 1000)

    async def get_auth_chain(
        self, room_id: str, event_ids: Collection[str], include_given: bool = False
    ) -> List[EventBase]:

@@ -1060,187 +1044,6 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
            _delete_old_forward_extrem_cache_txn,
        )

    async def insert_received_event_to_staging(
        self, origin: str, event: EventBase
    ) -> None:
        """Insert a newly received event from federation into the staging area."""

        # We use an upsert here to handle the case where we see the same event
        # from the same server multiple times.
        await self.db_pool.simple_upsert(
            table="federation_inbound_events_staging",
            keyvalues={
                "origin": origin,
                "event_id": event.event_id,
            },
            values={},
            insertion_values={
                "room_id": event.room_id,
                "received_ts": self._clock.time_msec(),
                "event_json": json_encoder.encode(event.get_dict()),
                "internal_metadata": json_encoder.encode(
                    event.internal_metadata.get_dict()
                ),
            },
            desc="insert_received_event_to_staging",
        )
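`simple_upsert` with `values={}` above is roughly an insert-if-absent: a conflicting row is left untouched, which makes redelivery of the same event from the same origin a no-op. In hand-written SQL (PostgreSQL flavour; table and columns from the code above):

    sql = """
        INSERT INTO federation_inbound_events_staging
            (origin, event_id, room_id, received_ts, event_json, internal_metadata)
        VALUES (?, ?, ?, ?, ?, ?)
        ON CONFLICT (origin, event_id) DO NOTHING
    """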
    async def remove_received_event_from_staging(
        self,
        origin: str,
        event_id: str,
    ) -> Optional[int]:
        """Remove the given event from the staging area.

        Returns:
            The received_ts of the row that was deleted, if any.
        """
        if self.db_pool.engine.supports_returning:

            def _remove_received_event_from_staging_txn(txn):
                sql = """
                    DELETE FROM federation_inbound_events_staging
                    WHERE origin = ? AND event_id = ?
                    RETURNING received_ts
                """

                txn.execute(sql, (origin, event_id))
                return txn.fetchone()

            row = await self.db_pool.runInteraction(
                "remove_received_event_from_staging",
                _remove_received_event_from_staging_txn,
                db_autocommit=True,
            )
            if row is None:
                return None

            return row[0]

        else:

            def _remove_received_event_from_staging_txn(txn):
                received_ts = self.db_pool.simple_select_one_onecol_txn(
                    txn,
                    table="federation_inbound_events_staging",
                    keyvalues={
                        "origin": origin,
                        "event_id": event_id,
                    },
                    retcol="received_ts",
                    allow_none=True,
                )
                self.db_pool.simple_delete_txn(
                    txn,
                    table="federation_inbound_events_staging",
                    keyvalues={
                        "origin": origin,
                        "event_id": event_id,
                    },
                )

                return received_ts

            return await self.db_pool.runInteraction(
                "remove_received_event_from_staging",
                _remove_received_event_from_staging_txn,
            )
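Both branches above return the deleted row's `received_ts`; the only difference is whether the engine can do it in one statement. A condensed sketch of the equivalence (DB-API style cursor, names hypothetical):

    def remove(txn, origin, event_id, supports_returning):
        if supports_returning:
            txn.execute(
                "DELETE FROM federation_inbound_events_staging"
                " WHERE origin = ? AND event_id = ? RETURNING received_ts",
                (origin, event_id),
            )
            row = txn.fetchone()
            return row[0] if row else None
        # Fallback: SELECT then DELETE inside the same transaction.
        txn.execute(
            "SELECT received_ts FROM federation_inbound_events_staging"
            " WHERE origin = ? AND event_id = ?",
            (origin, event_id),
        )
        row = txn.fetchone()
        txn.execute(
            "DELETE FROM federation_inbound_events_staging"
            " WHERE origin = ? AND event_id = ?",
            (origin, event_id),
        )
        return row[0] if row else None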
    async def get_next_staged_event_id_for_room(
        self,
        room_id: str,
    ) -> Optional[Tuple[str, str]]:
        """Get the next event ID in the staging area for the given room."""

        def _get_next_staged_event_id_for_room_txn(txn):
            sql = """
                SELECT origin, event_id
                FROM federation_inbound_events_staging
                WHERE room_id = ?
                ORDER BY received_ts ASC
                LIMIT 1
            """

            txn.execute(sql, (room_id,))

            return txn.fetchone()

        return await self.db_pool.runInteraction(
            "get_next_staged_event_id_for_room", _get_next_staged_event_id_for_room_txn
        )

    async def get_next_staged_event_for_room(
        self,
        room_id: str,
        room_version: RoomVersion,
    ) -> Optional[Tuple[str, EventBase]]:
        """Get the next event in the staging area for the given room."""

        def _get_next_staged_event_for_room_txn(txn):
            sql = """
                SELECT event_json, internal_metadata, origin
                FROM federation_inbound_events_staging
                WHERE room_id = ?
                ORDER BY received_ts ASC
                LIMIT 1
            """
            txn.execute(sql, (room_id,))

            return txn.fetchone()

        row = await self.db_pool.runInteraction(
            "get_next_staged_event_for_room", _get_next_staged_event_for_room_txn
        )

        if not row:
            return None

        event_d = db_to_json(row[0])
        internal_metadata_d = db_to_json(row[1])
        origin = row[2]

        event = make_event_from_dict(
            event_dict=event_d,
            room_version=room_version,
            internal_metadata_dict=internal_metadata_d,
        )

        return origin, event

    async def get_all_rooms_with_staged_incoming_events(self) -> List[str]:
        """Get the room IDs of all events currently staged."""
        return await self.db_pool.simple_select_onecol(
            table="federation_inbound_events_staging",
            keyvalues={},
            retcol="DISTINCT room_id",
            desc="get_all_rooms_with_staged_incoming_events",
        )

    @wrap_as_background_process("_get_stats_for_federation_staging")
    async def _get_stats_for_federation_staging(self):
        """Update the prometheus metrics for the inbound federation staging area."""

        def _get_stats_for_federation_staging_txn(txn):
            txn.execute(
                "SELECT coalesce(count(*), 0) FROM federation_inbound_events_staging"
            )
            (count,) = txn.fetchone()

            txn.execute(
                "SELECT coalesce(min(received_ts), 0) FROM federation_inbound_events_staging"
            )

            (age,) = txn.fetchone()

            return count, age

        count, age = await self.db_pool.runInteraction(
            "_get_stats_for_federation_staging", _get_stats_for_federation_staging_txn
        )

        number_pdus_in_federation_queue.set(count)
        oldest_pdu_in_federation_staging.set(age)


class EventFederationStore(EventFederationWorkerStore):
    """Responsible for storing and serving up the various graphs associated
@@ -29,34 +29,6 @@ from synapse.types import JsonDict
logger = logging.getLogger(__name__)


_REPLACE_STREAM_ORDERING_SQL_COMMANDS = (
    # there should be no leftover rows without a stream_ordering2, but just in case...
    "UPDATE events SET stream_ordering2 = stream_ordering WHERE stream_ordering2 IS NULL",
    # now we can drop the rule and switch the columns
    "DROP RULE populate_stream_ordering2 ON events",
    "ALTER TABLE events DROP COLUMN stream_ordering",
    "ALTER TABLE events RENAME COLUMN stream_ordering2 TO stream_ordering",
    # ... and finally, rename the indexes into place for consistency with sqlite
    "ALTER INDEX event_contains_url_index2 RENAME TO event_contains_url_index",
    "ALTER INDEX events_order_room2 RENAME TO events_order_room",
    "ALTER INDEX events_room_stream2 RENAME TO events_room_stream",
    "ALTER INDEX events_ts2 RENAME TO events_ts",
)


class _BackgroundUpdates:
    EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts"
    EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url"
    DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities"
    POPULATE_STREAM_ORDERING2 = "populate_stream_ordering2"
    INDEX_STREAM_ORDERING2 = "index_stream_ordering2"
    INDEX_STREAM_ORDERING2_CONTAINS_URL = "index_stream_ordering2_contains_url"
    INDEX_STREAM_ORDERING2_ROOM_ORDER = "index_stream_ordering2_room_order"
    INDEX_STREAM_ORDERING2_ROOM_STREAM = "index_stream_ordering2_room_stream"
    INDEX_STREAM_ORDERING2_TS = "index_stream_ordering2_ts"
    REPLACE_STREAM_ORDERING_COLUMN = "replace_stream_ordering_column"


@attr.s(slots=True, frozen=True)
class _CalculateChainCover:
    """Return value for _calculate_chain_cover_txn."""

@@ -76,15 +48,19 @@ class _CalculateChainCover:


class EventsBackgroundUpdatesStore(SQLBaseStore):

    EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts"
    EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url"
    DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities"

    def __init__(self, database: DatabasePool, db_conn, hs):
        super().__init__(database, db_conn, hs)

        self.db_pool.updates.register_background_update_handler(
            _BackgroundUpdates.EVENT_ORIGIN_SERVER_TS_NAME,
            self._background_reindex_origin_server_ts,
            self.EVENT_ORIGIN_SERVER_TS_NAME, self._background_reindex_origin_server_ts
        )
        self.db_pool.updates.register_background_update_handler(
            _BackgroundUpdates.EVENT_FIELDS_SENDER_URL_UPDATE_NAME,
            self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME,
            self._background_reindex_fields_sender,
        )

@@ -109,8 +85,7 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
        )

        self.db_pool.updates.register_background_update_handler(
            _BackgroundUpdates.DELETE_SOFT_FAILED_EXTREMITIES,
            self._cleanup_extremities_bg_update,
            self.DELETE_SOFT_FAILED_EXTREMITIES, self._cleanup_extremities_bg_update
        )

        self.db_pool.updates.register_background_update_handler(
@@ -164,59 +139,6 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
            self._purged_chain_cover_index,
        )

        ################################################################################

        # bg updates for replacing stream_ordering with a BIGINT
        # (these only run on postgres.)

        self.db_pool.updates.register_background_update_handler(
            _BackgroundUpdates.POPULATE_STREAM_ORDERING2,
            self._background_populate_stream_ordering2,
        )
        # CREATE UNIQUE INDEX events_stream_ordering ON events(stream_ordering2);
        self.db_pool.updates.register_background_index_update(
            _BackgroundUpdates.INDEX_STREAM_ORDERING2,
            index_name="events_stream_ordering",
            table="events",
            columns=["stream_ordering2"],
            unique=True,
        )
        # CREATE INDEX event_contains_url_index ON events(room_id, topological_ordering, stream_ordering) WHERE contains_url = true AND outlier = false;
        self.db_pool.updates.register_background_index_update(
            _BackgroundUpdates.INDEX_STREAM_ORDERING2_CONTAINS_URL,
            index_name="event_contains_url_index2",
            table="events",
            columns=["room_id", "topological_ordering", "stream_ordering2"],
            where_clause="contains_url = true AND outlier = false",
        )
        # CREATE INDEX events_order_room ON events(room_id, topological_ordering, stream_ordering);
        self.db_pool.updates.register_background_index_update(
            _BackgroundUpdates.INDEX_STREAM_ORDERING2_ROOM_ORDER,
            index_name="events_order_room2",
            table="events",
            columns=["room_id", "topological_ordering", "stream_ordering2"],
        )
        # CREATE INDEX events_room_stream ON events(room_id, stream_ordering);
        self.db_pool.updates.register_background_index_update(
            _BackgroundUpdates.INDEX_STREAM_ORDERING2_ROOM_STREAM,
            index_name="events_room_stream2",
            table="events",
            columns=["room_id", "stream_ordering2"],
        )
        # CREATE INDEX events_ts ON events(origin_server_ts, stream_ordering);
        self.db_pool.updates.register_background_index_update(
            _BackgroundUpdates.INDEX_STREAM_ORDERING2_TS,
            index_name="events_ts2",
            table="events",
            columns=["origin_server_ts", "stream_ordering2"],
        )
        self.db_pool.updates.register_background_update_handler(
            _BackgroundUpdates.REPLACE_STREAM_ORDERING_COLUMN,
            self._background_replace_stream_ordering_column,
        )

        ################################################################################

    async def _background_reindex_fields_sender(self, progress, batch_size):
        target_min_stream_id = progress["target_min_stream_id_inclusive"]
        max_stream_id = progress["max_stream_id_exclusive"]

@@ -268,18 +190,18 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
            }

            self.db_pool.updates._background_update_progress_txn(
                txn, _BackgroundUpdates.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, progress
                txn, self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, progress
            )

            return len(rows)

        result = await self.db_pool.runInteraction(
            _BackgroundUpdates.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, reindex_txn
            self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, reindex_txn
        )

        if not result:
            await self.db_pool.updates._end_background_update(
                _BackgroundUpdates.EVENT_FIELDS_SENDER_URL_UPDATE_NAME
                self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME
            )

        return result

@@ -342,18 +264,18 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
            }

            self.db_pool.updates._background_update_progress_txn(
                txn, _BackgroundUpdates.EVENT_ORIGIN_SERVER_TS_NAME, progress
                txn, self.EVENT_ORIGIN_SERVER_TS_NAME, progress
            )

            return len(rows_to_update)

        result = await self.db_pool.runInteraction(
            _BackgroundUpdates.EVENT_ORIGIN_SERVER_TS_NAME, reindex_search_txn
            self.EVENT_ORIGIN_SERVER_TS_NAME, reindex_search_txn
        )

        if not result:
            await self.db_pool.updates._end_background_update(
                _BackgroundUpdates.EVENT_ORIGIN_SERVER_TS_NAME
                self.EVENT_ORIGIN_SERVER_TS_NAME
            )

        return result

@@ -532,7 +454,7 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):

        if not num_handled:
            await self.db_pool.updates._end_background_update(
                _BackgroundUpdates.DELETE_SOFT_FAILED_EXTREMITIES
                self.DELETE_SOFT_FAILED_EXTREMITIES
            )

        def _drop_table_txn(txn):

@@ -1087,81 +1009,3 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
        await self.db_pool.updates._end_background_update("purged_chain_cover")

        return result

    async def _background_populate_stream_ordering2(
        self, progress: JsonDict, batch_size: int
    ) -> int:
        """Populate events.stream_ordering2, then replace stream_ordering

        This is to deal with the fact that stream_ordering was initially created as a
        32-bit integer field.
        """
        batch_size = max(batch_size, 1)

        def process(txn: Cursor) -> int:
            last_stream = progress.get("last_stream", -(1 << 31))
            txn.execute(
                """
                UPDATE events SET stream_ordering2=stream_ordering
                WHERE stream_ordering IN (
                   SELECT stream_ordering FROM events WHERE stream_ordering > ?
                   ORDER BY stream_ordering LIMIT ?
                )
                RETURNING stream_ordering;
                """,
                (last_stream, batch_size),
            )
            row_count = txn.rowcount
            if row_count == 0:
                return 0
            last_stream = max(row[0] for row in txn)
            logger.info("populated stream_ordering2 up to %i", last_stream)

            self.db_pool.updates._background_update_progress_txn(
                txn,
                _BackgroundUpdates.POPULATE_STREAM_ORDERING2,
                {"last_stream": last_stream},
            )
            return row_count

        result = await self.db_pool.runInteraction(
            "_background_populate_stream_ordering2", process
        )

        if result != 0:
            return result

        await self.db_pool.updates._end_background_update(
            _BackgroundUpdates.POPULATE_STREAM_ORDERING2
        )
        return 0
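`_background_populate_stream_ordering2` above follows the standard batched-backfill recipe: process `batch_size` rows past the saved high-water mark, persist the new mark in the progress dict, and report 0 when done. A stripped-down, runnable sketch with a list standing in for the events table:

    def backfill(rows, progress, batch_size):
        last = progress.get("last_stream", -(1 << 31))
        batch = sorted(r for r in rows if r > last)[:batch_size]
        if not batch:
            return 0  # signals the background update is complete
        progress["last_stream"] = max(batch)  # persist the high-water mark
        return len(batch)

    progress = {}
    while backfill([1, 2, 3, 4, 5], progress, batch_size=2):
        pass
    assert progress["last_stream"] == 5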
    async def _background_replace_stream_ordering_column(
        self, progress: JsonDict, batch_size: int
    ) -> int:
        """Drop the old 'stream_ordering' column and rename 'stream_ordering2' into its place."""

        def process(txn: Cursor) -> None:
            for sql in _REPLACE_STREAM_ORDERING_SQL_COMMANDS:
                logger.info("completing stream_ordering migration: %s", sql)
                txn.execute(sql)

        # ANALYZE the new column to build stats on it, to encourage PostgreSQL to use the
        # indexes on it.
        # We need to pass execute a dummy function to handle the txn's result otherwise
        # it tries to call fetchall() on it and fails because there's no result to fetch.
        await self.db_pool.execute(
            "background_analyze_new_stream_ordering_column",
            lambda txn: None,
            "ANALYZE events(stream_ordering2)",
        )

        await self.db_pool.runInteraction(
            "_background_replace_stream_ordering_column", process
        )

        await self.db_pool.updates._end_background_update(
            _BackgroundUpdates.REPLACE_STREAM_ORDERING_COLUMN
        )

        return 0
@@ -1,345 +0,0 @@
# Copyright 2021 Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from types import TracebackType
from typing import TYPE_CHECKING, Dict, Optional, Tuple, Type

from twisted.internet.interfaces import IReactorCore

from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.storage._base import SQLBaseStore
from synapse.storage.database import DatabasePool, LoggingTransaction
from synapse.storage.types import Connection
from synapse.util import Clock
from synapse.util.stringutils import random_string

if TYPE_CHECKING:
    from synapse.server import HomeServer


logger = logging.getLogger(__name__)


# How often to renew an acquired lock by updating the `last_renewed_ts` time in
# the lock table.
_RENEWAL_INTERVAL_MS = 30 * 1000

# How long before an acquired lock times out.
_LOCK_TIMEOUT_MS = 2 * 60 * 1000


class LockStore(SQLBaseStore):
    """Provides a best effort distributed lock between worker instances.

    Locks are identified by a name and key. A lock is acquired by inserting into
    the `worker_locks` table if a) there is no existing row for the name/key or
    b) the existing row has a `last_renewed_ts` older than `_LOCK_TIMEOUT_MS`.

    When a lock is taken out the instance inserts a random `token`, the instance
    that holds that token holds the lock until it drops (or times out).

    The instance that holds the lock should regularly update the
    `last_renewed_ts` column with the current time.
    """

    def __init__(self, database: DatabasePool, db_conn: Connection, hs: "HomeServer"):
        super().__init__(database, db_conn, hs)

        self._reactor = hs.get_reactor()
        self._instance_name = hs.get_instance_id()

        # A map from `(lock_name, lock_key)` to the token of any locks that we
        # think we currently hold.
        self._live_tokens: Dict[Tuple[str, str], str] = {}

        # When we shut down we want to remove the locks. Technically this can
        # lead to a race, as we may drop the lock while we are still processing.
        # However, a) it should be a small window, b) the lock is best effort
        # anyway and c) we want to really avoid leaking locks when we restart.
        hs.get_reactor().addSystemEventTrigger(
            "before",
            "shutdown",
            self._on_shutdown,
        )

    @wrap_as_background_process("LockStore._on_shutdown")
    async def _on_shutdown(self) -> None:
        """Called when the server is shutting down"""
        logger.info("Dropping held locks due to shutdown")

        for (lock_name, lock_key), token in self._live_tokens.items():
            await self._drop_lock(lock_name, lock_key, token)

        logger.info("Dropped locks due to shutdown")

    async def try_acquire_lock(self, lock_name: str, lock_key: str) -> Optional["Lock"]:
        """Try to acquire a lock for the given name/key. Will return an async
        context manager if the lock is successfully acquired, which *must* be
        used (otherwise the lock will leak).
        """

        now = self._clock.time_msec()
        token = random_string(6)

        if self.db_pool.engine.can_native_upsert:

            def _try_acquire_lock_txn(txn: LoggingTransaction) -> bool:
                # We take out the lock if either a) there is no row for the lock
                # already or b) the existing row has timed out.
                sql = """
                    INSERT INTO worker_locks (lock_name, lock_key, instance_name, token, last_renewed_ts)
                    VALUES (?, ?, ?, ?, ?)
                    ON CONFLICT (lock_name, lock_key)
                    DO UPDATE
                        SET
                            token = EXCLUDED.token,
                            instance_name = EXCLUDED.instance_name,
                            last_renewed_ts = EXCLUDED.last_renewed_ts
                        WHERE
                            worker_locks.last_renewed_ts < ?
                """
                txn.execute(
                    sql,
                    (
                        lock_name,
                        lock_key,
                        self._instance_name,
                        token,
                        now,
                        now - _LOCK_TIMEOUT_MS,
                    ),
                )

                # We only acquired the lock if we inserted or updated the table.
                return bool(txn.rowcount)

            did_lock = await self.db_pool.runInteraction(
                "try_acquire_lock",
                _try_acquire_lock_txn,
                # We can autocommit here as we're executing a single query, this
                # will avoid serialization errors.
                db_autocommit=True,
            )
            if not did_lock:
                return None

        else:
            # If we're on an old SQLite we emulate the above logic by first
            # clearing out any existing stale locks and then upserting.

            def _try_acquire_lock_emulated_txn(txn: LoggingTransaction) -> bool:
                sql = """
                    DELETE FROM worker_locks
                    WHERE
                        lock_name = ?
                        AND lock_key = ?
                        AND last_renewed_ts < ?
                """
                txn.execute(
                    sql,
                    (lock_name, lock_key, now - _LOCK_TIMEOUT_MS),
                )

                inserted = self.db_pool.simple_upsert_txn_emulated(
                    txn,
                    table="worker_locks",
                    keyvalues={
                        "lock_name": lock_name,
                        "lock_key": lock_key,
                    },
                    values={},
                    insertion_values={
                        "token": token,
                        "last_renewed_ts": self._clock.time_msec(),
                        "instance_name": self._instance_name,
                    },
                )

                return inserted

            did_lock = await self.db_pool.runInteraction(
                "try_acquire_lock_emulated", _try_acquire_lock_emulated_txn
            )

            if not did_lock:
                return None

        self._live_tokens[(lock_name, lock_key)] = token

        return Lock(
            self._reactor,
            self._clock,
            self,
            lock_name=lock_name,
            lock_key=lock_key,
            token=token,
        )

    async def _is_lock_still_valid(
        self, lock_name: str, lock_key: str, token: str
    ) -> bool:
        """Checks whether this instance still holds the lock."""
        last_renewed_ts = await self.db_pool.simple_select_one_onecol(
            table="worker_locks",
            keyvalues={
                "lock_name": lock_name,
                "lock_key": lock_key,
                "token": token,
            },
            retcol="last_renewed_ts",
            allow_none=True,
            desc="is_lock_still_valid",
        )
        return (
            last_renewed_ts is not None
            and self._clock.time_msec() - _LOCK_TIMEOUT_MS < last_renewed_ts
        )

    async def _renew_lock(self, lock_name: str, lock_key: str, token: str) -> None:
        """Attempt to renew the lock if we still hold it."""
        await self.db_pool.simple_update(
            table="worker_locks",
            keyvalues={
                "lock_name": lock_name,
                "lock_key": lock_key,
                "token": token,
            },
            updatevalues={"last_renewed_ts": self._clock.time_msec()},
            desc="renew_lock",
        )

    async def _drop_lock(self, lock_name: str, lock_key: str, token: str) -> None:
        """Attempt to drop the lock, if we still hold it"""
        await self.db_pool.simple_delete(
            table="worker_locks",
            keyvalues={
                "lock_name": lock_name,
                "lock_key": lock_key,
                "token": token,
            },
            desc="drop_lock",
        )

        self._live_tokens.pop((lock_name, lock_key), None)


class Lock:
    """An async context manager that manages an acquired lock, ensuring it is
    regularly renewed and dropping it when the context manager exits.

    The lock object has an `is_still_valid` method which can be used to
    double-check the lock is still valid, if e.g. processing work in a loop.

    For example:

        lock = await self.store.try_acquire_lock(...)
        if not lock:
            return

        async with lock:
            for item in work:
                await process(item)

                if not await lock.is_still_valid():
                    break
    """

    def __init__(
        self,
        reactor: IReactorCore,
        clock: Clock,
        store: LockStore,
        lock_name: str,
        lock_key: str,
        token: str,
    ) -> None:
        self._reactor = reactor
        self._clock = clock
        self._store = store
        self._lock_name = lock_name
        self._lock_key = lock_key

        self._token = token

        self._looping_call = clock.looping_call(
            self._renew, _RENEWAL_INTERVAL_MS, store, lock_name, lock_key, token
        )

        self._dropped = False

    @staticmethod
    @wrap_as_background_process("Lock._renew")
    async def _renew(
        store: LockStore,
        lock_name: str,
        lock_key: str,
        token: str,
    ) -> None:
        """Renew the lock.

        Note: this is a static method, rather than using self.*, so that we
        don't end up with a reference to `self` in the reactor, which would stop
        this from being cleaned up if we dropped the context manager.
        """
        await store._renew_lock(lock_name, lock_key, token)

    async def is_still_valid(self) -> bool:
        """Check if the lock is still held by us"""
        return await self._store._is_lock_still_valid(
            self._lock_name, self._lock_key, self._token
        )

    async def __aenter__(self) -> None:
        if self._dropped:
            raise Exception("Cannot reuse a Lock object")

    async def __aexit__(
        self,
        _exctype: Optional[Type[BaseException]],
        _excinst: Optional[BaseException],
        _exctb: Optional[TracebackType],
    ) -> bool:
        await self.release()

        return False

    async def release(self) -> None:
        """Release the lock.

        This is automatically called when using the lock as a context manager.
        """

        if self._dropped:
            return

        if self._looping_call.running:
            self._looping_call.stop()

        await self._store._drop_lock(self._lock_name, self._lock_key, self._token)
        self._dropped = True

    def __del__(self) -> None:
        if not self._dropped:
            # We should not be dropped without the lock being released (unless
            # we're shutting down), but if we are then let's at least stop
            # renewing the lock.
            if self._looping_call.running:
                self._looping_call.stop()

            if self._reactor.running:
                logger.error(
                    "Lock for (%s, %s) dropped without being released",
                    self._lock_name,
                    self._lock_key,
                )
@@ -73,20 +73,20 @@ class ProfileWorkerStore(SQLBaseStore):
    async def set_profile_displayname(
        self, user_localpart: str, new_displayname: Optional[str]
    ) -> None:
        await self.db_pool.simple_upsert(
        await self.db_pool.simple_update_one(
            table="profiles",
            keyvalues={"user_id": user_localpart},
            values={"displayname": new_displayname},
            updatevalues={"displayname": new_displayname},
            desc="set_profile_displayname",
        )

    async def set_profile_avatar_url(
        self, user_localpart: str, new_avatar_url: Optional[str]
    ) -> None:
        await self.db_pool.simple_upsert(
        await self.db_pool.simple_update_one(
            table="profiles",
            keyvalues={"user_id": user_localpart},
            values={"avatar_url": new_avatar_url},
            updatevalues={"avatar_url": new_avatar_url},
            desc="set_profile_avatar_url",
        )
|
||||
valid_until_ms: The timestamp the token expires, if any.
|
||||
token_owner: The "owner" of the token. This is either the same as the
|
||||
user, or a server admin who is logged in as the user.
|
||||
token_used: True if this token was used at least once in a request.
|
||||
This field can be out of date since `get_user_by_access_token` is
|
||||
cached.
|
||||
"""
|
||||
|
||||
user_id = attr.ib(type=str)
|
||||
@@ -65,7 +62,6 @@ class TokenLookupResult:
|
||||
device_id = attr.ib(type=Optional[str], default=None)
|
||||
valid_until_ms = attr.ib(type=Optional[int], default=None)
|
||||
token_owner = attr.ib(type=str)
|
||||
token_used = attr.ib(type=bool, default=False)
|
||||
|
||||
# Make the token owner default to the user ID, which is the common case.
|
||||
@token_owner.default
|
||||
@@ -73,29 +69,6 @@ class TokenLookupResult:
|
||||
return self.user_id
|
||||
|
||||
|
||||
@attr.s(frozen=True, slots=True)
|
||||
class RefreshTokenLookupResult:
|
||||
"""Result of looking up a refresh token."""
|
||||
|
||||
user_id = attr.ib(type=str)
|
||||
"""The user this token belongs to."""
|
||||
|
||||
device_id = attr.ib(type=str)
|
||||
"""The device associated with this refresh token."""
|
||||
|
||||
token_id = attr.ib(type=int)
|
||||
"""The ID of this refresh token."""
|
||||
|
||||
next_token_id = attr.ib(type=Optional[int])
|
||||
"""The ID of the refresh token which replaced this one."""
|
||||
|
||||
has_next_refresh_token_been_refreshed = attr.ib(type=bool)
|
||||
"""True if the next refresh token was used for another refresh."""
|
||||
|
||||
has_next_access_token_been_used = attr.ib(type=bool)
|
||||
"""True if the next access token was already used at least once."""
|
||||
|
||||
|
||||
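A sketch of the rotation bookkeeping `RefreshTokenLookupResult` above captures: each refresh token records its successor, so presenting an already-rotated token can be recognised (and, once the successor has been used, treated as evidence of token theft). Illustrative, not Synapse code:

    class Token:
        def __init__(self, token_id):
            self.token_id = token_id
            self.next_token_id = None  # set once this token has been refreshed

    rt1, rt2 = Token(1), Token(2)
    rt1.next_token_id = rt2.token_id  # rt1 was exchanged for rt2
    # Refreshing with rt1 again is tolerated only while rt2 (and the access
    # token issued with it) is unused; afterwards it indicates reuse.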
class RegistrationWorkerStore(CacheInvalidationWorkerStore):
    def __init__(
        self,

@@ -468,8 +441,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
                access_tokens.id as token_id,
                access_tokens.device_id,
                access_tokens.valid_until_ms,
                access_tokens.user_id as token_owner,
                access_tokens.used as token_used
                access_tokens.user_id as token_owner
            FROM users
            INNER JOIN access_tokens on users.name = COALESCE(puppets_user_id, access_tokens.user_id)
            WHERE token = ?

@@ -477,15 +449,8 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):

            txn.execute(sql, (token,))
            rows = self.db_pool.cursor_to_dict(txn)

            if rows:
                row = rows[0]

                # This field is nullable, ensure it comes out as a boolean
                if row["token_used"] is None:
                    row["token_used"] = False

                return TokenLookupResult(**row)
                return TokenLookupResult(**rows[0])

            return None

@@ -1107,111 +1072,6 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
            desc="update_access_token_last_validated",
        )

    @cached()
    async def mark_access_token_as_used(self, token_id: int) -> None:
        """
        Mark the access token as used, which invalidates the refresh token used
        to obtain it.

        Because get_user_by_access_token is cached, this function might be
        called multiple times for the same token, effectively doing unnecessary
        SQL updates. Because updating the `used` field only goes one way (from
        False to True) it is safe to cache this function as well to avoid this
        issue.

        Args:
            token_id: The ID of the access token to update.
        Raises:
            StoreError if there was a problem updating this.
        """
        await self.db_pool.simple_update_one(
            "access_tokens",
            {"id": token_id},
            {"used": True},
            desc="mark_access_token_as_used",
        )

    async def lookup_refresh_token(
        self, token: str
    ) -> Optional[RefreshTokenLookupResult]:
        """Lookup a refresh token with hints about its validity."""

        def _lookup_refresh_token_txn(txn) -> Optional[RefreshTokenLookupResult]:
            txn.execute(
                """
                SELECT
                    rt.id token_id,
                    rt.user_id,
                    rt.device_id,
                    rt.next_token_id,
                    (nrt.next_token_id IS NOT NULL) has_next_refresh_token_been_refreshed,
                    at.used has_next_access_token_been_used
                FROM refresh_tokens rt
                LEFT JOIN refresh_tokens nrt ON rt.next_token_id = nrt.id
                LEFT JOIN access_tokens at ON at.refresh_token_id = nrt.id
                WHERE rt.token = ?
                """,
                (token,),
            )
            row = txn.fetchone()

            if row is None:
                return None

            return RefreshTokenLookupResult(
                token_id=row[0],
                user_id=row[1],
                device_id=row[2],
                next_token_id=row[3],
                has_next_refresh_token_been_refreshed=row[4],
                # This column is nullable, ensure it's a boolean
                has_next_access_token_been_used=(row[5] or False),
            )

        return await self.db_pool.runInteraction(
            "lookup_refresh_token", _lookup_refresh_token_txn
        )

    async def replace_refresh_token(self, token_id: int, next_token_id: int) -> None:
        """
        Set the successor of a refresh token, removing the existing successor
        if any.

        Args:
            token_id: ID of the refresh token to update.
            next_token_id: ID of its successor.
        """

        def _replace_refresh_token_txn(txn) -> None:
            # First check if there was an existing refresh token
            old_next_token_id = self.db_pool.simple_select_one_onecol_txn(
                txn,
                "refresh_tokens",
                {"id": token_id},
                "next_token_id",
                allow_none=True,
            )

            self.db_pool.simple_update_one_txn(
                txn,
                "refresh_tokens",
                {"id": token_id},
                {"next_token_id": next_token_id},
            )

            # Delete the old "next" token if it exists. This should cascade and
            # delete the associated access_token
            if old_next_token_id is not None:
                self.db_pool.simple_delete_one_txn(
                    txn,
                    "refresh_tokens",
                    {"id": old_next_token_id},
                )

        await self.db_pool.runInteraction(
            "replace_refresh_token", _replace_refresh_token_txn
        )
class RegistrationBackgroundUpdateStore(RegistrationWorkerStore):
|
||||
def __init__(
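
Taken together, the three methods deleted above were the storage half of refresh-token rotation: `lookup_refresh_token` returned reuse hints via the self-join on `next_token_id`, `mark_access_token_as_used` flipped a one-way flag (which is why wrapping it in `@cached()` is safe: repeated calls converge on the same state), and `replace_refresh_token` pointed a token at its successor while cascading away the previous one. Roughly, calling code on the old side of this diff could have driven them as below (an illustrative sketch with an invented function name, not Synapse's actual auth handler):

    async def rotate_tokens(store, refresh_token, new_access, new_refresh):
        # Sketch only: composes the store methods removed in this diff.
        hints = await store.lookup_refresh_token(refresh_token)
        if hints is None:
            raise RuntimeError("unknown refresh token")

        # Reuse detection: if this token's successor was itself refreshed,
        # or the successor's access token was already used, refuse to
        # rotate the same token a second time.
        if (
            hints.has_next_refresh_token_been_refreshed
            or hints.has_next_access_token_been_used
        ):
            raise RuntimeError("refresh token already rotated and consumed")

        new_refresh_id = await store.add_refresh_token_to_user(
            hints.user_id, new_refresh, hints.device_id
        )
        await store.add_access_token_to_user(
            hints.user_id,
            new_access,
            hints.device_id,
            valid_until_ms=None,
            refresh_token_id=new_refresh_id,
        )
        # Point the old token at its successor; any previous successor
        # (and, via cascade, its access token) is deleted in the txn.
        await store.replace_refresh_token(hints.token_id, new_refresh_id)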
@@ -1403,7 +1263,6 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
         self._ignore_unknown_session_error = hs.config.request_token_inhibit_3pid_errors

         self._access_tokens_id_gen = IdGenerator(db_conn, "access_tokens", "id")
-        self._refresh_tokens_id_gen = IdGenerator(db_conn, "refresh_tokens", "id")

     async def add_access_token_to_user(
         self,
@@ -1412,18 +1271,14 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
         device_id: Optional[str],
         valid_until_ms: Optional[int],
         puppets_user_id: Optional[str] = None,
-        refresh_token_id: Optional[int] = None,
     ) -> int:
         """Adds an access token for the given user.

         Args:
             user_id: The user ID.
             token: The new access token to add.
-            device_id: ID of the device to associate with the access token.
+            device_id: ID of the device to associate with the access token
             valid_until_ms: when the token is valid until. None for no expiry.
             puppets_user_id
-            refresh_token_id: ID of the refresh token generated alongside this
-                access token.
         Raises:
             StoreError if there was a problem adding this.
         Returns:
@@ -1442,47 +1297,12 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
                 "valid_until_ms": valid_until_ms,
                 "puppets_user_id": puppets_user_id,
                 "last_validated": now,
-                "refresh_token_id": refresh_token_id,
-                "used": False,
             },
             desc="add_access_token_to_user",
         )

         return next_id

-    async def add_refresh_token_to_user(
-        self,
-        user_id: str,
-        token: str,
-        device_id: Optional[str],
-    ) -> int:
-        """Adds a refresh token for the given user.
-
-        Args:
-            user_id: The user ID.
-            token: The new access token to add.
-            device_id: ID of the device to associate with the refresh token.
-        Raises:
-            StoreError if there was a problem adding this.
-        Returns:
-            The token ID
-        """
-        next_id = self._refresh_tokens_id_gen.get_next()
-
-        await self.db_pool.simple_insert(
-            "refresh_tokens",
-            {
-                "id": next_id,
-                "user_id": user_id,
-                "device_id": device_id,
-                "token": token,
-                "next_token_id": None,
-            },
-            desc="add_refresh_token_to_user",
-        )
-
-        return next_id
-
     def _set_device_for_access_token_txn(self, txn, token: str, device_id: str) -> str:
         old_device_id = self.db_pool.simple_select_one_onecol_txn(
             txn, "access_tokens", {"token": token}, "device_id"
@@ -1725,7 +1545,7 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
         device_id: Optional[str] = None,
     ) -> List[Tuple[str, int, Optional[str]]]:
         """
-        Invalidate access and refresh tokens belonging to a user
+        Invalidate access tokens belonging to a user

         Args:
             user_id: ID of user the tokens belong to
@@ -1745,13 +1565,7 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):
             items = keyvalues.items()
             where_clause = " AND ".join(k + " = ?" for k, _ in items)
             values = [v for _, v in items]  # type: List[Union[str, int]]
-            # Conveniently, refresh_tokens and access_tokens both use the user_id and device_id fields. Only caveat
-            # is the `except_token_id` param that is tricky to get right, so for now we're just using the same where
-            # clause and values before we handle that. This seems to be only used in the "set password" handler.
-            refresh_where_clause = where_clause
-            refresh_values = values.copy()
             if except_token_id:
-                # TODO: support that for refresh tokens
                 where_clause += " AND id != ?"
                 values.append(except_token_id)

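The surviving clause-building keeps SQL text and bind parameters separate, which is what makes the later string interpolation safe: only column names from trusted code go into the SQL, while user-supplied values travel as `?` placeholders. A standalone sketch of the same idiom (invented helper, for illustration only):

    from typing import Dict, List, Optional, Tuple, Union

    def build_where(
        keyvalues: Dict[str, Union[str, int]],
        except_token_id: Optional[int] = None,
    ) -> Tuple[str, List[Union[str, int]]]:
        # Column names come from code, never from user input.
        items = list(keyvalues.items())
        where_clause = " AND ".join(k + " = ?" for k, _ in items)
        values: List[Union[str, int]] = [v for _, v in items]
        if except_token_id:
            where_clause += " AND id != ?"
            values.append(except_token_id)
        return where_clause, values

    clause, args = build_where(
        {"user_id": "@alice:example.com", "device_id": "DEVICE"},
        except_token_id=7,
    )
    # clause == "user_id = ? AND device_id = ? AND id != ?"
    # args   == ["@alice:example.com", "DEVICE", 7]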
@@ -1769,11 +1583,6 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):

             txn.execute("DELETE FROM access_tokens WHERE %s" % where_clause, values)

-            txn.execute(
-                "DELETE FROM refresh_tokens WHERE %s" % refresh_where_clause,
-                refresh_values,
-            )
-
             return tokens_and_devices

         return await self.db_pool.runInteraction("user_delete_access_tokens", f)
@@ -1790,14 +1599,6 @@ class RegistrationStore(StatsStore, RegistrationBackgroundUpdateStore):

         await self.db_pool.runInteraction("delete_access_token", f)

-    async def delete_refresh_token(self, refresh_token: str) -> None:
-        def f(txn):
-            self.db_pool.simple_delete_one_txn(
-                txn, table="refresh_tokens", keyvalues={"token": refresh_token}
-            )
-
-        await self.db_pool.runInteraction("delete_refresh_token", f)
-
     async def add_user_pending_deactivation(self, user_id: str) -> None:
         """
         Adds a user to the table of users who need to be parted from all the rooms they're

@@ -49,12 +49,6 @@ class BaseDatabaseEngine(Generic[ConnectionType], metaclass=abc.ABCMeta):
         """
         ...

-    @property
-    @abc.abstractmethod
-    def supports_returning(self) -> bool:
-        """Do we support the `RETURNING` clause in insert/update/delete?"""
-        ...
-
     @abc.abstractmethod
     def check_database(
         self, db_conn: ConnectionType, allow_outdated_version: bool = False
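
The removed `supports_returning` declaration used the standard pattern for an abstract read-only attribute: `@property` stacked on top of `@abc.abstractmethod`, in that order, forces every concrete engine to provide a value. A self-contained sketch of the pattern (toy class names):

    import abc

    class Engine(abc.ABC):
        @property
        @abc.abstractmethod
        def supports_returning(self) -> bool:
            """Do we support the `RETURNING` clause in insert/update/delete?"""
            ...

    class ToyEngine(Engine):
        @property
        def supports_returning(self) -> bool:
            return True

    assert ToyEngine().supports_returning
    # Engine() itself raises TypeError until the property is implemented.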
@@ -133,11 +133,6 @@ class PostgresEngine(BaseDatabaseEngine):
         """Do we support using `a = ANY(?)` and passing a list"""
         return True

-    @property
-    def supports_returning(self) -> bool:
-        """Do we support the `RETURNING` clause in insert/update/delete?"""
-        return True
-
     def is_deadlock(self, error):
         if isinstance(error, self.module.DatabaseError):
             # https://www.postgresql.org/docs/current/static/errcodes-appendix.html
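
Capability flags like `supports_returning` let call sites choose between a single-round-trip `INSERT ... RETURNING` (supported by PostgreSQL, and by SQLite from 3.35) and an insert-then-select fallback. A hedged sketch of how such a flag is typically consumed (invented helper and table, not Synapse's actual call site):

    def insert_and_get_id(txn, engine, token: str) -> int:
        # `engine` is anything exposing a supports_returning bool, as above.
        if engine.supports_returning:
            # One round trip: the database returns the generated id.
            txn.execute(
                "INSERT INTO example_tokens (token) VALUES (?) RETURNING id",
                (token,),
            )
            return txn.fetchone()[0]
        # Fallback for engines without RETURNING: insert, then read it back.
        txn.execute("INSERT INTO example_tokens (token) VALUES (?)", (token,))
        txn.execute("SELECT id FROM example_tokens WHERE token = ?", (token,))
        return txn.fetchone()[0]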
Some files were not shown because too many files have changed in this diff