
Compare commits


2 Commits

Author SHA1 Message Date
Travis Ralston
177f2b838c changelog 2019-05-22 19:16:02 -06:00
Travis Ralston
f9d7d3aa89 Remove m.relates_to from events if the client set it to null
It appears that Python only checks whether the key exists in the dictionary, not whether it holds a useful value. This means that when a client submits an (otherwise valid) request with `m.relates_to: null` and Synapse later reads it, it hits a `None` reference error on access.

This is easier than guarding every place where the value could be None.
2019-05-22 19:14:10 -06:00
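
The fix described above amounts to stripping the explicit null before any relation handling runs. A minimal sketch of the approach, using a hypothetical helper rather than Synapse's actual code:

```python
def strip_null_relates_to(content: dict) -> dict:
    """Drop `m.relates_to` from event content if the client set it to null."""
    if content.get("m.relates_to") is None:
        # Covers both a missing key and an explicit null; pop() with a
        # default is safe either way.
        content.pop("m.relates_to", None)
    return content

# A key that merely *exists* passes `"m.relates_to" in content` checks,
# so an explicit null must be removed, not just ignored.
event_content = {"body": "hello", "m.relates_to": None}
assert "m.relates_to" not in strip_null_relates_to(event_content)
```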
231 changed files with 2316 additions and 14338 deletions

View File

@@ -36,6 +36,8 @@ steps:
image: "python:3.6"
propagate-environment: true
- wait
- command:
- "python -m pip install tox"
- "tox -e check-sampleconfig"
@@ -44,8 +46,6 @@ steps:
- docker#v3.0.1:
image: "python:3.6"
- wait
- command:
- "python -m pip install tox"
- "tox -e py27,codecov"
@@ -56,12 +56,6 @@ steps:
- docker#v3.0.1:
image: "python:2.7"
propagate-environment: true
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2
- command:
- "python -m pip install tox"
@@ -73,12 +67,6 @@ steps:
- docker#v3.0.1:
image: "python:3.5"
propagate-environment: true
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2
- command:
- "python -m pip install tox"
@@ -90,12 +78,6 @@ steps:
- docker#v3.0.1:
image: "python:3.6"
propagate-environment: true
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2
- command:
- "python -m pip install tox"
@@ -107,12 +89,6 @@ steps:
- docker#v3.0.1:
image: "python:3.7"
propagate-environment: true
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2
- command:
- "python -m pip install tox"
@@ -124,12 +100,6 @@ steps:
- docker#v3.0.1:
image: "python:2.7"
propagate-environment: true
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2
- label: ":python: 2.7 / :postgres: 9.4"
env:
@@ -141,12 +111,6 @@ steps:
run: testenv
config:
- .buildkite/docker-compose.py27.pg94.yaml
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2
- label: ":python: 2.7 / :postgres: 9.5"
env:
@@ -158,12 +122,6 @@ steps:
run: testenv
config:
- .buildkite/docker-compose.py27.pg95.yaml
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2
- label: ":python: 3.5 / :postgres: 9.4"
env:
@@ -175,12 +133,6 @@ steps:
run: testenv
config:
- .buildkite/docker-compose.py35.pg94.yaml
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2
- label: ":python: 3.5 / :postgres: 9.5"
env:
@@ -192,12 +144,6 @@ steps:
run: testenv
config:
- .buildkite/docker-compose.py35.pg95.yaml
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2
- label: ":python: 3.7 / :postgres: 9.5"
env:
@@ -209,12 +155,6 @@ steps:
run: testenv
config:
- .buildkite/docker-compose.py37.pg95.yaml
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2
- label: ":python: 3.7 / :postgres: 11"
env:
@@ -226,9 +166,3 @@ steps:
run: testenv
config:
- .buildkite/docker-compose.py37.pg11.yaml
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2

View File

@@ -1,8 +1,116 @@
version: 2
jobs:
dockerhubuploadrelease:
machine: true
steps:
- checkout
- run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG}-py2 .
- run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG} -t matrixdotorg/synapse:${CIRCLE_TAG}-py3 --build-arg PYTHON_VERSION=3.6 .
- run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
- run: docker push matrixdotorg/synapse:${CIRCLE_TAG}
- run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py2
- run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py3
dockerhubuploadlatest:
machine: true
steps:
- checkout
- run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest-py2 .
- run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest -t matrixdotorg/synapse:latest-py3 --build-arg PYTHON_VERSION=3.6 .
- run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
- run: docker push matrixdotorg/synapse:latest
- run: docker push matrixdotorg/synapse:latest-py2
- run: docker push matrixdotorg/synapse:latest-py3
sytestpy2:
docker:
- image: matrixdotorg/sytest-synapsepy2
working_directory: /src
steps:
- checkout
- run: /synapse_sytest.sh
- store_artifacts:
path: /logs
destination: logs
- store_test_results:
path: /logs
sytestpy2postgres:
docker:
- image: matrixdotorg/sytest-synapsepy2
working_directory: /src
steps:
- checkout
- run: POSTGRES=1 /synapse_sytest.sh
- store_artifacts:
path: /logs
destination: logs
- store_test_results:
path: /logs
sytestpy2merged:
docker:
- image: matrixdotorg/sytest-synapsepy2
working_directory: /src
steps:
- checkout
- run: bash .circleci/merge_base_branch.sh
- run: /synapse_sytest.sh
- store_artifacts:
path: /logs
destination: logs
- store_test_results:
path: /logs
sytestpy2postgresmerged:
docker:
- image: matrixdotorg/sytest-synapse:dinsic
- image: matrixdotorg/sytest-synapsepy2
working_directory: /src
steps:
- checkout
- run: bash .circleci/merge_base_branch.sh
- run: POSTGRES=1 /synapse_sytest.sh
- store_artifacts:
path: /logs
destination: logs
- store_test_results:
path: /logs
sytestpy3:
docker:
- image: matrixdotorg/sytest-synapsepy3
working_directory: /src
steps:
- checkout
- run: /synapse_sytest.sh
- store_artifacts:
path: /logs
destination: logs
- store_test_results:
path: /logs
sytestpy3postgres:
docker:
- image: matrixdotorg/sytest-synapsepy3
working_directory: /src
steps:
- checkout
- run: POSTGRES=1 /synapse_sytest.sh
- store_artifacts:
path: /logs
destination: logs
- store_test_results:
path: /logs
sytestpy3merged:
docker:
- image: matrixdotorg/sytest-synapsepy3
working_directory: /src
steps:
- checkout
- run: bash .circleci/merge_base_branch.sh
- run: /synapse_sytest.sh
- store_artifacts:
path: /logs
destination: logs
- store_test_results:
path: /logs
sytestpy3postgresmerged:
docker:
- image: matrixdotorg/sytest-synapsepy3
working_directory: /src
steps:
- checkout
@@ -18,7 +126,45 @@ workflows:
version: 2
build:
jobs:
- sytestpy2:
filters:
branches:
only: /develop|master|release-.*/
- sytestpy2postgres:
filters:
branches:
only: /develop|master|release-.*/
- sytestpy3:
filters:
branches:
only: /develop|master|release-.*/
- sytestpy3postgres:
filters:
branches:
only: /develop|master|release-.*/
- sytestpy2merged:
filters:
branches:
ignore: /develop|master|release-.*/
- sytestpy2postgresmerged:
filters:
branches:
ignore: /develop|master|release-.*/
- sytestpy3merged:
filters:
branches:
ignore: /develop|master|release-.*/
- sytestpy3postgresmerged:
filters:
branches:
ignore: /develop|master|release-.*/
- dockerhubuploadrelease:
filters:
tags:
only: /v[0-9].[0-9]+.[0-9]+.*/
branches:
ignore: /.*/
- dockerhubuploadlatest:
filters:
branches:
only: master

View File

@@ -9,12 +9,11 @@ source $BASH_ENV
if [[ -z "${CIRCLE_PR_NUMBER}" ]]
then
echo "Can't figure out what the PR number is! Assuming merge target is dinsic."
echo "Can't figure out what the PR number is! Assuming merge target is develop."
# It probably hasn't had a PR opened yet. Since all PRs for dinsic land on
# dinsic, we can probably assume it's based on it and will be merged into
# it.
GITBASE="dinsic"
# It probably hasn't had a PR opened yet. Since all PRs land on develop, we
# can probably assume it's based on it and will be merged into it.
GITBASE="develop"
else
# Get the reference, using the GitHub API
GITBASE=`wget -O- https://api.github.com/repos/matrix-org/synapse/pulls/${CIRCLE_PR_NUMBER} | jq -r '.base.ref'`
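
For reference, a rough Python equivalent of the script's logic (illustrative only; CI uses the `wget`/`jq` one-liner above, and the GitHub API call is the same):

```python
import json
import os
from urllib.request import urlopen

pr_number = os.environ.get("CIRCLE_PR_NUMBER")
if not pr_number:
    # No PR opened yet: all PRs land on develop, so assume that branch.
    gitbase = "develop"
else:
    # Ask the GitHub API which branch the PR targets (the `.base.ref` field).
    url = f"https://api.github.com/repos/matrix-org/synapse/pulls/{pr_number}"
    with urlopen(url) as resp:
        gitbase = json.load(resp)["base"]["ref"]
print(gitbase)
```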

View File

@@ -1,137 +1,8 @@
Synapse 1.0.0 (2019-06-11)
==========================
Bugfixes
--------
- Fix bug where attempting to send transactions with large number of EDUs can fail. ([\#5418](https://github.com/matrix-org/synapse/issues/5418))
Improved Documentation
----------------------
- Expand the federation guide to include relevant content from the MSC1711 FAQ ([\#5419](https://github.com/matrix-org/synapse/issues/5419))
Internal Changes
----------------
- Move password reset links to /_matrix/client/unstable namespace. ([\#5424](https://github.com/matrix-org/synapse/issues/5424))
Synapse 1.0.0rc3 (2019-06-10)
=============================
Security: Fix authentication bug introduced in 1.0.0rc1. Please upgrade to rc3 immediately.
Synapse 1.0.0rc2 (2019-06-10)
=============================
Bugfixes
--------
- Remove redundant warning about key server response validation. ([\#5392](https://github.com/matrix-org/synapse/issues/5392))
- Fix bug where old keys stored in the database with a null valid until timestamp caused all verification requests for that key to fail. ([\#5415](https://github.com/matrix-org/synapse/issues/5415))
- Fix excessive memory using with default `federation_verify_certificates: true` configuration. ([\#5417](https://github.com/matrix-org/synapse/issues/5417))
Synapse 1.0.0rc1 (2019-06-07)
=============================
Features
--------
- Synapse now more efficiently collates room statistics. ([\#4338](https://github.com/matrix-org/synapse/issues/4338), [\#5260](https://github.com/matrix-org/synapse/issues/5260), [\#5324](https://github.com/matrix-org/synapse/issues/5324))
- Add experimental support for relations (aka reactions and edits). ([\#5220](https://github.com/matrix-org/synapse/issues/5220))
- Ability to configure default room version. ([\#5223](https://github.com/matrix-org/synapse/issues/5223), [\#5249](https://github.com/matrix-org/synapse/issues/5249))
- Allow configuring a range for the account validity startup job. ([\#5276](https://github.com/matrix-org/synapse/issues/5276))
- CAS login will now hit the r0 API, not the deprecated v1 one. ([\#5286](https://github.com/matrix-org/synapse/issues/5286))
- Validate federation server TLS certificates by default (implements [MSC1711](https://github.com/matrix-org/matrix-doc/blob/master/proposals/1711-x509-for-federation.md)). ([\#5359](https://github.com/matrix-org/synapse/issues/5359))
- Update /_matrix/client/versions to reference support for r0.5.0. ([\#5360](https://github.com/matrix-org/synapse/issues/5360))
- Add a script to generate new signing-key files. ([\#5361](https://github.com/matrix-org/synapse/issues/5361))
- Update upgrade and installation guides ahead of 1.0. ([\#5371](https://github.com/matrix-org/synapse/issues/5371))
- Replace the `perspectives` configuration section with `trusted_key_servers`, and make validating the signatures on responses optional (since TLS will do this job for us). ([\#5374](https://github.com/matrix-org/synapse/issues/5374))
- Add ability to perform password reset via email without trusting the identity server. ([\#5377](https://github.com/matrix-org/synapse/issues/5377))
- Set default room version to v4. ([\#5379](https://github.com/matrix-org/synapse/issues/5379))
Bugfixes
--------
- Fixes the client-server API not sending "m.heroes" to lazy-load /sync requests when a room's name or its canonical alias is empty. Thanks to @dnaf for this work! ([\#5089](https://github.com/matrix-org/synapse/issues/5089))
- Prevent federation device list updates breaking when processing multiple updates at once. ([\#5156](https://github.com/matrix-org/synapse/issues/5156))
- Fix worker registration bug caused by ClientReaderSlavedStore being unable to see get_profileinfo. ([\#5200](https://github.com/matrix-org/synapse/issues/5200))
- Fix race when backfilling in rooms with worker mode. ([\#5221](https://github.com/matrix-org/synapse/issues/5221))
- Fix appservice timestamp massaging. ([\#5233](https://github.com/matrix-org/synapse/issues/5233))
- Ensure that server_keys fetched via a notary server are correctly signed. ([\#5251](https://github.com/matrix-org/synapse/issues/5251))
- Show the correct error when logging out and access token is missing. ([\#5256](https://github.com/matrix-org/synapse/issues/5256))
- Fix error code when there is an invalid parameter on /_matrix/client/r0/publicRooms ([\#5257](https://github.com/matrix-org/synapse/issues/5257))
- Fix error when downloading thumbnail with missing width/height parameter. ([\#5258](https://github.com/matrix-org/synapse/issues/5258))
- Fix schema update for account validity. ([\#5268](https://github.com/matrix-org/synapse/issues/5268))
- Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. ([\#5274](https://github.com/matrix-org/synapse/issues/5274), [\#5278](https://github.com/matrix-org/synapse/issues/5278), [\#5291](https://github.com/matrix-org/synapse/issues/5291))
- Fix "db txn 'update_presence' from sentinel context" log messages. ([\#5275](https://github.com/matrix-org/synapse/issues/5275))
- Fix dropped logcontexts during high outbound traffic. ([\#5277](https://github.com/matrix-org/synapse/issues/5277))
- Fix a bug where it is not possible to get events in the federation format with the request `GET /_matrix/client/r0/rooms/{roomId}/messages`. ([\#5293](https://github.com/matrix-org/synapse/issues/5293))
- Fix performance problems with the rooms stats background update. ([\#5294](https://github.com/matrix-org/synapse/issues/5294))
- Fix noisy 'no key for server' logs. ([\#5300](https://github.com/matrix-org/synapse/issues/5300))
- Fix bug where a notary server would sometimes forget old keys. ([\#5307](https://github.com/matrix-org/synapse/issues/5307))
- Prevent users from setting huge displaynames and avatar URLs. ([\#5309](https://github.com/matrix-org/synapse/issues/5309))
- Fix handling of failures when processing incoming events where calling `/event_auth` on remote server fails. ([\#5317](https://github.com/matrix-org/synapse/issues/5317))
- Ensure that we have an up-to-date copy of the signing key when validating incoming federation requests. ([\#5321](https://github.com/matrix-org/synapse/issues/5321))
- Fix various problems which made the signing-key notary server time out for some requests. ([\#5333](https://github.com/matrix-org/synapse/issues/5333))
- Fix bug which would make certain operations (such as room joins) block for 20 minutes while attempting to fetch verification keys. ([\#5334](https://github.com/matrix-org/synapse/issues/5334))
- Fix a bug where we could rapidly mark a server as unreachable even though it was only down for a few minutes. ([\#5335](https://github.com/matrix-org/synapse/issues/5335), [\#5340](https://github.com/matrix-org/synapse/issues/5340))
- Fix a bug where account validity renewal emails could only be sent when email notifs were enabled. ([\#5341](https://github.com/matrix-org/synapse/issues/5341))
- Fix failure when fetching batches of events during backfill, etc. ([\#5342](https://github.com/matrix-org/synapse/issues/5342))
- Add a new room version where the timestamps on events are checked against the validity periods on signing keys. ([\#5348](https://github.com/matrix-org/synapse/issues/5348), [\#5354](https://github.com/matrix-org/synapse/issues/5354))
- Fix room stats and presence background updates to correctly handle missing events. ([\#5352](https://github.com/matrix-org/synapse/issues/5352))
- Include left members in room summaries' heroes. ([\#5355](https://github.com/matrix-org/synapse/issues/5355))
- Fix `federation_custom_ca_list` configuration option. ([\#5362](https://github.com/matrix-org/synapse/issues/5362))
- Fix missing logcontext warnings on shutdown. ([\#5369](https://github.com/matrix-org/synapse/issues/5369))
Improved Documentation
----------------------
- Fix docs on resetting the user directory. ([\#5282](https://github.com/matrix-org/synapse/issues/5282))
- Fix notes about ACME in the MSC1711 faq. ([\#5357](https://github.com/matrix-org/synapse/issues/5357))
Internal Changes
----------------
- Synapse will now serve the experimental "room complexity" API endpoint. ([\#5216](https://github.com/matrix-org/synapse/issues/5216))
- The base classes for the v1 and v2_alpha REST APIs have been unified. ([\#5226](https://github.com/matrix-org/synapse/issues/5226), [\#5328](https://github.com/matrix-org/synapse/issues/5328))
- Simplifications and comments in do_auth. ([\#5227](https://github.com/matrix-org/synapse/issues/5227))
- Remove urllib3 pin as requests 2.22.0 has been released supporting urllib3 1.25.2. ([\#5230](https://github.com/matrix-org/synapse/issues/5230))
- Preparatory work for key-validity features. ([\#5232](https://github.com/matrix-org/synapse/issues/5232), [\#5234](https://github.com/matrix-org/synapse/issues/5234), [\#5235](https://github.com/matrix-org/synapse/issues/5235), [\#5236](https://github.com/matrix-org/synapse/issues/5236), [\#5237](https://github.com/matrix-org/synapse/issues/5237), [\#5244](https://github.com/matrix-org/synapse/issues/5244), [\#5250](https://github.com/matrix-org/synapse/issues/5250), [\#5296](https://github.com/matrix-org/synapse/issues/5296), [\#5299](https://github.com/matrix-org/synapse/issues/5299), [\#5343](https://github.com/matrix-org/synapse/issues/5343), [\#5347](https://github.com/matrix-org/synapse/issues/5347), [\#5356](https://github.com/matrix-org/synapse/issues/5356))
- Specify the type of reCAPTCHA key to use. ([\#5283](https://github.com/matrix-org/synapse/issues/5283))
- Improve sample config for monthly active user blocking. ([\#5284](https://github.com/matrix-org/synapse/issues/5284))
- Remove spurious debug from MatrixFederationHttpClient.get_json. ([\#5287](https://github.com/matrix-org/synapse/issues/5287))
- Improve logging for logcontext leaks. ([\#5288](https://github.com/matrix-org/synapse/issues/5288))
- Clarify that the admin change password API logs the user out. ([\#5303](https://github.com/matrix-org/synapse/issues/5303))
- New installs will now use the v54 full schema, rather than the full schema v14 and applying incremental updates to v54. ([\#5320](https://github.com/matrix-org/synapse/issues/5320))
- Improve docstrings on MatrixFederationClient. ([\#5332](https://github.com/matrix-org/synapse/issues/5332))
- Clean up FederationClient.get_events for clarity. ([\#5344](https://github.com/matrix-org/synapse/issues/5344))
- Various improvements to debug logging. ([\#5353](https://github.com/matrix-org/synapse/issues/5353))
- Don't run CI build checks until sample config check has passed. ([\#5370](https://github.com/matrix-org/synapse/issues/5370))
- Automatically retry buildkite builds (max twice) when an agent is lost. ([\#5380](https://github.com/matrix-org/synapse/issues/5380))
Synapse 0.99.5.2 (2019-05-30)
=============================
Bugfixes
--------
- Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. ([\#5274](https://github.com/matrix-org/synapse/issues/5274), [\#5278](https://github.com/matrix-org/synapse/issues/5278), [\#5291](https://github.com/matrix-org/synapse/issues/5291))
Synapse 0.99.5.1 (2019-05-22)
=============================
0.99.5.1 supersedes 0.99.5 due to malformed debian changelog - no functional changes.
No significant changes.
Synapse 0.99.5 (2019-05-22)
===========================

View File

@@ -1,14 +1,13 @@
- [Installing Synapse](#installing-synapse)
- [Installing from source](#installing-from-source)
- [Platform-Specific Instructions](#platform-specific-instructions)
- [Troubleshooting Installation](#troubleshooting-installation)
- [Prebuilt packages](#prebuilt-packages)
- [Setting up Synapse](#setting-up-synapse)
- [TLS certificates](#tls-certificates)
- [Email](#email)
- [Registering a user](#registering-a-user)
- [Setting up a TURN server](#setting-up-a-turn-server)
- [URL previews](#url-previews)
* [Installing Synapse](#installing-synapse)
* [Installing from source](#installing-from-source)
* [Platform-Specific Instructions](#platform-specific-instructions)
* [Troubleshooting Installation](#troubleshooting-installation)
* [Prebuilt packages](#prebuilt-packages)
* [Setting up Synapse](#setting-up-synapse)
* [TLS certificates](#tls-certificates)
* [Registering a user](#registering-a-user)
* [Setting up a TURN server](#setting-up-a-turn-server)
* [URL previews](#url-previews)
# Installing Synapse
@@ -395,22 +394,8 @@ To configure Synapse to expose an HTTPS port, you will need to edit
instance, if using certbot, use `fullchain.pem` as your certificate, not
`cert.pem`).
For a more detailed guide to configuring your server for federation, see
[federate.md](docs/federate.md)
## Email
It is desirable for Synapse to have the capability to send email. For example,
this is required to support the 'password reset' feature.
To configure an SMTP server for Synapse, modify the configuration section
headed ``email``, and be sure to have at least the ``smtp_host``, ``smtp_port``
and ``notif_from`` fields filled out. You may also need to set ``smtp_user``,
``smtp_pass``, and ``require_transport_security``.
If Synapse is not configured with an SMTP server, password reset via email will
be disabled by default.
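
A quick, stdlib-only way to sanity-check those SMTP values before committing them to the config (illustrative; skip the STARTTLS call if your server does not require transport security):

```python
import smtplib

# Values mirror the config fields: smtp_host, smtp_port, smtp_user, smtp_pass.
with smtplib.SMTP("localhost", 25, timeout=10) as smtp:
    smtp.starttls()  # only if require_transport_security is enabled
    smtp.login("exampleusername", "examplepassword")
print("SMTP settings look usable")
```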
For those of you upgrading your TLS certificate in readiness for Synapse 1.0,
please take a look at [our guide](docs/MSC1711_certificates_FAQ.md#configuring-certificates-for-compatibility-with-synapse-100).
## Registering a user

View File

@@ -9,19 +9,14 @@ include demo/*.py
include demo/*.sh
recursive-include synapse/storage/schema *.sql
recursive-include synapse/storage/schema *.sql.postgres
recursive-include synapse/storage/schema *.sql.sqlite
recursive-include synapse/storage/schema *.py
recursive-include synapse/storage/schema *.txt
recursive-include docs *
recursive-include scripts *
recursive-include scripts-dev *
recursive-include synapse *.pyi
recursive-include tests *.pem
recursive-include tests *.py
include tests/http/ca.crt
include tests/http/ca.key
include tests/http/server.key
recursive-include synapse/res *
recursive-include synapse/static *.css
@@ -48,8 +43,3 @@ prune .buildkite
exclude jenkins*
recursive-exclude jenkins *.sh
# FIXME: we shouldn't have these templates here
recursive-include res/templates-dinsic *.css
recursive-include res/templates-dinsic *.html
recursive-include res/templates-dinsic *.txt

View File

@@ -49,55 +49,6 @@ returned by the Client-Server API:
# configured on port 443.
curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"
Upgrading to v1.0
=================
Validation of TLS certificates
------------------------------
Synapse v1.0 is the first release to enforce
validation of TLS certificates for the federation API. It is therefore
essential that your certificates are correctly configured. See the `FAQ
<docs/MSC1711_certificates_FAQ.md>`_ for more information.
Note, v1.0 installations will also no longer be able to federate with servers
that have not correctly configured their certificates.
In rare cases, it may be desirable to disable certificate checking: for
example, it might be essential to be able to federate with a given legacy
server in a closed federation. This can be done in one of two ways:
* Configure the global switch ``federation_verify_certificates`` to ``false``.
* Configure a whitelist of server domains to trust via ``federation_certificate_verification_whitelist``.
See the `sample configuration file <docs/sample_config.yaml>`_
for more details on these settings.
Email
-----
When a user requests a password reset, Synapse will send an email to the
user to confirm the request.
Previous versions of Synapse delegated the job of sending this email to an
identity server. If the identity server was somehow malicious or became
compromised, it would be theoretically possible to hijack an account through
this means.
Therefore, by default, Synapse v1.0 will send the confirmation email itself. If
Synapse is not configured with an SMTP server, password reset via email will be
disabled.
To configure an SMTP server for Synapse, modify the configuration section
headed ``email``, and be sure to have at least the ``smtp_host``, ``smtp_port``
and ``notif_from`` fields filled out. You may also need to set ``smtp_user``,
``smtp_pass``, and ``require_transport_security``.
If you are absolutely certain that you wish to continue using an identity
server for password resets, set ``trust_identity_server_for_password_resets`` to ``true``.
See the `sample configuration file <docs/sample_config.yaml>`_
for more details on these settings.
Upgrading to v0.99.0
====================

changelog.d/4338.feature Normal file
View File

@@ -0,0 +1 @@
Synapse now more efficiently collates room statistics.

View File

@@ -1 +0,0 @@
Adds auth_profile_reqs option to require access_token to GET /profile endpoints on CS API.

View File

@@ -1 +0,0 @@
Add workarounds for pep-517 install errors.

changelog.d/5200.bugfix Normal file
View File

@@ -0,0 +1 @@
Fix worker registration bug caused by ClientReaderSlavedStore being unable to see get_profileinfo.

View File

@@ -1 +0,0 @@
Allow server admins to define and enforce a password policy (MSC2000).

changelog.d/5230.misc Normal file
View File

@@ -0,0 +1 @@
Remove urllib3 pin as requests 2.22.0 has been released supporting urllib3 1.25.2.

changelog.d/5232.misc Normal file
View File

@@ -0,0 +1 @@
Run black on synapse.crypto.keyring.

changelog.d/5239.bugfix Normal file
View File

@@ -0,0 +1 @@
Fix 500 Internal Server Error when sending an event with `m.relates_to: null`.

View File

@@ -1 +0,0 @@
Allow expired user to trigger renewal email sending manually.

View File

@@ -1 +0,0 @@
Track deactivated accounts in the database.

View File

@@ -1 +0,0 @@
Fix a bug where deactivated users could receive renewal emails if the account validity feature is on.

View File

@@ -1 +0,0 @@
Add unique index to the profile_replication_status table.

View File

@@ -1 +0,0 @@
Add configuration option to hide new users from the user directory.

View File

@@ -1 +0,0 @@
Allow server admins to define implementations of extra rules for allowing or denying incoming events.

View File

@@ -1 +0,0 @@
Fix missing invite state after exchanging 3PID invites over federation.

View File

@@ -1,2 +0,0 @@
Track deactivated accounts in the database.

View File

@@ -1 +0,0 @@
Allow server admins to define implementations of extra rules for allowing or denying incoming events.

View File

@@ -1 +0,0 @@
Allow server admins to define implementations of extra rules for allowing or denying incoming events.

View File

@@ -1 +0,0 @@
Track deactivated accounts in the database.

View File

@@ -1 +0,0 @@
Split public rooms directory auth config in two settings, in order to manage client auth independently from the federation part of it. Obsoletes the "restrict_public_rooms_to_local_users" configuration setting. If "restrict_public_rooms_to_local_users" is set in the config, Synapse will act as if both new options are enabled, i.e. require authentication through the client API and deny federation requests.

View File

@@ -1 +0,0 @@
Fix a bug that would cause invited users to receive several emails for a single 3PID invite in case the inviter is rate limited.

View File

@@ -1 +0,0 @@
Implement new custom event rules for power levels.

View File

@@ -1 +0,0 @@
Fix newly-registered users not being able to lookup their own profile without joining a room.

View File

@@ -1 +0,0 @@
Fix 3PID invite to invite association detection in the Tchap room access rules.

View File

@@ -1 +0,0 @@
Force the access rule to be "restricted" if the join rule is "public".

View File

@@ -1 +0,0 @@
Allow looping calls to be given arguments.

View File

@@ -1 +0,0 @@
Allow defining HTML templates to serve the user on account renewal attempt when using the account validity feature.

View File

@@ -1 +0,0 @@
Implement per-room message retention policies.

debian/changelog vendored
View File

@@ -1,15 +1,3 @@
matrix-synapse-py3 (1.0.0) stable; urgency=medium
* New synapse release 1.0.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 11 Jun 2019 17:09:53 +0100
matrix-synapse-py3 (0.99.5.2) stable; urgency=medium
* New synapse release 0.99.5.2.
-- Synapse Packaging team <packages@matrix.org> Thu, 30 May 2019 16:28:07 +0100
matrix-synapse-py3 (0.99.5.1) stable; urgency=medium
* New synapse release 0.99.5.1.

View File

@@ -14,7 +14,7 @@ This image is designed to run either with an automatically generated
configuration file or with a custom configuration that requires manual editing.
An easy way to make use of this image is via docker-compose. See the
[contrib/docker](https://github.com/matrix-org/synapse/tree/master/contrib/docker) section of the synapse project for
[contrib/docker](../contrib/docker) section of the synapse project for
examples.
### Without Compose (harder)

View File

@@ -7,7 +7,6 @@ Requires a public/private key pair from:
https://developers.google.com/recaptcha/
Must be a reCAPTCHA v2 key using the "I'm not a robot" Checkbox option
Setting ReCaptcha Keys
----------------------

View File

@@ -1,22 +1,5 @@
# MSC1711 Certificates FAQ
## Historical Note
This document was originally written to guide server admins through the upgrade
path towards Synapse 1.0. Specifically,
[MSC1711](https://github.com/matrix-org/matrix-doc/blob/master/proposals/1711-x509-for-federation.md)
required that all servers present valid TLS certificates on their federation
API. Admins were encouraged to achieve compliance from version 0.99.0 (released
in February 2019) ahead of version 1.0 (released June 2019), which enforces the
certificate checks.
Much of what follows is now outdated since most admins will have already
upgraded, however it may be of use to those with old installs returning to the
project.
If you are setting up a server from scratch you almost certainly should look at
the [installation guide](INSTALL.md) instead.
## Introduction
The goal of Synapse 0.99.0 is to act as a stepping stone to Synapse 1.0.0. It
supports the r0.1 release of the server to server specification, but is
compatible with both the legacy Matrix federation behaviour (pre-r0.1) as well
@@ -85,14 +68,16 @@ Admins should upgrade and configure a valid CA cert. Homeservers that require a
.well-known entry (see below), should retain their SRV record and use it
alongside their .well-known record.
**10th June 2019 - Synapse 1.0.0 is released**
**>= 5th March 2019 - Synapse 1.0.0 is released**
1.0.0 is scheduled for release on 10th June. In
1.0.0 will land no sooner than 1 month after 0.99.0, leaving server admins one
month after 5th February to upgrade to 0.99.0 and deploy their certificates. In
accordance with the [S2S spec](https://matrix.org/docs/spec/server_server/r0.1.0.html)
1.0.0 will enforce certificate validity. This means that any homeserver without a
valid certificate after this point will no longer be able to federate with
1.0.0 servers.
## Configuring certificates for compatibility with Synapse 1.0.0
### If you do not currently have an SRV record
@@ -160,11 +145,12 @@ You can do this with a `.well-known` file as follows:
1. Keep the SRV record in place - it is needed for backwards compatibility
with Synapse 0.34 and earlier.
2. Give Synapse a certificate corresponding to the target domain
(`customer.example.net` in the above example). You can either use Synapse's
built-in [ACME support](./ACME.md) for this (via the `domain` parameter in
the `acme` section), or acquire a certificate yourself and give it to
Synapse via `tls_certificate_path` and `tls_private_key_path`.
2. Give synapse a certificate corresponding to the target domain
(`customer.example.net` in the above example). Currently Synapse's ACME
support [does not support
this](https://github.com/matrix-org/synapse/issues/4552), so you will have
to acquire a certificate yourself and give it to Synapse via
`tls_certificate_path` and `tls_private_key_path`.
3. Restart Synapse to ensure the new certificate is loaded.

View File

@@ -69,7 +69,7 @@ An empty body may be passed for backwards compatibility.
Reset password
==============
Changes the password of another user. This will automatically log the user out of all their devices.
Changes the password of another user.
The api is::

View File

@@ -14,9 +14,9 @@ up and will work provided you set the ``server_name`` to match your
machine's public DNS hostname, and provide Synapse with a TLS certificate
which is valid for your ``server_name``.
Once federation has been configured, you should be able to join a room over
federation. A good place to start is ``#synapse:matrix.org`` - a room for
Synapse admins.
Once you have completed the steps necessary to federate, you should be able to
join a room via federation. (A good place to start is ``#synapse:matrix.org`` - a
room for Synapse admins.)
## Delegation
@@ -98,77 +98,6 @@ _matrix._tcp.<server_name>``. In our example, we would expect this:
Note that the target of a SRV record cannot be an alias (CNAME record): it has to point
directly to the server hosting the synapse instance.
### Delegation FAQ
#### When do I need a SRV record or .well-known URI?
If your homeserver listens on the default federation port (8448), and your
`server_name` points to the host that your homeserver runs on, you do not need an SRV
record or `.well-known/matrix/server` URI.
For instance, if you registered `example.com` and pointed its DNS A record at a
fresh server, you could install Synapse on that host,
giving it a `server_name` of `example.com`, and once [ACME](acme.md) support is enabled,
it would automatically generate a valid TLS certificate for you via Let's Encrypt
and no SRV record or .well-known URI would be needed.
This is the common case, although you can add an SRV record or
`.well-known/matrix/server` URI for completeness if you wish.
**However**, if your server does not listen on port 8448, or if your `server_name`
does not point to the host that your homeserver runs on, you will need to let
other servers know how to find it. The way to do this is via .well-known or an
SRV record.
#### I have created a .well-known URI. Do I still need an SRV record?
As of Synapse 0.99, Synapse will first check for the existence of a .well-known
URI and follow any delegation it suggests. It will only then check for the
existence of an SRV record.
That means that the SRV record will often be redundant. However, you should
remember that there may still be older versions of Synapse in the federation
which do not understand .well-known URIs, so if you removed your SRV record
you would no longer be able to federate with them.
It is therefore best to leave the SRV record in place for now. Synapse 0.34 and
earlier will follow the SRV record (and not care about the invalid
certificate). Synapse 0.99 and later will follow the .well-known URI, with the
correct certificate chain.
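
The lookup order described here (.well-known first, then SRV, then the server name itself on port 8448) can be sketched in Python as follows. This is illustrative only, and the SRV query assumes the third-party `dnspython` package (version 2.0 or later for `resolve()`):

```python
import json
from urllib.request import urlopen

import dns.resolver  # pip install dnspython (>= 2.0)


def resolve_delegation(server_name: str) -> str:
    # 1. A .well-known URI takes precedence.
    try:
        with urlopen(f"https://{server_name}/.well-known/matrix/server") as resp:
            return json.load(resp)["m.server"]
    except Exception:
        pass
    # 2. Otherwise, look for an SRV record.
    try:
        answer = dns.resolver.resolve(f"_matrix._tcp.{server_name}", "SRV")
        record = answer[0]
        return f"{record.target.to_text().rstrip('.')}:{record.port}"
    except Exception:
        pass
    # 3. Default: the server name itself on the default federation port.
    return f"{server_name}:8448"
```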
#### Can I manage my own certificates rather than having Synapse renew certificates itself?
Yes, you are welcome to manage your certificates yourself. Synapse will only
attempt to obtain certificates from Let's Encrypt if you configure it to do
so. The only requirement is that there is a valid TLS cert present for
federation endpoints.
#### Do you still recommend against using a reverse proxy on the federation port?
We no longer actively recommend against using a reverse proxy. Many admins will
find it easier to direct federation traffic to a reverse proxy and manage their
own TLS certificates, and this is a supported configuration.
See [reverse_proxy.rst](reverse_proxy.rst) for information on setting up a
reverse proxy.
#### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy?
Practically speaking, this is no longer necessary.
If you are using a reverse proxy for all of your TLS traffic, then you can set
`no_tls: True` in the Synapse config. In that case, the only reason Synapse
needs the certificate is to populate a legacy `tls_fingerprints` field in the
federation API. This is ignored by Synapse 0.99.0 and later, and the only time
pre-0.99 Synapses will check it is when attempting to fetch the server keys -
and generally this is delegated via `matrix.org`, which will be running a modern
version of Synapse.
#### Do I need the same certificate for the client and federation port?
No. There is nothing stopping you from using different certificates,
particularly if you are using a reverse proxy. However, Synapse will use the
same certificate on any ports where TLS is configured.
## Troubleshooting
You can use the [federation tester](

View File

@@ -77,25 +77,11 @@ pid_file: DATADIR/homeserver.pid
#
#require_auth_for_profile_requests: true
# If set to 'false', requires authentication to access the server's public rooms
# directory through the client API. Defaults to 'true'.
# If set to 'true', requires authentication to access the server's
# public rooms directory through the client API, and forbids any other
# homeserver to fetch it via federation. Defaults to 'false'.
#
#allow_public_rooms_without_auth: false
# If set to 'false', forbids any other homeserver to fetch the server's public
# rooms directory via federation. Defaults to 'true'.
#
#allow_public_rooms_over_federation: false
# The default room version for newly created rooms.
#
# Known room versions are listed here:
# https://matrix.org/docs/spec/#complete-list-of-room-versions
#
# For example, for room version 1, default_room_version should be set
# to "1".
#
#default_room_version: "4"
#restrict_public_rooms_to_local_users: true
# The GC threshold parameters to pass to `gc.set_threshold`, if defined
#
@@ -265,22 +251,6 @@ listeners:
# Monthly Active User Blocking
#
# Used in cases where the admin or server owner wants to limit to the
# number of monthly active users.
#
# 'limit_usage_by_mau' disables/enables monthly active user blocking. When
# enabled and a limit is reached the server returns a 'ResourceLimitError'
# with error type Codes.RESOURCE_LIMIT_EXCEEDED
#
# 'max_mau_value' is the hard limit of monthly active users above which
# the server will start blocking user actions.
#
# 'mau_trial_days' is a means to add a grace period for active users. It
# means that users must be active for this number of days before they
# can be considered active and guards against the case where lots of users
# sign up in a short space of time never to return after their initial
# session.
#
#limit_usage_by_mau: False
#max_mau_value: 50
#mau_trial_days: 2
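
As a sketch of how these three options interact (hypothetical helper, not Synapse's implementation): users inside their trial period are not counted towards the cap, and once the cap is reached, a user who is not already counted is blocked with a `ResourceLimitError`:

```python
from datetime import datetime, timedelta


class ResourceLimitError(Exception):
    """Raised with error type Codes.RESOURCE_LIMIT_EXCEEDED when the cap is hit."""


def check_mau(active_users: dict, user_id: str, now: datetime,
              limit_usage_by_mau: bool = True,
              max_mau_value: int = 50,
              mau_trial_days: int = 2) -> None:
    if not limit_usage_by_mau:
        return
    # Users must have been active for mau_trial_days before they count.
    cutoff = now - timedelta(days=mau_trial_days)
    counted = {u for u, first_seen in active_users.items() if first_seen <= cutoff}
    if user_id not in counted and len(counted) >= max_mau_value:
        raise ResourceLimitError("Monthly active user limit exceeded")
```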
@@ -312,74 +282,6 @@ listeners:
#
#allow_per_room_profiles: false
# Whether to show the users on this homeserver in the user directory. Defaults to
# 'true'.
#
#show_users_in_user_directory: false
# Message retention policy at the server level.
#
# Room admins and mods can define a retention period for their rooms using the
# 'm.room.retention' state event, and server admins can cap this period by setting
# the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
#
# If this feature is enabled, Synapse will regularly look for and purge events
# which are older than the room's maximum retention period. Synapse will also
# filter events received over federation so that events that should have been
# purged are ignored and not stored again.
#
retention:
# The message retention policies feature is disabled by default. Uncomment the
# following line to enable it.
#
#enabled: true
# Default retention policy. If set, Synapse will apply it to rooms that lack the
# 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
# matter much because Synapse doesn't take it into account yet.
#
#default_policy:
# min_lifetime: 1d
# max_lifetime: 1y
# Retention policy limits. If set, a user won't be able to send a
# 'm.room.retention' event which features a 'min_lifetime' or a 'max_lifetime'
# that's not within this range. This is especially useful in closed federations,
# in which server admins can make sure every federating server applies the same
# rules.
#
#allowed_lifetime_min: 1d
#allowed_lifetime_max: 1y
# Server admins can define the settings of the background jobs purging the
# events which lifetime has expired under the 'purge_jobs' section.
#
# If no configuration is provided, a single job will be set up to delete expired
# events in every room daily.
#
# Each job's configuration defines which range of message lifetimes the job
# takes care of. For example, if 'shortest_max_lifetime' is '2d' and
# 'longest_max_lifetime' is '3d', the job will handle purging expired events in
# rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and
# lower than or equal to 3 days. Both the minimum and the maximum value of a
# range are optional, e.g. a job with no 'shortest_max_lifetime' and a
# 'longest_max_lifetime' of '3d' will handle every room with a retention policy
# which 'max_lifetime' is lower than or equal to three days.
#
# The rationale for this per-job configuration is that some rooms might have a
# retention policy with a low 'max_lifetime', where history needs to be purged
# of outdated messages on a very frequent basis (e.g. every 5min), but not want
# that purge to be performed by a job that's iterating over every room it knows,
# which would be quite heavy on the server.
#
#purge_jobs:
# - shortest_max_lifetime: 1d
# longest_max_lifetime: 3d
# interval: 5m
# - shortest_max_lifetime: 3d
# longest_max_lifetime: 1y
# interval: 24h
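
The range semantics above — a job handles rooms whose `max_lifetime` is strictly greater than its `shortest_max_lifetime` and at most its `longest_max_lifetime`, with either bound optional — can be captured in a short sketch (hypothetical helper):

```python
from typing import Optional

DAY_MS = 24 * 60 * 60 * 1000


def job_handles_room(room_max_lifetime_ms: int,
                     shortest_max_lifetime_ms: Optional[int] = None,
                     longest_max_lifetime_ms: Optional[int] = None) -> bool:
    """True if a purge job with the given bounds handles this room."""
    if (shortest_max_lifetime_ms is not None
            and room_max_lifetime_ms <= shortest_max_lifetime_ms):
        return False
    if (longest_max_lifetime_ms is not None
            and room_max_lifetime_ms > longest_max_lifetime_ms):
        return False
    return True


# A job with shortest=2d / longest=3d handles a room with max_lifetime=3d...
assert job_handles_room(3 * DAY_MS, 2 * DAY_MS, 3 * DAY_MS)
# ...but not one with max_lifetime=2d (the lower bound is exclusive).
assert not job_handles_room(2 * DAY_MS, 2 * DAY_MS, 3 * DAY_MS)
```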
## TLS ##
@@ -401,12 +303,12 @@ retention:
#
#tls_private_key_path: "CONFDIR/SERVERNAME.tls.key"
# Whether to verify TLS server certificates for outbound federation requests.
# Whether to verify TLS certificates when sending federation traffic.
#
# Defaults to `true`. To disable certificate verification, uncomment the
# following line.
# This currently defaults to `false`, however this will change in
# Synapse 1.0 when valid federation certificates will be required.
#
#federation_verify_certificates: false
#federation_verify_certificates: true
# Skip federation certificate verification on the following whitelist
# of domains.
@@ -851,25 +753,13 @@ uploads_path: "DATADIR/uploads"
# This means that, if a validity period is set, and Synapse is restarted (it will
# then derive an expiration date from the current validity period), and some time
# after that the validity period changes and Synapse is restarted, the users'
# expiration dates won't be updated unless their account is manually renewed. This
# date will be randomly selected within a range [now + period - d ; now + period],
# where d is equal to 10% of the validity period.
# expiration dates won't be updated unless their account is manually renewed.
#
#account_validity:
# enabled: True
# period: 6w
# renew_at: 1w
# renew_email_subject: "Renew your %(app)s account"
# # Directory in which Synapse will try to find the HTML files to serve to the
# # user when trying to renew an account. Optional, defaults to
# # synapse/res/templates.
# template_dir: "res/templates"
# # HTML to be displayed to the user after they successfully renewed their
# # account. Optional.
# account_renewed_html_path: "account_renewed.html"
# # HTML to be displayed when the user tries to renew an account with an invalid
# # renewal token. Optional.
# invalid_token_html_path: "invalid_token.html"
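
The randomised selection described above — a date within [now + period - d ; now + period], where d is 10% of the validity period — is straightforward to express (a sketch with hypothetical names):

```python
import random

WEEK_MS = 7 * 24 * 60 * 60 * 1000


def pick_expiration_ts(now_ms: int, period_ms: int) -> int:
    d = period_ms * 0.10  # d is 10% of the validity period
    return int(now_ms + period_ms - random.uniform(0, d))


# With period = 6w, the expiry lands between now + 5.4w and now + 6w.
expiry = pick_expiration_ts(now_ms=0, period_ms=6 * WEEK_MS)
assert 6 * WEEK_MS * 0.9 <= expiry <= 6 * WEEK_MS
```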
# The user must provide all of the below types of 3PID when registering.
#
@@ -882,32 +772,9 @@ uploads_path: "DATADIR/uploads"
#
#disable_msisdn_registration: true
# Derive the user's matrix ID from a type of 3PID used when registering.
# This overrides any matrix ID the user proposes when calling /register
# The 3PID type should be present in registrations_require_3pid to avoid
# users failing to register if they don't specify the right kind of 3pid.
#
#register_mxid_from_3pid: email
# Uncomment to set the display name of new users to their email address,
# rather than using the default heuristic.
#
#register_just_use_email_for_display_name: true
# Mandate that users are only allowed to associate certain formats of
# 3PIDs with accounts on this server.
#
# Use an Identity Server to establish which 3PIDs are allowed to register?
# Overrides allowed_local_3pids below.
#
#check_is_for_allowed_local_3pids: matrix.org
#
# If you are using an IS you can also check whether that IS registers
# pending invites for the given 3PID (and then allow it to sign up on
# the platform):
#
#allow_invited_3pids: False
#
#allowed_local_3pids:
# - medium: email
# pattern: '.*@matrix\.org'
@@ -916,11 +783,6 @@ uploads_path: "DATADIR/uploads"
# - medium: msisdn
# pattern: '\+44'
# If true, stop users from trying to change the 3PIDs associated with
# their accounts.
#
#disable_3pid_changes: False
# Enable 3PIDs lookup requests to identity servers from this server.
#
#enable_3pid_lookup: true
@@ -962,30 +824,6 @@ uploads_path: "DATADIR/uploads"
# - matrix.org
# - vector.im
# If enabled, user IDs, display names and avatar URLs will be replicated
# to this server whenever they change.
# This is an experimental API currently implemented by sydent to support
# cross-homeserver user directories.
#
#replicate_user_profiles_to: example.com
# If specified, attempt to replay registrations, profile changes & 3pid
# bindings on the given target homeserver via the AS API. The HS is authed
# via a given AS token.
#
#shadow_server:
# hs_url: https://shadow.example.com
# hs: shadow.example.com
# as_token: 12u394refgbdhivsia
# If enabled, don't let users set their own display names/avatars
# other than for the very first time (unless they are a server admin).
# Useful when provisioning users based on the contents of a 3rd party
# directory and to avoid ambiguities.
#
#disable_set_displayname: False
#disable_set_avatar_url: False
# Users who register on this homeserver will automatically be joined
# to these rooms
#
@@ -1086,43 +924,12 @@ signing_key_path: "CONFDIR/SERVERNAME.signing.key"
# The trusted servers to download signing keys from.
#
# When we need to fetch a signing key, each server is tried in parallel.
#
# Normally, the connection to the key server is validated via TLS certificates.
# Additional security can be provided by configuring a `verify key`, which
# will make synapse check that the response is signed by that key.
#
# This setting supersedes an older setting named `perspectives`. The old format
# is still supported for backwards-compatibility, but it is deprecated.
#
# Options for each entry in the list include:
#
# server_name: the name of the server. required.
#
# verify_keys: an optional map from key id to base64-encoded public key.
# If specified, we will check that the response is signed by at least
# one of the given keys.
#
# accept_keys_insecurely: a boolean. Normally, if `verify_keys` is unset,
# and federation_verify_certificates is not `true`, synapse will refuse
# to start, because this would allow anyone who can spoof DNS responses
# to masquerade as the trusted key server. If you know what you are doing
# and are sure that your network environment provides a secure connection
# to the key server, you can set this to `true` to override this
# behaviour.
#
# An example configuration might look like:
#
#trusted_key_servers:
# - server_name: "my_trusted_server.example.com"
# verify_keys:
# "ed25519:auto": "abcdefghijklmnopqrstuvwxyzabcdefghijklmopqr"
# - server_name: "my_other_trusted_server.example.com"
#
# The default configuration is:
#
#trusted_key_servers:
# - server_name: "matrix.org"
#perspectives:
# servers:
# "matrix.org":
# verify_keys:
# "ed25519:auto":
# key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
# Enable SAML2 for registration and login. Uses pysaml2.
@@ -1197,40 +1004,12 @@ password_config:
#
#pepper: "EVEN_MORE_SECRET"
# Define and enforce a password policy. Each parameter is optional, boolean
# parameters default to 'false' and integer parameters default to 0.
# This is an early implementation of MSC2000.
#
#policy:
# Whether to enforce the password policy.
#
#enabled: true
# Minimum accepted length for a password.
#
#minimum_length: 15
# Whether a password must contain at least one digit.
#
#require_digit: true
# Whether a password must contain at least one symbol.
# A symbol is any character that's not a number or a letter.
#
#require_symbol: true
# Whether a password must contain at least one lowercase letter.
#
#require_lowercase: true
# Whether a password must contain at least one uppercase letter.
#
#require_uppercase: true
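
A minimal validator matching these options, as a sketch (hypothetical helper, not the actual MSC2000 implementation in Synapse):

```python
def check_password_policy(password: str,
                          minimum_length: int = 0,
                          require_digit: bool = False,
                          require_symbol: bool = False,
                          require_lowercase: bool = False,
                          require_uppercase: bool = False) -> bool:
    if len(password) < minimum_length:
        return False
    if require_digit and not any(c.isdigit() for c in password):
        return False
    # A symbol is any character that's not a number or a letter.
    if require_symbol and not any(not c.isalnum() for c in password):
        return False
    if require_lowercase and not any(c.islower() for c in password):
        return False
    if require_uppercase and not any(c.isupper() for c in password):
        return False
    return True


assert check_password_policy("Correct-Horse-15", minimum_length=15,
                             require_digit=True, require_symbol=True,
                             require_lowercase=True, require_uppercase=True)
```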
# Enable sending emails for password resets, notification events or
# account expiry notices
# Enable sending emails for notification events or expiry notices
# Defining a custom URL for Riot is only needed if email notifications
# should contain links to a self-hosted installation of Riot; when set
# the "app_name" setting is ignored.
#
# If your SMTP server requires authentication, the optional smtp_user &
# smtp_pass variables should be used
@@ -1238,64 +1017,22 @@ password_config:
#email:
# enable_notifs: false
# smtp_host: "localhost"
# smtp_port: 25 # SSL: 465, STARTTLS: 587
# smtp_port: 25
# smtp_user: "exampleusername"
# smtp_pass: "examplepassword"
# require_transport_security: False
# notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
# app_name: Matrix
#
# # Enable email notifications by default
# notif_for_new_users: True
#
# # Defining a custom URL for Riot is only needed if email notifications
# # should contain links to a self-hosted installation of Riot; when set
# # the "app_name" setting is ignored
# riot_base_url: "http://localhost/riot"
#
# # Enable sending password reset emails via the configured, trusted
# # identity servers
# #
# # IMPORTANT! This will give a malicious or overtaken identity server
# # the ability to reset passwords for your users! Make absolutely sure
# # that you want to do this! It is strongly recommended that password
# # reset emails be sent by the homeserver instead
# #
# # If this option is set to false and SMTP options have not been
# # configured, resetting user passwords via email will be disabled
# #trust_identity_server_for_password_resets: false
#
# # Configure the time that a validation email or text message code
# # will expire after sending
# #
# # This is currently used for password resets
# #validation_token_lifetime: 1h
#
# # Template directory. All template files should be stored within this
# # directory
# #
# # if template_dir is unset, uses the example templates that are part of
# # the Synapse distribution.
# #template_dir: res/templates
#
# # Templates for email notifications
# #
# notif_template_html: notif_mail.html
# notif_template_text: notif_mail.txt
#
# # Templates for account expiry notices
# #
# # Templates for account expiry notices.
# expiry_template_html: notice_expiry.html
# expiry_template_text: notice_expiry.txt
#
# # Templates for password reset emails sent by the homeserver
# #
# #password_reset_template_html: password_reset.html
# #password_reset_template_text: password_reset.txt
#
# # Templates for password reset success and failure pages that a user
# # will see after attempting to reset their password
# #
# #password_reset_template_success_html: password_reset_success.html
# #password_reset_template_failure_html: password_reset_failure.html
# notif_for_new_users: True
# riot_base_url: "http://localhost/riot"
#password_providers:
@@ -1356,18 +1093,13 @@ password_config:
#
# 'search_all_users' defines whether to search all users visible to your HS
# when searching the user directory, rather than limiting to users visible
# in public rooms. Defaults to false. If you set it True, you'll have to
# rebuild the user_directory search indexes, see
# https://github.com/matrix-org/synapse/blob/master/docs/user_directory.md
# in public rooms. Defaults to false. If you set it True, you'll have to run
# UPDATE user_directory_stream_pos SET stream_id = NULL;
# on your database to tell it to rebuild the user_directory search indexes.
#
#user_directory:
# enabled: true
# search_all_users: false
#
# # If this is set, user search will be delegated to this ID server instead
# # of synapse performing the search itself.
# # This is an experimental API.
# defer_to_id_server: https://id.example.com
# User Consent configuration
@@ -1520,16 +1252,3 @@ password_config:
# alias: "*"
# room_id: "*"
# action: allow
# Server admins can define a Python module that implements extra rules for
# allowing or denying incoming events. In order to work, this module needs to
# override the methods defined in synapse/events/third_party_rules.py.
#
# This feature is designed to be used in closed federations only, where each
# participating server enforces the same rules.
#
#third_party_event_rules:
# module: "my_custom_project.SuperRulesSet"
# config:
# example_option: 'things'

View File

@@ -7,7 +7,11 @@ who are present in a publicly viewable room present on the server.
The directory info is stored in various tables, which can (typically after
DB corruption) get stale or out of sync. If this happens, for now the
solution to fix it is to execute the SQL here
https://github.com/matrix-org/synapse/blob/master/synapse/storage/schema/delta/53/user_dir_populate.sql
and then restart synapse. This should then start a background task to
quickest solution to fix it is:
```
UPDATE user_directory_stream_pos SET stream_id = NULL;
```
and restart Synapse, which should then start a background task to
flush the current tables and regenerate the directory.

View File

@@ -1,7 +0,0 @@
.header {
border-bottom: 4px solid #e4f7ed ! important;
}
.notif_link a, .footer a {
color: #76CFA6 ! important;
}

View File

@@ -1,156 +0,0 @@
body {
margin: 0px;
}
pre, code {
word-break: break-word;
white-space: pre-wrap;
}
#page {
font-family: 'Open Sans', Helvetica, Arial, Sans-Serif;
font-color: #454545;
font-size: 12pt;
width: 100%;
padding: 20px;
}
#inner {
width: 640px;
}
.header {
width: 100%;
height: 87px;
color: #454545;
border-bottom: 4px solid #e5e5e5;
}
.logo {
text-align: right;
margin-left: 20px;
}
.salutation {
padding-top: 10px;
font-weight: bold;
}
.summarytext {
}
.room {
width: 100%;
color: #454545;
border-bottom: 1px solid #e5e5e5;
}
.room_header td {
padding-top: 38px;
padding-bottom: 10px;
border-bottom: 1px solid #e5e5e5;
}
.room_name {
vertical-align: middle;
font-size: 18px;
font-weight: bold;
}
.room_header h2 {
margin-top: 0px;
margin-left: 75px;
font-size: 20px;
}
.room_avatar {
width: 56px;
line-height: 0px;
text-align: center;
vertical-align: middle;
}
.room_avatar img {
width: 48px;
height: 48px;
object-fit: cover;
border-radius: 24px;
}
.notif {
border-bottom: 1px solid #e5e5e5;
margin-top: 16px;
padding-bottom: 16px;
}
.historical_message .sender_avatar {
opacity: 0.3;
}
/* spell out opacity and historical_message class names for Outlook aka Word */
.historical_message .sender_name {
color: #e3e3e3;
}
.historical_message .message_time {
color: #e3e3e3;
}
.historical_message .message_body {
color: #c7c7c7;
}
.historical_message td,
.message td {
padding-top: 10px;
}
.sender_avatar {
width: 56px;
text-align: center;
vertical-align: top;
}
.sender_avatar img {
margin-top: -2px;
width: 32px;
height: 32px;
border-radius: 16px;
}
.sender_name {
display: inline;
font-size: 13px;
color: #a2a2a2;
}
.message_time {
text-align: right;
width: 100px;
font-size: 11px;
color: #a2a2a2;
}
.message_body {
}
.notif_link td {
padding-top: 10px;
padding-bottom: 10px;
font-weight: bold;
}
.notif_link a, .footer a {
color: #454545;
text-decoration: none;
}
.debug {
font-size: 10px;
color: #888;
}
.footer {
margin-top: 20px;
text-align: center;
}

View File

@@ -1,45 +0,0 @@
{% for message in notif.messages %}
<tr class="{{ "historical_message" if message.is_historical else "message" }}">
<td class="sender_avatar">
{% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
{% if message.sender_avatar_url %}
<img alt="" class="sender_avatar" src="{{ message.sender_avatar_url|mxc_to_http(32,32) }}" />
{% else %}
{% if message.sender_hash % 3 == 0 %}
<img class="sender_avatar" src="https://vector.im/beta/img/76cfa6.png" />
{% elif message.sender_hash % 3 == 1 %}
<img class="sender_avatar" src="https://vector.im/beta/img/50e2c2.png" />
{% else %}
<img class="sender_avatar" src="https://vector.im/beta/img/f4c371.png" />
{% endif %}
{% endif %}
{% endif %}
</td>
<td class="message_contents">
{% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
<div class="sender_name">{% if message.msgtype == "m.emote" %}*{% endif %} {{ message.sender_name }}</div>
{% endif %}
<div class="message_body">
{% if message.msgtype == "m.text" %}
{{ message.body_text_html }}
{% elif message.msgtype == "m.emote" %}
{{ message.body_text_html }}
{% elif message.msgtype == "m.notice" %}
{{ message.body_text_html }}
{% elif message.msgtype == "m.image" %}
<img src="{{ message.image_url|mxc_to_http(640, 480, scale) }}" />
{% elif message.msgtype == "m.file" %}
<span class="filename">{{ message.body_text_plain }}</span>
{% endif %}
</div>
</td>
<td class="message_time">{{ message.ts|format_ts("%H:%M") }}</td>
</tr>
{% endfor %}
<tr class="notif_link">
<td></td>
<td>
<a href="{{ notif.link }}">Voir {{ room.title }}</a>
</td>
<td></td>
</tr>
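The avatar/name grouping in this template hinges on a single condition; a minimal Python sketch of the same logic, assuming messages are dicts with a sender_name key (show_sender_header is illustrative, not part of Synapse):

def show_sender_header(messages, i):
    # Mirrors the Jinja check above: render the avatar and sender name only
    # for the first message, or when the sender differs from the previous one.
    return i == 0 or messages[i - 1]["sender_name"] != messages[i]["sender_name"]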

View File

@@ -1,16 +0,0 @@
{% for message in notif.messages %}
{% if message.msgtype == "m.emote" %}* {% endif %}{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }})
{% if message.msgtype == "m.text" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.emote" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.notice" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.image" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.file" %}
{{ message.body_text_plain }}
{% endif %}
{% endfor %}
View {{ room.title }} at {{ notif.link }}

View File

@@ -1,55 +0,0 @@
<!doctype html>
<html lang="en">
<head>
<style type="text/css">
{% include 'mail.css' without context %}
{% include "mail-%s.css" % app_name ignore missing without context %}
</style>
</head>
<body>
<table id="page">
<tr>
<td> </td>
<td id="inner">
<table class="header">
<tr>
<td>
<div class="salutation">Bonjour {{ user_display_name }},</div>
<div class="summarytext">{{ summary_text }}</div>
</td>
<td class="logo">
{% if app_name == "Riot" %}
<img src="http://matrix.org/img/riot-logo-email.png" width="83" height="83" alt="[Riot]"/>
{% elif app_name == "Vector" %}
<img src="http://matrix.org/img/vector-logo-email.png" width="64" height="83" alt="[Vector]"/>
{% else %}
<img src="http://matrix.org/img/matrix-120x51.png" width="120" height="51" alt="[matrix]"/>
{% endif %}
</td>
</tr>
</table>
{% for room in rooms %}
{% include 'room.html' with context %}
{% endfor %}
<div class="footer">
<a href="{{ unsubscribe_link }}">Se désinscrire</a>
<br/>
<br/>
<div class="debug">
Sending email at {{ reason.now|format_ts("%c") }} due to activity in room {{ reason.room_name }} because
an event was received at {{ reason.received_at|format_ts("%c") }}
which is more than {{ "%.1f"|format(reason.delay_before_mail_ms / (60*1000)) }} ({{ reason.delay_before_mail_ms }}) mins ago,
{% if reason.last_sent_ts %}
and the last time we sent a mail for this room was {{ reason.last_sent_ts|format_ts("%c") }},
which is more than {{ "%.1f"|format(reason.throttle_ms / (60*1000)) }} (current throttle_ms) mins ago.
{% else %}
and we don't have a last time we sent a mail for this room.
{% endif %}
</div>
</div>
</td>
<td> </td>
</tr>
</table>
</body>
</html>

View File

@@ -1,10 +0,0 @@
Hello {{ user_display_name }},
{{ summary_text }}
{% for room in rooms %}
{% include 'room.txt' with context %}
{% endfor %}
You can disable these notifications by clicking here {{ unsubscribe_link }}

View File

@@ -1,33 +0,0 @@
<table class="room">
<tr class="room_header">
<td class="room_avatar">
{% if room.avatar_url %}
<img alt="" src="{{ room.avatar_url|mxc_to_http(48,48) }}" />
{% else %}
{% if room.hash % 3 == 0 %}
<img alt="" src="https://vector.im/beta/img/76cfa6.png" />
{% elif room.hash % 3 == 1 %}
<img alt="" src="https://vector.im/beta/img/50e2c2.png" />
{% else %}
<img alt="" src="https://vector.im/beta/img/f4c371.png" />
{% endif %}
{% endif %}
</td>
<td class="room_name" colspan="2">
{{ room.title }}
</td>
</tr>
{% if room.invite %}
<tr>
<td></td>
<td>
<a href="{{ room.link }}">Rejoindre la conversation.</a>
</td>
<td></td>
</tr>
{% else %}
{% for notif in room.notifs %}
{% include 'notif.html' with context %}
{% endfor %}
{% endif %}
</table>

View File

@@ -1,9 +0,0 @@
{{ room.title }}
{% if room.invite %}
You have been invited, join the conversation by clicking the following link {{ room.link }}
{% else %}
{% for notif in room.notifs %}
{% include 'notif.txt' with context %}
{% endfor %}
{% endif %}

View File

@@ -20,7 +20,9 @@ class CallVisitor(ast.NodeVisitor):
else:
return
if name == "client_patterns":
if name == "client_path_patterns":
PATTERNS_V1.append(node.args[0].s)
elif name == "client_v2_patterns":
PATTERNS_V2.append(node.args[0].s)
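For context, a sketch of how such a visitor might be driven over a source file; collect_patterns and the module-level lists are hypothetical helpers, not part of the script itself:

import ast

PATTERNS_V1 = []
PATTERNS_V2 = []

def collect_patterns(path):
    # Hypothetical driver: parse the file and let the CallVisitor above
    # record the first string argument of each pattern-helper call.
    with open(path) as f:
        tree = ast.parse(f.read())
    CallVisitor().visit(tree)
    return PATTERNS_V1, PATTERNS_V2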

View File

@@ -1,37 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
from signedjson.key import write_signing_keys, generate_signing_key
from synapse.util.stringutils import random_string
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-o", "--output_file",
type=argparse.FileType('w'),
default=sys.stdout,
help="Where to write the output to",
)
args = parser.parse_args()
key_id = "a_" + random_string(4)
key = generate_signing_key(key_id),
write_signing_keys(args.output_file, key)
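Note that write_signing_keys expects an iterable of keys, which is why the script wraps the generated key in a one-element tuple via the trailing comma. A minimal standalone sketch of the same flow:

import sys
from signedjson.key import generate_signing_key, write_signing_keys

key = generate_signing_key("a_demo")    # a single signing key
write_signing_keys(sys.stdout, (key,))  # takes an iterable of keys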

View File

@@ -27,4 +27,4 @@ try:
except ImportError:
pass
__version__ = "1.0.0"
__version__ = "0.99.5.1"

View File

@@ -184,22 +184,11 @@ class Auth(object):
return event_auth.get_public_keys(invite_event)
@defer.inlineCallbacks
def get_user_by_req(
self,
request,
allow_guest=False,
rights="access",
allow_expired=False,
):
def get_user_by_req(self, request, allow_guest=False, rights="access"):
""" Get a registered user's ID.
Args:
request - An HTTP request with an access_token query parameter.
allow_expired - Whether to allow the request through even if the account is
expired. If true, Synapse will still require an access token to be
provided but won't check if the account it belongs to has expired. This
works thanks to /login delivering access tokens regardless of accounts'
expiration.
Returns:
defer.Deferred: resolves to a ``synapse.types.Requester`` object
Raises:
@@ -218,7 +207,6 @@ class Auth(object):
)
user_id, app_service = yield self._get_appservice_user_id(request)
if user_id:
request.authenticated_entity = user_id
@@ -241,7 +229,7 @@ class Auth(object):
is_guest = user_info["is_guest"]
# Deny the request if the user account has expired.
if self._account_validity.enabled and not allow_expired:
if self._account_validity.enabled:
user_id = user.to_string()
expiration_ts = yield self.store.get_expiration_ts_for_user(user_id)
if expiration_ts is not None and self.clock.time_msec() >= expiration_ts:
@@ -280,40 +268,39 @@ class Auth(object):
errcode=Codes.MISSING_TOKEN
)
@defer.inlineCallbacks
def _get_appservice_user_id(self, request):
app_service = self.store.get_app_service_by_token(
self.get_access_token_from_request(
request, self.TOKEN_NOT_FOUND_HTTP_STATUS
)
)
if app_service is None:
return(None, None)
defer.returnValue((None, None))
if app_service.ip_range_whitelist:
ip_address = IPAddress(self.hs.get_ip_from_request(request))
if ip_address not in app_service.ip_range_whitelist:
return(None, None)
defer.returnValue((None, None))
if b"user_id" not in request.args:
return(app_service.sender, app_service)
defer.returnValue((app_service.sender, app_service))
user_id = request.args[b"user_id"][0].decode('utf8')
if app_service.sender == user_id:
return(app_service.sender, app_service)
defer.returnValue((app_service.sender, app_service))
if not app_service.is_interested_in_user(user_id):
raise AuthError(
403,
"Application service cannot masquerade as this user."
)
# Let ASes manipulate nonexistent users (e.g. to shadow-register them)
# if not (yield self.store.get_user_by_id(user_id)):
# raise AuthError(
# 403,
# "Application service has not registered this user"
# )
return(user_id, app_service)
if not (yield self.store.get_user_by_id(user_id)):
raise AuthError(
403,
"Application service has not registered this user"
)
defer.returnValue((user_id, app_service))
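The defer.returnValue calls restored here matter on Python 2, where a generator cannot use `return value`; a minimal sketch of the pattern (check_registered is illustrative, not Synapse code):

from twisted.internet import defer

@defer.inlineCallbacks
def check_registered(store, user_id):
    # On Python 2, `return user is not None` inside this generator would be
    # a SyntaxError, so the result is handed back via defer.returnValue.
    user = yield store.get_user_by_id(user_id)
    defer.returnValue(user is not None)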
@defer.inlineCallbacks
def get_user_by_access_token(self, token, rights="access"):
@@ -546,15 +533,24 @@ class Auth(object):
defer.returnValue(user_info)
def get_appservice_by_req(self, request):
(user_id, app_service) = self._get_appservice_user_id(request)
if not app_service:
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS,
"Unrecognised access token.",
errcode=Codes.UNKNOWN_TOKEN,
try:
token = self.get_access_token_from_request(
request, self.TOKEN_NOT_FOUND_HTTP_STATUS
)
service = self.store.get_app_service_by_token(token)
if not service:
logger.warn("Unrecognised appservice access token.")
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS,
"Unrecognised access token.",
errcode=Codes.UNKNOWN_TOKEN
)
request.authenticated_entity = service.sender
return defer.succeed(service)
except KeyError:
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS, "Missing access token."
)
request.authenticated_entity = app_service.sender
return app_service
def is_server_admin(self, user):
""" Check if the given user is a local server admin.

View File

@@ -83,7 +83,6 @@ class EventTypes(object):
RoomAvatar = "m.room.avatar"
RoomEncryption = "m.room.encryption"
GuestAccess = "m.room.guest_access"
Encryption = "m.room.encryption"
# These are used for validation
Message = "m.room.message"
@@ -93,8 +92,6 @@ class EventTypes(object):
ServerACL = "m.room.server_acl"
Pinned = "m.room.pinned_events"
Retention = "m.room.retention"
class RejectedReason(object):
AUTH_ERROR = "auth_error"

View File

@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017-2018 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -62,13 +61,6 @@ class Codes(object):
INCOMPATIBLE_ROOM_VERSION = "M_INCOMPATIBLE_ROOM_VERSION"
WRONG_ROOM_KEYS_VERSION = "M_WRONG_ROOM_KEYS_VERSION"
EXPIRED_ACCOUNT = "ORG_MATRIX_EXPIRED_ACCOUNT"
PASSWORD_TOO_SHORT = "M_PASSWORD_TOO_SHORT"
PASSWORD_NO_DIGIT = "M_PASSWORD_NO_DIGIT"
PASSWORD_NO_UPPERCASE = "M_PASSWORD_NO_UPPERCASE"
PASSWORD_NO_LOWERCASE = "M_PASSWORD_NO_LOWERCASE"
PASSWORD_NO_SYMBOL = "M_PASSWORD_NO_SYMBOL"
PASSWORD_IN_DICTIONARY = "M_PASSWORD_IN_DICTIONARY"
WEAK_PASSWORD = "M_WEAK_PASSWORD"
class CodeMessageException(RuntimeError):
@@ -347,15 +339,6 @@ class UnsupportedRoomVersionError(SynapseError):
)
class ThreepidValidationError(SynapseError):
"""An error raised when there was a problem authorising an event."""
def __init__(self, *args, **kwargs):
if "errcode" not in kwargs:
kwargs["errcode"] = Codes.FORBIDDEN
super(ThreepidValidationError, self).__init__(*args, **kwargs)
class IncompatibleRoomVersionError(SynapseError):
"""A server is trying to join a room whose version it does not support.
@@ -380,22 +363,6 @@ class IncompatibleRoomVersionError(SynapseError):
)
class PasswordRefusedError(SynapseError):
"""A password has been refused, either during password reset/change or registration.
"""
def __init__(
self,
msg="This password doesn't comply with the server's policy",
errcode=Codes.WEAK_PASSWORD,
):
super(PasswordRefusedError, self).__init__(
code=400,
msg=msg,
errcode=errcode,
)
class RequestSendFailed(RuntimeError):
"""Sending a HTTP request over federation failed due to not being able to
talk to the remote server for some reason.

View File

@@ -50,7 +50,6 @@ class RoomVersion(object):
disposition = attr.ib() # str; one of the RoomDispositions
event_format = attr.ib() # int; one of the EventFormatVersions
state_res = attr.ib() # int; one of the StateResolutionVersions
enforce_key_validity = attr.ib() # bool
class RoomVersions(object):
@@ -59,44 +58,43 @@ class RoomVersions(object):
RoomDisposition.STABLE,
EventFormatVersions.V1,
StateResolutionVersions.V1,
enforce_key_validity=False,
)
STATE_V2_TEST = RoomVersion(
"state-v2-test",
RoomDisposition.UNSTABLE,
EventFormatVersions.V1,
StateResolutionVersions.V2,
)
V2 = RoomVersion(
"2",
RoomDisposition.STABLE,
EventFormatVersions.V1,
StateResolutionVersions.V2,
enforce_key_validity=False,
)
V3 = RoomVersion(
"3",
RoomDisposition.STABLE,
EventFormatVersions.V2,
StateResolutionVersions.V2,
enforce_key_validity=False,
)
V4 = RoomVersion(
"4",
RoomDisposition.STABLE,
EventFormatVersions.V3,
StateResolutionVersions.V2,
enforce_key_validity=False,
)
V5 = RoomVersion(
"5",
RoomDisposition.STABLE,
EventFormatVersions.V3,
StateResolutionVersions.V2,
enforce_key_validity=True,
)
# the version we will give rooms which are created on this server
DEFAULT_ROOM_VERSION = RoomVersions.V1
KNOWN_ROOM_VERSIONS = {
v.identifier: v for v in (
RoomVersions.V1,
RoomVersions.V2,
RoomVersions.V3,
RoomVersions.STATE_V2_TEST,
RoomVersions.V4,
RoomVersions.V5,
)
} # type: dict[str, RoomVersion]
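A rough sketch of how this registry might be consulted when validating a requested room version (resolve_room_version is hypothetical):

def resolve_room_version(identifier):
    # Look up the RoomVersion object for an identifier such as "4",
    # rejecting versions this server does not implement.
    v = KNOWN_ROOM_VERSIONS.get(identifier)
    if v is None:
        raise ValueError("Unknown room version: %s" % (identifier,))
    return v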

View File

@@ -26,7 +26,6 @@ CLIENT_API_PREFIX = "/_matrix/client"
FEDERATION_PREFIX = "/_matrix/federation"
FEDERATION_V1_PREFIX = FEDERATION_PREFIX + "/v1"
FEDERATION_V2_PREFIX = FEDERATION_PREFIX + "/v2"
FEDERATION_UNSTABLE_PREFIX = FEDERATION_PREFIX + "/unstable"
STATIC_PREFIX = "/_matrix/static"
WEB_CLIENT_PREFIX = "/_matrix/client"
CONTENT_REPO_PREFIX = "/_matrix/content"

View File

@@ -344,21 +344,15 @@ class _LimitedHostnameResolver(object):
def resolveHostName(self, resolutionReceiver, hostName, portNumber=0,
addressTypes=None, transportSemantics='TCP'):
# Note this is happening deep within the reactor, so we don't need to
# worry about log contexts.
# We need this function to return `resolutionReceiver` so we do all the
# actual logic involving deferreds in a separate function.
# even though this is happening within the depths of twisted, we need to drop
# our logcontext before starting _resolve, otherwise: (a) _resolve will drop
# the logcontext if it returns an incomplete deferred; (b) _resolve will
# call the resolutionReceiver *with* a logcontext, which it won't be expecting.
with PreserveLoggingContext():
self._resolve(
resolutionReceiver,
hostName,
portNumber,
addressTypes,
transportSemantics,
)
self._resolve(
resolutionReceiver, hostName, portNumber,
addressTypes, transportSemantics,
)
return resolutionReceiver

View File

@@ -37,7 +37,8 @@ from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.rest.client.v2_alpha._base import client_patterns
from synapse.rest.client.v1.base import ClientV1RestServlet, client_path_patterns
from synapse.rest.client.v2_alpha._base import client_v2_patterns
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
@@ -48,11 +49,11 @@ from synapse.util.versionstring import get_version_string
logger = logging.getLogger("synapse.app.frontend_proxy")
class PresenceStatusStubServlet(RestServlet):
PATTERNS = client_patterns("/presence/(?P<user_id>[^/]*)/status")
class PresenceStatusStubServlet(ClientV1RestServlet):
PATTERNS = client_path_patterns("/presence/(?P<user_id>[^/]*)/status")
def __init__(self, hs):
super(PresenceStatusStubServlet, self).__init__()
super(PresenceStatusStubServlet, self).__init__(hs)
self.http_client = hs.get_simple_http_client()
self.auth = hs.get_auth()
self.main_uri = hs.config.worker_main_http_uri
@@ -83,7 +84,7 @@ class PresenceStatusStubServlet(RestServlet):
class KeyUploadServlet(RestServlet):
PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
PATTERNS = client_v2_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
def __init__(self, hs):
"""

View File

@@ -265,7 +265,7 @@ class ApplicationService(object):
def is_exclusive_room(self, room_id):
return self._is_exclusive(ApplicationService.NS_ROOMS, room_id)
def get_exclusive_user_regexes(self):
def get_exlusive_user_regexes(self):
"""Get the list of regexes used to determine if a user is exclusively
registered by the AS
"""

View File

@@ -1,7 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015-2016 OpenMarket Ltd
# Copyright 2017-2018 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -31,76 +29,12 @@ logger = logging.getLogger(__name__)
class EmailConfig(Config):
def read_config(self, config):
# TODO: We should separate better the email configuration from the notification
# and account validity config.
self.email_enable_notifs = False
email_config = config.get("email", {})
self.email_smtp_host = email_config.get("smtp_host", None)
self.email_smtp_port = email_config.get("smtp_port", None)
self.email_smtp_user = email_config.get("smtp_user", None)
self.email_smtp_pass = email_config.get("smtp_pass", None)
self.require_transport_security = email_config.get(
"require_transport_security", False
)
if "app_name" in email_config:
self.email_app_name = email_config["app_name"]
else:
self.email_app_name = "Matrix"
# TODO: Rename notif_from to something more generic, or have a separate
# from for password resets, message notifications, etc?
# Currently the email section is a bit bogged down with settings for
# multiple functions. Would be good to split it out into separate
# sections and only put the common ones under email:
self.email_notif_from = email_config.get("notif_from", None)
if self.email_notif_from is not None:
# make sure it's valid
parsed = email.utils.parseaddr(self.email_notif_from)
if parsed[1] == '':
raise RuntimeError("Invalid notif_from address")
template_dir = email_config.get("template_dir")
# we need an absolute path, because we change directory after starting (and
# we don't yet know what auxiliary templates like mail.css we will need).
# (Note that loading as package_resources with jinja.PackageLoader doesn't
# work for the same reason.)
if not template_dir:
template_dir = pkg_resources.resource_filename(
'synapse', 'res/templates'
)
self.email_template_dir = os.path.abspath(template_dir)
self.email_enable_notifs = email_config.get("enable_notifs", False)
account_validity_renewal_enabled = config.get(
"account_validity", {},
).get("renew_at")
email_trust_identity_server_for_password_resets = email_config.get(
"trust_identity_server_for_password_resets", False,
)
self.email_password_reset_behaviour = (
"remote" if email_trust_identity_server_for_password_resets else "local"
)
if self.email_password_reset_behaviour == "local" and email_config == {}:
logger.warn(
"User password resets have been disabled due to lack of email config"
)
self.email_password_reset_behaviour = "off"
# Get lifetime of a validation token in milliseconds
self.email_validation_token_lifetime = self.parse_duration(
email_config.get("validation_token_lifetime", "1h")
)
if (
self.email_enable_notifs
or account_validity_renewal_enabled
or self.email_password_reset_behaviour == "local"
):
if self.email_enable_notifs:
# make sure we can import the required deps
import jinja2
import bleach
@@ -108,68 +42,6 @@ class EmailConfig(Config):
jinja2
bleach
if self.email_password_reset_behaviour == "local":
required = [
"smtp_host",
"smtp_port",
"notif_from",
]
missing = []
for k in required:
if k not in email_config:
missing.append(k)
if (len(missing) > 0):
raise RuntimeError(
"email.password_reset_behaviour is set to 'local' "
"but required keys are missing: %s" %
(", ".join(["email." + k for k in missing]),)
)
# Templates for password reset emails
self.email_password_reset_template_html = email_config.get(
"password_reset_template_html", "password_reset.html",
)
self.email_password_reset_template_text = email_config.get(
"password_reset_template_text", "password_reset.txt",
)
self.email_password_reset_failure_template = email_config.get(
"password_reset_failure_template", "password_reset_failure.html",
)
# This template does not support any replaceable variables, so we will
# read it from the disk once during setup
email_password_reset_success_template = email_config.get(
"password_reset_success_template", "password_reset_success.html",
)
# Check templates exist
for f in [self.email_password_reset_template_html,
self.email_password_reset_template_text,
self.email_password_reset_failure_template,
email_password_reset_success_template]:
p = os.path.join(self.email_template_dir, f)
if not os.path.isfile(p):
raise ConfigError("Unable to find template file %s" % (p, ))
# Retrieve content of web templates
filepath = os.path.join(
self.email_template_dir,
email_password_reset_success_template,
)
self.email_password_reset_success_html_content = self.read_file(
filepath,
"email.password_reset_template_success_html",
)
if config.get("public_baseurl") is None:
raise RuntimeError(
"email.password_reset_behaviour is set to 'local' but no "
"public_baseurl is set. This is necessary to generate password "
"reset links"
)
if self.email_enable_notifs:
required = [
"smtp_host",
"smtp_port",
@@ -194,22 +66,11 @@ class EmailConfig(Config):
"email.enable_notifs is True but no public_baseurl is set"
)
self.email_smtp_host = email_config["smtp_host"]
self.email_smtp_port = email_config["smtp_port"]
self.email_notif_from = email_config["notif_from"]
self.email_notif_template_html = email_config["notif_template_html"]
self.email_notif_template_text = email_config["notif_template_text"]
for f in self.email_notif_template_text, self.email_notif_template_html:
p = os.path.join(self.email_template_dir, f)
if not os.path.isfile(p):
raise ConfigError("Unable to find email template file %s" % (p, ))
self.email_notif_for_new_users = email_config.get(
"notif_for_new_users", True
)
self.email_riot_base_url = email_config.get(
"riot_base_url", None
)
if account_validity_renewal_enabled:
self.email_expiry_template_html = email_config.get(
"expiry_template_html", "notice_expiry.html",
)
@@ -217,15 +78,58 @@ class EmailConfig(Config):
"expiry_template_text", "notice_expiry.txt",
)
for f in self.email_expiry_template_text, self.email_expiry_template_html:
p = os.path.join(self.email_template_dir, f)
template_dir = email_config.get("template_dir")
# we need an absolute path, because we change directory after starting (and
# we don't yet know what auxiliary templates like mail.css we will need).
# (Note that loading as package_resources with jinja.PackageLoader doesn't
# work for the same reason.)
if not template_dir:
template_dir = pkg_resources.resource_filename(
'synapse', 'res/templates'
)
template_dir = os.path.abspath(template_dir)
for f in self.email_notif_template_text, self.email_notif_template_html:
p = os.path.join(template_dir, f)
if not os.path.isfile(p):
raise ConfigError("Unable to find email template file %s" % (p, ))
self.email_template_dir = template_dir
self.email_notif_for_new_users = email_config.get(
"notif_for_new_users", True
)
self.email_riot_base_url = email_config.get(
"riot_base_url", None
)
self.email_smtp_user = email_config.get(
"smtp_user", None
)
self.email_smtp_pass = email_config.get(
"smtp_pass", None
)
self.require_transport_security = email_config.get(
"require_transport_security", False
)
if "app_name" in email_config:
self.email_app_name = email_config["app_name"]
else:
self.email_app_name = "Matrix"
# make sure it's valid
parsed = email.utils.parseaddr(self.email_notif_from)
if parsed[1] == '':
raise RuntimeError("Invalid notif_from address")
else:
self.email_enable_notifs = False
# Not much point setting defaults for the rest: it would be an
# error for them to be used.
def default_config(self, config_dir_path, server_name, **kwargs):
return """
# Enable sending emails for password resets, notification events or
# account expiry notices
# Enable sending emails for notification events or expiry notices
# Defining a custom URL for Riot is only needed if email notifications
# should contain links to a self-hosted installation of Riot; when set
# the "app_name" setting is ignored.
#
# If your SMTP server requires authentication, the optional smtp_user &
# smtp_pass variables should be used
@@ -233,62 +137,20 @@ class EmailConfig(Config):
#email:
# enable_notifs: false
# smtp_host: "localhost"
# smtp_port: 25 # SSL: 465, STARTTLS: 587
# smtp_port: 25
# smtp_user: "exampleusername"
# smtp_pass: "examplepassword"
# require_transport_security: False
# notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
# app_name: Matrix
#
# # Enable email notifications by default
# notif_for_new_users: True
#
# # Defining a custom URL for Riot is only needed if email notifications
# # should contain links to a self-hosted installation of Riot; when set
# # the "app_name" setting is ignored
# riot_base_url: "http://localhost/riot"
#
# # Enable sending password reset emails via the configured, trusted
# # identity servers
# #
# # IMPORTANT! This will give a malicious or overtaken identity server
# # the ability to reset passwords for your users! Make absolutely sure
# # that you want to do this! It is strongly recommended that password
# # reset emails be sent by the homeserver instead
# #
# # If this option is set to false and SMTP options have not been
# # configured, resetting user passwords via email will be disabled
# #trust_identity_server_for_password_resets: false
#
# # Configure the time that a validation email or text message code
# # will expire after sending
# #
# # This is currently used for password resets
# #validation_token_lifetime: 1h
#
# # Template directory. All template files should be stored within this
# # directory
# #
# # if template_dir is unset, uses the example templates that are part of
# # the Synapse distribution.
# #template_dir: res/templates
#
# # Templates for email notifications
# #
# notif_template_html: notif_mail.html
# notif_template_text: notif_mail.txt
#
# # Templates for account expiry notices
# #
# # Templates for account expiry notices.
# expiry_template_html: notice_expiry.html
# expiry_template_text: notice_expiry.txt
#
# # Templates for password reset emails sent by the homeserver
# #
# #password_reset_template_html: password_reset.html
# #password_reset_template_text: password_reset.txt
#
# # Templates for password reset success and failure pages that a user
# # will see after attempting to reset their password
# #
# #password_reset_template_success_html: password_reset_success.html
# #password_reset_template_failure_html: password_reset_failure.html
# notif_for_new_users: True
# riot_base_url: "http://localhost/riot"
"""

View File

@@ -38,7 +38,6 @@ from .server import ServerConfig
from .server_notices_config import ServerNoticesConfig
from .spam_checker import SpamCheckerConfig
from .stats import StatsConfig
from .third_party_event_rules import ThirdPartyRulesConfig
from .tls import TlsConfig
from .user_directory import UserDirectoryConfig
from .voip import VoipConfig
@@ -74,6 +73,5 @@ class HomeServerConfig(
StatsConfig,
ServerNoticesConfig,
RoomDirectoryConfig,
ThirdPartyRulesConfig,
):
pass

View File

@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,8 +17,6 @@ import hashlib
import logging
import os
import attr
import jsonschema
from signedjson.key import (
NACL_ED25519,
decode_signing_key_base64,
@@ -35,36 +32,11 @@ from synapse.util.stringutils import random_string, random_string_with_symbols
from ._base import Config, ConfigError
INSECURE_NOTARY_ERROR = """\
Your server is configured to accept key server responses without signature
validation or TLS certificate validation. This is likely to be very insecure. If
you are *sure* you want to do this, set 'accept_keys_insecurely' on the
keyserver configuration."""
RELYING_ON_MATRIX_KEY_ERROR = """\
Your server is configured to accept key server responses without TLS certificate
validation, and which are only signed by the old (possibly compromised)
matrix.org signing key 'ed25519:auto'. This likely isn't what you want to do,
and you should enable 'federation_verify_certificates' in your configuration.
If you are *sure* you want to do this, set 'accept_keys_insecurely' on the
trusted_key_server configuration."""
logger = logging.getLogger(__name__)
@attr.s
class TrustedKeyServer(object):
# string: name of the server.
server_name = attr.ib()
# dict[str,VerifyKey]|None: map from key id to key object, or None to disable
# signature verification.
verify_keys = attr.ib(default=None)
class KeyConfig(Config):
def read_config(self, config):
# the signing key can be specified inline or in a separate file
if "signing_key" in config:
@@ -77,27 +49,16 @@ class KeyConfig(Config):
config.get("old_signing_keys", {})
)
self.key_refresh_interval = self.parse_duration(
config.get("key_refresh_interval", "1d")
config.get("key_refresh_interval", "1d"),
)
# if neither trusted_key_servers nor perspectives are given, use the default.
if "perspectives" not in config and "trusted_key_servers" not in config:
key_servers = [{"server_name": "matrix.org"}]
else:
key_servers = config.get("trusted_key_servers", [])
if not isinstance(key_servers, list):
raise ConfigError(
"trusted_key_servers, if given, must be a list, not a %s"
% (type(key_servers).__name__,)
)
# merge the 'perspectives' config into the 'trusted_key_servers' config.
key_servers.extend(_perspectives_to_key_servers(config))
# list of TrustedKeyServer objects
self.key_servers = list(
_parse_key_servers(key_servers, self.federation_verify_certificates)
self.perspectives = self.read_perspectives(
config.get("perspectives", {}).get("servers", {
"matrix.org": {"verify_keys": {
"ed25519:auto": {
"key": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw",
}
}}
})
)
self.macaroon_secret_key = config.get(
@@ -117,9 +78,8 @@ class KeyConfig(Config):
# falsification of values
self.form_secret = config.get("form_secret", None)
def default_config(
self, config_dir_path, server_name, generate_secrets=False, **kwargs
):
def default_config(self, config_dir_path, server_name, generate_secrets=False,
**kwargs):
base_key_name = os.path.join(config_dir_path, server_name)
if generate_secrets:
@@ -131,8 +91,7 @@ class KeyConfig(Config):
macaroon_secret_key = "# macaroon_secret_key: <PRIVATE STRING>"
form_secret = "# form_secret: <PRIVATE STRING>"
return (
"""\
return """\
# a secret which is used to sign access tokens. If none is specified,
# the registration_shared_secret is used, if one is given; otherwise,
# a secret key is derived from the signing key.
@@ -174,53 +133,33 @@ class KeyConfig(Config):
# The trusted servers to download signing keys from.
#
# When we need to fetch a signing key, each server is tried in parallel.
#
# Normally, the connection to the key server is validated via TLS certificates.
# Additional security can be provided by configuring a `verify key`, which
# will make synapse check that the response is signed by that key.
#
# This setting supersedes an older setting named `perspectives`. The old format
# is still supported for backwards-compatibility, but it is deprecated.
#
# Options for each entry in the list include:
#
# server_name: the name of the server. required.
#
# verify_keys: an optional map from key id to base64-encoded public key.
# If specified, we will check that the response is signed by at least
# one of the given keys.
#
# accept_keys_insecurely: a boolean. Normally, if `verify_keys` is unset,
# and federation_verify_certificates is not `true`, synapse will refuse
# to start, because this would allow anyone who can spoof DNS responses
# to masquerade as the trusted key server. If you know what you are doing
# and are sure that your network environment provides a secure connection
# to the key server, you can set this to `true` to override this
# behaviour.
#
# An example configuration might look like:
#
#trusted_key_servers:
# - server_name: "my_trusted_server.example.com"
# verify_keys:
# "ed25519:auto": "abcdefghijklmnopqrstuvwxyzabcdefghijklmopqr"
# - server_name: "my_other_trusted_server.example.com"
#
# The default configuration is:
#
#trusted_key_servers:
# - server_name: "matrix.org"
"""
% locals()
)
#perspectives:
# servers:
# "matrix.org":
# verify_keys:
# "ed25519:auto":
# key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
""" % locals()
def read_perspectives(self, perspectives_servers):
servers = {}
for server_name, server_config in perspectives_servers.items():
for key_id, key_data in server_config["verify_keys"].items():
if is_signing_algorithm_supported(key_id):
key_base64 = key_data["key"]
key_bytes = decode_base64(key_base64)
verify_key = decode_verify_key_bytes(key_id, key_bytes)
servers.setdefault(server_name, {})[key_id] = verify_key
return servers
def read_signing_key(self, signing_key_path):
signing_keys = self.read_file(signing_key_path, "signing_key")
try:
return read_signing_keys(signing_keys.splitlines(True))
except Exception as e:
raise ConfigError("Error reading signing_key: %s" % (str(e)))
raise ConfigError(
"Error reading signing_key: %s" % (str(e))
)
def read_old_signing_keys(self, old_signing_keys):
keys = {}
@@ -243,7 +182,9 @@ class KeyConfig(Config):
if not self.path_exists(signing_key_path):
with open(signing_key_path, "w") as signing_key_file:
key_id = "a_" + random_string(4)
write_signing_keys(signing_key_file, (generate_signing_key(key_id),))
write_signing_keys(
signing_key_file, (generate_signing_key(key_id),),
)
else:
signing_keys = self.read_file(signing_key_path, "signing_key")
if len(signing_keys.split("\n")[0].split()) == 1:
@@ -253,116 +194,6 @@ class KeyConfig(Config):
NACL_ED25519, key_id, signing_keys.split("\n")[0]
)
with open(signing_key_path, "w") as signing_key_file:
write_signing_keys(signing_key_file, (key,))
def _perspectives_to_key_servers(config):
"""Convert old-style 'perspectives' configs into new-style 'trusted_key_servers'
Returns an iterable of entries to add to trusted_key_servers.
"""
# 'perspectives' looks like:
#
# {
# "servers": {
# "matrix.org": {
# "verify_keys": {
# "ed25519:auto": {
# "key": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
# }
# }
# }
# }
# }
#
# 'trusted_keys' looks like:
#
# [
# {
# "server_name": "matrix.org",
# "verify_keys": {
# "ed25519:auto": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw",
# }
# }
# ]
perspectives_servers = config.get("perspectives", {}).get("servers", {})
for server_name, server_opts in perspectives_servers.items():
trusted_key_server_entry = {"server_name": server_name}
verify_keys = server_opts.get("verify_keys")
if verify_keys is not None:
trusted_key_server_entry["verify_keys"] = {
key_id: key_data["key"] for key_id, key_data in verify_keys.items()
}
yield trusted_key_server_entry
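As a worked example (key value abbreviated), the conversion flattens the nested 'perspectives' mapping into trusted_key_servers entries:

>>> cfg = {"perspectives": {"servers": {"matrix.org": {
...     "verify_keys": {"ed25519:auto": {"key": "Noi6WqcDj0Qm"}}}}}}
>>> list(_perspectives_to_key_servers(cfg))
[{'server_name': 'matrix.org', 'verify_keys': {'ed25519:auto': 'Noi6WqcDj0Qm'}}]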
TRUSTED_KEY_SERVERS_SCHEMA = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "schema for the trusted_key_servers setting",
"type": "array",
"items": {
"type": "object",
"properties": {
"server_name": {"type": "string"},
"verify_keys": {
"type": "object",
# each key must be a base64 string
"additionalProperties": {"type": "string"},
},
},
"required": ["server_name"],
},
}
def _parse_key_servers(key_servers, federation_verify_certificates):
try:
jsonschema.validate(key_servers, TRUSTED_KEY_SERVERS_SCHEMA)
except jsonschema.ValidationError as e:
raise ConfigError("Unable to parse 'trusted_key_servers': " + e.message)
for server in key_servers:
server_name = server["server_name"]
result = TrustedKeyServer(server_name=server_name)
verify_keys = server.get("verify_keys")
if verify_keys is not None:
result.verify_keys = {}
for key_id, key_base64 in verify_keys.items():
if not is_signing_algorithm_supported(key_id):
raise ConfigError(
"Unsupported signing algorithm on key %s for server %s in "
"trusted_key_servers" % (key_id, server_name)
write_signing_keys(
signing_key_file, (key,),
)
try:
key_bytes = decode_base64(key_base64)
verify_key = decode_verify_key_bytes(key_id, key_bytes)
except Exception as e:
raise ConfigError(
"Unable to parse key %s for server %s in "
"trusted_key_servers: %s" % (key_id, server_name, e)
)
result.verify_keys[key_id] = verify_key
if (
not federation_verify_certificates and
not server.get("accept_keys_insecurely")
):
_assert_keyserver_has_verify_keys(result)
yield result
def _assert_keyserver_has_verify_keys(trusted_key_server):
if not trusted_key_server.verify_keys:
raise ConfigError(INSECURE_NOTARY_ERROR)
# also check that they are not blindly checking the old matrix.org key
if trusted_key_server.server_name == "matrix.org" and any(
key_id == "ed25519:auto" for key_id in trusted_key_server.verify_keys
):
raise ConfigError(RELYING_ON_MATRIX_KEY_ERROR)

View File

@@ -1,7 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2015-2016 OpenMarket Ltd
# Copyright 2017-2018 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -30,10 +28,6 @@ class PasswordConfig(Config):
self.password_enabled = password_config.get("enabled", True)
self.password_pepper = password_config.get("pepper", "")
# Password policy
self.password_policy = password_config.get("policy", {})
self.password_policy_enabled = self.password_policy.pop("enabled", False)
def default_config(self, config_dir_path, server_name, **kwargs):
return """\
password_config:
@@ -45,34 +39,4 @@ class PasswordConfig(Config):
# DO NOT CHANGE THIS AFTER INITIAL SETUP!
#
#pepper: "EVEN_MORE_SECRET"
# Define and enforce a password policy. Each parameter is optional, boolean
# parameters default to 'false' and integer parameters default to 0.
# This is an early implementation of MSC2000.
#
#policy:
# Whether to enforce the password policy.
#
#enabled: true
# Minimum accepted length for a password.
#
#minimum_length: 15
# Whether a password must contain at least one digit.
#
#require_digit: true
# Whether a password must contain at least one symbol.
# A symbol is any character that's not a number or a letter.
#
#require_symbol: true
# Whether a password must contain at least one lowercase letter.
#
#require_lowercase: true
# Whether a password must contain at least one uppercase letter.
#
#require_uppercase: true
"""

View File

@@ -13,11 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from distutils.util import strtobool
import pkg_resources
from synapse.config._base import Config, ConfigError
from synapse.types import RoomAlias
from synapse.util.stringutils import random_string_with_symbols
@@ -42,38 +39,8 @@ class AccountValidityConfig(Config):
else:
self.renew_email_subject = "Renew your %(app)s account"
self.startup_job_max_delta = self.period * 10. / 100.
if self.renew_by_email_enabled:
if "public_baseurl" not in synapse_config:
raise ConfigError("Can't send renewal emails without 'public_baseurl'")
template_dir = config.get("template_dir")
if not template_dir:
template_dir = pkg_resources.resource_filename("synapse", "res/templates")
if "account_renewed_html_path" in config:
file_path = os.path.join(template_dir, config["account_renewed_html_path"])
self.account_renewed_html_content = self.read_file(
file_path, "account_validity.account_renewed_html_path"
)
else:
self.account_renewed_html_content = (
"<html><body>Your account has been successfully renewed.</body><html>"
)
if "invalid_token_html_path" in config:
file_path = os.path.join(template_dir, config["invalid_token_html_path"])
self.invalid_token_html_content = self.read_file(
file_path, "account_validity.invalid_token_html_path"
)
else:
self.invalid_token_html_content = (
"<html><body>Invalid renewal token.</body><html>"
)
if self.renew_by_email_enabled and "public_baseurl" not in synapse_config:
raise ConfigError("Can't send renewal emails without 'public_baseurl'")
class RegistrationConfig(Config):
@@ -93,19 +60,8 @@ class RegistrationConfig(Config):
self.registrations_require_3pid = config.get("registrations_require_3pid", [])
self.allowed_local_3pids = config.get("allowed_local_3pids", [])
self.check_is_for_allowed_local_3pids = config.get(
"check_is_for_allowed_local_3pids", None
)
self.allow_invited_3pids = config.get("allow_invited_3pids", False)
self.disable_3pid_changes = config.get("disable_3pid_changes", False)
self.enable_3pid_lookup = config.get("enable_3pid_lookup", True)
self.registration_shared_secret = config.get("registration_shared_secret")
self.register_mxid_from_3pid = config.get("register_mxid_from_3pid")
self.register_just_use_email_for_display_name = config.get(
"register_just_use_email_for_display_name", False,
)
self.bcrypt_rounds = config.get("bcrypt_rounds", 12)
self.trusted_third_party_id_servers = config.get(
@@ -125,16 +81,6 @@ class RegistrationConfig(Config):
raise ConfigError('Invalid auto_join_rooms entry %s' % (room_alias,))
self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True)
self.disable_set_displayname = config.get("disable_set_displayname", False)
self.disable_set_avatar_url = config.get("disable_set_avatar_url", False)
self.replicate_user_profiles_to = config.get("replicate_user_profiles_to", [])
if not isinstance(self.replicate_user_profiles_to, list):
self.replicate_user_profiles_to = [self.replicate_user_profiles_to, ]
self.shadow_server = config.get("shadow_server", None)
self.rewrite_identity_server_urls = config.get("rewrite_identity_server_urls", {})
self.disable_msisdn_registration = (
config.get("disable_msisdn_registration", False)
)
@@ -183,25 +129,13 @@ class RegistrationConfig(Config):
# This means that, if a validity period is set, and Synapse is restarted (it will
# then derive an expiration date from the current validity period), and some time
# after that the validity period changes and Synapse is restarted, the users'
# expiration dates won't be updated unless their account is manually renewed. This
# date will be randomly selected within a range [now + period - d ; now + period],
# where d is equal to 10%% of the validity period.
# expiration dates won't be updated unless their account is manually renewed.
#
#account_validity:
# enabled: True
# period: 6w
# renew_at: 1w
# renew_email_subject: "Renew your %%(app)s account"
# # Directory in which Synapse will try to find the HTML files to serve to the
# # user when trying to renew an account. Optional, defaults to
# # synapse/res/templates.
# template_dir: "res/templates"
# # HTML to be displayed to the user after they successfully renewed their
# # account. Optional.
# account_renewed_html_path: "account_renewed.html"
# # HTML to be displayed when the user tries to renew an account with an invalid
# # renewal token. Optional.
# invalid_token_html_path: "invalid_token.html"
# The user must provide all of the below types of 3PID when registering.
#
@@ -214,32 +148,9 @@ class RegistrationConfig(Config):
#
#disable_msisdn_registration: true
# Derive the user's matrix ID from a type of 3PID used when registering.
# This overrides any matrix ID the user proposes when calling /register
# The 3PID type should be present in registrations_require_3pid to avoid
# users failing to register if they don't specify the right kind of 3pid.
#
#register_mxid_from_3pid: email
# Uncomment to set the display name of new users to their email address,
# rather than using the default heuristic.
#
#register_just_use_email_for_display_name: true
# Mandate that users are only allowed to associate certain formats of
# 3PIDs with accounts on this server.
#
# Use an Identity Server to establish which 3PIDs are allowed to register?
# Overrides allowed_local_3pids below.
#
#check_is_for_allowed_local_3pids: matrix.org
#
# If you are using an IS you can also check whether that IS registers
# pending invites for the given 3PID (and then allow it to sign up on
# the platform):
#
#allow_invited_3pids: False
#
#allowed_local_3pids:
# - medium: email
# pattern: '.*@matrix\\.org'
@@ -248,11 +159,6 @@ class RegistrationConfig(Config):
# - medium: msisdn
# pattern: '\\+44'
# If true, stop users from trying to change the 3PIDs associated with
# their accounts.
#
#disable_3pid_changes: False
# Enable 3PIDs lookup requests to identity servers from this server.
#
#enable_3pid_lookup: true
@@ -294,30 +200,6 @@ class RegistrationConfig(Config):
# - matrix.org
# - vector.im
# If enabled, user IDs, display names and avatar URLs will be replicated
# to this server whenever they change.
# This is an experimental API currently implemented by sydent to support
# cross-homeserver user directories.
#
#replicate_user_profiles_to: example.com
# If specified, attempt to replay registrations, profile changes & 3pid
# bindings on the given target homeserver via the AS API. The HS is authed
# via a given AS token.
#
#shadow_server:
# hs_url: https://shadow.example.com
# hs: shadow.example.com
# as_token: 12u394refgbdhivsia
# If enabled, don't let users set their own display names/avatars
# other than for the very first time (unless they are a server admin).
# Useful when provisioning users based on the contents of a 3rd party
# directory and to avoid ambiguities.
#
#disable_set_displayname: False
#disable_set_avatar_url: False
# Users who register on this homeserver will automatically be joined
# to these rooms
#

View File

@@ -20,7 +20,6 @@ import os.path
from netaddr import IPSet
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.http.endpoint import parse_and_validate_server_name
from synapse.python_dependencies import DependencyException, check_requirements
logger = logging.getLogger(__name__)
# in the list.
DEFAULT_BIND_ADDRESSES = ['::', '0.0.0.0']
DEFAULT_ROOM_VERSION = "4"
class ServerConfig(Config):
@@ -84,49 +81,13 @@ class ServerConfig(Config):
"require_auth_for_profile_requests", False,
)
if "restrict_public_rooms_to_local_users" in config and (
"allow_public_rooms_without_auth" in config
or "allow_public_rooms_over_federation" in config
):
raise ConfigError(
"Can't use 'restrict_public_rooms_to_local_users' if"
" 'allow_public_rooms_without_auth' and/or"
" 'allow_public_rooms_over_federation' is set."
)
# Check if the legacy "restrict_public_rooms_to_local_users" flag is set. This
# flag is now obsolete but we need to check it for backward-compatibility.
if config.get("restrict_public_rooms_to_local_users", False):
self.allow_public_rooms_without_auth = False
self.allow_public_rooms_over_federation = False
else:
# If set to 'False', requires authentication to access the server's public
# rooms directory through the client API. Defaults to 'True'.
self.allow_public_rooms_without_auth = config.get(
"allow_public_rooms_without_auth", True
)
# If set to 'False', forbids any other homeserver to fetch the server's public
# rooms directory via federation. Defaults to 'True'.
self.allow_public_rooms_over_federation = config.get(
"allow_public_rooms_over_federation", True
)
default_room_version = config.get(
"default_room_version", DEFAULT_ROOM_VERSION,
# If set to 'True', requires authentication to access the server's
# public rooms directory through the client API, and forbids any other
# homeserver to fetch it via federation.
self.restrict_public_rooms_to_local_users = config.get(
"restrict_public_rooms_to_local_users", False,
)
# Ensure room version is a str
default_room_version = str(default_room_version)
if default_room_version not in KNOWN_ROOM_VERSIONS:
raise ConfigError(
"Unknown default_room_version: %s, known room versions: %s" %
(default_room_version, list(KNOWN_ROOM_VERSIONS.keys()))
)
# Get the actual room version object rather than just the identifier
self.default_room_version = KNOWN_ROOM_VERSIONS[default_room_version]
# whether to enable search. If disabled, new entries will not be inserted
# into the search tables and they will not be indexed. Users will receive
# errors when attempting to search for messages.
@@ -222,121 +183,6 @@ class ServerConfig(Config):
# events with profile information that differ from the target's global profile.
self.allow_per_room_profiles = config.get("allow_per_room_profiles", True)
# Whether to show the users on this homeserver in the user directory. Defaults to
# True.
self.show_users_in_user_directory = config.get(
"show_users_in_user_directory", True,
)
retention_config = config.get("retention")
if retention_config is None:
retention_config = {}
self.retention_enabled = retention_config.get("enabled", False)
retention_default_policy = retention_config.get("default_policy")
if retention_default_policy is not None:
self.retention_default_min_lifetime = retention_default_policy.get(
"min_lifetime"
)
if self.retention_default_min_lifetime is not None:
self.retention_default_min_lifetime = self.parse_duration(
self.retention_default_min_lifetime
)
self.retention_default_max_lifetime = retention_default_policy.get(
"max_lifetime"
)
if self.retention_default_max_lifetime is not None:
self.retention_default_max_lifetime = self.parse_duration(
self.retention_default_max_lifetime
)
if (
self.retention_default_min_lifetime is not None
and self.retention_default_max_lifetime is not None
and (
self.retention_default_min_lifetime
> self.retention_default_max_lifetime
)
):
raise ConfigError(
"The default retention policy's 'min_lifetime' can not be greater"
" than its 'max_lifetime'"
)
else:
self.retention_default_min_lifetime = None
self.retention_default_max_lifetime = None
self.retention_allowed_lifetime_min = retention_config.get("allowed_lifetime_min")
if self.retention_allowed_lifetime_min is not None:
self.retention_allowed_lifetime_min = self.parse_duration(
self.retention_allowed_lifetime_min
)
self.retention_allowed_lifetime_max = retention_config.get("allowed_lifetime_max")
if self.retention_allowed_lifetime_max is not None:
self.retention_allowed_lifetime_max = self.parse_duration(
self.retention_allowed_lifetime_max
)
if (
self.retention_allowed_lifetime_min is not None
and self.retention_allowed_lifetime_max is not None
and self.retention_allowed_lifetime_min > self.retention_allowed_lifetime_max
):
raise ConfigError(
"Invalid retention policy limits: 'allowed_lifetime_min' can not be"
" greater than 'allowed_lifetime_max'"
)
self.retention_purge_jobs = []
for purge_job_config in retention_config.get("purge_jobs", []):
interval_config = purge_job_config.get("interval")
if interval_config is None:
raise ConfigError(
"A retention policy's purge jobs configuration must have the"
" 'interval' key set."
)
interval = self.parse_duration(interval_config)
shortest_max_lifetime = purge_job_config.get("shortest_max_lifetime")
if shortest_max_lifetime is not None:
shortest_max_lifetime = self.parse_duration(shortest_max_lifetime)
longest_max_lifetime = purge_job_config.get("longest_max_lifetime")
if longest_max_lifetime is not None:
longest_max_lifetime = self.parse_duration(longest_max_lifetime)
if (
shortest_max_lifetime is not None
and longest_max_lifetime is not None
and shortest_max_lifetime > longest_max_lifetime
):
raise ConfigError(
"A retention policy's purge jobs configuration's"
" 'shortest_max_lifetime' value can not be greater than its"
" 'longest_max_lifetime' value."
)
self.retention_purge_jobs.append({
"interval": interval,
"shortest_max_lifetime": shortest_max_lifetime,
"longest_max_lifetime": longest_max_lifetime,
})
if not self.retention_purge_jobs:
self.retention_purge_jobs = [{
"interval": self.parse_duration("1d"),
"shortest_max_lifetime": None,
"longest_max_lifetime": None,
}]
self.listeners = []
for listener in config.get("listeners", []):
if not isinstance(listener.get("port", None), int):
@@ -464,10 +310,6 @@ class ServerConfig(Config):
unsecure_port = 8008
pid_file = os.path.join(data_dir_path, "homeserver.pid")
# Bring DEFAULT_ROOM_VERSION into the local-scope for use in the
# default config string
default_room_version = DEFAULT_ROOM_VERSION
return """\
## Server ##
@@ -536,25 +378,11 @@ class ServerConfig(Config):
#
#require_auth_for_profile_requests: true
# If set to 'false', requires authentication to access the server's public rooms
# directory through the client API. Defaults to 'true'.
# If set to 'true', requires authentication to access the server's
# public rooms directory through the client API, and forbids any other
# homeserver to fetch it via federation. Defaults to 'false'.
#
#allow_public_rooms_without_auth: false
# If set to 'false', forbids any other homeserver to fetch the server's public
# rooms directory via federation. Defaults to 'true'.
#
#allow_public_rooms_over_federation: false
# The default room version for newly created rooms.
#
# Known room versions are listed here:
# https://matrix.org/docs/spec/#complete-list-of-room-versions
#
# For example, for room version 1, default_room_version should be set
# to "1".
#
#default_room_version: "%(default_room_version)s"
#restrict_public_rooms_to_local_users: true
# The GC threshold parameters to pass to `gc.set_threshold`, if defined
#
@@ -724,22 +552,6 @@ class ServerConfig(Config):
# Monthly Active User Blocking
#
# Used in cases where the admin or server owner wants to limit the
# number of monthly active users.
#
# 'limit_usage_by_mau' disables/enables monthly active user blocking. When
# enabled and a limit is reached, the server returns a 'ResourceLimitError'
# with error type Codes.RESOURCE_LIMIT_EXCEEDED
#
# 'max_mau_value' is the hard limit of monthly active users above which
# the server will start blocking user actions.
#
# 'mau_trial_days' is a means to add a grace period for active users. It
# means that users must be active for this number of days before they
# can be considered active, and guards against the case where lots of users
# sign up in a short space of time and never return after their initial
# session.
#
#limit_usage_by_mau: False
#max_mau_value: 50
#mau_trial_days: 2
@@ -770,74 +582,6 @@ class ServerConfig(Config):
# Defaults to 'true'.
#
#allow_per_room_profiles: false
# Whether to show the users on this homeserver in the user directory. Defaults to
# 'true'.
#
#show_users_in_user_directory: false
# Message retention policy at the server level.
#
# Room admins and mods can define a retention period for their rooms using the
# 'm.room.retention' state event, and server admins can cap this period by setting
# the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
#
# If this feature is enabled, Synapse will regularly look for and purge events
# which are older than the room's maximum retention period. Synapse will also
# filter events received over federation so that events that should have been
# purged are ignored and not stored again.
#
retention:
# The message retention policies feature is disabled by default. Uncomment the
# following line to enable it.
#
#enabled: true
# Default retention policy. If set, Synapse will apply it to rooms that lack the
# 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
# matter much because Synapse doesn't take it into account yet.
#
#default_policy:
# min_lifetime: 1d
# max_lifetime: 1y
# Retention policy limits. If set, a user won't be able to send a
# 'm.room.retention' event which features a 'min_lifetime' or a 'max_lifetime'
# that's not within this range. This is especially useful in closed federations,
# in which server admins can make sure every federating server applies the same
# rules.
#
#allowed_lifetime_min: 1d
#allowed_lifetime_max: 1y
# Server admins can define the settings of the background jobs purging the
# events whose lifetime has expired under the 'purge_jobs' section.
#
# If no configuration is provided, a single job will be set up to delete expired
# events in every room daily.
#
# Each job's configuration defines which range of message lifetimes the job
# takes care of. For example, if 'shortest_max_lifetime' is '2d' and
# 'longest_max_lifetime' is '3d', the job will handle purging expired events in
# rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and
# lower than or equal to 3 days. Both the minimum and the maximum value of a
# range are optional, e.g. a job with no 'shortest_max_lifetime' and a
# 'longest_max_lifetime' of '3d' will handle every room with a retention policy
# whose 'max_lifetime' is lower than or equal to three days.
#
# The rationale for this per-job configuration is that some rooms might have a
# retention policy with a low 'max_lifetime', where history needs to be purged
# of outdated messages on a very frequent basis (e.g. every 5min), but not want
# that purge to be performed by a job that's iterating over every room it knows,
# which would be quite heavy on the server.
#
#purge_jobs:
# - shortest_max_lifetime: 1d
# longest_max_lifetime: 3d
# interval: 5m
# - shortest_max_lifetime: 3d
# longest_max_lifetime: 1y
# interval: 24h
""" % locals()
def read_arguments(self, args):

View File

@@ -1,42 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.util.module_loader import load_module
from ._base import Config
class ThirdPartyRulesConfig(Config):
def read_config(self, config):
self.third_party_event_rules = None
provider = config.get("third_party_event_rules", None)
if provider is not None:
self.third_party_event_rules = load_module(provider)
def default_config(self, **kwargs):
return """\
# Server admins can define a Python module that implements extra rules for
# allowing or denying incoming events. In order to work, this module needs to
# override the methods defined in synapse/events/third_party_rules.py.
#
# This feature is designed to be used in closed federations only, where each
# participating server enforces the same rules.
#
#third_party_event_rules:
# module: "my_custom_project.SuperRulesSet"
# config:
# example_option: 'things'
"""

View File

@@ -74,7 +74,7 @@ class TlsConfig(Config):
# Whether to verify certificates on outbound federation traffic
self.federation_verify_certificates = config.get(
"federation_verify_certificates", True,
"federation_verify_certificates", False,
)
# Whitelist of domains to not verify certificates for
@@ -107,7 +107,7 @@ class TlsConfig(Config):
certs = []
for ca_file in custom_ca_list:
logger.debug("Reading custom CA certificate file: %s", ca_file)
content = self.read_file(ca_file, "federation_custom_ca_list")
content = self.read_file(ca_file)
# Parse the CA certificates
try:
@@ -241,12 +241,12 @@ class TlsConfig(Config):
#
#tls_private_key_path: "%(tls_private_key_path)s"
# Whether to verify TLS server certificates for outbound federation requests.
# Whether to verify TLS certificates when sending federation traffic.
#
# Defaults to `true`. To disable certificate verification, uncomment the
# following line.
# This currently defaults to `false`, however this will change in
# Synapse 1.0 when valid federation certificates will be required.
#
#federation_verify_certificates: false
#federation_verify_certificates: true
# Skip federation certificate verification on the following whitelist
# of domains.

View File

@@ -24,7 +24,6 @@ class UserDirectoryConfig(Config):
def read_config(self, config):
self.user_directory_search_enabled = True
self.user_directory_search_all_users = False
self.user_directory_defer_to_id_server = None
user_directory_config = config.get("user_directory", None)
if user_directory_config:
self.user_directory_search_enabled = (
@@ -33,9 +32,6 @@ class UserDirectoryConfig(Config):
self.user_directory_search_all_users = (
user_directory_config.get("search_all_users", False)
)
self.user_directory_defer_to_id_server = (
user_directory_config.get("defer_to_id_server", None)
)
def default_config(self, config_dir_path, server_name, **kwargs):
return """
@@ -47,16 +43,11 @@ class UserDirectoryConfig(Config):
#
# 'search_all_users' defines whether to search all users visible to your HS
# when searching the user directory, rather than limiting to users visible
# in public rooms. Defaults to false. If you set it True, you'll have to
# rebuild the user_directory search indexes, see
# https://github.com/matrix-org/synapse/blob/master/docs/user_directory.md
# in public rooms. Defaults to false. If you set it True, you'll have to run
# UPDATE user_directory_stream_pos SET stream_id = NULL;
# on your database to tell it to rebuild the user_directory search indexes.
#
#user_directory:
# enabled: true
# search_all_users: false
#
# # If this is set, user search will be delegated to this ID server instead
# # of synapse performing the search itself.
# # This is an experimental API.
# defer_to_id_server: https://id.example.com
"""

View File

@@ -15,13 +15,10 @@
import logging
import idna
from service_identity import VerificationError
from service_identity.pyopenssl import verify_hostname, verify_ip_address
from zope.interface import implementer
from OpenSSL import SSL, crypto
from twisted.internet._sslverify import _defaultCurveName
from twisted.internet._sslverify import ClientTLSOptions, _defaultCurveName
from twisted.internet.abstract import isIPAddress, isIPv6Address
from twisted.internet.interfaces import IOpenSSLClientConnectionCreator
from twisted.internet.ssl import CertificateOptions, ContextFactory, platformTrust
@@ -59,19 +56,79 @@ class ServerContextFactory(ContextFactory):
return self._context
class ClientTLSOptionsFactory(object):
"""Factory for Twisted SSLClientConnectionCreators that are used to make connections
to remote servers for federation.
Uses one of two OpenSSL context objects for all connections, depending on whether
we should do SSL certificate verification.
get_options decides whether we should do SSL certificate verification and
constructs an SSLClientConnectionCreator factory accordingly.
def _idnaBytes(text):
"""
Convert some text typed by a human into some ASCII bytes. This is a
copy of twisted.internet._idna._idnaBytes. For documentation, see the
twisted documentation.
"""
try:
import idna
except ImportError:
return text.encode("idna")
else:
return idna.encode(text)
def _tolerateErrors(wrapped):
"""
Wrap up an info_callback for pyOpenSSL so that if something goes wrong
the error is immediately logged and the connection is dropped if possible.
This is a copy of twisted.internet._sslverify._tolerateErrors. For
documentation, see the twisted documentation.
"""
def infoCallback(connection, where, ret):
try:
return wrapped(connection, where, ret)
except: # noqa: E722, taken from the twisted implementation
f = Failure()
logger.exception("Error during info_callback")
connection.get_app_data().failVerification(f)
return infoCallback
@implementer(IOpenSSLClientConnectionCreator)
class ClientTLSOptionsNoVerify(object):
"""
Client creator for TLS without certificate identity verification. This is a
copy of twisted.internet._sslverify.ClientTLSOptions with the identity
verification left out. For documentation, see the twisted documentation.
"""
def __init__(self, hostname, ctx):
self._ctx = ctx
if isIPAddress(hostname) or isIPv6Address(hostname):
self._hostnameBytes = hostname.encode('ascii')
self._sendSNI = False
else:
self._hostnameBytes = _idnaBytes(hostname)
self._sendSNI = True
ctx.set_info_callback(_tolerateErrors(self._identityVerifyingInfoCallback))
def clientConnectionForTLS(self, tlsProtocol):
context = self._ctx
connection = SSL.Connection(context, None)
connection.set_app_data(tlsProtocol)
return connection
def _identityVerifyingInfoCallback(self, connection, where, ret):
# Literal IPv4 and IPv6 addresses are not permitted
# as host names according to the RFCs
if where & SSL.SSL_CB_HANDSHAKE_START and self._sendSNI:
connection.set_tlsext_host_name(self._hostnameBytes)
class ClientTLSOptionsFactory(object):
"""Factory for Twisted ClientTLSOptions that are used to make connections
to remote servers for federation."""
def __init__(self, config):
self._config = config
self._options_noverify = CertificateOptions()
# Check if we're using a custom list of a CA certificates
trust_root = config.federation_ca_trust_root
@@ -79,13 +136,11 @@ class ClientTLSOptionsFactory(object):
# Use CA root certs provided by OpenSSL
trust_root = platformTrust()
self._verify_ssl_context = CertificateOptions(trustRoot=trust_root).getContext()
self._verify_ssl_context.set_info_callback(self._context_info_cb)
self._no_verify_ssl_context = CertificateOptions().getContext()
self._no_verify_ssl_context.set_info_callback(self._context_info_cb)
self._options_verify = CertificateOptions(trustRoot=trust_root)
def get_options(self, host):
# Use _makeContext so that we get a fresh OpenSSL CTX each time.
# Check if certificate verification has been enabled
should_verify = self._config.federation_verify_certificates
@@ -96,93 +151,6 @@ class ClientTLSOptionsFactory(object):
should_verify = False
break
ssl_context = (
self._verify_ssl_context if should_verify else self._no_verify_ssl_context
)
return SSLClientConnectionCreator(host, ssl_context, should_verify)
@staticmethod
def _context_info_cb(ssl_connection, where, ret):
"""The 'information callback' for our openssl context object."""
# we assume that the app_data on the connection object has been set to
# a TLSMemoryBIOProtocol object. (This is done by SSLClientConnectionCreator)
tls_protocol = ssl_connection.get_app_data()
try:
# ... we further assume that SSLClientConnectionCreator has set the
# '_synapse_tls_verifier' attribute to a ConnectionVerifier object.
tls_protocol._synapse_tls_verifier.verify_context_info_cb(
ssl_connection, where
)
except: # noqa: E722, taken from the twisted implementation
logger.exception("Error during info_callback")
f = Failure()
tls_protocol.failVerification(f)
@implementer(IOpenSSLClientConnectionCreator)
class SSLClientConnectionCreator(object):
"""Creates openssl connection objects for client connections.
Replaces twisted.internet.ssl.ClientTLSOptions
"""
def __init__(self, hostname, ctx, verify_certs):
self._ctx = ctx
self._verifier = ConnectionVerifier(hostname, verify_certs)
def clientConnectionForTLS(self, tls_protocol):
context = self._ctx
connection = SSL.Connection(context, None)
# as per twisted.internet.ssl.ClientTLSOptions, we set the application
# data to our TLSMemoryBIOProtocol...
connection.set_app_data(tls_protocol)
# ... and we also gut-wrench a '_synapse_tls_verifier' attribute into the
# tls_protocol so that the SSL context's info callback has something to
# call to do the cert verification.
setattr(tls_protocol, "_synapse_tls_verifier", self._verifier)
return connection
class ConnectionVerifier(object):
"""Set the SNI, and do cert verification
This is a thing which is attached to the TLSMemoryBIOProtocol, and is called by
the ssl context's info callback.
"""
# This code is based on twisted.internet.ssl.ClientTLSOptions.
def __init__(self, hostname, verify_certs):
self._verify_certs = verify_certs
if isIPAddress(hostname) or isIPv6Address(hostname):
self._hostnameBytes = hostname.encode("ascii")
self._is_ip_address = True
else:
# twisted's ClientTLSOptions falls back to the stdlib impl here if
# idna is not installed, but points out that lacks support for
# IDNA2008 (http://bugs.python.org/issue17305).
#
# We can rely on having idna.
self._hostnameBytes = idna.encode(hostname)
self._is_ip_address = False
self._hostnameASCII = self._hostnameBytes.decode("ascii")
def verify_context_info_cb(self, ssl_connection, where):
if where & SSL.SSL_CB_HANDSHAKE_START and not self._is_ip_address:
ssl_connection.set_tlsext_host_name(self._hostnameBytes)
if where & SSL.SSL_CB_HANDSHAKE_DONE and self._verify_certs:
try:
if self._is_ip_address:
verify_ip_address(ssl_connection, self._hostnameASCII)
else:
verify_hostname(ssl_connection, self._hostnameASCII)
except VerificationError:
f = Failure()
tls_protocol = ssl_connection.get_app_data()
tls_protocol.failVerification(f)
if should_verify:
return ClientTLSOptions(host, self._options_verify._makeContext())
return ClientTLSOptionsNoVerify(host, self._options_noverify._makeContext())
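Both branches return an IOpenSSLClientConnectionCreator, so callers can stay agnostic about whether verification happens. A hedged sketch of plugging the factory's output into a Twisted endpoint (illustrative wiring, not Synapse's actual federation client):

from twisted.internet import reactor
from twisted.internet.endpoints import TCP4ClientEndpoint, wrapClientTLS

def federation_tls_endpoint(tls_options_factory, host, port=8448):
    # get_options() picks the verifying or non-verifying creator based on
    # the homeserver config; wrapClientTLS accepts either.
    creator = tls_options_factory.get_options(host)
    return wrapClientTLS(creator, TCP4ClientEndpoint(reactor, host, port))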

View File

@@ -31,11 +31,7 @@ logger = logging.getLogger(__name__)
def check_event_content_hash(event, hash_algorithm=hashlib.sha256):
"""Check whether the hash for this PDU matches the contents"""
name, expected_hash = compute_content_hash(event.get_pdu_json(), hash_algorithm)
logger.debug(
"Verifying content hash on %s (expecting: %s)",
event.event_id,
encode_base64(expected_hash),
)
logger.debug("Expecting hash: %s", encode_base64(expected_hash))
# some malformed events lack a 'hashes'. Protect against it being missing
# or a weird type by basically treating it the same as an unhashed event.
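For reference, a sketch of the expected-hash side of this check, built from helpers this module already uses (the wrapper function name is invented):

import hashlib

from unpaddedbase64 import encode_base64

from synapse.crypto.event_signing import compute_content_hash

def expected_content_hash(event, hash_algorithm=hashlib.sha256):
    # Hash the event's PDU JSON and render it the way the debug log does.
    name, expected = compute_content_hash(event.get_pdu_json(), hash_algorithm)
    return "%s:%s" % (name, encode_base64(expected))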

File diff suppressed because it is too large

View File

@@ -76,7 +76,6 @@ class EventBuilder(object):
# someone tries to get them when they don't exist.
_state_key = attr.ib(default=None)
_redacts = attr.ib(default=None)
_origin_server_ts = attr.ib(default=None)
internal_metadata = attr.ib(default=attr.Factory(lambda: _EventInternalMetadata({})))
@@ -143,9 +142,6 @@ class EventBuilder(object):
if self._redacts is not None:
event_dict["redacts"] = self._redacts
if self._origin_server_ts is not None:
event_dict["origin_server_ts"] = self._origin_server_ts
defer.returnValue(
create_local_event_from_event_dict(
clock=self._clock,
@@ -213,7 +209,6 @@ class EventBuilderFactory(object):
content=key_values.get("content", {}),
unsigned=key_values.get("unsigned", {}),
redacts=key_values.get("redacts", None),
origin_server_ts=key_values.get("origin_server_ts", None),
)
@@ -250,7 +245,7 @@ def create_local_event_from_event_dict(clock, hostname, signing_key,
event_dict["event_id"] = _create_event_id(clock, hostname)
event_dict["origin"] = hostname
event_dict.setdefault("origin_server_ts", time_now)
event_dict["origin_server_ts"] = time_now
event_dict.setdefault("unsigned", {})
age = event_dict["unsigned"].pop("age", 0)

View File

@@ -46,26 +46,13 @@ class SpamChecker(object):
return self.spam_checker.check_event_for_spam(event)
def user_may_invite(self, inviter_userid, invitee_userid, third_party_invite,
room_id, new_room, published_room):
def user_may_invite(self, inviter_userid, invitee_userid, room_id):
"""Checks if a given user may send an invite
If this method returns false, the invite will be rejected.
Args:
inviter_userid (str)
invitee_userid (str|None): The user ID of the invitee. Is None
if this is a third party invite and the 3PID is not bound to a
user ID.
third_party_invite (dict|None): If a third party invite then is a
dict containing the medium and address of the invitee.
room_id (str)
new_room (bool): Whether the user is being invited to the room as
part of a room creation, if so the invitee would have been
included in the call to `user_may_create_room`.
published_room (bool): Whether the room the user is being invited
to has been published in the local homeserver's public room
directory.
userid (string): The sender's user ID
Returns:
bool: True if the user may send an invite, otherwise False
@@ -73,25 +60,15 @@ class SpamChecker(object):
if self.spam_checker is None:
return True
return self.spam_checker.user_may_invite(
inviter_userid, invitee_userid, third_party_invite, room_id, new_room,
published_room,
)
return self.spam_checker.user_may_invite(inviter_userid, invitee_userid, room_id)
def user_may_create_room(self, userid, invite_list, third_party_invite_list,
cloning):
def user_may_create_room(self, userid):
"""Checks if a given user may create a room
If this method returns false, the creation request will be rejected.
Args:
userid (string): The sender's user ID
invite_list (list[str]): List of user IDs that would be invited to
the new room.
third_party_invite_list (list[dict]): List of third party invites
for the new room.
cloning (bool): Whether the user is cloning an existing room, e.g.
upgrading a room.
Returns:
bool: True if the user may create a room, otherwise False
@@ -99,9 +76,7 @@ class SpamChecker(object):
if self.spam_checker is None:
return True
return self.spam_checker.user_may_create_room(
userid, invite_list, third_party_invite_list, cloning,
)
return self.spam_checker.user_may_create_room(userid)
def user_may_create_room_alias(self, userid, room_alias):
"""Checks if a given user may create a room alias
@@ -136,21 +111,3 @@ class SpamChecker(object):
return True
return self.spam_checker.user_may_publish_room(userid, room_id)
def user_may_join_room(self, userid, room_id, is_invited):
"""Checks if a given users is allowed to join a room.
Is not called when the user creates a room.
Args:
userid (str)
room_id (str)
is_invited (bool): Whether the user is invited into the room
Returns:
bool: Whether the user may join the room
"""
if self.spam_checker is None:
return True
return self.spam_checker.user_may_join_room(userid, room_id, is_invited)
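Taken together, these callbacks (with the extended signatures from one side of this diff) imply a spam-checker module shaped roughly like the sketch below; the constructor shape and every policy decision here are invented for illustration.

class ExampleSpamChecker(object):
    def __init__(self, config):
        self.config = config

    def check_event_for_spam(self, event):
        return False  # never flag events as spam

    def user_may_invite(self, inviter_userid, invitee_userid,
                        third_party_invite, room_id, new_room,
                        published_room):
        # e.g. forbid invites into rooms published in the room directory.
        return not published_room

    def user_may_create_room(self, userid, invite_list,
                             third_party_invite_list, cloning):
        # Always allow clones (room upgrades); cap the size of bulk invites.
        return cloning or len(invite_list) <= 100

    def user_may_create_room_alias(self, userid, room_alias):
        return True

    def user_may_publish_room(self, userid, room_id):
        return True

    def user_may_join_room(self, userid, room_id, is_invited):
        # Only let users join rooms they were invited to (not called when
        # the user creates the room).
        return is_invited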

View File

@@ -1,114 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
class ThirdPartyEventRules(object):
"""Allows server admins to provide a Python module implementing an extra
set of rules to apply when processing events.
This is designed to help admins of closed federations with enforcing custom
behaviours.
"""
def __init__(self, hs):
self.third_party_rules = None
self.store = hs.get_datastore()
module = None
config = None
if hs.config.third_party_event_rules:
module, config = hs.config.third_party_event_rules
if module is not None:
self.third_party_rules = module(
config=config,
http_client=hs.get_simple_http_client(),
)
@defer.inlineCallbacks
def check_event_allowed(self, event, context):
"""Check if a provided event should be allowed in the given context.
Args:
event (synapse.events.EventBase): The event to be checked.
context (synapse.events.snapshot.EventContext): The context of the event.
Returns:
defer.Deferred[bool]: True if the event should be allowed, False if not.
"""
if self.third_party_rules is None:
defer.returnValue(True)
prev_state_ids = yield context.get_prev_state_ids(self.store)
# Retrieve the state events from the database.
state_events = {}
for key, event_id in prev_state_ids.items():
state_events[key] = yield self.store.get_event(event_id, allow_none=True)
ret = yield self.third_party_rules.check_event_allowed(event, state_events)
defer.returnValue(ret)
@defer.inlineCallbacks
def on_create_room(self, requester, config, is_requester_admin):
"""Intercept requests to create room to allow, deny or update the
request config.
Args:
requester (Requester)
config (dict): The creation config from the client.
is_requester_admin (bool): If the requester is an admin
Returns:
defer.Deferred
"""
if self.third_party_rules is None:
return
yield self.third_party_rules.on_create_room(
requester, config, is_requester_admin
)
@defer.inlineCallbacks
def check_threepid_can_be_invited(self, medium, address, room_id):
"""Check if a provided 3PID can be invited in the given room.
Args:
medium (str): The 3PID's medium.
address (str): The 3PID's address.
room_id (str): The room we want to invite the threepid to.
Returns:
defer.Deferred[bool]: True if the 3PID can be invited, False if not.
"""
if self.third_party_rules is None:
defer.returnValue(True)
state_ids = yield self.store.get_filtered_current_state_ids(room_id)
room_state_events = yield self.store.get_events(state_ids.values())
state_events = {}
for key, event_id in state_ids.items():
state_events[key] = room_state_events[event_id]
ret = yield self.third_party_rules.check_threepid_can_be_invited(
medium, address, state_events,
)
defer.returnValue(ret)

View File

@@ -330,13 +330,12 @@ class EventClientSerializer(object):
)
@defer.inlineCallbacks
def serialize_event(self, event, time_now, bundle_aggregations=True, **kwargs):
def serialize_event(self, event, time_now, **kwargs):
"""Serializes a single event.
Args:
event (EventBase)
time_now (int): The current time in milliseconds
bundle_aggregations (bool): Whether to bundle in related events
**kwargs: Arguments to pass to `serialize_event`
Returns:
@@ -351,7 +350,7 @@ class EventClientSerializer(object):
# If MSC1849 is enabled then we need to check if there are any relations
# we need to bundle in with the event
if self.experimental_msc1849_support_enabled and bundle_aggregations:
if self.experimental_msc1849_support_enabled:
annotations = yield self.store.get_aggregation_groups_for_event(
event_id,
)
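One side of this diff threads a bundle_aggregations flag through the serializer; a short usage sketch (assuming that side) of a caller opting out, as the event-stream handler later in this diff does for live events:

from twisted.internet import defer

@defer.inlineCallbacks
def serialize_live_events(serializer, events, time_now):
    # Live /events responses skip bundled aggregations so clients don't
    # double-count annotations they also receive as ordinary events.
    chunks = yield serializer.serialize_events(
        events, time_now, bundle_aggregations=False,
    )
    defer.returnValue(chunks)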

View File

@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from six import integer_types, string_types
from six import string_types
from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes, Membership
from synapse.api.errors import Codes, SynapseError
@@ -22,12 +22,11 @@ from synapse.types import EventID, RoomID, UserID
class EventValidator(object):
def validate_new(self, event, config):
def validate_new(self, event):
"""Validates the event has roughly the right format
Args:
event (FrozenEvent): The event to validate.
config (Config): The homeserver's configuration.
event (FrozenEvent)
"""
self.validate_builder(event)
@@ -68,99 +67,6 @@ class EventValidator(object):
Codes.INVALID_PARAM,
)
if event.type == EventTypes.Retention:
self._validate_retention(event, config)
def _validate_retention(self, event, config):
"""Checks that an event that defines the retention policy for a room respects the
boundaries imposed by the server's administrator.
Args:
event (FrozenEvent): The event to validate.
config (Config): The homeserver's configuration.
"""
min_lifetime = event.content.get("min_lifetime")
max_lifetime = event.content.get("max_lifetime")
if min_lifetime is not None:
if not isinstance(min_lifetime, integer_types):
raise SynapseError(
code=400,
msg="'min_lifetime' must be an integer",
errcode=Codes.BAD_JSON,
)
if (
config.retention_allowed_lifetime_min is not None
and min_lifetime < config.retention_allowed_lifetime_min
):
raise SynapseError(
code=400,
msg=(
"'min_lifetime' can't be lower than the minimum allowed"
" value enforced by the server's administrator"
),
errcode=Codes.BAD_JSON,
)
if (
config.retention_allowed_lifetime_max is not None
and min_lifetime > config.retention_allowed_lifetime_max
):
raise SynapseError(
code=400,
msg=(
"'min_lifetime' can't be greater than the maximum allowed"
" value enforced by the server's administrator"
),
errcode=Codes.BAD_JSON,
)
if max_lifetime is not None:
if not isinstance(max_lifetime, integer_types):
raise SynapseError(
code=400,
msg="'max_lifetime' must be an integer",
errcode=Codes.BAD_JSON,
)
if (
config.retention_allowed_lifetime_min is not None
and max_lifetime < config.retention_allowed_lifetime_min
):
raise SynapseError(
code=400,
msg=(
"'max_lifetime' can't be lower than the minimum allowed value"
" enforced by the server's administrator"
),
errcode=Codes.BAD_JSON,
)
if (
config.retention_allowed_lifetime_max is not None
and max_lifetime > config.retention_allowed_lifetime_max
):
raise SynapseError(
code=400,
msg=(
"'max_lifetime' can't be greater than the maximum allowed"
" value enforced by the server's administrator"
),
errcode=Codes.BAD_JSON,
)
if (
min_lifetime is not None
and max_lifetime is not None
and min_lifetime > max_lifetime
):
raise SynapseError(
code=400,
msg="'min_lifetime' can't be greater than 'max_lifetime",
errcode=Codes.BAD_JSON,
)
def validate_builder(self, event):
"""Validates that the builder/event has roughly the right format. Only
checks values that we expect a proto event to have, rather than all the

View File

@@ -223,6 +223,9 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
the signatures are valid, or fail (with a SynapseError) if not.
"""
# (currently this is written assuming the v1 room structure; we'll probably want a
# separate function for checking v2 rooms)
# we want to check that the event is signed by:
#
# (a) the sender's server
@@ -254,10 +257,6 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
for p in pdus
]
v = KNOWN_ROOM_VERSIONS.get(room_version)
if not v:
raise RuntimeError("Unrecognized room version %s" % (room_version,))
# First we check that the sender event is signed by the sender's domain
# (except if its a 3pid invite, in which case it may be sent by any server)
pdus_to_check_sender = [
@@ -265,17 +264,10 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
if not _is_invite_via_3pid(p.pdu)
]
more_deferreds = keyring.verify_json_objects_for_server(
[
(
p.sender_domain,
p.redacted_pdu_json,
p.pdu.origin_server_ts if v.enforce_key_validity else 0,
p.pdu.event_id,
)
for p in pdus_to_check_sender
]
)
more_deferreds = keyring.verify_json_objects_for_server([
(p.sender_domain, p.redacted_pdu_json)
for p in pdus_to_check_sender
])
def sender_err(e, pdu_to_check):
errmsg = "event id %s: unable to verify signature for sender %s: %s" % (
@@ -295,23 +287,20 @@ def _check_sigs_on_pdus(keyring, room_version, pdus):
# event id's domain (normally only the case for joins/leaves), and add additional
# checks. Only do this if the room version has a concept of event ID domain
# (ie, the room version uses old-style non-hash event IDs).
v = KNOWN_ROOM_VERSIONS.get(room_version)
if not v:
raise RuntimeError("Unrecognized room version %s" % (room_version,))
if v.event_format == EventFormatVersions.V1:
pdus_to_check_event_id = [
p for p in pdus_to_check
if p.sender_domain != get_domain_from_id(p.pdu.event_id)
]
more_deferreds = keyring.verify_json_objects_for_server(
[
(
get_domain_from_id(p.pdu.event_id),
p.redacted_pdu_json,
p.pdu.origin_server_ts if v.enforce_key_validity else 0,
p.pdu.event_id,
)
for p in pdus_to_check_event_id
]
)
more_deferreds = keyring.verify_json_objects_for_server([
(get_domain_from_id(p.pdu.event_id), p.redacted_pdu_json)
for p in pdus_to_check_event_id
])
def event_err(e, pdu_to_check):
errmsg = (

View File

@@ -17,6 +17,7 @@
import copy
import itertools
import logging
import random
from six.moves import range
@@ -232,8 +233,7 @@ class FederationClient(FederationBase):
moving to the next destination. None indicates no timeout.
Returns:
Deferred: Results in the requested PDU, or None if we were unable to find
it.
Deferred: Results in the requested PDU.
"""
# TODO: Rate limit the number of times we try and get the same event.
@@ -258,12 +258,7 @@ class FederationClient(FederationBase):
destination, event_id, timeout=timeout,
)
logger.debug(
"retrieved event id %s from %s: %r",
event_id,
destination,
transaction_data,
)
logger.debug("transaction_data %r", transaction_data)
pdu_list = [
event_from_pdu_json(p, format_ver, outlier=outlier)
@@ -285,7 +280,6 @@ class FederationClient(FederationBase):
"Failed to get PDU %s from %s because %s",
event_id, destination, e,
)
continue
except NotRetryingDestination as e:
logger.info(str(e))
continue
@@ -332,16 +326,12 @@ class FederationClient(FederationBase):
state_event_ids = result["pdu_ids"]
auth_event_ids = result.get("auth_chain_ids", [])
fetched_events, failed_to_fetch = yield self.get_events_from_store_or_dest(
destination, room_id, set(state_event_ids + auth_event_ids)
fetched_events, failed_to_fetch = yield self.get_events(
[destination], room_id, set(state_event_ids + auth_event_ids)
)
if failed_to_fetch:
logger.warning(
"Failed to fetch missing state/auth events for %s: %s",
room_id,
failed_to_fetch
)
logger.warn("Failed to get %r", failed_to_fetch)
event_map = {
ev.event_id: ev for ev in fetched_events
@@ -407,20 +397,27 @@ class FederationClient(FederationBase):
defer.returnValue((signed_pdus, signed_auth))
@defer.inlineCallbacks
def get_events_from_store_or_dest(self, destination, room_id, event_ids):
"""Fetch events from a remote destination, checking if we already have them.
def get_events(self, destinations, room_id, event_ids, return_local=True):
"""Fetch events from some remote destinations, checking if we already
have them.
Args:
destination (str)
destinations (list)
room_id (str)
event_ids (list)
return_local (bool): Whether to include events we already have in
the DB in the returned list of events
Returns:
Deferred: A deferred resolving to a 2-tuple where the first is a list of
events and the second is a list of event ids that we failed to fetch.
"""
seen_events = yield self.store.get_events(event_ids, allow_rejected=True)
signed_events = list(seen_events.values())
if return_local:
seen_events = yield self.store.get_events(event_ids, allow_rejected=True)
signed_events = list(seen_events.values())
else:
seen_events = yield self.store.have_seen_events(event_ids)
signed_events = []
failed_to_fetch = set()
@@ -431,11 +428,10 @@ class FederationClient(FederationBase):
if not missing_events:
defer.returnValue((signed_events, failed_to_fetch))
logger.debug(
"Fetching unknown state/auth events %s for room %s",
missing_events,
event_ids,
)
def random_server_list():
srvs = list(destinations)
random.shuffle(srvs)
return srvs
room_version = yield self.store.get_room_version(room_id)
@@ -447,7 +443,7 @@ class FederationClient(FederationBase):
deferreds = [
run_in_background(
self.get_pdu,
destinations=[destination],
destinations=random_server_list(),
event_id=e_id,
room_version=room_version,
)

View File

@@ -349,10 +349,9 @@ class PerDestinationQueue(object):
@defer.inlineCallbacks
def _get_new_device_messages(self, limit):
last_device_list = self._last_device_list_stream_id
# Retrieve list of new device updates to send to the destination
# Will return at most 20 entries
now_stream_id, results = yield self._store.get_devices_by_remote(
self._destination, last_device_list, limit=limit,
self._destination, last_device_list
)
edus = [
Edu(

View File

@@ -23,11 +23,7 @@ from twisted.internet import defer
import synapse
from synapse.api.errors import Codes, FederationDeniedError, SynapseError
from synapse.api.room_versions import RoomVersions
from synapse.api.urls import (
FEDERATION_UNSTABLE_PREFIX,
FEDERATION_V1_PREFIX,
FEDERATION_V2_PREFIX,
)
from synapse.api.urls import FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX
from synapse.http.endpoint import parse_and_validate_server_name
from synapse.http.server import JsonResource
from synapse.http.servlet import (
@@ -94,7 +90,6 @@ class NoAuthenticationError(AuthenticationError):
class Authenticator(object):
def __init__(self, hs):
self._clock = hs.get_clock()
self.keyring = hs.get_keyring()
self.server_name = hs.hostname
self.store = hs.get_datastore()
@@ -103,7 +98,6 @@ class Authenticator(object):
# A method just so we can pass 'self' as the authenticator to the Servlets
@defer.inlineCallbacks
def authenticate_request(self, request, content):
now = self._clock.time_msec()
json_request = {
"method": request.method.decode('ascii'),
"uri": request.uri.decode('ascii'),
@@ -140,9 +134,7 @@ class Authenticator(object):
401, "Missing Authorization headers", Codes.UNAUTHORIZED,
)
yield self.keyring.verify_json_for_server(
origin, json_request, now, "Incoming request"
)
yield self.keyring.verify_json_for_server(origin, json_request)
logger.info("Request from %s", origin)
request.authenticated_entity = origin
@@ -720,15 +712,15 @@ class PublicRoomList(BaseFederationServlet):
PATH = "/publicRooms"
def __init__(self, handler, authenticator, ratelimiter, server_name, allow_access):
def __init__(self, handler, authenticator, ratelimiter, server_name, deny_access):
super(PublicRoomList, self).__init__(
handler, authenticator, ratelimiter, server_name,
)
self.allow_access = allow_access
self.deny_access = deny_access
@defer.inlineCallbacks
def on_GET(self, origin, content, query):
if not self.allow_access:
if self.deny_access:
raise FederationDeniedError(origin)
limit = parse_integer_from_args(query, "limit", 0)
@@ -1312,30 +1304,6 @@ class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet):
defer.returnValue((200, new_content))
class RoomComplexityServlet(BaseFederationServlet):
"""
Indicates to other servers how complex (and therefore likely
resource-intensive) a public room this server knows about is.
"""
PATH = "/rooms/(?P<room_id>[^/]*)/complexity"
PREFIX = FEDERATION_UNSTABLE_PREFIX
@defer.inlineCallbacks
def on_GET(self, origin, content, query, room_id):
store = self.handler.hs.get_datastore()
is_public = yield store.is_room_world_readable_or_publicly_joinable(
room_id
)
if not is_public:
raise SynapseError(404, "Room not found", errcode=Codes.INVALID_PARAM)
complexity = yield store.get_room_complexity(room_id)
defer.returnValue((200, complexity))
FEDERATION_SERVLET_CLASSES = (
FederationSendServlet,
FederationEventServlet,
@@ -1359,7 +1327,6 @@ FEDERATION_SERVLET_CLASSES = (
FederationThirdPartyInviteExchangeServlet,
On3pidBindServlet,
FederationVersionServlet,
RoomComplexityServlet,
)
OPENID_SERVLET_CLASSES = (
@@ -1455,7 +1422,7 @@ def register_servlets(hs, resource, authenticator, ratelimiter, servlet_groups=N
authenticator=authenticator,
ratelimiter=ratelimiter,
server_name=hs.hostname,
allow_access=hs.config.allow_public_rooms_over_federation,
deny_access=hs.config.restrict_public_rooms_to_local_users,
).register(resource)
if "group_server" in servlet_groups:

View File

@@ -97,13 +97,10 @@ class GroupAttestationSigning(object):
# TODO: We also want to check that *new* attestations that people give
# us to store are valid for at least a little while.
now = self.clock.time_msec()
if valid_until_ms < now:
if valid_until_ms < self.clock.time_msec():
raise SynapseError(400, "Attestation expired")
yield self.keyring.verify_json_for_server(
server_name, attestation, now, "Group attestation"
)
yield self.keyring.verify_json_for_server(server_name, attestation)
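The payload being verified here is a group attestation; treating the field names as read off this code and the default lifetime as an assumption, it looks roughly like:

DEFAULT_ATTESTATION_LENGTH_MS = 3 * 24 * 60 * 60 * 1000  # assumed default

def make_attestation(group_id, user_id, now_ms):
    # Shape of the signed blob whose 'valid_until_ms' is checked above;
    # the signedjson signing step is omitted from this sketch.
    return {
        "group_id": group_id,
        "user_id": user_id,
        "valid_until_ms": now_ms + DEFAULT_ATTESTATION_LENGTH_MS,
    }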
def create_attestation(self, group_id, user_id):
"""Create an attestation for the group_id and user_id with default

View File

@@ -110,9 +110,6 @@ class AccountValidityHandler(object):
# Stop right here if the user doesn't have at least one email address.
# In this case, they will have to ask their server admin to renew their
# account manually.
# We don't need to do a specific check to make sure the account isn't
# deactivated, as a deactivated account isn't supposed to have any
# email address attached to it.
if not addresses:
return
@@ -223,19 +220,11 @@ class AccountValidityHandler(object):
Args:
renewal_token (str): Token sent with the renewal request.
Returns:
bool: Whether the provided token is valid.
"""
try:
user_id = yield self.store.get_user_from_renewal_token(renewal_token)
except StoreError:
defer.returnValue(False)
user_id = yield self.store.get_user_from_renewal_token(renewal_token)
logger.debug("Renewing an account for user %s", user_id)
yield self.renew_account_for_user(user_id)
defer.returnValue(True)
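With the StoreError guard (one side of this diff), an unknown token becomes a clean False rather than an unhandled exception. A hedged sketch of a caller, assuming the method above is named renew_account:

from twisted.internet import defer

from synapse.api.errors import SynapseError

@defer.inlineCallbacks
def handle_renewal(handler, renewal_token):
    token_valid = yield handler.renew_account(renewal_token)
    if not token_valid:
        # Unknown or already-expired token: surface a 404 to the client.
        raise SynapseError(404, "Unknown or expired renewal token")
    defer.returnValue({})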
@defer.inlineCallbacks
def renew_account_for_user(self, user_id, expiration_ts=None, email_sent=False):
"""Renews the account attached to a given user by pushing back the

View File

@@ -162,7 +162,7 @@ class AuthHandler(BaseHandler):
defer.returnValue(params)
@defer.inlineCallbacks
def check_auth(self, flows, clientdict, clientip, password_servlet=False):
def check_auth(self, flows, clientdict, clientip):
"""
Takes a dictionary sent by the client in the login / registration
protocol and handles the User-Interactive Auth flow.
@@ -186,16 +186,6 @@ class AuthHandler(BaseHandler):
clientip (str): The IP address of the client.
password_servlet (bool): Whether the request originated from
PasswordRestServlet.
XXX: This is a temporary hack to distinguish between checking
for threepid validations locally (in the case of password
resets) and using the identity server (in the case of binding
a 3PID during registration). Once we start using the
homeserver for both tasks, this distinction will no longer be
necessary.
Returns:
defer.Deferred[dict, dict, str]: a deferred tuple of
(creds, params, session_id).
@@ -251,9 +241,7 @@ class AuthHandler(BaseHandler):
if 'type' in authdict:
login_type = authdict['type']
try:
result = yield self._check_auth_dict(
authdict, clientip, password_servlet=password_servlet,
)
result = yield self._check_auth_dict(authdict, clientip)
if result:
creds[login_type] = result
self._save_session(session)
@@ -363,7 +351,7 @@ class AuthHandler(BaseHandler):
return sess.setdefault('serverdict', {}).get(key, default)
@defer.inlineCallbacks
def _check_auth_dict(self, authdict, clientip, password_servlet=False):
def _check_auth_dict(self, authdict, clientip):
"""Attempt to validate the auth dict provided by a client
Args:
@@ -381,13 +369,7 @@ class AuthHandler(BaseHandler):
login_type = authdict['type']
checker = self.checkers.get(login_type)
if checker is not None:
# XXX: Temporary workaround for having Synapse handle password resets
# See AuthHandler.check_auth for further details
res = yield checker(
authdict,
clientip=clientip,
password_servlet=password_servlet,
)
res = yield checker(authdict, clientip)
defer.returnValue(res)
# build a v1-login-style dict out of the authdict and fall back to the
@@ -401,7 +383,7 @@ class AuthHandler(BaseHandler):
defer.returnValue(canonical_id)
@defer.inlineCallbacks
def _check_recaptcha(self, authdict, clientip, **kwargs):
def _check_recaptcha(self, authdict, clientip):
try:
user_response = authdict["response"]
except KeyError:
@@ -447,20 +429,20 @@ class AuthHandler(BaseHandler):
defer.returnValue(True)
raise LoginError(401, "", errcode=Codes.UNAUTHORIZED)
def _check_email_identity(self, authdict, **kwargs):
return self._check_threepid('email', authdict, **kwargs)
def _check_email_identity(self, authdict, _):
return self._check_threepid('email', authdict)
def _check_msisdn(self, authdict, **kwargs):
def _check_msisdn(self, authdict, _):
return self._check_threepid('msisdn', authdict)
def _check_dummy_auth(self, authdict, **kwargs):
def _check_dummy_auth(self, authdict, _):
return defer.succeed(True)
def _check_terms_auth(self, authdict, **kwargs):
def _check_terms_auth(self, authdict, _):
return defer.succeed(True)
@defer.inlineCallbacks
def _check_threepid(self, medium, authdict, password_servlet=False, **kwargs):
def _check_threepid(self, medium, authdict):
if 'threepid_creds' not in authdict:
raise LoginError(400, "Missing threepid_creds", Codes.MISSING_PARAM)
@@ -469,30 +451,7 @@ class AuthHandler(BaseHandler):
identity_handler = self.hs.get_handlers().identity_handler
logger.info("Getting validated threepid. threepidcreds: %r", (threepid_creds,))
if (
not password_servlet
or self.hs.config.email_password_reset_behaviour == "remote"
):
threepid = yield identity_handler.threepid_from_creds(threepid_creds)
elif self.hs.config.email_password_reset_behaviour == "local":
row = yield self.store.get_threepid_validation_session(
medium,
threepid_creds["client_secret"],
sid=threepid_creds["sid"],
validated=True,
)
threepid = {
"medium": row["medium"],
"address": row["address"],
"validated_at": row["validated_at"],
} if row else None
if row:
# Valid threepid returned, delete from the db
yield self.store.delete_threepid_session(threepid_creds["sid"])
else:
raise SynapseError(400, "Password resets are not enabled on this homeserver")
threepid = yield identity_handler.threepid_from_creds(threepid_creds)
if not threepid:
raise LoginError(401, "", errcode=Codes.UNAUTHORIZED)

View File

@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2017, 2018 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -34,7 +33,6 @@ class DeactivateAccountHandler(BaseHandler):
self._device_handler = hs.get_device_handler()
self._room_member_handler = hs.get_room_member_handler()
self._identity_handler = hs.get_handlers().identity_handler
self._profile_handler = hs.get_profile_handler()
self.user_directory_handler = hs.get_user_directory_handler()
# Flag that indicates whether the process to part users from rooms is running
@@ -44,8 +42,6 @@ class DeactivateAccountHandler(BaseHandler):
# it left off (if it has work left to do).
hs.get_reactor().callWhenRunning(self._start_user_parting)
self._account_validity_enabled = hs.config.account_validity.enabled
@defer.inlineCallbacks
def deactivate_account(self, user_id, erase_data, id_server=None):
"""Deactivate a user's account
@@ -102,9 +98,6 @@ class DeactivateAccountHandler(BaseHandler):
yield self.store.user_set_password_hash(user_id, None)
user = UserID.from_string(user_id)
yield self._profile_handler.set_active(user, False, False)
# Add the user to a table of users pending deactivation (ie.
# removal from all the rooms they're a member of)
yield self.store.add_user_pending_deactivation(user_id)
@@ -121,13 +114,6 @@ class DeactivateAccountHandler(BaseHandler):
# parts users from rooms (if it isn't already running)
self._start_user_parting()
# Remove all information on the user from the account_validity table.
if self._account_validity_enabled:
yield self.store.delete_account_validity_for_user(user_id)
# Mark the user as deactivated.
yield self.store.set_user_deactivated_status(user_id, True)
defer.returnValue(identity_server_supports_unbinding)
def _start_user_parting(self):

View File

@@ -568,12 +568,6 @@ class DeviceListEduUpdater(object):
stream_id = result["stream_id"]
devices = result["devices"]
for device in devices:
logger.debug(
"Handling resync update %r/%r, ID: %r",
user_id, device["device_id"], stream_id,
)
# If the remote server has more than ~1000 devices for this user
# we assume that something is going horribly wrong (e.g. a bot
# that logs in and creates a new device every time it tries to

View File

@@ -122,9 +122,6 @@ class EventStreamHandler(BaseHandler):
chunks = yield self._event_serializer.serialize_events(
events, time_now, as_client_event=as_client_event,
# We don't bundle "live" events, as otherwise clients
# will end up double counting annotations.
bundle_aggregations=False,
)
chunk = {

View File

@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017-2018 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -34,10 +33,8 @@ from synapse.api.constants import EventTypes, Membership, RejectedReason
from synapse.api.errors import (
AuthError,
CodeMessageException,
Codes,
FederationDeniedError,
FederationError,
RequestSendFailed,
StoreError,
SynapseError,
)
@@ -129,8 +126,6 @@ class FederationHandler(BaseHandler):
self.room_queues = {}
self._room_pdu_linearizer = Linearizer("fed_room_pdu")
self.third_party_event_rules = hs.get_third_party_event_rules()
@defer.inlineCallbacks
def on_receive_pdu(
self, origin, pdu, sent_to_us_directly=False,
@@ -1262,15 +1257,6 @@ class FederationHandler(BaseHandler):
logger.warn("Failed to create join %r because %s", event, e)
raise e
event_allowed = yield self.third_party_event_rules.check_event_allowed(
event, context,
)
if not event_allowed:
logger.info("Creation of join %s forbidden by third-party rules", event)
raise SynapseError(
403, "This event is not allowed in this context", Codes.FORBIDDEN,
)
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_join_request`
yield self.auth.check_from_context(
@@ -1313,15 +1299,6 @@ class FederationHandler(BaseHandler):
origin, event
)
event_allowed = yield self.third_party_event_rules.check_event_allowed(
event, context,
)
if not event_allowed:
logger.info("Sending of join %s forbidden by third-party rules", event)
raise SynapseError(
403, "This event is not allowed in this context", Codes.FORBIDDEN,
)
logger.debug(
"on_send_join_request: After _handle_new_event: %s, sigs: %s",
event.event_id,
@@ -1363,12 +1340,8 @@ class FederationHandler(BaseHandler):
if self.hs.config.block_non_admin_invites:
raise SynapseError(403, "This server does not accept room invites")
is_published = yield self.store.is_room_published(event.room_id)
if not self.spam_checker.user_may_invite(
event.sender, event.state_key, None,
room_id=event.room_id, new_room=False,
published_room=is_published,
event.sender, event.state_key, event.room_id,
):
raise SynapseError(
403, "This user is not permitted to send invites to this server/user"
@@ -1484,15 +1457,6 @@ class FederationHandler(BaseHandler):
builder=builder,
)
event_allowed = yield self.third_party_event_rules.check_event_allowed(
event, context,
)
if not event_allowed:
logger.warning("Creation of leave %s forbidden by third-party rules", event)
raise SynapseError(
403, "This event is not allowed in this context", Codes.FORBIDDEN,
)
try:
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_leave_request`
@@ -1519,19 +1483,10 @@ class FederationHandler(BaseHandler):
event.internal_metadata.outlier = False
context = yield self._handle_new_event(
yield self._handle_new_event(
origin, event
)
event_allowed = yield self.third_party_event_rules.check_event_allowed(
event, context,
)
if not event_allowed:
logger.info("Sending of leave %s forbidden by third-party rules", event)
raise SynapseError(
403, "This event is not allowed in this context", Codes.FORBIDDEN,
)
logger.debug(
"on_send_leave_request: After _handle_new_event: %s, sigs: %s",
event.event_id,
@@ -2058,65 +2013,15 @@ class FederationHandler(BaseHandler):
Args:
origin (str):
event (synapse.events.EventBase):
event (synapse.events.FrozenEvent):
context (synapse.events.snapshot.EventContext):
auth_events (dict[(str, str)->synapse.events.EventBase]):
Map from (event_type, state_key) to event
What we expect the event's auth_events to be, based on the event's
position in the dag. I think? maybe??
Also NB that this function adds entries to it.
Returns:
defer.Deferred[None]
"""
room_version = yield self.store.get_room_version(event.room_id)
try:
yield self._update_auth_events_and_context_for_auth(
origin, event, context, auth_events
)
except Exception:
# We don't really mind if the above fails, so lets not fail
# processing if it does. However, it really shouldn't fail so
# let's still log as an exception since we'll still want to fix
# any bugs.
logger.exception(
"Failed to double check auth events for %s with remote. "
"Ignoring failure and continuing processing of event.",
event.event_id,
)
try:
self.auth.check(room_version, event, auth_events=auth_events)
except AuthError as e:
logger.warn("Failed auth resolution for %r because %s", event, e)
raise e
@defer.inlineCallbacks
def _update_auth_events_and_context_for_auth(
self, origin, event, context, auth_events
):
"""Helper for do_auth. See there for docs.
Checks whether a given event has the expected auth events. If it
doesn't then we talk to the remote server to compare state to see if
we can come to a consensus (e.g. if one server missed some valid
state).
This attempts to resolve any potential divergence of state between
servers, but is not essential and so failures should not block further
processing of the event.
Args:
origin (str):
event (synapse.events.EventBase):
context (synapse.events.snapshot.EventContext):
auth_events (dict[(str, str)->synapse.events.EventBase]):
auth_events (dict[(str, str)->str]):
Returns:
defer.Deferred[None]
"""
# Check if we have all the auth events.
current_state = set(e.event_id for e in auth_events.values())
event_auth_events = set(event.auth_event_ids())
if event.is_state():
@@ -2124,21 +2029,11 @@ class FederationHandler(BaseHandler):
else:
event_key = None
# if the event's auth_events refers to events which are not in our
# calculated auth_events, we need to fetch those events from somewhere.
#
# we start by fetching them from the store, and then try calling /event_auth/.
missing_auth = event_auth_events.difference(
e.event_id for e in auth_events.values()
)
if missing_auth:
if event_auth_events - current_state:
# TODO: can we use store.have_seen_events here instead?
have_events = yield self.store.get_seen_events_with_rejections(
missing_auth
event_auth_events - current_state
)
logger.debug("Got events %s from store", have_events)
missing_auth.difference_update(have_events.keys())
else:
have_events = {}
@@ -2147,22 +2042,17 @@ class FederationHandler(BaseHandler):
for e in auth_events.values()
})
seen_events = set(have_events.keys())
missing_auth = event_auth_events - seen_events - current_state
if missing_auth:
logger.info("Missing auth: %s", missing_auth)
# If we don't have all the auth events, we need to get them.
logger.info(
"auth_events contains unknown events: %s",
missing_auth,
)
try:
try:
remote_auth_chain = yield self.federation_client.get_event_auth(
origin, event.room_id, event.event_id
)
except RequestSendFailed as e:
# The other side isn't around or doesn't implement the
# endpoint, so let's just bail out.
logger.info("Failed to get event auth from remote: %s", e)
return
remote_auth_chain = yield self.federation_client.get_event_auth(
origin, event.room_id, event.event_id
)
seen_remotes = yield self.store.have_seen_events(
[e.event_id for e in remote_auth_chain]
@@ -2199,174 +2089,145 @@ class FederationHandler(BaseHandler):
have_events = yield self.store.get_seen_events_with_rejections(
event.auth_event_ids()
)
seen_events = set(have_events.keys())
except Exception:
# FIXME:
logger.exception("Failed to get auth chain")
if event.internal_metadata.is_outlier():
logger.info("Skipping auth_event fetch for outlier")
return
# FIXME: Assumes we have and stored all the state for all the
# prev_events
different_auth = event_auth_events.difference(
e.event_id for e in auth_events.values()
)
if not different_auth:
return
logger.info(
"auth_events refers to events which are not in our calculated auth "
"chain: %s",
different_auth,
)
current_state = set(e.event_id for e in auth_events.values())
different_auth = event_auth_events - current_state
room_version = yield self.store.get_room_version(event.room_id)
different_events = yield logcontext.make_deferred_yieldable(
defer.gatherResults([
logcontext.run_in_background(
self.store.get_event,
d,
allow_none=True,
allow_rejected=False,
if different_auth and not event.internal_metadata.is_outlier():
# Do auth conflict res.
logger.info("Different auth: %s", different_auth)
different_events = yield logcontext.make_deferred_yieldable(
defer.gatherResults([
logcontext.run_in_background(
self.store.get_event,
d,
allow_none=True,
allow_rejected=False,
)
for d in different_auth
if d in have_events and not have_events[d]
], consumeErrors=True)
).addErrback(unwrapFirstError)
if different_events:
local_view = dict(auth_events)
remote_view = dict(auth_events)
remote_view.update({
(d.type, d.state_key): d for d in different_events if d
})
new_state = yield self.state_handler.resolve_events(
room_version,
[list(local_view.values()), list(remote_view.values())],
event
)
for d in different_auth
if d in have_events and not have_events[d]
], consumeErrors=True)
).addErrback(unwrapFirstError)
if different_events:
local_view = dict(auth_events)
remote_view = dict(auth_events)
remote_view.update({
(d.type, d.state_key): d for d in different_events if d
})
auth_events.update(new_state)
new_state = yield self.state_handler.resolve_events(
room_version,
[list(local_view.values()), list(remote_view.values())],
event
)
current_state = set(e.event_id for e in auth_events.values())
different_auth = event_auth_events - current_state
logger.info(
"After state res: updating auth_events with new state %s",
{
(d.type, d.state_key): d.event_id for d in new_state.values()
if auth_events.get((d.type, d.state_key)) != d
},
)
auth_events.update(new_state)
different_auth = event_auth_events.difference(
e.event_id for e in auth_events.values()
)
yield self._update_context_for_auth_events(
event, context, auth_events, event_key,
)
if not different_auth:
# we're done
return
logger.info(
"auth_events still refers to events which are not in the calculated auth "
"chain after state resolution: %s",
different_auth,
)
# Only do auth resolution if we have something new to say.
# We can't prove an auth failure.
do_resolution = False
for e_id in different_auth:
if e_id in have_events:
if have_events[e_id] == RejectedReason.NOT_ANCESTOR:
do_resolution = True
break
if not do_resolution:
logger.info(
"Skipping auth resolution due to lack of provable rejection reasons"
)
return
logger.info("Doing auth resolution")
prev_state_ids = yield context.get_prev_state_ids(self.store)
# 1. Get what we think is the auth chain.
auth_ids = yield self.auth.compute_auth_events(
event, prev_state_ids
)
local_auth_chain = yield self.store.get_auth_chain(
auth_ids, include_given=True
)
try:
# 2. Get remote difference.
try:
result = yield self.federation_client.query_auth(
origin,
event.room_id,
event.event_id,
local_auth_chain,
yield self._update_context_for_auth_events(
event, context, auth_events, event_key,
)
except RequestSendFailed as e:
# The other side isn't around or doesn't implement the
# endpoint, so let's just bail out.
logger.info("Failed to query auth from remote: %s", e)
return
seen_remotes = yield self.store.have_seen_events(
[e.event_id for e in result["auth_chain"]]
)
if different_auth and not event.internal_metadata.is_outlier():
logger.info("Different auth after resolution: %s", different_auth)
# 3. Process any remote auth chain events we haven't seen.
for ev in result["auth_chain"]:
if ev.event_id in seen_remotes:
continue
# Only do auth resolution if we have something new to say.
# We can't prove an auth failure.
do_resolution = False
if ev.event_id == event.event_id:
continue
provable = [
RejectedReason.NOT_ANCESTOR, RejectedReason.NOT_ANCESTOR,
]
for e_id in different_auth:
if e_id in have_events:
if have_events[e_id] in provable:
do_resolution = True
break
if do_resolution:
prev_state_ids = yield context.get_prev_state_ids(self.store)
# 1. Get what we think is the auth chain.
auth_ids = yield self.auth.compute_auth_events(
event, prev_state_ids
)
local_auth_chain = yield self.store.get_auth_chain(
auth_ids, include_given=True
)
try:
auth_ids = ev.auth_event_ids()
auth = {
(e.type, e.state_key): e
for e in result["auth_chain"]
if e.event_id in auth_ids
or event.type == EventTypes.Create
}
ev.internal_metadata.outlier = True
logger.debug(
"do_auth %s different_auth: %s",
event.event_id, e.event_id
# 2. Get remote difference.
result = yield self.federation_client.query_auth(
origin,
event.room_id,
event.event_id,
local_auth_chain,
)
yield self._handle_new_event(
origin, ev, auth_events=auth
seen_remotes = yield self.store.have_seen_events(
[e.event_id for e in result["auth_chain"]]
)
if ev.event_id in event_auth_events:
auth_events[(ev.type, ev.state_key)] = ev
except AuthError:
pass
# 3. Process any remote auth chain events we haven't seen.
for ev in result["auth_chain"]:
if ev.event_id in seen_remotes:
continue
except Exception:
# FIXME:
logger.exception("Failed to query auth chain")
if ev.event_id == event.event_id:
continue
# 4. Look at rejects and their proofs.
# TODO.
try:
auth_ids = ev.auth_event_ids()
auth = {
(e.type, e.state_key): e
for e in result["auth_chain"]
if e.event_id in auth_ids
or event.type == EventTypes.Create
}
ev.internal_metadata.outlier = True
yield self._update_context_for_auth_events(
event, context, auth_events, event_key,
)
logger.debug(
"do_auth %s different_auth: %s",
event.event_id, e.event_id
)
yield self._handle_new_event(
origin, ev, auth_events=auth
)
if ev.event_id in event_auth_events:
auth_events[(ev.type, ev.state_key)] = ev
except AuthError:
pass
except Exception:
# FIXME:
logger.exception("Failed to query auth chain")
# 4. Look at rejects and their proofs.
# TODO.
yield self._update_context_for_auth_events(
event, context, auth_events, event_key,
)
try:
self.auth.check(room_version, event, auth_events=auth_events)
except AuthError as e:
logger.warn("Failed auth resolution for %r because %s", event, e)
raise e
@defer.inlineCallbacks
def _update_context_for_auth_events(self, event, context, auth_events,
@@ -2594,23 +2455,11 @@ class FederationHandler(BaseHandler):
builder=builder
)
event_allowed = yield self.third_party_event_rules.check_event_allowed(
event, context,
)
if not event_allowed:
logger.info(
"Creation of threepid invite %s forbidden by third-party rules",
event,
)
raise SynapseError(
403, "This event is not allowed in this context", Codes.FORBIDDEN,
)
event, context = yield self.add_display_name_to_third_party_invite(
room_version, event_dict, event, context
)
EventValidator().validate_new(event, self.config)
EventValidator().validate_new(event)
# We need to tell the transaction queue to send this out, even
# though the sender isn't a local user.
@@ -2654,18 +2503,6 @@ class FederationHandler(BaseHandler):
builder=builder,
)
event_allowed = yield self.third_party_event_rules.check_event_allowed(
event, context,
)
if not event_allowed:
logger.warning(
"Exchange of threepid invite %s forbidden by third-party rules",
event,
)
raise SynapseError(
403, "This event is not allowed in this context", Codes.FORBIDDEN,
)
event, context = yield self.add_display_name_to_third_party_invite(
room_version, event_dict, event, context
)
@@ -2681,6 +2518,12 @@ class FederationHandler(BaseHandler):
# though the sender isn't a local user.
event.internal_metadata.send_on_behalf_of = get_domain_from_id(event.sender)
# XXX we send the invite here, but send_membership_event also sends it,
# so we end up making two requests. I think this is redundant.
returned_invite = yield self.send_invite(origin, event)
# TODO: Make sure the signatures actually are correct.
event.signatures.update(returned_invite.signatures)
member_handler = self.hs.get_room_member_handler()
yield member_handler.send_membership_event(None, event, context)
@@ -2715,7 +2558,7 @@ class FederationHandler(BaseHandler):
event, context = yield self.event_creation_handler.create_new_client_event(
builder=builder,
)
EventValidator().validate_new(event, self.config)
EventValidator().validate_new(event)
defer.returnValue((event, context))
@defer.inlineCallbacks

View File

@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
# Copyright 2018, 2019 New Vector Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,18 +20,13 @@
import logging
from canonicaljson import json
from signedjson.key import decode_verify_key_bytes
from signedjson.sign import verify_signed_json
from unpaddedbase64 import decode_base64
from twisted.internet import defer
from synapse.api.errors import (
AuthError,
CodeMessageException,
Codes,
HttpResponseException,
ProxiedRequestError,
SynapseError,
)
@@ -52,8 +47,6 @@ class IdentityHandler(BaseHandler):
self.trust_any_id_server_just_for_testing_do_not_use = (
hs.config.use_insecure_ssl_client_just_for_testing_do_not_use
)
self.rewrite_identity_server_urls = hs.config.rewrite_identity_server_urls
self._enable_lookup = hs.config.enable_3pid_lookup
def _should_trust_id_server(self, id_server):
if id_server not in self.trusted_id_servers:
@@ -91,10 +84,7 @@ class IdentityHandler(BaseHandler):
'credentials', id_server
)
defer.returnValue(None)
# if we have a rewrite rule set for the identity server,
# apply it now.
if id_server in self.rewrite_identity_server_urls:
id_server = self.rewrite_identity_server_urls[id_server]
try:
data = yield self.http_client.get_json(
"https://%s%s" % (
@@ -130,18 +120,10 @@ class IdentityHandler(BaseHandler):
else:
raise SynapseError(400, "No client_secret in creds")
# if we have a rewrite rule set for the identity server,
# apply it now, but only for sending the request (not
# storing in the database).
if id_server in self.rewrite_identity_server_urls:
id_server_host = self.rewrite_identity_server_urls[id_server]
else:
id_server_host = id_server
try:
data = yield self.http_client.post_urlencoded_get_json(
"https://%s%s" % (
id_server_host, "/_matrix/identity/api/v1/3pid/bind"
id_server, "/_matrix/identity/api/v1/3pid/bind"
),
{
'sid': creds['sid'],
@@ -239,16 +221,6 @@ class IdentityHandler(BaseHandler):
b"Authorization": auth_headers,
}
# if we have a rewrite rule set for the identity server,
# apply it now.
#
# Note that destination_is has to be the real id_server, not
# the server we connect to.
if id_server in self.rewrite_identity_server_urls:
id_server = self.rewrite_identity_server_urls[id_server]
url = "https://%s/_matrix/identity/api/v1/3pid/unbind" % (id_server,)
try:
yield self.http_client.post_json_get_json(
url,
@@ -275,14 +247,7 @@ class IdentityHandler(BaseHandler):
defer.returnValue(changed)
@defer.inlineCallbacks
def requestEmailToken(
self,
id_server,
email,
client_secret,
send_attempt,
next_link=None,
):
def requestEmailToken(self, id_server, email, client_secret, send_attempt, **kwargs):
if not self._should_trust_id_server(id_server):
raise SynapseError(
400, "Untrusted ID server '%s'" % id_server,
@@ -294,14 +259,7 @@ class IdentityHandler(BaseHandler):
'client_secret': client_secret,
'send_attempt': send_attempt,
}
# if we have a rewrite rule set for the identity server,
# apply it now.
if id_server in self.rewrite_identity_server_urls:
id_server = self.rewrite_identity_server_urls[id_server]
if next_link:
params.update({'next_link': next_link})
params.update(kwargs)
try:
data = yield self.http_client.post_json_get_json(
@@ -334,10 +292,7 @@ class IdentityHandler(BaseHandler):
'send_attempt': send_attempt,
}
params.update(kwargs)
# if we have a rewrite rule set for the identity server,
# apply it now.
if id_server in self.rewrite_identity_server_urls:
id_server = self.rewrite_identity_server_urls[id_server]
try:
data = yield self.http_client.post_json_get_json(
"https://%s%s" % (
@@ -350,125 +305,3 @@ class IdentityHandler(BaseHandler):
except HttpResponseException as e:
logger.info("Proxied requestToken failed: %r", e)
raise e.to_synapse_error()
@defer.inlineCallbacks
def lookup_3pid(self, id_server, medium, address):
"""Looks up a 3pid in the passed identity server.
Args:
id_server (str): The server name (including port, if required)
of the identity server to use.
medium (str): The type of the third party identifier (e.g. "email").
address (str): The third party identifier (e.g. "foo@example.com").
Returns:
Deferred[dict]: The result of the lookup. See
https://matrix.org/docs/spec/identity_service/r0.1.0.html#association-lookup
for details
"""
if not self._should_trust_id_server(id_server):
raise SynapseError(
400, "Untrusted ID server '%s'" % id_server,
Codes.SERVER_NOT_TRUSTED
)
if not self._enable_lookup:
raise AuthError(
403, "Looking up third-party identifiers is denied from this server",
)
target = self.rewrite_identity_server_urls.get(id_server, id_server)
try:
data = yield self.http_client.get_json(
"https://%s/_matrix/identity/api/v1/lookup" % (target,),
{
"medium": medium,
"address": address,
}
)
if "mxid" in data:
if "signatures" not in data:
raise AuthError(401, "No signatures on 3pid binding")
yield self._verify_any_signature(data, id_server)
except HttpResponseException as e:
logger.info("Proxied lookup failed: %r", e)
raise e.to_synapse_error()
except IOError as e:
logger.info("Failed to contact %r: %s", id_server, e)
raise ProxiedRequestError(503, "Failed to contact identity server")
defer.returnValue(data)
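# Illustrative caller (an assumption, not part of this diff): from another
# @defer.inlineCallbacks method, the lookup API above would be used roughly
# like this; the id_server and address values are placeholders.
@defer.inlineCallbacks
def _example_lookup_3pid(identity_handler):
    result = yield identity_handler.lookup_3pid(
        "matrix.org", "email", "foo@example.com",
    )
    # "mxid" is only present when the identity server knows of a binding.
    defer.returnValue(result.get("mxid"))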
@defer.inlineCallbacks
def bulk_lookup_3pid(self, id_server, threepids):
"""Looks up given 3pids in the passed identity server.
Args:
id_server (str): The server name (including port, if required)
of the identity server to use.
threepids ([[str, str]]): The third party identifiers to lookup, as
a list of 2-string sized lists ([medium, address]).
Returns:
Deferred[dict]: The result of the lookup. See
https://matrix.org/docs/spec/identity_service/r0.1.0.html#association-lookup
for details
"""
if not self._should_trust_id_server(id_server):
raise SynapseError(
400, "Untrusted ID server '%s'" % id_server,
Codes.SERVER_NOT_TRUSTED
)
if not self._enable_lookup:
raise AuthError(
403, "Looking up third-party identifiers is denied from this server",
)
target = self.rewrite_identity_server_urls.get(id_server, id_server)
try:
data = yield self.http_client.post_json_get_json(
"https://%s/_matrix/identity/api/v1/bulk_lookup" % (target,),
{
"threepids": threepids,
}
)
except HttpResponseException as e:
logger.info("Proxied lookup failed: %r", e)
raise e.to_synapse_error()
except IOError as e:
logger.info("Failed to contact %r: %s", id_server, e)
raise ProxiedRequestError(503, "Failed to contact identity server")
defer.returnValue(data)
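# Hedged sketch of the bulk variant (placeholder values): threepids is a list
# of two-element [medium, address] lists, per the docstring above.
@defer.inlineCallbacks
def _example_bulk_lookup_3pid(identity_handler):
    result = yield identity_handler.bulk_lookup_3pid(
        "matrix.org",
        [["email", "foo@example.com"], ["msisdn", "447700900000"]],
    )
    defer.returnValue(result)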
@defer.inlineCallbacks
def _verify_any_signature(self, data, server_hostname):
if server_hostname not in data["signatures"]:
raise AuthError(401, "No signature from server %s" % (server_hostname,))
for key_name, signature in data["signatures"][server_hostname].items():
target = self.rewrite_identity_server_urls.get(
server_hostname, server_hostname,
)
key_data = yield self.http_client.get_json(
"https://%s/_matrix/identity/api/v1/pubkey/%s" %
(target, key_name,),
)
if "public_key" not in key_data:
raise AuthError(401, "No public key named %s from %s" %
(key_name, server_hostname,))
verify_signed_json(
data,
server_hostname,
decode_verify_key_bytes(key_name, decode_base64(key_data["public_key"]))
)
return
raise AuthError(401, "No signature from server %s" % (server_hostname,))

View File

@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017-2018 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
# Copyright 2014 - 2016 OpenMarket Ltd
# Copyright 2017 - 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -167,9 +166,6 @@ class MessageHandler(object):
now = self.clock.time_msec()
events = yield self._event_serializer.serialize_events(
room_state.values(), now,
# We don't bother bundling aggregations in when asked for state
# events, as clients won't use them.
bundle_aggregations=False,
)
defer.returnValue(events)
@@ -249,7 +245,6 @@ class EventCreationHandler(object):
self.action_generator = hs.get_action_generator()
self.spam_checker = hs.get_spam_checker()
self.third_party_event_rules = hs.get_third_party_event_rules()
self._block_events_without_consent_error = (
self.config.block_events_without_consent_error
@@ -372,7 +367,7 @@ class EventCreationHandler(object):
"You must be in the room to create an alias for it",
)
self.validator.validate_new(event, self.config)
self.validator.validate_new(event)
defer.returnValue((event, context))
@@ -604,7 +599,7 @@ class EventCreationHandler(object):
if requester:
context.app_service = requester.app_service
self.validator.validate_new(event, self.config)
self.validator.validate_new(event)
# If this event is an annotation then we check that that the sender
# can't annotate the same way twice (e.g. stops users from liking an
@@ -660,14 +655,6 @@ class EventCreationHandler(object):
else:
room_version = yield self.store.get_room_version(event.room_id)
event_allowed = yield self.third_party_event_rules.check_event_allowed(
event, context,
)
if not event_allowed:
raise SynapseError(
403, "This event is not allowed in this context", Codes.FORBIDDEN,
)
try:
yield self.auth.check_from_context(room_version, event, context)
except AuthError as err:

View File

@@ -15,14 +15,11 @@
# limitations under the License.
import logging
from six import iteritems
from twisted.internet import defer
from twisted.python.failure import Failure
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import SynapseError
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.state import StateFilter
from synapse.types import RoomStreamToken
from synapse.util.async_helpers import ReadWriteLock
@@ -82,114 +79,6 @@ class PaginationHandler(object):
self._purges_by_id = {}
self._event_serializer = hs.get_event_client_serializer()
self._retention_default_max_lifetime = hs.config.retention_default_max_lifetime
if hs.config.retention_enabled:
# Run the purge jobs described in the configuration file.
for job in hs.config.retention_purge_jobs:
self.clock.looping_call(
run_as_background_process,
job["interval"],
"purge_history_for_rooms_in_range",
self.purge_history_for_rooms_in_range,
job["shortest_max_lifetime"],
job["longest_max_lifetime"],
)
@defer.inlineCallbacks
def purge_history_for_rooms_in_range(self, min_ms, max_ms):
"""Purge outdated events from rooms within the given retention range.
If a default retention policy is defined in the server's configuration and its
'max_lifetime' is within this range, also targets rooms which don't have a
retention policy.
Args:
min_ms (int|None): Duration in milliseconds that defines the lower limit of
the range to handle (exclusive). If None, it means that the range has no
lower limit.
max_ms (int|None): Duration in milliseconds that defines the upper limit of
the range to handle (inclusive). If None, it means that the range has no
upper limit.
"""
# We want the storage layer to include rooms with no retention policy in its
# return value only if a default retention policy is defined in the server's
# configuration, and that policy's 'max_lifetime' falls within the range, i.e.
# is higher than min_ms and lower than (or equal to) max_ms.
if self._retention_default_max_lifetime is not None:
include_null = True
if min_ms is not None and min_ms >= self._retention_default_max_lifetime:
# The default max_lifetime is lower than (or equal to) min_ms.
include_null = False
if max_ms is not None and max_ms < self._retention_default_max_lifetime:
# The default max_lifetime is higher than max_ms.
include_null = False
else:
include_null = False
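# Worked example (not part of the diff): with a default max_lifetime of
# 7 days, a range of min_ms=1 day / max_ms=30 days keeps include_null True,
# since 1d < 7d <= 30d; min_ms=7 days would set it False because
# min_ms >= the default, and max_ms=3 days would too because max_ms < it.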
rooms = yield self.store.get_rooms_for_retention_period_in_range(
min_ms, max_ms, include_null
)
for room_id, retention_policy in iteritems(rooms):
if room_id in self._purges_in_progress_by_room:
logger.warning(
"[purge] not purging room %s as there's an ongoing purge running"
" for this room",
room_id,
)
continue
max_lifetime = retention_policy["max_lifetime"]
if max_lifetime is None:
# If max_lifetime is None, it means that include_null equals True,
# therefore we can safely assume that there is a default policy defined
# in the server's configuration.
max_lifetime = self._retention_default_max_lifetime
# Figure out what token we should start purging at.
ts = self.clock.time_msec() - max_lifetime
stream_ordering = (
yield self.store.find_first_stream_ordering_after_ts(ts)
)
r = (
yield self.store.get_room_event_after_stream_ordering(
room_id, stream_ordering,
)
)
if not r:
logger.warning(
"[purge] purging events not possible: No event found "
"(ts %i => stream_ordering %i)",
ts, stream_ordering,
)
continue
(stream, topo, _event_id) = r
token = "t%d-%d" % (topo, stream)
purge_id = random_string(16)
self._purges_by_id[purge_id] = PurgeStatus()
logger.info(
"Starting purging events in room %s (purge_id %s)" % (room_id, purge_id)
)
# We want to purge everything, including local events, and to run the purge in
# the background so that it's not blocking any other operation apart from
# other purges in the same room.
run_as_background_process(
"_purge_history",
self._purge_history,
purge_id, room_id, token, True,
)
def start_purge_history(self, room_id, token,
delete_local_events=False):
"""Start off a history purge on a room.

View File

@@ -1,93 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2019 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from synapse.api.errors import Codes, PasswordRefusedError
logger = logging.getLogger(__name__)
class PasswordPolicyHandler(object):
def __init__(self, hs):
self.policy = hs.config.password_policy
self.enabled = hs.config.password_policy_enabled
# Regexps for the spec'd policy parameters.
self.regexp_digit = re.compile("[0-9]")
self.regexp_symbol = re.compile("[^a-zA-Z0-9]")
self.regexp_uppercase = re.compile("[A-Z]")
self.regexp_lowercase = re.compile("[a-z]")
def validate_password(self, password):
"""Checks whether a given password complies with the server's policy.
Args:
password (str): The password to check against the server's policy.
Raises:
PasswordRefusedError: The password doesn't comply with the server's policy.
"""
if not self.enabled:
return
minimum_accepted_length = self.policy.get("minimum_length", 0)
if len(password) < minimum_accepted_length:
raise PasswordRefusedError(
msg=(
"The password must be at least %d characters long"
% minimum_accepted_length
),
errcode=Codes.PASSWORD_TOO_SHORT,
)
if (
self.policy.get("require_digit", False) and
self.regexp_digit.search(password) is None
):
raise PasswordRefusedError(
msg="The password must include at least one digit",
errcode=Codes.PASSWORD_NO_DIGIT,
)
if (
self.policy.get("require_symbol", False) and
self.regexp_symbol.search(password) is None
):
raise PasswordRefusedError(
msg="The password must include at least one symbol",
errcode=Codes.PASSWORD_NO_SYMBOL,
)
if (
self.policy.get("require_uppercase", False) and
self.regexp_uppercase.search(password) is None
):
raise PasswordRefusedError(
msg="The password must include at least one uppercase letter",
errcode=Codes.PASSWORD_NO_UPPERCASE,
)
if (
self.policy.get("require_lowercase", False) and
self.regexp_lowercase.search(password) is None
):
raise PasswordRefusedError(
msg="The password must include at least one lowercase letter",
errcode=Codes.PASSWORD_NO_LOWERCASE,
)
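# Hedged usage sketch (the config values are assumptions): with a policy such
# as {"minimum_length": 10, "require_digit": True}, validate_password either
# returns None or raises PasswordRefusedError carrying a specific errcode.
def _example_validate_password(hs):
    handler = PasswordPolicyHandler(hs)
    try:
        handler.validate_password("hunter2")  # too short for the policy above
    except PasswordRefusedError as e:
        return e.errcode  # e.g. Codes.PASSWORD_TOO_SHORT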

View File

@@ -158,13 +158,7 @@ class PresenceHandler(object):
# have not yet been persisted
self.unpersisted_users_changes = set()
hs.get_reactor().addSystemEventTrigger(
"before",
"shutdown",
run_as_background_process,
"presence.on_shutdown",
self._on_shutdown,
)
hs.get_reactor().addSystemEventTrigger("before", "shutdown", self._on_shutdown)
self.serial_to_user = {}
self._next_serial = 1
@@ -188,27 +182,17 @@ class PresenceHandler(object):
# Start a LoopingCall in 30s that fires every 5s.
# The initial delay is to allow disconnected clients a chance to
# reconnect before we treat them as offline.
def run_timeout_handler():
return run_as_background_process(
"handle_presence_timeouts", self._handle_timeouts
)
self.clock.call_later(
30,
self.clock.looping_call,
run_timeout_handler,
self._handle_timeouts,
5000,
)
def run_persister():
return run_as_background_process(
"persist_presence_changes", self._persist_unpersisted_changes
)
self.clock.call_later(
60,
self.clock.looping_call,
run_persister,
self._persist_unpersisted_changes,
60 * 1000,
)
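# Sketch of the delayed-loop pattern on the removed side of the hunk above
# (names assumed): delay the first run by 30s so reconnecting clients aren't
# treated as offline, then fire every 5000ms, wrapping the handler so it is
# tracked as a background process.
def _start_timeout_loop(clock, handle_timeouts):
    def run_timeout_handler():
        return run_as_background_process(
            "handle_presence_timeouts", handle_timeouts
        )
    # call_later(delay, fn, *args): after 30s, start the 5s looping call.
    clock.call_later(30, clock.looping_call, run_timeout_handler, 5000)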
@@ -245,7 +229,6 @@ class PresenceHandler(object):
)
if self.unpersisted_users_changes:
yield self.store.update_presence([
self.user_to_current_state[user_id]
for user_id in self.unpersisted_users_changes
@@ -257,18 +240,30 @@ class PresenceHandler(object):
"""We periodically persist the unpersisted changes, as otherwise they
may stack up and slow down shutdown times.
"""
logger.info(
"Performing _persist_unpersisted_changes. Persisting %d unpersisted changes",
len(self.unpersisted_users_changes)
)
unpersisted = self.unpersisted_users_changes
self.unpersisted_users_changes = set()
if unpersisted:
logger.info(
"Persisting %d upersisted presence updates", len(unpersisted)
)
yield self.store.update_presence([
self.user_to_current_state[user_id]
for user_id in unpersisted
])
logger.info("Finished _persist_unpersisted_changes")
@defer.inlineCallbacks
def _update_states_and_catch_exception(self, new_states):
try:
res = yield self._update_states(new_states)
defer.returnValue(res)
except Exception:
logger.exception("Error updating presence")
@defer.inlineCallbacks
def _update_states(self, new_states):
"""Updates presence of users. Sets the appropriate timeouts. Pokes
@@ -343,41 +338,45 @@ class PresenceHandler(object):
logger.info("Handling presence timeouts")
now = self.clock.time_msec()
# Fetch the list of users that *may* have timed out. Things may have
# changed since the timeout was set, so we won't necessarily have to
# take any action.
users_to_check = set(self.wheel_timer.fetch(now))
try:
with Measure(self.clock, "presence_handle_timeouts"):
# Fetch the list of users that *may* have timed out. Things may have
# changed since the timeout was set, so we won't necessarily have to
# take any action.
users_to_check = set(self.wheel_timer.fetch(now))
# Check whether the lists of syncing processes from an external
# process have expired.
expired_process_ids = [
process_id for process_id, last_update
in self.external_process_last_updated_ms.items()
if now - last_update > EXTERNAL_PROCESS_EXPIRY
]
for process_id in expired_process_ids:
users_to_check.update(
self.external_process_last_updated_ms.pop(process_id, ())
)
self.external_process_last_update.pop(process_id)
# Check whether the lists of syncing processes from an external
# process have expired.
expired_process_ids = [
process_id for process_id, last_update
in self.external_process_last_updated_ms.items()
if now - last_update > EXTERNAL_PROCESS_EXPIRY
]
for process_id in expired_process_ids:
users_to_check.update(
self.external_process_last_updated_ms.pop(process_id, ())
)
self.external_process_last_update.pop(process_id)
states = [
self.user_to_current_state.get(
user_id, UserPresenceState.default(user_id)
)
for user_id in users_to_check
]
states = [
self.user_to_current_state.get(
user_id, UserPresenceState.default(user_id)
)
for user_id in users_to_check
]
timers_fired_counter.inc(len(states))
timers_fired_counter.inc(len(states))
changes = handle_timeouts(
states,
is_mine_fn=self.is_mine_id,
syncing_user_ids=self.get_currently_syncing_users(),
now=now,
)
changes = handle_timeouts(
states,
is_mine_fn=self.is_mine_id,
syncing_user_ids=self.get_currently_syncing_users(),
now=now,
)
return self._update_states(changes)
run_in_background(self._update_states_and_catch_exception, changes)
except Exception:
logger.exception("Exception in _handle_timeouts loop")
@defer.inlineCallbacks
def bump_presence_active_time(self, user):
@@ -834,17 +833,14 @@ class PresenceHandler(object):
# joins.
continue
event = yield self.store.get_event(event_id, allow_none=True)
if not event or event.content.get("membership") != Membership.JOIN:
event = yield self.store.get_event(event_id)
if event.content.get("membership") != Membership.JOIN:
# We only care about joins
continue
if prev_event_id:
prev_event = yield self.store.get_event(prev_event_id, allow_none=True)
if (
prev_event
and prev_event.content.get("membership") == Membership.JOIN
):
prev_event = yield self.store.get_event(prev_event_id)
if prev_event.content.get("membership") == Membership.JOIN:
# Ignore changes to join events.
continue

View File

@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,11 +15,7 @@
import logging
from six.moves import range
from signedjson.sign import sign_json
from twisted.internet import defer, reactor
from twisted.internet import defer
from synapse.api.errors import (
AuthError,
@@ -31,15 +26,11 @@ from synapse.api.errors import (
)
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import UserID, get_domain_from_id
from synapse.util.logcontext import run_in_background
from ._base import BaseHandler
logger = logging.getLogger(__name__)
MAX_DISPLAYNAME_LEN = 100
MAX_AVATAR_URL_LEN = 1000
class BaseProfileHandler(BaseHandler):
"""Handles fetching and updating user profile information.
@@ -49,8 +40,6 @@ class BaseProfileHandler(BaseHandler):
subclass MasterProfileHandler
"""
PROFILE_REPLICATE_INTERVAL = 2 * 60 * 1000
def __init__(self, hs):
super(BaseProfileHandler, self).__init__(hs)
@@ -61,84 +50,6 @@ class BaseProfileHandler(BaseHandler):
self.user_directory_handler = hs.get_user_directory_handler()
self.http_client = hs.get_simple_http_client()
if hs.config.worker_app is None:
self.clock.looping_call(
self._start_update_remote_profile_cache, self.PROFILE_UPDATE_MS,
)
if len(self.hs.config.replicate_user_profiles_to) > 0:
reactor.callWhenRunning(self._assign_profile_replication_batches)
reactor.callWhenRunning(self._replicate_profiles)
# Add a looping call to replicate_profiles: this handles retries
# if the replication is unsuccessful when the user updated their
# profile.
self.clock.looping_call(
self._replicate_profiles, self.PROFILE_REPLICATE_INTERVAL
)
@defer.inlineCallbacks
def _assign_profile_replication_batches(self):
"""If no profile replication has been done yet, allocate replication batch
numbers to each profile to start the replication process.
"""
logger.info("Assigning profile batch numbers...")
total = 0
while True:
assigned = yield self.store.assign_profile_batch()
total += assigned
if assigned == 0:
break
logger.info("Assigned %d profile batch numbers", total)
@defer.inlineCallbacks
def _replicate_profiles(self):
"""If any profile data has been updated and not pushed to the replication targets,
replicate it.
"""
host_batches = yield self.store.get_replication_hosts()
latest_batch = yield self.store.get_latest_profile_replication_batch_number()
if latest_batch is None:
latest_batch = -1
for repl_host in self.hs.config.replicate_user_profiles_to:
if repl_host not in host_batches:
host_batches[repl_host] = -1
try:
for i in range(host_batches[repl_host] + 1, latest_batch + 1):
yield self._replicate_host_profile_batch(repl_host, i)
except Exception:
logger.exception(
"Exception while replicating to %s: aborting for now", repl_host,
)
@defer.inlineCallbacks
def _replicate_host_profile_batch(self, host, batchnum):
logger.info("Replicating profile batch %d to %s", batchnum, host)
batch_rows = yield self.store.get_profile_batch(batchnum)
batch = {
UserID(r["user_id"], self.hs.hostname).to_string(): ({
"display_name": r["displayname"],
"avatar_url": r["avatar_url"],
} if r["active"] else None) for r in batch_rows
}
url = "https://%s/_matrix/identity/api/v1/replicate_profiles" % (host,)
body = {
"batchnum": batchnum,
"batch": batch,
"origin_server": self.hs.hostname,
}
signed_body = sign_json(body, self.hs.hostname, self.hs.config.signing_key[0])
try:
yield self.http_client.post_json_get_json(url, signed_body)
yield self.store.update_replication_batch_for_host(host, batchnum)
logger.info("Sucessfully replicated profile batch %d to %s", batchnum, host)
except Exception:
# This will get retried when the looping call next comes around
logger.exception("Failed to replicate profile batch %d to %s", batchnum, host)
raise
@defer.inlineCallbacks
def get_profile(self, user_id):
target_user = UserID.from_string(user_id)
@@ -248,30 +159,14 @@ class BaseProfileHandler(BaseHandler):
if not self.hs.is_mine(target_user):
raise SynapseError(400, "User is not hosted on this Home Server")
if not by_admin and requester and target_user != requester.user:
if not by_admin and target_user != requester.user:
raise AuthError(400, "Cannot set another user's displayname")
if not by_admin and self.hs.config.disable_set_displayname:
profile = yield self.store.get_profileinfo(target_user.localpart)
if profile.display_name:
raise SynapseError(400, "Changing displayname is disabled on this server")
if len(new_displayname) > MAX_DISPLAYNAME_LEN:
raise SynapseError(
400, "Displayname is too long (max %i)" % (MAX_DISPLAYNAME_LEN, ),
)
if new_displayname == '':
new_displayname = None
if len(self.hs.config.replicate_user_profiles_to) > 0:
cur_batchnum = yield self.store.get_latest_profile_replication_batch_number()
new_batchnum = 0 if cur_batchnum is None else cur_batchnum + 1
else:
new_batchnum = None
yield self.store.set_profile_displayname(
target_user.localpart, new_displayname, new_batchnum
target_user.localpart, new_displayname
)
if self.hs.config.user_directory_search_all_users:
@@ -280,37 +175,7 @@ class BaseProfileHandler(BaseHandler):
target_user.to_string(), profile
)
if requester:
yield self._update_join_states(requester, target_user)
# start a profile replication push
run_in_background(self._replicate_profiles)
@defer.inlineCallbacks
def set_active(self, target_user, active, hide):
"""
Sets the 'active' flag on a user profile. If set to false, the user
account is considered deactivated or hidden.
If 'hide' is true, then we interpret active=False as a request to try to
hide the user rather than deactivating it. This means withholding the
profile from replication (and marking it as inactive) rather than clearing
the profile from the HS DB. Note that unlike set_displayname and
set_avatar_url, this does *not* perform authorization checks! This is
because the only place it's used currently is in account deactivation
where we've already done these checks anyway.
"""
if len(self.hs.config.replicate_user_profiles_to) > 0:
cur_batchnum = yield self.store.get_latest_profile_replication_batch_number()
new_batchnum = 0 if cur_batchnum is None else cur_batchnum + 1
else:
new_batchnum = None
yield self.store.set_profile_active(
target_user.localpart, active, hide, new_batchnum
)
# start a profile replication push
run_in_background(self._replicate_profiles)
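# Illustrative call (not part of the diff), mirroring the docstring above:
# hide a deactivated user's profile from replication without wiping it.
#
#     yield profile_handler.set_active(
#         UserID.from_string("@alice:example.com"), active=False, hide=True,
#     )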
yield self._update_join_states(requester, target_user)
@defer.inlineCallbacks
def get_avatar_url(self, target_user):
@@ -352,24 +217,8 @@ class BaseProfileHandler(BaseHandler):
if not by_admin and target_user != requester.user:
raise AuthError(400, "Cannot set another user's avatar_url")
if not by_admin and self.hs.config.disable_set_avatar_url:
profile = yield self.store.get_profileinfo(target_user.localpart)
if profile.avatar_url:
raise SynapseError(400, "Changing avatar url is disabled on this server")
if len(self.hs.config.replicate_user_profiles_to) > 0:
cur_batchnum = yield self.store.get_latest_profile_replication_batch_number()
new_batchnum = 0 if cur_batchnum is None else cur_batchnum + 1
else:
new_batchnum = None
if len(new_avatar_url) > MAX_AVATAR_URL_LEN:
raise SynapseError(
400, "Avatar URL is too long (max %i)" % (MAX_AVATAR_URL_LEN, ),
)
yield self.store.set_profile_avatar_url(
target_user.localpart, new_avatar_url, new_batchnum,
target_user.localpart, new_avatar_url
)
if self.hs.config.user_directory_search_all_users:
@@ -380,9 +229,6 @@ class BaseProfileHandler(BaseHandler):
yield self._update_join_states(requester, target_user)
# start a profile replication push
run_in_background(self._replicate_profiles)
@defer.inlineCallbacks
def on_profile_query(self, args):
user = UserID.from_string(args["user_id"])
@@ -462,10 +308,6 @@ class BaseProfileHandler(BaseHandler):
if not self.hs.config.require_auth_for_profile_requests or not requester:
return
# Always allow the user to query their own profile.
if target_user.to_string() == requester.to_string():
return
try:
requester_rooms = yield self.store.get_rooms_for_user(
requester.to_string()

View File

@@ -61,7 +61,6 @@ class RegistrationHandler(BaseHandler):
self.profile_handler = hs.get_profile_handler()
self.user_directory_handler = hs.get_user_directory_handler()
self.captcha_client = CaptchaServerHttpClient(hs)
self.http_client = hs.get_simple_http_client()
self.identity_handler = self.hs.get_handlers().identity_handler
self.ratelimiter = hs.get_registration_ratelimiter()
@@ -74,8 +73,6 @@ class RegistrationHandler(BaseHandler):
)
self._server_notices_mxid = hs.config.server_notices_mxid
self._show_in_user_directory = self.hs.config.show_users_in_user_directory
if hs.config.worker_app:
self._register_client = ReplicationRegisterServlet.make_client(hs)
self._register_device_client = (
@@ -237,11 +234,6 @@ class RegistrationHandler(BaseHandler):
address=address,
)
if default_display_name:
yield self.profile_handler.set_displayname(
user, None, default_display_name, by_admin=True,
)
if self.hs.config.user_directory_search_all_users:
profile = yield self.store.get_profileinfo(localpart)
yield self.user_directory_handler.handle_local_profile_change(
@@ -271,11 +263,6 @@ class RegistrationHandler(BaseHandler):
create_profile_with_displayname=default_display_name,
address=address,
)
yield self.profile_handler.set_displayname(
user, None, default_display_name, by_admin=True,
)
except SynapseError:
# if user id is taken, just generate another
user = None
@@ -300,14 +287,6 @@ class RegistrationHandler(BaseHandler):
user_id, threepid_dict, None, False,
)
# Prevent the new user from showing up in the user directory if the server
# mandates it.
if not self._show_in_user_directory:
yield self.store.add_account_data_for_user(
user_id, "im.vector.hide_profile", {'hide_profile': True},
)
yield self.profile_handler.set_active(user, False, True)
defer.returnValue((user_id, token))
@defer.inlineCallbacks
@@ -377,9 +356,7 @@ class RegistrationHandler(BaseHandler):
yield self._auto_join_rooms(user_id)
@defer.inlineCallbacks
def appservice_register(self, user_localpart, as_token, password, display_name):
# FIXME: this should be factored out and merged with normal register()
def appservice_register(self, user_localpart, as_token):
user = UserID(user_localpart, self.hs.hostname)
user_id = user.to_string()
service = self.store.get_app_service_by_token(as_token)
@@ -397,29 +374,12 @@ class RegistrationHandler(BaseHandler):
user_id, allowed_appservice=service
)
password_hash = ""
if password:
password_hash = yield self.auth_handler().hash(password)
display_name = display_name or user.localpart
yield self.register_with_store(
user_id=user_id,
password_hash=password_hash,
password_hash="",
appservice_id=service_id,
create_profile_with_displayname=display_name,
create_profile_with_displayname=user.localpart,
)
yield self.profile_handler.set_displayname(
user, None, display_name, by_admin=True,
)
if self.hs.config.user_directory_search_all_users:
profile = yield self.store.get_profileinfo(user_localpart)
yield self.user_directory_handler.handle_local_profile_change(
user_id, profile
)
defer.returnValue(user_id)
@defer.inlineCallbacks
@@ -445,39 +405,6 @@ class RegistrationHandler(BaseHandler):
else:
logger.info("Valid captcha entered from %s", ip)
@defer.inlineCallbacks
def register_saml2(self, localpart):
"""
Registers email_id as SAML2 Based Auth.
"""
if types.contains_invalid_mxid_characters(localpart):
raise SynapseError(
400,
"User ID can only contain characters a-z, 0-9, or '=_-./'",
)
yield self.auth.check_auth_blocking()
user = UserID(localpart, self.hs.hostname)
user_id = user.to_string()
yield self.check_user_id_not_appservice_exclusive(user_id)
token = self.macaroon_gen.generate_access_token(user_id)
try:
yield self.register_with_store(
user_id=user_id,
token=token,
password_hash=None,
create_profile_with_displayname=user.localpart,
)
yield self.profile_handler.set_displayname(
user, None, user.localpart, by_admin=True,
)
except Exception as e:
yield self.store.add_access_token_to_user(user_id, token)
# Ignore Registration errors
logger.exception(e)
defer.returnValue((user_id, token))
@defer.inlineCallbacks
def register_email(self, threepidCreds):
"""
@@ -500,9 +427,7 @@ class RegistrationHandler(BaseHandler):
logger.info("got threepid with medium '%s' and address '%s'",
threepid['medium'], threepid['address'])
if not (
yield check_3pid_allowed(self.hs, threepid['medium'], threepid['address'])
):
if not check_3pid_allowed(self.hs, threepid['medium'], threepid['address']):
raise RegistrationError(
403, "Third party identifier is not allowed"
)
@@ -543,39 +468,6 @@ class RegistrationHandler(BaseHandler):
errcode=Codes.EXCLUSIVE
)
@defer.inlineCallbacks
def shadow_register(self, localpart, display_name, auth_result, params):
"""Invokes the current registration on another server, using
shared secret registration, passing in any auth_results from
other registration UI auth flows (e.g. validated 3pids)
Useful for setting up shadow/backup accounts on a parallel deployment.
"""
# TODO: retries
shadow_hs_url = self.hs.config.shadow_server.get("hs_url")
as_token = self.hs.config.shadow_server.get("as_token")
yield self.http_client.post_json_get_json(
"%s/_matrix/client/r0/register?access_token=%s" % (
shadow_hs_url, as_token,
),
{
# XXX: auth_result is an unspecified extension for shadow registration
'auth_result': auth_result,
# XXX: another unspecified extension for shadow registration to ensure
# that the displayname is correctly set by the master server
'display_name': display_name,
'username': localpart,
'password': params.get("password"),
'bind_email': params.get("bind_email"),
'bind_msisdn': params.get("bind_msisdn"),
'device_id': params.get("device_id"),
'initial_device_display_name': params.get("initial_device_display_name"),
'inhibit_login': False,
'access_token': as_token,
}
)
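# Hedged example (all values are placeholders): mirroring a local
# registration to the shadow deployment configured in hs.config.shadow_server.
#
#     yield registration_handler.shadow_register(
#         localpart="alice",
#         display_name="Alice",
#         auth_result=auth_result,  # UI-auth results, e.g. validated 3pids
#         params={"password": "s3cret", "device_id": "ADEVICE"},
#     )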
@defer.inlineCallbacks
def _generate_user_id(self, reseed=False):
if reseed or self._next_generated_user_id is None:
@@ -639,8 +531,6 @@ class RegistrationHandler(BaseHandler):
A tuple of (user_id, access_token).
Raises:
RegistrationError if there was a problem registering.
NB this is only used in tests. TODO: move it to the test package!
"""
if localpart is None:
raise SynapseError(400, "Request must include user id")
@@ -664,16 +554,18 @@ class RegistrationHandler(BaseHandler):
user_id=user_id,
token=token,
password_hash=password_hash,
create_profile_with_displayname=displayname or user.localpart,
create_profile_with_displayname=user.localpart,
)
if displayname is not None:
yield self.profile_handler.set_displayname(
user, None, displayname or user.localpart, by_admin=True,
)
else:
yield self._auth_handler.delete_access_tokens_for_user(user_id)
yield self.store.add_access_token_to_user(user_id=user_id, token=token)
if displayname is not None:
logger.info("setting user display name: %s -> %s", user_id, displayname)
yield self.profile_handler.set_displayname(
user, requester, displayname, by_admin=True,
)
defer.returnValue((user_id, token))
@defer.inlineCallbacks

View File

@@ -27,7 +27,7 @@ from twisted.internet import defer
from synapse.api.constants import EventTypes, JoinRules, RoomCreationPreset
from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, SynapseError
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.api.room_versions import DEFAULT_ROOM_VERSION, KNOWN_ROOM_VERSIONS
from synapse.storage.state import StateFilter
from synapse.types import RoomAlias, RoomID, RoomStreamToken, StreamToken, UserID
from synapse.util import stringutils
@@ -49,14 +49,12 @@ class RoomCreationHandler(BaseHandler):
"history_visibility": "shared",
"original_invitees_have_ops": False,
"guest_can_join": True,
"encryption_alg": "m.megolm.v1.aes-sha2",
},
RoomCreationPreset.TRUSTED_PRIVATE_CHAT: {
"join_rules": JoinRules.INVITE,
"history_visibility": "shared",
"original_invitees_have_ops": True,
"guest_can_join": True,
"encryption_alg": "m.megolm.v1.aes-sha2",
},
RoomCreationPreset.PUBLIC_CHAT: {
"join_rules": JoinRules.PUBLIC,
@@ -72,15 +70,10 @@ class RoomCreationHandler(BaseHandler):
self.spam_checker = hs.get_spam_checker()
self.event_creation_handler = hs.get_event_creation_handler()
self.room_member_handler = hs.get_room_member_handler()
self.config = hs.config
# linearizer to stop two upgrades happening at once
self._upgrade_linearizer = Linearizer("room_upgrade_linearizer")
self._server_notices_mxid = hs.config.server_notices_mxid
self.third_party_event_rules = hs.get_third_party_event_rules()
@defer.inlineCallbacks
def upgrade_room(self, requester, old_room_id, new_version):
"""Replace a room with a new room with a different version
@@ -254,22 +247,7 @@ class RoomCreationHandler(BaseHandler):
"""
user_id = requester.user.to_string()
if (self._server_notices_mxid is not None and
requester.user.to_string() == self._server_notices_mxid):
# allow the server notices mxid to create rooms
is_requester_admin = True
else:
is_requester_admin = yield self.auth.is_server_admin(
requester.user,
)
if not is_requester_admin and not self.spam_checker.user_may_create_room(
user_id,
invite_list=[],
third_party_invite_list=[],
cloning=True,
):
if not self.spam_checker.user_may_create_room(user_id):
raise SynapseError(403, "You are not permitted to create rooms")
creation_content = {
@@ -491,42 +469,13 @@ class RoomCreationHandler(BaseHandler):
yield self.auth.check_auth_blocking(user_id)
if (self._server_notices_mxid is not None and
requester.user.to_string() == self._server_notices_mxid):
# allow the server notices mxid to create rooms
is_requester_admin = True
else:
is_requester_admin = yield self.auth.is_server_admin(
requester.user,
)
# Check whether the third party rules allows/changes the room create
# request.
yield self.third_party_event_rules.on_create_room(
requester,
config,
is_requester_admin=is_requester_admin,
)
invite_list = config.get("invite", [])
invite_3pid_list = config.get("invite_3pid", [])
if not is_requester_admin and not self.spam_checker.user_may_create_room(
user_id,
invite_list=invite_list,
third_party_invite_list=invite_3pid_list,
cloning=False,
):
if not self.spam_checker.user_may_create_room(user_id):
raise SynapseError(403, "You are not permitted to create rooms")
if ratelimit:
yield self.ratelimit(requester)
room_version = config.get(
"room_version",
self.config.default_room_version.identifier,
)
room_version = config.get("room_version", DEFAULT_ROOM_VERSION.identifier)
if not isinstance(room_version, string_types):
raise SynapseError(
400,
@@ -563,6 +512,7 @@ class RoomCreationHandler(BaseHandler):
else:
room_alias = None
invite_list = config.get("invite", [])
for i in invite_list:
try:
UserID.from_string(i)
@@ -573,6 +523,8 @@ class RoomCreationHandler(BaseHandler):
requester,
)
invite_3pid_list = config.get("invite_3pid", [])
visibility = config.get("visibility", None)
is_public = visibility == "public"
@@ -658,7 +610,6 @@ class RoomCreationHandler(BaseHandler):
"invite",
ratelimit=False,
content=content,
new_room=True,
)
for invite_3pid in invite_3pid_list:
@@ -673,7 +624,6 @@ class RoomCreationHandler(BaseHandler):
id_server,
requester,
txn_id=None,
new_room=True,
)
result = {"room_id": room_id}
@@ -744,7 +694,6 @@ class RoomCreationHandler(BaseHandler):
"join",
ratelimit=False,
content=creator_join_profile,
new_room=True,
)
# We treat the power levels override specially as this needs to be one
@@ -820,15 +769,6 @@ class RoomCreationHandler(BaseHandler):
content=content,
)
if "encryption_alg" in config:
yield send(
etype=EventTypes.Encryption,
state_key="",
content={
'algorithm': config["encryption_alg"],
}
)
@defer.inlineCallbacks
def _generate_room_id(self, creator_id, is_public):
# autogen room IDs and try to create it. We may clash, so just

View File

@@ -20,12 +20,16 @@ import logging
from six.moves import http_client
from signedjson.key import decode_verify_key_bytes
from signedjson.sign import verify_signed_json
from unpaddedbase64 import decode_base64
from twisted.internet import defer
import synapse.server
import synapse.types
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import AuthError, Codes, ProxiedRequestError, SynapseError
from synapse.api.errors import AuthError, Codes, SynapseError
from synapse.types import RoomID, UserID
from synapse.util.async_helpers import Linearizer
from synapse.util.distributor import user_joined_room, user_left_room
@@ -63,15 +67,12 @@ class RoomMemberHandler(object):
self.registration_handler = hs.get_registration_handler()
self.profile_handler = hs.get_profile_handler()
self.event_creation_handler = hs.get_event_creation_handler()
self.identity_handler = hs.get_handlers().identity_handler
self.member_linearizer = Linearizer(name="member")
self.clock = hs.get_clock()
self.spam_checker = hs.get_spam_checker()
self.third_party_event_rules = hs.get_third_party_event_rules()
self._server_notices_mxid = self.config.server_notices_mxid
self.rewrite_identity_server_urls = self.config.rewrite_identity_server_urls
self._enable_lookup = hs.config.enable_3pid_lookup
self.allow_per_room_profiles = self.config.allow_per_room_profiles
@@ -316,31 +317,8 @@ class RoomMemberHandler(object):
third_party_signed=None,
ratelimit=True,
content=None,
new_room=False,
require_consent=True,
):
"""Update a users membership in a room
Args:
requester (Requester)
target (UserID)
room_id (str)
action (str): The "action" the requester is performing against the
target. One of join/leave/kick/ban/invite/unban.
txn_id (str|None): The transaction ID associated with the request,
or None if not provided.
remote_room_hosts (list[str]|None): List of remote servers to try
and join via if server isn't already in the room.
third_party_signed (dict|None): The signed object for third party
invites.
ratelimit (bool): Whether to apply ratelimiting to this request.
content (dict|None): Fields to include in the new events content.
new_room (bool): Whether these membership changes are happening
as part of a room creation (e.g. initial joins and invites)
Returns:
Deferred[FrozenEvent]
"""
key = (room_id,)
with (yield self.member_linearizer.queue(key)):
@@ -354,7 +332,6 @@ class RoomMemberHandler(object):
third_party_signed=third_party_signed,
ratelimit=ratelimit,
content=content,
new_room=new_room,
require_consent=require_consent,
)
@@ -372,7 +349,6 @@ class RoomMemberHandler(object):
third_party_signed=None,
ratelimit=True,
content=None,
new_room=False,
require_consent=True,
):
content_specified = bool(content)
@@ -440,14 +416,8 @@ class RoomMemberHandler(object):
)
block_invite = True
is_published = yield self.store.is_room_published(room_id)
if not self.spam_checker.user_may_invite(
requester.user.to_string(), target.to_string(),
third_party_invite=None,
room_id=room_id,
new_room=new_room,
published_room=is_published,
requester.user.to_string(), target.to_string(), room_id,
):
logger.info("Blocking invite due to spam checker")
block_invite = True
@@ -526,29 +496,8 @@ class RoomMemberHandler(object):
# so don't really fit into the general auth process.
raise AuthError(403, "Guest access not allowed")
if (self._server_notices_mxid is not None and
requester.user.to_string() == self._server_notices_mxid):
# allow the server notices mxid to join rooms
is_requester_admin = True
else:
is_requester_admin = yield self.auth.is_server_admin(
requester.user,
)
inviter = yield self._get_inviter(target.to_string(), room_id)
if not is_requester_admin:
# We assume that if the spam checker allowed the user to create
# a room then they're allowed to join it.
if not new_room and not self.spam_checker.user_may_join_room(
target.to_string(), room_id,
is_invited=inviter is not None,
):
raise SynapseError(
403, "Not allowed to join this room",
)
if not is_host_in_room:
inviter = yield self._get_inviter(target.to_string(), room_id)
if inviter and not self.hs.is_mine(inviter):
remote_room_hosts.append(inviter.domain)
@@ -758,8 +707,7 @@ class RoomMemberHandler(object):
address,
id_server,
requester,
txn_id,
new_room=False,
txn_id
):
if self.config.block_non_admin_invites:
is_requester_admin = yield self.auth.is_server_admin(
@@ -775,36 +723,10 @@ class RoomMemberHandler(object):
# can't just rely on the standard ratelimiting of events.
yield self.base_handler.ratelimit(requester)
can_invite = yield self.third_party_event_rules.check_threepid_can_be_invited(
medium, address, room_id,
)
if not can_invite:
raise SynapseError(
403, "This third-party identifier can not be invited in this room",
Codes.FORBIDDEN,
)
invitee = yield self._lookup_3pid(
id_server, medium, address
)
is_published = yield self.store.is_room_published(room_id)
if not self.spam_checker.user_may_invite(
requester.user.to_string(), invitee,
third_party_invite={
"medium": medium,
"address": address,
},
room_id=room_id,
new_room=new_room,
published_room=is_published,
):
logger.info("Blocking invite due to spam checker")
raise SynapseError(
403, "Invites have been disabled on this server",
)
if invitee:
yield self.update_membership(
requester,
@@ -824,20 +746,6 @@ class RoomMemberHandler(object):
txn_id=txn_id
)
def _get_id_server_target(self, id_server):
"""Looks up an id_server's actual http endpoint
Args:
id_server (str): the server name to lookup.
Returns:
the http endpoint to connect to.
"""
if id_server in self.rewrite_identity_server_urls:
return self.rewrite_identity_server_urls[id_server]
return id_server
@defer.inlineCallbacks
def _lookup_3pid(self, id_server, medium, address):
"""Looks up a 3pid in the passed identity server.
@@ -851,13 +759,48 @@ class RoomMemberHandler(object):
Returns:
str: the matrix ID of the 3pid, or None if it is not recognized.
"""
if not self._enable_lookup:
raise SynapseError(
403, "Looking up third-party identifiers is denied from this server",
)
try:
data = yield self.identity_handler.lookup_3pid(id_server, medium, address)
defer.returnValue(data.get("mxid"))
except ProxiedRequestError as e:
data = yield self.simple_http_client.get_json(
"%s%s/_matrix/identity/api/v1/lookup" % (id_server_scheme, id_server,),
{
"medium": medium,
"address": address,
}
)
if "mxid" in data:
if "signatures" not in data:
raise AuthError(401, "No signatures on 3pid binding")
yield self._verify_any_signature(data, id_server)
defer.returnValue(data["mxid"])
except IOError as e:
logger.warn("Error from identity server lookup: %s" % (e,))
defer.returnValue(None)
@defer.inlineCallbacks
def _verify_any_signature(self, data, server_hostname):
if server_hostname not in data["signatures"]:
raise AuthError(401, "No signature from server %s" % (server_hostname,))
for key_name, signature in data["signatures"][server_hostname].items():
key_data = yield self.simple_http_client.get_json(
"%s%s/_matrix/identity/api/v1/pubkey/%s" %
(id_server_scheme, server_hostname, key_name,),
)
if "public_key" not in key_data:
raise AuthError(401, "No public key named %s from %s" %
(key_name, server_hostname,))
verify_signed_json(
data,
server_hostname,
decode_verify_key_bytes(key_name, decode_base64(key_data["public_key"]))
)
return
@defer.inlineCallbacks
def _make_and_store_3pid_invite(
self,
@@ -935,7 +878,6 @@ class RoomMemberHandler(object):
"sender": user.to_string(),
"state_key": token,
},
ratelimit=False,
txn_id=txn_id,
)
@@ -984,9 +926,8 @@ class RoomMemberHandler(object):
user.
"""
target = self._get_id_server_target(id_server)
is_url = "%s%s/_matrix/identity/api/v1/store-invite" % (
id_server_scheme, target,
id_server_scheme, id_server,
)
invite_config = {
@@ -1026,7 +967,7 @@ class RoomMemberHandler(object):
fallback_public_key = {
"public_key": data["public_key"],
"key_validity_url": "%s%s/_matrix/identity/api/v1/pubkey/isvalid" % (
id_server_scheme, target,
id_server_scheme, id_server,
),
}
else:

View File

@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
# Copyright 2017-2018 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -30,12 +29,9 @@ class SetPasswordHandler(BaseHandler):
super(SetPasswordHandler, self).__init__(hs)
self._auth_handler = hs.get_auth_handler()
self._device_handler = hs.get_device_handler()
self._password_policy_handler = hs.get_password_policy_handler()
@defer.inlineCallbacks
def set_password(self, user_id, newpassword, requester=None):
self._password_policy_handler.validate_password(newpassword)
password_hash = yield self._auth_handler.hash(newpassword)
except_device_id = requester.device_id if requester else None

Some files were not shown because too many files have changed in this diff