
Compare commits


2 Commits

Author          SHA1         Message                                                 Date
Andrew Morgan   72d589ff1e   Add changelog                                           2019-07-02 17:21:48 +01:00
Andrew Morgan   88a0596565   Remove SMTP_* env var functionality from docker conf    2019-07-02 17:15:11 +01:00
187 changed files with 2466 additions and 5577 deletions


@@ -31,7 +31,7 @@ steps:
- "python -m pip install tox"
- "scripts-dev/check-newsfragment"
label: ":newspaper: Newsfile"
branches: "!master !develop !release-* !shhs-v*"
branches: "!master !develop !release-*"
plugins:
- docker#v3.0.1:
image: "python:3.6"
@@ -47,11 +47,11 @@ steps:
- wait
- command:
- "python -m pip install tox"
- "tox -e py35-old,codecov"
label: ":python: 3.5 / SQLite / Old Deps"
branches: "!shhs !shhs-*"
env:
TRIAL_FLAGS: "-j 2"
plugins:
@@ -69,7 +69,6 @@ steps:
- "python -m pip install tox"
- "tox -e py35,codecov"
label: ":python: 3.5 / SQLite"
branches: "!shhs !shhs-*"
env:
TRIAL_FLAGS: "-j 2"
plugins:
@@ -87,7 +86,6 @@ steps:
- "python -m pip install tox"
- "tox -e py36,codecov"
label: ":python: 3.6 / SQLite"
branches: "!shhs !shhs-*"
env:
TRIAL_FLAGS: "-j 2"
plugins:
@@ -119,7 +117,6 @@ steps:
limit: 2
- label: ":python: 3.5 / :postgres: 9.5"
branches: "!shhs !shhs-*"
env:
TRIAL_FLAGS: "-j 4"
command:
@@ -137,7 +134,6 @@ steps:
limit: 2
- label: ":python: 3.7 / :postgres: 9.5"
branches: "!shhs !shhs-*"
env:
TRIAL_FLAGS: "-j 4"
command:
@@ -173,18 +169,15 @@ steps:
- label: "SyTest - :python: 3.5 / SQLite / Monolith"
branches: "!shhs !shhs-*"
agents:
queue: "medium"
command:
- "bash .buildkite/merge_base_branch.sh"
- "bash /synapse_sytest.sh"
- "bash .buildkite/synapse_sytest.sh"
plugins:
- docker#v3.0.1:
image: "matrixdotorg/sytest-synapse:py35"
propagate-environment: true
always-pull: true
workdir: "/src"
retry:
automatic:
- exit_status: -1
@@ -199,13 +192,11 @@ steps:
POSTGRES: "1"
command:
- "bash .buildkite/merge_base_branch.sh"
- "bash /synapse_sytest.sh"
- "bash .buildkite/synapse_sytest.sh"
plugins:
- docker#v3.0.1:
image: "matrixdotorg/sytest-synapse:py35"
propagate-environment: true
always-pull: true
workdir: "/src"
retry:
automatic:
- exit_status: -1
@@ -214,7 +205,6 @@ steps:
limit: 2
- label: "SyTest - :python: 3.5 / :postgres: 9.6 / Workers"
branches: "!shhs !shhs-*"
agents:
queue: "medium"
env:
@@ -222,13 +212,11 @@ steps:
WORKERS: "1"
command:
- "bash .buildkite/merge_base_branch.sh"
- "bash /synapse_sytest.sh"
- "bash .buildkite/synapse_sytest.sh"
plugins:
- docker#v3.0.1:
image: "matrixdotorg/sytest-synapse:py35"
propagate-environment: true
always-pull: true
workdir: "/src"
soft_fail: true
retry:
automatic:
@@ -236,15 +224,3 @@ steps:
limit: 2
- exit_status: 2
limit: 2
- wait
- label: ":docker: x86_64"
agents:
queue: "release"
branches: "shhs-*"
command:
- "docker build -f docker/Dockerfile --build-arg PYTHON_VERSION=3.7.4 . -t matrixdotorg/synapse:${BUILDKITE_TAG}"
- "docker save matrixdotorg/synapse:${BUILDKITE_TAG} | gzip -9 > docker.tar.gz"
artifact_paths:
- "docker.tar.gz"


@@ -0,0 +1,145 @@
#!/bin/bash
#
# Fetch sytest, and then run the tests for synapse. The entrypoint for the
# sytest-synapse docker images.
set -ex
if [ -n "$BUILDKITE" ]
then
SYNAPSE_DIR=`pwd`
else
SYNAPSE_DIR="/src"
fi
# Attempt to find a sytest to use.
# If /sytest exists, it means that a SyTest checkout has been mounted into the Docker image.
if [ -d "/sytest" ]; then
# If the user has mounted in a SyTest checkout, use that.
echo "Using local sytests..."
# create ourselves a working directory and dos2unix some scripts therein
mkdir -p /work/jenkins
for i in install-deps.pl run-tests.pl tap-to-junit-xml.pl jenkins/prep_sytest_for_postgres.sh; do
dos2unix -n "/sytest/$i" "/work/$i"
done
ln -sf /sytest/tests /work
ln -sf /sytest/keys /work
SYTEST_LIB="/sytest/lib"
else
if [ -n "BUILDKITE_BRANCH" ]
then
branch_name=$BUILDKITE_BRANCH
else
# Otherwise, try to find out which branch the Synapse checkout is using. Fall back to develop if it's not a branch.
branch_name="$(git --git-dir=/src/.git symbolic-ref HEAD 2>/dev/null)" || branch_name="develop"
fi
# Try and fetch the branch
echo "Trying to get same-named sytest branch..."
wget -q https://github.com/matrix-org/sytest/archive/$branch_name.tar.gz -O sytest.tar.gz || {
# Probably a 404, fall back to develop
echo "Using develop instead..."
wget -q https://github.com/matrix-org/sytest/archive/develop.tar.gz -O sytest.tar.gz
}
mkdir -p /work
tar -C /work --strip-components=1 -xf sytest.tar.gz
SYTEST_LIB="/work/lib"
fi
cd /work
# PostgreSQL setup
if [ -n "$POSTGRES" ]
then
export PGUSER=postgres
export POSTGRES_DB_1=pg1
export POSTGRES_DB_2=pg2
# Start the database
su -c 'eatmydata /usr/lib/postgresql/9.6/bin/pg_ctl -w -D /var/lib/postgresql/data start' postgres
# Use the Jenkins script to write out the configuration for a Synapse that uses PostgreSQL
jenkins/prep_sytest_for_postgres.sh
# Make the test databases for the two Synapse servers that will be spun up
su -c 'psql -c "CREATE DATABASE pg1;"' postgres
su -c 'psql -c "CREATE DATABASE pg2;"' postgres
fi
if [ -n "$OFFLINE" ]; then
# if we're in offline mode, just put synapse into the virtualenv, and
# hope that the deps are up-to-date.
#
# (`pip install -e` likes to reinstall setuptools even if it's already installed,
# so we just run setup.py explicitly.)
#
(cd $SYNAPSE_DIR && /venv/bin/python setup.py -q develop)
else
# We've already created the virtualenv, but let's double-check we have all
# deps.
/venv/bin/pip install -q --upgrade --no-cache-dir -e $SYNAPSE_DIR
/venv/bin/pip install -q --upgrade --no-cache-dir \
lxml psycopg2 coverage codecov tap.py
# Make sure all Perl deps are installed -- this is done in the docker build
# so will only install packages added since the last Docker build
./install-deps.pl
fi
# Run the tests
>&2 echo "+++ Running tests"
RUN_TESTS=(
perl -I "$SYTEST_LIB" ./run-tests.pl --python=/venv/bin/python --synapse-directory=$SYNAPSE_DIR --coverage -O tap --all
)
TEST_STATUS=0
if [ -n "$WORKERS" ]; then
RUN_TESTS+=(-I Synapse::ViaHaproxy --dendron-binary=/pydron.py)
else
RUN_TESTS+=(-I Synapse)
fi
"${RUN_TESTS[@]}" "$@" > results.tap || TEST_STATUS=$?
if [ $TEST_STATUS -ne 0 ]; then
>&2 echo -e "run-tests \e[31mFAILED\e[0m: exit code $TEST_STATUS"
else
>&2 echo -e "run-tests \e[32mPASSED\e[0m"
fi
>&2 echo "--- Copying assets"
# Copy out the logs
mkdir -p /logs
cp results.tap /logs/results.tap
rsync --ignore-missing-args --min-size=1B -av server-0 server-1 /logs --include "*/" --include="*.log.*" --include="*.log" --exclude="*"
# Upload coverage to codecov and upload files, if running on Buildkite
if [ -n "$BUILDKITE" ]
then
/venv/bin/coverage combine || true
/venv/bin/coverage xml || true
/venv/bin/codecov -X gcov -f coverage.xml
wget -O buildkite.tar.gz https://github.com/buildkite/agent/releases/download/v3.13.0/buildkite-agent-linux-amd64-3.13.0.tar.gz
tar xvf buildkite.tar.gz
chmod +x ./buildkite-agent
# Upload the files
./buildkite-agent artifact upload "/logs/**/*.log*"
./buildkite-agent artifact upload "/logs/results.tap"
if [ $TEST_STATUS -ne 0 ]; then
# Annotate, if failure
/venv/bin/python $SYNAPSE_DIR/.buildkite/format_tap.py /logs/results.tap "$BUILDKITE_LABEL" | ./buildkite-agent annotate --style="error" --context="$BUILDKITE_LABEL"
fi
fi
exit $TEST_STATUS
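
When tests fail, the script pipes ``/logs/results.tap`` through ``format_tap.py`` and into ``buildkite-agent annotate``. As a rough sketch of that kind of post-processing (this is not the real ``format_tap.py``, and the output format is invented), the ``tap.py`` library that the script installs could be used like so:

    # a minimal sketch, NOT the real .buildkite/format_tap.py: list the
    # failing tests recorded in a TAP file using the tap.py library
    import sys

    from tap.parser import Parser

    failures = [
        line
        for line in Parser().parse_file(sys.argv[1])
        if line.category == "test" and not line.ok
    ]
    for line in failures:
        print("not ok:", line.description)
    print("%d failing test(s)" % len(failures))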


@@ -8,13 +8,6 @@ jobs:
- run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
- run: docker push matrixdotorg/synapse:${CIRCLE_TAG}
- run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py3
dockerhubuploadreleaseshhs:
machine: true
steps:
- checkout
- run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG} --build-arg PYTHON_VERSION=3.7 .
- run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
- run: docker push matrixdotorg/synapse:${CIRCLE_TAG}
dockerhubuploadlatest:
machine: true
steps:
@@ -34,12 +27,6 @@ workflows:
only: /v[0-9].[0-9]+.[0-9]+.*/
branches:
ignore: /.*/
- dockerhubuploadreleaseshhs:
filters:
tags:
only: /shhs-v[0-9]+.[0-9]+.*/
branches:
ignore: /.*/
- dockerhubuploadlatest:
filters:
branches:

.gitignore

@@ -19,7 +19,6 @@ _trial_temp*/
/*.signing.key
/env/
/homeserver*.yaml
/logs
/media_store/
/uploads
@@ -38,3 +37,4 @@ _trial_temp*/
/docs/build/
/htmlcov
/pip-wheel-metadata/


@@ -1,129 +1,3 @@
Synapse 1.2.0 (2019-07-25)
==========================
No significant changes.
Synapse 1.2.0rc2 (2019-07-24)
=============================
Bugfixes
--------
- Fix a regression introduced in v1.2.0rc1 which led to incorrect labels on some prometheus metrics. ([\#5734](https://github.com/matrix-org/synapse/issues/5734))
Synapse 1.2.0rc1 (2019-07-22)
=============================
Features
--------
- Add support for opentracing. ([\#5544](https://github.com/matrix-org/synapse/issues/5544), [\#5712](https://github.com/matrix-org/synapse/issues/5712))
- Add ability to pull all locally stored events out of synapse that a particular user can see. ([\#5589](https://github.com/matrix-org/synapse/issues/5589))
- Add a basic admin command app to allow server operators to run Synapse admin commands separately from the main production instance. ([\#5597](https://github.com/matrix-org/synapse/issues/5597))
- Add `sender` and `origin_server_ts` fields to `m.replace`. ([\#5613](https://github.com/matrix-org/synapse/issues/5613))
- Add default push rule to ignore reactions. ([\#5623](https://github.com/matrix-org/synapse/issues/5623))
- Include the original event when asking for its relations. ([\#5626](https://github.com/matrix-org/synapse/issues/5626))
- Implement `session_lifetime` configuration option, after which access tokens will expire. ([\#5660](https://github.com/matrix-org/synapse/issues/5660))
- Return "This account has been deactivated" when a deactivated user tries to login. ([\#5674](https://github.com/matrix-org/synapse/issues/5674))
- Enable aggregations support by default ([\#5714](https://github.com/matrix-org/synapse/issues/5714))
Bugfixes
--------
- Fix 'utime went backwards' errors on daemonization. ([\#5609](https://github.com/matrix-org/synapse/issues/5609))
- Various minor fixes to the federation request rate limiter. ([\#5621](https://github.com/matrix-org/synapse/issues/5621))
- Forbid viewing relations on an event once it has been redacted. ([\#5629](https://github.com/matrix-org/synapse/issues/5629))
- Fix requests to the `/store_invite` endpoint of identity servers being sent in the wrong format. ([\#5638](https://github.com/matrix-org/synapse/issues/5638))
- Fix newly-registered users not being able to lookup their own profile without joining a room. ([\#5644](https://github.com/matrix-org/synapse/issues/5644))
- Fix bug in #5626 that prevented the original_event field from actually having the contents of the original event in a call to `/relations`. ([\#5654](https://github.com/matrix-org/synapse/issues/5654))
- Fix 3PID bind requests being sent to identity servers as `application/x-form-www-urlencoded` data, which is deprecated. ([\#5658](https://github.com/matrix-org/synapse/issues/5658))
- Fix some problems with authenticating redactions in recent room versions. ([\#5699](https://github.com/matrix-org/synapse/issues/5699), [\#5700](https://github.com/matrix-org/synapse/issues/5700), [\#5707](https://github.com/matrix-org/synapse/issues/5707))
- Ignore redactions of m.room.create events. ([\#5701](https://github.com/matrix-org/synapse/issues/5701))
Updates to the Docker image
---------------------------
- Base Docker image on a newer Alpine Linux version (3.8 -> 3.10). ([\#5619](https://github.com/matrix-org/synapse/issues/5619))
- Add missing space in default logging file format generated by the Docker image. ([\#5620](https://github.com/matrix-org/synapse/issues/5620))
Improved Documentation
----------------------
- Add information about nginx normalisation to reverse_proxy.rst. Contributed by @skalarproduktraum - thanks! ([\#5397](https://github.com/matrix-org/synapse/issues/5397))
- --no-pep517 should be --no-use-pep517 in the documentation to setup the development environment. ([\#5651](https://github.com/matrix-org/synapse/issues/5651))
- Improvements to Postgres setup instructions. Contributed by @Lrizika - thanks! ([\#5661](https://github.com/matrix-org/synapse/issues/5661))
- Minor tweaks to postgres documentation. ([\#5675](https://github.com/matrix-org/synapse/issues/5675))
Deprecations and Removals
-------------------------
- Remove support for the `invite_3pid_guest` configuration setting. ([\#5625](https://github.com/matrix-org/synapse/issues/5625))
Internal Changes
----------------
- Move logging code out of `synapse.util` and into `synapse.logging`. ([\#5606](https://github.com/matrix-org/synapse/issues/5606), [\#5617](https://github.com/matrix-org/synapse/issues/5617))
- Add a blacklist file to the repo to blacklist certain sytests from failing CI. ([\#5611](https://github.com/matrix-org/synapse/issues/5611))
- Make runtime errors surrounding password reset emails much clearer. ([\#5616](https://github.com/matrix-org/synapse/issues/5616))
- Remove dead code for persisting outgoing federation transactions. ([\#5622](https://github.com/matrix-org/synapse/issues/5622))
- Add `lint.sh` to the scripts-dev folder which will run all linting steps required by CI. ([\#5627](https://github.com/matrix-org/synapse/issues/5627))
- Move RegistrationHandler.get_or_create_user to test code. ([\#5628](https://github.com/matrix-org/synapse/issues/5628))
- Add some more common python virtual-environment paths to the black exclusion list. ([\#5630](https://github.com/matrix-org/synapse/issues/5630))
- Some counter metrics exposed over Prometheus have been renamed, with the old names preserved for backwards compatibility and deprecated. See `docs/metrics-howto.rst` for details. ([\#5636](https://github.com/matrix-org/synapse/issues/5636))
- Unblacklist some user_directory sytests. ([\#5637](https://github.com/matrix-org/synapse/issues/5637))
- Factor out some redundant code in the login implementation. ([\#5639](https://github.com/matrix-org/synapse/issues/5639))
- Update ModuleApi to avoid register(generate_token=True). ([\#5640](https://github.com/matrix-org/synapse/issues/5640))
- Remove access-token support from `RegistrationHandler.register`, and rename it. ([\#5641](https://github.com/matrix-org/synapse/issues/5641))
- Remove access-token support from `RegistrationStore.register`, and rename it. ([\#5642](https://github.com/matrix-org/synapse/issues/5642))
- Improve logging for auto-join when a new user is created. ([\#5643](https://github.com/matrix-org/synapse/issues/5643))
- Remove unused and unnecessary check for FederationDeniedError in _exception_to_failure. ([\#5645](https://github.com/matrix-org/synapse/issues/5645))
- Fix a small typo in a code comment. ([\#5655](https://github.com/matrix-org/synapse/issues/5655))
- Clean up exception handling around client access tokens. ([\#5656](https://github.com/matrix-org/synapse/issues/5656))
- Add a mechanism for per-test homeserver configuration in the unit tests. ([\#5657](https://github.com/matrix-org/synapse/issues/5657))
- Inline issue_access_token. ([\#5659](https://github.com/matrix-org/synapse/issues/5659))
- Update the sytest BuildKite configuration to checkout Synapse in `/src`. ([\#5664](https://github.com/matrix-org/synapse/issues/5664))
- Add a `docker` type to the towncrier configuration. ([\#5673](https://github.com/matrix-org/synapse/issues/5673))
- Convert `synapse.federation.transport.server` to `async`. Might improve some stack traces. ([\#5689](https://github.com/matrix-org/synapse/issues/5689))
- Documentation for opentracing. ([\#5703](https://github.com/matrix-org/synapse/issues/5703))
Synapse 1.1.0 (2019-07-04)
==========================
As of v1.1.0, Synapse no longer supports Python 2, nor Postgres version 9.4.
See the [upgrade notes](UPGRADE.rst#upgrading-to-v110) for more details.
This release also deprecates the use of environment variables to configure the
docker image. See the [docker README](https://github.com/matrix-org/synapse/blob/release-v1.1.0/docker/README.md#legacy-dynamic-configuration-file-support)
for more details.
No changes since 1.1.0rc2.
Synapse 1.1.0rc2 (2019-07-03)
=============================
Bugfixes
--------
- Fix regression in 1.1rc1 where OPTIONS requests to the media repo would fail. ([\#5593](https://github.com/matrix-org/synapse/issues/5593))
- Removed the `SYNAPSE_SMTP_*` docker container environment variables. Using these environment variables prevented the docker container from starting in Synapse v1.0, even though they didn't actually allow any functionality anyway. ([\#5596](https://github.com/matrix-org/synapse/issues/5596))
- Fix a number of "Starting txn from sentinel context" warnings. ([\#5605](https://github.com/matrix-org/synapse/issues/5605))
Internal Changes
----------------
- Update github templates. ([\#5552](https://github.com/matrix-org/synapse/issues/5552))
Synapse 1.1.0rc1 (2019-07-02)
=============================


@@ -30,10 +30,11 @@ use github's pull request workflow to review the contribution, and either ask
you to make any refinements needed or merge it and make them ourselves. The
changes will then land on master when we next do a release.
We use `Buildkite <https://buildkite.com/matrix-dot-org/synapse>`_ for
continuous integration. Buildkite builds need to be authorised by a
maintainer. If your change breaks the build, this will be shown in GitHub, so
please keep an eye on the pull request for feedback.
We use `CircleCI <https://circleci.com/gh/matrix-org>`_ and `Buildkite
<https://buildkite.com/matrix-dot-org/synapse>`_ for continuous integration.
Buildkite builds need to be authorised by a maintainer. If your change breaks
the build, this will be shown in GitHub, so please keep an eye on the pull
request for feedback.
To run unit tests in a local development environment, you can use:
@@ -69,21 +70,13 @@ All changes, even minor ones, need a corresponding changelog / newsfragment
entry. These are managed by Towncrier
(https://github.com/hawkowl/towncrier).
To create a changelog entry, make a new file in the ``changelog.d`` directory named
in the format of ``PRnumber.type``. The type can be one of the following:
To create a changelog entry, make a new file in the ``changelog.d``
directory named in the format of ``PRnumber.type``. The type can be
one of ``feature``, ``bugfix``, ``removal`` (also used for
deprecations), or ``misc`` (for internal-only changes).
* ``feature``.
* ``bugfix``.
* ``docker`` (for updates to the Docker image).
* ``doc`` (for updates to the documentation).
* ``removal`` (also used for deprecations).
* ``misc`` (for internal-only changes).
The content of the file is your changelog entry, which should be a short
description of your change in the same style as the rest of our `changelog
<https://github.com/matrix-org/synapse/blob/master/CHANGES.md>`_. The file can
contain Markdown formatting, and should end with a full stop ('.') for
consistency.
The content of the file is your changelog entry, which can contain Markdown
formatting. The entry should end with a full stop ('.') for consistency.
Adding credits to the changelog is encouraged; we value your
contributions and would like to have you shouted out in the release notes!
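
For illustration, an entry for a hypothetical pull request #1234 could be created like this (the PR number and wording are made up, and this assumes you are at the root of a Synapse checkout):

    # write changelog.d/1234.bugfix containing a one-line changelog entry;
    # the PR number and message are purely illustrative
    from pathlib import Path

    entry = Path("changelog.d") / "1234.bugfix"
    entry.write_text("Fix a bug where widgets were frobnicated twice.\n")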


@@ -7,7 +7,6 @@ include demo/README
include demo/demo.tls.dh
include demo/*.py
include demo/*.sh
include sytest-blacklist
recursive-include synapse/storage/schema *.sql
recursive-include synapse/storage/schema *.sql.postgres


@@ -272,7 +272,7 @@ to install using pip and a virtualenv::
virtualenv -p python3 env
source env/bin/activate
python -m pip install --no-use-pep517 -e .[all]
python -m pip install --no-pep-517 -e .[all]
This will download and install all the needed
dependencies into a virtual env.


@@ -49,13 +49,6 @@ returned by the Client-Server API:
# configured on port 443.
curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"
Upgrading to v1.2.0
===================
Some counter metrics have been renamed, with the old names deprecated. See
`the metrics documentation <docs/metrics-howto.rst#renaming-of-metrics--deprecation-of-old-names-in-12>`_
for details.
Upgrading to v1.1.0
===================


@@ -1 +0,0 @@
Synapse can now be configured to not join remote rooms of a given "complexity" (currently, state events). This option can be used to prevent adverse performance on resource-constrained homeservers.


@@ -1 +0,0 @@
Python 2 has been removed from the CI.

changelog.d/5552.misc

@@ -0,0 +1 @@
Update github templates.

changelog.d/5596.bugfix

@@ -0,0 +1 @@
Removed the `SYNAPSE_SMTP_*` docker container environment variables. Using these environment variables prevented the docker container from starting in Synapse v1.0, even though they didn't actually allow any functionality anyway. Users are advised to remove `SYNAPSE_SMTP_HOST`, `SYNAPSE_SMTP_PORT`, `SYNAPSE_SMTP_USER`, `SYNAPSE_SMTP_PASSWORD` and `SYNAPSE_SMTP_FROM` environment variables from their docker run commands.


@@ -1,7 +1,7 @@
# Example log_config file for synapse. To enable, point `log_config` to it in
# Example log_config file for synapse. To enable, point `log_config` to it in
# `homeserver.yaml`, and restart synapse.
#
# This configuration will produce similar results to the defaults within
# This configuration will produce similar results to the defaults within
# synapse, but can be edited to give more flexibility.
version: 1
@@ -12,7 +12,7 @@ formatters:
filters:
context:
(): synapse.logging.context.LoggingContextFilter
(): synapse.util.logcontext.LoggingContextFilter
request: ""
handlers:
@@ -35,7 +35,7 @@ handlers:
root:
level: INFO
handlers: [console] # to use file handler instead, switch to [file]
loggers:
synapse:
level: INFO
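
The ``LoggingContextFilter`` named in these configs is what resolves the ``%(request)s`` placeholder in the formatter string. A stand-in with the same shape (a sketch, not Synapse's actual class):

    # a stand-in for LoggingContextFilter: guarantee every record has a
    # ``request`` attribute so "%(request)s" can always be formatted
    import logging

    class RequestFilter(logging.Filter):
        def __init__(self, request=""):
            super().__init__()
            self.default_request = request

        def filter(self, record):
            if not hasattr(record, "request"):
                record.request = self.default_request
            return True

    handler = logging.StreamHandler()
    handler.addFilter(RequestFilter())
    handler.setFormatter(
        logging.Formatter("%(asctime)s - %(name)s - %(request)s - %(message)s")
    )
    logging.getLogger("demo").addHandler(handler)
    logging.getLogger("demo").warning("hello")  # logs with an empty request field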


@@ -36,7 +36,7 @@ from synapse.util import origin_from_ucid
from synapse.app.homeserver import SynapseHomeServer
# from synapse.logging.utils import log_function
# from synapse.util.logutils import log_function
from twisted.internet import reactor, defer
from twisted.python import log


@@ -8,7 +8,7 @@ formatters:
filters:
context:
(): synapse.logging.context.LoggingContextFilter
(): synapse.util.logcontext.LoggingContextFilter
request: ""
handlers:

debian/changelog

@@ -1,25 +1,9 @@
matrix-synapse-py3 (1.2.0) stable; urgency=medium
[ Amber Brown ]
* Update logging config defaults to match API changes in Synapse.
[ Richard van der Hoff ]
* Add Recommends and Depends for some libraries which you probably want.
[ Synapse Packaging team ]
* New synapse release 1.2.0.
-- Synapse Packaging team <packages@matrix.org> Thu, 25 Jul 2019 14:10:07 +0100
matrix-synapse-py3 (1.1.0) stable; urgency=medium
matrix-synapse-py3 (1.0.0+nmu1) UNRELEASED; urgency=medium
[ Silke Hofstra ]
* Include systemd-python to allow logging to the systemd journal.
[ Synapse Packaging team ]
* New synapse release 1.1.0.
-- Synapse Packaging team <packages@matrix.org> Thu, 04 Jul 2019 11:43:41 +0100
-- Silke Hofstra <silke@slxh.eu> Wed, 29 May 2019 09:45:29 +0200
matrix-synapse-py3 (1.0.0) stable; urgency=medium

debian/control

@@ -2,20 +2,16 @@ Source: matrix-synapse-py3
Section: contrib/python
Priority: extra
Maintainer: Synapse Packaging team <packages@matrix.org>
# keep this list in sync with the build dependencies in docker/Dockerfile-dhvirtualenv.
Build-Depends:
debhelper (>= 9),
dh-systemd,
dh-virtualenv (>= 1.1),
libsystemd-dev,
libpq-dev,
lsb-release,
python3-dev,
python3,
python3-setuptools,
python3-pip,
python3-venv,
libsqlite3-dev,
tar,
Standards-Version: 3.9.8
Homepage: https://github.com/matrix-org/synapse
@@ -32,12 +28,9 @@ Depends:
debconf,
python3-distutils|libpython3-stdlib (<< 3.6),
${misc:Depends},
${shlibs:Depends},
${synapse:pydepends},
# some of our scripts use perl, but none of them are important,
# so we put perl:Depends in Suggests rather than Depends.
Recommends:
${shlibs1:Recommends},
Suggests:
sqlite3,
${perl:Depends},

debian/log.yaml

@@ -7,7 +7,7 @@ formatters:
filters:
context:
(): synapse.logging.context.LoggingContextFilter
(): synapse.util.logcontext.LoggingContextFilter
request: ""
handlers:

debian/rules

@@ -3,29 +3,15 @@
# Build Debian package using https://github.com/spotify/dh-virtualenv
#
# assume we only have one package
PACKAGE_NAME:=`dh_listpackages`
override_dh_systemd_enable:
dh_systemd_enable --name=matrix-synapse
override_dh_installinit:
dh_installinit --name=matrix-synapse
# we don't really want to strip the symbols from our object files.
override_dh_strip:
override_dh_shlibdeps:
# make the postgres package's dependencies a recommendation
# rather than a hard dependency.
find debian/$(PACKAGE_NAME)/ -path '*/site-packages/psycopg2/*.so' | \
xargs dpkg-shlibdeps -Tdebian/$(PACKAGE_NAME).substvars \
-pshlibs1 -dRecommends
# all the other dependencies can be normal 'Depends' requirements,
# except for PIL's, which is self-contained and which confuses
# dpkg-shlibdeps.
dh_shlibdeps -X site-packages/PIL/.libs -X site-packages/psycopg2
override_dh_virtualenv:
./debian/build_virtualenv


@@ -16,7 +16,7 @@ ARG PYTHON_VERSION=3.7
###
### Stage 0: builder
###
FROM docker.io/python:${PYTHON_VERSION}-alpine3.10 as builder
FROM docker.io/python:${PYTHON_VERSION}-alpine3.8 as builder
# install the OS build deps
@@ -55,7 +55,7 @@ RUN pip install --prefix="/install" --no-warn-script-location \
### Stage 1: runtime
###
FROM docker.io/python:${PYTHON_VERSION}-alpine3.10
FROM docker.io/python:${PYTHON_VERSION}-alpine3.8
# xmlsec is required for saml support
RUN apk add --no-cache --virtual .runtime_deps \


@@ -43,9 +43,6 @@ RUN cd dh-virtualenv-1.1 && dpkg-buildpackage -us -uc -b
FROM ${distro}
# Install the build dependencies
#
# NB: keep this list in sync with the list of build-deps in debian/control
# TODO: it would be nice to do that automatically.
RUN apt-get update -qq -o Acquire::Languages=none \
&& env DEBIAN_FRONTEND=noninteractive apt-get install \
-yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \


@@ -2,11 +2,11 @@ version: 1
formatters:
precise:
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s'
filters:
context:
(): synapse.logging.context.LoggingContextFilter
(): synapse.util.logcontext.LoggingContextFilter
request: ""
handlers:


@@ -1,4 +1,4 @@
Log Contexts
Log contexts
============
.. contents::
@@ -12,7 +12,7 @@ record.
Logcontexts are also used for CPU and database accounting, so that we can track
which requests were responsible for high CPU use or database activity.
The ``synapse.logging.context`` module provides facilities for managing the
The ``synapse.util.logcontext`` module provides facilities for managing the
current log context (as well as providing the ``LoggingContextFilter`` class).
Deferreds make the whole thing complicated, so this document describes how it
@@ -27,19 +27,19 @@ found them:
.. code:: python
from synapse.logging import context # omitted from future snippets
from synapse.util import logcontext # omitted from future snippets
def handle_request(request_id):
request_context = context.LoggingContext()
request_context = logcontext.LoggingContext()
calling_context = context.LoggingContext.current_context()
context.LoggingContext.set_current_context(request_context)
calling_context = logcontext.LoggingContext.current_context()
logcontext.LoggingContext.set_current_context(request_context)
try:
request_context.request = request_id
do_request_handling()
logger.debug("finished")
finally:
context.LoggingContext.set_current_context(calling_context)
logcontext.LoggingContext.set_current_context(calling_context)
def do_request_handling():
logger.debug("phew") # this will be logged against request_id
@@ -51,7 +51,7 @@ written much more succinctly as:
.. code:: python
def handle_request(request_id):
with context.LoggingContext() as request_context:
with logcontext.LoggingContext() as request_context:
request_context.request = request_id
do_request_handling()
logger.debug("finished")
@@ -74,7 +74,7 @@ blocking operation, and returns a deferred:
@defer.inlineCallbacks
def handle_request(request_id):
with context.LoggingContext() as request_context:
with logcontext.LoggingContext() as request_context:
request_context.request = request_id
yield do_request_handling()
logger.debug("finished")
@@ -179,7 +179,7 @@ though, we need to make up a new Deferred, or we get a Deferred back from
external code. We need to make it follow our rules.
The easy way to do it is with a combination of ``defer.inlineCallbacks``, and
``context.PreserveLoggingContext``. Suppose we want to implement ``sleep``,
``logcontext.PreserveLoggingContext``. Suppose we want to implement ``sleep``,
which returns a deferred which will run its callbacks after a given number of
seconds. That might look like:
@@ -204,13 +204,13 @@ That doesn't follow the rules, but we can fix it by wrapping it with
This technique works equally for external functions which return deferreds,
or deferreds we have made ourselves.
You can also use ``context.make_deferred_yieldable``, which just does the
You can also use ``logcontext.make_deferred_yieldable``, which just does the
boilerplate for you, so the above could be written:
.. code:: python
def sleep(seconds):
return context.make_deferred_yieldable(get_sleep_deferred(seconds))
return logcontext.make_deferred_yieldable(get_sleep_deferred(seconds))
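
The ``sleep`` example above relies on a ``get_sleep_deferred`` helper that this hunk doesn't show. A self-contained sketch, assuming Twisted and the ``synapse.logging.context`` module path (use ``synapse.util.logcontext`` on older versions):

    # a minimal sketch of the ``sleep`` helper described in this document
    from twisted.internet import defer, reactor

    from synapse.logging.context import make_deferred_yieldable

    def get_sleep_deferred(seconds):
        d = defer.Deferred()
        reactor.callLater(seconds, d.callback, None)
        return d

    def sleep(seconds):
        # the wrapper makes the deferred run its callbacks in the caller's
        # logcontext, per the rules described above
        return make_deferred_yieldable(get_sleep_deferred(seconds))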
Fire-and-forget
@@ -279,7 +279,7 @@ Obviously that option means that the operations done in
that might be fixed by setting a different logcontext via a ``with
LoggingContext(...)`` in ``background_operation``).
The second option is to use ``context.run_in_background``, which wraps a
The second option is to use ``logcontext.run_in_background``, which wraps a
function so that it doesn't reset the logcontext even when it returns an
incomplete deferred, and adds a callback to the returned deferred to reset the
logcontext. In other words, it turns a function that follows the Synapse rules
@@ -293,7 +293,7 @@ It can be used like this:
def do_request_handling():
yield foreground_operation()
context.run_in_background(background_operation)
logcontext.run_in_background(background_operation)
# this will now be logged against the request context
logger.debug("Request handling complete")
@@ -332,7 +332,7 @@ gathered:
result = yield defer.gatherResults([d1, d2])
In this case particularly, though, option two, of using
``context.preserve_fn`` almost certainly makes more sense, so that
``logcontext.preserve_fn`` almost certainly makes more sense, so that
``operation1`` and ``operation2`` are both logged against the original
logcontext. This looks like:
@@ -340,8 +340,8 @@ logcontext. This looks like:
@defer.inlineCallbacks
def do_request_handling():
d1 = context.preserve_fn(operation1)()
d2 = context.preserve_fn(operation2)()
d1 = logcontext.preserve_fn(operation1)()
d2 = logcontext.preserve_fn(operation2)()
with PreserveLoggingContext():
result = yield defer.gatherResults([d1, d2])
@@ -381,7 +381,7 @@ off the background process, and then leave the ``with`` block to wait for it:
.. code:: python
def handle_request(request_id):
with context.LoggingContext() as request_context:
with logcontext.LoggingContext() as request_context:
request_context.request = request_id
d = do_request_handling()
@@ -414,7 +414,7 @@ runs its callbacks in the original logcontext, all is happy.
The business of a Deferred which runs its callbacks in the original logcontext
isn't hard to achieve — we have it today, in the shape of
``context._PreservingContextDeferred``:
``logcontext._PreservingContextDeferred``:
.. code:: python


@@ -59,108 +59,6 @@ How to monitor Synapse metrics using Prometheus
Restart Prometheus.
Renaming of metrics & deprecation of old names in 1.2
-----------------------------------------------------
Synapse 1.2 updates the Prometheus metrics to match the naming convention of the
upstream ``prometheus_client``. The old names are considered deprecated and will
be removed in a future version of Synapse.
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| New Name | Old Name |
+=============================================================================+=======================================================================+
| python_gc_objects_collected_total | python_gc_objects_collected |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| python_gc_objects_uncollectable_total | python_gc_objects_uncollectable |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| python_gc_collections_total | python_gc_collections |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| process_cpu_seconds_total | process_cpu_seconds |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_federation_client_sent_transactions_total | synapse_federation_client_sent_transactions |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_federation_client_events_processed_total | synapse_federation_client_events_processed |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_event_processing_loop_count_total | synapse_event_processing_loop_count |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_event_processing_loop_room_count_total | synapse_event_processing_loop_room_count |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_util_metrics_block_count_total | synapse_util_metrics_block_count |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_util_metrics_block_time_seconds_total | synapse_util_metrics_block_time_seconds |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_util_metrics_block_ru_utime_seconds_total | synapse_util_metrics_block_ru_utime_seconds |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_util_metrics_block_ru_stime_seconds_total | synapse_util_metrics_block_ru_stime_seconds |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_util_metrics_block_db_txn_count_total | synapse_util_metrics_block_db_txn_count |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_util_metrics_block_db_txn_duration_seconds_total | synapse_util_metrics_block_db_txn_duration_seconds |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_util_metrics_block_db_sched_duration_seconds_total | synapse_util_metrics_block_db_sched_duration_seconds |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_background_process_start_count_total | synapse_background_process_start_count |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_background_process_ru_utime_seconds_total | synapse_background_process_ru_utime_seconds |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_background_process_ru_stime_seconds_total | synapse_background_process_ru_stime_seconds |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_background_process_db_txn_count_total | synapse_background_process_db_txn_count |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_background_process_db_txn_duration_seconds_total | synapse_background_process_db_txn_duration_seconds |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_background_process_db_sched_duration_seconds_total | synapse_background_process_db_sched_duration_seconds |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_storage_events_persisted_events_total | synapse_storage_events_persisted_events |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_storage_events_persisted_events_sep_total | synapse_storage_events_persisted_events_sep |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_storage_events_state_delta_total | synapse_storage_events_state_delta |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_storage_events_state_delta_single_event_total | synapse_storage_events_state_delta_single_event |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_storage_events_state_delta_reuse_delta_total | synapse_storage_events_state_delta_reuse_delta |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_federation_server_received_pdus_total | synapse_federation_server_received_pdus |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_federation_server_received_edus_total | synapse_federation_server_received_edus |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_handler_presence_notified_presence_total | synapse_handler_presence_notified_presence |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_handler_presence_federation_presence_out_total | synapse_handler_presence_federation_presence_out |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_handler_presence_presence_updates_total | synapse_handler_presence_presence_updates |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_handler_presence_timers_fired_total | synapse_handler_presence_timers_fired |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_handler_presence_federation_presence_total | synapse_handler_presence_federation_presence |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_handler_presence_bump_active_time_total | synapse_handler_presence_bump_active_time |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_federation_client_sent_edus_total | synapse_federation_client_sent_edus |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_federation_client_sent_pdu_destinations_count_total | synapse_federation_client_sent_pdu_destinations:count |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_federation_client_sent_pdu_destinations_total | synapse_federation_client_sent_pdu_destinations:total |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_handlers_appservice_events_processed_total | synapse_handlers_appservice_events_processed |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_notifier_notified_events_total | synapse_notifier_notified_events |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total | synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter_total | synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_http_httppusher_http_pushes_processed_total | synapse_http_httppusher_http_pushes_processed |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_http_httppusher_http_pushes_failed_total | synapse_http_httppusher_http_pushes_failed |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_http_httppusher_badge_updates_processed_total | synapse_http_httppusher_badge_updates_processed |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
| synapse_http_httppusher_badge_updates_failed_total | synapse_http_httppusher_badge_updates_failed |
+-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
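
The renames in this table track the upstream ``prometheus_client`` convention of exposing every counter with a ``_total`` suffix. A small sketch of that behaviour (the metric name is invented, and this assumes prometheus_client 0.4 or later):

    # illustrative only: modern prometheus_client exposes a Counter under
    # its name plus a "_total" suffix, which is what the renames follow
    from prometheus_client import Counter, generate_latest

    events = Counter("synapse_example_events", "An example counter")
    events.inc()
    exposition = generate_latest().decode("utf-8")
    assert "synapse_example_events_total" in exposition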
Removal of deprecated metrics & time based counters becoming histograms in 0.31.0
---------------------------------------------------------------------------------


@@ -1,100 +0,0 @@
===========
OpenTracing
===========
Background
----------
OpenTracing is a semi-standard being adopted by a number of distributed tracing
platforms. It is a common api for facilitating vendor-agnostic tracing
instrumentation. That is, we can use the OpenTracing api and select one of a
number of tracer implementations to do the heavy lifting in the background.
Our current selected implementation is Jaeger.
OpenTracing is a tool which gives an insight into the causal relationship of
work done in and between servers. The servers each track events and report them
to a centralised server - in Synapse's case: Jaeger. The basic unit used to
represent events is the span. The span roughly represents a single piece of work
that was done and the time at which it occurred. A span can have child spans,
meaning that the work of the child had to be completed for the parent span to
complete, or it can have follow-on spans which represent work that is undertaken
as a result of the parent but is not depended on by the parent in order to
finish.
Since this is undertaken in a distributed environment a request to another
server, such as an RPC or a simple GET, can be considered a span (a unit of
work) for the local server. This causal link is what OpenTracing aims to
capture and visualise. In order to do this, metadata about the local server's
span, i.e. the 'span context', needs to be included with the request to the
remote.
It is up to the remote server to decide what it does with the spans
it creates. This is called the sampling policy and it can be configured
through Jaeger's settings.
For OpenTracing concepts see
https://opentracing.io/docs/overview/what-is-tracing/.
For more information about Jaeger's implementation see
https://www.jaegertracing.io/docs/
======================
Setting up OpenTracing
======================
To receive OpenTracing spans, start up a Jaeger server. This can be done
using docker like so:
.. code-block:: bash
docker run -d --name jaeger \
-p 6831:6831/udp \
-p 6832:6832/udp \
-p 5778:5778 \
-p 16686:16686 \
-p 14268:14268 \
jaegertracing/all-in-one:1.13
Latest documentation is probably at
https://www.jaegertracing.io/docs/1.13/getting-started/
Enable OpenTracing in Synapse
-----------------------------
OpenTracing is not enabled by default. It must be enabled in the homeserver
config by uncommenting the config options under ``opentracing`` as shown in
the `sample config <./sample_config.yaml>`_. For example:
.. code-block:: yaml
opentracing:
tracer_enabled: true
homeserver_whitelist:
- "mytrustedhomeserver.org"
- "*.myotherhomeservers.com"
Homeserver whitelisting
-----------------------
The homeserver whitelist is configured using regular expressions. A list of regular
expressions can be given and their union will be compared when propagating any
span contexts to another homeserver.
Though it's mostly safe to send and receive span contexts to and from
untrusted users since span contexts are usually opaque ids, it can lead to
two problems, namely:
- If the span context is marked as sampled by the sending homeserver, the receiver will
sample it. Therefore two homeservers with wildly different sampling policies
could incur higher sampling counts than intended.
- Sending servers can attach arbitrary data to spans, known as 'baggage'. For safety this has been disabled in Synapse
but that doesn't prevent another server sending you baggage which will be logged
to OpenTracing's logs.
==================
Configuring Jaeger
==================
Sampling strategies can be set as in this document:
https://www.jaegertracing.io/docs/1.13/sampling/
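
As a complement to the (removed) document above, here is a hedged sketch of sending one span to the Jaeger instance started earlier, using the ``jaeger_client`` package; the service name and tag are invented, and this is not Synapse's own instrumentation:

    # a minimal sketch, not Synapse's instrumentation: report one span to a
    # Jaeger agent on localhost via the jaeger_client package
    import time

    from jaeger_client import Config

    config = Config(
        config={"sampler": {"type": "const", "param": 1}, "logging": True},
        service_name="example-service",  # invented name
    )
    tracer = config.initialize_tracer()
    with tracer.start_active_span("handle_request") as scope:
        scope.span.set_tag("request_id", "abc123")  # invented tag
    time.sleep(2)  # give the reporter a moment to flush
    tracer.close()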


@@ -11,9 +11,7 @@ a postgres database.
* If you are using the `matrix.org debian/ubuntu
packages <../INSTALL.md#matrixorg-packages>`_,
the necessary python library will already be installed, but you will need to
ensure the low-level postgres library is installed, which you can do with
``apt install libpq5``.
the necessary libraries will already be installed.
* For other pre-built packages, please consult the documentation from the
relevant package.
@@ -36,14 +34,9 @@ Assuming your PostgreSQL database user is called ``postgres``, create a user
su - postgres
createuser --pwprompt synapse_user
Before you can authenticate with the ``synapse_user``, you must create a
database that it can access. To create a database, first connect to the database
with your database user::
su - postgres
psql
and then run::
The PostgreSQL database used *must* have the correct encoding set, otherwise it
would not be able to store UTF8 strings. To create a database with the correct
encoding use, e.g.::
CREATE DATABASE synapse
ENCODING 'UTF8'
@@ -53,13 +46,7 @@ and then run::
OWNER synapse_user;
This would create an appropriate database named ``synapse`` owned by the
``synapse_user`` user (which must already have been created as above).
Note that the PostgreSQL database *must* have the correct encoding set (as
shown above), otherwise it will not be able to store UTF8 strings.
You may need to enable password authentication so ``synapse_user`` can connect
to the database. See https://www.postgresql.org/docs/11/auth-pg-hba-conf.html.
``synapse_user`` user (which must already exist).
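
A quick way to verify the encoding requirement described above, as a sketch (the connection parameters are placeholders for your own setup):

    # sketch: confirm the database created above really is UTF8-encoded
    # before pointing Synapse at it; host/password are placeholders
    import psycopg2

    conn = psycopg2.connect(
        dbname="synapse", user="synapse_user", password="...", host="localhost"
    )
    with conn.cursor() as cur:
        cur.execute("SHOW SERVER_ENCODING;")
        print(cur.fetchone()[0])  # expect: UTF8
    conn.close()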
Tuning Postgres
===============


@@ -48,8 +48,6 @@ Let's assume that we expect clients to connect to our server at
proxy_set_header X-Forwarded-For $remote_addr;
}
}
Do not add a `/` after the port in `proxy_pass`, otherwise nginx will canonicalise/normalise the URI.
* Caddy::


@@ -278,17 +278,6 @@ listeners:
# Used by phonehome stats to group together related servers.
#server_context: context
# Resource-constrained Homeserver Settings
#
# If limit_large_remote_room_joins is True, the room complexity will be
# checked before a user joins a new remote room. If it is above
# limit_large_remote_room_complexity, it will disallow joining or
# instantly leave.
#
# Uncomment the below lines to enable:
#limit_large_remote_room_joins: True
#limit_large_remote_room_complexity: 1.0
# Whether to require a user to be in the room to add an alias to it.
# Defaults to 'true'.
#
@@ -797,17 +786,6 @@ uploads_path: "DATADIR/uploads"
# renew_at: 1w
# renew_email_subject: "Renew your %(app)s account"
# Time that a user's session remains valid for, after they log in.
#
# Note that this is not currently compatible with guest logins.
#
# Note also that this is calculated at login time: changes are not applied
# retrospectively to users who have already logged in.
#
# By default, this is infinite.
#
#session_lifetime: 24h
# The user must provide all of the below types of 3PID when registering.
#
#registrations_require_3pid:
@@ -1417,27 +1395,3 @@ password_config:
# module: "my_custom_project.SuperRulesSet"
# config:
# example_option: 'things'
## Opentracing ##
# These settings enable opentracing, which implements distributed tracing.
# This allows you to observe the causal chains of events across servers
# including requests, key lookups etc., across any server running
# synapse or any other services which support opentracing
# (specifically those implemented with Jaeger).
#
opentracing:
# tracing is disabled by default. Uncomment the following line to enable it.
#
#enabled: true
# The list of homeservers we wish to send and receive span contexts and span baggage.
# See docs/opentracing.rst
# This is a list of regexes which are matched against the server_name of the
# homeserver.
#
# By default, it is empty, so no servers are matched.
#
#homeserver_whitelist:
# - ".*"


@@ -14,11 +14,6 @@
name = "Bugfixes"
showcontent = true
[[tool.towncrier.type]]
directory = "docker"
name = "Updates to the Docker image"
showcontent = true
[[tool.towncrier.type]]
directory = "doc"
name = "Improved Documentation"
@@ -44,8 +39,6 @@ exclude = '''
| \.git # root of the project
| \.tox
| \.venv
| \.env
| env
| _build
| _trial_temp.*
| build


@@ -1,12 +0,0 @@
#!/bin/sh
#
# Runs linting scripts over the local Synapse checkout
# isort - sorts import statements
# flake8 - lints and finds mistakes
# black - opinionated code formatter
set -e
isort -y -rc synapse tests scripts-dev scripts
flake8 synapse tests
python3 -m black synapse tests scripts-dev scripts


@@ -35,4 +35,4 @@ try:
except ImportError:
pass
__version__ = "1.2.0"
__version__ = "1.1.0rc1"


@@ -25,13 +25,7 @@ from twisted.internet import defer
import synapse.types
from synapse import event_auth
from synapse.api.constants import EventTypes, JoinRules, Membership
from synapse.api.errors import (
AuthError,
Codes,
InvalidClientTokenError,
MissingClientTokenError,
ResourceLimitError,
)
from synapse.api.errors import AuthError, Codes, ResourceLimitError
from synapse.config.server import is_threepid_reserved
from synapse.types import UserID
from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache
@@ -69,6 +63,7 @@ class Auth(object):
self.clock = hs.get_clock()
self.store = hs.get_datastore()
self.state = hs.get_state_handler()
self.TOKEN_NOT_FOUND_HTTP_STATUS = 401
self.token_cache = LruCache(CACHE_SIZE_FACTOR * 10000)
register_cache("cache", "token_cache", self.token_cache)
@@ -194,17 +189,18 @@ class Auth(object):
Returns:
defer.Deferred: resolves to a ``synapse.types.Requester`` object
Raises:
InvalidClientCredentialsError if no user by that token exists or the token
is invalid.
AuthError if access is denied for the user in the access token
AuthError if no user by that token exists or the token is invalid.
"""
# Can optionally look elsewhere in the request (e.g. headers)
try:
ip_addr = self.hs.get_ip_from_request(request)
user_agent = request.requestHeaders.getRawHeaders(
b"User-Agent", default=[b""]
)[0].decode("ascii", "surrogateescape")
access_token = self.get_access_token_from_request(request)
access_token = self.get_access_token_from_request(
request, self.TOKEN_NOT_FOUND_HTTP_STATUS
)
user_id, app_service = yield self._get_appservice_user_id(request)
if user_id:
@@ -268,12 +264,18 @@ class Auth(object):
)
)
except KeyError:
raise MissingClientTokenError()
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS,
"Missing access token.",
errcode=Codes.MISSING_TOKEN,
)
@defer.inlineCallbacks
def _get_appservice_user_id(self, request):
app_service = self.store.get_app_service_by_token(
self.get_access_token_from_request(request)
self.get_access_token_from_request(
request, self.TOKEN_NOT_FOUND_HTTP_STATUS
)
)
if app_service is None:
defer.returnValue((None, None))
@@ -311,25 +313,13 @@ class Auth(object):
`token_id` (int|None): access token id. May be None if guest
`device_id` (str|None): device corresponding to access token
Raises:
InvalidClientCredentialsError if no user by that token exists or the token
is invalid.
AuthError if no user by that token exists or the token is invalid.
"""
if rights == "access":
# first look in the database
r = yield self._look_up_user_by_access_token(token)
if r:
valid_until_ms = r["valid_until_ms"]
if (
valid_until_ms is not None
and valid_until_ms < self.clock.time_msec()
):
# there was a valid access token, but it has expired.
# soft-logout the user.
raise InvalidClientTokenError(
msg="Access token has expired", soft_logout=True
)
defer.returnValue(r)
# otherwise it needs to be a valid macaroon
@@ -341,7 +331,11 @@ class Auth(object):
if not guest:
# non-guest access tokens must be in the database
logger.warning("Unrecognised access token - not in store.")
raise InvalidClientTokenError()
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS,
"Unrecognised access token.",
errcode=Codes.UNKNOWN_TOKEN,
)
# Guest access tokens are not stored in the database (there can
# only be one access token per guest, anyway).
@@ -356,10 +350,16 @@ class Auth(object):
# guest tokens.
stored_user = yield self.store.get_user_by_id(user_id)
if not stored_user:
raise InvalidClientTokenError("Unknown user_id %s" % user_id)
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS,
"Unknown user_id %s" % user_id,
errcode=Codes.UNKNOWN_TOKEN,
)
if not stored_user["is_guest"]:
raise InvalidClientTokenError(
"Guest access token used for regular user"
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS,
"Guest access token used for regular user",
errcode=Codes.UNKNOWN_TOKEN,
)
ret = {
"user": user,
@@ -386,7 +386,11 @@ class Auth(object):
ValueError,
) as e:
logger.warning("Invalid macaroon in auth: %s %s", type(e), e)
raise InvalidClientTokenError("Invalid macaroon passed.")
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS,
"Invalid macaroon passed.",
errcode=Codes.UNKNOWN_TOKEN,
)
def _parse_and_validate_macaroon(self, token, rights="access"):
"""Takes a macaroon and tries to parse and validate it. This is cached
@@ -426,7 +430,11 @@ class Auth(object):
macaroon, rights, self.hs.config.expire_access_token, user_id=user_id
)
except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError):
raise InvalidClientTokenError("Invalid macaroon passed.")
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS,
"Invalid macaroon passed.",
errcode=Codes.UNKNOWN_TOKEN,
)
if not has_expiry and rights == "access":
self.token_cache[token] = (user_id, guest)
@@ -445,14 +453,17 @@ class Auth(object):
(str) user id
Raises:
InvalidClientCredentialsError if there is no user_id caveat in the
macaroon
AuthError if there is no user_id caveat in the macaroon
"""
user_prefix = "user_id = "
for caveat in macaroon.caveats:
if caveat.caveat_id.startswith(user_prefix):
return caveat.caveat_id[len(user_prefix) :]
raise InvalidClientTokenError("No user caveat in macaroon")
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS,
"No user caveat in macaroon",
errcode=Codes.UNKNOWN_TOKEN,
)
def validate_macaroon(self, macaroon, type_string, verify_expiry, user_id):
"""
@@ -516,18 +527,26 @@ class Auth(object):
"token_id": ret.get("token_id", None),
"is_guest": False,
"device_id": ret.get("device_id"),
"valid_until_ms": ret.get("valid_until_ms"),
}
defer.returnValue(user_info)
def get_appservice_by_req(self, request):
token = self.get_access_token_from_request(request)
service = self.store.get_app_service_by_token(token)
if not service:
logger.warn("Unrecognised appservice access token.")
raise InvalidClientTokenError()
request.authenticated_entity = service.sender
return defer.succeed(service)
try:
token = self.get_access_token_from_request(
request, self.TOKEN_NOT_FOUND_HTTP_STATUS
)
service = self.store.get_app_service_by_token(token)
if not service:
logger.warn("Unrecognised appservice access token.")
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS,
"Unrecognised access token.",
errcode=Codes.UNKNOWN_TOKEN,
)
request.authenticated_entity = service.sender
return defer.succeed(service)
except KeyError:
raise AuthError(self.TOKEN_NOT_FOUND_HTTP_STATUS, "Missing access token.")
def is_server_admin(self, user):
""" Check if the given user is a local server admin.
@@ -606,6 +625,21 @@ class Auth(object):
defer.returnValue(auth_ids)
def check_redaction(self, room_version, event, auth_events):
"""Check whether the event sender is allowed to redact the target event.
Returns:
True if the sender is allowed to redact the target event if the
target event was created by them.
False if the sender is allowed to redact the target event with no
further checks.
Raises:
AuthError if the event sender is definitely not allowed to redact
the target event.
"""
return event_auth.check_redaction(room_version, event, auth_events)
@defer.inlineCallbacks
def check_can_change_room_list(self, room_id, user):
"""Check if the user is allowed to edit the room's entry in the
@@ -658,16 +692,20 @@ class Auth(object):
return bool(query_params) or bool(auth_headers)
@staticmethod
def get_access_token_from_request(request):
def get_access_token_from_request(request, token_not_found_http_status=401):
"""Extracts the access_token from the request.
Args:
request: The http request.
token_not_found_http_status(int): The HTTP status code to set in the
AuthError if the token isn't found. This is used in some of the
legacy APIs to change the status code to 403 from the default of
401 since some of the old clients depended on auth errors returning
403.
Returns:
unicode: The access_token
Raises:
MissingClientTokenError: If there isn't a single access_token in the
request
AuthError: If there isn't an access_token in the request.
"""
auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
@@ -676,20 +714,34 @@ class Auth(object):
# Try the get the access_token from a "Authorization: Bearer"
# header
if query_params is not None:
raise MissingClientTokenError(
"Mixing Authorization headers and access_token query parameters."
raise AuthError(
token_not_found_http_status,
"Mixing Authorization headers and access_token query parameters.",
errcode=Codes.MISSING_TOKEN,
)
if len(auth_headers) > 1:
raise MissingClientTokenError("Too many Authorization headers.")
raise AuthError(
token_not_found_http_status,
"Too many Authorization headers.",
errcode=Codes.MISSING_TOKEN,
)
parts = auth_headers[0].split(b" ")
if parts[0] == b"Bearer" and len(parts) == 2:
return parts[1].decode("ascii")
else:
raise MissingClientTokenError("Invalid Authorization header.")
raise AuthError(
token_not_found_http_status,
"Invalid Authorization header.",
errcode=Codes.MISSING_TOKEN,
)
else:
# Try to get the access_token from the query params.
if not query_params:
raise MissingClientTokenError()
raise AuthError(
token_not_found_http_status,
"Missing access token.",
errcode=Codes.MISSING_TOKEN,
)
return query_params[0].decode("ascii")
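For reference, the header-parsing rules described in the docstring above amount to the following standalone sketch; the function and exception names are illustrative stand-ins, not Synapse's actual helpers.

# A minimal sketch of the token-extraction rules: a single
# "Authorization: Bearer <token>" header wins, a lone access_token query
# parameter is the fallback, and mixing the two (or supplying neither)
# is an error.


class MissingTokenSketchError(Exception):
    pass


def extract_access_token(auth_headers, query_params):
    # auth_headers: list of raw Authorization header values, or None.
    # query_params: list of access_token query parameter values, or None.
    if auth_headers:
        if query_params is not None:
            raise MissingTokenSketchError(
                "Mixing Authorization headers and access_token query parameters."
            )
        if len(auth_headers) > 1:
            raise MissingTokenSketchError("Too many Authorization headers.")
        parts = auth_headers[0].split(b" ")
        if parts[0] == b"Bearer" and len(parts) == 2:
            return parts[1].decode("ascii")
        raise MissingTokenSketchError("Invalid Authorization header.")
    if not query_params:
        raise MissingTokenSketchError("Missing access token.")
    return query_params[0].decode("ascii")


assert extract_access_token([b"Bearer abc123"], None) == "abc123"
assert extract_access_token(None, [b"abc123"]) == "abc123"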

View File

@@ -139,22 +139,6 @@ class ConsentNotGivenError(SynapseError):
return cs_error(self.msg, self.errcode, consent_uri=self._consent_uri)
class UserDeactivatedError(SynapseError):
"""The error returned to the client when the user attempted to access an
authenticated endpoint, but the account has been deactivated.
"""
def __init__(self, msg):
"""Constructs a UserDeactivatedError
Args:
msg (str): The human-readable error message
"""
super(UserDeactivatedError, self).__init__(
code=http_client.FORBIDDEN, msg=msg, errcode=Codes.UNKNOWN
)
class RegistrationError(SynapseError):
"""An error raised when a registration event fails."""
@@ -226,9 +210,7 @@ class NotFoundError(SynapseError):
class AuthError(SynapseError):
"""An error raised when there was a problem authorising an event, and at various
other poorly-defined times.
"""
"""An error raised when there was a problem authorising an event."""
def __init__(self, *args, **kwargs):
if "errcode" not in kwargs:
@@ -236,41 +218,6 @@ class AuthError(SynapseError):
super(AuthError, self).__init__(*args, **kwargs)
class InvalidClientCredentialsError(SynapseError):
"""An error raised when there was a problem with the authorisation credentials
in a client request.
https://matrix.org/docs/spec/client_server/r0.5.0#using-access-tokens:
When credentials are required but missing or invalid, the HTTP call will
return with a status of 401 and the error code, M_MISSING_TOKEN or
M_UNKNOWN_TOKEN respectively.
"""
def __init__(self, msg, errcode):
super().__init__(code=401, msg=msg, errcode=errcode)
class MissingClientTokenError(InvalidClientCredentialsError):
"""Raised when we couldn't find the access token in a request"""
def __init__(self, msg="Missing access token"):
super().__init__(msg=msg, errcode="M_MISSING_TOKEN")
class InvalidClientTokenError(InvalidClientCredentialsError):
"""Raised when we didn't understand the access token in a request"""
def __init__(self, msg="Unrecognised access token", soft_logout=False):
super().__init__(msg=msg, errcode="M_UNKNOWN_TOKEN")
self._soft_logout = soft_logout
def error_dict(self):
d = super().error_dict()
d["soft_logout"] = self._soft_logout
return d
class ResourceLimitError(SynapseError):
"""
Any error raised when there is a problem with resource usage.

View File

@@ -27,7 +27,7 @@ from twisted.protocols.tls import TLSMemoryBIOFactory
import synapse
from synapse.app import check_bind_error
from synapse.crypto import context_factory
from synapse.logging.context import PreserveLoggingContext
from synapse.util import PreserveLoggingContext
from synapse.util.async_helpers import Linearizer
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string
@@ -48,7 +48,7 @@ def register_sighup(func):
_sighup_callbacks.append(func)
def start_worker_reactor(appname, config, run_command=reactor.run):
def start_worker_reactor(appname, config):
""" Run the reactor in the main process
Daemonizes if necessary, and then configures some resources, before starting
@@ -57,7 +57,6 @@ def start_worker_reactor(appname, config, run_command=reactor.run):
Args:
appname (str): application name which will be sent to syslog
config (synapse.config.Config): config object
run_command (Callable[]): callable that actually runs the reactor
"""
logger = logging.getLogger(config.worker_app)
@@ -70,19 +69,11 @@ def start_worker_reactor(appname, config, run_command=reactor.run):
daemonize=config.worker_daemonize,
print_pidfile=config.print_pidfile,
logger=logger,
run_command=run_command,
)
def start_reactor(
appname,
soft_file_limit,
gc_thresholds,
pid_file,
daemonize,
print_pidfile,
logger,
run_command=reactor.run,
appname, soft_file_limit, gc_thresholds, pid_file, daemonize, print_pidfile, logger
):
""" Run the reactor in the main process
@@ -97,42 +88,38 @@ def start_reactor(
daemonize (bool): true to run the reactor in a background process
print_pidfile (bool): whether to print the pid file, if daemonize is True
logger (logging.Logger): logger instance to pass to Daemonize
run_command (Callable[]): callable that actually runs the reactor
"""
install_dns_limiter(reactor)
def run():
logger.info("Running")
change_resource_limit(soft_file_limit)
if gc_thresholds:
gc.set_threshold(*gc_thresholds)
run_command()
# make sure that we run the reactor with the sentinel log context,
# otherwise other PreserveLoggingContext instances will get confused
# and complain when they see the logcontext arbitrarily swapping
# between the sentinel and `run` logcontexts.
with PreserveLoggingContext():
logger.info("Running")
# make sure that we run the reactor with the sentinel log context,
# otherwise other PreserveLoggingContext instances will get confused
# and complain when they see the logcontext arbitrarily swapping
# between the sentinel and `run` logcontexts.
#
# We also need to drop the logcontext before forking if we're daemonizing,
# otherwise the cputime metrics get confused about the per-thread resource usage
# appearing to go backwards.
with PreserveLoggingContext():
if daemonize:
if print_pidfile:
print(pid_file)
change_resource_limit(soft_file_limit)
if gc_thresholds:
gc.set_threshold(*gc_thresholds)
reactor.run()
daemon = Daemonize(
app=appname,
pid=pid_file,
action=run,
auto_close_fds=False,
verbose=True,
logger=logger,
)
daemon.start()
else:
run()
if daemonize:
if print_pidfile:
print(pid_file)
daemon = Daemonize(
app=appname,
pid=pid_file,
action=run,
auto_close_fds=False,
verbose=True,
logger=logger,
)
daemon.start()
else:
run()
def quit_with_error(error_string):
@@ -149,7 +136,8 @@ def listen_metrics(bind_addresses, port):
"""
Start Prometheus metrics server.
"""
from synapse.metrics import RegistryProxy, start_http_server
from synapse.metrics import RegistryProxy
from prometheus_client import start_http_server
for host in bind_addresses:
logger.info("Starting metrics listener on %s:%d", host, port)
@@ -252,9 +240,6 @@ def start(hs, listeners=None):
# Load the certificate from disk.
refresh_certificate(hs)
# Start the tracer
synapse.logging.opentracing.init_tracer(hs.config)
# It is now safe to start your Synapse.
hs.start_listening(listeners)
hs.get_datastore().start_profiling()

View File

@@ -1,264 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import sys
import tempfile
from canonicaljson import json
from twisted.internet import defer, task
import synapse
from synapse.app import _base
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.handlers.admin import ExfiltrationWriter
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
from synapse.replication.slave.storage.groups import SlavedGroupServerStore
from synapse.replication.slave.storage.presence import SlavedPresenceStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.logcontext import LoggingContext
from synapse.util.versionstring import get_version_string
logger = logging.getLogger("synapse.app.admin_cmd")
class AdminCmdSlavedStore(
SlavedReceiptsStore,
SlavedAccountDataStore,
SlavedApplicationServiceStore,
SlavedRegistrationStore,
SlavedFilteringStore,
SlavedPresenceStore,
SlavedGroupServerStore,
SlavedDeviceInboxStore,
SlavedDeviceStore,
SlavedPushRuleStore,
SlavedEventStore,
SlavedClientIpStore,
RoomStore,
BaseSlavedStore,
):
pass
class AdminCmdServer(HomeServer):
DATASTORE_CLASS = AdminCmdSlavedStore
def _listen_http(self, listener_config):
pass
def start_listening(self, listeners):
pass
def build_tcp_replication(self):
return AdminCmdReplicationHandler(self)
class AdminCmdReplicationHandler(ReplicationClientHandler):
@defer.inlineCallbacks
def on_rdata(self, stream_name, token, rows):
pass
def get_streams_to_replicate(self):
return {}
@defer.inlineCallbacks
def export_data_command(hs, args):
"""Export data for a user.
Args:
hs (HomeServer)
args (argparse.Namespace)
"""
user_id = args.user_id
directory = args.output_directory
res = yield hs.get_handlers().admin_handler.export_user_data(
user_id, FileExfiltrationWriter(user_id, directory=directory)
)
print(res)
class FileExfiltrationWriter(ExfiltrationWriter):
"""An ExfiltrationWriter that writes the users data to a directory.
Returns the directory location on completion.
Note: This writes to disk on the main reactor thread.
Args:
user_id (str): The user whose data is being exfiltrated.
directory (str|None): The directory to write the data to, if None then
will write to a temporary directory.
"""
def __init__(self, user_id, directory=None):
self.user_id = user_id
if directory:
self.base_directory = directory
else:
self.base_directory = tempfile.mkdtemp(
prefix="synapse-exfiltrate__%s__" % (user_id,)
)
os.makedirs(self.base_directory, exist_ok=True)
if list(os.listdir(self.base_directory)):
raise Exception("Directory must be empty")
def write_events(self, room_id, events):
room_directory = os.path.join(self.base_directory, "rooms", room_id)
os.makedirs(room_directory, exist_ok=True)
events_file = os.path.join(room_directory, "events")
with open(events_file, "a") as f:
for event in events:
print(json.dumps(event.get_pdu_json()), file=f)
def write_state(self, room_id, event_id, state):
room_directory = os.path.join(self.base_directory, "rooms", room_id)
state_directory = os.path.join(room_directory, "state")
os.makedirs(state_directory, exist_ok=True)
event_file = os.path.join(state_directory, event_id)
with open(event_file, "a") as f:
for event in state.values():
print(json.dumps(event.get_pdu_json()), file=f)
def write_invite(self, room_id, event, state):
self.write_events(room_id, [event])
# We write the invite state somewhere else as they aren't full events
# and are only a subset of the state at the event.
room_directory = os.path.join(self.base_directory, "rooms", room_id)
os.makedirs(room_directory, exist_ok=True)
invite_state = os.path.join(room_directory, "invite_state")
with open(invite_state, "a") as f:
for event in state.values():
print(json.dumps(event), file=f)
def finished(self):
return self.base_directory
def start(config_options):
parser = argparse.ArgumentParser(description="Synapse Admin Command")
HomeServerConfig.add_arguments_to_parser(parser)
subparser = parser.add_subparsers(
title="Admin Commands",
required=True,
dest="command",
metavar="<admin_command>",
help="The admin command to perform.",
)
export_data_parser = subparser.add_parser(
"export-data", help="Export all data for a user"
)
export_data_parser.add_argument("user_id", help="User to extra data from")
export_data_parser.add_argument(
"--output-directory",
action="store",
metavar="DIRECTORY",
required=False,
help="The directory to store the exported data in. Must be empty. Defaults"
" to creating a temp directory.",
)
export_data_parser.set_defaults(func=export_data_command)
try:
config, args = HomeServerConfig.load_config_with_parser(parser, config_options)
except ConfigError as e:
sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
if config.worker_app is not None:
assert config.worker_app == "synapse.app.admin_cmd"
# Update the config with some basic overrides so that don't have to specify
# a full worker config.
config.worker_app = "synapse.app.admin_cmd"
if (
not config.worker_daemonize
and not config.worker_log_file
and not config.worker_log_config
):
# Since we're meant to be run as a "command" let's not redirect stdio
# unless we've actually set log config.
config.no_redirect_stdio = True
# Explicitly disable background processes
config.update_user_directory = False
config.start_pushers = False
config.send_federation = False
setup_logging(config, use_worker_options=True)
synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
database_engine = create_engine(config.database_config)
ss = AdminCmdServer(
config.server_name,
db_config=config.database_config,
config=config,
version_string="Synapse/" + get_version_string(synapse),
database_engine=database_engine,
)
ss.setup()
# We use task.react as the basic run command as it correctly handles tearing
# down the reactor when the deferreds resolve and setting the return value.
# We also make sure that `_base.start` gets run before we actually run the
# command.
@defer.inlineCallbacks
def run(_reactor):
with LoggingContext("command"):
yield _base.start(ss, [])
yield args.func(ss, args)
_base.start_worker_reactor(
"synapse-admin-cmd", config, run_command=lambda: task.react(run)
)
if __name__ == "__main__":
with LoggingContext("main"):
start(sys.argv[1:])
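The task.react pattern used by start() above is easy to demonstrate in isolation: react() starts the reactor, calls the given function with the reactor as its first argument, and cleanly tears the reactor down when the returned Deferred resolves. A self-contained sketch (the deferLater call is a stand-in for real work):

from twisted.internet import defer, task


@defer.inlineCallbacks
def main(reactor):
    # Stand-in for `_base.start(ss, [])` followed by the chosen subcommand.
    yield task.deferLater(reactor, 0.1, lambda: None)
    print("command finished; reactor will now stop")


if __name__ == "__main__":
    task.react(main)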

View File

@@ -26,8 +26,8 @@ from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext, run_in_background
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
@@ -36,6 +36,7 @@ from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, run_in_background
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string

View File

@@ -27,8 +27,8 @@ from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
@@ -64,6 +64,7 @@ from synapse.rest.client.versions import VersionsRestServlet
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string

View File

@@ -27,8 +27,8 @@ from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
@@ -59,6 +59,7 @@ from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.storage.user_directory import UserDirectoryStore
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string

View File

@@ -28,8 +28,8 @@ from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.federation.transport.server import TransportLayerServer
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
@@ -48,6 +48,7 @@ from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string

View File

@@ -27,9 +27,9 @@ from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.federation import send_queue
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext, run_in_background
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.metrics import RegistryProxy
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.events import SlavedEventStore
@@ -44,6 +44,7 @@ from synapse.storage.engines import create_engine
from synapse.types import ReadReceipt
from synapse.util.async_helpers import Linearizer
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, run_in_background
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string

View File

@@ -29,8 +29,8 @@ from synapse.config.logger import setup_logging
from synapse.http.server import JsonResource
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
@@ -41,6 +41,7 @@ from synapse.rest.client.v2_alpha._base import client_patterns
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string

View File

@@ -54,9 +54,9 @@ from synapse.federation.transport.server import TransportLayerServer
from synapse.http.additional_resource import AdditionalResource
from synapse.http.server import RootRedirect
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.metrics import RegistryProxy
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.module_api import ModuleApi
from synapse.python_dependencies import check_requirements
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
@@ -72,6 +72,7 @@ from synapse.storage.engines import IncorrectDatabaseSetup, create_engine
from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
from synapse.util.caches import CACHE_SIZE_FACTOR
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.module_loader import load_module
from synapse.util.rlimit import change_resource_limit

View File

@@ -27,8 +27,8 @@ from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
@@ -40,6 +40,7 @@ from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.storage.media_repository import MediaRepositoryStore
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string

View File

@@ -26,8 +26,8 @@ from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext, run_in_background
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import __func__
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.events import SlavedEventStore
@@ -38,6 +38,7 @@ from synapse.server import HomeServer
from synapse.storage import DataStore
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, run_in_background
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string

View File

@@ -31,8 +31,8 @@ from synapse.config.logger import setup_logging
from synapse.handlers.presence import PresenceHandler, get_interested_parties
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext, run_in_background
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore, __func__
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
@@ -57,6 +57,7 @@ from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.storage.presence import UserPresenceState
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, run_in_background
from synapse.util.manhole import manhole
from synapse.util.stringutils import random_string
from synapse.util.versionstring import get_version_string

View File

@@ -28,8 +28,8 @@ from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext, run_in_background
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
@@ -46,6 +46,7 @@ from synapse.storage.engines import create_engine
from synapse.storage.user_directory import UserDirectoryStore
from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, run_in_background
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string

View File

@@ -53,8 +53,8 @@ import logging
from twisted.internet import defer
from synapse.appservice import ApplicationServiceState
from synapse.logging.context import run_in_background
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.util.logcontext import run_in_background
logger = logging.getLogger(__name__)

View File

@@ -137,42 +137,12 @@ class Config(object):
return file_stream.read()
def invoke_all(self, name, *args, **kargs):
"""Invoke all instance methods with the given name and arguments in the
class's MRO.
Args:
name (str): Name of function to invoke
*args
**kwargs
Returns:
list: The list of the return values from each method called
"""
results = []
for cls in type(self).mro():
if name in cls.__dict__:
results.append(getattr(cls, name)(self, *args, **kargs))
return results
@classmethod
def invoke_all_static(cls, name, *args, **kargs):
"""Invoke all static methods with the given name and arguments in the
class's MRO.
Args:
name (str): Name of function to invoke
*args
**kwargs
Returns:
list: The list of the return values from each method called
"""
results = []
for c in cls.mro():
if name in c.__dict__:
results.append(getattr(c, name)(*args, **kargs))
return results
def generate_config(
self,
config_dir_path,
@@ -232,23 +202,6 @@ class Config(object):
Returns: Config object.
"""
config_parser = argparse.ArgumentParser(description=description)
cls.add_arguments_to_parser(config_parser)
obj, _ = cls.load_config_with_parser(config_parser, argv)
return obj
@classmethod
def add_arguments_to_parser(cls, config_parser):
"""Adds all the config flags to an ArgumentParser.
Doesn't support config-file-generation: used by the worker apps.
Used for workers where we want to add extra flags/subcommands.
Args:
config_parser (ArgumentParser): App description
"""
config_parser.add_argument(
"-c",
"--config-path",
@@ -266,34 +219,16 @@ class Config(object):
" Defaults to the directory containing the last config file",
)
cls.invoke_all_static("add_arguments", config_parser)
@classmethod
def load_config_with_parser(cls, parser, argv):
"""Parse the commandline and config files with the given parser
Doesn't support config-file-generation: used by the worker apps.
Used for workers where we want to add extra flags/subcommands.
Args:
parser (ArgumentParser)
argv (list[str])
Returns:
tuple[HomeServerConfig, argparse.Namespace]: Returns the parsed
config object and the parsed argparse.Namespace object from
`parser.parse_args(..)`
"""
obj = cls()
config_args = parser.parse_args(argv)
obj.invoke_all("add_arguments", config_parser)
config_args = config_parser.parse_args(argv)
config_files = find_config_files(search_paths=config_args.config_path)
if not config_files:
parser.error("Must supply a config file.")
config_parser.error("Must supply a config file.")
if config_args.keys_directory:
config_dir_path = config_args.keys_directory
@@ -309,7 +244,7 @@ class Config(object):
obj.invoke_all("read_arguments", config_args)
return obj, config_args
return obj
@classmethod
def load_or_generate_config(cls, description, argv):
@@ -466,7 +401,7 @@ class Config(object):
formatter_class=argparse.RawDescriptionHelpFormatter,
)
obj.invoke_all_static("add_arguments", parser)
obj.invoke_all("add_arguments", parser)
args = parser.parse_args(remaining_args)
config_dict = read_config_files(config_files)
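The MRO walk that invoke_all performs is subtle enough to deserve a stdlib-only illustration: every class in the instance's MRO that defines the named method directly gets called, most-derived class first, and the results are collected.

class Base:
    def add_arguments(self):
        return "base"


class Child(Base):
    def add_arguments(self):
        return "child"


def invoke_all(obj, name):
    # Mirrors Config.invoke_all: call cls.<name> for every class in the MRO
    # that defines it in its own __dict__, so overridden methods still run.
    results = []
    for cls in type(obj).mro():
        if name in cls.__dict__:
            results.append(getattr(cls, name)(obj))
    return results


assert invoke_all(Child(), "add_arguments") == ["child", "base"]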

View File

@@ -69,8 +69,7 @@ class DatabaseConfig(Config):
if database_path is not None:
self.database_config["args"]["database"] = database_path
@staticmethod
def add_arguments(parser):
def add_arguments(self, parser):
db_group = parser.add_argument_group("database")
db_group.add_argument(
"-d",

View File

@@ -112,17 +112,13 @@ class EmailConfig(Config):
missing = []
for k in required:
if k not in email_config:
missing.append("email." + k)
if config.get("public_baseurl") is None:
missing.append("public_base_url")
missing.append(k)
if len(missing) > 0:
raise RuntimeError(
"Password resets emails are configured to be sent from "
"this homeserver due to a partial 'email' block. "
"However, the following required keys are missing: %s"
% (", ".join(missing),)
"email.password_reset_behaviour is set to 'local' "
"but required keys are missing: %s"
% (", ".join(["email." + k for k in missing]),)
)
# Templates for password reset emails
@@ -160,6 +156,13 @@ class EmailConfig(Config):
filepath, "email.password_reset_template_success_html"
)
if config.get("public_baseurl") is None:
raise RuntimeError(
"email.password_reset_behaviour is set to 'local' but no "
"public_baseurl is set. This is necessary to generate password "
"reset links"
)
if self.email_enable_notifs:
required = [
"smtp_host",

View File

@@ -40,7 +40,6 @@ from .spam_checker import SpamCheckerConfig
from .stats import StatsConfig
from .third_party_event_rules import ThirdPartyRulesConfig
from .tls import TlsConfig
from .tracer import TracerConfig
from .user_directory import UserDirectoryConfig
from .voip import VoipConfig
from .workers import WorkerConfig
@@ -76,6 +75,5 @@ class HomeServerConfig(
ServerNoticesConfig,
RoomDirectoryConfig,
ThirdPartyRulesConfig,
TracerConfig,
):
pass

View File

@@ -24,7 +24,7 @@ from twisted.logger import STDLibLogObserver, globalLogBeginner
import synapse
from synapse.app import _base as appbase
from synapse.logging.context import LoggingContextFilter
from synapse.util.logcontext import LoggingContextFilter
from synapse.util.versionstring import get_version_string
from ._base import Config
@@ -40,7 +40,7 @@ formatters:
filters:
context:
(): synapse.logging.context.LoggingContextFilter
(): synapse.util.logcontext.LoggingContextFilter
request: ""
handlers:
@@ -103,8 +103,7 @@ class LoggingConfig(Config):
if args.log_file is not None:
self.log_file = args.log_file
@staticmethod
def add_arguments(parser):
def add_arguments(cls, parser):
logging_group = parser.add_argument_group("logging")
logging_group.add_argument(
"-v",

View File

@@ -23,7 +23,7 @@ class RateLimitConfig(object):
class FederationRateLimitConfig(object):
_items_and_default = {
"window_size": 1000,
"window_size": 10000,
"sleep_limit": 10,
"sleep_delay": 500,
"reject_limit": 50,
@@ -54,7 +54,7 @@ class RatelimitConfig(Config):
# Load the new-style federation config, if it exists. Otherwise, fall
# back to the old method.
if "rc_federation" in config:
if "federation_rc" in config:
self.rc_federation = FederationRateLimitConfig(**config["rc_federation"])
else:
self.rc_federation = FederationRateLimitConfig(
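A stdlib-only sketch of the fallback logic in this hunk: a new-style rc_federation block is unpacked straight into FederationRateLimitConfig, with per-key defaults filling in anything unspecified. The class and key names mirror the hunk; the concrete values are illustrative.

class FederationRateLimitConfigSketch:
    _items_and_default = {
        "window_size": 10000,
        "sleep_limit": 10,
        "sleep_delay": 500,
        "reject_limit": 50,
    }

    def __init__(self, **kwargs):
        # Take each known setting from kwargs, falling back to the default.
        for name, default in self._items_and_default.items():
            setattr(self, name, kwargs.get(name, default))


config = {"rc_federation": {"window_size": 1000}}
rc = FederationRateLimitConfigSketch(**config.get("rc_federation", {}))
assert rc.window_size == 1000 and rc.sleep_limit == 10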

View File

@@ -71,8 +71,9 @@ class RegistrationConfig(Config):
self.default_identity_server = config.get("default_identity_server")
self.allow_guest_access = config.get("allow_guest_access", False)
if config.get("invite_3pid_guest", False):
raise ConfigError("invite_3pid_guest is no longer supported")
self.invite_3pid_guest = self.allow_guest_access and config.get(
"invite_3pid_guest", False
)
self.auto_join_rooms = config.get("auto_join_rooms", [])
for room_alias in self.auto_join_rooms:
@@ -84,11 +85,6 @@ class RegistrationConfig(Config):
"disable_msisdn_registration", False
)
session_lifetime = config.get("session_lifetime")
if session_lifetime is not None:
session_lifetime = self.parse_duration(session_lifetime)
self.session_lifetime = session_lifetime
def generate_config_section(self, generate_secrets=False, **kwargs):
if generate_secrets:
registration_shared_secret = 'registration_shared_secret: "%s"' % (
@@ -146,17 +142,6 @@ class RegistrationConfig(Config):
# renew_at: 1w
# renew_email_subject: "Renew your %%(app)s account"
# Time that a user's session remains valid for, after they log in.
#
# Note that this is not currently compatible with guest logins.
#
# Note also that this is calculated at login time: changes are not applied
# retrospectively to users who have already logged in.
#
# By default, this is infinite.
#
#session_lifetime: 24h
# The user must provide all of the below types of 3PID when registering.
#
#registrations_require_3pid:
@@ -237,8 +222,7 @@ class RegistrationConfig(Config):
% locals()
)
@staticmethod
def add_arguments(parser):
def add_arguments(self, parser):
reg_group = parser.add_argument_group("registration")
reg_group.add_argument(
"--enable-registration",

View File

@@ -87,22 +87,13 @@ def parse_thumbnail_requirements(thumbnail_sizes):
class ContentRepositoryConfig(Config):
def read_config(self, config, **kwargs):
self.enable_media_repo = config.get("enable_media_repo", True)
self.max_upload_size = self.parse_size(config.get("max_upload_size", "10M"))
self.max_image_pixels = self.parse_size(config.get("max_image_pixels", "32M"))
self.max_spider_size = self.parse_size(config.get("max_spider_size", "10M"))
if self.enable_media_repo:
self.media_store_path = self.ensure_directory(
config.get("media_store_path", "media_store")
)
self.uploads_path = self.ensure_directory(
config.get("uploads_path", "uploads")
)
else:
self.media_store_path = None
self.uploads_path = None
self.media_store_path = self.ensure_directory(
config.get("media_store_path", "media_store")
)
backup_media_store_path = config.get("backup_media_store_path")
@@ -159,6 +150,7 @@ class ContentRepositoryConfig(Config):
(provider_class, parsed_config, wrapper_config)
)
self.uploads_path = self.ensure_directory(config.get("uploads_path", "uploads"))
self.dynamic_thumbnails = config.get("dynamic_thumbnails", False)
self.thumbnail_requirements = parse_thumbnail_requirements(
config.get("thumbnail_sizes", DEFAULT_THUMBNAIL_SIZES)

View File

@@ -136,7 +136,7 @@ class ServerConfig(Config):
# Whether to enable experimental MSC1849 (aka relations) support
self.experimental_msc1849_support_enabled = config.get(
"experimental_msc1849_support_enabled", True
"experimental_msc1849_support_enabled", False
)
# Options to control access by tracking MAU
@@ -247,12 +247,6 @@ class ServerConfig(Config):
self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))
# Resource-constrained Homeserver Configuration
self.limit_large_room_joins = config.get("limit_large_remote_room_joins", False)
self.limit_large_room_complexity = config.get(
"limit_large_remote_room_complexity", 1.0
)
bind_port = config.get("bind_port")
if bind_port:
if config.get("no_tls", False):
@@ -623,17 +617,6 @@ class ServerConfig(Config):
# Used by phonehome stats to group together related servers.
#server_context: context
# Resource-constrained Homeserver Settings
#
# If limit_large_remote_room_joins is True, the room complexity will be
# checked before a user joins a new remote room. If it is above
# limit_large_remote_room_complexity, it will disallow joining or
# instantly leave.
#
# Uncomment the below lines to enable:
#limit_large_remote_room_joins: True
#limit_large_remote_room_complexity: 1.0
# Whether to require a user to be in the room to add an alias to it.
# Defaults to 'true'.
#
@@ -656,8 +639,7 @@ class ServerConfig(Config):
if args.print_pidfile is not None:
self.print_pidfile = args.print_pidfile
@staticmethod
def add_arguments(parser):
def add_arguments(self, parser):
server_group = parser.add_argument_group("server")
server_group.add_argument(
"-D",

View File

@@ -1,59 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import Config, ConfigError
class TracerConfig(Config):
def read_config(self, config, **kwargs):
opentracing_config = config.get("opentracing")
if opentracing_config is None:
opentracing_config = {}
self.opentracer_enabled = opentracing_config.get("enabled", False)
if not self.opentracer_enabled:
return
# The tracer is enabled so sanitize the config
self.opentracer_whitelist = opentracing_config.get("homeserver_whitelist", [])
if not isinstance(self.opentracer_whitelist, list):
raise ConfigError("Tracer homeserver_whitelist config is malformed")
def generate_config_section(cls, **kwargs):
return """\
## Opentracing ##
# These settings enable opentracing, which implements distributed tracing.
# This allows you to observe the causal chains of events across servers
# including requests, key lookups etc., across any server running
# synapse or any other service which supports opentracing
# (specifically those implemented with Jaeger).
#
opentracing:
# tracing is disabled by default. Uncomment the following line to enable it.
#
#enabled: true
# The list of homeservers with which we wish to exchange span contexts and span baggage.
# See docs/opentracing.rst
# This is a list of regexes which are matched against the server_name of the
# homeserver.
#
# By default, it is empty, so no servers are matched.
#
#homeserver_whitelist:
# - ".*"
"""

View File

@@ -44,16 +44,15 @@ from synapse.api.errors import (
RequestSendFailed,
SynapseError,
)
from synapse.logging.context import (
from synapse.storage.keys import FetchKeyResult
from synapse.util import logcontext, unwrapFirstError
from synapse.util.async_helpers import yieldable_gather_results
from synapse.util.logcontext import (
LoggingContext,
PreserveLoggingContext,
make_deferred_yieldable,
preserve_fn,
run_in_background,
)
from synapse.storage.keys import FetchKeyResult
from synapse.util import unwrapFirstError
from synapse.util.async_helpers import yieldable_gather_results
from synapse.util.metrics import Measure
from synapse.util.retryutils import NotRetryingDestination
@@ -141,7 +140,7 @@ class Keyring(object):
"""
req = VerifyJsonRequest(server_name, json_object, validity_time, request_name)
requests = (req,)
return make_deferred_yieldable(self._verify_objects(requests)[0])
return logcontext.make_deferred_yieldable(self._verify_objects(requests)[0])
def verify_json_objects_for_server(self, server_and_json):
"""Bulk verifies signatures of json objects, bulk fetching keys as
@@ -558,7 +557,7 @@ class BaseV2KeyFetcher(object):
signed_key_json_bytes = encode_canonical_json(signed_key_json)
yield make_deferred_yieldable(
yield logcontext.make_deferred_yieldable(
defer.gatherResults(
[
run_in_background(
@@ -613,7 +612,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
defer.returnValue({})
results = yield make_deferred_yieldable(
results = yield logcontext.make_deferred_yieldable(
defer.gatherResults(
[run_in_background(get_key, server) for server in self.key_servers],
consumeErrors=True,

View File

@@ -104,17 +104,6 @@ class _EventInternalMetadata(object):
"""
return getattr(self, "proactively_send", True)
def is_redacted(self):
"""Whether the event has been redacted.
This is used for efficiently checking whether an event has been
marked as redacted without needing to make another database call.
Returns:
bool
"""
return getattr(self, "redacted", False)
def _event_dict_property(key):
# We want to be able to use hasattr with the event dict properties.

View File

@@ -19,7 +19,7 @@ from frozendict import frozendict
from twisted.internet import defer
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.util.logcontext import make_deferred_yieldable, run_in_background
class EventContext(object):

View File

@@ -52,15 +52,10 @@ def prune_event(event):
from . import event_type_from_format_version
pruned_event = event_type_from_format_version(event.format_version)(
return event_type_from_format_version(event.format_version)(
pruned_event_dict, event.internal_metadata.get_dict()
)
# Mark the event as redacted
pruned_event.internal_metadata.redacted = True
return pruned_event
def prune_event_dict(event_dict):
"""Redacts the event_dict in the same way as `prune_event`, except it
@@ -365,12 +360,9 @@ class EventClientSerializer(object):
event_id = event.event_id
serialized_event = serialize_event(event, time_now, **kwargs)
# If MSC1849 is enabled then we need to look if there are any relations
# we need to bundle in with the event.
# Do not bundle relations if the event has been redacted
if not event.internal_metadata.is_redacted() and (
self.experimental_msc1849_support_enabled and bundle_aggregations
):
# If MSC1849 is enabled then we need to look if there are any relations
# we need to bundle in with the event
if self.experimental_msc1849_support_enabled and bundle_aggregations:
annotations = yield self.store.get_aggregation_groups_for_event(event_id)
references = yield self.store.get_relations_for_event(
event_id, RelationTypes.REFERENCE, direction="f"
@@ -400,11 +392,7 @@ class EventClientSerializer(object):
serialized_event["content"].pop("m.relates_to", None)
r = serialized_event["unsigned"].setdefault("m.relations", {})
r[RelationTypes.REPLACE] = {
"event_id": edit.event_id,
"origin_server_ts": edit.origin_server_ts,
"sender": edit.sender,
}
r[RelationTypes.REPLACE] = {"event_id": edit.event_id}
defer.returnValue(serialized_event)
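The edit-bundling step above is compact but does three things at once: strip the visible m.relates_to, create the unsigned["m.relations"] container, and advertise the replacement event. An illustrative standalone sketch with simplified dict-shaped events (not real Synapse objects):

REPLACE = "m.replace"


def bundle_edit(serialized_event, edit_event_id):
    # Hide the relation from the visible content, then advertise the edit
    # under unsigned["m.relations"], keyed by relation type.
    serialized_event["content"].pop("m.relates_to", None)
    relations = serialized_event["unsigned"].setdefault("m.relations", {})
    relations[REPLACE] = {"event_id": edit_event_id}
    return serialized_event


event = {
    "content": {"body": "old body", "m.relates_to": {"rel_type": REPLACE}},
    "unsigned": {},
}
bundled = bundle_edit(event, "$edit1")
assert bundled["unsigned"]["m.relations"] == {REPLACE: {"event_id": "$edit1"}}
assert "m.relates_to" not in bundled["content"]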

View File

@@ -27,14 +27,8 @@ from synapse.crypto.event_signing import check_event_content_hash
from synapse.events import event_type_from_format_version
from synapse.events.utils import prune_event
from synapse.http.servlet import assert_params_in_dict
from synapse.logging.context import (
LoggingContext,
PreserveLoggingContext,
make_deferred_yieldable,
preserve_fn,
)
from synapse.types import get_domain_from_id
from synapse.util import unwrapFirstError
from synapse.util import logcontext, unwrapFirstError
logger = logging.getLogger(__name__)
@@ -79,7 +73,7 @@ class FederationBase(object):
@defer.inlineCallbacks
def handle_check_result(pdu, deferred):
try:
res = yield make_deferred_yieldable(deferred)
res = yield logcontext.make_deferred_yieldable(deferred)
except SynapseError:
res = None
@@ -108,10 +102,10 @@ class FederationBase(object):
defer.returnValue(res)
handle = preserve_fn(handle_check_result)
handle = logcontext.preserve_fn(handle_check_result)
deferreds2 = [handle(pdu, deferred) for pdu, deferred in zip(pdus, deferreds)]
valid_pdus = yield make_deferred_yieldable(
valid_pdus = yield logcontext.make_deferred_yieldable(
defer.gatherResults(deferreds2, consumeErrors=True)
).addErrback(unwrapFirstError)
@@ -121,7 +115,7 @@ class FederationBase(object):
defer.returnValue([p for p in valid_pdus if p])
def _check_sigs_and_hash(self, room_version, pdu):
return make_deferred_yieldable(
return logcontext.make_deferred_yieldable(
self._check_sigs_and_hashes(room_version, [pdu])[0]
)
@@ -139,14 +133,14 @@ class FederationBase(object):
* returns a redacted version of the event (if the signature
matched but the hash did not)
* throws a SynapseError if the signature check failed.
The deferreds run their callbacks in the sentinel
The deferreds run their callbacks in the sentinel logcontext.
"""
deferreds = _check_sigs_on_pdus(self.keyring, room_version, pdus)
ctx = LoggingContext.current_context()
ctx = logcontext.LoggingContext.current_context()
def callback(_, pdu):
with PreserveLoggingContext(ctx):
with logcontext.PreserveLoggingContext(ctx):
if not check_event_content_hash(pdu):
# let's try to distinguish between failures because the event was
# redacted (which are somewhat expected) vs actual ball-tampering
@@ -184,7 +178,7 @@ class FederationBase(object):
def errback(failure, pdu):
failure.trap(SynapseError)
with PreserveLoggingContext(ctx):
with logcontext.PreserveLoggingContext(ctx):
logger.warn(
"Signature check failed for %s: %s",
pdu.event_id,

View File

@@ -39,10 +39,10 @@ from synapse.api.room_versions import (
)
from synapse.events import builder, room_version_to_event_format
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.utils import log_function
from synapse.util import unwrapFirstError
from synapse.util import logcontext, unwrapFirstError
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.logcontext import make_deferred_yieldable, run_in_background
from synapse.util.logutils import log_function
from synapse.util.retryutils import NotRetryingDestination
logger = logging.getLogger(__name__)
@@ -207,7 +207,7 @@ class FederationClient(FederationBase):
]
# FIXME: We should handle signature failures more gracefully.
pdus[:] = yield make_deferred_yieldable(
pdus[:] = yield logcontext.make_deferred_yieldable(
defer.gatherResults(
self._check_sigs_and_hashes(room_version, pdus), consumeErrors=True
).addErrback(unwrapFirstError)
@@ -995,39 +995,3 @@ class FederationClient(FederationBase):
)
raise RuntimeError("Failed to send to any server.")
@defer.inlineCallbacks
def get_room_complexity(self, destination, room_id):
"""
Fetch the complexity of a remote room from another server.
Args:
destination (str): The remote server
room_id (str): The room ID to ask about.
Returns:
Deferred[dict] or Deferred[None]: Dict contains the complexity
metric versions, while None means we could not fetch the complexity.
"""
try:
complexity = yield self.transport_layer.get_room_complexity(
destination=destination, room_id=room_id
)
defer.returnValue(complexity)
except CodeMessageException as e:
# We didn't manage to get it -- probably a 404. We are okay if other
# servers don't give it to us.
logger.debug(
"Failed to fetch room complexity via %s for %s, got a %d",
destination,
room_id,
e.code,
)
except Exception:
logger.exception(
"Failed to fetch room complexity via %s for %s", destination, room_id
)
# If we don't manage to find it, return None. It's not an error if a
# server doesn't give it to us.
defer.returnValue(None)

View File

@@ -42,8 +42,6 @@ from synapse.federation.federation_base import FederationBase, event_from_pdu_js
from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Edu, Transaction
from synapse.http.endpoint import parse_server_name
from synapse.logging.context import nested_logging_context
from synapse.logging.utils import log_function
from synapse.replication.http.federation import (
ReplicationFederationSendEduRestServlet,
ReplicationGetQueryRestServlet,
@@ -52,6 +50,8 @@ from synapse.types import get_domain_from_id
from synapse.util import glob_to_regex
from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.logcontext import nested_logging_context
from synapse.util.logutils import log_function
# when processing incoming transactions, we try to handle multiple rooms in
# parallel, up to this limit.

View File

@@ -21,7 +21,9 @@ These actions are mostly only used by the :py:mod:`.replication` module.
import logging
from synapse.logging.utils import log_function
from twisted.internet import defer
from synapse.util.logutils import log_function
logger = logging.getLogger(__name__)
@@ -61,3 +63,33 @@ class TransactionActions(object):
return self.store.set_received_txn_response(
transaction.transaction_id, origin, code, response
)
@defer.inlineCallbacks
@log_function
def prepare_to_send(self, transaction):
""" Persists the `Transaction` we are about to send and works out the
correct value for the `prev_ids` key.
Returns:
Deferred
"""
transaction.prev_ids = yield self.store.prep_send_transaction(
transaction.transaction_id,
transaction.destination,
transaction.origin_server_ts,
)
@log_function
def delivered(self, transaction, response_code, response_dict):
""" Marks the given `Transaction` as having been successfully
delivered to the remote homeserver, and what the response was.
Returns:
Deferred
"""
return self.store.delivered_txn(
transaction.transaction_id,
transaction.destination,
response_code,
response_dict,
)

View File

@@ -26,11 +26,6 @@ from synapse.federation.sender.per_destination_queue import PerDestinationQueue
from synapse.federation.sender.transaction_manager import TransactionManager
from synapse.federation.units import Edu
from synapse.handlers.presence import get_interested_remotes
from synapse.logging.context import (
make_deferred_yieldable,
preserve_fn,
run_in_background,
)
from synapse.metrics import (
LaterGauge,
event_processing_loop_counter,
@@ -38,6 +33,7 @@ from synapse.metrics import (
events_processed_counter,
)
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.util import logcontext
from synapse.util.metrics import measure_func
logger = logging.getLogger(__name__)
@@ -214,10 +210,10 @@ class FederationSender(object):
for event in events:
events_by_room.setdefault(event.room_id, []).append(event)
yield make_deferred_yieldable(
yield logcontext.make_deferred_yieldable(
defer.gatherResults(
[
run_in_background(handle_room_events, evs)
logcontext.run_in_background(handle_room_events, evs)
for evs in itervalues(events_by_room)
],
consumeErrors=True,
@@ -364,7 +360,7 @@ class FederationSender(object):
for queue in queues:
queue.flush_read_receipts_for_room(room_id)
@preserve_fn # the caller should not yield on this
@logcontext.preserve_fn # the caller should not yield on this
@defer.inlineCallbacks
def send_presence(self, states):
"""Send the new presence states to the appropriate destinations.

View File

@@ -63,6 +63,8 @@ class TransactionManager(object):
len(edus),
)
logger.debug("TX [%s] Persisting transaction...", destination)
transaction = Transaction.create_new(
origin_server_ts=int(self.clock.time_msec()),
transaction_id=txn_id,
@@ -74,6 +76,9 @@ class TransactionManager(object):
self._next_txn_id += 1
yield self._transaction_actions.prepare_to_send(transaction)
logger.debug("TX [%s] Persisted transaction", destination)
logger.info(
"TX [%s] {%s} Sending transaction [%s]," " (PDUs: %d, EDUs: %d)",
destination,
@@ -113,6 +118,10 @@ class TransactionManager(object):
logger.info("TX [%s] {%s} got %d response", destination, txn_id, code)
yield self._transaction_actions.delivered(transaction, code, response)
logger.debug("TX [%s] {%s} Marked as delivered", destination, txn_id)
if code == 200:
for e_id, r in response.get("pdus", {}).items():
if "error" in r:

View File

@@ -21,12 +21,8 @@ from six.moves import urllib
from twisted.internet import defer
from synapse.api.constants import Membership
from synapse.api.urls import (
FEDERATION_UNSTABLE_PREFIX,
FEDERATION_V1_PREFIX,
FEDERATION_V2_PREFIX,
)
from synapse.logging.utils import log_function
from synapse.api.urls import FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX
from synapse.util.logutils import log_function
logger = logging.getLogger(__name__)
@@ -939,23 +935,6 @@ class TransportLayerClient(object):
destination=destination, path=path, data=content, ignore_backoff=True
)
def get_room_complexity(self, destination, room_id):
"""
Args:
destination (str): The remote server
room_id (str): The room ID to ask about.
"""
path = _create_path(FEDERATION_UNSTABLE_PREFIX, "/rooms/%s/complexity", room_id)
return self.client.get_json(destination=destination, path=path)
def _create_path(federation_prefix, path, *args):
"""
Ensures that all args are URL-encoded.
"""
return federation_prefix + path % tuple(urllib.parse.quote(arg, "") for arg in args)
def _create_v1_path(path, *args):
"""Creates a path against V1 federation API from the path template and
@@ -972,7 +951,9 @@ def _create_v1_path(path, *args):
Returns:
str
"""
return _create_path(FEDERATION_V1_PREFIX, path, *args)
return FEDERATION_V1_PREFIX + path % tuple(
urllib.parse.quote(arg, "") for arg in args
)
def _create_v2_path(path, *args):
@@ -990,4 +971,6 @@ def _create_v2_path(path, *args):
Returns:
str
"""
return _create_path(FEDERATION_V2_PREFIX, path, *args)
return FEDERATION_V2_PREFIX + path % tuple(
urllib.parse.quote(arg, "") for arg in args
)
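For illustration (path template and argument value hypothetical): because every arg is passed through urllib.parse.quote(arg, "") with an empty safe set, characters such as "#", ":" and "/" cannot break out of their path segment:

>>> _create_v1_path("/query/%s", "#room:example.org")
'/_matrix/federation/v1/query/%23room%3Aexample.org'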

File diff suppressed because it is too large

View File

@@ -43,9 +43,9 @@ from signedjson.sign import sign_json
from twisted.internet import defer
from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
from synapse.logging.context import run_in_background
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import get_domain_from_id
from synapse.util.logcontext import run_in_background
logger = logging.getLogger(__name__)

View File

@@ -22,10 +22,9 @@ from email.mime.text import MIMEText
from twisted.internet import defer
from synapse.api.errors import StoreError
from synapse.logging.context import make_deferred_yieldable
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import UserID
from synapse.util import stringutils
from synapse.util.logcontext import make_deferred_yieldable
try:
from synapse.push.mailer import load_jinja2_templates
@@ -68,14 +67,7 @@ class AccountValidityHandler(object):
)
# Work out which renewal emails need to be sent, and send them, every 30min.
def send_emails():
# run as a background process to make sure that the database transactions
# have a logcontext to report to
return run_as_background_process(
"send_renewals", self.send_renewal_emails
)
self.clock.looping_call(send_emails, 30 * 60 * 1000)
self.clock.looping_call(self.send_renewal_emails, 30 * 60 * 1000)
@defer.inlineCallbacks
def send_renewal_emails(self):

View File

@@ -17,10 +17,6 @@ import logging
from twisted.internet import defer
from synapse.api.constants import Membership
from synapse.types import RoomStreamToken
from synapse.visibility import filter_events_for_client
from ._base import BaseHandler
logger = logging.getLogger(__name__)
@@ -93,182 +89,3 @@ class AdminHandler(BaseHandler):
ret = yield self.store.search_users(term)
defer.returnValue(ret)
@defer.inlineCallbacks
def export_user_data(self, user_id, writer):
"""Write all data we have on the user to the given writer.
Args:
user_id (str)
writer (ExfiltrationWriter)
Returns:
defer.Deferred: Resolves when all data for a user has been written.
The returned value is that returned by `writer.finished()`.
"""
# Get all rooms the user is in or has been in
rooms = yield self.store.get_rooms_for_user_where_membership_is(
user_id,
membership_list=(
Membership.JOIN,
Membership.LEAVE,
Membership.BAN,
Membership.INVITE,
),
)
# We only try and fetch events for rooms the user has been in. If
# they've been e.g. invited to a room without joining then we handle
# those separately.
rooms_user_has_been_in = yield self.store.get_rooms_user_has_been_in(user_id)
for index, room in enumerate(rooms):
room_id = room.room_id
logger.info(
"[%s] Handling room %s, %d/%d", user_id, room_id, index + 1, len(rooms)
)
forgotten = yield self.store.did_forget(user_id, room_id)
if forgotten:
logger.info("[%s] User forgot room %d, ignoring", user_id, room_id)
continue
if room_id not in rooms_user_has_been_in:
# If we haven't been in the rooms then the filtering code below
# won't return anything, so we need to handle these cases
# explicitly.
if room.membership == Membership.INVITE:
event_id = room.event_id
invite = yield self.store.get_event(event_id, allow_none=True)
if invite:
invited_state = invite.unsigned["invite_room_state"]
writer.write_invite(room_id, invite, invited_state)
continue
# We only want to bother fetching events up to the last time they
# were joined. We estimate that point by looking at the
# stream_ordering of the last membership if it wasn't a join.
if room.membership == Membership.JOIN:
stream_ordering = yield self.store.get_room_max_stream_ordering()
else:
stream_ordering = room.stream_ordering
from_key = str(RoomStreamToken(0, 0))
to_key = str(RoomStreamToken(None, stream_ordering))
written_events = set() # Events that we've processed in this room
# We need to track gaps in the events stream so that we can then
# write out the state at those events. We do this by keeping track
# of events whose prev events we haven't seen.
# Map from event ID to prev events that haven't been processed,
# dict[str, set[str]].
event_to_unseen_prevs = {}
# The reverse mapping to above, i.e. map from unseen event to events
# that have the unseen event in their prev_events, i.e. the unseen
# events "children". dict[str, set[str]]
unseen_to_child_events = {}
# We fetch events in the room the user could see by fetching *all*
# events that we have and then filtering. This is perhaps not the most
# efficient method, but it does guarantee we get everything.
while True:
events, _ = yield self.store.paginate_room_events(
room_id, from_key, to_key, limit=100, direction="f"
)
if not events:
break
from_key = events[-1].internal_metadata.after
events = yield filter_events_for_client(self.store, user_id, events)
writer.write_events(room_id, events)
# Update the extremity tracking dicts
for event in events:
# Check if we have any prev events that haven't been
# processed yet, and add those to the appropriate dicts.
unseen_events = set(event.prev_event_ids()) - written_events
if unseen_events:
event_to_unseen_prevs[event.event_id] = unseen_events
for unseen in unseen_events:
unseen_to_child_events.setdefault(unseen, set()).add(
event.event_id
)
# Now check if this event is an unseen prev event, if so
# then we remove this event from the appropriate dicts.
for child_id in unseen_to_child_events.pop(event.event_id, []):
event_to_unseen_prevs[child_id].discard(event.event_id)
written_events.add(event.event_id)
logger.info(
"Written %d events in room %s", len(written_events), room_id
)
# Extremities are the events who have at least one unseen prev event.
extremities = (
event_id
for event_id, unseen_prevs in event_to_unseen_prevs.items()
if unseen_prevs
)
for event_id in extremities:
if not event_to_unseen_prevs[event_id]:
continue
state = yield self.store.get_state_for_event(event_id)
writer.write_state(room_id, event_id, state)
defer.returnValue(writer.finished())
class ExfiltrationWriter(object):
"""Interface used to specify how to write exported data.
"""
def write_events(self, room_id, events):
"""Write a batch of events for a room.
Args:
room_id (str)
events (list[FrozenEvent])
"""
pass
def write_state(self, room_id, event_id, state):
"""Write the state at the given event in the room.
This only gets called for backward extremities rather than for each
event.
Args:
room_id (str)
event_id (str)
state (dict[tuple[str, str], FrozenEvent])
"""
pass
def write_invite(self, room_id, event, state):
"""Write an invite for the room, with associated invite state.
Args:
room_id (str)
event (FrozenEvent)
state (dict[tuple[str, str], dict]): A subset of the state at the
invite, with a subset of the event keys (type, state_key,
content and sender)
"""
def finished(self):
"""Called when all data has succesfully been exported and written.
This functions return value is passed to the caller of
`export_user_data`.
"""
pass
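A minimal, hypothetical implementation of this interface, for orientation; it simply collects everything in memory, and finished() hands the collected events back to the caller of `export_user_data`:

class InMemoryWriter(ExfiltrationWriter):
    def __init__(self):
        self.events = {}   # room_id -> [FrozenEvent]
        self.state = {}    # (room_id, event_id) -> state dict
        self.invites = {}  # room_id -> (event, invited_state)

    def write_events(self, room_id, events):
        self.events.setdefault(room_id, []).extend(events)

    def write_state(self, room_id, event_id, state):
        self.state[(room_id, event_id)] = state

    def write_invite(self, room_id, event, state):
        self.invites[room_id] = (event, state)

    def finished(self):
        # This value is what export_user_data resolves to.
        return self.events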

View File

@@ -23,13 +23,13 @@ from twisted.internet import defer
import synapse
from synapse.api.constants import EventTypes
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.metrics import (
event_processing_loop_counter,
event_processing_loop_room_count,
)
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.util import log_failure
from synapse.util.logcontext import make_deferred_yieldable, run_in_background
from synapse.util.metrics import Measure
logger = logging.getLogger(__name__)

View File

@@ -15,7 +15,6 @@
# limitations under the License.
import logging
import time
import unicodedata
import attr
@@ -35,12 +34,11 @@ from synapse.api.errors import (
LoginError,
StoreError,
SynapseError,
UserDeactivatedError,
)
from synapse.api.ratelimiting import Ratelimiter
from synapse.logging.context import defer_to_thread
from synapse.module_api import ModuleApi
from synapse.types import UserID
from synapse.util import logcontext
from synapse.util.caches.expiringcache import ExpiringCache
from ._base import BaseHandler
@@ -560,7 +558,7 @@ class AuthHandler(BaseHandler):
return self.sessions[session_id]
@defer.inlineCallbacks
def get_access_token_for_user_id(self, user_id, device_id, valid_until_ms):
def get_access_token_for_user_id(self, user_id, device_id=None):
"""
Creates a new access token for the user with the given user ID.
@@ -574,27 +572,15 @@ class AuthHandler(BaseHandler):
device_id (str|None): the device ID to associate with the tokens.
None to leave the tokens unassociated with a device (deprecated:
we should always have a device ID)
valid_until_ms (int|None): when the token is valid until. None for
no expiry.
Returns:
The access token for the user's session.
Raises:
StoreError if there was a problem storing the token.
"""
fmt_expiry = ""
if valid_until_ms is not None:
fmt_expiry = time.strftime(
" until %Y-%m-%d %H:%M:%S", time.localtime(valid_until_ms / 1000.0)
)
logger.info("Logging in user %s on device %s%s", user_id, device_id, fmt_expiry)
logger.info("Logging in user %s on device %s", user_id, device_id)
access_token = yield self.issue_access_token(user_id, device_id)
yield self.auth.check_auth_blocking(user_id)
access_token = self.macaroon_gen.generate_access_token(user_id)
yield self.store.add_access_token_to_user(
user_id, access_token, device_id, valid_until_ms
)
# the device *should* have been registered before we got here; however,
# it's possible we raced against a DELETE operation. The thing we
# really don't want is active access_tokens without a record of the
@@ -624,7 +610,6 @@ class AuthHandler(BaseHandler):
Raises:
LimitExceededError if the ratelimiter's login requests count for this
user is too high to proceed.
UserDeactivatedError if a user is found but is deactivated.
"""
self.ratelimit_login_per_account(user_id)
res = yield self._find_user_id_and_pwd_hash(user_id)
@@ -840,19 +825,18 @@ class AuthHandler(BaseHandler):
if not lookupres:
defer.returnValue(None)
(user_id, password_hash) = lookupres
# If the password hash is None, the account has likely been deactivated
if not password_hash:
deactivated = yield self.store.get_user_deactivated_status(user_id)
if deactivated:
raise UserDeactivatedError("This account has been deactivated")
result = yield self.validate_hash(password, password_hash)
if not result:
logger.warn("Failed password login for user %s", user_id)
defer.returnValue(None)
defer.returnValue(user_id)
@defer.inlineCallbacks
def issue_access_token(self, user_id, device_id=None):
access_token = self.macaroon_gen.generate_access_token(user_id)
yield self.store.add_access_token_to_user(user_id, access_token, device_id)
defer.returnValue(access_token)
@defer.inlineCallbacks
def validate_short_term_login_token_and_get_user_id(self, login_token):
auth_api = self.hs.get_auth()
@@ -1003,7 +987,7 @@ class AuthHandler(BaseHandler):
bcrypt.gensalt(self.bcrypt_rounds),
).decode("ascii")
return defer_to_thread(self.hs.get_reactor(), _do_hash)
return logcontext.defer_to_thread(self.hs.get_reactor(), _do_hash)
def validate_hash(self, password, stored_hash):
"""Validates that self.hash(password) == stored_hash.
@@ -1029,7 +1013,7 @@ class AuthHandler(BaseHandler):
if not isinstance(stored_hash, bytes):
stored_hash = stored_hash.encode("ascii")
return defer_to_thread(self.hs.get_reactor(), _do_validate_hash)
return logcontext.defer_to_thread(self.hs.get_reactor(), _do_validate_hash)
else:
return defer.succeed(False)
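The same off-reactor pattern in isolation, as a hedged sketch (function name hypothetical; Synapse's password pepper is omitted; the import is the synapse.logging.context path shown on one side of this hunk):

import bcrypt
from synapse.logging.context import defer_to_thread

def check_password(reactor, password, stored_hash):
    def _do_validate_hash():
        # bcrypt.checkpw is CPU-bound, so run it on a worker thread
        # rather than blocking the reactor.
        return bcrypt.checkpw(password.encode("utf-8"), stored_hash)

    # Returns a Deferred[bool] that fires back on the reactor thread.
    return defer_to_thread(reactor, _do_validate_hash)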

View File

@@ -22,9 +22,9 @@ from canonicaljson import encode_canonical_json, json
from twisted.internet import defer
from synapse.api.errors import CodeMessageException, SynapseError
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.api.errors import CodeMessageException, FederationDeniedError, SynapseError
from synapse.types import UserID, get_domain_from_id
from synapse.util.logcontext import make_deferred_yieldable, run_in_background
from synapse.util.retryutils import NotRetryingDestination
logger = logging.getLogger(__name__)
@@ -350,6 +350,9 @@ def _exception_to_failure(e):
if isinstance(e, NotRetryingDestination):
return {"status": 503, "message": "Not ready for retry"}
if isinstance(e, FederationDeniedError):
return {"status": 403, "message": "Federation Denied"}
# include ConnectionRefused and other errors
#
# Note that some Exceptions (notably twisted's ResponseFailed etc) don't

View File

@@ -21,8 +21,8 @@ from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import AuthError, SynapseError
from synapse.events import EventBase
from synapse.logging.utils import log_function
from synapse.types import UserID
from synapse.util.logutils import log_function
from synapse.visibility import filter_events_for_client
from ._base import BaseHandler

View File

@@ -45,13 +45,6 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
from synapse.crypto.event_signing import compute_event_signature
from synapse.event_auth import auth_types_for_event
from synapse.events.validator import EventValidator
from synapse.logging.context import (
make_deferred_yieldable,
nested_logging_context,
preserve_fn,
run_in_background,
)
from synapse.logging.utils import log_function
from synapse.replication.http.federation import (
ReplicationCleanRoomRestServlet,
ReplicationFederationSendEventsRestServlet,
@@ -59,9 +52,10 @@ from synapse.replication.http.federation import (
from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet
from synapse.state import StateResolutionStore, resolve_events_with_store
from synapse.types import UserID, get_domain_from_id
from synapse.util import unwrapFirstError
from synapse.util import logcontext, unwrapFirstError
from synapse.util.async_helpers import Linearizer
from synapse.util.distributor import user_joined_room
from synapse.util.logutils import log_function
from synapse.util.retryutils import NotRetryingDestination
from synapse.visibility import filter_events_for_server
@@ -344,7 +338,7 @@ class FederationHandler(BaseHandler):
room_version = yield self.store.get_room_version(room_id)
with nested_logging_context(p):
with logcontext.nested_logging_context(p):
# note that if any of the missing prevs share missing state or
# auth events, the requests to fetch those events are deduped
# by the get_pdu_cache in federation_client.
@@ -538,7 +532,7 @@ class FederationHandler(BaseHandler):
event_id,
ev.event_id,
)
with nested_logging_context(ev.event_id):
with logcontext.nested_logging_context(ev.event_id):
try:
yield self.on_receive_pdu(origin, ev, sent_to_us_directly=False)
except FederationError as e:
@@ -731,10 +725,10 @@ class FederationHandler(BaseHandler):
missing_auth - failed_to_fetch,
)
results = yield make_deferred_yieldable(
results = yield logcontext.make_deferred_yieldable(
defer.gatherResults(
[
run_in_background(
logcontext.run_in_background(
self.federation_client.get_pdu,
[dest],
event_id,
@@ -1000,8 +994,10 @@ class FederationHandler(BaseHandler):
event_ids = list(extremities.keys())
logger.debug("calling resolve_state_groups in _maybe_backfill")
resolve = preserve_fn(self.state_handler.resolve_state_groups_for_events)
states = yield make_deferred_yieldable(
resolve = logcontext.preserve_fn(
self.state_handler.resolve_state_groups_for_events
)
states = yield logcontext.make_deferred_yieldable(
defer.gatherResults(
[resolve(room_id, [e]) for e in event_ids], consumeErrors=True
)
@@ -1175,7 +1171,7 @@ class FederationHandler(BaseHandler):
# lots of requests for missing prev_events which we do actually
# have. Hence we fire off the deferred, but don't wait for it.
run_in_background(self._handle_queued_pdus, room_queue)
logcontext.run_in_background(self._handle_queued_pdus, room_queue)
defer.returnValue(True)
@@ -1195,7 +1191,7 @@ class FederationHandler(BaseHandler):
p.event_id,
p.room_id,
)
with nested_logging_context(p.event_id):
with logcontext.nested_logging_context(p.event_id):
yield self.on_receive_pdu(origin, p, sent_to_us_directly=True)
except Exception as e:
logger.warn(
@@ -1614,7 +1610,7 @@ class FederationHandler(BaseHandler):
success = True
finally:
if not success:
run_in_background(
logcontext.run_in_background(
self.store.remove_push_actions_from_staging, event.event_id
)
@@ -1633,7 +1629,7 @@ class FederationHandler(BaseHandler):
@defer.inlineCallbacks
def prep(ev_info):
event = ev_info["event"]
with nested_logging_context(suffix=event.event_id):
with logcontext.nested_logging_context(suffix=event.event_id):
res = yield self._prep_event(
origin,
event,
@@ -1643,9 +1639,12 @@ class FederationHandler(BaseHandler):
)
defer.returnValue(res)
contexts = yield make_deferred_yieldable(
contexts = yield logcontext.make_deferred_yieldable(
defer.gatherResults(
[run_in_background(prep, ev_info) for ev_info in event_infos],
[
logcontext.run_in_background(prep, ev_info)
for ev_info in event_infos
],
consumeErrors=True,
)
)
@@ -2107,10 +2106,10 @@ class FederationHandler(BaseHandler):
room_version = yield self.store.get_room_version(event.room_id)
different_events = yield make_deferred_yieldable(
different_events = yield logcontext.make_deferred_yieldable(
defer.gatherResults(
[
run_in_background(
logcontext.run_in_background(
self.store.get_event, d, allow_none=True, allow_rejected=False
)
for d in different_auth
@@ -2765,28 +2764,3 @@ class FederationHandler(BaseHandler):
)
else:
return user_joined_room(self.distributor, user, room_id)
@defer.inlineCallbacks
def get_room_complexity(self, remote_room_hosts, room_id):
"""
Fetch the complexity of a remote room over federation.
Args:
remote_room_hosts (list[str]): The remote servers to ask.
room_id (str): The room ID to ask about.
Returns:
Deferred[dict] or Deferred[None]: Dict contains the complexity
metric versions, while None means we could not fetch the complexity.
"""
for host in remote_room_hosts:
res = yield self.federation_client.get_room_complexity(host, room_id)
# We got a result, return it.
if res:
defer.returnValue(res)
# We fell off the bottom, couldn't get the complexity from anyone. Oh
# well.
defer.returnValue(None)

View File

@@ -118,7 +118,7 @@ class IdentityHandler(BaseHandler):
raise SynapseError(400, "No client_secret in creds")
try:
data = yield self.http_client.post_json_get_json(
data = yield self.http_client.post_urlencoded_get_json(
"https://%s%s" % (id_server, "/_matrix/identity/api/v1/3pid/bind"),
{"sid": creds["sid"], "client_secret": client_secret, "mxid": mxid},
)

View File

@@ -21,12 +21,12 @@ from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import AuthError, Codes, SynapseError
from synapse.events.validator import EventValidator
from synapse.handlers.presence import format_user_presence_state
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.streams.config import PaginationConfig
from synapse.types import StreamToken, UserID
from synapse.util import unwrapFirstError
from synapse.util.async_helpers import concurrently_execute
from synapse.util.caches.snapshot_cache import SnapshotCache
from synapse.util.logcontext import make_deferred_yieldable, run_in_background
from synapse.visibility import filter_events_for_client
from ._base import BaseHandler

View File

@@ -23,7 +23,6 @@ from canonicaljson import encode_canonical_json, json
from twisted.internet import defer
from twisted.internet.defer import succeed
from synapse import event_auth
from synapse.api.constants import EventTypes, Membership, RelationTypes
from synapse.api.errors import (
AuthError,
@@ -35,13 +34,13 @@ from synapse.api.errors import (
from synapse.api.room_versions import RoomVersions
from synapse.api.urls import ConsentURIBuilder
from synapse.events.validator import EventValidator
from synapse.logging.context import run_in_background
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.http.send_event import ReplicationSendEventRestServlet
from synapse.storage.state import StateFilter
from synapse.types import RoomAlias, UserID, create_requester
from synapse.util.async_helpers import Linearizer
from synapse.util.frozenutils import frozendict_json_encoder
from synapse.util.logcontext import run_in_background
from synapse.util.metrics import measure_func
from synapse.visibility import filter_events_for_client
@@ -785,20 +784,6 @@ class EventCreationHandler(object):
event.signatures.update(returned_invite.signatures)
if event.type == EventTypes.Redaction:
original_event = yield self.store.get_event(
event.redacts,
check_redacted=False,
get_prev_content=False,
allow_rejected=False,
allow_none=True,
check_room_id=event.room_id,
)
# we can make some additional checks now if we have the original event.
if original_event:
if original_event.type == EventTypes.Create:
raise AuthError(403, "Redacting create events is not permitted")
prev_state_ids = yield context.get_prev_state_ids(self.store)
auth_events_ids = yield self.auth.compute_auth_events(
event, prev_state_ids, for_verification=True
@@ -806,18 +791,18 @@ class EventCreationHandler(object):
auth_events = yield self.store.get_events(auth_events_ids)
auth_events = {(e.type, e.state_key): e for e in auth_events.values()}
room_version = yield self.store.get_room_version(event.room_id)
if event_auth.check_redaction(room_version, event, auth_events=auth_events):
# this user doesn't have 'redact' rights, so we need to do some more
# checks on the original event. Let's start by checking the original
# event exists.
if not original_event:
raise NotFoundError("Could not find event %s" % (event.redacts,))
if self.auth.check_redaction(room_version, event, auth_events=auth_events):
original_event = yield self.store.get_event(
event.redacts,
check_redacted=False,
get_prev_content=False,
allow_rejected=False,
allow_none=False,
)
if event.user_id != original_event.user_id:
raise AuthError(403, "You don't have permission to redact events")
# all the checks are done.
# We've already checked.
event.internal_metadata.recheck_redaction = False
if event.type == EventTypes.Create:

View File

@@ -20,10 +20,10 @@ from twisted.python.failure import Failure
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import SynapseError
from synapse.logging.context import run_in_background
from synapse.storage.state import StateFilter
from synapse.types import RoomStreamToken
from synapse.util.async_helpers import ReadWriteLock
from synapse.util.logcontext import run_in_background
from synapse.util.stringutils import random_string
from synapse.visibility import filter_events_for_client

View File

@@ -34,14 +34,14 @@ from twisted.internet import defer
import synapse.metrics
from synapse.api.constants import EventTypes, Membership, PresenceState
from synapse.api.errors import SynapseError
from synapse.logging.context import run_in_background
from synapse.logging.utils import log_function
from synapse.metrics import LaterGauge
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.presence import UserPresenceState
from synapse.types import UserID, get_domain_from_id
from synapse.util.async_helpers import Linearizer
from synapse.util.caches.descriptors import cachedInlineCallbacks
from synapse.util.logcontext import run_in_background
from synapse.util.logutils import log_function
from synapse.util.metrics import Measure
from synapse.util.wheel_timer import WheelTimer

View File

@@ -303,10 +303,6 @@ class BaseProfileHandler(BaseHandler):
if not self.hs.config.require_auth_for_profile_requests or not requester:
return
# Always allow the user to query their own profile.
if target_user.to_string() == requester.to_string():
return
try:
requester_rooms = yield self.store.get_rooms_for_user(requester.to_string())
target_user_rooms = yield self.store.get_rooms_for_user(

View File

@@ -84,8 +84,6 @@ class RegistrationHandler(BaseHandler):
self.device_handler = hs.get_device_handler()
self.pusher_pool = hs.get_pusherpool()
self.session_lifetime = hs.config.session_lifetime
@defer.inlineCallbacks
def check_username(self, localpart, guest_access_token=None, assigned_user_id=None):
if types.contains_invalid_mxid_characters(localpart):
@@ -140,10 +138,11 @@ class RegistrationHandler(BaseHandler):
)
@defer.inlineCallbacks
def register_user(
def register(
self,
localpart=None,
password=None,
generate_token=True,
guest_access_token=None,
make_guest=False,
admin=False,
@@ -161,6 +160,11 @@ class RegistrationHandler(BaseHandler):
password (unicode) : The password to assign to this user so they can
login again. This can be None which means they cannot login again
via a password (e.g. the user is an application service user).
generate_token (bool): Whether a new access token should be
generated. Having this be True should be considered deprecated,
since it offers no means of associating a device_id with the
access_token. Instead you should call auth_handler.issue_access_token
after registration.
user_type (str|None): type of user. One of the values from
api.constants.UserTypes, or None for a normal user.
default_display_name (unicode|None): if set, the new user's displayname
@@ -168,7 +172,7 @@ class RegistrationHandler(BaseHandler):
address (str|None): the IP address used to perform the registration.
bind_emails (List[str]): list of emails to bind to this account.
Returns:
Deferred[str]: user_id
A tuple of (user_id, access_token).
Raises:
RegistrationError if there was a problem registering.
"""
@@ -202,8 +206,12 @@ class RegistrationHandler(BaseHandler):
elif default_display_name is None:
default_display_name = localpart
token = None
if generate_token:
token = self.macaroon_gen.generate_access_token(user_id)
yield self.register_with_store(
user_id=user_id,
token=token,
password_hash=password_hash,
was_guest=was_guest,
make_guest=make_guest,
@@ -222,17 +230,21 @@ class RegistrationHandler(BaseHandler):
else:
# autogen a sequential user ID
attempts = 0
token = None
user = None
while not user:
localpart = yield self._generate_user_id(attempts > 0)
user = UserID(localpart, self.hs.hostname)
user_id = user.to_string()
yield self.check_user_id_not_appservice_exclusive(user_id)
if generate_token:
token = self.macaroon_gen.generate_access_token(user_id)
if default_display_name is None:
default_display_name = localpart
try:
yield self.register_with_store(
user_id=user_id,
token=token,
password_hash=password_hash,
make_guest=make_guest,
create_profile_with_displayname=default_display_name,
@@ -242,15 +254,10 @@ class RegistrationHandler(BaseHandler):
# if user id is taken, just generate another
user = None
user_id = None
token = None
attempts += 1
if not self.hs.config.user_consent_at_registration:
yield self._auto_join_rooms(user_id)
else:
logger.info(
"Skipping auto-join for %s because consent is required at registration",
user_id,
)
# Bind any specified emails to this account
current_time = self.hs.get_clock().time_msec()
@@ -265,7 +272,7 @@ class RegistrationHandler(BaseHandler):
# Bind email to new account
yield self._register_email_threepid(user_id, threepid_dict, None, False)
defer.returnValue(user_id)
defer.returnValue((user_id, token))
@defer.inlineCallbacks
def _auto_join_rooms(self, user_id):
@@ -291,7 +298,6 @@ class RegistrationHandler(BaseHandler):
count = yield self.store.count_all_users()
should_auto_create_rooms = count == 1
for r in self.hs.config.auto_join_rooms:
logger.info("Auto-joining %s to %s", user_id, r)
try:
if should_auto_create_rooms:
room_alias = RoomAlias.from_string(r)
@@ -499,6 +505,87 @@ class RegistrationHandler(BaseHandler):
)
defer.returnValue(data)
@defer.inlineCallbacks
def get_or_create_user(self, requester, localpart, displayname, password_hash=None):
"""Creates a new user if the user does not exist,
else revokes all previous access tokens and generates a new one.
Args:
localpart: The local part of the user ID to register. If None,
one will be randomly generated.
Returns:
A tuple of (user_id, access_token).
Raises:
RegistrationError if there was a problem registering.
NB this is only used in tests. TODO: move it to the test package!
"""
if localpart is None:
raise SynapseError(400, "Request must include user id")
yield self.auth.check_auth_blocking()
need_register = True
try:
yield self.check_username(localpart)
except SynapseError as e:
if e.errcode == Codes.USER_IN_USE:
need_register = False
else:
raise
user = UserID(localpart, self.hs.hostname)
user_id = user.to_string()
token = self.macaroon_gen.generate_access_token(user_id)
if need_register:
yield self.register_with_store(
user_id=user_id,
token=token,
password_hash=password_hash,
create_profile_with_displayname=user.localpart,
)
else:
yield self._auth_handler.delete_access_tokens_for_user(user_id)
yield self.store.add_access_token_to_user(user_id=user_id, token=token)
if displayname is not None:
logger.info("setting user display name: %s -> %s", user_id, displayname)
yield self.profile_handler.set_displayname(
user, requester, displayname, by_admin=True
)
defer.returnValue((user_id, token))
@defer.inlineCallbacks
def get_or_register_3pid_guest(self, medium, address, inviter_user_id):
"""Get a guest access token for a 3PID, creating a guest account if
one doesn't already exist.
Args:
medium (str)
address (str)
inviter_user_id (str): The user ID who is trying to invite the
3PID
Returns:
Deferred[(str, str)]: A 2-tuple of `(user_id, access_token)` of the
3PID guest account.
"""
access_token = yield self.store.get_3pid_guest_access_token(medium, address)
if access_token:
user_info = yield self.auth.get_user_by_access_token(access_token)
defer.returnValue((user_info["user"].to_string(), access_token))
user_id, access_token = yield self.register(
generate_token=True, make_guest=True
)
access_token = yield self.store.save_or_get_3pid_guest_access_token(
medium, address, access_token, inviter_user_id
)
defer.returnValue((user_id, access_token))
@defer.inlineCallbacks
def _join_user_to_room(self, requester, room_identifier):
room_id = None
@@ -528,6 +615,7 @@ class RegistrationHandler(BaseHandler):
def register_with_store(
self,
user_id,
token=None,
password_hash=None,
was_guest=False,
make_guest=False,
@@ -541,6 +629,9 @@ class RegistrationHandler(BaseHandler):
Args:
user_id (str): The desired user ID to register.
token (str): The desired access token to use for this user. If this
is not None, the given access token is associated with the user
id.
password_hash (str|None): Optional. The password hash for this user.
was_guest (bool): Optional. Whether this is a guest account being
upgraded to a non-guest account.
@@ -576,6 +667,7 @@ class RegistrationHandler(BaseHandler):
if self.hs.config.worker_app:
return self._register_client(
user_id=user_id,
token=token,
password_hash=password_hash,
was_guest=was_guest,
make_guest=make_guest,
@@ -586,8 +678,9 @@ class RegistrationHandler(BaseHandler):
address=address,
)
else:
return self.store.register_user(
return self.store.register(
user_id=user_id,
token=token,
password_hash=password_hash,
was_guest=was_guest,
make_guest=make_guest,
@@ -601,8 +694,6 @@ class RegistrationHandler(BaseHandler):
def register_device(self, user_id, device_id, initial_display_name, is_guest=False):
"""Register a device for a user and generate an access token.
The access token will be limited by the homeserver's session_lifetime config.
Args:
user_id (str): full canonical @user:id
device_id (str|None): The device ID to check, or None to generate
@@ -623,29 +714,20 @@ class RegistrationHandler(BaseHandler):
is_guest=is_guest,
)
defer.returnValue((r["device_id"], r["access_token"]))
valid_until_ms = None
if self.session_lifetime is not None:
if is_guest:
raise Exception(
"session_lifetime is not currently implemented for guest access"
)
valid_until_ms = self.clock.time_msec() + self.session_lifetime
device_id = yield self.device_handler.check_device_registered(
user_id, device_id, initial_display_name
)
if is_guest:
assert valid_until_ms is None
access_token = self.macaroon_gen.generate_access_token(
user_id, ["guest = true"]
)
else:
access_token = yield self._auth_handler.get_access_token_for_user_id(
user_id, device_id=device_id, valid_until_ms=valid_until_ms
device_id = yield self.device_handler.check_device_registered(
user_id, device_id, initial_display_name
)
if is_guest:
access_token = self.macaroon_gen.generate_access_token(
user_id, ["guest = true"]
)
else:
access_token = yield self._auth_handler.get_access_token_for_user_id(
user_id, device_id=device_id
)
defer.returnValue((device_id, access_token))
defer.returnValue((device_id, access_token))
@defer.inlineCallbacks
def post_registration_actions(

View File

@@ -26,9 +26,10 @@ from unpaddedbase64 import decode_base64
from twisted.internet import defer
from synapse import types
import synapse.server
import synapse.types
from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import AuthError, Codes, HttpResponseException, SynapseError
from synapse.api.errors import AuthError, Codes, SynapseError
from synapse.types import RoomID, UserID
from synapse.util.async_helpers import Linearizer
from synapse.util.distributor import user_joined_room, user_left_room
@@ -38,11 +39,6 @@ from ._base import BaseHandler
logger = logging.getLogger(__name__)
id_server_scheme = "https://"
ROOM_COMPLEXITY_TOO_GREAT = (
"Your homeserver is unable to join rooms this large or complex. "
"Please speak to your server administrator, or upgrade your instance "
"to join this room."
)
class RoomMemberHandler(object):
@@ -122,6 +118,24 @@ class RoomMemberHandler(object):
"""
raise NotImplementedError()
@abc.abstractmethod
def get_or_register_3pid_guest(self, requester, medium, address, inviter_user_id):
"""Get a guest access token for a 3PID, creating a guest account if
one doesn't already exist.
Args:
requester (Requester)
medium (str)
address (str)
inviter_user_id (str): The user ID who is trying to invite the
3PID
Returns:
Deferred[(str, str)]: A 2-tuple of `(user_id, access_token)` of the
3PID guest account.
"""
raise NotImplementedError()
@abc.abstractmethod
def _user_joined_room(self, target, room_id):
"""Notifies distributor on master process that the user has joined the
@@ -547,7 +561,7 @@ class RoomMemberHandler(object):
), "Sender (%s) must be same as requester (%s)" % (sender, requester.user)
assert self.hs.is_mine(sender), "Sender must be our own: %s" % (sender,)
else:
requester = types.create_requester(target_user)
requester = synapse.types.create_requester(target_user)
prev_event = yield self.event_creation_handler.deduplicate_state_event(
event, context
@@ -876,23 +890,24 @@ class RoomMemberHandler(object):
"sender_avatar_url": inviter_avatar_url,
}
try:
data = yield self.simple_http_client.post_json_get_json(
is_url, invite_config
)
except HttpResponseException as e:
# Some identity servers may only support application/x-www-form-urlencoded
# types. This is especially true with old instances of Sydent, see
# https://github.com/matrix-org/sydent/pull/170
logger.info(
"Failed to POST %s with JSON, falling back to urlencoded form: %s",
is_url,
e,
)
data = yield self.simple_http_client.post_urlencoded_get_json(
is_url, invite_config
if self.config.invite_3pid_guest:
guest_user_id, guest_access_token = yield self.get_or_register_3pid_guest(
requester=requester,
medium=medium,
address=address,
inviter_user_id=inviter_user_id,
)
invite_config.update(
{
"guest_access_token": guest_access_token,
"guest_user_id": guest_user_id,
}
)
data = yield self.simple_http_client.post_urlencoded_get_json(
is_url, invite_config
)
# TODO: Check for success
token = data["token"]
public_keys = data.get("public_keys", [])
@@ -949,47 +964,6 @@ class RoomMemberMasterHandler(RoomMemberHandler):
self.distributor.declare("user_joined_room")
self.distributor.declare("user_left_room")
@defer.inlineCallbacks
def _is_remote_room_too_complex(self, room_id, remote_room_hosts):
"""
Check if complexity of a remote room is too great.
Args:
room_id (str)
remote_room_hosts (list[str])
Returns: True if the complexity is too great, False if it is not,
or None if it could not be fetched
"""
max_complexity = self.hs.config.limit_large_room_complexity
complexity = yield self.federation_handler.get_room_complexity(
remote_room_hosts, room_id
)
if complexity:
if complexity["v1"] > max_complexity:
return True
return False
return None
@defer.inlineCallbacks
def _is_local_room_too_complex(self, room_id):
"""
Check if the complexity of a local room is too great.
Args:
room_id (str)
Returns: bool
"""
max_complexity = self.hs.config.limit_large_room_complexity
complexity = yield self.store.get_room_complexity(room_id)
if complexity["v1"] > max_complexity:
return True
return False
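# Illustration (values hypothetical): with
#     limit_large_room_joins: true
#     limit_large_room_complexity: 0.5
# in the homeserver config, a remote room whose complexity["v1"] exceeds
# 0.5 is rejected before the join, and re-checked locally afterwards.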
@defer.inlineCallbacks
def _remote_join(self, requester, remote_room_hosts, room_id, user, content):
"""Implements RoomMemberHandler._remote_join
@@ -997,6 +971,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
# filter ourselves out of remote_room_hosts: do_invite_join ignores it
# and if it is the only entry we'd like to return a 404 rather than a
# 500.
remote_room_hosts = [
host for host in remote_room_hosts if host != self.hs.hostname
]
@@ -1004,18 +979,6 @@ class RoomMemberMasterHandler(RoomMemberHandler):
if len(remote_room_hosts) == 0:
raise SynapseError(404, "No known servers")
if self.hs.config.limit_large_room_joins:
# Fetch the room complexity
too_complex = yield self._is_remote_room_too_complex(
room_id, remote_room_hosts
)
if too_complex is True:
raise SynapseError(
code=400,
msg=ROOM_COMPLEXITY_TOO_GREAT,
errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
)
# We don't do an auth check if we are doing an invite
# join dance for now, since we're kinda implicitly checking
# that we are allowed to join when we decide whether or not we
@@ -1025,31 +988,6 @@ class RoomMemberMasterHandler(RoomMemberHandler):
)
yield self._user_joined_room(user, room_id)
# Check the room we just joined wasn't too large, if we didn't fetch the
# complexity of it before.
if self.hs.config.limit_large_room_joins:
if too_complex is False:
# We checked, and we're under the limit.
return
# Check again, but with the local state events
too_complex = yield self._is_local_room_too_complex(room_id)
if too_complex is False:
# We're under the limit.
return
# The room is too large. Leave.
requester = types.create_requester(user, None, False, None)
yield self.update_membership(
requester=requester, target=user, room_id=room_id, action="leave"
)
raise SynapseError(
code=400,
msg=ROOM_COMPLEXITY_TOO_GREAT,
errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
)
@defer.inlineCallbacks
def _remote_reject_invite(self, requester, remote_room_hosts, room_id, target):
"""Implements RoomMemberHandler._remote_reject_invite
@@ -1072,6 +1010,12 @@ class RoomMemberMasterHandler(RoomMemberHandler):
yield self.store.locally_reject_invite(target.to_string(), room_id)
defer.returnValue({})
def get_or_register_3pid_guest(self, requester, medium, address, inviter_user_id):
"""Implements RoomMemberHandler.get_or_register_3pid_guest
"""
rg = self.registration_handler
return rg.get_or_register_3pid_guest(medium, address, inviter_user_id)
def _user_joined_room(self, target, room_id):
"""Implements RoomMemberHandler._user_joined_room
"""

View File

@@ -20,6 +20,7 @@ from twisted.internet import defer
from synapse.api.errors import SynapseError
from synapse.handlers.room_member import RoomMemberHandler
from synapse.replication.http.membership import (
ReplicationRegister3PIDGuestRestServlet as Repl3PID,
ReplicationRemoteJoinRestServlet as ReplRemoteJoin,
ReplicationRemoteRejectInviteRestServlet as ReplRejectInvite,
ReplicationUserJoinedLeftRoomRestServlet as ReplJoinedLeft,
@@ -32,6 +33,7 @@ class RoomMemberWorkerHandler(RoomMemberHandler):
def __init__(self, hs):
super(RoomMemberWorkerHandler, self).__init__(hs)
self._get_register_3pid_client = Repl3PID.make_client(hs)
self._remote_join_client = ReplRemoteJoin.make_client(hs)
self._remote_reject_client = ReplRejectInvite.make_client(hs)
self._notify_change_client = ReplJoinedLeft.make_client(hs)
@@ -78,3 +80,13 @@ class RoomMemberWorkerHandler(RoomMemberHandler):
return self._notify_change_client(
user_id=target.to_string(), room_id=room_id, change="left"
)
def get_or_register_3pid_guest(self, requester, medium, address, inviter_user_id):
"""Implements RoomMemberHandler.get_or_register_3pid_guest
"""
return self._get_register_3pid_client(
requester=requester,
medium=medium,
address=address,
inviter_user_id=inviter_user_id,
)

View File

@@ -25,7 +25,6 @@ from prometheus_client import Counter
from twisted.internet import defer
from synapse.api.constants import EventTypes, Membership
from synapse.logging.context import LoggingContext
from synapse.push.clientformat import format_push_rules_for_user
from synapse.storage.roommember import MemberSummary
from synapse.storage.state import StateFilter
@@ -34,6 +33,7 @@ from synapse.util.async_helpers import concurrently_execute
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.caches.lrucache import LruCache
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.logcontext import LoggingContext
from synapse.util.metrics import Measure, measure_func
from synapse.visibility import filter_events_for_client

View File

@@ -19,9 +19,9 @@ from collections import namedtuple
from twisted.internet import defer
from synapse.api.errors import AuthError, SynapseError
from synapse.logging.context import run_in_background
from synapse.types import UserID, get_domain_from_id
from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.logcontext import run_in_background
from synapse.util.metrics import Measure
from synapse.util.wheel_timer import WheelTimer

View File

@@ -45,9 +45,9 @@ from synapse.http import (
cancelled_to_request_timed_out_error,
redact_uri,
)
from synapse.logging.context import make_deferred_yieldable
from synapse.util.async_helpers import timeout_deferred
from synapse.util.caches import CACHE_SIZE_FACTOR
from synapse.util.logcontext import make_deferred_yieldable
logger = logging.getLogger(__name__)

View File

@@ -30,9 +30,9 @@ from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent
from synapse.http.federation.srv_resolver import SrvResolver, pick_server_from_list
from synapse.logging.context import make_deferred_yieldable
from synapse.util import Clock
from synapse.util.caches.ttlcache import TTLCache
from synapse.util.logcontext import make_deferred_yieldable
from synapse.util.metrics import Measure
# period to cache .well-known results for by default

View File

@@ -25,7 +25,7 @@ from twisted.internet.error import ConnectError
from twisted.names import client, dns
from twisted.names.error import DNSNameError, DomainError
from synapse.logging.context import make_deferred_yieldable
from synapse.util.logcontext import make_deferred_yieldable
logger = logging.getLogger(__name__)

View File

@@ -36,7 +36,6 @@ from twisted.internet.task import _EPSILON, Cooperator
from twisted.web._newclient import ResponseDone
from twisted.web.http_headers import Headers
import synapse.logging.opentracing as opentracing
import synapse.metrics
import synapse.util.retryutils
from synapse.api.errors import (
@@ -49,8 +48,8 @@ from synapse.api.errors import (
from synapse.http import QuieterFileBodyProducer
from synapse.http.client import BlacklistingAgentWrapper, IPBlacklistingResolver
from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
from synapse.logging.context import make_deferred_yieldable
from synapse.util.async_helpers import timeout_deferred
from synapse.util.logcontext import make_deferred_yieldable
from synapse.util.metrics import Measure
logger = logging.getLogger(__name__)
@@ -340,25 +339,9 @@ class MatrixFederationHttpClient(object):
else:
query_bytes = b""
# Retrieve current span
scope = opentracing.start_active_span(
"outgoing-federation-request",
tags={
opentracing.tags.SPAN_KIND: opentracing.tags.SPAN_KIND_RPC_CLIENT,
opentracing.tags.PEER_ADDRESS: request.destination,
opentracing.tags.HTTP_METHOD: request.method,
opentracing.tags.HTTP_URL: request.path,
},
finish_on_close=True,
)
headers_dict = {b"User-Agent": [self.version_string_bytes]}
# Inject the span into the headers
headers_dict = {}
opentracing.inject_active_span_byte_dict(headers_dict, request.destination)
headers_dict[b"User-Agent"] = [self.version_string_bytes]
with limiter, scope:
with limiter:
# XXX: Would be much nicer to retry only at the transaction-layer
# (once we have reliable transactions in place)
if long_retries:
@@ -436,10 +419,6 @@ class MatrixFederationHttpClient(object):
response.phrase.decode("ascii", errors="replace"),
)
opentracing.set_tag(
opentracing.tags.HTTP_STATUS_CODE, response.code
)
if 200 <= response.code < 300:
pass
else:
@@ -520,7 +499,8 @@ class MatrixFederationHttpClient(object):
_flatten_response_never_received(e),
)
raise
defer.returnValue(response)
defer.returnValue(response)
def build_auth_headers(
self, destination, method, url_bytes, content=None, destination_is=None

View File

@@ -19,8 +19,8 @@ import threading
from prometheus_client.core import Counter, Histogram
from synapse.logging.context import LoggingContext
from synapse.metrics import LaterGauge
from synapse.util.logcontext import LoggingContext
logger = logging.getLogger(__name__)

View File

@@ -39,8 +39,8 @@ from synapse.api.errors import (
SynapseError,
UnrecognizedRequestError,
)
from synapse.logging.context import preserve_fn
from synapse.util.caches import intern_dict
from synapse.util.logcontext import preserve_fn
logger = logging.getLogger(__name__)
@@ -65,8 +65,8 @@ def wrap_json_request_handler(h):
The handler method must have a signature of "handle_foo(self, request)",
where "request" must be a SynapseRequest.
The handler must return a deferred or a coroutine. If the deferred succeeds
we assume that a response has been sent. If the deferred fails with a SynapseError we use
The handler must return a deferred. If the deferred succeeds we assume that
a response has been sent. If the deferred fails with a SynapseError we use
it to send a JSON response with the appropriate HTTP response code. If the
deferred fails with any other type of error we send a 500 response.
"""
@@ -245,9 +245,7 @@ class JsonResource(HttpServer, resource.Resource):
isLeaf = True
_PathEntry = collections.namedtuple(
"_PathEntry", ["pattern", "callback", "servlet_classname"]
)
_PathEntry = collections.namedtuple("_PathEntry", ["pattern", "callback"])
def __init__(self, hs, canonical_json=True):
resource.Resource.__init__(self)
@@ -257,28 +255,12 @@ class JsonResource(HttpServer, resource.Resource):
self.path_regexs = {}
self.hs = hs
def register_paths(self, method, path_patterns, callback, servlet_classname):
"""
Registers a request handler against a regular expression. Later request URLs are
checked against these regular expressions in order to identify an appropriate
handler for that request.
Args:
method (str): GET, POST etc
path_patterns (Iterable[str]): A list of regular expressions to which
the request URLs are compared.
callback (function): The handler for the request. Usually a Servlet
servlet_classname (str): The name of the handler to be used in prometheus
and opentracing logs.
"""
def register_paths(self, method, path_patterns, callback):
method = method.encode("utf-8") # method is bytes on py3
for path_pattern in path_patterns:
logger.debug("Registering for %s %s", method, path_pattern.pattern)
self.path_regexs.setdefault(method, []).append(
self._PathEntry(path_pattern, callback, servlet_classname)
self._PathEntry(path_pattern, callback)
)
def render(self, request):
@@ -293,9 +275,13 @@ class JsonResource(HttpServer, resource.Resource):
This checks if anyone has registered a callback for that method and
path.
"""
callback, servlet_classname, group_dict = self._get_handler_for_request(request)
callback, group_dict = self._get_handler_for_request(request)
# Make sure we have a name for this handler in prometheus.
servlet_instance = getattr(callback, "__self__", None)
if servlet_instance is not None:
servlet_classname = servlet_instance.__class__.__name__
else:
servlet_classname = "%r" % callback
request.request_metrics.name = servlet_classname
# Now trigger the callback. If it returns a response, we send it
@@ -325,8 +311,7 @@ class JsonResource(HttpServer, resource.Resource):
request (twisted.web.http.Request):
Returns:
Tuple[Callable, str, dict[unicode, unicode]]: callback method, the
label to use for that method in prometheus metrics, and the
Tuple[Callable, dict[unicode, unicode]]: callback method, and the
dict mapping keys to path components as specified in the
handler's path match regexp.
@@ -335,7 +320,7 @@ class JsonResource(HttpServer, resource.Resource):
None, or a tuple of (http code, response body).
"""
if request.method == b"OPTIONS":
return _options_handler, "options_request_handler", {}
return _options_handler, {}
# Loop through all the registered callbacks to check if the method
# and path regex match
@@ -343,10 +328,10 @@ class JsonResource(HttpServer, resource.Resource):
m = path_entry.pattern.match(request.path.decode("ascii"))
if m:
# We found a match!
return path_entry.callback, path_entry.servlet_classname, m.groupdict()
return path_entry.callback, m.groupdict()
# Huh. No one wanted to handle that? Fiiiiiine. Send 400.
return _unrecognised_request_handler, "unrecognised_request_handler", {}
return _unrecognised_request_handler, {}
def _send_response(
self, request, code, response_json_object, response_code_message=None
@@ -368,22 +353,16 @@ class DirectServeResource(resource.Resource):
"""
Render the request, using an asynchronous render handler if it exists.
"""
async_render_callback_name = "_async_render_" + request.method.decode("ascii")
render_callback_name = "_async_render_" + request.method.decode("ascii")
# Try and get the async renderer
callback = getattr(self, async_render_callback_name, None)
if hasattr(self, render_callback_name):
# Call the handler
callback = getattr(self, render_callback_name)
defer.ensureDeferred(callback(request))
# No async renderer for this request method.
if not callback:
return super().render(request)
resp = callback(request)
# If it's a coroutine, turn it into a Deferred
if isinstance(resp, types.CoroutineType):
defer.ensureDeferred(resp)
return NOT_DONE_YET
return NOT_DONE_YET
else:
super().render(request)
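# Hypothetical subclass, per render() above: defining an
# "_async_render_<METHOD>" coroutine is all a resource needs to do.
#
#   class PingResource(DirectServeResource):
#       async def _async_render_GET(self, request):
#           respond_with_json(request, 200, {"pong": True})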
def _options_handler(request):

View File

@@ -20,7 +20,6 @@ import logging
from canonicaljson import json
from synapse.api.errors import Codes, SynapseError
from synapse.logging.opentracing import trace_servlet
logger = logging.getLogger(__name__)
@@ -290,14 +289,8 @@ class RestServlet(object):
for method in ("GET", "PUT", "POST", "OPTIONS", "DELETE"):
if hasattr(self, "on_%s" % (method,)):
servlet_classname = self.__class__.__name__
method_handler = getattr(self, "on_%s" % (method,))
http_server.register_paths(
method,
patterns,
trace_servlet(servlet_classname, method_handler),
servlet_classname,
)
http_server.register_paths(method, patterns, method_handler)
else:
raise NotImplementedError("RestServlet must register something.")

View File

@@ -19,7 +19,7 @@ from twisted.web.server import Request, Site
from synapse.http import redact_uri
from synapse.http.request_metrics import RequestMetrics, requests_counter
from synapse.logging.context import LoggingContext, PreserveLoggingContext
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
logger = logging.getLogger(__name__)

View File

@@ -1,697 +0,0 @@
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Thread-local-alike tracking of log contexts within synapse
This module provides objects and utilities for tracking contexts through
synapse code, so that log lines can include a request identifier, and so that
CPU and database activity can be accounted for against the request that caused
them.
See doc/log_contexts.rst for details on how this works.
"""
import logging
import threading
import types
from twisted.internet import defer, threads
logger = logging.getLogger(__name__)
try:
import resource
# Python doesn't ship with a definition of RUSAGE_THREAD but it's defined
# to be 1 on linux so we hard code it.
RUSAGE_THREAD = 1
# If the system doesn't support RUSAGE_THREAD then this should throw an
# exception.
resource.getrusage(RUSAGE_THREAD)
def get_thread_resource_usage():
return resource.getrusage(RUSAGE_THREAD)
except Exception:
# If the system doesn't support resource.getrusage(RUSAGE_THREAD) then we
# won't track resource usage by returning None.
def get_thread_resource_usage():
return None
# get an id for the current thread.
#
# threading.get_ident doesn't actually return an OS-level tid, and annoyingly,
# on Linux it actually returns the same value either side of a fork() call. However
# we only fork in one place, so it's not worth the hoop-jumping to get a real tid.
#
get_thread_id = threading.get_ident
class ContextResourceUsage(object):
"""Object for tracking the resources used by a log context
Attributes:
ru_utime (float): user CPU time (in seconds)
ru_stime (float): system CPU time (in seconds)
db_txn_count (int): number of database transactions done
db_sched_duration_sec (float): amount of time spent waiting for a
database connection
db_txn_duration_sec (float): amount of time spent doing database
transactions (excluding scheduling time)
evt_db_fetch_count (int): number of events requested from the database
"""
__slots__ = [
"ru_stime",
"ru_utime",
"db_txn_count",
"db_txn_duration_sec",
"db_sched_duration_sec",
"evt_db_fetch_count",
]
def __init__(self, copy_from=None):
"""Create a new ContextResourceUsage
Args:
copy_from (ContextResourceUsage|None): if not None, an object to
copy stats from
"""
if copy_from is None:
self.reset()
else:
self.ru_utime = copy_from.ru_utime
self.ru_stime = copy_from.ru_stime
self.db_txn_count = copy_from.db_txn_count
self.db_txn_duration_sec = copy_from.db_txn_duration_sec
self.db_sched_duration_sec = copy_from.db_sched_duration_sec
self.evt_db_fetch_count = copy_from.evt_db_fetch_count
def copy(self):
return ContextResourceUsage(copy_from=self)
def reset(self):
self.ru_stime = 0.0
self.ru_utime = 0.0
self.db_txn_count = 0
self.db_txn_duration_sec = 0
self.db_sched_duration_sec = 0
self.evt_db_fetch_count = 0
def __repr__(self):
return (
"<ContextResourceUsage ru_stime='%r', ru_utime='%r', "
"db_txn_count='%r', db_txn_duration_sec='%r', "
"db_sched_duration_sec='%r', evt_db_fetch_count='%r'>"
) % (
self.ru_stime,
self.ru_utime,
self.db_txn_count,
self.db_txn_duration_sec,
self.db_sched_duration_sec,
self.evt_db_fetch_count,
)
def __iadd__(self, other):
"""Add another ContextResourceUsage's stats to this one's.
Args:
other (ContextResourceUsage): the other resource usage object
"""
self.ru_utime += other.ru_utime
self.ru_stime += other.ru_stime
self.db_txn_count += other.db_txn_count
self.db_txn_duration_sec += other.db_txn_duration_sec
self.db_sched_duration_sec += other.db_sched_duration_sec
self.evt_db_fetch_count += other.evt_db_fetch_count
return self
def __isub__(self, other):
self.ru_utime -= other.ru_utime
self.ru_stime -= other.ru_stime
self.db_txn_count -= other.db_txn_count
self.db_txn_duration_sec -= other.db_txn_duration_sec
self.db_sched_duration_sec -= other.db_sched_duration_sec
self.evt_db_fetch_count -= other.evt_db_fetch_count
return self
def __add__(self, other):
res = ContextResourceUsage(copy_from=self)
res += other
return res
def __sub__(self, other):
res = ContextResourceUsage(copy_from=self)
res -= other
return res
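# A minimal sketch of the arithmetic above: snapshots can be copied, diffed
# and accumulated. The values here are illustrative only.
before = ContextResourceUsage()
before.ru_utime = 1.0
before.db_txn_count = 3
after = ContextResourceUsage(copy_from=before)
after.ru_utime = 1.5
after.db_txn_count = 5
delta = after - before          # usage accrued between the two snapshots
assert delta.db_txn_count == 2
total = before + delta          # __add__ returns a fresh object
total -= delta                  # __isub__ mutates in place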
class LoggingContext(object):
"""Additional context for log formatting. Contexts are scoped within a
"with" block.
If a parent is given when creating a new context, then:
- logging fields are copied from the parent to the new context on entry
- when the new context exits, the cpu usage stats are copied from the
child to the parent
Args:
name (str): Name for the context for debugging.
parent_context (LoggingContext|None): The parent of the new context
"""
__slots__ = [
"previous_context",
"name",
"parent_context",
"_resource_usage",
"usage_start",
"main_thread",
"alive",
"request",
"tag",
"scope",
]
thread_local = threading.local()
class Sentinel(object):
"""Sentinel to represent the root context"""
__slots__ = []
def __str__(self):
return "sentinel"
def copy_to(self, record):
pass
def start(self):
pass
def stop(self):
pass
def add_database_transaction(self, duration_sec):
pass
def add_database_scheduled(self, sched_sec):
pass
def record_event_fetch(self, event_count):
pass
def __nonzero__(self):
return False
__bool__ = __nonzero__ # python3
sentinel = Sentinel()
def __init__(self, name=None, parent_context=None, request=None):
self.previous_context = LoggingContext.current_context()
self.name = name
# track the resources used by this context so far
self._resource_usage = ContextResourceUsage()
        # If the context is alive, this holds the thread resource usage as of
        # when the logcontext last became active.
self.usage_start = None
self.main_thread = get_thread_id()
self.request = None
self.tag = ""
self.alive = True
self.scope = None
self.parent_context = parent_context
if self.parent_context is not None:
self.parent_context.copy_to(self)
if request is not None:
# the request param overrides the request from the parent context
self.request = request
def __str__(self):
if self.request:
return str(self.request)
return "%s@%x" % (self.name, id(self))
@classmethod
def current_context(cls):
"""Get the current logging context from thread local storage
Returns:
LoggingContext: the current logging context
"""
return getattr(cls.thread_local, "current_context", cls.sentinel)
@classmethod
def set_current_context(cls, context):
"""Set the current logging context in thread local storage
Args:
context(LoggingContext): The context to activate.
Returns:
The context that was previously active
"""
current = cls.current_context()
if current is not context:
current.stop()
cls.thread_local.current_context = context
context.start()
return current
def __enter__(self):
"""Enters this logging context into thread local storage"""
old_context = self.set_current_context(self)
if self.previous_context != old_context:
        logger.warning(
"Expected previous context %r, found %r",
self.previous_context,
old_context,
)
self.alive = True
return self
def __exit__(self, type, value, traceback):
"""Restore the logging context in thread local storage to the state it
was before this context was entered.
Returns:
None to avoid suppressing any exceptions that were thrown.
"""
current = self.set_current_context(self.previous_context)
if current is not self:
if current is self.sentinel:
logger.warning("Expected logging context %s was lost", self)
else:
logger.warning(
"Expected logging context %s but found %s", self, current
)
self.previous_context = None
self.alive = False
# if we have a parent, pass our CPU usage stats on
if self.parent_context is not None and hasattr(
self.parent_context, "_resource_usage"
):
self.parent_context._resource_usage += self._resource_usage
# reset them in case we get entered again
self._resource_usage.reset()
def copy_to(self, record):
"""Copy logging fields from this context to a log record or
another LoggingContext
"""
# we track the current request
record.request = self.request
# we also track the current scope:
record.scope = self.scope
def start(self):
if get_thread_id() != self.main_thread:
logger.warning("Started logcontext %s on different thread", self)
return
        # If we haven't already started, record the thread resource usage so
        # far
if not self.usage_start:
self.usage_start = get_thread_resource_usage()
def stop(self):
if get_thread_id() != self.main_thread:
logger.warning("Stopped logcontext %s on different thread", self)
return
# When we stop, let's record the cpu used since we started
if not self.usage_start:
logger.warning("Called stop on logcontext %s without calling start", self)
return
utime_delta, stime_delta = self._get_cputime()
self._resource_usage.ru_utime += utime_delta
self._resource_usage.ru_stime += stime_delta
self.usage_start = None
def get_resource_usage(self):
"""Get resources used by this logcontext so far.
Returns:
ContextResourceUsage: a *copy* of the object tracking resource
usage so far
"""
# we always return a copy, for consistency
res = self._resource_usage.copy()
# If we are on the correct thread and we're currently running then we
# can include resource usage so far.
is_main_thread = get_thread_id() == self.main_thread
if self.alive and self.usage_start and is_main_thread:
utime_delta, stime_delta = self._get_cputime()
res.ru_utime += utime_delta
res.ru_stime += stime_delta
return res
def _get_cputime(self):
"""Get the cpu usage time so far
Returns: Tuple[float, float]: seconds in user mode, seconds in system mode
"""
current = get_thread_resource_usage()
utime_delta = current.ru_utime - self.usage_start.ru_utime
stime_delta = current.ru_stime - self.usage_start.ru_stime
# sanity check
if utime_delta < 0:
logger.error(
"utime went backwards! %f < %f",
current.ru_utime,
self.usage_start.ru_utime,
)
utime_delta = 0
if stime_delta < 0:
logger.error(
"stime went backwards! %f < %f",
current.ru_stime,
self.usage_start.ru_stime,
)
stime_delta = 0
return utime_delta, stime_delta
def add_database_transaction(self, duration_sec):
if duration_sec < 0:
raise ValueError("DB txn time can only be non-negative")
self._resource_usage.db_txn_count += 1
self._resource_usage.db_txn_duration_sec += duration_sec
def add_database_scheduled(self, sched_sec):
"""Record a use of the database pool
Args:
sched_sec (float): number of seconds it took us to get a
connection
"""
if sched_sec < 0:
raise ValueError("DB scheduling time can only be non-negative")
self._resource_usage.db_sched_duration_sec += sched_sec
def record_event_fetch(self, event_count):
"""Record a number of events being fetched from the db
Args:
event_count (int): number of events being fetched
"""
self._resource_usage.evt_db_fetch_count += event_count
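# A minimal sketch of typical use, following the docstrings above; the name
# and request values are illustrative.
with LoggingContext(name="request-handler", request="GET-123") as ctx:
    ctx.add_database_transaction(0.02)   # 20ms spent in a DB transaction
    ctx.record_event_fetch(10)
    usage = ctx.get_resource_usage()     # returns a copy, safe to retain
    print(usage.db_txn_count, usage.evt_db_fetch_count)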
class LoggingContextFilter(logging.Filter):
"""Logging filter that adds values from the current logging context to each
record.
Args:
**defaults: Default values to avoid formatters complaining about
missing fields
"""
def __init__(self, **defaults):
self.defaults = defaults
def filter(self, record):
"""Add each fields from the logging contexts to the record.
Returns:
True to include the record in the log output.
"""
context = LoggingContext.current_context()
for key, value in self.defaults.items():
setattr(record, key, value)
# context should never be None, but if it somehow ends up being, then
# we end up in a death spiral of infinite loops, so let's check, for
# robustness' sake.
if context is not None:
context.copy_to(record)
return True
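import logging

# A sketch of wiring the filter into a stdlib handler so that %(request)s
# always resolves, even when only the sentinel context is active; the format
# string is illustrative.
handler = logging.StreamHandler()
handler.addFilter(LoggingContextFilter(request=""))
handler.setFormatter(
    logging.Formatter("%(asctime)s [%(request)s] %(name)s - %(message)s")
)
logging.getLogger("synapse").addHandler(handler)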
class PreserveLoggingContext(object):
"""Captures the current logging context and restores it when the scope is
exited. Used to restore the context after a function using
@defer.inlineCallbacks is resumed by a callback from the reactor."""
__slots__ = ["current_context", "new_context", "has_parent"]
def __init__(self, new_context=None):
if new_context is None:
new_context = LoggingContext.sentinel
self.new_context = new_context
def __enter__(self):
"""Captures the current logging context"""
self.current_context = LoggingContext.set_current_context(self.new_context)
if self.current_context:
self.has_parent = self.current_context.previous_context is not None
if not self.current_context.alive:
logger.debug("Entering dead context: %s", self.current_context)
def __exit__(self, type, value, traceback):
"""Restores the current logging context"""
context = LoggingContext.set_current_context(self.current_context)
if context != self.new_context:
if context is LoggingContext.sentinel:
logger.warning("Expected logging context %s was lost", self.new_context)
else:
logger.warning(
"Expected logging context %s but found %s",
self.new_context,
context,
)
if self.current_context is not LoggingContext.sentinel:
if not self.current_context.alive:
logger.debug("Restoring dead context: %s", self.current_context)
def nested_logging_context(suffix, parent_context=None):
"""Creates a new logging context as a child of another.
The nested logging context will have a 'request' made up of the parent context's
request, plus the given suffix.
CPU/db usage stats will be added to the parent context's on exit.
Normal usage looks like:
with nested_logging_context(suffix):
# ... do stuff
Args:
suffix (str): suffix to add to the parent context's 'request'.
parent_context (LoggingContext|None): parent context. Will use the current context
if None.
Returns:
LoggingContext: new logging context.
"""
if parent_context is None:
parent_context = LoggingContext.current_context()
return LoggingContext(
parent_context=parent_context, request=parent_context.request + "-" + suffix
)
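# Usage per the docstring above; "REQ-1" and "shard-0" are illustrative. Log
# lines inside the inner block carry request "REQ-1-shard-0", and CPU/db
# stats roll up into the parent context on exit.
with LoggingContext(name="main", request="REQ-1"):
    with nested_logging_context("shard-0"):
        logger.info("processing shard")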
def preserve_fn(f):
"""Function decorator which wraps the function with run_in_background"""
def g(*args, **kwargs):
return run_in_background(f, *args, **kwargs)
return g
def run_in_background(f, *args, **kwargs):
"""Calls a function, ensuring that the current context is restored after
return from the function, and that the sentinel context is set once the
deferred returned by the function completes.
Useful for wrapping functions that return a deferred or coroutine, which you don't
yield or await on (for instance because you want to pass it to
    defer.gatherResults()).
Note that if you completely discard the result, you should make sure that
`f` doesn't raise any deferred exceptions, otherwise a scary-looking
CRITICAL error about an unhandled error will be logged without much
indication about where it came from.
"""
current = LoggingContext.current_context()
try:
res = f(*args, **kwargs)
except: # noqa: E722
# the assumption here is that the caller doesn't want to be disturbed
# by synchronous exceptions, so let's turn them into Failures.
return defer.fail()
if isinstance(res, types.CoroutineType):
res = defer.ensureDeferred(res)
if not isinstance(res, defer.Deferred):
return res
if res.called and not res.paused:
# The function should have maintained the logcontext, so we can
# optimise out the messing about
return res
# The function may have reset the context before returning, so
# we need to restore it now.
ctx = LoggingContext.set_current_context(current)
# The original context will be restored when the deferred
# completes, but there is nothing waiting for it, so it will
# get leaked into the reactor or some other function which
# wasn't expecting it. We therefore need to reset the context
# here.
#
# (If this feels asymmetric, consider it this way: we are
# effectively forking a new thread of execution. We are
# probably currently within a ``with LoggingContext()`` block,
# which is supposed to have a single entry and exit point. But
# by spawning off another deferred, we are effectively
# adding a new exit point.)
res.addBoth(_set_context_cb, ctx)
return res
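# A sketch of fire-and-forget use, assuming `periodic_cleanup` is a
# hypothetical function returning a Deferred; per the note above, it should
# handle its own errors since nothing waits on the result.
def handle_request():
    with LoggingContext(name="handler", request="REQ-7"):
        run_in_background(periodic_cleanup)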
def make_deferred_yieldable(deferred):
"""Given a deferred, make it follow the Synapse logcontext rules:
If the deferred has completed (or is not actually a Deferred), essentially
does nothing (just returns another completed deferred with the
result/failure).
If the deferred has not yet completed, resets the logcontext before
returning a deferred. Then, when the deferred completes, restores the
current logcontext before running callbacks/errbacks.
(This is more-or-less the opposite operation to run_in_background.)
"""
if not isinstance(deferred, defer.Deferred):
return deferred
if deferred.called and not deferred.paused:
# it looks like this deferred is ready to run any callbacks we give it
# immediately. We may as well optimise out the logcontext faffery.
return deferred
# ok, we can't be sure that a yield won't block, so let's reset the
# logcontext, and add a callback to the deferred to restore it.
prev_context = LoggingContext.set_current_context(LoggingContext.sentinel)
deferred.addBoth(_set_context_cb, prev_context)
return deferred
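from twisted.internet import defer

# A sketch of the gatherResults pattern mentioned above; `fetch_a` and
# `fetch_b` are hypothetical deferred-returning functions.
@defer.inlineCallbacks
def fetch_both():
    d1 = run_in_background(fetch_a)
    d2 = run_in_background(fetch_b)
    results = yield make_deferred_yieldable(
        defer.gatherResults([d1, d2], consumeErrors=True)
    )
    defer.returnValue(results)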
def _set_context_cb(result, context):
"""A callback function which just sets the logging context"""
LoggingContext.set_current_context(context)
return result
def defer_to_thread(reactor, f, *args, **kwargs):
"""
Calls the function `f` using a thread from the reactor's default threadpool and
returns the result as a Deferred.
Creates a new logcontext for `f`, which is created as a child of the current
logcontext (so its CPU usage metrics will get attributed to the current
logcontext). `f` should preserve the logcontext it is given.
The result deferred follows the Synapse logcontext rules: you should `yield`
on it.
Args:
reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread
the Deferred will be invoked, and whose threadpool we should use for the
function.
Normally this will be hs.get_reactor().
f (callable): The function to call.
args: positional arguments to pass to f.
kwargs: keyword arguments to pass to f.
Returns:
Deferred: A Deferred which fires a callback with the result of `f`, or an
errback if `f` throws an exception.
"""
return defer_to_threadpool(reactor, reactor.getThreadPool(), f, *args, **kwargs)
def defer_to_threadpool(reactor, threadpool, f, *args, **kwargs):
"""
A wrapper for twisted.internet.threads.deferToThreadpool, which handles
logcontexts correctly.
Calls the function `f` using a thread from the given threadpool and returns
the result as a Deferred.
Creates a new logcontext for `f`, which is created as a child of the current
logcontext (so its CPU usage metrics will get attributed to the current
logcontext). `f` should preserve the logcontext it is given.
The result deferred follows the Synapse logcontext rules: you should `yield`
on it.
Args:
reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread
the Deferred will be invoked. Normally this will be hs.get_reactor().
threadpool (twisted.python.threadpool.ThreadPool): The threadpool to use for
running `f`. Normally this will be hs.get_reactor().getThreadPool().
f (callable): The function to call.
args: positional arguments to pass to f.
kwargs: keyword arguments to pass to f.
Returns:
Deferred: A Deferred which fires a callback with the result of `f`, or an
errback if `f` throws an exception.
"""
logcontext = LoggingContext.current_context()
def g():
with LoggingContext(parent_context=logcontext):
return f(*args, **kwargs)
return make_deferred_yieldable(threads.deferToThreadPool(reactor, threadpool, g))
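from twisted.internet import defer

# A sketch of offloading blocking I/O, assuming `hs` is a homeserver object
# exposing get_reactor(), as the docstrings above suggest.
def _read_file(path):
    with open(path, "rb") as f:
        return f.read()

@defer.inlineCallbacks
def load_blob(hs, path):
    data = yield defer_to_thread(hs.get_reactor(), _read_file, path)
    defer.returnValue(data)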


@@ -1,53 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import traceback
from six import StringIO
class LogFormatter(logging.Formatter):
"""Log formatter which gives more detail for exceptions
This is the same as the standard log formatter, except that when logging
exceptions [typically via log.foo("msg", exc_info=1)], it prints the
sequence that led up to the point at which the exception was caught.
(Normally only stack frames between the point the exception was raised and
where it was caught are logged).
"""
def __init__(self, *args, **kwargs):
super(LogFormatter, self).__init__(*args, **kwargs)
def formatException(self, ei):
sio = StringIO()
(typ, val, tb) = ei
# log the stack above the exception capture point if possible, but
# check that we actually have an f_back attribute to work around
# https://twistedmatrix.com/trac/ticket/9305
if tb and hasattr(tb.tb_frame, "f_back"):
sio.write("Capture point (most recent call last):\n")
traceback.print_stack(tb.tb_frame.f_back, None, sio)
traceback.print_exception(typ, val, tb, None, sio)
s = sio.getvalue()
sio.close()
if s[-1:] == "\n":
s = s[:-1]
return s
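import logging

# A sketch of installing the formatter; with exc_info logging, the output
# then includes the "capture point" stack described above. The format string
# is illustrative.
handler = logging.StreamHandler()
handler.setFormatter(LogFormatter("%(asctime)s %(levelname)s %(message)s"))
log = logging.getLogger("demo")
log.addHandler(handler)
try:
    1 / 0
except ZeroDivisionError:
    log.error("calculation failed", exc_info=True)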

Some files were not shown because too many files have changed in this diff.