Compare commits
42 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| dffba9994a | |||
| a0cbb61323 | |||
| 90007d6921 | |||
| 6af4230192 | |||
| 65434da75d | |||
| 7b3bc755a3 | |||
| d88421ab03 | |||
| af67c7c1de | |||
| 824707383b | |||
| 73cb716b3c | |||
| 5e01e9ac19 | |||
| f3615a8aa5 | |||
| 7556851665 | |||
| 43d175d17a | |||
| b70e080b59 | |||
| 57eacee4f4 | |||
| c142e5d16a | |||
| 4b1f7febc7 | |||
| f9e99f9534 | |||
| 1af2fcd492 | |||
| f05c7d62bc | |||
| 1a807dfe68 | |||
| 589d43d9cd | |||
| 9b1b79f3f5 | |||
| ad8b909ce9 | |||
| 80cc82a445 | |||
| a6a776f3d8 | |||
| 9481707a52 | |||
| 0e5434264f | |||
| 1ee268d33d | |||
| ee91ac179c | |||
| 822a0f0435 | |||
| 54283f3ed4 | |||
| 20332b278d | |||
| f6608a8805 | |||
| 426854e7bc | |||
| 463b072b12 | |||
| cb8d568cf9 | |||
| 463d5a8fde | |||
| 91753cae59 | |||
| c7b48bd42d | |||
| 0ee9076ffe |
@@ -173,11 +173,12 @@ steps:
|
|||||||
queue: "medium"
|
queue: "medium"
|
||||||
command:
|
command:
|
||||||
- "bash .buildkite/merge_base_branch.sh"
|
- "bash .buildkite/merge_base_branch.sh"
|
||||||
- "bash .buildkite/synapse_sytest.sh"
|
- "bash /synapse_sytest.sh"
|
||||||
plugins:
|
plugins:
|
||||||
- docker#v3.0.1:
|
- docker#v3.0.1:
|
||||||
image: "matrixdotorg/sytest-synapse:py35"
|
image: "matrixdotorg/sytest-synapse:py35"
|
||||||
propagate-environment: true
|
propagate-environment: true
|
||||||
|
always-pull: true
|
||||||
retry:
|
retry:
|
||||||
automatic:
|
automatic:
|
||||||
- exit_status: -1
|
- exit_status: -1
|
||||||
@@ -192,11 +193,12 @@ steps:
|
|||||||
POSTGRES: "1"
|
POSTGRES: "1"
|
||||||
command:
|
command:
|
||||||
- "bash .buildkite/merge_base_branch.sh"
|
- "bash .buildkite/merge_base_branch.sh"
|
||||||
- "bash .buildkite/synapse_sytest.sh"
|
- "bash /synapse_sytest.sh"
|
||||||
plugins:
|
plugins:
|
||||||
- docker#v3.0.1:
|
- docker#v3.0.1:
|
||||||
image: "matrixdotorg/sytest-synapse:py35"
|
image: "matrixdotorg/sytest-synapse:py35"
|
||||||
propagate-environment: true
|
propagate-environment: true
|
||||||
|
always-pull: true
|
||||||
retry:
|
retry:
|
||||||
automatic:
|
automatic:
|
||||||
- exit_status: -1
|
- exit_status: -1
|
||||||
@@ -212,11 +214,12 @@ steps:
|
|||||||
WORKERS: "1"
|
WORKERS: "1"
|
||||||
command:
|
command:
|
||||||
- "bash .buildkite/merge_base_branch.sh"
|
- "bash .buildkite/merge_base_branch.sh"
|
||||||
- "bash .buildkite/synapse_sytest.sh"
|
- "bash /synapse_sytest.sh"
|
||||||
plugins:
|
plugins:
|
||||||
- docker#v3.0.1:
|
- docker#v3.0.1:
|
||||||
image: "matrixdotorg/sytest-synapse:py35"
|
image: "matrixdotorg/sytest-synapse:py35"
|
||||||
propagate-environment: true
|
propagate-environment: true
|
||||||
|
always-pull: true
|
||||||
soft_fail: true
|
soft_fail: true
|
||||||
retry:
|
retry:
|
||||||
automatic:
|
automatic:
|
||||||
|
|||||||
@@ -1,145 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
#
|
|
||||||
# Fetch sytest, and then run the tests for synapse. The entrypoint for the
|
|
||||||
# sytest-synapse docker images.
|
|
||||||
|
|
||||||
set -ex
|
|
||||||
|
|
||||||
if [ -n "$BUILDKITE" ]
|
|
||||||
then
|
|
||||||
SYNAPSE_DIR=`pwd`
|
|
||||||
else
|
|
||||||
SYNAPSE_DIR="/src"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Attempt to find a sytest to use.
|
|
||||||
# If /sytest exists, it means that a SyTest checkout has been mounted into the Docker image.
|
|
||||||
if [ -d "/sytest" ]; then
|
|
||||||
# If the user has mounted in a SyTest checkout, use that.
|
|
||||||
echo "Using local sytests..."
|
|
||||||
|
|
||||||
# create ourselves a working directory and dos2unix some scripts therein
|
|
||||||
mkdir -p /work/jenkins
|
|
||||||
for i in install-deps.pl run-tests.pl tap-to-junit-xml.pl jenkins/prep_sytest_for_postgres.sh; do
|
|
||||||
dos2unix -n "/sytest/$i" "/work/$i"
|
|
||||||
done
|
|
||||||
ln -sf /sytest/tests /work
|
|
||||||
ln -sf /sytest/keys /work
|
|
||||||
SYTEST_LIB="/sytest/lib"
|
|
||||||
else
|
|
||||||
if [ -n "BUILDKITE_BRANCH" ]
|
|
||||||
then
|
|
||||||
branch_name=$BUILDKITE_BRANCH
|
|
||||||
else
|
|
||||||
# Otherwise, try and find out what the branch that the Synapse checkout is using. Fall back to develop if it's not a branch.
|
|
||||||
branch_name="$(git --git-dir=/src/.git symbolic-ref HEAD 2>/dev/null)" || branch_name="develop"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Try and fetch the branch
|
|
||||||
echo "Trying to get same-named sytest branch..."
|
|
||||||
wget -q https://github.com/matrix-org/sytest/archive/$branch_name.tar.gz -O sytest.tar.gz || {
|
|
||||||
# Probably a 404, fall back to develop
|
|
||||||
echo "Using develop instead..."
|
|
||||||
wget -q https://github.com/matrix-org/sytest/archive/develop.tar.gz -O sytest.tar.gz
|
|
||||||
}
|
|
||||||
|
|
||||||
mkdir -p /work
|
|
||||||
tar -C /work --strip-components=1 -xf sytest.tar.gz
|
|
||||||
SYTEST_LIB="/work/lib"
|
|
||||||
fi
|
|
||||||
|
|
||||||
cd /work
|
|
||||||
|
|
||||||
# PostgreSQL setup
|
|
||||||
if [ -n "$POSTGRES" ]
|
|
||||||
then
|
|
||||||
export PGUSER=postgres
|
|
||||||
export POSTGRES_DB_1=pg1
|
|
||||||
export POSTGRES_DB_2=pg2
|
|
||||||
|
|
||||||
# Start the database
|
|
||||||
su -c 'eatmydata /usr/lib/postgresql/9.6/bin/pg_ctl -w -D /var/lib/postgresql/data start' postgres
|
|
||||||
|
|
||||||
# Use the Jenkins script to write out the configuration for a PostgreSQL using Synapse
|
|
||||||
jenkins/prep_sytest_for_postgres.sh
|
|
||||||
|
|
||||||
# Make the test databases for the two Synapse servers that will be spun up
|
|
||||||
su -c 'psql -c "CREATE DATABASE pg1;"' postgres
|
|
||||||
su -c 'psql -c "CREATE DATABASE pg2;"' postgres
|
|
||||||
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -n "$OFFLINE" ]; then
|
|
||||||
# if we're in offline mode, just put synapse into the virtualenv, and
|
|
||||||
# hope that the deps are up-to-date.
|
|
||||||
#
|
|
||||||
# (`pip install -e` likes to reinstall setuptools even if it's already installed,
|
|
||||||
# so we just run setup.py explicitly.)
|
|
||||||
#
|
|
||||||
(cd $SYNAPSE_DIR && /venv/bin/python setup.py -q develop)
|
|
||||||
else
|
|
||||||
# We've already created the virtualenv, but lets double check we have all
|
|
||||||
# deps.
|
|
||||||
/venv/bin/pip install -q --upgrade --no-cache-dir -e $SYNAPSE_DIR
|
|
||||||
/venv/bin/pip install -q --upgrade --no-cache-dir \
|
|
||||||
lxml psycopg2 coverage codecov tap.py
|
|
||||||
|
|
||||||
# Make sure all Perl deps are installed -- this is done in the docker build
|
|
||||||
# so will only install packages added since the last Docker build
|
|
||||||
./install-deps.pl
|
|
||||||
fi
|
|
||||||
|
|
||||||
|
|
||||||
# Run the tests
|
|
||||||
>&2 echo "+++ Running tests"
|
|
||||||
|
|
||||||
RUN_TESTS=(
|
|
||||||
perl -I "$SYTEST_LIB" ./run-tests.pl --python=/venv/bin/python --synapse-directory=$SYNAPSE_DIR --coverage -O tap --all
|
|
||||||
)
|
|
||||||
|
|
||||||
TEST_STATUS=0
|
|
||||||
|
|
||||||
if [ -n "$WORKERS" ]; then
|
|
||||||
RUN_TESTS+=(-I Synapse::ViaHaproxy --dendron-binary=/pydron.py)
|
|
||||||
else
|
|
||||||
RUN_TESTS+=(-I Synapse)
|
|
||||||
fi
|
|
||||||
|
|
||||||
"${RUN_TESTS[@]}" "$@" > results.tap || TEST_STATUS=$?
|
|
||||||
|
|
||||||
if [ $TEST_STATUS -ne 0 ]; then
|
|
||||||
>&2 echo -e "run-tests \e[31mFAILED\e[0m: exit code $TEST_STATUS"
|
|
||||||
else
|
|
||||||
>&2 echo -e "run-tests \e[32mPASSED\e[0m"
|
|
||||||
fi
|
|
||||||
|
|
||||||
>&2 echo "--- Copying assets"
|
|
||||||
|
|
||||||
# Copy out the logs
|
|
||||||
mkdir -p /logs
|
|
||||||
cp results.tap /logs/results.tap
|
|
||||||
rsync --ignore-missing-args --min-size=1B -av server-0 server-1 /logs --include "*/" --include="*.log.*" --include="*.log" --exclude="*"
|
|
||||||
|
|
||||||
# Upload coverage to codecov and upload files, if running on Buildkite
|
|
||||||
if [ -n "$BUILDKITE" ]
|
|
||||||
then
|
|
||||||
/venv/bin/coverage combine || true
|
|
||||||
/venv/bin/coverage xml || true
|
|
||||||
/venv/bin/codecov -X gcov -f coverage.xml
|
|
||||||
|
|
||||||
wget -O buildkite.tar.gz https://github.com/buildkite/agent/releases/download/v3.13.0/buildkite-agent-linux-amd64-3.13.0.tar.gz
|
|
||||||
tar xvf buildkite.tar.gz
|
|
||||||
chmod +x ./buildkite-agent
|
|
||||||
|
|
||||||
# Upload the files
|
|
||||||
./buildkite-agent artifact upload "/logs/**/*.log*"
|
|
||||||
./buildkite-agent artifact upload "/logs/results.tap"
|
|
||||||
|
|
||||||
if [ $TEST_STATUS -ne 0 ]; then
|
|
||||||
# Annotate, if failure
|
|
||||||
/venv/bin/python $SYNAPSE_DIR/.buildkite/format_tap.py /logs/results.tap "$BUILDKITE_LABEL" | ./buildkite-agent annotate --style="error" --context="$BUILDKITE_LABEL"
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
|
|
||||||
exit $TEST_STATUS
|
|
||||||
+30
@@ -1,3 +1,33 @@
|
|||||||
|
Synapse 1.1.0 (2019-07-04)
|
||||||
|
==========================
|
||||||
|
|
||||||
|
As of v1.1.0, Synapse no longer supports Python 2, nor Postgres version 9.4.
|
||||||
|
See the [upgrade notes](UPGRADE.rst#upgrading-to-v110) for more details.
|
||||||
|
|
||||||
|
This release also deprecates the use of environment variables to configure the
|
||||||
|
docker image. See the [docker README](https://github.com/matrix-org/synapse/blob/release-v1.1.0/docker/README.md#legacy-dynamic-configuration-file-support)
|
||||||
|
for more details.
|
||||||
|
|
||||||
|
No changes since 1.1.0rc2.
|
||||||
|
|
||||||
|
|
||||||
|
Synapse 1.1.0rc2 (2019-07-03)
|
||||||
|
=============================
|
||||||
|
|
||||||
|
Bugfixes
|
||||||
|
--------
|
||||||
|
|
||||||
|
- Fix regression in 1.1rc1 where OPTIONS requests to the media repo would fail. ([\#5593](https://github.com/matrix-org/synapse/issues/5593))
|
||||||
|
- Removed the `SYNAPSE_SMTP_*` docker container environment variables. Using these environment variables prevented the docker container from starting in Synapse v1.0, even though they didn't actually allow any functionality anyway. ([\#5596](https://github.com/matrix-org/synapse/issues/5596))
|
||||||
|
- Fix a number of "Starting txn from sentinel context" warnings. ([\#5605](https://github.com/matrix-org/synapse/issues/5605))
|
||||||
|
|
||||||
|
|
||||||
|
Internal Changes
|
||||||
|
----------------
|
||||||
|
|
||||||
|
- Update github templates. ([\#5552](https://github.com/matrix-org/synapse/issues/5552))
|
||||||
|
|
||||||
|
|
||||||
Synapse 1.1.0rc1 (2019-07-02)
|
Synapse 1.1.0rc1 (2019-07-02)
|
||||||
=============================
|
=============================
|
||||||
|
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ include demo/README
|
|||||||
include demo/demo.tls.dh
|
include demo/demo.tls.dh
|
||||||
include demo/*.py
|
include demo/*.py
|
||||||
include demo/*.sh
|
include demo/*.sh
|
||||||
|
include sytest-blacklist
|
||||||
|
|
||||||
recursive-include synapse/storage/schema *.sql
|
recursive-include synapse/storage/schema *.sql
|
||||||
recursive-include synapse/storage/schema *.sql.postgres
|
recursive-include synapse/storage/schema *.sql.postgres
|
||||||
|
|||||||
+1
-1
@@ -272,7 +272,7 @@ to install using pip and a virtualenv::
|
|||||||
|
|
||||||
virtualenv -p python3 env
|
virtualenv -p python3 env
|
||||||
source env/bin/activate
|
source env/bin/activate
|
||||||
python -m pip install --no-pep-517 -e .[all]
|
python -m pip install --no-use-pep517 -e .[all]
|
||||||
|
|
||||||
This will run a process of downloading and installing all the needed
|
This will run a process of downloading and installing all the needed
|
||||||
dependencies into a virtual env.
|
dependencies into a virtual env.
|
||||||
|
|||||||
@@ -1 +0,0 @@
|
|||||||
Update github templates.
|
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
Move logging code out of `synapse.util` and into `synapse.logging`.
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
Fix 'utime went backwards' errors on daemonization.
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
Add a blacklist file to the repo to blacklist certain sytests from failing CI.
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
Add `sender` and `origin_server_ts` fields to `m.replace`.
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
Make runtime errors surrounding password reset emails much clearer.
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
Move logging code out of `synapse.util` and into `synapse.logging`.
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
Various minor fixes to the federation request rate limiter.
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
Remove dead code for persiting outgoing federation transactions.
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
Add default push rule to ignore reactions.
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
Remove support for the `invite_3pid_guest` configuration setting.
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
Include the original event when asking for its relations.
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
Move RegistrationHandler.get_or_create_user to test code.
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
Add some more common python virtual-environment paths to the black exclusion list.
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
Unblacklist some user_directory sytests.
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
Fix requests to the `/store_invite` endpoint of identity servers being sent in the wrong format.
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
Factor out some redundant code in the login implementation.
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
Update ModuleApi to avoid register(generate_token=True).
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
Remove access-token support from RegistrationHandler.register, and rename it.
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
Improve logging for auto-join when a new user is created.
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
Fix newly-registered users not being able to lookup their own profile without joining a room.
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
Remove unused and unnecessary check for FederationDeniedError in _exception_to_failure.
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
--no-pep517 should be --no-use-pep517 in the documentation to setup the development environment.
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
# Example log_config file for synapse. To enable, point `log_config` to it in
|
# Example log_config file for synapse. To enable, point `log_config` to it in
|
||||||
# `homeserver.yaml`, and restart synapse.
|
# `homeserver.yaml`, and restart synapse.
|
||||||
#
|
#
|
||||||
# This configuration will produce similar results to the defaults within
|
# This configuration will produce similar results to the defaults within
|
||||||
# synapse, but can be edited to give more flexibility.
|
# synapse, but can be edited to give more flexibility.
|
||||||
|
|
||||||
version: 1
|
version: 1
|
||||||
@@ -12,7 +12,7 @@ formatters:
|
|||||||
|
|
||||||
filters:
|
filters:
|
||||||
context:
|
context:
|
||||||
(): synapse.util.logcontext.LoggingContextFilter
|
(): synapse.logging.context.LoggingContextFilter
|
||||||
request: ""
|
request: ""
|
||||||
|
|
||||||
handlers:
|
handlers:
|
||||||
@@ -35,7 +35,7 @@ handlers:
|
|||||||
root:
|
root:
|
||||||
level: INFO
|
level: INFO
|
||||||
handlers: [console] # to use file handler instead, switch to [file]
|
handlers: [console] # to use file handler instead, switch to [file]
|
||||||
|
|
||||||
loggers:
|
loggers:
|
||||||
synapse:
|
synapse:
|
||||||
level: INFO
|
level: INFO
|
||||||
|
|||||||
@@ -36,7 +36,7 @@ from synapse.util import origin_from_ucid
|
|||||||
|
|
||||||
from synapse.app.homeserver import SynapseHomeServer
|
from synapse.app.homeserver import SynapseHomeServer
|
||||||
|
|
||||||
# from synapse.util.logutils import log_function
|
# from synapse.logging.utils import log_function
|
||||||
|
|
||||||
from twisted.internet import reactor, defer
|
from twisted.internet import reactor, defer
|
||||||
from twisted.python import log
|
from twisted.python import log
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ formatters:
|
|||||||
|
|
||||||
filters:
|
filters:
|
||||||
context:
|
context:
|
||||||
(): synapse.util.logcontext.LoggingContextFilter
|
(): synapse.logging.context.LoggingContextFilter
|
||||||
request: ""
|
request: ""
|
||||||
|
|
||||||
handlers:
|
handlers:
|
||||||
|
|||||||
Vendored
+12
-2
@@ -1,9 +1,19 @@
|
|||||||
matrix-synapse-py3 (1.0.0+nmu1) UNRELEASED; urgency=medium
|
matrix-synapse-py3 (1.1.0-1) UNRELEASED; urgency=medium
|
||||||
|
|
||||||
|
[ Amber Brown ]
|
||||||
|
* Update logging config defaults to match API changes in Synapse.
|
||||||
|
|
||||||
|
-- Erik Johnston <erikj@rae> Thu, 04 Jul 2019 13:59:02 +0100
|
||||||
|
|
||||||
|
matrix-synapse-py3 (1.1.0) stable; urgency=medium
|
||||||
|
|
||||||
[ Silke Hofstra ]
|
[ Silke Hofstra ]
|
||||||
* Include systemd-python to allow logging to the systemd journal.
|
* Include systemd-python to allow logging to the systemd journal.
|
||||||
|
|
||||||
-- Silke Hofstra <silke@slxh.eu> Wed, 29 May 2019 09:45:29 +0200
|
[ Synapse Packaging team ]
|
||||||
|
* New synapse release 1.1.0.
|
||||||
|
|
||||||
|
-- Synapse Packaging team <packages@matrix.org> Thu, 04 Jul 2019 11:43:41 +0100
|
||||||
|
|
||||||
matrix-synapse-py3 (1.0.0) stable; urgency=medium
|
matrix-synapse-py3 (1.0.0) stable; urgency=medium
|
||||||
|
|
||||||
|
|||||||
Vendored
+1
-1
@@ -7,7 +7,7 @@ formatters:
|
|||||||
|
|
||||||
filters:
|
filters:
|
||||||
context:
|
context:
|
||||||
(): synapse.util.logcontext.LoggingContextFilter
|
(): synapse.logging.context.LoggingContextFilter
|
||||||
request: ""
|
request: ""
|
||||||
|
|
||||||
handlers:
|
handlers:
|
||||||
|
|||||||
@@ -207,22 +207,3 @@ perspectives:
|
|||||||
|
|
||||||
password_config:
|
password_config:
|
||||||
enabled: true
|
enabled: true
|
||||||
|
|
||||||
{% if SYNAPSE_SMTP_HOST %}
|
|
||||||
email:
|
|
||||||
enable_notifs: false
|
|
||||||
smtp_host: "{{ SYNAPSE_SMTP_HOST }}"
|
|
||||||
smtp_port: {{ SYNAPSE_SMTP_PORT or "25" }}
|
|
||||||
smtp_user: "{{ SYNAPSE_SMTP_USER }}"
|
|
||||||
smtp_pass: "{{ SYNAPSE_SMTP_PASSWORD }}"
|
|
||||||
require_transport_security: False
|
|
||||||
notif_from: "{{ SYNAPSE_SMTP_FROM or "hostmaster@" + SYNAPSE_SERVER_NAME }}"
|
|
||||||
app_name: Matrix
|
|
||||||
# if template_dir is unset, uses the example templates that are part of
|
|
||||||
# the Synapse distribution.
|
|
||||||
#template_dir: res/templates
|
|
||||||
notif_template_html: notif_mail.html
|
|
||||||
notif_template_text: notif_mail.txt
|
|
||||||
notif_for_new_users: True
|
|
||||||
riot_base_url: "https://{{ SYNAPSE_SERVER_NAME }}"
|
|
||||||
{% endif %}
|
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ formatters:
|
|||||||
|
|
||||||
filters:
|
filters:
|
||||||
context:
|
context:
|
||||||
(): synapse.util.logcontext.LoggingContextFilter
|
(): synapse.logging.context.LoggingContextFilter
|
||||||
request: ""
|
request: ""
|
||||||
|
|
||||||
handlers:
|
handlers:
|
||||||
|
|||||||
+19
-19
@@ -1,4 +1,4 @@
|
|||||||
Log contexts
|
Log Contexts
|
||||||
============
|
============
|
||||||
|
|
||||||
.. contents::
|
.. contents::
|
||||||
@@ -12,7 +12,7 @@ record.
|
|||||||
Logcontexts are also used for CPU and database accounting, so that we can track
|
Logcontexts are also used for CPU and database accounting, so that we can track
|
||||||
which requests were responsible for high CPU use or database activity.
|
which requests were responsible for high CPU use or database activity.
|
||||||
|
|
||||||
The ``synapse.util.logcontext`` module provides a facilities for managing the
|
The ``synapse.logging.context`` module provides a facilities for managing the
|
||||||
current log context (as well as providing the ``LoggingContextFilter`` class).
|
current log context (as well as providing the ``LoggingContextFilter`` class).
|
||||||
|
|
||||||
Deferreds make the whole thing complicated, so this document describes how it
|
Deferreds make the whole thing complicated, so this document describes how it
|
||||||
@@ -27,19 +27,19 @@ found them:
|
|||||||
|
|
||||||
.. code:: python
|
.. code:: python
|
||||||
|
|
||||||
from synapse.util import logcontext # omitted from future snippets
|
from synapse.logging import context # omitted from future snippets
|
||||||
|
|
||||||
def handle_request(request_id):
|
def handle_request(request_id):
|
||||||
request_context = logcontext.LoggingContext()
|
request_context = context.LoggingContext()
|
||||||
|
|
||||||
calling_context = logcontext.LoggingContext.current_context()
|
calling_context = context.LoggingContext.current_context()
|
||||||
logcontext.LoggingContext.set_current_context(request_context)
|
context.LoggingContext.set_current_context(request_context)
|
||||||
try:
|
try:
|
||||||
request_context.request = request_id
|
request_context.request = request_id
|
||||||
do_request_handling()
|
do_request_handling()
|
||||||
logger.debug("finished")
|
logger.debug("finished")
|
||||||
finally:
|
finally:
|
||||||
logcontext.LoggingContext.set_current_context(calling_context)
|
context.LoggingContext.set_current_context(calling_context)
|
||||||
|
|
||||||
def do_request_handling():
|
def do_request_handling():
|
||||||
logger.debug("phew") # this will be logged against request_id
|
logger.debug("phew") # this will be logged against request_id
|
||||||
@@ -51,7 +51,7 @@ written much more succinctly as:
|
|||||||
.. code:: python
|
.. code:: python
|
||||||
|
|
||||||
def handle_request(request_id):
|
def handle_request(request_id):
|
||||||
with logcontext.LoggingContext() as request_context:
|
with context.LoggingContext() as request_context:
|
||||||
request_context.request = request_id
|
request_context.request = request_id
|
||||||
do_request_handling()
|
do_request_handling()
|
||||||
logger.debug("finished")
|
logger.debug("finished")
|
||||||
@@ -74,7 +74,7 @@ blocking operation, and returns a deferred:
|
|||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def handle_request(request_id):
|
def handle_request(request_id):
|
||||||
with logcontext.LoggingContext() as request_context:
|
with context.LoggingContext() as request_context:
|
||||||
request_context.request = request_id
|
request_context.request = request_id
|
||||||
yield do_request_handling()
|
yield do_request_handling()
|
||||||
logger.debug("finished")
|
logger.debug("finished")
|
||||||
@@ -179,7 +179,7 @@ though, we need to make up a new Deferred, or we get a Deferred back from
|
|||||||
external code. We need to make it follow our rules.
|
external code. We need to make it follow our rules.
|
||||||
|
|
||||||
The easy way to do it is with a combination of ``defer.inlineCallbacks``, and
|
The easy way to do it is with a combination of ``defer.inlineCallbacks``, and
|
||||||
``logcontext.PreserveLoggingContext``. Suppose we want to implement ``sleep``,
|
``context.PreserveLoggingContext``. Suppose we want to implement ``sleep``,
|
||||||
which returns a deferred which will run its callbacks after a given number of
|
which returns a deferred which will run its callbacks after a given number of
|
||||||
seconds. That might look like:
|
seconds. That might look like:
|
||||||
|
|
||||||
@@ -204,13 +204,13 @@ That doesn't follow the rules, but we can fix it by wrapping it with
|
|||||||
This technique works equally for external functions which return deferreds,
|
This technique works equally for external functions which return deferreds,
|
||||||
or deferreds we have made ourselves.
|
or deferreds we have made ourselves.
|
||||||
|
|
||||||
You can also use ``logcontext.make_deferred_yieldable``, which just does the
|
You can also use ``context.make_deferred_yieldable``, which just does the
|
||||||
boilerplate for you, so the above could be written:
|
boilerplate for you, so the above could be written:
|
||||||
|
|
||||||
.. code:: python
|
.. code:: python
|
||||||
|
|
||||||
def sleep(seconds):
|
def sleep(seconds):
|
||||||
return logcontext.make_deferred_yieldable(get_sleep_deferred(seconds))
|
return context.make_deferred_yieldable(get_sleep_deferred(seconds))
|
||||||
|
|
||||||
|
|
||||||
Fire-and-forget
|
Fire-and-forget
|
||||||
@@ -279,7 +279,7 @@ Obviously that option means that the operations done in
|
|||||||
that might be fixed by setting a different logcontext via a ``with
|
that might be fixed by setting a different logcontext via a ``with
|
||||||
LoggingContext(...)`` in ``background_operation``).
|
LoggingContext(...)`` in ``background_operation``).
|
||||||
|
|
||||||
The second option is to use ``logcontext.run_in_background``, which wraps a
|
The second option is to use ``context.run_in_background``, which wraps a
|
||||||
function so that it doesn't reset the logcontext even when it returns an
|
function so that it doesn't reset the logcontext even when it returns an
|
||||||
incomplete deferred, and adds a callback to the returned deferred to reset the
|
incomplete deferred, and adds a callback to the returned deferred to reset the
|
||||||
logcontext. In other words, it turns a function that follows the Synapse rules
|
logcontext. In other words, it turns a function that follows the Synapse rules
|
||||||
@@ -293,7 +293,7 @@ It can be used like this:
|
|||||||
def do_request_handling():
|
def do_request_handling():
|
||||||
yield foreground_operation()
|
yield foreground_operation()
|
||||||
|
|
||||||
logcontext.run_in_background(background_operation)
|
context.run_in_background(background_operation)
|
||||||
|
|
||||||
# this will now be logged against the request context
|
# this will now be logged against the request context
|
||||||
logger.debug("Request handling complete")
|
logger.debug("Request handling complete")
|
||||||
@@ -332,7 +332,7 @@ gathered:
|
|||||||
result = yield defer.gatherResults([d1, d2])
|
result = yield defer.gatherResults([d1, d2])
|
||||||
|
|
||||||
In this case particularly, though, option two, of using
|
In this case particularly, though, option two, of using
|
||||||
``logcontext.preserve_fn`` almost certainly makes more sense, so that
|
``context.preserve_fn`` almost certainly makes more sense, so that
|
||||||
``operation1`` and ``operation2`` are both logged against the original
|
``operation1`` and ``operation2`` are both logged against the original
|
||||||
logcontext. This looks like:
|
logcontext. This looks like:
|
||||||
|
|
||||||
@@ -340,8 +340,8 @@ logcontext. This looks like:
|
|||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def do_request_handling():
|
def do_request_handling():
|
||||||
d1 = logcontext.preserve_fn(operation1)()
|
d1 = context.preserve_fn(operation1)()
|
||||||
d2 = logcontext.preserve_fn(operation2)()
|
d2 = context.preserve_fn(operation2)()
|
||||||
|
|
||||||
with PreserveLoggingContext():
|
with PreserveLoggingContext():
|
||||||
result = yield defer.gatherResults([d1, d2])
|
result = yield defer.gatherResults([d1, d2])
|
||||||
@@ -381,7 +381,7 @@ off the background process, and then leave the ``with`` block to wait for it:
|
|||||||
.. code:: python
|
.. code:: python
|
||||||
|
|
||||||
def handle_request(request_id):
|
def handle_request(request_id):
|
||||||
with logcontext.LoggingContext() as request_context:
|
with context.LoggingContext() as request_context:
|
||||||
request_context.request = request_id
|
request_context.request = request_id
|
||||||
d = do_request_handling()
|
d = do_request_handling()
|
||||||
|
|
||||||
@@ -414,7 +414,7 @@ runs its callbacks in the original logcontext, all is happy.
|
|||||||
|
|
||||||
The business of a Deferred which runs its callbacks in the original logcontext
|
The business of a Deferred which runs its callbacks in the original logcontext
|
||||||
isn't hard to achieve — we have it today, in the shape of
|
isn't hard to achieve — we have it today, in the shape of
|
||||||
``logcontext._PreservingContextDeferred``:
|
``context._PreservingContextDeferred``:
|
||||||
|
|
||||||
.. code:: python
|
.. code:: python
|
||||||
|
|
||||||
|
|||||||
@@ -39,6 +39,8 @@ exclude = '''
|
|||||||
| \.git # root of the project
|
| \.git # root of the project
|
||||||
| \.tox
|
| \.tox
|
||||||
| \.venv
|
| \.venv
|
||||||
|
| \.env
|
||||||
|
| env
|
||||||
| _build
|
| _build
|
||||||
| _trial_temp.*
|
| _trial_temp.*
|
||||||
| build
|
| build
|
||||||
|
|||||||
+1
-1
@@ -35,4 +35,4 @@ try:
|
|||||||
except ImportError:
|
except ImportError:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
__version__ = "1.1.0rc1"
|
__version__ = "1.1.0"
|
||||||
|
|||||||
+29
-26
@@ -27,7 +27,7 @@ from twisted.protocols.tls import TLSMemoryBIOFactory
|
|||||||
import synapse
|
import synapse
|
||||||
from synapse.app import check_bind_error
|
from synapse.app import check_bind_error
|
||||||
from synapse.crypto import context_factory
|
from synapse.crypto import context_factory
|
||||||
from synapse.util import PreserveLoggingContext
|
from synapse.logging.context import PreserveLoggingContext
|
||||||
from synapse.util.async_helpers import Linearizer
|
from synapse.util.async_helpers import Linearizer
|
||||||
from synapse.util.rlimit import change_resource_limit
|
from synapse.util.rlimit import change_resource_limit
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
@@ -93,33 +93,36 @@ def start_reactor(
|
|||||||
install_dns_limiter(reactor)
|
install_dns_limiter(reactor)
|
||||||
|
|
||||||
def run():
|
def run():
|
||||||
# make sure that we run the reactor with the sentinel log context,
|
logger.info("Running")
|
||||||
# otherwise other PreserveLoggingContext instances will get confused
|
change_resource_limit(soft_file_limit)
|
||||||
# and complain when they see the logcontext arbitrarily swapping
|
if gc_thresholds:
|
||||||
# between the sentinel and `run` logcontexts.
|
gc.set_threshold(*gc_thresholds)
|
||||||
with PreserveLoggingContext():
|
reactor.run()
|
||||||
logger.info("Running")
|
|
||||||
|
|
||||||
change_resource_limit(soft_file_limit)
|
# make sure that we run the reactor with the sentinel log context,
|
||||||
if gc_thresholds:
|
# otherwise other PreserveLoggingContext instances will get confused
|
||||||
gc.set_threshold(*gc_thresholds)
|
# and complain when they see the logcontext arbitrarily swapping
|
||||||
reactor.run()
|
# between the sentinel and `run` logcontexts.
|
||||||
|
#
|
||||||
|
# We also need to drop the logcontext before forking if we're daemonizing,
|
||||||
|
# otherwise the cputime metrics get confused about the per-thread resource usage
|
||||||
|
# appearing to go backwards.
|
||||||
|
with PreserveLoggingContext():
|
||||||
|
if daemonize:
|
||||||
|
if print_pidfile:
|
||||||
|
print(pid_file)
|
||||||
|
|
||||||
if daemonize:
|
daemon = Daemonize(
|
||||||
if print_pidfile:
|
app=appname,
|
||||||
print(pid_file)
|
pid=pid_file,
|
||||||
|
action=run,
|
||||||
daemon = Daemonize(
|
auto_close_fds=False,
|
||||||
app=appname,
|
verbose=True,
|
||||||
pid=pid_file,
|
logger=logger,
|
||||||
action=run,
|
)
|
||||||
auto_close_fds=False,
|
daemon.start()
|
||||||
verbose=True,
|
else:
|
||||||
logger=logger,
|
run()
|
||||||
)
|
|
||||||
daemon.start()
|
|
||||||
else:
|
|
||||||
run()
|
|
||||||
|
|
||||||
|
|
||||||
def quit_with_error(error_string):
|
def quit_with_error(error_string):
|
||||||
|
|||||||
@@ -26,6 +26,7 @@ from synapse.config._base import ConfigError
|
|||||||
from synapse.config.homeserver import HomeServerConfig
|
from synapse.config.homeserver import HomeServerConfig
|
||||||
from synapse.config.logger import setup_logging
|
from synapse.config.logger import setup_logging
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
|
from synapse.logging.context import LoggingContext, run_in_background
|
||||||
from synapse.metrics import RegistryProxy
|
from synapse.metrics import RegistryProxy
|
||||||
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
||||||
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
|
||||||
@@ -36,7 +37,6 @@ from synapse.replication.tcp.client import ReplicationClientHandler
|
|||||||
from synapse.server import HomeServer
|
from synapse.server import HomeServer
|
||||||
from synapse.storage.engines import create_engine
|
from synapse.storage.engines import create_engine
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
from synapse.util.logcontext import LoggingContext, run_in_background
|
|
||||||
from synapse.util.manhole import manhole
|
from synapse.util.manhole import manhole
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
|
|
||||||
|
|||||||
@@ -27,6 +27,7 @@ from synapse.config.homeserver import HomeServerConfig
|
|||||||
from synapse.config.logger import setup_logging
|
from synapse.config.logger import setup_logging
|
||||||
from synapse.http.server import JsonResource
|
from synapse.http.server import JsonResource
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
|
from synapse.logging.context import LoggingContext
|
||||||
from synapse.metrics import RegistryProxy
|
from synapse.metrics import RegistryProxy
|
||||||
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
||||||
from synapse.replication.slave.storage._base import BaseSlavedStore
|
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||||
@@ -64,7 +65,6 @@ from synapse.rest.client.versions import VersionsRestServlet
|
|||||||
from synapse.server import HomeServer
|
from synapse.server import HomeServer
|
||||||
from synapse.storage.engines import create_engine
|
from synapse.storage.engines import create_engine
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
from synapse.util.logcontext import LoggingContext
|
|
||||||
from synapse.util.manhole import manhole
|
from synapse.util.manhole import manhole
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
|
|
||||||
|
|||||||
@@ -27,6 +27,7 @@ from synapse.config.homeserver import HomeServerConfig
|
|||||||
from synapse.config.logger import setup_logging
|
from synapse.config.logger import setup_logging
|
||||||
from synapse.http.server import JsonResource
|
from synapse.http.server import JsonResource
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
|
from synapse.logging.context import LoggingContext
|
||||||
from synapse.metrics import RegistryProxy
|
from synapse.metrics import RegistryProxy
|
||||||
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
||||||
from synapse.replication.slave.storage._base import BaseSlavedStore
|
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||||
@@ -59,7 +60,6 @@ from synapse.server import HomeServer
|
|||||||
from synapse.storage.engines import create_engine
|
from synapse.storage.engines import create_engine
|
||||||
from synapse.storage.user_directory import UserDirectoryStore
|
from synapse.storage.user_directory import UserDirectoryStore
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
from synapse.util.logcontext import LoggingContext
|
|
||||||
from synapse.util.manhole import manhole
|
from synapse.util.manhole import manhole
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
|
|
||||||
|
|||||||
@@ -28,6 +28,7 @@ from synapse.config.homeserver import HomeServerConfig
|
|||||||
from synapse.config.logger import setup_logging
|
from synapse.config.logger import setup_logging
|
||||||
from synapse.federation.transport.server import TransportLayerServer
|
from synapse.federation.transport.server import TransportLayerServer
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
|
from synapse.logging.context import LoggingContext
|
||||||
from synapse.metrics import RegistryProxy
|
from synapse.metrics import RegistryProxy
|
||||||
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
||||||
from synapse.replication.slave.storage._base import BaseSlavedStore
|
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||||
@@ -48,7 +49,6 @@ from synapse.rest.key.v2 import KeyApiV2Resource
|
|||||||
from synapse.server import HomeServer
|
from synapse.server import HomeServer
|
||||||
from synapse.storage.engines import create_engine
|
from synapse.storage.engines import create_engine
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
from synapse.util.logcontext import LoggingContext
|
|
||||||
from synapse.util.manhole import manhole
|
from synapse.util.manhole import manhole
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
|
|
||||||
|
|||||||
@@ -27,6 +27,7 @@ from synapse.config.homeserver import HomeServerConfig
|
|||||||
from synapse.config.logger import setup_logging
|
from synapse.config.logger import setup_logging
|
||||||
from synapse.federation import send_queue
|
from synapse.federation import send_queue
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
|
from synapse.logging.context import LoggingContext, run_in_background
|
||||||
from synapse.metrics import RegistryProxy
|
from synapse.metrics import RegistryProxy
|
||||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||||
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
||||||
@@ -44,7 +45,6 @@ from synapse.storage.engines import create_engine
|
|||||||
from synapse.types import ReadReceipt
|
from synapse.types import ReadReceipt
|
||||||
from synapse.util.async_helpers import Linearizer
|
from synapse.util.async_helpers import Linearizer
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
from synapse.util.logcontext import LoggingContext, run_in_background
|
|
||||||
from synapse.util.manhole import manhole
|
from synapse.util.manhole import manhole
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
|
|
||||||
|
|||||||
@@ -29,6 +29,7 @@ from synapse.config.logger import setup_logging
|
|||||||
from synapse.http.server import JsonResource
|
from synapse.http.server import JsonResource
|
||||||
from synapse.http.servlet import RestServlet, parse_json_object_from_request
|
from synapse.http.servlet import RestServlet, parse_json_object_from_request
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
|
from synapse.logging.context import LoggingContext
|
||||||
from synapse.metrics import RegistryProxy
|
from synapse.metrics import RegistryProxy
|
||||||
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
||||||
from synapse.replication.slave.storage._base import BaseSlavedStore
|
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||||
@@ -41,7 +42,6 @@ from synapse.rest.client.v2_alpha._base import client_patterns
|
|||||||
from synapse.server import HomeServer
|
from synapse.server import HomeServer
|
||||||
from synapse.storage.engines import create_engine
|
from synapse.storage.engines import create_engine
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
from synapse.util.logcontext import LoggingContext
|
|
||||||
from synapse.util.manhole import manhole
|
from synapse.util.manhole import manhole
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
|
|
||||||
|
|||||||
@@ -54,6 +54,7 @@ from synapse.federation.transport.server import TransportLayerServer
|
|||||||
from synapse.http.additional_resource import AdditionalResource
|
from synapse.http.additional_resource import AdditionalResource
|
||||||
from synapse.http.server import RootRedirect
|
from synapse.http.server import RootRedirect
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
|
from synapse.logging.context import LoggingContext
|
||||||
from synapse.metrics import RegistryProxy
|
from synapse.metrics import RegistryProxy
|
||||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||||
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
||||||
@@ -72,7 +73,6 @@ from synapse.storage.engines import IncorrectDatabaseSetup, create_engine
|
|||||||
from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
|
from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
|
||||||
from synapse.util.caches import CACHE_SIZE_FACTOR
|
from synapse.util.caches import CACHE_SIZE_FACTOR
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
from synapse.util.logcontext import LoggingContext
|
|
||||||
from synapse.util.manhole import manhole
|
from synapse.util.manhole import manhole
|
||||||
from synapse.util.module_loader import load_module
|
from synapse.util.module_loader import load_module
|
||||||
from synapse.util.rlimit import change_resource_limit
|
from synapse.util.rlimit import change_resource_limit
|
||||||
|
|||||||
@@ -27,6 +27,7 @@ from synapse.config._base import ConfigError
|
|||||||
from synapse.config.homeserver import HomeServerConfig
|
from synapse.config.homeserver import HomeServerConfig
|
||||||
from synapse.config.logger import setup_logging
|
from synapse.config.logger import setup_logging
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
|
from synapse.logging.context import LoggingContext
|
||||||
from synapse.metrics import RegistryProxy
|
from synapse.metrics import RegistryProxy
|
||||||
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
||||||
from synapse.replication.slave.storage._base import BaseSlavedStore
|
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||||
@@ -40,7 +41,6 @@ from synapse.server import HomeServer
|
|||||||
from synapse.storage.engines import create_engine
|
from synapse.storage.engines import create_engine
|
||||||
from synapse.storage.media_repository import MediaRepositoryStore
|
from synapse.storage.media_repository import MediaRepositoryStore
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
from synapse.util.logcontext import LoggingContext
|
|
||||||
from synapse.util.manhole import manhole
|
from synapse.util.manhole import manhole
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
|
|
||||||
|
|||||||
@@ -26,6 +26,7 @@ from synapse.config._base import ConfigError
|
|||||||
from synapse.config.homeserver import HomeServerConfig
|
from synapse.config.homeserver import HomeServerConfig
|
||||||
from synapse.config.logger import setup_logging
|
from synapse.config.logger import setup_logging
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
|
from synapse.logging.context import LoggingContext, run_in_background
|
||||||
from synapse.metrics import RegistryProxy
|
from synapse.metrics import RegistryProxy
|
||||||
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
||||||
from synapse.replication.slave.storage._base import __func__
|
from synapse.replication.slave.storage._base import __func__
|
||||||
@@ -38,7 +39,6 @@ from synapse.server import HomeServer
|
|||||||
from synapse.storage import DataStore
|
from synapse.storage import DataStore
|
||||||
from synapse.storage.engines import create_engine
|
from synapse.storage.engines import create_engine
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
from synapse.util.logcontext import LoggingContext, run_in_background
|
|
||||||
from synapse.util.manhole import manhole
|
from synapse.util.manhole import manhole
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
|
|
||||||
|
|||||||
@@ -31,6 +31,7 @@ from synapse.config.logger import setup_logging
|
|||||||
from synapse.handlers.presence import PresenceHandler, get_interested_parties
|
from synapse.handlers.presence import PresenceHandler, get_interested_parties
|
||||||
from synapse.http.server import JsonResource
|
from synapse.http.server import JsonResource
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
|
from synapse.logging.context import LoggingContext, run_in_background
|
||||||
from synapse.metrics import RegistryProxy
|
from synapse.metrics import RegistryProxy
|
||||||
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
||||||
from synapse.replication.slave.storage._base import BaseSlavedStore, __func__
|
from synapse.replication.slave.storage._base import BaseSlavedStore, __func__
|
||||||
@@ -57,7 +58,6 @@ from synapse.server import HomeServer
|
|||||||
from synapse.storage.engines import create_engine
|
from synapse.storage.engines import create_engine
|
||||||
from synapse.storage.presence import UserPresenceState
|
from synapse.storage.presence import UserPresenceState
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
from synapse.util.logcontext import LoggingContext, run_in_background
|
|
||||||
from synapse.util.manhole import manhole
|
from synapse.util.manhole import manhole
|
||||||
from synapse.util.stringutils import random_string
|
from synapse.util.stringutils import random_string
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
|
|||||||
@@ -28,6 +28,7 @@ from synapse.config.homeserver import HomeServerConfig
|
|||||||
from synapse.config.logger import setup_logging
|
from synapse.config.logger import setup_logging
|
||||||
from synapse.http.server import JsonResource
|
from synapse.http.server import JsonResource
|
||||||
from synapse.http.site import SynapseSite
|
from synapse.http.site import SynapseSite
|
||||||
|
from synapse.logging.context import LoggingContext, run_in_background
|
||||||
from synapse.metrics import RegistryProxy
|
from synapse.metrics import RegistryProxy
|
||||||
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
|
||||||
from synapse.replication.slave.storage._base import BaseSlavedStore
|
from synapse.replication.slave.storage._base import BaseSlavedStore
|
||||||
@@ -46,7 +47,6 @@ from synapse.storage.engines import create_engine
|
|||||||
from synapse.storage.user_directory import UserDirectoryStore
|
from synapse.storage.user_directory import UserDirectoryStore
|
||||||
from synapse.util.caches.stream_change_cache import StreamChangeCache
|
from synapse.util.caches.stream_change_cache import StreamChangeCache
|
||||||
from synapse.util.httpresourcetree import create_resource_tree
|
from synapse.util.httpresourcetree import create_resource_tree
|
||||||
from synapse.util.logcontext import LoggingContext, run_in_background
|
|
||||||
from synapse.util.manhole import manhole
|
from synapse.util.manhole import manhole
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
|
|
||||||
|
|||||||
@@ -53,8 +53,8 @@ import logging
|
|||||||
from twisted.internet import defer
|
from twisted.internet import defer
|
||||||
|
|
||||||
from synapse.appservice import ApplicationServiceState
|
from synapse.appservice import ApplicationServiceState
|
||||||
|
from synapse.logging.context import run_in_background
|
||||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||||
from synapse.util.logcontext import run_in_background
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|||||||
@@ -112,13 +112,17 @@ class EmailConfig(Config):
|
|||||||
missing = []
|
missing = []
|
||||||
for k in required:
|
for k in required:
|
||||||
if k not in email_config:
|
if k not in email_config:
|
||||||
missing.append(k)
|
missing.append("email." + k)
|
||||||
|
|
||||||
|
if config.get("public_baseurl") is None:
|
||||||
|
missing.append("public_base_url")
|
||||||
|
|
||||||
if len(missing) > 0:
|
if len(missing) > 0:
|
||||||
raise RuntimeError(
|
raise RuntimeError(
|
||||||
"email.password_reset_behaviour is set to 'local' "
|
"Password resets emails are configured to be sent from "
|
||||||
"but required keys are missing: %s"
|
"this homeserver due to a partial 'email' block. "
|
||||||
% (", ".join(["email." + k for k in missing]),)
|
"However, the following required keys are missing: %s"
|
||||||
|
% (", ".join(missing),)
|
||||||
)
|
)
|
||||||
|
|
||||||
# Templates for password reset emails
|
# Templates for password reset emails
|
||||||
@@ -156,13 +160,6 @@ class EmailConfig(Config):
|
|||||||
filepath, "email.password_reset_template_success_html"
|
filepath, "email.password_reset_template_success_html"
|
||||||
)
|
)
|
||||||
|
|
||||||
if config.get("public_baseurl") is None:
|
|
||||||
raise RuntimeError(
|
|
||||||
"email.password_reset_behaviour is set to 'local' but no "
|
|
||||||
"public_baseurl is set. This is necessary to generate password "
|
|
||||||
"reset links"
|
|
||||||
)
|
|
||||||
|
|
||||||
if self.email_enable_notifs:
|
if self.email_enable_notifs:
|
||||||
required = [
|
required = [
|
||||||
"smtp_host",
|
"smtp_host",
|
||||||
|
|||||||
@@ -24,7 +24,7 @@ from twisted.logger import STDLibLogObserver, globalLogBeginner
|
|||||||
|
|
||||||
import synapse
|
import synapse
|
||||||
from synapse.app import _base as appbase
|
from synapse.app import _base as appbase
|
||||||
from synapse.util.logcontext import LoggingContextFilter
|
from synapse.logging.context import LoggingContextFilter
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
|
|
||||||
from ._base import Config
|
from ._base import Config
|
||||||
@@ -40,7 +40,7 @@ formatters:
|
|||||||
|
|
||||||
filters:
|
filters:
|
||||||
context:
|
context:
|
||||||
(): synapse.util.logcontext.LoggingContextFilter
|
(): synapse.logging.context.LoggingContextFilter
|
||||||
request: ""
|
request: ""
|
||||||
|
|
||||||
handlers:
|
handlers:
|
||||||
|
|||||||
@@ -23,7 +23,7 @@ class RateLimitConfig(object):
|
|||||||
|
|
||||||
class FederationRateLimitConfig(object):
|
class FederationRateLimitConfig(object):
|
||||||
_items_and_default = {
|
_items_and_default = {
|
||||||
"window_size": 10000,
|
"window_size": 1000,
|
||||||
"sleep_limit": 10,
|
"sleep_limit": 10,
|
||||||
"sleep_delay": 500,
|
"sleep_delay": 500,
|
||||||
"reject_limit": 50,
|
"reject_limit": 50,
|
||||||
@@ -54,7 +54,7 @@ class RatelimitConfig(Config):
|
|||||||
|
|
||||||
# Load the new-style federation config, if it exists. Otherwise, fall
|
# Load the new-style federation config, if it exists. Otherwise, fall
|
||||||
# back to the old method.
|
# back to the old method.
|
||||||
if "federation_rc" in config:
|
if "rc_federation" in config:
|
||||||
self.rc_federation = FederationRateLimitConfig(**config["rc_federation"])
|
self.rc_federation = FederationRateLimitConfig(**config["rc_federation"])
|
||||||
else:
|
else:
|
||||||
self.rc_federation = FederationRateLimitConfig(
|
self.rc_federation = FederationRateLimitConfig(
|
||||||
|
|||||||
@@ -71,9 +71,8 @@ class RegistrationConfig(Config):
|
|||||||
self.default_identity_server = config.get("default_identity_server")
|
self.default_identity_server = config.get("default_identity_server")
|
||||||
self.allow_guest_access = config.get("allow_guest_access", False)
|
self.allow_guest_access = config.get("allow_guest_access", False)
|
||||||
|
|
||||||
self.invite_3pid_guest = self.allow_guest_access and config.get(
|
if config.get("invite_3pid_guest", False):
|
||||||
"invite_3pid_guest", False
|
raise ConfigError("invite_3pid_guest is no longer supported")
|
||||||
)
|
|
||||||
|
|
||||||
self.auto_join_rooms = config.get("auto_join_rooms", [])
|
self.auto_join_rooms = config.get("auto_join_rooms", [])
|
||||||
for room_alias in self.auto_join_rooms:
|
for room_alias in self.auto_join_rooms:
|
||||||
|
|||||||
@@ -44,15 +44,16 @@ from synapse.api.errors import (
|
|||||||
RequestSendFailed,
|
RequestSendFailed,
|
||||||
SynapseError,
|
SynapseError,
|
||||||
)
|
)
|
||||||
from synapse.storage.keys import FetchKeyResult
|
from synapse.logging.context import (
|
||||||
from synapse.util import logcontext, unwrapFirstError
|
|
||||||
from synapse.util.async_helpers import yieldable_gather_results
|
|
||||||
from synapse.util.logcontext import (
|
|
||||||
LoggingContext,
|
LoggingContext,
|
||||||
PreserveLoggingContext,
|
PreserveLoggingContext,
|
||||||
|
make_deferred_yieldable,
|
||||||
preserve_fn,
|
preserve_fn,
|
||||||
run_in_background,
|
run_in_background,
|
||||||
)
|
)
|
||||||
|
from synapse.storage.keys import FetchKeyResult
|
||||||
|
from synapse.util import unwrapFirstError
|
||||||
|
from synapse.util.async_helpers import yieldable_gather_results
|
||||||
from synapse.util.metrics import Measure
|
from synapse.util.metrics import Measure
|
||||||
from synapse.util.retryutils import NotRetryingDestination
|
from synapse.util.retryutils import NotRetryingDestination
|
||||||
|
|
||||||
@@ -140,7 +141,7 @@ class Keyring(object):
|
|||||||
"""
|
"""
|
||||||
req = VerifyJsonRequest(server_name, json_object, validity_time, request_name)
|
req = VerifyJsonRequest(server_name, json_object, validity_time, request_name)
|
||||||
requests = (req,)
|
requests = (req,)
|
||||||
return logcontext.make_deferred_yieldable(self._verify_objects(requests)[0])
|
return make_deferred_yieldable(self._verify_objects(requests)[0])
|
||||||
|
|
||||||
def verify_json_objects_for_server(self, server_and_json):
|
def verify_json_objects_for_server(self, server_and_json):
|
||||||
"""Bulk verifies signatures of json objects, bulk fetching keys as
|
"""Bulk verifies signatures of json objects, bulk fetching keys as
|
||||||
@@ -557,7 +558,7 @@ class BaseV2KeyFetcher(object):
|
|||||||
|
|
||||||
signed_key_json_bytes = encode_canonical_json(signed_key_json)
|
signed_key_json_bytes = encode_canonical_json(signed_key_json)
|
||||||
|
|
||||||
yield logcontext.make_deferred_yieldable(
|
yield make_deferred_yieldable(
|
||||||
defer.gatherResults(
|
defer.gatherResults(
|
||||||
[
|
[
|
||||||
run_in_background(
|
run_in_background(
|
||||||
@@ -612,7 +613,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
|
|||||||
|
|
||||||
defer.returnValue({})
|
defer.returnValue({})
|
||||||
|
|
||||||
results = yield logcontext.make_deferred_yieldable(
|
results = yield make_deferred_yieldable(
|
||||||
defer.gatherResults(
|
defer.gatherResults(
|
||||||
[run_in_background(get_key, server) for server in self.key_servers],
|
[run_in_background(get_key, server) for server in self.key_servers],
|
||||||
consumeErrors=True,
|
consumeErrors=True,
|
||||||
|
|||||||
@@ -19,7 +19,7 @@ from frozendict import frozendict
|
|||||||
|
|
||||||
from twisted.internet import defer
|
from twisted.internet import defer
|
||||||
|
|
||||||
from synapse.util.logcontext import make_deferred_yieldable, run_in_background
|
from synapse.logging.context import make_deferred_yieldable, run_in_background
|
||||||
|
|
||||||
|
|
||||||
class EventContext(object):
|
class EventContext(object):
|
||||||
|
|||||||
@@ -392,7 +392,11 @@ class EventClientSerializer(object):
|
|||||||
serialized_event["content"].pop("m.relates_to", None)
|
serialized_event["content"].pop("m.relates_to", None)
|
||||||
|
|
||||||
r = serialized_event["unsigned"].setdefault("m.relations", {})
|
r = serialized_event["unsigned"].setdefault("m.relations", {})
|
||||||
r[RelationTypes.REPLACE] = {"event_id": edit.event_id}
|
r[RelationTypes.REPLACE] = {
|
||||||
|
"event_id": edit.event_id,
|
||||||
|
"origin_server_ts": edit.origin_server_ts,
|
||||||
|
"sender": edit.sender,
|
||||||
|
}
|
||||||
|
|
||||||
defer.returnValue(serialized_event)
|
defer.returnValue(serialized_event)
|
||||||
|
|
||||||
|
|||||||
@@ -27,8 +27,14 @@ from synapse.crypto.event_signing import check_event_content_hash
|
|||||||
from synapse.events import event_type_from_format_version
|
from synapse.events import event_type_from_format_version
|
||||||
from synapse.events.utils import prune_event
|
from synapse.events.utils import prune_event
|
||||||
from synapse.http.servlet import assert_params_in_dict
|
from synapse.http.servlet import assert_params_in_dict
|
||||||
|
from synapse.logging.context import (
|
||||||
|
LoggingContext,
|
||||||
|
PreserveLoggingContext,
|
||||||
|
make_deferred_yieldable,
|
||||||
|
preserve_fn,
|
||||||
|
)
|
||||||
from synapse.types import get_domain_from_id
|
from synapse.types import get_domain_from_id
|
||||||
from synapse.util import logcontext, unwrapFirstError
|
from synapse.util import unwrapFirstError
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
@@ -73,7 +79,7 @@ class FederationBase(object):
|
|||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def handle_check_result(pdu, deferred):
|
def handle_check_result(pdu, deferred):
|
||||||
try:
|
try:
|
||||||
res = yield logcontext.make_deferred_yieldable(deferred)
|
res = yield make_deferred_yieldable(deferred)
|
||||||
except SynapseError:
|
except SynapseError:
|
||||||
res = None
|
res = None
|
||||||
|
|
||||||
@@ -102,10 +108,10 @@ class FederationBase(object):
|
|||||||
|
|
||||||
defer.returnValue(res)
|
defer.returnValue(res)
|
||||||
|
|
||||||
handle = logcontext.preserve_fn(handle_check_result)
|
handle = preserve_fn(handle_check_result)
|
||||||
deferreds2 = [handle(pdu, deferred) for pdu, deferred in zip(pdus, deferreds)]
|
deferreds2 = [handle(pdu, deferred) for pdu, deferred in zip(pdus, deferreds)]
|
||||||
|
|
||||||
valid_pdus = yield logcontext.make_deferred_yieldable(
|
valid_pdus = yield make_deferred_yieldable(
|
||||||
defer.gatherResults(deferreds2, consumeErrors=True)
|
defer.gatherResults(deferreds2, consumeErrors=True)
|
||||||
).addErrback(unwrapFirstError)
|
).addErrback(unwrapFirstError)
|
||||||
|
|
||||||
@@ -115,7 +121,7 @@ class FederationBase(object):
|
|||||||
defer.returnValue([p for p in valid_pdus if p])
|
defer.returnValue([p for p in valid_pdus if p])
|
||||||
|
|
||||||
def _check_sigs_and_hash(self, room_version, pdu):
|
def _check_sigs_and_hash(self, room_version, pdu):
|
||||||
return logcontext.make_deferred_yieldable(
|
return make_deferred_yieldable(
|
||||||
self._check_sigs_and_hashes(room_version, [pdu])[0]
|
self._check_sigs_and_hashes(room_version, [pdu])[0]
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -133,14 +139,14 @@ class FederationBase(object):
|
|||||||
* returns a redacted version of the event (if the signature
|
* returns a redacted version of the event (if the signature
|
||||||
matched but the hash did not)
|
matched but the hash did not)
|
||||||
* throws a SynapseError if the signature check failed.
|
* throws a SynapseError if the signature check failed.
|
||||||
The deferreds run their callbacks in the sentinel logcontext.
|
The deferreds run their callbacks in the sentinel
|
||||||
"""
|
"""
|
||||||
deferreds = _check_sigs_on_pdus(self.keyring, room_version, pdus)
|
deferreds = _check_sigs_on_pdus(self.keyring, room_version, pdus)
|
||||||
|
|
||||||
ctx = logcontext.LoggingContext.current_context()
|
ctx = LoggingContext.current_context()
|
||||||
|
|
||||||
def callback(_, pdu):
|
def callback(_, pdu):
|
||||||
with logcontext.PreserveLoggingContext(ctx):
|
with PreserveLoggingContext(ctx):
|
||||||
if not check_event_content_hash(pdu):
|
if not check_event_content_hash(pdu):
|
||||||
# let's try to distinguish between failures because the event was
|
# let's try to distinguish between failures because the event was
|
||||||
# redacted (which are somewhat expected) vs actual ball-tampering
|
# redacted (which are somewhat expected) vs actual ball-tampering
|
||||||
@@ -178,7 +184,7 @@ class FederationBase(object):
|
|||||||
|
|
||||||
def errback(failure, pdu):
|
def errback(failure, pdu):
|
||||||
failure.trap(SynapseError)
|
failure.trap(SynapseError)
|
||||||
with logcontext.PreserveLoggingContext(ctx):
|
with PreserveLoggingContext(ctx):
|
||||||
logger.warn(
|
logger.warn(
|
||||||
"Signature check failed for %s: %s",
|
"Signature check failed for %s: %s",
|
||||||
pdu.event_id,
|
pdu.event_id,
|
||||||
|
|||||||
@@ -39,10 +39,10 @@ from synapse.api.room_versions import (
|
|||||||
)
|
)
|
||||||
from synapse.events import builder, room_version_to_event_format
|
from synapse.events import builder, room_version_to_event_format
|
||||||
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
|
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
|
||||||
from synapse.util import logcontext, unwrapFirstError
|
from synapse.logging.context import make_deferred_yieldable, run_in_background
|
||||||
|
from synapse.logging.utils import log_function
|
||||||
|
from synapse.util import unwrapFirstError
|
||||||
from synapse.util.caches.expiringcache import ExpiringCache
|
from synapse.util.caches.expiringcache import ExpiringCache
|
||||||
from synapse.util.logcontext import make_deferred_yieldable, run_in_background
|
|
||||||
from synapse.util.logutils import log_function
|
|
||||||
from synapse.util.retryutils import NotRetryingDestination
|
from synapse.util.retryutils import NotRetryingDestination
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
@@ -207,7 +207,7 @@ class FederationClient(FederationBase):
|
|||||||
]
|
]
|
||||||
|
|
||||||
# FIXME: We should handle signature failures more gracefully.
|
# FIXME: We should handle signature failures more gracefully.
|
||||||
pdus[:] = yield logcontext.make_deferred_yieldable(
|
pdus[:] = yield make_deferred_yieldable(
|
||||||
defer.gatherResults(
|
defer.gatherResults(
|
||||||
self._check_sigs_and_hashes(room_version, pdus), consumeErrors=True
|
self._check_sigs_and_hashes(room_version, pdus), consumeErrors=True
|
||||||
).addErrback(unwrapFirstError)
|
).addErrback(unwrapFirstError)
|
||||||
|
|||||||
@@ -42,6 +42,8 @@ from synapse.federation.federation_base import FederationBase, event_from_pdu_js
|
|||||||
from synapse.federation.persistence import TransactionActions
|
from synapse.federation.persistence import TransactionActions
|
||||||
from synapse.federation.units import Edu, Transaction
|
from synapse.federation.units import Edu, Transaction
|
||||||
from synapse.http.endpoint import parse_server_name
|
from synapse.http.endpoint import parse_server_name
|
||||||
|
from synapse.logging.context import nested_logging_context
|
||||||
|
from synapse.logging.utils import log_function
|
||||||
from synapse.replication.http.federation import (
|
from synapse.replication.http.federation import (
|
||||||
ReplicationFederationSendEduRestServlet,
|
ReplicationFederationSendEduRestServlet,
|
||||||
ReplicationGetQueryRestServlet,
|
ReplicationGetQueryRestServlet,
|
||||||
@@ -50,8 +52,6 @@ from synapse.types import get_domain_from_id
|
|||||||
from synapse.util import glob_to_regex
|
from synapse.util import glob_to_regex
|
||||||
from synapse.util.async_helpers import Linearizer, concurrently_execute
|
from synapse.util.async_helpers import Linearizer, concurrently_execute
|
||||||
from synapse.util.caches.response_cache import ResponseCache
|
from synapse.util.caches.response_cache import ResponseCache
|
||||||
from synapse.util.logcontext import nested_logging_context
|
|
||||||
from synapse.util.logutils import log_function
|
|
||||||
|
|
||||||
# when processing incoming transactions, we try to handle multiple rooms in
|
# when processing incoming transactions, we try to handle multiple rooms in
|
||||||
# parallel, up to this limit.
|
# parallel, up to this limit.
|
||||||
|
|||||||
@@ -21,9 +21,7 @@ These actions are mostly only used by the :py:mod:`.replication` module.
|
|||||||
|
|
||||||
import logging
|
import logging
|
||||||
|
|
||||||
from twisted.internet import defer
|
from synapse.logging.utils import log_function
|
||||||
|
|
||||||
from synapse.util.logutils import log_function
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
@@ -63,33 +61,3 @@ class TransactionActions(object):
|
|||||||
return self.store.set_received_txn_response(
|
return self.store.set_received_txn_response(
|
||||||
transaction.transaction_id, origin, code, response
|
transaction.transaction_id, origin, code, response
|
||||||
)
|
)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
@log_function
|
|
||||||
def prepare_to_send(self, transaction):
|
|
||||||
""" Persists the `Transaction` we are about to send and works out the
|
|
||||||
correct value for the `prev_ids` key.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Deferred
|
|
||||||
"""
|
|
||||||
transaction.prev_ids = yield self.store.prep_send_transaction(
|
|
||||||
transaction.transaction_id,
|
|
||||||
transaction.destination,
|
|
||||||
transaction.origin_server_ts,
|
|
||||||
)
|
|
||||||
|
|
||||||
@log_function
|
|
||||||
def delivered(self, transaction, response_code, response_dict):
|
|
||||||
""" Marks the given `Transaction` as having been successfully
|
|
||||||
delivered to the remote homeserver, and what the response was.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Deferred
|
|
||||||
"""
|
|
||||||
return self.store.delivered_txn(
|
|
||||||
transaction.transaction_id,
|
|
||||||
transaction.destination,
|
|
||||||
response_code,
|
|
||||||
response_dict,
|
|
||||||
)
|
|
||||||
|
|||||||
@@ -26,6 +26,11 @@ from synapse.federation.sender.per_destination_queue import PerDestinationQueue
|
|||||||
from synapse.federation.sender.transaction_manager import TransactionManager
|
from synapse.federation.sender.transaction_manager import TransactionManager
|
||||||
from synapse.federation.units import Edu
|
from synapse.federation.units import Edu
|
||||||
from synapse.handlers.presence import get_interested_remotes
|
from synapse.handlers.presence import get_interested_remotes
|
||||||
|
from synapse.logging.context import (
|
||||||
|
make_deferred_yieldable,
|
||||||
|
preserve_fn,
|
||||||
|
run_in_background,
|
||||||
|
)
|
||||||
from synapse.metrics import (
|
from synapse.metrics import (
|
||||||
LaterGauge,
|
LaterGauge,
|
||||||
event_processing_loop_counter,
|
event_processing_loop_counter,
|
||||||
@@ -33,7 +38,6 @@ from synapse.metrics import (
|
|||||||
events_processed_counter,
|
events_processed_counter,
|
||||||
)
|
)
|
||||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||||
from synapse.util import logcontext
|
|
||||||
from synapse.util.metrics import measure_func
|
from synapse.util.metrics import measure_func
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
@@ -210,10 +214,10 @@ class FederationSender(object):
|
|||||||
for event in events:
|
for event in events:
|
||||||
events_by_room.setdefault(event.room_id, []).append(event)
|
events_by_room.setdefault(event.room_id, []).append(event)
|
||||||
|
|
||||||
yield logcontext.make_deferred_yieldable(
|
yield make_deferred_yieldable(
|
||||||
defer.gatherResults(
|
defer.gatherResults(
|
||||||
[
|
[
|
||||||
logcontext.run_in_background(handle_room_events, evs)
|
run_in_background(handle_room_events, evs)
|
||||||
for evs in itervalues(events_by_room)
|
for evs in itervalues(events_by_room)
|
||||||
],
|
],
|
||||||
consumeErrors=True,
|
consumeErrors=True,
|
||||||
@@ -360,7 +364,7 @@ class FederationSender(object):
|
|||||||
for queue in queues:
|
for queue in queues:
|
||||||
queue.flush_read_receipts_for_room(room_id)
|
queue.flush_read_receipts_for_room(room_id)
|
||||||
|
|
||||||
@logcontext.preserve_fn # the caller should not yield on this
|
@preserve_fn # the caller should not yield on this
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def send_presence(self, states):
|
def send_presence(self, states):
|
||||||
"""Send the new presence states to the appropriate destinations.
|
"""Send the new presence states to the appropriate destinations.
|
||||||
|
|||||||
@@ -63,8 +63,6 @@ class TransactionManager(object):
|
|||||||
len(edus),
|
len(edus),
|
||||||
)
|
)
|
||||||
|
|
||||||
logger.debug("TX [%s] Persisting transaction...", destination)
|
|
||||||
|
|
||||||
transaction = Transaction.create_new(
|
transaction = Transaction.create_new(
|
||||||
origin_server_ts=int(self.clock.time_msec()),
|
origin_server_ts=int(self.clock.time_msec()),
|
||||||
transaction_id=txn_id,
|
transaction_id=txn_id,
|
||||||
@@ -76,9 +74,6 @@ class TransactionManager(object):
|
|||||||
|
|
||||||
self._next_txn_id += 1
|
self._next_txn_id += 1
|
||||||
|
|
||||||
yield self._transaction_actions.prepare_to_send(transaction)
|
|
||||||
|
|
||||||
logger.debug("TX [%s] Persisted transaction", destination)
|
|
||||||
logger.info(
|
logger.info(
|
||||||
"TX [%s] {%s} Sending transaction [%s]," " (PDUs: %d, EDUs: %d)",
|
"TX [%s] {%s} Sending transaction [%s]," " (PDUs: %d, EDUs: %d)",
|
||||||
destination,
|
destination,
|
||||||
@@ -118,10 +113,6 @@ class TransactionManager(object):
|
|||||||
|
|
||||||
logger.info("TX [%s] {%s} got %d response", destination, txn_id, code)
|
logger.info("TX [%s] {%s} got %d response", destination, txn_id, code)
|
||||||
|
|
||||||
yield self._transaction_actions.delivered(transaction, code, response)
|
|
||||||
|
|
||||||
logger.debug("TX [%s] {%s} Marked as delivered", destination, txn_id)
|
|
||||||
|
|
||||||
if code == 200:
|
if code == 200:
|
||||||
for e_id, r in response.get("pdus", {}).items():
|
for e_id, r in response.get("pdus", {}).items():
|
||||||
if "error" in r:
|
if "error" in r:
|
||||||
|
|||||||
@@ -22,7 +22,7 @@ from twisted.internet import defer
|
|||||||
|
|
||||||
from synapse.api.constants import Membership
|
from synapse.api.constants import Membership
|
||||||
from synapse.api.urls import FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX
|
from synapse.api.urls import FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX
|
||||||
from synapse.util.logutils import log_function
|
from synapse.logging.utils import log_function
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|||||||
@@ -36,8 +36,8 @@ from synapse.http.servlet import (
|
|||||||
parse_json_object_from_request,
|
parse_json_object_from_request,
|
||||||
parse_string_from_args,
|
parse_string_from_args,
|
||||||
)
|
)
|
||||||
|
from synapse.logging.context import run_in_background
|
||||||
from synapse.types import ThirdPartyInstanceID, get_domain_from_id
|
from synapse.types import ThirdPartyInstanceID, get_domain_from_id
|
||||||
from synapse.util.logcontext import run_in_background
|
|
||||||
from synapse.util.ratelimitutils import FederationRateLimiter
|
from synapse.util.ratelimitutils import FederationRateLimiter
|
||||||
from synapse.util.versionstring import get_version_string
|
from synapse.util.versionstring import get_version_string
|
||||||
|
|
||||||
|
|||||||
@@ -43,9 +43,9 @@ from signedjson.sign import sign_json
|
|||||||
from twisted.internet import defer
|
from twisted.internet import defer
|
||||||
|
|
||||||
from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
|
from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
|
||||||
|
from synapse.logging.context import run_in_background
|
||||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||||
from synapse.types import get_domain_from_id
|
from synapse.types import get_domain_from_id
|
||||||
from synapse.util.logcontext import run_in_background
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|||||||
@@ -22,9 +22,10 @@ from email.mime.text import MIMEText
|
|||||||
from twisted.internet import defer
|
from twisted.internet import defer
|
||||||
|
|
||||||
from synapse.api.errors import StoreError
|
from synapse.api.errors import StoreError
|
||||||
|
from synapse.logging.context import make_deferred_yieldable
|
||||||
|
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||||
from synapse.types import UserID
|
from synapse.types import UserID
|
||||||
from synapse.util import stringutils
|
from synapse.util import stringutils
|
||||||
from synapse.util.logcontext import make_deferred_yieldable
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
from synapse.push.mailer import load_jinja2_templates
|
from synapse.push.mailer import load_jinja2_templates
|
||||||
@@ -67,7 +68,14 @@ class AccountValidityHandler(object):
|
|||||||
)
|
)
|
||||||
|
|
||||||
# Check the renewal emails to send and send them every 30min.
|
# Check the renewal emails to send and send them every 30min.
|
||||||
self.clock.looping_call(self.send_renewal_emails, 30 * 60 * 1000)
|
def send_emails():
|
||||||
|
# run as a background process to make sure that the database transactions
|
||||||
|
# have a logcontext to report to
|
||||||
|
return run_as_background_process(
|
||||||
|
"send_renewals", self.send_renewal_emails
|
||||||
|
)
|
||||||
|
|
||||||
|
self.clock.looping_call(send_emails, 30 * 60 * 1000)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def send_renewal_emails(self):
|
def send_renewal_emails(self):
|
||||||
|
|||||||
@@ -23,13 +23,13 @@ from twisted.internet import defer
|
|||||||
|
|
||||||
import synapse
|
import synapse
|
||||||
from synapse.api.constants import EventTypes
|
from synapse.api.constants import EventTypes
|
||||||
|
from synapse.logging.context import make_deferred_yieldable, run_in_background
|
||||||
from synapse.metrics import (
|
from synapse.metrics import (
|
||||||
event_processing_loop_counter,
|
event_processing_loop_counter,
|
||||||
event_processing_loop_room_count,
|
event_processing_loop_room_count,
|
||||||
)
|
)
|
||||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||||
from synapse.util import log_failure
|
from synapse.util import log_failure
|
||||||
from synapse.util.logcontext import make_deferred_yieldable, run_in_background
|
|
||||||
from synapse.util.metrics import Measure
|
from synapse.util.metrics import Measure
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|||||||
@@ -36,9 +36,9 @@ from synapse.api.errors import (
|
|||||||
SynapseError,
|
SynapseError,
|
||||||
)
|
)
|
||||||
from synapse.api.ratelimiting import Ratelimiter
|
from synapse.api.ratelimiting import Ratelimiter
|
||||||
|
from synapse.logging.context import defer_to_thread
|
||||||
from synapse.module_api import ModuleApi
|
from synapse.module_api import ModuleApi
|
||||||
from synapse.types import UserID
|
from synapse.types import UserID
|
||||||
from synapse.util import logcontext
|
|
||||||
from synapse.util.caches.expiringcache import ExpiringCache
|
from synapse.util.caches.expiringcache import ExpiringCache
|
||||||
|
|
||||||
from ._base import BaseHandler
|
from ._base import BaseHandler
|
||||||
@@ -987,7 +987,7 @@ class AuthHandler(BaseHandler):
|
|||||||
bcrypt.gensalt(self.bcrypt_rounds),
|
bcrypt.gensalt(self.bcrypt_rounds),
|
||||||
).decode("ascii")
|
).decode("ascii")
|
||||||
|
|
||||||
return logcontext.defer_to_thread(self.hs.get_reactor(), _do_hash)
|
return defer_to_thread(self.hs.get_reactor(), _do_hash)
|
||||||
|
|
||||||
def validate_hash(self, password, stored_hash):
|
def validate_hash(self, password, stored_hash):
|
||||||
"""Validates that self.hash(password) == stored_hash.
|
"""Validates that self.hash(password) == stored_hash.
|
||||||
@@ -1013,7 +1013,7 @@ class AuthHandler(BaseHandler):
|
|||||||
if not isinstance(stored_hash, bytes):
|
if not isinstance(stored_hash, bytes):
|
||||||
stored_hash = stored_hash.encode("ascii")
|
stored_hash = stored_hash.encode("ascii")
|
||||||
|
|
||||||
return logcontext.defer_to_thread(self.hs.get_reactor(), _do_validate_hash)
|
return defer_to_thread(self.hs.get_reactor(), _do_validate_hash)
|
||||||
else:
|
else:
|
||||||
return defer.succeed(False)
|
return defer.succeed(False)
|
||||||
|
|
||||||
|
|||||||
@@ -22,9 +22,9 @@ from canonicaljson import encode_canonical_json, json
|
|||||||
|
|
||||||
from twisted.internet import defer
|
from twisted.internet import defer
|
||||||
|
|
||||||
from synapse.api.errors import CodeMessageException, FederationDeniedError, SynapseError
|
from synapse.api.errors import CodeMessageException, SynapseError
|
||||||
|
from synapse.logging.context import make_deferred_yieldable, run_in_background
|
||||||
from synapse.types import UserID, get_domain_from_id
|
from synapse.types import UserID, get_domain_from_id
|
||||||
from synapse.util.logcontext import make_deferred_yieldable, run_in_background
|
|
||||||
from synapse.util.retryutils import NotRetryingDestination
|
from synapse.util.retryutils import NotRetryingDestination
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
@@ -350,9 +350,6 @@ def _exception_to_failure(e):
|
|||||||
if isinstance(e, NotRetryingDestination):
|
if isinstance(e, NotRetryingDestination):
|
||||||
return {"status": 503, "message": "Not ready for retry"}
|
return {"status": 503, "message": "Not ready for retry"}
|
||||||
|
|
||||||
if isinstance(e, FederationDeniedError):
|
|
||||||
return {"status": 403, "message": "Federation Denied"}
|
|
||||||
|
|
||||||
# include ConnectionRefused and other errors
|
# include ConnectionRefused and other errors
|
||||||
#
|
#
|
||||||
# Note that some Exceptions (notably twisted's ResponseFailed etc) don't
|
# Note that some Exceptions (notably twisted's ResponseFailed etc) don't
|
||||||
|
|||||||
@@ -21,8 +21,8 @@ from twisted.internet import defer
|
|||||||
from synapse.api.constants import EventTypes, Membership
|
from synapse.api.constants import EventTypes, Membership
|
||||||
from synapse.api.errors import AuthError, SynapseError
|
from synapse.api.errors import AuthError, SynapseError
|
||||||
from synapse.events import EventBase
|
from synapse.events import EventBase
|
||||||
|
from synapse.logging.utils import log_function
|
||||||
from synapse.types import UserID
|
from synapse.types import UserID
|
||||||
from synapse.util.logutils import log_function
|
|
||||||
from synapse.visibility import filter_events_for_client
|
from synapse.visibility import filter_events_for_client
|
||||||
|
|
||||||
from ._base import BaseHandler
|
from ._base import BaseHandler
|
||||||
|
|||||||
@@ -45,6 +45,13 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
|
|||||||
from synapse.crypto.event_signing import compute_event_signature
|
from synapse.crypto.event_signing import compute_event_signature
|
||||||
from synapse.event_auth import auth_types_for_event
|
from synapse.event_auth import auth_types_for_event
|
||||||
from synapse.events.validator import EventValidator
|
from synapse.events.validator import EventValidator
|
||||||
|
from synapse.logging.context import (
|
||||||
|
make_deferred_yieldable,
|
||||||
|
nested_logging_context,
|
||||||
|
preserve_fn,
|
||||||
|
run_in_background,
|
||||||
|
)
|
||||||
|
from synapse.logging.utils import log_function
|
||||||
from synapse.replication.http.federation import (
|
from synapse.replication.http.federation import (
|
||||||
ReplicationCleanRoomRestServlet,
|
ReplicationCleanRoomRestServlet,
|
||||||
ReplicationFederationSendEventsRestServlet,
|
ReplicationFederationSendEventsRestServlet,
|
||||||
@@ -52,10 +59,9 @@ from synapse.replication.http.federation import (
|
|||||||
from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet
|
from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet
|
||||||
from synapse.state import StateResolutionStore, resolve_events_with_store
|
from synapse.state import StateResolutionStore, resolve_events_with_store
|
||||||
from synapse.types import UserID, get_domain_from_id
|
from synapse.types import UserID, get_domain_from_id
|
||||||
from synapse.util import logcontext, unwrapFirstError
|
from synapse.util import unwrapFirstError
|
||||||
from synapse.util.async_helpers import Linearizer
|
from synapse.util.async_helpers import Linearizer
|
||||||
from synapse.util.distributor import user_joined_room
|
from synapse.util.distributor import user_joined_room
|
||||||
from synapse.util.logutils import log_function
|
|
||||||
from synapse.util.retryutils import NotRetryingDestination
|
from synapse.util.retryutils import NotRetryingDestination
|
||||||
from synapse.visibility import filter_events_for_server
|
from synapse.visibility import filter_events_for_server
|
||||||
|
|
||||||
@@ -338,7 +344,7 @@ class FederationHandler(BaseHandler):
|
|||||||
|
|
||||||
room_version = yield self.store.get_room_version(room_id)
|
room_version = yield self.store.get_room_version(room_id)
|
||||||
|
|
||||||
with logcontext.nested_logging_context(p):
|
with nested_logging_context(p):
|
||||||
# note that if any of the missing prevs share missing state or
|
# note that if any of the missing prevs share missing state or
|
||||||
# auth events, the requests to fetch those events are deduped
|
# auth events, the requests to fetch those events are deduped
|
||||||
# by the get_pdu_cache in federation_client.
|
# by the get_pdu_cache in federation_client.
|
||||||
@@ -532,7 +538,7 @@ class FederationHandler(BaseHandler):
|
|||||||
event_id,
|
event_id,
|
||||||
ev.event_id,
|
ev.event_id,
|
||||||
)
|
)
|
||||||
with logcontext.nested_logging_context(ev.event_id):
|
with nested_logging_context(ev.event_id):
|
||||||
try:
|
try:
|
||||||
yield self.on_receive_pdu(origin, ev, sent_to_us_directly=False)
|
yield self.on_receive_pdu(origin, ev, sent_to_us_directly=False)
|
||||||
except FederationError as e:
|
except FederationError as e:
|
||||||
@@ -725,10 +731,10 @@ class FederationHandler(BaseHandler):
|
|||||||
missing_auth - failed_to_fetch,
|
missing_auth - failed_to_fetch,
|
||||||
)
|
)
|
||||||
|
|
||||||
results = yield logcontext.make_deferred_yieldable(
|
results = yield make_deferred_yieldable(
|
||||||
defer.gatherResults(
|
defer.gatherResults(
|
||||||
[
|
[
|
||||||
logcontext.run_in_background(
|
run_in_background(
|
||||||
self.federation_client.get_pdu,
|
self.federation_client.get_pdu,
|
||||||
[dest],
|
[dest],
|
||||||
event_id,
|
event_id,
|
||||||
@@ -994,10 +1000,8 @@ class FederationHandler(BaseHandler):
|
|||||||
event_ids = list(extremities.keys())
|
event_ids = list(extremities.keys())
|
||||||
|
|
||||||
logger.debug("calling resolve_state_groups in _maybe_backfill")
|
logger.debug("calling resolve_state_groups in _maybe_backfill")
|
||||||
resolve = logcontext.preserve_fn(
|
resolve = preserve_fn(self.state_handler.resolve_state_groups_for_events)
|
||||||
self.state_handler.resolve_state_groups_for_events
|
states = yield make_deferred_yieldable(
|
||||||
)
|
|
||||||
states = yield logcontext.make_deferred_yieldable(
|
|
||||||
defer.gatherResults(
|
defer.gatherResults(
|
||||||
[resolve(room_id, [e]) for e in event_ids], consumeErrors=True
|
[resolve(room_id, [e]) for e in event_ids], consumeErrors=True
|
||||||
)
|
)
|
||||||
@@ -1171,7 +1175,7 @@ class FederationHandler(BaseHandler):
|
|||||||
# lots of requests for missing prev_events which we do actually
|
# lots of requests for missing prev_events which we do actually
|
||||||
# have. Hence we fire off the deferred, but don't wait for it.
|
# have. Hence we fire off the deferred, but don't wait for it.
|
||||||
|
|
||||||
logcontext.run_in_background(self._handle_queued_pdus, room_queue)
|
run_in_background(self._handle_queued_pdus, room_queue)
|
||||||
|
|
||||||
defer.returnValue(True)
|
defer.returnValue(True)
|
||||||
|
|
||||||
@@ -1191,7 +1195,7 @@ class FederationHandler(BaseHandler):
|
|||||||
p.event_id,
|
p.event_id,
|
||||||
p.room_id,
|
p.room_id,
|
||||||
)
|
)
|
||||||
with logcontext.nested_logging_context(p.event_id):
|
with nested_logging_context(p.event_id):
|
||||||
yield self.on_receive_pdu(origin, p, sent_to_us_directly=True)
|
yield self.on_receive_pdu(origin, p, sent_to_us_directly=True)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.warn(
|
logger.warn(
|
||||||
@@ -1610,7 +1614,7 @@ class FederationHandler(BaseHandler):
|
|||||||
success = True
|
success = True
|
||||||
finally:
|
finally:
|
||||||
if not success:
|
if not success:
|
||||||
logcontext.run_in_background(
|
run_in_background(
|
||||||
self.store.remove_push_actions_from_staging, event.event_id
|
self.store.remove_push_actions_from_staging, event.event_id
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -1629,7 +1633,7 @@ class FederationHandler(BaseHandler):
|
|||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def prep(ev_info):
|
def prep(ev_info):
|
||||||
event = ev_info["event"]
|
event = ev_info["event"]
|
||||||
with logcontext.nested_logging_context(suffix=event.event_id):
|
with nested_logging_context(suffix=event.event_id):
|
||||||
res = yield self._prep_event(
|
res = yield self._prep_event(
|
||||||
origin,
|
origin,
|
||||||
event,
|
event,
|
||||||
@@ -1639,12 +1643,9 @@ class FederationHandler(BaseHandler):
|
|||||||
)
|
)
|
||||||
defer.returnValue(res)
|
defer.returnValue(res)
|
||||||
|
|
||||||
contexts = yield logcontext.make_deferred_yieldable(
|
contexts = yield make_deferred_yieldable(
|
||||||
defer.gatherResults(
|
defer.gatherResults(
|
||||||
[
|
[run_in_background(prep, ev_info) for ev_info in event_infos],
|
||||||
logcontext.run_in_background(prep, ev_info)
|
|
||||||
for ev_info in event_infos
|
|
||||||
],
|
|
||||||
consumeErrors=True,
|
consumeErrors=True,
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
@@ -2106,10 +2107,10 @@ class FederationHandler(BaseHandler):
|
|||||||
|
|
||||||
room_version = yield self.store.get_room_version(event.room_id)
|
room_version = yield self.store.get_room_version(event.room_id)
|
||||||
|
|
||||||
different_events = yield logcontext.make_deferred_yieldable(
|
different_events = yield make_deferred_yieldable(
|
||||||
defer.gatherResults(
|
defer.gatherResults(
|
||||||
[
|
[
|
||||||
logcontext.run_in_background(
|
run_in_background(
|
||||||
self.store.get_event, d, allow_none=True, allow_rejected=False
|
self.store.get_event, d, allow_none=True, allow_rejected=False
|
||||||
)
|
)
|
||||||
for d in different_auth
|
for d in different_auth
|
||||||
|
|||||||
@@ -21,12 +21,12 @@ from synapse.api.constants import EventTypes, Membership
|
|||||||
from synapse.api.errors import AuthError, Codes, SynapseError
|
from synapse.api.errors import AuthError, Codes, SynapseError
|
||||||
from synapse.events.validator import EventValidator
|
from synapse.events.validator import EventValidator
|
||||||
from synapse.handlers.presence import format_user_presence_state
|
from synapse.handlers.presence import format_user_presence_state
|
||||||
|
from synapse.logging.context import make_deferred_yieldable, run_in_background
|
||||||
from synapse.streams.config import PaginationConfig
|
from synapse.streams.config import PaginationConfig
|
||||||
from synapse.types import StreamToken, UserID
|
from synapse.types import StreamToken, UserID
|
||||||
from synapse.util import unwrapFirstError
|
from synapse.util import unwrapFirstError
|
||||||
from synapse.util.async_helpers import concurrently_execute
|
from synapse.util.async_helpers import concurrently_execute
|
||||||
from synapse.util.caches.snapshot_cache import SnapshotCache
|
from synapse.util.caches.snapshot_cache import SnapshotCache
|
||||||
from synapse.util.logcontext import make_deferred_yieldable, run_in_background
|
|
||||||
from synapse.visibility import filter_events_for_client
|
from synapse.visibility import filter_events_for_client
|
||||||
|
|
||||||
from ._base import BaseHandler
|
from ._base import BaseHandler
|
||||||
|
|||||||
@@ -34,13 +34,13 @@ from synapse.api.errors import (
|
|||||||
from synapse.api.room_versions import RoomVersions
|
from synapse.api.room_versions import RoomVersions
|
||||||
from synapse.api.urls import ConsentURIBuilder
|
from synapse.api.urls import ConsentURIBuilder
|
||||||
from synapse.events.validator import EventValidator
|
from synapse.events.validator import EventValidator
|
||||||
|
from synapse.logging.context import run_in_background
|
||||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||||
from synapse.replication.http.send_event import ReplicationSendEventRestServlet
|
from synapse.replication.http.send_event import ReplicationSendEventRestServlet
|
||||||
from synapse.storage.state import StateFilter
|
from synapse.storage.state import StateFilter
|
||||||
from synapse.types import RoomAlias, UserID, create_requester
|
from synapse.types import RoomAlias, UserID, create_requester
|
||||||
from synapse.util.async_helpers import Linearizer
|
from synapse.util.async_helpers import Linearizer
|
||||||
from synapse.util.frozenutils import frozendict_json_encoder
|
from synapse.util.frozenutils import frozendict_json_encoder
|
||||||
from synapse.util.logcontext import run_in_background
|
|
||||||
from synapse.util.metrics import measure_func
|
from synapse.util.metrics import measure_func
|
||||||
from synapse.visibility import filter_events_for_client
|
from synapse.visibility import filter_events_for_client
|
||||||
|
|
||||||
|
|||||||
@@ -20,10 +20,10 @@ from twisted.python.failure import Failure
|
|||||||
|
|
||||||
from synapse.api.constants import EventTypes, Membership
|
from synapse.api.constants import EventTypes, Membership
|
||||||
from synapse.api.errors import SynapseError
|
from synapse.api.errors import SynapseError
|
||||||
|
from synapse.logging.context import run_in_background
|
||||||
from synapse.storage.state import StateFilter
|
from synapse.storage.state import StateFilter
|
||||||
from synapse.types import RoomStreamToken
|
from synapse.types import RoomStreamToken
|
||||||
from synapse.util.async_helpers import ReadWriteLock
|
from synapse.util.async_helpers import ReadWriteLock
|
||||||
from synapse.util.logcontext import run_in_background
|
|
||||||
from synapse.util.stringutils import random_string
|
from synapse.util.stringutils import random_string
|
||||||
from synapse.visibility import filter_events_for_client
|
from synapse.visibility import filter_events_for_client
|
||||||
|
|
||||||
|
|||||||
@@ -34,14 +34,14 @@ from twisted.internet import defer
|
|||||||
import synapse.metrics
|
import synapse.metrics
|
||||||
from synapse.api.constants import EventTypes, Membership, PresenceState
|
from synapse.api.constants import EventTypes, Membership, PresenceState
|
||||||
from synapse.api.errors import SynapseError
|
from synapse.api.errors import SynapseError
|
||||||
|
from synapse.logging.context import run_in_background
|
||||||
|
from synapse.logging.utils import log_function
|
||||||
from synapse.metrics import LaterGauge
|
from synapse.metrics import LaterGauge
|
||||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||||
from synapse.storage.presence import UserPresenceState
|
from synapse.storage.presence import UserPresenceState
|
||||||
from synapse.types import UserID, get_domain_from_id
|
from synapse.types import UserID, get_domain_from_id
|
||||||
from synapse.util.async_helpers import Linearizer
|
from synapse.util.async_helpers import Linearizer
|
||||||
from synapse.util.caches.descriptors import cachedInlineCallbacks
|
from synapse.util.caches.descriptors import cachedInlineCallbacks
|
||||||
from synapse.util.logcontext import run_in_background
|
|
||||||
from synapse.util.logutils import log_function
|
|
||||||
from synapse.util.metrics import Measure
|
from synapse.util.metrics import Measure
|
||||||
from synapse.util.wheel_timer import WheelTimer
|
from synapse.util.wheel_timer import WheelTimer
|
||||||
|
|
||||||
|
|||||||
@@ -303,6 +303,10 @@ class BaseProfileHandler(BaseHandler):
|
|||||||
if not self.hs.config.require_auth_for_profile_requests or not requester:
|
if not self.hs.config.require_auth_for_profile_requests or not requester:
|
||||||
return
|
return
|
||||||
|
|
||||||
|
# Always allow the user to query their own profile.
|
||||||
|
if target_user.to_string() == requester.to_string():
|
||||||
|
return
|
||||||
|
|
||||||
try:
|
try:
|
||||||
requester_rooms = yield self.store.get_rooms_for_user(requester.to_string())
|
requester_rooms = yield self.store.get_rooms_for_user(requester.to_string())
|
||||||
target_user_rooms = yield self.store.get_rooms_for_user(
|
target_user_rooms = yield self.store.get_rooms_for_user(
|
||||||
|
|||||||
+10
-105
@@ -138,11 +138,10 @@ class RegistrationHandler(BaseHandler):
|
|||||||
)
|
)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def register(
|
def register_user(
|
||||||
self,
|
self,
|
||||||
localpart=None,
|
localpart=None,
|
||||||
password=None,
|
password=None,
|
||||||
generate_token=True,
|
|
||||||
guest_access_token=None,
|
guest_access_token=None,
|
||||||
make_guest=False,
|
make_guest=False,
|
||||||
admin=False,
|
admin=False,
|
||||||
@@ -160,11 +159,6 @@ class RegistrationHandler(BaseHandler):
|
|||||||
password (unicode) : The password to assign to this user so they can
|
password (unicode) : The password to assign to this user so they can
|
||||||
login again. This can be None which means they cannot login again
|
login again. This can be None which means they cannot login again
|
||||||
via a password (e.g. the user is an application service user).
|
via a password (e.g. the user is an application service user).
|
||||||
generate_token (bool): Whether a new access token should be
|
|
||||||
generated. Having this be True should be considered deprecated,
|
|
||||||
since it offers no means of associating a device_id with the
|
|
||||||
access_token. Instead you should call auth_handler.issue_access_token
|
|
||||||
after registration.
|
|
||||||
user_type (str|None): type of user. One of the values from
|
user_type (str|None): type of user. One of the values from
|
||||||
api.constants.UserTypes, or None for a normal user.
|
api.constants.UserTypes, or None for a normal user.
|
||||||
default_display_name (unicode|None): if set, the new user's displayname
|
default_display_name (unicode|None): if set, the new user's displayname
|
||||||
@@ -172,7 +166,7 @@ class RegistrationHandler(BaseHandler):
|
|||||||
address (str|None): the IP address used to perform the registration.
|
address (str|None): the IP address used to perform the registration.
|
||||||
bind_emails (List[str]): list of emails to bind to this account.
|
bind_emails (List[str]): list of emails to bind to this account.
|
||||||
Returns:
|
Returns:
|
||||||
A tuple of (user_id, access_token).
|
Deferred[str]: user_id
|
||||||
Raises:
|
Raises:
|
||||||
RegistrationError if there was a problem registering.
|
RegistrationError if there was a problem registering.
|
||||||
"""
|
"""
|
||||||
@@ -206,12 +200,8 @@ class RegistrationHandler(BaseHandler):
|
|||||||
elif default_display_name is None:
|
elif default_display_name is None:
|
||||||
default_display_name = localpart
|
default_display_name = localpart
|
||||||
|
|
||||||
token = None
|
|
||||||
if generate_token:
|
|
||||||
token = self.macaroon_gen.generate_access_token(user_id)
|
|
||||||
yield self.register_with_store(
|
yield self.register_with_store(
|
||||||
user_id=user_id,
|
user_id=user_id,
|
||||||
token=token,
|
|
||||||
password_hash=password_hash,
|
password_hash=password_hash,
|
||||||
was_guest=was_guest,
|
was_guest=was_guest,
|
||||||
make_guest=make_guest,
|
make_guest=make_guest,
|
||||||
@@ -230,21 +220,17 @@ class RegistrationHandler(BaseHandler):
|
|||||||
else:
|
else:
|
||||||
# autogen a sequential user ID
|
# autogen a sequential user ID
|
||||||
attempts = 0
|
attempts = 0
|
||||||
token = None
|
|
||||||
user = None
|
user = None
|
||||||
while not user:
|
while not user:
|
||||||
localpart = yield self._generate_user_id(attempts > 0)
|
localpart = yield self._generate_user_id(attempts > 0)
|
||||||
user = UserID(localpart, self.hs.hostname)
|
user = UserID(localpart, self.hs.hostname)
|
||||||
user_id = user.to_string()
|
user_id = user.to_string()
|
||||||
yield self.check_user_id_not_appservice_exclusive(user_id)
|
yield self.check_user_id_not_appservice_exclusive(user_id)
|
||||||
if generate_token:
|
|
||||||
token = self.macaroon_gen.generate_access_token(user_id)
|
|
||||||
if default_display_name is None:
|
if default_display_name is None:
|
||||||
default_display_name = localpart
|
default_display_name = localpart
|
||||||
try:
|
try:
|
||||||
yield self.register_with_store(
|
yield self.register_with_store(
|
||||||
user_id=user_id,
|
user_id=user_id,
|
||||||
token=token,
|
|
||||||
password_hash=password_hash,
|
password_hash=password_hash,
|
||||||
make_guest=make_guest,
|
make_guest=make_guest,
|
||||||
create_profile_with_displayname=default_display_name,
|
create_profile_with_displayname=default_display_name,
|
||||||
@@ -254,10 +240,15 @@ class RegistrationHandler(BaseHandler):
|
|||||||
# if user id is taken, just generate another
|
# if user id is taken, just generate another
|
||||||
user = None
|
user = None
|
||||||
user_id = None
|
user_id = None
|
||||||
token = None
|
|
||||||
attempts += 1
|
attempts += 1
|
||||||
|
|
||||||
if not self.hs.config.user_consent_at_registration:
|
if not self.hs.config.user_consent_at_registration:
|
||||||
yield self._auto_join_rooms(user_id)
|
yield self._auto_join_rooms(user_id)
|
||||||
|
else:
|
||||||
|
logger.info(
|
||||||
|
"Skipping auto-join for %s because consent is required at registration",
|
||||||
|
user_id,
|
||||||
|
)
|
||||||
|
|
||||||
# Bind any specified emails to this account
|
# Bind any specified emails to this account
|
||||||
current_time = self.hs.get_clock().time_msec()
|
current_time = self.hs.get_clock().time_msec()
|
||||||
@@ -272,7 +263,7 @@ class RegistrationHandler(BaseHandler):
|
|||||||
# Bind email to new account
|
# Bind email to new account
|
||||||
yield self._register_email_threepid(user_id, threepid_dict, None, False)
|
yield self._register_email_threepid(user_id, threepid_dict, None, False)
|
||||||
|
|
||||||
defer.returnValue((user_id, token))
|
defer.returnValue(user_id)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def _auto_join_rooms(self, user_id):
|
def _auto_join_rooms(self, user_id):
|
||||||
@@ -298,6 +289,7 @@ class RegistrationHandler(BaseHandler):
|
|||||||
count = yield self.store.count_all_users()
|
count = yield self.store.count_all_users()
|
||||||
should_auto_create_rooms = count == 1
|
should_auto_create_rooms = count == 1
|
||||||
for r in self.hs.config.auto_join_rooms:
|
for r in self.hs.config.auto_join_rooms:
|
||||||
|
logger.info("Auto-joining %s to %s", user_id, r)
|
||||||
try:
|
try:
|
||||||
if should_auto_create_rooms:
|
if should_auto_create_rooms:
|
||||||
room_alias = RoomAlias.from_string(r)
|
room_alias = RoomAlias.from_string(r)
|
||||||
@@ -505,87 +497,6 @@ class RegistrationHandler(BaseHandler):
|
|||||||
)
|
)
|
||||||
defer.returnValue(data)
|
defer.returnValue(data)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def get_or_create_user(self, requester, localpart, displayname, password_hash=None):
|
|
||||||
"""Creates a new user if the user does not exist,
|
|
||||||
else revokes all previous access tokens and generates a new one.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
localpart : The local part of the user ID to register. If None,
|
|
||||||
one will be randomly generated.
|
|
||||||
Returns:
|
|
||||||
A tuple of (user_id, access_token).
|
|
||||||
Raises:
|
|
||||||
RegistrationError if there was a problem registering.
|
|
||||||
|
|
||||||
NB this is only used in tests. TODO: move it to the test package!
|
|
||||||
"""
|
|
||||||
if localpart is None:
|
|
||||||
raise SynapseError(400, "Request must include user id")
|
|
||||||
yield self.auth.check_auth_blocking()
|
|
||||||
need_register = True
|
|
||||||
|
|
||||||
try:
|
|
||||||
yield self.check_username(localpart)
|
|
||||||
except SynapseError as e:
|
|
||||||
if e.errcode == Codes.USER_IN_USE:
|
|
||||||
need_register = False
|
|
||||||
else:
|
|
||||||
raise
|
|
||||||
|
|
||||||
user = UserID(localpart, self.hs.hostname)
|
|
||||||
user_id = user.to_string()
|
|
||||||
token = self.macaroon_gen.generate_access_token(user_id)
|
|
||||||
|
|
||||||
if need_register:
|
|
||||||
yield self.register_with_store(
|
|
||||||
user_id=user_id,
|
|
||||||
token=token,
|
|
||||||
password_hash=password_hash,
|
|
||||||
create_profile_with_displayname=user.localpart,
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
yield self._auth_handler.delete_access_tokens_for_user(user_id)
|
|
||||||
yield self.store.add_access_token_to_user(user_id=user_id, token=token)
|
|
||||||
|
|
||||||
if displayname is not None:
|
|
||||||
logger.info("setting user display name: %s -> %s", user_id, displayname)
|
|
||||||
yield self.profile_handler.set_displayname(
|
|
||||||
user, requester, displayname, by_admin=True
|
|
||||||
)
|
|
||||||
|
|
||||||
defer.returnValue((user_id, token))
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def get_or_register_3pid_guest(self, medium, address, inviter_user_id):
|
|
||||||
"""Get a guest access token for a 3PID, creating a guest account if
|
|
||||||
one doesn't already exist.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
medium (str)
|
|
||||||
address (str)
|
|
||||||
inviter_user_id (str): The user ID who is trying to invite the
|
|
||||||
3PID
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Deferred[(str, str)]: A 2-tuple of `(user_id, access_token)` of the
|
|
||||||
3PID guest account.
|
|
||||||
"""
|
|
||||||
access_token = yield self.store.get_3pid_guest_access_token(medium, address)
|
|
||||||
if access_token:
|
|
||||||
user_info = yield self.auth.get_user_by_access_token(access_token)
|
|
||||||
|
|
||||||
defer.returnValue((user_info["user"].to_string(), access_token))
|
|
||||||
|
|
||||||
user_id, access_token = yield self.register(
|
|
||||||
generate_token=True, make_guest=True
|
|
||||||
)
|
|
||||||
access_token = yield self.store.save_or_get_3pid_guest_access_token(
|
|
||||||
medium, address, access_token, inviter_user_id
|
|
||||||
)
|
|
||||||
|
|
||||||
defer.returnValue((user_id, access_token))
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def _join_user_to_room(self, requester, room_identifier):
|
def _join_user_to_room(self, requester, room_identifier):
|
||||||
room_id = None
|
room_id = None
|
||||||
@@ -615,7 +526,6 @@ class RegistrationHandler(BaseHandler):
|
|||||||
def register_with_store(
|
def register_with_store(
|
||||||
self,
|
self,
|
||||||
user_id,
|
user_id,
|
||||||
token=None,
|
|
||||||
password_hash=None,
|
password_hash=None,
|
||||||
was_guest=False,
|
was_guest=False,
|
||||||
make_guest=False,
|
make_guest=False,
|
||||||
@@ -629,9 +539,6 @@ class RegistrationHandler(BaseHandler):
|
|||||||
|
|
||||||
Args:
|
Args:
|
||||||
user_id (str): The desired user ID to register.
|
user_id (str): The desired user ID to register.
|
||||||
token (str): The desired access token to use for this user. If this
|
|
||||||
is not None, the given access token is associated with the user
|
|
||||||
id.
|
|
||||||
password_hash (str|None): Optional. The password hash for this user.
|
password_hash (str|None): Optional. The password hash for this user.
|
||||||
was_guest (bool): Optional. Whether this is a guest account being
|
was_guest (bool): Optional. Whether this is a guest account being
|
||||||
upgraded to a non-guest account.
|
upgraded to a non-guest account.
|
||||||
@@ -667,7 +574,6 @@ class RegistrationHandler(BaseHandler):
|
|||||||
if self.hs.config.worker_app:
|
if self.hs.config.worker_app:
|
||||||
return self._register_client(
|
return self._register_client(
|
||||||
user_id=user_id,
|
user_id=user_id,
|
||||||
token=token,
|
|
||||||
password_hash=password_hash,
|
password_hash=password_hash,
|
||||||
was_guest=was_guest,
|
was_guest=was_guest,
|
||||||
make_guest=make_guest,
|
make_guest=make_guest,
|
||||||
@@ -680,7 +586,6 @@ class RegistrationHandler(BaseHandler):
|
|||||||
else:
|
else:
|
||||||
return self.store.register(
|
return self.store.register(
|
||||||
user_id=user_id,
|
user_id=user_id,
|
||||||
token=token,
|
|
||||||
password_hash=password_hash,
|
password_hash=password_hash,
|
||||||
was_guest=was_guest,
|
was_guest=was_guest,
|
||||||
make_guest=make_guest,
|
make_guest=make_guest,
|
||||||
|
|||||||
@@ -29,7 +29,7 @@ from twisted.internet import defer
|
|||||||
import synapse.server
|
import synapse.server
|
||||||
import synapse.types
|
import synapse.types
|
||||||
from synapse.api.constants import EventTypes, Membership
|
from synapse.api.constants import EventTypes, Membership
|
||||||
from synapse.api.errors import AuthError, Codes, SynapseError
|
from synapse.api.errors import AuthError, Codes, HttpResponseException, SynapseError
|
||||||
from synapse.types import RoomID, UserID
|
from synapse.types import RoomID, UserID
|
||||||
from synapse.util.async_helpers import Linearizer
|
from synapse.util.async_helpers import Linearizer
|
||||||
from synapse.util.distributor import user_joined_room, user_left_room
|
from synapse.util.distributor import user_joined_room, user_left_room
|
||||||
@@ -118,24 +118,6 @@ class RoomMemberHandler(object):
|
|||||||
"""
|
"""
|
||||||
raise NotImplementedError()
|
raise NotImplementedError()
|
||||||
|
|
||||||
@abc.abstractmethod
|
|
||||||
def get_or_register_3pid_guest(self, requester, medium, address, inviter_user_id):
|
|
||||||
"""Get a guest access token for a 3PID, creating a guest account if
|
|
||||||
one doesn't already exist.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
requester (Requester)
|
|
||||||
medium (str)
|
|
||||||
address (str)
|
|
||||||
inviter_user_id (str): The user ID who is trying to invite the
|
|
||||||
3PID
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Deferred[(str, str)]: A 2-tuple of `(user_id, access_token)` of the
|
|
||||||
3PID guest account.
|
|
||||||
"""
|
|
||||||
raise NotImplementedError()
|
|
||||||
|
|
||||||
@abc.abstractmethod
|
@abc.abstractmethod
|
||||||
def _user_joined_room(self, target, room_id):
|
def _user_joined_room(self, target, room_id):
|
||||||
"""Notifies distributor on master process that the user has joined the
|
"""Notifies distributor on master process that the user has joined the
|
||||||
@@ -890,24 +872,23 @@ class RoomMemberHandler(object):
|
|||||||
"sender_avatar_url": inviter_avatar_url,
|
"sender_avatar_url": inviter_avatar_url,
|
||||||
}
|
}
|
||||||
|
|
||||||
if self.config.invite_3pid_guest:
|
try:
|
||||||
guest_user_id, guest_access_token = yield self.get_or_register_3pid_guest(
|
data = yield self.simple_http_client.post_json_get_json(
|
||||||
requester=requester,
|
is_url, invite_config
|
||||||
medium=medium,
|
)
|
||||||
address=address,
|
except HttpResponseException as e:
|
||||||
inviter_user_id=inviter_user_id,
|
# Some identity servers may only support application/x-www-form-urlencoded
|
||||||
|
# types. This is especially true with old instances of Sydent, see
|
||||||
|
# https://github.com/matrix-org/sydent/pull/170
|
||||||
|
logger.info(
|
||||||
|
"Failed to POST %s with JSON, falling back to urlencoded form: %s",
|
||||||
|
is_url,
|
||||||
|
e,
|
||||||
|
)
|
||||||
|
data = yield self.simple_http_client.post_urlencoded_get_json(
|
||||||
|
is_url, invite_config
|
||||||
)
|
)
|
||||||
|
|
||||||
invite_config.update(
|
|
||||||
{
|
|
||||||
"guest_access_token": guest_access_token,
|
|
||||||
"guest_user_id": guest_user_id,
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
data = yield self.simple_http_client.post_urlencoded_get_json(
|
|
||||||
is_url, invite_config
|
|
||||||
)
|
|
||||||
# TODO: Check for success
|
# TODO: Check for success
|
||||||
token = data["token"]
|
token = data["token"]
|
||||||
public_keys = data.get("public_keys", [])
|
public_keys = data.get("public_keys", [])
|
||||||
@@ -1010,12 +991,6 @@ class RoomMemberMasterHandler(RoomMemberHandler):
|
|||||||
yield self.store.locally_reject_invite(target.to_string(), room_id)
|
yield self.store.locally_reject_invite(target.to_string(), room_id)
|
||||||
defer.returnValue({})
|
defer.returnValue({})
|
||||||
|
|
||||||
def get_or_register_3pid_guest(self, requester, medium, address, inviter_user_id):
|
|
||||||
"""Implements RoomMemberHandler.get_or_register_3pid_guest
|
|
||||||
"""
|
|
||||||
rg = self.registration_handler
|
|
||||||
return rg.get_or_register_3pid_guest(medium, address, inviter_user_id)
|
|
||||||
|
|
||||||
def _user_joined_room(self, target, room_id):
|
def _user_joined_room(self, target, room_id):
|
||||||
"""Implements RoomMemberHandler._user_joined_room
|
"""Implements RoomMemberHandler._user_joined_room
|
||||||
"""
|
"""
|
||||||
|
|||||||
@@ -20,7 +20,6 @@ from twisted.internet import defer
|
|||||||
from synapse.api.errors import SynapseError
|
from synapse.api.errors import SynapseError
|
||||||
from synapse.handlers.room_member import RoomMemberHandler
|
from synapse.handlers.room_member import RoomMemberHandler
|
||||||
from synapse.replication.http.membership import (
|
from synapse.replication.http.membership import (
|
||||||
ReplicationRegister3PIDGuestRestServlet as Repl3PID,
|
|
||||||
ReplicationRemoteJoinRestServlet as ReplRemoteJoin,
|
ReplicationRemoteJoinRestServlet as ReplRemoteJoin,
|
||||||
ReplicationRemoteRejectInviteRestServlet as ReplRejectInvite,
|
ReplicationRemoteRejectInviteRestServlet as ReplRejectInvite,
|
||||||
ReplicationUserJoinedLeftRoomRestServlet as ReplJoinedLeft,
|
ReplicationUserJoinedLeftRoomRestServlet as ReplJoinedLeft,
|
||||||
@@ -33,7 +32,6 @@ class RoomMemberWorkerHandler(RoomMemberHandler):
|
|||||||
def __init__(self, hs):
|
def __init__(self, hs):
|
||||||
super(RoomMemberWorkerHandler, self).__init__(hs)
|
super(RoomMemberWorkerHandler, self).__init__(hs)
|
||||||
|
|
||||||
self._get_register_3pid_client = Repl3PID.make_client(hs)
|
|
||||||
self._remote_join_client = ReplRemoteJoin.make_client(hs)
|
self._remote_join_client = ReplRemoteJoin.make_client(hs)
|
||||||
self._remote_reject_client = ReplRejectInvite.make_client(hs)
|
self._remote_reject_client = ReplRejectInvite.make_client(hs)
|
||||||
self._notify_change_client = ReplJoinedLeft.make_client(hs)
|
self._notify_change_client = ReplJoinedLeft.make_client(hs)
|
||||||
@@ -80,13 +78,3 @@ class RoomMemberWorkerHandler(RoomMemberHandler):
|
|||||||
return self._notify_change_client(
|
return self._notify_change_client(
|
||||||
user_id=target.to_string(), room_id=room_id, change="left"
|
user_id=target.to_string(), room_id=room_id, change="left"
|
||||||
)
|
)
|
||||||
|
|
||||||
def get_or_register_3pid_guest(self, requester, medium, address, inviter_user_id):
|
|
||||||
"""Implements RoomMemberHandler.get_or_register_3pid_guest
|
|
||||||
"""
|
|
||||||
return self._get_register_3pid_client(
|
|
||||||
requester=requester,
|
|
||||||
medium=medium,
|
|
||||||
address=address,
|
|
||||||
inviter_user_id=inviter_user_id,
|
|
||||||
)
|
|
||||||
|
|||||||
@@ -25,6 +25,7 @@ from prometheus_client import Counter
|
|||||||
from twisted.internet import defer
|
from twisted.internet import defer
|
||||||
|
|
||||||
from synapse.api.constants import EventTypes, Membership
|
from synapse.api.constants import EventTypes, Membership
|
||||||
|
from synapse.logging.context import LoggingContext
|
||||||
from synapse.push.clientformat import format_push_rules_for_user
|
from synapse.push.clientformat import format_push_rules_for_user
|
||||||
from synapse.storage.roommember import MemberSummary
|
from synapse.storage.roommember import MemberSummary
|
||||||
from synapse.storage.state import StateFilter
|
from synapse.storage.state import StateFilter
|
||||||
@@ -33,7 +34,6 @@ from synapse.util.async_helpers import concurrently_execute
|
|||||||
from synapse.util.caches.expiringcache import ExpiringCache
|
from synapse.util.caches.expiringcache import ExpiringCache
|
||||||
from synapse.util.caches.lrucache import LruCache
|
from synapse.util.caches.lrucache import LruCache
|
||||||
from synapse.util.caches.response_cache import ResponseCache
|
from synapse.util.caches.response_cache import ResponseCache
|
||||||
from synapse.util.logcontext import LoggingContext
|
|
||||||
from synapse.util.metrics import Measure, measure_func
|
from synapse.util.metrics import Measure, measure_func
|
||||||
from synapse.visibility import filter_events_for_client
|
from synapse.visibility import filter_events_for_client
|
||||||
|
|
||||||
|
|||||||
@@ -19,9 +19,9 @@ from collections import namedtuple
|
|||||||
from twisted.internet import defer
|
from twisted.internet import defer
|
||||||
|
|
||||||
from synapse.api.errors import AuthError, SynapseError
|
from synapse.api.errors import AuthError, SynapseError
|
||||||
|
from synapse.logging.context import run_in_background
|
||||||
from synapse.types import UserID, get_domain_from_id
|
from synapse.types import UserID, get_domain_from_id
|
||||||
from synapse.util.caches.stream_change_cache import StreamChangeCache
|
from synapse.util.caches.stream_change_cache import StreamChangeCache
|
||||||
from synapse.util.logcontext import run_in_background
|
|
||||||
from synapse.util.metrics import Measure
|
from synapse.util.metrics import Measure
|
||||||
from synapse.util.wheel_timer import WheelTimer
|
from synapse.util.wheel_timer import WheelTimer
|
||||||
|
|
||||||
|
|||||||
@@ -45,9 +45,9 @@ from synapse.http import (
|
|||||||
cancelled_to_request_timed_out_error,
|
cancelled_to_request_timed_out_error,
|
||||||
redact_uri,
|
redact_uri,
|
||||||
)
|
)
|
||||||
|
from synapse.logging.context import make_deferred_yieldable
|
||||||
from synapse.util.async_helpers import timeout_deferred
|
from synapse.util.async_helpers import timeout_deferred
|
||||||
from synapse.util.caches import CACHE_SIZE_FACTOR
|
from synapse.util.caches import CACHE_SIZE_FACTOR
|
||||||
from synapse.util.logcontext import make_deferred_yieldable
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|||||||
@@ -30,9 +30,9 @@ from twisted.web.http_headers import Headers
|
|||||||
from twisted.web.iweb import IAgent
|
from twisted.web.iweb import IAgent
|
||||||
|
|
||||||
from synapse.http.federation.srv_resolver import SrvResolver, pick_server_from_list
|
from synapse.http.federation.srv_resolver import SrvResolver, pick_server_from_list
|
||||||
|
from synapse.logging.context import make_deferred_yieldable
|
||||||
from synapse.util import Clock
|
from synapse.util import Clock
|
||||||
from synapse.util.caches.ttlcache import TTLCache
|
from synapse.util.caches.ttlcache import TTLCache
|
||||||
from synapse.util.logcontext import make_deferred_yieldable
|
|
||||||
from synapse.util.metrics import Measure
|
from synapse.util.metrics import Measure
|
||||||
|
|
||||||
# period to cache .well-known results for by default
|
# period to cache .well-known results for by default
|
||||||
|
|||||||
@@ -25,7 +25,7 @@ from twisted.internet.error import ConnectError
|
|||||||
from twisted.names import client, dns
|
from twisted.names import client, dns
|
||||||
from twisted.names.error import DNSNameError, DomainError
|
from twisted.names.error import DNSNameError, DomainError
|
||||||
|
|
||||||
from synapse.util.logcontext import make_deferred_yieldable
|
from synapse.logging.context import make_deferred_yieldable
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|||||||
@@ -48,8 +48,8 @@ from synapse.api.errors import (
|
|||||||
from synapse.http import QuieterFileBodyProducer
|
from synapse.http import QuieterFileBodyProducer
|
||||||
from synapse.http.client import BlacklistingAgentWrapper, IPBlacklistingResolver
|
from synapse.http.client import BlacklistingAgentWrapper, IPBlacklistingResolver
|
||||||
from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
|
from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
|
||||||
|
from synapse.logging.context import make_deferred_yieldable
|
||||||
from synapse.util.async_helpers import timeout_deferred
|
from synapse.util.async_helpers import timeout_deferred
|
||||||
from synapse.util.logcontext import make_deferred_yieldable
|
|
||||||
from synapse.util.metrics import Measure
|
from synapse.util.metrics import Measure
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|||||||
@@ -19,8 +19,8 @@ import threading
|
|||||||
|
|
||||||
from prometheus_client.core import Counter, Histogram
|
from prometheus_client.core import Counter, Histogram
|
||||||
|
|
||||||
|
from synapse.logging.context import LoggingContext
|
||||||
from synapse.metrics import LaterGauge
|
from synapse.metrics import LaterGauge
|
||||||
from synapse.util.logcontext import LoggingContext
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|||||||
+17
-11
@@ -39,8 +39,8 @@ from synapse.api.errors import (
|
|||||||
SynapseError,
|
SynapseError,
|
||||||
UnrecognizedRequestError,
|
UnrecognizedRequestError,
|
||||||
)
|
)
|
||||||
|
from synapse.logging.context import preserve_fn
|
||||||
from synapse.util.caches import intern_dict
|
from synapse.util.caches import intern_dict
|
||||||
from synapse.util.logcontext import preserve_fn
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
@@ -65,8 +65,8 @@ def wrap_json_request_handler(h):
|
|||||||
The handler method must have a signature of "handle_foo(self, request)",
|
The handler method must have a signature of "handle_foo(self, request)",
|
||||||
where "request" must be a SynapseRequest.
|
where "request" must be a SynapseRequest.
|
||||||
|
|
||||||
The handler must return a deferred. If the deferred succeeds we assume that
|
The handler must return a deferred or a coroutine. If the deferred succeeds
|
||||||
a response has been sent. If the deferred fails with a SynapseError we use
|
we assume that a response has been sent. If the deferred fails with a SynapseError we use
|
||||||
it to send a JSON response with the appropriate HTTP reponse code. If the
|
it to send a JSON response with the appropriate HTTP reponse code. If the
|
||||||
deferred fails with any other type of error we send a 500 reponse.
|
deferred fails with any other type of error we send a 500 reponse.
|
||||||
"""
|
"""
|
||||||
@@ -353,16 +353,22 @@ class DirectServeResource(resource.Resource):
|
|||||||
"""
|
"""
|
||||||
Render the request, using an asynchronous render handler if it exists.
|
Render the request, using an asynchronous render handler if it exists.
|
||||||
"""
|
"""
|
||||||
render_callback_name = "_async_render_" + request.method.decode("ascii")
|
async_render_callback_name = "_async_render_" + request.method.decode("ascii")
|
||||||
|
|
||||||
if hasattr(self, render_callback_name):
|
# Try and get the async renderer
|
||||||
# Call the handler
|
callback = getattr(self, async_render_callback_name, None)
|
||||||
callback = getattr(self, render_callback_name)
|
|
||||||
defer.ensureDeferred(callback(request))
|
|
||||||
|
|
||||||
return NOT_DONE_YET
|
# No async renderer for this request method.
|
||||||
else:
|
if not callback:
|
||||||
super().render(request)
|
return super().render(request)
|
||||||
|
|
||||||
|
resp = callback(request)
|
||||||
|
|
||||||
|
# If it's a coroutine, turn it into a Deferred
|
||||||
|
if isinstance(resp, types.CoroutineType):
|
||||||
|
defer.ensureDeferred(resp)
|
||||||
|
|
||||||
|
return NOT_DONE_YET
|
||||||
|
|
||||||
|
|
||||||
def _options_handler(request):
|
def _options_handler(request):
|
||||||
|
|||||||
@@ -19,7 +19,7 @@ from twisted.web.server import Request, Site
|
|||||||
|
|
||||||
from synapse.http import redact_uri
|
from synapse.http import redact_uri
|
||||||
from synapse.http.request_metrics import RequestMetrics, requests_counter
|
from synapse.http.request_metrics import RequestMetrics, requests_counter
|
||||||
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
|
from synapse.logging.context import LoggingContext, PreserveLoggingContext
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|||||||
@@ -0,0 +1,693 @@
|
|||||||
|
# Copyright 2014-2016 OpenMarket Ltd
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
""" Thread-local-alike tracking of log contexts within synapse
|
||||||
|
|
||||||
|
This module provides objects and utilities for tracking contexts through
|
||||||
|
synapse code, so that log lines can include a request identifier, and so that
|
||||||
|
CPU and database activity can be accounted for against the request that caused
|
||||||
|
them.
|
||||||
|
|
||||||
|
See doc/log_contexts.rst for details on how this works.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import threading
|
||||||
|
import types
|
||||||
|
|
||||||
|
from twisted.internet import defer, threads
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
try:
|
||||||
|
import resource
|
||||||
|
|
||||||
|
# Python doesn't ship with a definition of RUSAGE_THREAD but it's defined
|
||||||
|
# to be 1 on linux so we hard code it.
|
||||||
|
RUSAGE_THREAD = 1
|
||||||
|
|
||||||
|
# If the system doesn't support RUSAGE_THREAD then this should throw an
|
||||||
|
# exception.
|
||||||
|
resource.getrusage(RUSAGE_THREAD)
|
||||||
|
|
||||||
|
def get_thread_resource_usage():
|
||||||
|
return resource.getrusage(RUSAGE_THREAD)
|
||||||
|
|
||||||
|
|
||||||
|
except Exception:
|
||||||
|
# If the system doesn't support resource.getrusage(RUSAGE_THREAD) then we
|
||||||
|
# won't track resource usage by returning None.
|
||||||
|
def get_thread_resource_usage():
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
# get an id for the current thread.
|
||||||
|
#
|
||||||
|
# threading.get_ident doesn't actually return an OS-level tid, and annoyingly,
|
||||||
|
# on Linux it actually returns the same value either side of a fork() call. However
|
||||||
|
# we only fork in one place, so it's not worth the hoop-jumping to get a real tid.
|
||||||
|
#
|
||||||
|
get_thread_id = threading.get_ident
|
||||||
|
|
||||||
|
|
||||||
|
class ContextResourceUsage(object):
    """Object for tracking the resources used by a log context

    Attributes:
        ru_utime (float): user CPU time (in seconds)
        ru_stime (float): system CPU time (in seconds)
        db_txn_count (int): number of database transactions done
        db_sched_duration_sec (float): amount of time spent waiting for a
            database connection
        db_txn_duration_sec (float): amount of time spent doing database
            transactions (excluding scheduling time)
        evt_db_fetch_count (int): number of events requested from the database
    """

    __slots__ = [
        "ru_stime",
        "ru_utime",
        "db_txn_count",
        "db_txn_duration_sec",
        "db_sched_duration_sec",
        "evt_db_fetch_count",
    ]

    def __init__(self, copy_from=None):
        """Create a new ContextResourceUsage

        Args:
            copy_from (ContextResourceUsage|None): if not None, an object to
                copy stats from
        """
        if copy_from is None:
            self.reset()
        else:
            # copy field-by-field so that updates to one tracker never leak
            # into another
            self.ru_utime = copy_from.ru_utime
            self.ru_stime = copy_from.ru_stime
            self.db_txn_count = copy_from.db_txn_count

            self.db_txn_duration_sec = copy_from.db_txn_duration_sec
            self.db_sched_duration_sec = copy_from.db_sched_duration_sec
            self.evt_db_fetch_count = copy_from.evt_db_fetch_count

    def copy(self):
        """Return a new ContextResourceUsage with the same stats as this one."""
        return ContextResourceUsage(copy_from=self)

    def reset(self):
        """Reset all counters to zero."""
        self.ru_stime = 0.0
        self.ru_utime = 0.0
        self.db_txn_count = 0

        # use float zeros for the duration fields, consistent with the
        # documented attribute types (previously these were reset to int 0)
        self.db_txn_duration_sec = 0.0
        self.db_sched_duration_sec = 0.0
        self.evt_db_fetch_count = 0

    def __repr__(self):
        return (
            "<ContextResourceUsage ru_stime='%r', ru_utime='%r', "
            "db_txn_count='%r', db_txn_duration_sec='%r', "
            "db_sched_duration_sec='%r', evt_db_fetch_count='%r'>"
        ) % (
            self.ru_stime,
            self.ru_utime,
            self.db_txn_count,
            self.db_txn_duration_sec,
            self.db_sched_duration_sec,
            self.evt_db_fetch_count,
        )

    def __iadd__(self, other):
        """Add another ContextResourceUsage's stats to this one's.

        Args:
            other (ContextResourceUsage): the other resource usage object
        """
        self.ru_utime += other.ru_utime
        self.ru_stime += other.ru_stime
        self.db_txn_count += other.db_txn_count
        self.db_txn_duration_sec += other.db_txn_duration_sec
        self.db_sched_duration_sec += other.db_sched_duration_sec
        self.evt_db_fetch_count += other.evt_db_fetch_count
        return self

    def __isub__(self, other):
        """Subtract another ContextResourceUsage's stats from this one's."""
        self.ru_utime -= other.ru_utime
        self.ru_stime -= other.ru_stime
        self.db_txn_count -= other.db_txn_count
        self.db_txn_duration_sec -= other.db_txn_duration_sec
        self.db_sched_duration_sec -= other.db_sched_duration_sec
        self.evt_db_fetch_count -= other.evt_db_fetch_count
        return self

    def __add__(self, other):
        res = ContextResourceUsage(copy_from=self)
        res += other
        return res

    def __sub__(self, other):
        res = ContextResourceUsage(copy_from=self)
        res -= other
        return res
|
||||||
|
|
||||||
|
|
||||||
|
class LoggingContext(object):
    """Additional context for log formatting. Contexts are scoped within a
    "with" block.

    If a parent is given when creating a new context, then:
        - logging fields are copied from the parent to the new context on entry
        - when the new context exits, the cpu usage stats are copied from the
          child to the parent

    Args:
        name (str): Name for the context for debugging.
        parent_context (LoggingContext|None): The parent of the new context
    """

    __slots__ = [
        "previous_context",
        "name",
        "parent_context",
        "_resource_usage",
        "usage_start",
        "main_thread",
        "alive",
        "request",
        "tag",
    ]

    # per-thread storage of the "current" logging context; see
    # current_context() / set_current_context()
    thread_local = threading.local()

    class Sentinel(object):
        """Sentinel to represent the root context"""

        __slots__ = []

        def __str__(self):
            return "sentinel"

        # no-op stand-ins for the LoggingContext methods, so callers need not
        # check whether they have a real context before calling them
        def copy_to(self, record):
            pass

        def start(self):
            pass

        def stop(self):
            pass

        def add_database_transaction(self, duration_sec):
            pass

        def add_database_scheduled(self, sched_sec):
            pass

        def record_event_fetch(self, event_count):
            pass

        def __nonzero__(self):
            # the sentinel is falsy, so `if context:` distinguishes it from a
            # real LoggingContext
            return False

        __bool__ = __nonzero__  # python3

    sentinel = Sentinel()

    def __init__(self, name=None, parent_context=None, request=None):
        # the context that was active when this one was created; restored by
        # __exit__
        self.previous_context = LoggingContext.current_context()
        self.name = name

        # track the resources used by this context so far
        self._resource_usage = ContextResourceUsage()

        # If alive has the thread resource usage when the logcontext last
        # became active (set by start(), cleared by stop()).
        self.usage_start = None

        self.main_thread = get_thread_id()
        self.request = None
        self.tag = ""
        self.alive = True

        self.parent_context = parent_context

        if self.parent_context is not None:
            # copy logging fields (currently just 'request') from the parent
            self.parent_context.copy_to(self)

        if request is not None:
            # the request param overrides the request from the parent context
            self.request = request

    def __str__(self):
        if self.request:
            return str(self.request)
        return "%s@%x" % (self.name, id(self))

    @classmethod
    def current_context(cls):
        """Get the current logging context from thread local storage

        Returns:
            LoggingContext: the current logging context
        """
        return getattr(cls.thread_local, "current_context", cls.sentinel)

    @classmethod
    def set_current_context(cls, context):
        """Set the current logging context in thread local storage

        Args:
            context(LoggingContext): The context to activate.
        Returns:
            The context that was previously active
        """
        current = cls.current_context()

        if current is not context:
            # stop accounting CPU to the outgoing context, and start
            # accounting to the incoming one
            current.stop()
            cls.thread_local.current_context = context
            context.start()
        return current

    def __enter__(self):
        """Enters this logging context into thread local storage"""
        old_context = self.set_current_context(self)
        if self.previous_context != old_context:
            logger.warn(
                "Expected previous context %r, found %r",
                self.previous_context,
                old_context,
            )
        self.alive = True

        return self

    def __exit__(self, type, value, traceback):
        """Restore the logging context in thread local storage to the state it
        was before this context was entered.

        Returns:
            None to avoid suppressing any exceptions that were thrown.
        """
        current = self.set_current_context(self.previous_context)
        if current is not self:
            if current is self.sentinel:
                logger.warning("Expected logging context %s was lost", self)
            else:
                logger.warning(
                    "Expected logging context %s but found %s", self, current
                )
        self.previous_context = None
        self.alive = False

        # if we have a parent, pass our CPU usage stats on
        if self.parent_context is not None and hasattr(
            self.parent_context, "_resource_usage"
        ):
            self.parent_context._resource_usage += self._resource_usage

            # reset them in case we get entered again
            self._resource_usage.reset()

    def copy_to(self, record):
        """Copy logging fields from this context to a log record or
        another LoggingContext
        """

        # 'request' is the only field we currently use in the logger, so that's
        # all we need to copy
        record.request = self.request

    def start(self):
        # Called by set_current_context() when this context becomes active:
        # records a baseline of the thread's resource usage so that stop()
        # can compute the CPU consumed while active.
        if get_thread_id() != self.main_thread:
            logger.warning("Started logcontext %s on different thread", self)
            return

        # If we haven't already started record the thread resource usage so
        # far
        if not self.usage_start:
            self.usage_start = get_thread_resource_usage()

    def stop(self):
        # Called by set_current_context() when this context is deactivated:
        # folds the CPU used since start() into _resource_usage.
        if get_thread_id() != self.main_thread:
            logger.warning("Stopped logcontext %s on different thread", self)
            return

        # When we stop, let's record the cpu used since we started
        if not self.usage_start:
            logger.warning("Called stop on logcontext %s without calling start", self)
            return

        utime_delta, stime_delta = self._get_cputime()
        self._resource_usage.ru_utime += utime_delta
        self._resource_usage.ru_stime += stime_delta

        self.usage_start = None

    def get_resource_usage(self):
        """Get resources used by this logcontext so far.

        Returns:
            ContextResourceUsage: a *copy* of the object tracking resource
                usage so far
        """
        # we always return a copy, for consistency
        res = self._resource_usage.copy()

        # If we are on the correct thread and we're currently running then we
        # can include resource usage so far.
        is_main_thread = get_thread_id() == self.main_thread
        if self.alive and self.usage_start and is_main_thread:
            utime_delta, stime_delta = self._get_cputime()
            res.ru_utime += utime_delta
            res.ru_stime += stime_delta

        return res

    def _get_cputime(self):
        """Get the cpu usage time so far

        Returns: Tuple[float, float]: seconds in user mode, seconds in system mode
        """
        current = get_thread_resource_usage()

        utime_delta = current.ru_utime - self.usage_start.ru_utime
        stime_delta = current.ru_stime - self.usage_start.ru_stime

        # sanity check
        if utime_delta < 0:
            logger.error(
                "utime went backwards! %f < %f",
                current.ru_utime,
                self.usage_start.ru_utime,
            )
            utime_delta = 0

        if stime_delta < 0:
            logger.error(
                "stime went backwards! %f < %f",
                current.ru_stime,
                self.usage_start.ru_stime,
            )
            stime_delta = 0

        return utime_delta, stime_delta

    def add_database_transaction(self, duration_sec):
        # Record one completed database transaction and the time it took.
        if duration_sec < 0:
            raise ValueError("DB txn time can only be non-negative")
        self._resource_usage.db_txn_count += 1
        self._resource_usage.db_txn_duration_sec += duration_sec

    def add_database_scheduled(self, sched_sec):
        """Record a use of the database pool

        Args:
            sched_sec (float): number of seconds it took us to get a
                connection
        """
        if sched_sec < 0:
            raise ValueError("DB scheduling time can only be non-negative")
        self._resource_usage.db_sched_duration_sec += sched_sec

    def record_event_fetch(self, event_count):
        """Record a number of events being fetched from the db

        Args:
            event_count (int): number of events being fetched
        """
        self._resource_usage.evt_db_fetch_count += event_count
|
||||||
|
|
||||||
|
|
||||||
|
class LoggingContextFilter(logging.Filter):
    """Logging filter that adds values from the current logging context to each
    record.

    Args:
        **defaults: Default values to avoid formatters complaining about
            missing fields
    """

    def __init__(self, **defaults):
        # initialise the base logging.Filter so that the attributes it defines
        # (e.g. `name`) exist as expected; the override previously skipped this
        super(LoggingContextFilter, self).__init__()
        self.defaults = defaults

    def filter(self, record):
        """Add each fields from the logging contexts to the record.

        Returns:
            True to include the record in the log output.
        """
        context = LoggingContext.current_context()
        for key, value in self.defaults.items():
            setattr(record, key, value)

        # context should never be None, but if it somehow ends up being, then
        # we end up in a death spiral of infinite loops, so let's check, for
        # robustness' sake.
        if context is not None:
            context.copy_to(record)

        return True
|
||||||
|
|
||||||
|
|
||||||
|
class PreserveLoggingContext(object):
    """Captures the current logging context and restores it when the scope is
    exited. Used to restore the context after a function using
    @defer.inlineCallbacks is resumed by a callback from the reactor."""

    __slots__ = ["current_context", "new_context", "has_parent"]

    def __init__(self, new_context=None):
        # default to the sentinel (root) context if none was supplied
        self.new_context = (
            LoggingContext.sentinel if new_context is None else new_context
        )

    def __enter__(self):
        """Captures the current logging context"""
        captured = LoggingContext.set_current_context(self.new_context)
        self.current_context = captured

        if captured:
            self.has_parent = captured.previous_context is not None
            if not captured.alive:
                logger.debug("Entering dead context: %s", captured)

    def __exit__(self, type, value, traceback):
        """Restores the current logging context"""
        context = LoggingContext.set_current_context(self.current_context)

        if context != self.new_context:
            if context is LoggingContext.sentinel:
                logger.warning("Expected logging context %s was lost", self.new_context)
            else:
                logger.warning(
                    "Expected logging context %s but found %s",
                    self.new_context,
                    context,
                )

        if (
            self.current_context is not LoggingContext.sentinel
            and not self.current_context.alive
        ):
            logger.debug("Restoring dead context: %s", self.current_context)
|
||||||
|
|
||||||
|
|
||||||
|
def nested_logging_context(suffix, parent_context=None):
    """Creates a new logging context as a child of another.

    The nested logging context will have a 'request' made up of the parent context's
    request, plus the given suffix.

    CPU/db usage stats will be added to the parent context's on exit.

    Normal usage looks like:

        with nested_logging_context(suffix):
            # ... do stuff

    Args:
        suffix (str): suffix to add to the parent context's 'request'.
        parent_context (LoggingContext|None): parent context. Will use the current context
            if None.

    Returns:
        LoggingContext: new logging context.
    """
    context = parent_context
    if context is None:
        context = LoggingContext.current_context()
    child_request = context.request + "-" + suffix
    return LoggingContext(parent_context=context, request=child_request)
|
||||||
|
|
||||||
|
|
||||||
|
def preserve_fn(f):
    """Function decorator which wraps the function with run_in_background

    The wrapper preserves `f`'s metadata (``__name__``, ``__doc__`` etc) via
    functools.wraps, so that logging and debugging show the original function.

    Args:
        f (callable): function to wrap

    Returns:
        callable: a wrapper which calls `f` via run_in_background
    """
    from functools import wraps

    @wraps(f)
    def g(*args, **kwargs):
        return run_in_background(f, *args, **kwargs)

    return g
|
||||||
|
|
||||||
|
|
||||||
|
def run_in_background(f, *args, **kwargs):
    """Calls a function, ensuring that the current context is restored after
    return from the function, and that the sentinel context is set once the
    deferred returned by the function completes.

    Useful for wrapping functions that return a deferred or coroutine, which you don't
    yield or await on (for instance because you want to pass it to
    deferred.gatherResults()).

    Note that if you completely discard the result, you should make sure that
    `f` doesn't raise any deferred exceptions, otherwise a scary-looking
    CRITICAL error about an unhandled error will be logged without much
    indication about where it came from.

    Args:
        f (callable): function to call; may return a Deferred, a coroutine,
            or a plain value

    Returns:
        a Deferred (following the logcontext rules) if `f` returned a
        Deferred or coroutine; otherwise `f`'s plain return value
    """
    current = LoggingContext.current_context()
    try:
        res = f(*args, **kwargs)
    except:  # noqa: E722
        # the assumption here is that the caller doesn't want to be disturbed
        # by synchronous exceptions, so let's turn them into Failures.
        return defer.fail()

    # coroutines are wrapped into Deferreds so the logcontext plumbing below
    # applies uniformly
    if isinstance(res, types.CoroutineType):
        res = defer.ensureDeferred(res)

    if not isinstance(res, defer.Deferred):
        return res

    if res.called and not res.paused:
        # The function should have maintained the logcontext, so we can
        # optimise out the messing about
        return res

    # The function may have reset the context before returning, so
    # we need to restore it now.
    ctx = LoggingContext.set_current_context(current)

    # The original context will be restored when the deferred
    # completes, but there is nothing waiting for it, so it will
    # get leaked into the reactor or some other function which
    # wasn't expecting it. We therefore need to reset the context
    # here.
    #
    # (If this feels asymmetric, consider it this way: we are
    # effectively forking a new thread of execution. We are
    # probably currently within a ``with LoggingContext()`` block,
    # which is supposed to have a single entry and exit point. But
    # by spawning off another deferred, we are effectively
    # adding a new exit point.)
    res.addBoth(_set_context_cb, ctx)
    return res
|
||||||
|
|
||||||
|
|
||||||
|
def make_deferred_yieldable(deferred):
    """Given a deferred, make it follow the Synapse logcontext rules:

    If the deferred has completed (or is not actually a Deferred), essentially
    does nothing (just returns another completed deferred with the
    result/failure).

    If the deferred has not yet completed, resets the logcontext before
    returning a deferred. Then, when the deferred completes, restores the
    current logcontext before running callbacks/errbacks.

    (This is more-or-less the opposite operation to run_in_background.)
    """
    if not isinstance(deferred, defer.Deferred):
        # not a Deferred at all; hand it straight back
        return deferred

    incomplete = not deferred.called or deferred.paused
    if incomplete:
        # the deferred may yet block: switch to the sentinel context now, and
        # arrange for the captured context to be restored on completion.
        prev_context = LoggingContext.set_current_context(LoggingContext.sentinel)
        deferred.addBoth(_set_context_cb, prev_context)
    # (an already-completed deferred runs any callbacks we add immediately,
    # so no logcontext juggling is needed in that case)
    return deferred
|
||||||
|
|
||||||
|
|
||||||
|
def _set_context_cb(result, context):
    """A callback function which just sets the logging context

    Passes `result` through unchanged, so it can be attached with
    Deferred.addBoth without affecting the result/failure.
    """
    LoggingContext.set_current_context(context)
    return result
|
||||||
|
|
||||||
|
|
||||||
|
def defer_to_thread(reactor, f, *args, **kwargs):
    """
    Calls the function `f` using a thread from the reactor's default threadpool and
    returns the result as a Deferred.

    Creates a new logcontext for `f`, which is created as a child of the current
    logcontext (so its CPU usage metrics will get attributed to the current
    logcontext). `f` should preserve the logcontext it is given.

    The result deferred follows the Synapse logcontext rules: you should `yield`
    on it.

    Args:
        reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread
            the Deferred will be invoked, and whose threadpool we should use for the
            function.

            Normally this will be hs.get_reactor().

        f (callable): The function to call.

        args: positional arguments to pass to f.

        kwargs: keyword arguments to pass to f.

    Returns:
        Deferred: A Deferred which fires a callback with the result of `f`, or an
            errback if `f` throws an exception.
    """
    # thin convenience wrapper: same as defer_to_threadpool, but using the
    # reactor's default thread pool
    return defer_to_threadpool(reactor, reactor.getThreadPool(), f, *args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
def defer_to_threadpool(reactor, threadpool, f, *args, **kwargs):
    """
    A wrapper for twisted.internet.threads.deferToThreadpool, which handles
    logcontexts correctly.

    Calls the function `f` using a thread from the given threadpool and returns
    the result as a Deferred.

    Creates a new logcontext for `f`, which is created as a child of the current
    logcontext (so its CPU usage metrics will get attributed to the current
    logcontext). `f` should preserve the logcontext it is given.

    The result deferred follows the Synapse logcontext rules: you should `yield`
    on it.

    Args:
        reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread
            the Deferred will be invoked. Normally this will be hs.get_reactor().

        threadpool (twisted.python.threadpool.ThreadPool): The threadpool to use for
            running `f`. Normally this will be hs.get_reactor().getThreadPool().

        f (callable): The function to call.

        args: positional arguments to pass to f.

        kwargs: keyword arguments to pass to f.

    Returns:
        Deferred: A Deferred which fires a callback with the result of `f`, or an
            errback if `f` throws an exception.
    """
    logcontext = LoggingContext.current_context()

    def g():
        # run f inside a fresh child logcontext of the caller's, so that its
        # resource usage gets attributed back to the caller on exit
        with LoggingContext(parent_context=logcontext):
            return f(*args, **kwargs)

    return make_deferred_yieldable(threads.deferToThreadPool(reactor, threadpool, g))
|
||||||
@@ -0,0 +1,53 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
# Copyright 2017 New Vector Ltd
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import traceback
|
||||||
|
|
||||||
|
from six import StringIO
|
||||||
|
|
||||||
|
|
||||||
|
class LogFormatter(logging.Formatter):
    """Log formatter which gives more detail for exceptions

    This is the same as the standard log formatter, except that when logging
    exceptions [typically via log.foo("msg", exc_info=1)], it prints the
    sequence that led up to the point at which the exception was caught.
    (Normally only stack frames between the point the exception was raised and
    where it was caught are logged).

    Note: the redundant ``__init__`` override (which only delegated to the
    superclass) has been removed; construction is unchanged.
    """

    def formatException(self, ei):
        """Format an exception for inclusion in a log line.

        Args:
            ei: an ``(exc_type, exc_value, traceback)`` tuple, as returned by
                ``sys.exc_info()``

        Returns:
            str: the formatted stack and traceback, without a trailing newline
        """
        sio = StringIO()
        (typ, val, tb) = ei

        # log the stack above the exception capture point if possible, but
        # check that we actually have an f_back attribute to work around
        # https://twistedmatrix.com/trac/ticket/9305
        if tb and hasattr(tb.tb_frame, "f_back"):
            sio.write("Capture point (most recent call last):\n")
            traceback.print_stack(tb.tb_frame.f_back, None, sio)

        traceback.print_exception(typ, val, tb, None, sio)
        s = sio.getvalue()
        sio.close()
        # strip the single trailing newline the traceback module emits
        if s[-1:] == "\n":
            s = s[:-1]
        return s
|
||||||
@@ -22,7 +22,7 @@ from prometheus_client.core import REGISTRY, Counter, GaugeMetricFamily
|
|||||||
|
|
||||||
from twisted.internet import defer
|
from twisted.internet import defer
|
||||||
|
|
||||||
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
|
from synapse.logging.context import LoggingContext, PreserveLoggingContext
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|||||||
@@ -12,10 +12,14 @@
|
|||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
import logging
|
||||||
|
|
||||||
from twisted.internet import defer
|
from twisted.internet import defer
|
||||||
|
|
||||||
from synapse.types import UserID
|
from synapse.types import UserID
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class ModuleApi(object):
|
class ModuleApi(object):
|
||||||
"""A proxy object that gets passed to password auth providers so they
|
"""A proxy object that gets passed to password auth providers so they
|
||||||
@@ -76,8 +80,13 @@ class ModuleApi(object):
|
|||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def register(self, localpart, displayname=None, emails=[]):
|
def register(self, localpart, displayname=None, emails=[]):
|
||||||
"""Registers a new user with given localpart and optional
|
"""Registers a new user with given localpart and optional displayname, emails.
|
||||||
displayname, emails.
|
|
||||||
|
Also returns an access token for the new user.
|
||||||
|
|
||||||
|
Deprecated: avoid this, as it generates a new device with no way to
|
||||||
|
return that device to the user. Prefer separate calls to register_user and
|
||||||
|
register_device.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
localpart (str): The localpart of the new user.
|
localpart (str): The localpart of the new user.
|
||||||
@@ -85,15 +94,48 @@ class ModuleApi(object):
|
|||||||
emails (List[str]): Emails to bind to the new user.
|
emails (List[str]): Emails to bind to the new user.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Deferred: a 2-tuple of (user_id, access_token)
|
Deferred[tuple[str, str]]: a 2-tuple of (user_id, access_token)
|
||||||
"""
|
"""
|
||||||
# Register the user
|
logger.warning(
|
||||||
reg = self.hs.get_registration_handler()
|
"Using deprecated ModuleApi.register which creates a dummy user device."
|
||||||
user_id, access_token = yield reg.register(
|
)
|
||||||
|
user_id = yield self.register_user(localpart, displayname, emails)
|
||||||
|
_, access_token = yield self.register_device(user_id)
|
||||||
|
defer.returnValue((user_id, access_token))
|
||||||
|
|
||||||
|
def register_user(self, localpart, displayname=None, emails=[]):
|
||||||
|
"""Registers a new user with given localpart and optional displayname, emails.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
localpart (str): The localpart of the new user.
|
||||||
|
displayname (str|None): The displayname of the new user.
|
||||||
|
emails (List[str]): Emails to bind to the new user.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Deferred[str]: user_id
|
||||||
|
"""
|
||||||
|
return self.hs.get_registration_handler().register_user(
|
||||||
localpart=localpart, default_display_name=displayname, bind_emails=emails
|
localpart=localpart, default_display_name=displayname, bind_emails=emails
|
||||||
)
|
)
|
||||||
|
|
||||||
defer.returnValue((user_id, access_token))
|
def register_device(self, user_id, device_id=None, initial_display_name=None):
|
||||||
|
"""Register a device for a user and generate an access token.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
user_id (str): full canonical @user:id
|
||||||
|
device_id (str|None): The device ID to check, or None to generate
|
||||||
|
a new one.
|
||||||
|
initial_display_name (str|None): An optional display name for the
|
||||||
|
device.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
defer.Deferred[tuple[str, str]]: Tuple of device ID and access token
|
||||||
|
"""
|
||||||
|
return self.hs.get_registration_handler().register_device(
|
||||||
|
user_id=user_id,
|
||||||
|
device_id=device_id,
|
||||||
|
initial_display_name=initial_display_name,
|
||||||
|
)
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def invalidate_access_token(self, access_token):
|
def invalidate_access_token(self, access_token):
|
||||||
|
|||||||
+2
-2
@@ -23,12 +23,12 @@ from twisted.internet import defer
|
|||||||
from synapse.api.constants import EventTypes, Membership
|
from synapse.api.constants import EventTypes, Membership
|
||||||
from synapse.api.errors import AuthError
|
from synapse.api.errors import AuthError
|
||||||
from synapse.handlers.presence import format_user_presence_state
|
from synapse.handlers.presence import format_user_presence_state
|
||||||
|
from synapse.logging.context import PreserveLoggingContext
|
||||||
|
from synapse.logging.utils import log_function
|
||||||
from synapse.metrics import LaterGauge
|
from synapse.metrics import LaterGauge
|
||||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||||
from synapse.types import StreamToken
|
from synapse.types import StreamToken
|
||||||
from synapse.util.async_helpers import ObservableDeferred, timeout_deferred
|
from synapse.util.async_helpers import ObservableDeferred, timeout_deferred
|
||||||
from synapse.util.logcontext import PreserveLoggingContext
|
|
||||||
from synapse.util.logutils import log_function
|
|
||||||
from synapse.util.metrics import Measure
|
from synapse.util.metrics import Measure
|
||||||
from synapse.visibility import filter_events_for_client
|
from synapse.visibility import filter_events_for_client
|
||||||
|
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
# Copyright 2015, 2016 OpenMarket Ltd
|
# Copyright 2015, 2016 OpenMarket Ltd
|
||||||
# Copyright 2017 New Vector Ltd
|
# Copyright 2017 New Vector Ltd
|
||||||
|
# Copyright 2019 The Matrix.org Foundation C.I.C.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
@@ -248,6 +249,18 @@ BASE_APPEND_OVERRIDE_RULES = [
|
|||||||
],
|
],
|
||||||
"actions": ["notify", {"set_tweak": "highlight", "value": True}],
|
"actions": ["notify", {"set_tweak": "highlight", "value": True}],
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"rule_id": "global/override/.m.rule.reaction",
|
||||||
|
"conditions": [
|
||||||
|
{
|
||||||
|
"kind": "event_match",
|
||||||
|
"key": "type",
|
||||||
|
"pattern": "m.reaction",
|
||||||
|
"_id": "_reaction",
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"actions": ["dont_notify"],
|
||||||
|
},
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -29,6 +29,7 @@ from twisted.internet import defer
|
|||||||
|
|
||||||
from synapse.api.constants import EventTypes
|
from synapse.api.constants import EventTypes
|
||||||
from synapse.api.errors import StoreError
|
from synapse.api.errors import StoreError
|
||||||
|
from synapse.logging.context import make_deferred_yieldable
|
||||||
from synapse.push.presentable_names import (
|
from synapse.push.presentable_names import (
|
||||||
calculate_room_name,
|
calculate_room_name,
|
||||||
descriptor_from_member_events,
|
descriptor_from_member_events,
|
||||||
@@ -36,7 +37,6 @@ from synapse.push.presentable_names import (
|
|||||||
)
|
)
|
||||||
from synapse.types import UserID
|
from synapse.types import UserID
|
||||||
from synapse.util.async_helpers import concurrently_execute
|
from synapse.util.async_helpers import concurrently_execute
|
||||||
from synapse.util.logcontext import make_deferred_yieldable
|
|
||||||
from synapse.visibility import filter_events_for_client
|
from synapse.visibility import filter_events_for_client
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|||||||
@@ -156,70 +156,6 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint):
|
|||||||
defer.returnValue((200, ret))
|
defer.returnValue((200, ret))
|
||||||
|
|
||||||
|
|
||||||
class ReplicationRegister3PIDGuestRestServlet(ReplicationEndpoint):
|
|
||||||
"""Gets/creates a guest account for given 3PID.
|
|
||||||
|
|
||||||
Request format:
|
|
||||||
|
|
||||||
POST /_synapse/replication/get_or_register_3pid_guest/
|
|
||||||
|
|
||||||
{
|
|
||||||
"requester": ...,
|
|
||||||
"medium": ...,
|
|
||||||
"address": ...,
|
|
||||||
"inviter_user_id": ...
|
|
||||||
}
|
|
||||||
"""
|
|
||||||
|
|
||||||
NAME = "get_or_register_3pid_guest"
|
|
||||||
PATH_ARGS = ()
|
|
||||||
|
|
||||||
def __init__(self, hs):
|
|
||||||
super(ReplicationRegister3PIDGuestRestServlet, self).__init__(hs)
|
|
||||||
|
|
||||||
self.registeration_handler = hs.get_registration_handler()
|
|
||||||
self.store = hs.get_datastore()
|
|
||||||
self.clock = hs.get_clock()
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _serialize_payload(requester, medium, address, inviter_user_id):
|
|
||||||
"""
|
|
||||||
Args:
|
|
||||||
requester(Requester)
|
|
||||||
medium (str)
|
|
||||||
address (str)
|
|
||||||
inviter_user_id (str): The user ID who is trying to invite the
|
|
||||||
3PID
|
|
||||||
"""
|
|
||||||
return {
|
|
||||||
"requester": requester.serialize(),
|
|
||||||
"medium": medium,
|
|
||||||
"address": address,
|
|
||||||
"inviter_user_id": inviter_user_id,
|
|
||||||
}
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def _handle_request(self, request):
|
|
||||||
content = parse_json_object_from_request(request)
|
|
||||||
|
|
||||||
medium = content["medium"]
|
|
||||||
address = content["address"]
|
|
||||||
inviter_user_id = content["inviter_user_id"]
|
|
||||||
|
|
||||||
requester = Requester.deserialize(self.store, content["requester"])
|
|
||||||
|
|
||||||
if requester.user:
|
|
||||||
request.authenticated_entity = requester.user.to_string()
|
|
||||||
|
|
||||||
logger.info("get_or_register_3pid_guest: %r", content)
|
|
||||||
|
|
||||||
ret = yield self.registeration_handler.get_or_register_3pid_guest(
|
|
||||||
medium, address, inviter_user_id
|
|
||||||
)
|
|
||||||
|
|
||||||
defer.returnValue((200, ret))
|
|
||||||
|
|
||||||
|
|
||||||
class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint):
|
class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint):
|
||||||
"""Notifies that a user has joined or left the room
|
"""Notifies that a user has joined or left the room
|
||||||
|
|
||||||
@@ -272,5 +208,4 @@ class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint):
|
|||||||
def register_servlets(hs, http_server):
|
def register_servlets(hs, http_server):
|
||||||
ReplicationRemoteJoinRestServlet(hs).register(http_server)
|
ReplicationRemoteJoinRestServlet(hs).register(http_server)
|
||||||
ReplicationRemoteRejectInviteRestServlet(hs).register(http_server)
|
ReplicationRemoteRejectInviteRestServlet(hs).register(http_server)
|
||||||
ReplicationRegister3PIDGuestRestServlet(hs).register(http_server)
|
|
||||||
ReplicationUserJoinedLeftRoomRestServlet(hs).register(http_server)
|
ReplicationUserJoinedLeftRoomRestServlet(hs).register(http_server)
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user