Compare commits
89 Commits
| SHA1 |
|---|
| ef8415adc2 |
| 82d9d524bd |
| d6328e03fd |
| 33ea87be39 |
| 9ce4220d6c |
| 112cf5a73a |
| 8353ddd951 |
| 160c52d0d4 |
| 25d16fea78 |
| 187d2837a9 |
| 2d6308a043 |
| 839f9b9231 |
| eba7caf09f |
| 6840ebeef8 |
| dd927b29e1 |
| 414d2ca3a6 |
| 97d7e4c7b7 |
| a9dab970b8 |
| f12e1f029c |
| 9ca4ae7131 |
| 14db086428 |
| 5cec6d1845 |
| 4024520ff8 |
| 3c9bb86fde |
| 304a1376c2 |
| e0b77b004d |
| 9b14a810d2 |
| 1e7864c929 |
| 6d56a694f4 |
| e9344e0dee |
| 9fd4f83f1a |
| cc7cc853b1 |
| 3ed595e327 |
| d0530382ee |
| d8db29c481 |
| f03f8b7f4c |
| b2a6f90a67 |
| a10c8dae85 |
| 4f68188d0b |
| b59a4eba64 |
| 5c15039e06 |
| 6312d6cc7c |
| 09e9a26b71 |
| 7e68691ce9 |
| 97174780ce |
| 9532eb55ec |
| a766c41d25 |
| 426218323b |
| 453aaaadc0 |
| 10383e6e6f |
| 5bc9484537 |
| 2ddc13577c |
| 94dac0f3e5 |
| abce00fc6a |
| 0382b0ffee |
| 6d1e699b5c |
| ca7abb129c |
| 12f49b22ec |
| 0afcbc65cb |
| 843dd714cb |
| b56a224e22 |
| ab157e61a2 |
| 26b62796c2 |
| 028f674cd3 |
| 4914a88829 |
| 2decc92e2f |
| 5009d988da |
| 95d38afe96 |
| 2cca90dd40 |
| 837340bdce |
| a099926fcc |
| 2ebeda48b2 |
| 8182a1cfb5 |
| 928d1ccd73 |
| 6745b7de6d |
| 8dc1eb6697 |
| a2419b27fe |
| 8e0cee90d2 |
| a46ef1e3a4 |
| ccbc9e5e17 |
| d51ca9d9b3 |
| fe13bd52ac |
| a4cf2c1184 |
| aeb2263320 |
| deca87ddf2 |
| 83827c4922 |
| 14fe33cabf |
| 43b9a40370 |
| 1040e9b648 |
@@ -1,21 +0,0 @@
-version: '3.1'
-
-services:
-
-  postgres:
-    image: postgres:9.4
-    environment:
-      POSTGRES_PASSWORD: postgres
-
-  testenv:
-    image: python:2.7
-    depends_on:
-      - postgres
-    env_file: .env
-    environment:
-      SYNAPSE_POSTGRES_HOST: postgres
-      SYNAPSE_POSTGRES_USER: postgres
-      SYNAPSE_POSTGRES_PASSWORD: postgres
-    working_dir: /app
-    volumes:
-      - ..:/app
@@ -1,21 +0,0 @@
-version: '3.1'
-
-services:
-
-  postgres:
-    image: postgres:9.5
-    environment:
-      POSTGRES_PASSWORD: postgres
-
-  testenv:
-    image: python:2.7
-    depends_on:
-      - postgres
-    env_file: .env
-    environment:
-      SYNAPSE_POSTGRES_HOST: postgres
-      SYNAPSE_POSTGRES_USER: postgres
-      SYNAPSE_POSTGRES_PASSWORD: postgres
-    working_dir: /app
-    volumes:
-      - ..:/app
@@ -1,21 +0,0 @@
-version: '3.1'
-
-services:
-
-  postgres:
-    image: postgres:9.4
-    environment:
-      POSTGRES_PASSWORD: postgres
-
-  testenv:
-    image: python:3.5
-    depends_on:
-      - postgres
-    env_file: .env
-    environment:
-      SYNAPSE_POSTGRES_HOST: postgres
-      SYNAPSE_POSTGRES_USER: postgres
-      SYNAPSE_POSTGRES_PASSWORD: postgres
-    working_dir: /app
-    volumes:
-      - ..:/app
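These three deleted compose files covered the Python 2.7 / 3.5 against PostgreSQL 9.4 / 9.5 test matrix; the Buildkite pipeline further down names them in its docker-compose plugin config. A sketch of how such a file was exercised, mirroring the pipeline's command (purely illustrative, not part of the diff):

```bash
# Bring up the postgres dependency and run the tox suite inside the
# "testenv" container defined by the compose file.
docker-compose -f .buildkite/docker-compose.py27.pg94.yaml \
    run testenv bash -c 'python -m pip install tox && python -m tox -e py27-postgres,codecov'
```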
@@ -0,0 +1,33 @@
+import sys
+
+from tap.parser import Parser
+from tap.line import Result, Unknown, Diagnostic
+
+out = ["### TAP Output for " + sys.argv[2]]
+
+p = Parser()
+
+in_error = False
+
+for line in p.parse_file(sys.argv[1]):
+    if isinstance(line, Result):
+        if in_error:
+            out.append("")
+            out.append("</pre></code></details>")
+            out.append("")
+            out.append("----")
+            out.append("")
+
+        in_error = False
+
+        if not line.ok and not line.todo:
+            in_error = True
+
+            out.append("FAILURE Test #%d: ``%s``" % (line.number, line.description))
+            out.append("")
+            out.append("<details><summary>Show log</summary><code><pre>")
+
+    elif isinstance(line, Diagnostic) and in_error:
+        out.append(line.text)
+
+if out:
+    for line in out[:-3]:
+        print(line)
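This new TAP formatter is invoked later in synapse_sytest.sh with a results file and the Buildkite job label; a minimal standalone run might look like the following (the file and label names are illustrative):

```bash
# Convert SyTest's TAP output into Markdown suitable for a Buildkite
# annotation; argv[1] is the TAP file, argv[2] is the label for the heading.
python .buildkite/format_tap.py results.tap "SyTest - py35 / SQLite" > annotation.md
```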
@@ -1,22 +1,21 @@
 #!/usr/bin/env bash
 
-set -e
+set -ex
 
-# CircleCI doesn't give CIRCLE_PR_NUMBER in the environment for non-forked PRs. Wonderful.
-# In this case, we just need to do some ~shell magic~ to strip it out of the PULL_REQUEST URL.
-echo 'export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-${CIRCLE_PULL_REQUEST##*/}}"' >> $BASH_ENV
-source $BASH_ENV
+if [[ "$BUILDKITE_BRANCH" =~ ^(develop|master|dinsic|shhs|release-.*)$ ]]; then
+    echo "Not merging forward, as this is a release branch"
+    exit 0
+fi
 
-if [[ -z "${CIRCLE_PR_NUMBER}" ]]
-then
-    echo "Can't figure out what the PR number is! Assuming merge target is develop."
+if [[ -z $BUILDKITE_PULL_REQUEST_BASE_BRANCH ]]; then
+    echo "Not a pull request, or hasn't had a PR opened yet..."
 
     # It probably hasn't had a PR opened yet. Since all PRs land on develop, we
     # can probably assume it's based on it and will be merged into it.
     GITBASE="develop"
 else
-    # Get the reference, using the GitHub API
-    GITBASE=`wget -O- https://api.github.com/repos/matrix-org/synapse/pulls/${CIRCLE_PR_NUMBER} | jq -r '.base.ref'`
+    GITBASE=$BUILDKITE_PULL_REQUEST_BASE_BRANCH
 fi
 
 # Show what we are before
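The "~shell magic~" the removed comment refers to is plain bash parameter expansion: `${CIRCLE_PULL_REQUEST##*/}` deletes the longest prefix matching `*/`, leaving only the trailing PR number. A quick illustration (the URL value is a made-up example):

```bash
# "##*/" strips everything up to and including the final slash.
CIRCLE_PULL_REQUEST="https://github.com/matrix-org/synapse/pull/1234"
echo "${CIRCLE_PULL_REQUEST##*/}"   # prints: 1234
```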
+63 -71
@@ -2,6 +2,7 @@ env:
   CODECOV_TOKEN: "2dd7eb9b-0eda-45fe-a47c-9b5ac040045f"
 
 steps:
+
   - command:
       - "python -m pip install tox"
       - "tox -e pep8"
@@ -46,15 +47,16 @@ steps:
 
   - wait
 
+
   - command:
       - "python -m pip install tox"
-      - "tox -e py27,codecov"
-    label: ":python: 2.7 / SQLite"
+      - "tox -e py35-old,codecov"
+    label: ":python: 3.5 / SQLite / Old Deps"
     env:
       TRIAL_FLAGS: "-j 2"
     plugins:
       - docker#v3.0.1:
-          image: "python:2.7"
+          image: "python:3.5"
           propagate-environment: true
     retry:
      automatic:
@@ -114,74 +116,6 @@ steps:
         - exit_status: 2
           limit: 2
 
-  - command:
-      - "python -m pip install tox"
-      - "tox -e py27-old,codecov"
-    label: ":python: 2.7 / SQLite / Old Deps"
-    env:
-      TRIAL_FLAGS: "-j 2"
-    plugins:
-      - docker#v3.0.1:
-          image: "python:2.7"
-          propagate-environment: true
-    retry:
-      automatic:
-        - exit_status: -1
-          limit: 2
-        - exit_status: 2
-          limit: 2
-
-  - label: ":python: 2.7 / :postgres: 9.4"
-    env:
-      TRIAL_FLAGS: "-j 4"
-    command:
-      - "bash -c 'python -m pip install tox && python -m tox -e py27-postgres,codecov'"
-    plugins:
-      - docker-compose#v2.1.0:
-          run: testenv
-          config:
-            - .buildkite/docker-compose.py27.pg94.yaml
-    retry:
-      automatic:
-        - exit_status: -1
-          limit: 2
-        - exit_status: 2
-          limit: 2
-
-  - label: ":python: 2.7 / :postgres: 9.5"
-    env:
-      TRIAL_FLAGS: "-j 4"
-    command:
-      - "bash -c 'python -m pip install tox && python -m tox -e py27-postgres,codecov'"
-    plugins:
-      - docker-compose#v2.1.0:
-          run: testenv
-          config:
-            - .buildkite/docker-compose.py27.pg95.yaml
-    retry:
-      automatic:
-        - exit_status: -1
-          limit: 2
-        - exit_status: 2
-          limit: 2
-
-  - label: ":python: 3.5 / :postgres: 9.4"
-    env:
-      TRIAL_FLAGS: "-j 4"
-    command:
-      - "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,codecov'"
-    plugins:
-      - docker-compose#v2.1.0:
-          run: testenv
-          config:
-            - .buildkite/docker-compose.py35.pg94.yaml
-    retry:
-      automatic:
-        - exit_status: -1
-          limit: 2
-        - exit_status: 2
-          limit: 2
-
   - label: ":python: 3.5 / :postgres: 9.5"
     env:
       TRIAL_FLAGS: "-j 4"
@@ -232,3 +166,61 @@ steps:
           limit: 2
         - exit_status: 2
           limit: 2
+
+
+  - label: "SyTest - :python: 3.5 / SQLite / Monolith"
+    agents:
+      queue: "medium"
+    command:
+      - "bash .buildkite/merge_base_branch.sh"
+      - "bash .buildkite/synapse_sytest.sh"
+    plugins:
+      - docker#v3.0.1:
+          image: "matrixdotorg/sytest-synapse:py35"
+          propagate-environment: true
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2
+
+  - label: "SyTest - :python: 3.5 / :postgres: 9.6 / Monolith"
+    agents:
+      queue: "medium"
+    env:
+      POSTGRES: "1"
+    command:
+      - "bash .buildkite/merge_base_branch.sh"
+      - "bash .buildkite/synapse_sytest.sh"
+    plugins:
+      - docker#v3.0.1:
+          image: "matrixdotorg/sytest-synapse:py35"
+          propagate-environment: true
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2
+
+  - label: "SyTest - :python: 3.5 / :postgres: 9.6 / Workers"
+    agents:
+      queue: "medium"
+    env:
+      POSTGRES: "1"
+      WORKERS: "1"
+    command:
+      - "bash .buildkite/merge_base_branch.sh"
+      - "bash .buildkite/synapse_sytest.sh"
+    plugins:
+      - docker#v3.0.1:
+          image: "matrixdotorg/sytest-synapse:py35"
+          propagate-environment: true
+    soft_fail: true
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2
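Each tox step amounts to running the suite inside a stock Python container against the mounted checkout. A rough local equivalent of the ":python: 3.5 / SQLite / Old Deps" step (approximate only: the docker#v3.0.1 plugin also propagates the Buildkite environment, and codecov is omitted here since it needs the CI token):

```bash
# Run the same tox environment the pipeline uses, in the same base image.
docker run --rm -v "$PWD:/src" -w /src python:3.5 \
    bash -c 'python -m pip install tox && tox -e py35-old'
```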
@@ -0,0 +1,145 @@
+#!/bin/bash
+#
+# Fetch sytest, and then run the tests for synapse. The entrypoint for the
+# sytest-synapse docker images.
+
+set -ex
+
+if [ -n "$BUILDKITE" ]
+then
+    SYNAPSE_DIR=`pwd`
+else
+    SYNAPSE_DIR="/src"
+fi
+
+# Attempt to find a sytest to use.
+# If /sytest exists, it means that a SyTest checkout has been mounted into the Docker image.
+if [ -d "/sytest" ]; then
+    # If the user has mounted in a SyTest checkout, use that.
+    echo "Using local sytests..."
+
+    # create ourselves a working directory and dos2unix some scripts therein
+    mkdir -p /work/jenkins
+    for i in install-deps.pl run-tests.pl tap-to-junit-xml.pl jenkins/prep_sytest_for_postgres.sh; do
+        dos2unix -n "/sytest/$i" "/work/$i"
+    done
+    ln -sf /sytest/tests /work
+    ln -sf /sytest/keys /work
+    SYTEST_LIB="/sytest/lib"
+else
+    if [ -n "$BUILDKITE_BRANCH" ]
+    then
+        branch_name=$BUILDKITE_BRANCH
+    else
+        # Otherwise, try and find out what branch the Synapse checkout is using. Fall back to develop if it's not a branch.
+        branch_name="$(git --git-dir=/src/.git symbolic-ref HEAD 2>/dev/null)" || branch_name="develop"
+    fi
+
+    # Try and fetch the branch
+    echo "Trying to get same-named sytest branch..."
+    wget -q https://github.com/matrix-org/sytest/archive/$branch_name.tar.gz -O sytest.tar.gz || {
+        # Probably a 404, fall back to develop
+        echo "Using develop instead..."
+        wget -q https://github.com/matrix-org/sytest/archive/develop.tar.gz -O sytest.tar.gz
+    }
+
+    mkdir -p /work
+    tar -C /work --strip-components=1 -xf sytest.tar.gz
+    SYTEST_LIB="/work/lib"
+fi
+
+cd /work
+
+# PostgreSQL setup
+if [ -n "$POSTGRES" ]
+then
+    export PGUSER=postgres
+    export POSTGRES_DB_1=pg1
+    export POSTGRES_DB_2=pg2
+
+    # Start the database
+    su -c 'eatmydata /usr/lib/postgresql/9.6/bin/pg_ctl -w -D /var/lib/postgresql/data start' postgres
+
+    # Use the Jenkins script to write out the configuration for a PostgreSQL-backed Synapse
+    jenkins/prep_sytest_for_postgres.sh
+
+    # Make the test databases for the two Synapse servers that will be spun up
+    su -c 'psql -c "CREATE DATABASE pg1;"' postgres
+    su -c 'psql -c "CREATE DATABASE pg2;"' postgres
+
+fi
+
+if [ -n "$OFFLINE" ]; then
+    # if we're in offline mode, just put synapse into the virtualenv, and
+    # hope that the deps are up-to-date.
+    #
+    # (`pip install -e` likes to reinstall setuptools even if it's already installed,
+    # so we just run setup.py explicitly.)
+    #
+    (cd $SYNAPSE_DIR && /venv/bin/python setup.py -q develop)
+else
+    # We've already created the virtualenv, but lets double check we have all
+    # deps.
+    /venv/bin/pip install -q --upgrade --no-cache-dir -e $SYNAPSE_DIR
+    /venv/bin/pip install -q --upgrade --no-cache-dir \
+        lxml psycopg2 coverage codecov tap.py
+
+    # Make sure all Perl deps are installed -- this is done in the docker build
+    # so will only install packages added since the last Docker build
+    ./install-deps.pl
+fi
+
+
+# Run the tests
+>&2 echo "+++ Running tests"
+
+RUN_TESTS=(
+    perl -I "$SYTEST_LIB" ./run-tests.pl --python=/venv/bin/python --synapse-directory=$SYNAPSE_DIR --coverage -O tap --all
+)
+
+TEST_STATUS=0
+
+if [ -n "$WORKERS" ]; then
+    RUN_TESTS+=(-I Synapse::ViaHaproxy --dendron-binary=/pydron.py)
+else
+    RUN_TESTS+=(-I Synapse)
+fi
+
+"${RUN_TESTS[@]}" "$@" > results.tap || TEST_STATUS=$?
+
+if [ $TEST_STATUS -ne 0 ]; then
+    >&2 echo -e "run-tests \e[31mFAILED\e[0m: exit code $TEST_STATUS"
+else
+    >&2 echo -e "run-tests \e[32mPASSED\e[0m"
+fi
+
+>&2 echo "--- Copying assets"
+
+# Copy out the logs
+mkdir -p /logs
+cp results.tap /logs/results.tap
+rsync --ignore-missing-args --min-size=1B -av server-0 server-1 /logs --include "*/" --include="*.log.*" --include="*.log" --exclude="*"
+
+# Upload coverage to codecov and upload files, if running on Buildkite
+if [ -n "$BUILDKITE" ]
+then
+    /venv/bin/coverage combine || true
+    /venv/bin/coverage xml || true
+    /venv/bin/codecov -X gcov -f coverage.xml
+
+    wget -O buildkite.tar.gz https://github.com/buildkite/agent/releases/download/v3.13.0/buildkite-agent-linux-amd64-3.13.0.tar.gz
+    tar xvf buildkite.tar.gz
+    chmod +x ./buildkite-agent
+
+    # Upload the files
+    ./buildkite-agent artifact upload "/logs/**/*.log*"
+    ./buildkite-agent artifact upload "/logs/results.tap"
+
+    if [ $TEST_STATUS -ne 0 ]; then
+        # Annotate, if failure
+        /venv/bin/python $SYNAPSE_DIR/.buildkite/format_tap.py /logs/results.tap "$BUILDKITE_LABEL" | ./buildkite-agent annotate --style="error" --context="$BUILDKITE_LABEL"
+    fi
+fi
+
+
+exit $TEST_STATUS
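Building RUN_TESTS as a bash array is what lets the worker and monolith variants splice in extra arguments without word-splitting bugs. The pattern in isolation (variable names here are illustrative, not from the script):

```bash
# Accumulate a command in an array, then expand it with "${cmd[@]}" so
# each element stays a single argument even if it contains spaces.
cmd=(perl ./run-tests.pl --coverage -O tap --all)
if [ -n "$WORKERS" ]; then
    cmd+=(-I Synapse::ViaHaproxy)
else
    cmd+=(-I Synapse)
fi
"${cmd[@]}" "$@" > results.tap || status=$?
```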
@@ -4,160 +4,23 @@ jobs:
     machine: true
     steps:
       - checkout
       - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG}-py2 .
       - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG} -t matrixdotorg/synapse:${CIRCLE_TAG}-py3 --build-arg PYTHON_VERSION=3.6 .
       - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
       - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}
       - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py2
       - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py3
   dockerhubuploadlatest:
     machine: true
     steps:
       - checkout
       - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest-py2 .
       - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest -t matrixdotorg/synapse:latest-py3 --build-arg PYTHON_VERSION=3.6 .
       - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
       - run: docker push matrixdotorg/synapse:latest
       - run: docker push matrixdotorg/synapse:latest-py2
       - run: docker push matrixdotorg/synapse:latest-py3
-  sytestpy2:
-    docker:
-      - image: matrixdotorg/sytest-synapsepy2
-    working_directory: /src
-    steps:
-      - checkout
-      - run: /synapse_sytest.sh
-      - store_artifacts:
-          path: /logs
-          destination: logs
-      - store_test_results:
-          path: /logs
-  sytestpy2postgres:
-    docker:
-      - image: matrixdotorg/sytest-synapsepy2
-    working_directory: /src
-    steps:
-      - checkout
-      - run: POSTGRES=1 /synapse_sytest.sh
-      - store_artifacts:
-          path: /logs
-          destination: logs
-      - store_test_results:
-          path: /logs
-  sytestpy2merged:
-    docker:
-      - image: matrixdotorg/sytest-synapsepy2
-    working_directory: /src
-    steps:
-      - checkout
-      - run: bash .circleci/merge_base_branch.sh
-      - run: /synapse_sytest.sh
-      - store_artifacts:
-          path: /logs
-          destination: logs
-      - store_test_results:
-          path: /logs
-  sytestpy2postgresmerged:
-    docker:
-      - image: matrixdotorg/sytest-synapsepy2
-    working_directory: /src
-    steps:
-      - checkout
-      - run: bash .circleci/merge_base_branch.sh
-      - run: POSTGRES=1 /synapse_sytest.sh
-      - store_artifacts:
-          path: /logs
-          destination: logs
-      - store_test_results:
-          path: /logs
-
-  sytestpy3:
-    docker:
-      - image: matrixdotorg/sytest-synapsepy3
-    working_directory: /src
-    steps:
-      - checkout
-      - run: /synapse_sytest.sh
-      - store_artifacts:
-          path: /logs
-          destination: logs
-      - store_test_results:
-          path: /logs
-  sytestpy3postgres:
-    docker:
-      - image: matrixdotorg/sytest-synapsepy3
-    working_directory: /src
-    steps:
-      - checkout
-      - run: POSTGRES=1 /synapse_sytest.sh
-      - store_artifacts:
-          path: /logs
-          destination: logs
-      - store_test_results:
-          path: /logs
-  sytestpy3merged:
-    docker:
-      - image: matrixdotorg/sytest-synapsepy3
-    working_directory: /src
-    steps:
-      - checkout
-      - run: bash .circleci/merge_base_branch.sh
-      - run: /synapse_sytest.sh
-      - store_artifacts:
-          path: /logs
-          destination: logs
-      - store_test_results:
-          path: /logs
-  sytestpy3postgresmerged:
-    docker:
-      - image: matrixdotorg/sytest-synapsepy3
-    working_directory: /src
-    steps:
-      - checkout
-      - run: bash .circleci/merge_base_branch.sh
-      - run: POSTGRES=1 /synapse_sytest.sh
-      - store_artifacts:
-          path: /logs
-          destination: logs
-      - store_test_results:
-          path: /logs
 
 workflows:
   version: 2
   build:
     jobs:
-      - sytestpy2:
-          filters:
-            branches:
-              only: /develop|master|release-.*/
-      - sytestpy2postgres:
-          filters:
-            branches:
-              only: /develop|master|release-.*/
-      - sytestpy3:
-          filters:
-            branches:
-              only: /develop|master|release-.*/
-      - sytestpy3postgres:
-          filters:
-            branches:
-              only: /develop|master|release-.*/
-      - sytestpy2merged:
-          filters:
-            branches:
-              ignore: /develop|master|release-.*/
-      - sytestpy2postgresmerged:
-          filters:
-            branches:
-              ignore: /develop|master|release-.*/
-      - sytestpy3merged:
-          filters:
-            branches:
-              ignore: /develop|master|release-.*/
-      - sytestpy3postgresmerged:
-          filters:
-            branches:
-              ignore: /develop|master|release-.*/
       - dockerhubuploadrelease:
           filters:
             tags:
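The retained jobs build one image per Python major version from the same Dockerfile. The tag-specific build can be reproduced outside CI along these lines (the v1.0.0 tag and local SHA lookup stand in for the CircleCI variables):

```bash
# Build the Python 3 variant the way the dockerhubuploadrelease job does,
# substituting a local git SHA and an example tag for ${CIRCLE_SHA1}/${CIRCLE_TAG}.
docker build -f docker/Dockerfile \
    --label gitsha1=$(git rev-parse HEAD) \
    --build-arg PYTHON_VERSION=3.6 \
    -t matrixdotorg/synapse:v1.0.0 -t matrixdotorg/synapse:v1.0.0-py3 .
```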
@@ -0,0 +1,4 @@
+# One username per supported platform and one custom link
+patreon: matrixdotorg
+liberapay: matrixdotorg
+custom: https://paypal.me/matrixdotorg
+21
@@ -1,3 +1,24 @@
+Synapse 1.0.0 (2019-06-11)
+==========================
+
+Bugfixes
+--------
+
+- Fix bug where attempting to send transactions with large number of EDUs can fail. ([\#5418](https://github.com/matrix-org/synapse/issues/5418))
+
+
+Improved Documentation
+----------------------
+
+- Expand the federation guide to include relevant content from the MSC1711 FAQ ([\#5419](https://github.com/matrix-org/synapse/issues/5419))
+
+
+Internal Changes
+----------------
+
+- Move password reset links to /_matrix/client/unstable namespace. ([\#5424](https://github.com/matrix-org/synapse/issues/5424))
+
+
 Synapse 1.0.0rc3 (2019-06-10)
 =============================
 
+9 -10
@@ -30,21 +30,20 @@ use github's pull request workflow to review the contribution, and either ask
 you to make any refinements needed or merge it and make them ourselves. The
 changes will then land on master when we next do a release.
 
-We use `CircleCI <https://circleci.com/gh/matrix-org>`_ and `Travis CI
-<https://travis-ci.org/matrix-org/synapse>`_ for continuous integration. All
-pull requests to synapse get automatically tested by Travis and CircleCI.
-If your change breaks the build, this will be shown in GitHub, so please
-keep an eye on the pull request for feedback.
+We use `CircleCI <https://circleci.com/gh/matrix-org>`_ and `Buildkite
+<https://buildkite.com/matrix-dot-org/synapse>`_ for continuous integration.
+Buildkite builds need to be authorised by a maintainer. If your change breaks
+the build, this will be shown in GitHub, so please keep an eye on the pull
+request for feedback.
 
 To run unit tests in a local development environment, you can use:
 
-- ``tox -e py27`` (requires tox to be installed by ``pip install tox``) for
-  SQLite-backed Synapse on Python 2.7.
-- ``tox -e py35`` for SQLite-backed Synapse on Python 3.5.
+- ``tox -e py35`` (requires tox to be installed by ``pip install tox``)
+  for SQLite-backed Synapse on Python 3.5.
 - ``tox -e py36`` for SQLite-backed Synapse on Python 3.6.
-- ``tox -e py27-postgres`` for PostgreSQL-backed Synapse on Python 2.7
+- ``tox -e py36-postgres`` for PostgreSQL-backed Synapse on Python 3.6
   (requires a running local PostgreSQL with access to create databases).
-- ``./test_postgresql.sh`` for PostgreSQL-backed Synapse on Python 2.7
+- ``./test_postgresql.sh`` for PostgreSQL-backed Synapse on Python 3.5
   (requires Docker). Entirely self-contained, recommended if you don't want to
   set up PostgreSQL yourself.
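For example, the first entry in the updated list boils down to two commands (assuming a Synapse checkout with its tox.ini at the root):

```bash
python -m pip install tox   # one-off: install the test runner
tox -e py35                 # run the unit tests against SQLite on Python 3.5
```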
+14 -13
@@ -1,14 +1,14 @@
-* [Installing Synapse](#installing-synapse)
-  * [Installing from source](#installing-from-source)
-    * [Platform-Specific Instructions](#platform-specific-instructions)
-    * [Troubleshooting Installation](#troubleshooting-installation)
-  * [Prebuilt packages](#prebuilt-packages)
-* [Setting up Synapse](#setting-up-synapse)
-  * [TLS certificates](#tls-certificates)
-  * [Email](#email)
-  * [Registering a user](#registering-a-user)
-  * [Setting up a TURN server](#setting-up-a-turn-server)
-  * [URL previews](#url-previews)
+- [Installing Synapse](#installing-synapse)
+  - [Installing from source](#installing-from-source)
+    - [Platform-Specific Instructions](#platform-specific-instructions)
+    - [Troubleshooting Installation](#troubleshooting-installation)
+  - [Prebuilt packages](#prebuilt-packages)
+- [Setting up Synapse](#setting-up-synapse)
+  - [TLS certificates](#tls-certificates)
+  - [Email](#email)
+  - [Registering a user](#registering-a-user)
+  - [Setting up a TURN server](#setting-up-a-turn-server)
+  - [URL previews](#url-previews)
 
 # Installing Synapse
 
@@ -395,8 +395,9 @@ To configure Synapse to expose an HTTPS port, you will need to edit
 instance, if using certbot, use `fullchain.pem` as your certificate, not
 `cert.pem`).
 
-For those of you upgrading your TLS certificate for Synapse 1.0 compliance,
-please take a look at [our guide](docs/MSC1711_certificates_FAQ.md#configuring-certificates-for-compatibility-with-synapse-100).
+For a more detailed guide to configuring your server for federation, see
+[federate.md](docs/federate.md)
+
 
 
 ## Email
 
+29 -2
@@ -49,6 +49,33 @@ returned by the Client-Server API:
     # configured on port 443.
     curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"
 
+Upgrading to v1.1
+=================
+
+Synapse 1.1 removes support for older Python and PostgreSQL versions, as
+outlined in `our deprecation notice <https://matrix.org/blog/2019/04/08/synapse-deprecating-postgres-9-4-and-python-2-x>`_.
+
+Minimum Python Version
+----------------------
+
+Synapse v1.1 has a minimum Python requirement of Python 3.5. Python 3.6 or
+Python 3.7 are recommended as they have improved internal string handling,
+significantly reducing memory usage.
+
+If you use current versions of the Matrix.org-distributed Debian packages or
+Docker images, action is not required.
+
+If you install Synapse in a Python virtual environment, please see "Upgrading to
+v0.34.0" for notes on setting up a new virtualenv under Python 3.
+
+Minimum PostgreSQL Version
+--------------------------
+
+If using PostgreSQL under Synapse, you will need to use PostgreSQL 9.5 or above.
+Please see the
+`PostgreSQL documentation <https://www.postgresql.org/docs/11/upgrading.html>`_
+for more details on upgrading your database.
+
 Upgrading to v1.0
 =================
 
@@ -71,11 +98,11 @@ server in a closed federation. This can be done in one of two ways:-
 * Configure a whitelist of server domains to trust via ``federation_certificate_verification_whitelist``.
 
 See the `sample configuration file <docs/sample_config.yaml>`_
-for more details on these settings.
+for more details on these settings.
 
 Email
 -----
-When a user requests a password reset, Synapse will send an email to the
+When a user requests a password reset, Synapse will send an email to the
 user to confirm the request.
 
 Previous versions of Synapse delegated the job of sending this email to an
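Before upgrading, it is worth confirming the running server meets the new 9.5 floor. One way to check (the invocation assumes a local postgres superuser):

```bash
# Ask the running cluster for its version; anything below 9.5 must be
# upgraded before moving to Synapse 1.1.
sudo -u postgres psql -c "SHOW server_version;"
```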
@@ -0,0 +1 @@
+Add logging to 3pid invite signature verification.
@@ -0,0 +1 @@
+Add monthly active users to phonehome stats.
@@ -0,0 +1 @@
+Fix a bug where running synapse_port_db would cause the account validity feature to fail because it didn't set the type of the email_sent column to boolean.
@@ -0,0 +1 @@
+Allow expired user to trigger renewal email sending manually.
@@ -0,0 +1 @@
+Track deactivated accounts in the database.
@@ -0,0 +1 @@
+Clean up code for sending federation EDUs.
@@ -0,0 +1 @@
+Add a sponsor button to the repo.
@@ -0,0 +1 @@
+Don't log non-200 responses from federation queries as exceptions.
@@ -0,0 +1 @@
+Statistics on forward extremities per room are now exposed via Prometheus.
@@ -0,0 +1 @@
+Add a sponsor button to the repo.
@@ -0,0 +1 @@
+Warn about disabling email-based password resets when a reset occurs, and remove warning when someone attempts a phone-based reset.
@@ -0,0 +1 @@
+Fix email notifications for unnamed rooms with multiple people.
@@ -0,0 +1 @@
+Fix exceptions in federation reader worker caused by attempting to renew attestations, which should only happen on master worker.
@@ -0,0 +1 @@
+Fix handling of failures fetching remote content to not log failures as exceptions.
@@ -0,0 +1 @@
+Fix a bug where deactivated users could receive renewal emails if the account validity feature is on.
@@ -0,0 +1 @@
+Add --no-daemonize option to run synapse in the foreground, per issue #4130. Contributed by Soham Gumaste.
@@ -1 +0,0 @@
-Fix bug where attempting to send transactions with large number of EDUs can fail.
@@ -0,0 +1 @@
+Python 2.7 is no longer a supported platform. Synapse now requires Python 3.5+ to run.
@@ -0,0 +1 @@
+Update Python syntax in contrib/ to Python 3.
@@ -0,0 +1 @@
+Update federation_client dev script to support `.well-known` and work with python3.
@@ -0,0 +1 @@
+PostgreSQL 9.4 is no longer supported. Synapse requires Postgres 9.5 or above for Postgres support.
@@ -0,0 +1 @@
+Statistics on forward extremities per room are now exposed via Prometheus.
@@ -0,0 +1 @@
+SyTest has been moved to Buildkite.
@@ -0,0 +1 @@
+Demo script now uses python3.
@@ -0,0 +1 @@
+Statistics on forward extremities per room are now exposed via Prometheus.
@@ -0,0 +1 @@
+Fix missing invite state after exchanging 3PID invites over federation.
@@ -0,0 +1,2 @@
+Track deactivated accounts in the database.
+
@@ -0,0 +1 @@
+Allow server admins to define implementations of extra rules for allowing or denying incoming events.
@@ -0,0 +1 @@
+Allow server admins to define implementations of extra rules for allowing or denying incoming events.
@@ -0,0 +1 @@
+The demo servers talk to each other again.
@@ -15,6 +15,7 @@
 # limitations under the License.
 
 """ Starts a synapse client console. """
+from __future__ import print_function
 
 from twisted.internet import reactor, defer, threads
 from http import TwistedHttpClient
@@ -109,7 +110,7 @@ class SynapseCmd(cmd.Cmd):
         by using $. E.g. 'config roomid room1' then 'raw get /rooms/$roomid'.
         """
         if len(line) == 0:
-            print json.dumps(self.config, indent=4)
+            print(json.dumps(self.config, indent=4))
             return
 
         try:
@@ -123,8 +124,8 @@ class SynapseCmd(cmd.Cmd):
             ]
             for key, valid_vals in config_rules:
                 if key == args["key"] and args["val"] not in valid_vals:
-                    print "%s value must be one of %s" % (args["key"],
-                                                          valid_vals)
+                    print("%s value must be one of %s" % (args["key"],
+                                                          valid_vals))
                     return
 
             # toggle the http client verbosity
@@ -133,11 +134,11 @@ class SynapseCmd(cmd.Cmd):
 
             # assign the new config
             self.config[args["key"]] = args["val"]
-            print json.dumps(self.config, indent=4)
+            print(json.dumps(self.config, indent=4))
 
             save_config(self.config)
         except Exception as e:
-            print e
+            print(e)
 
     def do_register(self, line):
         """Registers for a new account: "register <userid> <noupdate>"
@@ -153,7 +154,7 @@ class SynapseCmd(cmd.Cmd):
             pwd = getpass.getpass("Type a password for this user: ")
             pwd2 = getpass.getpass("Retype the password: ")
             if pwd != pwd2 or len(pwd) == 0:
-                print "Password mismatch."
+                print("Password mismatch.")
                 pwd = None
             else:
                 password = pwd
@@ -174,12 +175,12 @@ class SynapseCmd(cmd.Cmd):
         # check the registration flows
         url = self._url() + "/register"
         json_res = yield self.http_client.do_request("GET", url)
-        print json.dumps(json_res, indent=4)
+        print(json.dumps(json_res, indent=4))
 
         passwordFlow = None
         for flow in json_res["flows"]:
             if flow["type"] == "m.login.recaptcha" or ("stages" in flow and "m.login.recaptcha" in flow["stages"]):
-                print "Unable to register: Home server requires captcha."
+                print("Unable to register: Home server requires captcha.")
                 return
             if flow["type"] == "m.login.password" and "stages" not in flow:
                 passwordFlow = flow
@@ -189,7 +190,7 @@ class SynapseCmd(cmd.Cmd):
             return
 
         json_res = yield self.http_client.do_request("POST", url, data=data)
-        print json.dumps(json_res, indent=4)
+        print(json.dumps(json_res, indent=4))
         if update_config and "user_id" in json_res:
             self.config["user"] = json_res["user_id"]
             self.config["token"] = json_res["access_token"]
@@ -215,7 +216,7 @@ class SynapseCmd(cmd.Cmd):
             reactor.callFromThread(self._do_login, user, p)
             #print " got %s " % p
         except Exception as e:
-            print e
+            print(e)
 
     @defer.inlineCallbacks
     def _do_login(self, user, password):
@@ -227,13 +228,13 @@ class SynapseCmd(cmd.Cmd):
         }
         url = self._url() + path
         json_res = yield self.http_client.do_request("POST", url, data=data)
-        print json_res
+        print(json_res)
 
         if "access_token" in json_res:
             self.config["user"] = user
             self.config["token"] = json_res["access_token"]
             save_config(self.config)
-            print "Login successful."
+            print("Login successful.")
 
     @defer.inlineCallbacks
     def _check_can_login(self):
@@ -242,10 +243,10 @@ class SynapseCmd(cmd.Cmd):
         # submitting!
         url = self._url() + path
         json_res = yield self.http_client.do_request("GET", url)
-        print json_res
+        print(json_res)
 
         if "flows" not in json_res:
-            print "Failed to find any login flows."
+            print("Failed to find any login flows.")
             defer.returnValue(False)
 
         flow = json_res["flows"][0] # assume first is the one we want.
@@ -275,9 +276,9 @@ class SynapseCmd(cmd.Cmd):
 
         json_res = yield self.http_client.do_request("POST", url, data=urllib.urlencode(args), jsonreq=False,
                                                      headers={'Content-Type': ['application/x-www-form-urlencoded']})
-        print json_res
+        print(json_res)
         if 'sid' in json_res:
-            print "Token sent. Your session ID is %s" % (json_res['sid'])
+            print("Token sent. Your session ID is %s" % (json_res['sid']))
 
     def do_emailvalidate(self, line):
         """Validate and associate a third party ID
@@ -297,7 +298,7 @@ class SynapseCmd(cmd.Cmd):
 
         json_res = yield self.http_client.do_request("POST", url, data=urllib.urlencode(args), jsonreq=False,
                                                      headers={'Content-Type': ['application/x-www-form-urlencoded']})
-        print json_res
+        print(json_res)
 
     def do_3pidbind(self, line):
         """Validate and associate a third party ID
@@ -317,7 +318,7 @@ class SynapseCmd(cmd.Cmd):
 
         json_res = yield self.http_client.do_request("POST", url, data=urllib.urlencode(args), jsonreq=False,
                                                      headers={'Content-Type': ['application/x-www-form-urlencoded']})
-        print json_res
+        print(json_res)
 
     def do_join(self, line):
         """Joins a room: "join <roomid>" """
@@ -325,7 +326,7 @@ class SynapseCmd(cmd.Cmd):
             args = self._parse(line, ["roomid"], force_keys=True)
             self._do_membership_change(args["roomid"], "join", self._usr())
         except Exception as e:
-            print e
+            print(e)
 
     def do_joinalias(self, line):
         try:
@@ -333,7 +334,7 @@ class SynapseCmd(cmd.Cmd):
             path = "/join/%s" % urllib.quote(args["roomname"])
             reactor.callFromThread(self._run_and_pprint, "POST", path, {})
         except Exception as e:
-            print e
+            print(e)
 
     def do_topic(self, line):
         """"topic [set|get] <roomid> [<newtopic>]"
@@ -343,17 +344,17 @@ class SynapseCmd(cmd.Cmd):
         try:
             args = self._parse(line, ["action", "roomid", "topic"])
             if "action" not in args or "roomid" not in args:
-                print "Must specify set|get and a room ID."
+                print("Must specify set|get and a room ID.")
                 return
             if args["action"].lower() not in ["set", "get"]:
-                print "Must specify set|get, not %s" % args["action"]
+                print("Must specify set|get, not %s" % args["action"])
                 return
 
             path = "/rooms/%s/topic" % urllib.quote(args["roomid"])
 
             if args["action"].lower() == "set":
                 if "topic" not in args:
-                    print "Must specify a new topic."
+                    print("Must specify a new topic.")
                     return
                 body = {
                     "topic": args["topic"]
@@ -362,7 +363,7 @@ class SynapseCmd(cmd.Cmd):
             elif args["action"].lower() == "get":
                 reactor.callFromThread(self._run_and_pprint, "GET", path)
         except Exception as e:
-            print e
+            print(e)
 
     def do_invite(self, line):
         """Invite a user to a room: "invite <userid> <roomid>" """
@@ -373,7 +374,7 @@ class SynapseCmd(cmd.Cmd):
 
             reactor.callFromThread(self._do_invite, args["roomid"], user_id)
         except Exception as e:
-            print e
+            print(e)
 
     @defer.inlineCallbacks
     def _do_invite(self, roomid, userstring):
@@ -393,29 +394,29 @@ class SynapseCmd(cmd.Cmd):
             if 'public_key' in pubKeyObj:
                 pubKey = nacl.signing.VerifyKey(pubKeyObj['public_key'], encoder=nacl.encoding.HexEncoder)
             else:
-                print "No public key found in pubkey response!"
+                print("No public key found in pubkey response!")
 
             sigValid = False
 
             if pubKey:
                 for signame in json_res['signatures']:
                     if signame not in TRUSTED_ID_SERVERS:
-                        print "Ignoring signature from untrusted server %s" % (signame)
+                        print("Ignoring signature from untrusted server %s" % (signame))
                     else:
                         try:
                             verify_signed_json(json_res, signame, pubKey)
                             sigValid = True
-                            print "Mapping %s -> %s correctly signed by %s" % (userstring, json_res['mxid'], signame)
+                            print("Mapping %s -> %s correctly signed by %s" % (userstring, json_res['mxid'], signame))
                             break
                         except SignatureVerifyException as e:
-                            print "Invalid signature from %s" % (signame)
-                            print e
+                            print("Invalid signature from %s" % (signame))
+                            print(e)
 
             if sigValid:
-                print "Resolved 3pid %s to %s" % (userstring, json_res['mxid'])
+                print("Resolved 3pid %s to %s" % (userstring, json_res['mxid']))
                 mxid = json_res['mxid']
             else:
-                print "Got association for %s but couldn't verify signature" % (userstring)
+                print("Got association for %s but couldn't verify signature" % (userstring))
 
         if not mxid:
             mxid = "@" + userstring + ":" + self._domain()
@@ -428,7 +429,7 @@ class SynapseCmd(cmd.Cmd):
             args = self._parse(line, ["roomid"], force_keys=True)
             self._do_membership_change(args["roomid"], "leave", self._usr())
         except Exception as e:
-            print e
+            print(e)
 
     def do_send(self, line):
         """Sends a message. "send <roomid> <body>" """
@@ -453,10 +454,10 @@ class SynapseCmd(cmd.Cmd):
         """
         args = self._parse(line, ["type", "roomid", "qp"])
         if not "type" in args or not "roomid" in args:
-            print "Must specify type and room ID."
+            print("Must specify type and room ID.")
            return
         if args["type"] not in ["members", "messages"]:
-            print "Unrecognised type: %s" % args["type"]
+            print("Unrecognised type: %s" % args["type"])
            return
         room_id = args["roomid"]
         path = "/rooms/%s/%s" % (urllib.quote(room_id), args["type"])
@@ -468,7 +469,7 @@ class SynapseCmd(cmd.Cmd):
                 key_value = key_value_str.split("=")
                 qp[key_value[0]] = key_value[1]
             except:
-                print "Bad query param: %s" % key_value
+                print("Bad query param: %s" % key_value)
                 return
 
         reactor.callFromThread(self._run_and_pprint, "GET", path,
@@ -508,14 +509,14 @@ class SynapseCmd(cmd.Cmd):
         args = self._parse(line, ["method", "path", "data"])
         # sanity check
         if "method" not in args or "path" not in args:
-            print "Must specify path and method."
+            print("Must specify path and method.")
             return
 
         args["method"] = args["method"].upper()
         valid_methods = ["PUT", "GET", "POST", "DELETE",
                          "XPUT", "XGET", "XPOST", "XDELETE"]
         if args["method"] not in valid_methods:
-            print "Unsupported method: %s" % args["method"]
+            print("Unsupported method: %s" % args["method"])
             return
 
         if "data" not in args:
@@ -524,7 +525,7 @@ class SynapseCmd(cmd.Cmd):
             try:
                 args["data"] = json.loads(args["data"])
             except Exception as e:
-                print "Data is not valid JSON. %s" % e
+                print("Data is not valid JSON. %s" % e)
                 return
 
         qp = {"access_token": self._tok()}
@@ -553,7 +554,7 @@ class SynapseCmd(cmd.Cmd):
             try:
                 timeout = int(args["timeout"])
             except ValueError:
-                print "Timeout must be in milliseconds."
+                print("Timeout must be in milliseconds.")
                 return
         reactor.callFromThread(self._do_event_stream, timeout)
 
@@ -566,7 +567,7 @@ class SynapseCmd(cmd.Cmd):
             "timeout": str(timeout),
             "from": self.event_stream_token
         })
-        print json.dumps(res, indent=4)
+        print(json.dumps(res, indent=4))
 
         if "chunk" in res:
             for event in res["chunk"]:
@@ -669,9 +670,9 @@ class SynapseCmd(cmd.Cmd):
                                                        data=data,
                                                        qparams=query_params)
         if alt_text:
-            print alt_text
+            print(alt_text)
         else:
-            print json.dumps(json_res, indent=4)
+            print(json.dumps(json_res, indent=4))
 
 
 def save_config(config):
@@ -680,16 +681,16 @@ def save_config(config):
 
 
 def main(server_url, identity_server_url, username, token, config_path):
-    print "Synapse command line client"
-    print "==========================="
-    print "Server: %s" % server_url
-    print "Type 'help' to get started."
-    print "Close this console with CTRL+C then CTRL+D."
+    print("Synapse command line client")
+    print("===========================")
+    print("Server: %s" % server_url)
+    print("Type 'help' to get started.")
+    print("Close this console with CTRL+C then CTRL+D.")
     if not username or not token:
-        print "- 'register <username>' - Register an account"
-        print "- 'stream' - Connect to the event stream"
-        print "- 'create <roomid>' - Create a room"
-        print "- 'send <roomid> <message>' - Send a message"
+        print("- 'register <username>' - Register an account")
+        print("- 'stream' - Connect to the event stream")
+        print("- 'create <roomid>' - Create a room")
+        print("- 'send <roomid> <message>' - Send a message")
     http_client = TwistedHttpClient()
 
     # the command line client
@@ -705,7 +706,7 @@ def main(server_url, identity_server_url, username, token, config_path):
                 http_client.verbose = "on" == syn_cmd.config["verbose"]
             except:
                 pass
-            print "Loaded config from %s" % config_path
+            print("Loaded config from %s" % config_path)
         except:
             pass
 
@@ -736,7 +737,7 @@ if __name__ == '__main__':
     args = parser.parse_args()
 
     if not args.server:
-        print "You must supply a server URL to communicate with."
+        print("You must supply a server URL to communicate with.")
         parser.print_help()
         sys.exit(1)
 
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import print_function
 from twisted.web.client import Agent, readBody
 from twisted.web.http_headers import Headers
 from twisted.internet import defer, reactor
@@ -141,15 +142,15 @@ class TwistedHttpClient(HttpClient):
             headers_dict["User-Agent"] = ["Synapse Cmd Client"]
 
         retries_left = 5
-        print "%s to %s with headers %s" % (method, url, headers_dict)
+        print("%s to %s with headers %s" % (method, url, headers_dict))
         if self.verbose and producer:
             if "password" in producer.data:
                 temp = producer.data["password"]
                 producer.data["password"] = "[REDACTED]"
-                print json.dumps(producer.data, indent=4)
+                print(json.dumps(producer.data, indent=4))
                 producer.data["password"] = temp
             else:
-                print json.dumps(producer.data, indent=4)
+                print(json.dumps(producer.data, indent=4))
 
         while True:
             try:
@@ -161,7 +162,7 @@ class TwistedHttpClient(HttpClient):
                 )
                 break
             except Exception as e:
-                print "uh oh: %s" % e
+                print("uh oh: %s" % e)
                 if retries_left:
                     yield self.sleep(2 ** (5 - retries_left))
                     retries_left -= 1
@@ -169,8 +170,8 @@ class TwistedHttpClient(HttpClient):
                 raise e
 
         if self.verbose:
-            print "Status %s %s" % (response.code, response.phrase)
-            print pformat(list(response.headers.getAllRawHeaders()))
+            print("Status %s %s" % (response.code, response.phrase))
+            print(pformat(list(response.headers.getAllRawHeaders())))
         defer.returnValue(response)
 
     def sleep(self, seconds):
@@ -1,3 +1,4 @@
+from __future__ import print_function
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -48,7 +49,7 @@ def make_graph(pdus, room, filename_prefix):
             c = colors.pop()
             color_map[o] = c
         except:
-            print "Run out of colours!"
+            print("Run out of colours!")
             color_map[o] = "black"
 
     graph = pydot.Dot(graph_name="Test")
@@ -93,7 +94,7 @@ def make_graph(pdus, room, filename_prefix):
             end_name = make_name(i, o)
 
             if end_name not in node_map:
-                print "%s not in nodes" % end_name
+                print("%s not in nodes" % end_name)
                 continue
 
             edge = pydot.Edge(node_map[start_name], node_map[end_name])
+11 -10
@@ -1,3 +1,4 @@
+from __future__ import print_function
 # Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -26,19 +27,19 @@ from six import string_types
 
 
 def make_graph(file_name, room_id, file_prefix, limit):
-    print "Reading lines"
+    print("Reading lines")
     with open(file_name) as f:
         lines = f.readlines()
 
-    print "Read lines"
+    print("Read lines")
 
     events = [FrozenEvent(json.loads(line)) for line in lines]
 
-    print "Loaded events."
+    print("Loaded events.")
 
     events.sort(key=lambda e: e.depth)
 
-    print "Sorted events"
+    print("Sorted events")
 
     if limit:
         events = events[-int(limit):]
@@ -55,7 +56,7 @@ def make_graph(file_name, room_id, file_prefix, limit):
         content = json.dumps(unfreeze(event.get_dict()["content"]), indent=4)
         content = content.replace("\n", "<br/>\n")
 
-        print content
+        print(content)
         content = []
         for key, value in unfreeze(event.get_dict()["content"]).items():
             if value is None:
@@ -74,7 +75,7 @@ def make_graph(file_name, room_id, file_prefix, limit):
 
         content = "<br/>\n".join(content)
 
-        print content
+        print(content)
 
         label = (
             "<"
@@ -102,7 +103,7 @@ def make_graph(file_name, room_id, file_prefix, limit):
         node_map[event.event_id] = node
         graph.add_node(node)
 
-    print "Created Nodes"
+    print("Created Nodes")
 
     for event in events:
         for prev_id, _ in event.prev_events:
@@ -120,15 +121,15 @@ def make_graph(file_name, room_id, file_prefix, limit):
             edge = pydot.Edge(node_map[event.event_id], end_node)
             graph.add_edge(edge)
 
-    print "Created edges"
+    print("Created edges")
 
     graph.write('%s.dot' % file_prefix, format='raw', prog='dot')
 
-    print "Created Dot"
+    print("Created Dot")
 
     graph.write_svg("%s.svg" % file_prefix, prog='dot')
 
-    print "Created svg"
+    print("Created svg")
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(
@@ -8,8 +8,9 @@ we set the remote SDP at which point the stream ends. Our video never gets to
 the bridge.
 
 Requires:
-npm install jquery jsdom
+    npm install jquery jsdom
 """
+from __future__ import print_function
 
 import gevent
 import grequests
@@ -51,7 +52,7 @@ class TrivialMatrixClient:
             req = grequests.get(url)
             resps = grequests.map([req])
             obj = json.loads(resps[0].content)
-            print "incoming from matrix",obj
+            print("incoming from matrix",obj)
             if 'end' not in obj:
                 continue
             self.token = obj['end']
@@ -60,22 +61,22 @@ class TrivialMatrixClient:
 
     def joinRoom(self, roomId):
         url = MATRIXBASE+'rooms/'+roomId+'/join?access_token='+self.access_token
-        print url
+        print(url)
         headers={ 'Content-Type': 'application/json' }
         req = grequests.post(url, headers=headers, data='{}')
         resps = grequests.map([req])
         obj = json.loads(resps[0].content)
-        print "response: ",obj
+        print("response: ",obj)
 
     def sendEvent(self, roomId, evType, event):
         url = MATRIXBASE+'rooms/'+roomId+'/send/'+evType+'?access_token='+self.access_token
-        print url
-        print json.dumps(event)
+        print(url)
+        print(json.dumps(event))
         headers={ 'Content-Type': 'application/json' }
         req = grequests.post(url, headers=headers, data=json.dumps(event))
         resps = grequests.map([req])
         obj = json.loads(resps[0].content)
-        print "response: ",obj
+        print("response: ",obj)
 
 
 
@@ -85,31 +86,31 @@ xmppClients = {}
 def matrixLoop():
     while True:
         ev = matrixCli.getEvent()
-        print ev
+        print(ev)
         if ev['type'] == 'm.room.member':
-            print 'membership event'
+            print('membership event')
            if ev['membership'] == 'invite' and ev['state_key'] == MYUSERNAME:
                 roomId = ev['room_id']
-                print "joining room %s" % (roomId)
+                print("joining room %s" % (roomId))
                 matrixCli.joinRoom(roomId)
         elif ev['type'] == 'm.room.message':
             if ev['room_id'] in xmppClients:
-                print "already have a bridge for that user, ignoring"
+                print("already have a bridge for that user, ignoring")
                 continue
-            print "got message, connecting"
+            print("got message, connecting")
             xmppClients[ev['room_id']] = TrivialXmppClient(ev['room_id'], ev['user_id'])
             gevent.spawn(xmppClients[ev['room_id']].xmppLoop)
         elif ev['type'] == 'm.call.invite':
-            print "Incoming call"
+            print("Incoming call")
             #sdp = ev['content']['offer']['sdp']
             #print "sdp: %s" % (sdp)
             #xmppClients[ev['room_id']] = TrivialXmppClient(ev['room_id'], ev['user_id'])
             #gevent.spawn(xmppClients[ev['room_id']].xmppLoop)
         elif ev['type'] == 'm.call.answer':
-            print "Call answered"
+            print("Call answered")
             sdp = ev['content']['answer']['sdp']
             if ev['room_id'] not in xmppClients:
-                print "We didn't have a call for that room"
+                print("We didn't have a call for that room")
                 continue
             # should probably check call ID too
             xmppCli = xmppClients[ev['room_id']]
@@ -146,7 +147,7 @@ class TrivialXmppClient:
         return obj
 
     def sendAnswer(self, answer):
-        print "sdp from matrix client",answer
+        print("sdp from matrix client",answer)
         p = subprocess.Popen(['node', 'unjingle/unjingle.js', '--sdp'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
         jingle, out_err = p.communicate(answer)
         jingle = jingle % {
@@ -156,28 +157,28 @@ class TrivialXmppClient:
             'responder': self.jid,
             'sid': self.callsid
         }
-        print "answer jingle from sdp",jingle
+        print("answer jingle from sdp",jingle)
         res = self.sendIq(jingle)
-        print "reply from answer: ",res
+        print("reply from answer: ",res)
 
         self.ssrcs = {}
         jingleSoup = BeautifulSoup(jingle)
         for cont in jingleSoup.iq.jingle.findAll('content'):
             if cont.description:
                 self.ssrcs[cont['name']] = cont.description['ssrc']
-        print "my ssrcs:",self.ssrcs
+        print("my ssrcs:",self.ssrcs)
 
         gevent.joinall([
             gevent.spawn(self.advertiseSsrcs)
         ])
 
     def advertiseSsrcs(self):
-        time.sleep(7)
-        print "SSRC spammer started"
+        time.sleep(7)
+        print("SSRC spammer started")
         while self.running:
             ssrcMsg = "<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>" % { 'tojid': "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid), 'nick': self.userId, 'assrc': self.ssrcs['audio'], 'vssrc': self.ssrcs['video'] }
             res = self.sendIq(ssrcMsg)
-            print "reply from ssrc announce: ",res
+            print("reply from ssrc announce: ",res)
             time.sleep(10)
 
 
@@ -186,19 +187,19 @@ class TrivialXmppClient:
         self.matrixCallId = time.time()
         res = self.xmppPoke("<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' to='%s' xml:lang='en' wait='60' hold='1' content='text/xml; charset=utf-8' ver='1.6' xmpp:version='1.0' xmlns:xmpp='urn:xmpp:xbosh'/>" % (self.nextRid(), HOST))
 
-        print res
+        print(res)
         self.sid = res.body['sid']
-        print "sid %s" % (self.sid)
+        print("sid %s" % (self.sid))
 
         res = self.sendIq("<auth xmlns='urn:ietf:params:xml:ns:xmpp-sasl' mechanism='ANONYMOUS'/>")
 
         res = self.xmppPoke("<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' sid='%s' to='%s' xml:lang='en' xmpp:restart='true' xmlns:xmpp='urn:xmpp:xbosh'/>" % (self.nextRid(), self.sid, HOST))
 
         res = self.sendIq("<iq type='set' id='_bind_auth_2' xmlns='jabber:client'><bind xmlns='urn:ietf:params:xml:ns:xmpp-bind'/></iq>")
-        print res
+        print(res)
 
         self.jid = res.body.iq.bind.jid.string
-        print "jid: %s" % (self.jid)
+        print("jid: %s" % (self.jid))
         self.shortJid = self.jid.split('-')[0]
 
         res = self.sendIq("<iq type='set' id='_session_auth_2' xmlns='jabber:client'><session xmlns='urn:ietf:params:xml:ns:xmpp-session'/></iq>")
@@ -217,13 +218,13 @@ class TrivialXmppClient:
             if p.c and p.c.nick:
                 u['nick'] = p.c.nick.string
             self.muc['users'].append(u)
-        print "muc: ",self.muc
+        print("muc: ",self.muc)
 
         # wait for stuff
         while True:
-            print "waiting..."
+            print("waiting...")
             res = self.sendIq("")
-            print "got from stream: ",res
+            print("got from stream: ",res)
             if res.body.iq:
                 jingles = res.body.iq.findAll('jingle')
                 if len(jingles):
@@ -232,15 +233,15 @@ class TrivialXmppClient:
             elif 'type' in res.body and res.body['type'] == 'terminate':
                 self.running = False
                 del xmppClients[self.matrixRoom]
-                return
+                return
 
     def handleInvite(self, jingle):
         self.initiator = jingle['initiator']
         self.callsid = jingle['sid']
         p = subprocess.Popen(['node', 'unjingle/unjingle.js', '--jingle'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
-        print "raw jingle invite",str(jingle)
+        print("raw jingle invite",str(jingle))
         sdp, out_err = p.communicate(str(jingle))
-        print "transformed remote offer sdp",sdp
+        print("transformed remote offer sdp",sdp)
         inviteEvent = {
             'offer': {
                 'type': 'offer',
@@ -252,7 +253,7 @@ class TrivialXmppClient:
         }
         matrixCli.sendEvent(self.matrixRoom, 'm.call.invite', inviteEvent)
 
-matrixCli = TrivialMatrixClient(ACCESS_TOKEN)
+matrixCli = TrivialMatrixClient(ACCESS_TOKEN)  # Undefined name
 
 gevent.joinall([
     gevent.spawn(matrixLoop)
@@ -1,10 +1,16 @@
#!/usr/bin/env python
from __future__ import print_function
from argparse import ArgumentParser
import json
import requests
import sys
import urllib

try:
raw_input
except NameError: # Python 3
raw_input = input

def _mkurl(template, kws):
for key in kws:
template = template.replace(key, kws[key])

@@ -13,7 +19,7 @@ def _mkurl(template, kws):
def main(hs, room_id, access_token, user_id_prefix, why):
if not why:
why = "Automated kick."
print "Kicking members on %s in room %s matching %s" % (hs, room_id, user_id_prefix)
print("Kicking members on %s in room %s matching %s" % (hs, room_id, user_id_prefix))
room_state_url = _mkurl(
"$HS/_matrix/client/api/v1/rooms/$ROOM/state?access_token=$TOKEN",
{

@@ -22,13 +28,13 @@ def main(hs, room_id, access_token, user_id_prefix, why):
"$TOKEN": access_token
}
)
print "Getting room state => %s" % room_state_url
print("Getting room state => %s" % room_state_url)
res = requests.get(room_state_url)
print "HTTP %s" % res.status_code
print("HTTP %s" % res.status_code)
state_events = res.json()
if "error" in state_events:
print "FATAL"
print state_events
print("FATAL")
print(state_events)
return

kick_list = []

@@ -44,15 +50,15 @@ def main(hs, room_id, access_token, user_id_prefix, why):
kick_list.append(event["state_key"])

if len(kick_list) == 0:
print "No user IDs match the prefix '%s'" % user_id_prefix
print("No user IDs match the prefix '%s'" % user_id_prefix)
return

print "The following user IDs will be kicked from %s" % room_name
print("The following user IDs will be kicked from %s" % room_name)
for uid in kick_list:
print uid
print(uid)
doit = raw_input("Continue? [Y]es\n")
if len(doit) > 0 and doit.lower() == 'y':
print "Kicking members..."
print("Kicking members...")
# encode them all
kick_list = [urllib.quote(uid) for uid in kick_list]
for uid in kick_list:

@@ -69,14 +75,14 @@ def main(hs, room_id, access_token, user_id_prefix, why):
"membership": "leave",
"reason": why
}
print "Kicking %s" % uid
print("Kicking %s" % uid)
res = requests.put(kick_url, data=json.dumps(kick_body))
if res.status_code != 200:
print "ERROR: HTTP %s" % res.status_code
print("ERROR: HTTP %s" % res.status_code)
if res.json().get("error"):
print "ERROR: JSON %s" % res.json()
print("ERROR: JSON %s" % res.json())

if __name__ == "__main__":
parser = ArgumentParser("Kick members in a room matching a certain user ID prefix.")
+6
@@ -1,3 +1,9 @@
matrix-synapse-py3 (1.0.0) stable; urgency=medium

* New synapse release 1.0.0.

-- Synapse Packaging team <packages@matrix.org> Tue, 11 Jun 2019 17:09:53 +0100

matrix-synapse-py3 (0.99.5.2) stable; urgency=medium

* New synapse release 0.99.5.2.
+6
-2
@@ -1,9 +1,13 @@
DO NOT USE THESE DEMO SERVERS IN PRODUCTION

Requires you to have done:
python setup.py develop

The demo start.sh will start three synapse servers on ports 8080, 8081 and 8082, with host names localhost:$port. This can be easily changed to `hostname`:$port in start.sh if required.
It will also start a web server on port 8000 pointed at the webclient.
The demo start.sh will start three synapse servers on ports 8080, 8081 and 8082, with host names localhost:$port. This can be easily changed to `hostname`:$port in start.sh if required.

To enable the servers to communicate untrusted ssl certs are used. In order to do this the servers do not check the certs
and are configured in a highly insecure way. Do not use these configuration files in production.

stop.sh will stop the synapse servers and the webclient.
+66
-4
@@ -21,14 +21,76 @@ for port in 8080 8081 8082; do
pushd demo/$port

#rm $DIR/etc/$port.config
python -m synapse.app.homeserver \
python3 -m synapse.app.homeserver \
--generate-config \
-H "localhost:$https_port" \
--config-path "$DIR/etc/$port.config" \
--report-stats no

printf '\n\n# Customisation made by demo/start.sh\n' >> $DIR/etc/$port.config
echo 'enable_registration: true' >> $DIR/etc/$port.config
if ! grep -F "Customisation made by demo/start.sh" -q $DIR/etc/$port.config; then
printf '\n\n# Customisation made by demo/start.sh\n' >> $DIR/etc/$port.config

echo 'enable_registration: true' >> $DIR/etc/$port.config

# Warning, this heredoc depends on the interaction of tabs and spaces. Please don't
# accidentaly bork me with your fancy settings.
listeners=$(cat <<-PORTLISTENERS
# Configure server to listen on both $https_port and $port
# This overides some of the default settings above
listeners:
- port: $https_port
type: http
tls: true
resources:
- names: [client, federation]

- port: $port
tls: false
bind_addresses: ['::1', '127.0.0.1']
type: http
x_forwarded: true
resources:
- names: [client, federation]
compress: false
PORTLISTENERS
)
echo "${listeners}" >> $DIR/etc/$port.config

# Disable tls for the servers
printf '\n\n# Disable tls on the servers.' >> $DIR/etc/$port.config
echo '# DO NOT USE IN PRODUCTION' >> $DIR/etc/$port.config
echo 'use_insecure_ssl_client_just_for_testing_do_not_use: true' >> $DIR/etc/$port.config
echo 'federation_verify_certificates: false' >> $DIR/etc/$port.config

# Set tls paths
echo "tls_certificate_path: \"$DIR/etc/localhost:$https_port.tls.crt\"" >> $DIR/etc/$port.config
echo "tls_private_key_path: \"$DIR/etc/localhost:$https_port.tls.key\"" >> $DIR/etc/$port.config

# Generate tls keys
openssl req -x509 -newkey rsa:4096 -keyout $DIR/etc/localhost\:$https_port.tls.key -out $DIR/etc/localhost\:$https_port.tls.crt -days 365 -nodes -subj "/O=matrix"

# Ignore keys from the trusted keys server
echo '# Ignore keys from the trusted keys server' >> $DIR/etc/$port.config
echo 'trusted_key_servers:' >> $DIR/etc/$port.config
echo ' - server_name: "matrix.org"' >> $DIR/etc/$port.config
echo ' accept_keys_insecurely: true' >> $DIR/etc/$port.config

# Reduce the blacklist
blacklist=$(cat <<-BLACK
# Set the blacklist so that it doesn't include 127.0.0.1
federation_ip_range_blacklist:
- '10.0.0.0/8'
- '172.16.0.0/12'
- '192.168.0.0/16'
- '100.64.0.0/10'
- '169.254.0.0/16'
- '::1/128'
- 'fe80::/64'
- 'fc00::/7'
BLACK
)
echo "${blacklist}" >> $DIR/etc/$port.config
fi

# Check script parameters
if [ $# -eq 1 ]; then

@@ -55,7 +117,7 @@ for port in 8080 8081 8082; do
echo "report_stats: false" >> $DIR/etc/$port.config
fi

python -m synapse.app.homeserver \
python3 -m synapse.app.homeserver \
--config-path "$DIR/etc/$port.config" \
-D \
-vv \
@@ -3,10 +3,10 @@
FROM matrixdotorg/sytest:latest

# The Sytest image doesn't come with python, so install that
RUN apt-get -qq install -y python python-dev python-pip
RUN apt-get update && apt-get -qq install -y python3 python3-dev python3-pip

# We need tox to run the tests in run_pg_tests.sh
RUN pip install tox
RUN python3 -m pip install tox

ADD run_pg_tests.sh /pg_tests.sh
ENTRYPOINT /pg_tests.sh
+1
-1
@@ -14,7 +14,7 @@ This image is designed to run either with an automatically generated
configuration file or with a custom configuration that requires manual editing.

An easy way to make use of this image is via docker-compose. See the
[contrib/docker](../contrib/docker) section of the synapse project for
[contrib/docker](https://github.com/matrix-org/synapse/tree/master/contrib/docker) section of the synapse project for
examples.

### Without Compose (harder)
@@ -17,4 +17,4 @@ su -c '/usr/lib/postgresql/9.6/bin/pg_ctl -w -D /var/lib/postgresql/data start'
# Run the tests
cd /src
export TRIAL_FLAGS="-j 4"
tox --workdir=/tmp -e py27-postgres
tox --workdir=/tmp -e py35-postgres
@@ -1,5 +1,22 @@
# MSC1711 Certificates FAQ

## Historical Note
This document was originally written to guide server admins through the upgrade
path towards Synapse 1.0. Specifically,
[MSC1711](https://github.com/matrix-org/matrix-doc/blob/master/proposals/1711-x509-for-federation.md)
required that all servers present valid TLS certificates on their federation
API. Admins were encouraged to achieve compliance from version 0.99.0 (released
in February 2019) ahead of version 1.0 (released June 2019) enforcing the
certificate checks.

Much of what follows is now outdated since most admins will have already
upgraded, however it may be of use to those with old installs returning to the
project.

If you are setting up a server from scratch you almost certainly should look at
the [installation guide](INSTALL.md) instead.

## Introduction
The goal of Synapse 0.99.0 is to act as a stepping stone to Synapse 1.0.0. It
supports the r0.1 release of the server to server specification, but is
compatible with both the legacy Matrix federation behaviour (pre-r0.1) as well
+74
-3
@@ -14,9 +14,9 @@ up and will work provided you set the ``server_name`` to match your
machine's public DNS hostname, and provide Synapse with a TLS certificate
which is valid for your ``server_name``.

Once you have completed the steps necessary to federate, you should be able to
join a room via federation. (A good place to start is ``#synapse:matrix.org`` - a
room for Synapse admins.)
Once federation has been configured, you should be able to join a room over
federation. A good place to start is ``#synapse:matrix.org`` - a room for
Synapse admins.

## Delegation

@@ -98,6 +98,77 @@ _matrix._tcp.<server_name>``. In our example, we would expect this:
Note that the target of a SRV record cannot be an alias (CNAME record): it has to point
directly to the server hosting the synapse instance.

### Delegation FAQ
#### When do I need a SRV record or .well-known URI?

If your homeserver listens on the default federation port (8448), and your
`server_name` points to the host that your homeserver runs on, you do not need an SRV
record or `.well-known/matrix/server` URI.

For instance, if you registered `example.com` and pointed its DNS A record at a
fresh server, you could install Synapse on that host,
giving it a `server_name` of `example.com`, and once [ACME](acme.md) support is enabled,
it would automatically generate a valid TLS certificate for you via Let's Encrypt
and no SRV record or .well-known URI would be needed.

This is the common case, although you can add an SRV record or
`.well-known/matrix/server` URI for completeness if you wish.

**However**, if your server does not listen on port 8448, or if your `server_name`
does not point to the host that your homeserver runs on, you will need to let
other servers know how to find it. The way to do this is via .well-known or an
SRV record.
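For illustration, a minimal sketch of what such a `.well-known` lookup involves (the hostname is a placeholder; the `m.server` key is the one read by the federation client script shown later on this page):

```python
# Minimal sketch of a .well-known delegation lookup (hostname is illustrative).
import requests

resp = requests.get("https://example.com/.well-known/matrix/server")
if resp.status_code == 200:
    print(resp.json()["m.server"])  # e.g. "synapse.example.com:443"
```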
#### I have created a .well-known URI. Do I still need an SRV record?

As of Synapse 0.99, Synapse will first check for the existence of a .well-known
URI and follow any delegation it suggests. It will only then check for the
existence of an SRV record.

That means that the SRV record will often be redundant. However, you should
remember that there may still be older versions of Synapse in the federation
which do not understand .well-known URIs, so if you removed your SRV record
you would no longer be able to federate with them.

It is therefore best to leave the SRV record in place for now. Synapse 0.34 and
earlier will follow the SRV record (and not care about the invalid
certificate). Synapse 0.99 and later will follow the .well-known URI, with the
correct certificate chain.

#### Can I manage my own certificates rather than having Synapse renew certificates itself?

Yes, you are welcome to manage your certificates yourself. Synapse will only
attempt to obtain certificates from Let's Encrypt if you configure it to do
so. The only requirement is that there is a valid TLS cert present for
federation end points.

#### Do you still recommend against using a reverse proxy on the federation port?

We no longer actively recommend against using a reverse proxy. Many admins will
find it easier to direct federation traffic to a reverse proxy and manage their
own TLS certificates, and this is a supported configuration.

See [reverse_proxy.rst](reverse_proxy.rst) for information on setting up a
reverse proxy.

#### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy?

Practically speaking, this is no longer necessary.

If you are using a reverse proxy for all of your TLS traffic, then you can set
`no_tls: True` in the Synapse config. In that case, the only reason Synapse
needs the certificate is to populate a legacy `tls_fingerprints` field in the
federation API. This is ignored by Synapse 0.99.0 and later, and the only time
pre-0.99 Synapses will check it is when attempting to fetch the server keys -
and generally this is delegated via `matrix.org`, which will be running a modern
version of Synapse.

#### Do I need the same certificate for the client and federation port?

No. There is nothing stopping you from using different certificates,
particularly if you are using a reverse proxy. However, Synapse will use the
same certificate on any ports where TLS is configured.

## Troubleshooting

You can use the [federation tester](
+2
-2
@@ -1,7 +1,7 @@
Using Postgres
--------------

Postgres version 9.4 or later is known to work.
Postgres version 9.5 or later is known to work.

Install postgres client libraries
=================================

@@ -16,7 +16,7 @@ a postgres database.
* For other pre-built packages, please consult the documentation from the
relevant package.

* If you installed synapse `in a virtualenv
<../INSTALL.md#installing-from-source>`_, you can install the library with::

~/synapse/env/bin/pip install matrix-synapse[postgres]
@@ -21,7 +21,8 @@ import argparse
import base64
import json
import sys
from urlparse import urlparse, urlunparse

from six.moves.urllib import parse as urlparse

import nacl.signing
import requests

@@ -145,7 +146,7 @@ def request_json(method, origin_name, origin_key, destination, path, content):

for key, sig in signed_json["signatures"][origin_name].items():
header = "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (origin_name, key, sig)
authorization_headers.append(bytes(header))
authorization_headers.append(header.encode("ascii"))
print("Authorization: %s" % header, file=sys.stderr)

dest = "matrix://%s%s" % (destination, path)

@@ -250,7 +251,7 @@ def read_args_from_config(args):

class MatrixConnectionAdapter(HTTPAdapter):
@staticmethod
def lookup(s):
def lookup(s, skip_well_known=False):
if s[-1] == ']':
# ipv6 literal (with no port)
return s, 8448

@@ -263,19 +264,51 @@ class MatrixConnectionAdapter(HTTPAdapter):
raise ValueError("Invalid host:port '%s'" % s)
return out[0], port

# try a .well-known lookup
if not skip_well_known:
well_known = MatrixConnectionAdapter.get_well_known(s)
if well_known:
return MatrixConnectionAdapter.lookup(
well_known, skip_well_known=True
)

try:
srv = srvlookup.lookup("matrix", "tcp", s)[0]
return srv.host, srv.port
except Exception:
return s, 8448

@staticmethod
def get_well_known(server_name):
uri = "https://%s/.well-known/matrix/server" % (server_name, )
print("fetching %s" % (uri, ), file=sys.stderr)

try:
resp = requests.get(uri)
if resp.status_code != 200:
print("%s gave %i" % (uri, resp.status_code), file=sys.stderr)
return None

parsed_well_known = resp.json()
if not isinstance(parsed_well_known, dict):
raise Exception("not a dict")
if "m.server" not in parsed_well_known:
raise Exception("Missing key 'm.server'")
new_name = parsed_well_known['m.server']
print("well-known lookup gave %s" % (new_name, ), file=sys.stderr)
return new_name

except Exception as e:
print("Invalid response from %s: %s" % (uri, e, ), file=sys.stderr)
return None

def get_connection(self, url, proxies=None):
parsed = urlparse(url)
parsed = urlparse.urlparse(url)

(host, port) = self.lookup(parsed.netloc)
netloc = "%s:%d" % (host, port)
print("Connecting to %s" % (netloc,), file=sys.stderr)
url = urlunparse(
url = urlparse.urlunparse(
("https", netloc, parsed.path, parsed.params, parsed.query, parsed.fragment)
)
return super(MatrixConnectionAdapter, self).get_connection(url, proxies)
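A hedged usage sketch for the adapter above; the server name is just an example input, and this assumes the script's existing context (`HTTPAdapter`, `requests`, `srvlookup`):

```python
# Hedged sketch: resolve a server name the way the adapter above now does.
# .well-known is consulted first, then the _matrix._tcp SRV record, and
# finally the default port 8448 is assumed.
host, port = MatrixConnectionAdapter.lookup("matrix.org")
print("federation endpoint: %s:%d" % (host, port))
```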
@@ -16,7 +16,7 @@
import argparse
import sys

from signedjson.key import write_signing_keys, generate_signing_key
from signedjson.key import generate_signing_key, write_signing_keys

from synapse.util.stringutils import random_string
@@ -54,6 +54,7 @@ BOOLEAN_COLUMNS = {
"group_roles": ["is_public"],
"local_group_membership": ["is_publicised", "is_admin"],
"e2e_room_keys": ["is_verified"],
"account_validity": ["email_sent"],
}
@@ -102,6 +102,16 @@ setup(
include_package_data=True,
zip_safe=False,
long_description=long_description,
python_requires='~=3.5',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Topic :: Communications :: Chat',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
scripts=["synctl"] + glob.glob("scripts/*"),
cmdclass={'test': TestCommand},
)
+8
-1
@@ -17,6 +17,13 @@
""" This is a reference implementation of a Matrix home server.
"""

import sys

# Check that we're not running on an unsupported Python version.
if sys.version_info < (3, 5):
print("Synapse requires Python 3.5 or above.")
sys.exit(1)

try:
from twisted.internet import protocol
from twisted.internet.protocol import Factory

@@ -27,4 +34,4 @@ try:
except ImportError:
pass

__version__ = "1.0.0rc3"
__version__ = "1.0.0"
+13
-2
@@ -184,11 +184,22 @@ class Auth(object):
return event_auth.get_public_keys(invite_event)

@defer.inlineCallbacks
def get_user_by_req(self, request, allow_guest=False, rights="access"):
def get_user_by_req(
self,
request,
allow_guest=False,
rights="access",
allow_expired=False,
):
""" Get a registered user's ID.

Args:
request - An HTTP request with an access_token query parameter.
allow_expired - Whether to allow the request through even if the account is
expired. If true, Synapse will still require an access token to be
provided but won't check if the account it belongs to has expired. This
works thanks to /login delivering access tokens regardless of accounts'
expiration.
Returns:
defer.Deferred: resolves to a ``synapse.types.Requester`` object
Raises:

@@ -229,7 +240,7 @@ class Auth(object):
is_guest = user_info["is_guest"]

# Deny the request if the user account has expired.
if self._account_validity.enabled:
if self._account_validity.enabled and not allow_expired:
user_id = user.to_string()
expiration_ts = yield self.store.get_expiration_ts_for_user(user_id)
if expiration_ts is not None and self.clock.time_msec() >= expiration_ts:
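As a hedged illustration, an endpoint that must keep working for expired accounts would pass the new flag like this (the same call appears in the account-validity servlet further down this page):

```python
# Hedged sketch: the access token is still required and validated,
# but the account-expiry check is skipped.
requester = yield self.auth.get_user_by_req(request, allow_expired=True)
```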
@@ -176,7 +176,6 @@ class SynapseHomeServer(HomeServer):

resources.update({
"/_matrix/client/api/v1": client_resource,
"/_synapse/password_reset": client_resource,
"/_matrix/client/r0": client_resource,
"/_matrix/client/unstable": client_resource,
"/_matrix/client/v2_alpha": client_resource,

@@ -541,6 +540,7 @@ def run(hs):
stats["total_room_count"] = room_count

stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
stats["monthly_active_users"] = yield hs.get_datastore().count_monthly_users()
stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms()
stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()
@@ -19,15 +19,12 @@ from __future__ import print_function

# This file can't be called email.py because if it is, we cannot:
import email.utils
import logging
import os

import pkg_resources

from ._base import Config, ConfigError

logger = logging.getLogger(__name__)

class EmailConfig(Config):
def read_config(self, config):

@@ -85,10 +82,12 @@ class EmailConfig(Config):
self.email_password_reset_behaviour = (
"remote" if email_trust_identity_server_for_password_resets else "local"
)
self.password_resets_were_disabled_due_to_email_config = False
if self.email_password_reset_behaviour == "local" and email_config == {}:
logger.warn(
"User password resets have been disabled due to lack of email config"
)
# We cannot warn the user this has happened here
# Instead do so when a user attempts to reset their password
self.password_resets_were_disabled_due_to_email_config = True

self.email_password_reset_behaviour = "off"

# Get lifetime of a validation token in milliseconds
@@ -17,8 +17,8 @@ from twisted.internet import defer

class ThirdPartyEventRules(object):
"""Allows server admins to provide a Python module implementing an extra set of rules
to apply when processing events.
"""Allows server admins to provide a Python module implementing an extra
set of rules to apply when processing events.

This is designed to help admins of closed federations with enforcing custom
behaviours.

@@ -35,7 +35,10 @@ class ThirdPartyEventRules(object):
module, config = hs.config.third_party_event_rules

if module is not None:
self.third_party_rules = module(config=config)
self.third_party_rules = module(
config=config,
http_client=hs.get_simple_http_client(),
)

@defer.inlineCallbacks
def check_event_allowed(self, event, context):

@@ -46,7 +49,7 @@ class ThirdPartyEventRules(object):
context (synapse.events.snapshot.EventContext): The context of the event.

Returns:
defer.Deferred(bool), True if the event should be allowed, False if not.
defer.Deferred[bool]: True if the event should be allowed, False if not.
"""
if self.third_party_rules is None:
defer.returnValue(True)

@@ -60,3 +63,52 @@ class ThirdPartyEventRules(object):

ret = yield self.third_party_rules.check_event_allowed(event, state_events)
defer.returnValue(ret)

@defer.inlineCallbacks
def on_create_room(self, requester, config, is_requester_admin):
"""Intercept requests to create room to allow, deny or update the
request config.

Args:
requester (Requester)
config (dict): The creation config from the client.
is_requester_admin (bool): If the requester is an admin

Returns:
defer.Deferred
"""

if self.third_party_rules is None:
return

yield self.third_party_rules.on_create_room(
requester, config, is_requester_admin
)

@defer.inlineCallbacks
def check_threepid_can_be_invited(self, medium, address, room_id):
"""Check if a provided 3PID can be invited in the given room.

Args:
medium (str): The 3PID's medium.
address (str): The 3PID's address.
room_id (str): The room we want to invite the threepid to.

Returns:
defer.Deferred[bool], True if the 3PID can be invited, False if not.
"""

if self.third_party_rules is None:
defer.returnValue(True)

state_ids = yield self.store.get_filtered_current_state_ids(room_id)
room_state_events = yield self.store.get_events(state_ids.values())

state_events = {}
for key, event_id in state_ids.items():
state_events[key] = room_state_events[event_id]

ret = yield self.third_party_rules.check_threepid_can_be_invited(
medium, address, state_events,
)
defer.returnValue(ret)
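To make the interface concrete, here is a hedged sketch of a module this class could load; only the method names and call signatures are taken from the code above, while the class name and rule logic are invented for illustration:

```python
# Hypothetical third-party rules module; only the method names/signatures
# come from the ThirdPartyEventRules interface above.
from twisted.internet import defer


class ExampleRulesModule(object):
    def __init__(self, config, http_client):
        self.config = config
        self.http_client = http_client

    def check_event_allowed(self, event, state_events):
        # Allow everything except a made-up forbidden event type.
        return defer.succeed(event.type != "com.example.forbidden")

    def on_create_room(self, requester, config, is_requester_admin):
        # Accept all room-creation requests unchanged.
        return defer.succeed(None)

    def check_threepid_can_be_invited(self, medium, address, state_events):
        # e.g. only allow email-based third-party invites.
        return defer.succeed(medium == "email")
```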
@@ -189,11 +189,21 @@ class PerDestinationQueue(object):

pending_pdus = []
while True:
device_message_edus, device_stream_id, dev_list_id = (
# We have to keep 2 free slots for presence and rr_edus
yield self._get_new_device_messages(MAX_EDUS_PER_TRANSACTION - 2)
# We have to keep 2 free slots for presence and rr_edus
limit = MAX_EDUS_PER_TRANSACTION - 2

device_update_edus, dev_list_id = (
yield self._get_device_update_edus(limit)
)

limit -= len(device_update_edus)

to_device_edus, device_stream_id = (
yield self._get_to_device_message_edus(limit)
)

pending_edus = device_update_edus + to_device_edus

# BEGIN CRITICAL SECTION
#
# In order to avoid a race condition, we need to make sure that

@@ -208,10 +218,6 @@ class PerDestinationQueue(object):
# We can only include at most 50 PDUs per transactions
pending_pdus, self._pending_pdus = pending_pdus[:50], pending_pdus[50:]

pending_edus = []

# We can only include at most 100 EDUs per transactions
# rr_edus and pending_presence take at most one slot each
pending_edus.extend(self._get_rr_edus(force_flush=False))
pending_presence = self._pending_presence
self._pending_presence = {}

@@ -232,7 +238,6 @@ class PerDestinationQueue(object):
)
)

pending_edus.extend(device_message_edus)
pending_edus.extend(
self._pop_pending_edus(MAX_EDUS_PER_TRANSACTION - len(pending_edus))
)
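A hedged recap of the budget arithmetic this hunk introduces (the value 100 comes from the comment above):

```python
# Sketch of the per-transaction EDU budget after this change:
MAX_EDUS_PER_TRANSACTION = 100        # per the comment above
limit = MAX_EDUS_PER_TRANSACTION - 2  # one slot reserved each for rr_edus and presence
# Device-list updates are fetched first, then to-device messages fill what
# remains, so len(device_update_edus) + len(to_device_edus) <= limit holds.
```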
@@ -272,10 +277,13 @@ class PerDestinationQueue(object):
sent_edus_by_type.labels(edu.edu_type).inc()
# Remove the acknowledged device messages from the database
# Only bother if we actually sent some device messages
if device_message_edus:
if to_device_edus:
yield self._store.delete_device_msgs_for_remote(
self._destination, device_stream_id
)

# also mark the device updates as sent
if device_update_edus:
logger.info(
"Marking as sent %r %r", self._destination, dev_list_id
)

@@ -347,7 +355,7 @@ class PerDestinationQueue(object):
return pending_edus

@defer.inlineCallbacks
def _get_new_device_messages(self, limit):
def _get_device_update_edus(self, limit):
last_device_list = self._last_device_list_stream_id

# Retrieve list of new device updates to send to the destination

@@ -366,15 +374,19 @@ class PerDestinationQueue(object):

assert len(edus) <= limit, "get_devices_by_remote returned too many EDUs"

defer.returnValue((edus, now_stream_id))

@defer.inlineCallbacks
def _get_to_device_message_edus(self, limit):
last_device_stream_id = self._last_device_stream_id
to_device_stream_id = self._store.get_to_device_stream_token()
contents, stream_id = yield self._store.get_new_device_msgs_for_remote(
self._destination,
last_device_stream_id,
to_device_stream_id,
limit - len(edus),
limit,
)
edus.extend(
edus = [
Edu(
origin=self._server_name,
destination=self._destination,

@@ -382,6 +394,6 @@ class PerDestinationQueue(object):
content=content,
)
for content in contents
)
]

defer.returnValue((edus, stream_id, now_stream_id))
defer.returnValue((edus, stream_id))
@@ -42,7 +42,7 @@ from signedjson.sign import sign_json

from twisted.internet import defer

from synapse.api.errors import RequestSendFailed, SynapseError
from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import get_domain_from_id
from synapse.util.logcontext import run_in_background

@@ -132,9 +132,10 @@ class GroupAttestionRenewer(object):
self.is_mine_id = hs.is_mine_id
self.attestations = hs.get_groups_attestation_signing()

self._renew_attestations_loop = self.clock.looping_call(
self._start_renew_attestations, 30 * 60 * 1000,
)
if not hs.config.worker_app:
self._renew_attestations_loop = self.clock.looping_call(
self._start_renew_attestations, 30 * 60 * 1000,
)

@defer.inlineCallbacks
def on_renew_attestation(self, group_id, user_id, content):

@@ -194,7 +195,7 @@ class GroupAttestionRenewer(object):
yield self.store.update_attestation_renewal(
group_id, user_id, attestation
)
except RequestSendFailed as e:
except (RequestSendFailed, HttpResponseException) as e:
logger.warning(
"Failed to renew attestation of %r in %r: %s",
user_id, group_id, e,

@@ -110,6 +110,9 @@ class AccountValidityHandler(object):
# Stop right here if the user doesn't have at least one email address.
# In this case, they will have to ask their server admin to renew their
# account manually.
# We don't need to do a specific check to make sure the account isn't
# deactivated, as a deactivated account isn't supposed to have any
# email address attached to it.
if not addresses:
return
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2017, 2018 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -42,6 +43,8 @@ class DeactivateAccountHandler(BaseHandler):
# it left off (if it has work left to do).
hs.get_reactor().callWhenRunning(self._start_user_parting)

self._account_validity_enabled = hs.config.account_validity.enabled

@defer.inlineCallbacks
def deactivate_account(self, user_id, erase_data, id_server=None):
"""Deactivate a user's account

@@ -114,6 +117,13 @@ class DeactivateAccountHandler(BaseHandler):
# parts users from rooms (if it isn't already running)
self._start_user_parting()

# Remove all information on the user from the account_validity table.
if self._account_validity_enabled:
yield self.store.delete_account_validity_for_user(user_id)

# Mark the user as deactivated.
yield self.store.set_user_deactivated_status(user_id, True)

defer.returnValue(identity_server_supports_unbinding)

def _start_user_parting(self):
@@ -2677,12 +2677,6 @@ class FederationHandler(BaseHandler):
# though the sender isn't a local user.
event.internal_metadata.send_on_behalf_of = get_domain_from_id(event.sender)

# XXX we send the invite here, but send_membership_event also sends it,
# so we end up making two requests. I think this is redundant.
returned_invite = yield self.send_invite(origin, event)
# TODO: Make sure the signatures actually are correct.
event.signatures.update(returned_invite.signatures)

member_handler = self.hs.get_room_member_handler()
yield member_handler.send_membership_event(None, event, context)

@@ -2750,25 +2744,55 @@ class FederationHandler(BaseHandler):
if not invite_event:
raise AuthError(403, "Could not find invite")

logger.debug("Checking auth on event %r", event.content)

last_exception = None
# for each public key in the 3pid invite event
for public_key_object in self.hs.get_auth().get_public_keys(invite_event):
try:
# for each sig on the third_party_invite block of the actual invite
for server, signature_block in signed["signatures"].items():
for key_name, encoded_signature in signature_block.items():
if not key_name.startswith("ed25519:"):
continue

public_key = public_key_object["public_key"]
verify_key = decode_verify_key_bytes(
key_name,
decode_base64(public_key)
logger.debug(
"Attempting to verify sig with key %s from %r "
"against pubkey %r",
key_name, server, public_key_object,
)
verify_signed_json(signed, server, verify_key)
if "key_validity_url" in public_key_object:
yield self._check_key_revocation(
public_key,

try:
public_key = public_key_object["public_key"]
verify_key = decode_verify_key_bytes(
key_name,
decode_base64(public_key)
)
verify_signed_json(signed, server, verify_key)
logger.debug(
"Successfully verified sig with key %s from %r "
"against pubkey %r",
key_name, server, public_key_object,
)
except Exception:
logger.info(
"Failed to verify sig with key %s from %r "
"against pubkey %r",
key_name, server, public_key_object,
)
raise
try:
if "key_validity_url" in public_key_object:
yield self._check_key_revocation(
public_key,
public_key_object["key_validity_url"]
)
except Exception:
logger.info(
"Failed to query key_validity_url %s",
public_key_object["key_validity_url"]
)
raise
return
except Exception as e:
last_exception = e

@@ -49,9 +49,7 @@ def _create_rerouter(func_name):
def http_response_errback(failure):
failure.trap(HttpResponseException)
e = failure.value
if e.code == 403:
raise e.to_synapse_error()
return failure
raise e.to_synapse_error()

def request_failed_errback(failure):
failure.trap(RequestSendFailed)
+16
-13
@@ -15,12 +15,15 @@

import logging

from six import raise_from

from twisted.internet import defer

from synapse.api.errors import (
AuthError,
CodeMessageException,
Codes,
HttpResponseException,
RequestSendFailed,
StoreError,
SynapseError,
)

@@ -85,10 +88,10 @@ class BaseProfileHandler(BaseHandler):
ignore_backoff=True,
)
defer.returnValue(result)
except CodeMessageException as e:
if e.code != 404:
logger.exception("Failed to get displayname")
raise
except RequestSendFailed as e:
raise_from(SynapseError(502, "Failed to fetch profile"), e)
except HttpResponseException as e:
raise e.to_synapse_error()

@defer.inlineCallbacks
def get_profile_from_cache(self, user_id):

@@ -142,10 +145,10 @@ class BaseProfileHandler(BaseHandler):
},
ignore_backoff=True,
)
except CodeMessageException as e:
if e.code != 404:
logger.exception("Failed to get displayname")
raise
except RequestSendFailed as e:
raise_from(SynapseError(502, "Failed to fetch profile"), e)
except HttpResponseException as e:
raise e.to_synapse_error()

defer.returnValue(result["displayname"])

@@ -208,10 +211,10 @@ class BaseProfileHandler(BaseHandler):
},
ignore_backoff=True,
)
except CodeMessageException as e:
if e.code != 404:
logger.exception("Failed to get avatar_url")
raise
except RequestSendFailed as e:
raise_from(SynapseError(502, "Failed to fetch profile"), e)
except HttpResponseException as e:
raise e.to_synapse_error()

defer.returnValue(result["avatar_url"])
@@ -75,6 +75,10 @@ class RoomCreationHandler(BaseHandler):
# linearizer to stop two upgrades happening at once
self._upgrade_linearizer = Linearizer("room_upgrade_linearizer")

self._server_notices_mxid = hs.config.server_notices_mxid

self.third_party_event_rules = hs.get_third_party_event_rules()

@defer.inlineCallbacks
def upgrade_room(self, requester, old_room_id, new_version):
"""Replace a room with a new room with a different version

@@ -470,7 +474,26 @@ class RoomCreationHandler(BaseHandler):

yield self.auth.check_auth_blocking(user_id)

if not self.spam_checker.user_may_create_room(user_id):
if (self._server_notices_mxid is not None and
requester.user.to_string() == self._server_notices_mxid):
# allow the server notices mxid to create rooms
is_requester_admin = True
else:
is_requester_admin = yield self.auth.is_server_admin(
requester.user,
)

# Check whether the third party rules allows/changes the room create
# request.
yield self.third_party_event_rules.on_create_room(
requester,
config,
is_requester_admin=is_requester_admin,
)

if not is_requester_admin and not self.spam_checker.user_may_create_room(
user_id,
):
raise SynapseError(403, "You are not permitted to create rooms")

if ratelimit:

@@ -72,6 +72,7 @@ class RoomMemberHandler(object):

self.clock = hs.get_clock()
self.spam_checker = hs.get_spam_checker()
self.third_party_event_rules = hs.get_third_party_event_rules()
self._server_notices_mxid = self.config.server_notices_mxid
self._enable_lookup = hs.config.enable_3pid_lookup
self.allow_per_room_profiles = self.config.allow_per_room_profiles

@@ -723,6 +724,15 @@ class RoomMemberHandler(object):
# can't just rely on the standard ratelimiting of events.
yield self.base_handler.ratelimit(requester)

can_invite = yield self.third_party_event_rules.check_threepid_can_be_invited(
medium, address, room_id,
)
if not can_invite:
raise SynapseError(
403, "This third-party identifier can not be invited in this room",
Codes.FORBIDDEN,
)

invitee = yield self._lookup_3pid(
id_server, medium, address
)
@@ -17,7 +17,7 @@
import logging
from io import BytesIO

from six import text_type
from six import raise_from, text_type
from six.moves import urllib

import treq

@@ -542,10 +542,15 @@ class SimpleHttpClient(object):
length = yield make_deferred_yieldable(
_readBodyToFile(response, output_stream, max_size)
)
except SynapseError:
# This can happen e.g. because the body is too large.
raise
except Exception as e:
logger.exception("Failed to download body")
raise SynapseError(
502, ("Failed to download remote body: %s" % e), Codes.UNKNOWN
raise_from(
SynapseError(
502, ("Failed to download remote body: %s" % e),
),
e
)

defer.returnValue(
+91
-20
@@ -25,7 +25,7 @@ import six

import attr
from prometheus_client import Counter, Gauge, Histogram
from prometheus_client.core import REGISTRY, GaugeMetricFamily
from prometheus_client.core import REGISTRY, GaugeMetricFamily, HistogramMetricFamily

from twisted.internet import reactor

@@ -40,7 +40,6 @@ HAVE_PROC_SELF_STAT = os.path.exists("/proc/self/stat")

class RegistryProxy(object):

@staticmethod
def collect():
for metric in REGISTRY.collect():

@@ -63,10 +62,7 @@ class LaterGauge(object):
try:
calls = self.caller()
except Exception:
logger.exception(
"Exception running callback for LaterGauge(%s)",
self.name,
)
logger.exception("Exception running callback for LaterGauge(%s)", self.name)
yield g
return

@@ -116,9 +112,7 @@ class InFlightGauge(object):
# Create a class which have the sub_metrics values as attributes, which
# default to 0 on initialization. Used to pass to registered callbacks.
self._metrics_class = attr.make_class(
"_MetricsEntry",
attrs={x: attr.ib(0) for x in sub_metrics},
slots=True,
"_MetricsEntry", attrs={x: attr.ib(0) for x in sub_metrics}, slots=True
)

# Counts number of in flight blocks for a given set of label values

@@ -157,7 +151,9 @@ class InFlightGauge(object):

Note: may be called by a separate thread.
"""
in_flight = GaugeMetricFamily(self.name + "_total", self.desc, labels=self.labels)
in_flight = GaugeMetricFamily(
self.name + "_total", self.desc, labels=self.labels
)

metrics_by_key = {}

@@ -179,7 +175,9 @@ class InFlightGauge(object):
yield in_flight

for name in self.sub_metrics:
gauge = GaugeMetricFamily("_".join([self.name, name]), "", labels=self.labels)
gauge = GaugeMetricFamily(
"_".join([self.name, name]), "", labels=self.labels
)
for key, metrics in six.iteritems(metrics_by_key):
gauge.add_metric(key, getattr(metrics, name))
yield gauge

@@ -193,12 +191,74 @@ class InFlightGauge(object):
all_gauges[self.name] = self

@attr.s(hash=True)
class BucketCollector(object):
"""
Like a Histogram, but allows buckets to be point-in-time instead of
incrementally added to.

Args:
name (str): Base name of metric to be exported to Prometheus.
data_collector (callable -> dict): A synchronous callable that
returns a dict mapping bucket to number of items in the
bucket. If these buckets are not the same as the buckets
given to this class, they will be remapped into them.
buckets (list[float]): List of floats/ints of the buckets to
give to Prometheus. +Inf is ignored, if given.

"""

name = attr.ib()
data_collector = attr.ib()
buckets = attr.ib()

def collect(self):

# Fetch the data -- this must be synchronous!
data = self.data_collector()

buckets = {}

res = []
for x in data.keys():
for i, bound in enumerate(self.buckets):
if x <= bound:
buckets[bound] = buckets.get(bound, 0) + data[x]

for i in self.buckets:
res.append([str(i), buckets.get(i, 0)])

res.append(["+Inf", sum(data.values())])

metric = HistogramMetricFamily(
self.name,
"",
buckets=res,
sum_value=sum([x * y for x, y in data.items()]),
)
yield metric

def __attrs_post_init__(self):
self.buckets = [float(x) for x in self.buckets if x != "+Inf"]
if self.buckets != sorted(self.buckets):
raise ValueError("Buckets not sorted")

self.buckets = tuple(self.buckets)

if self.name in all_gauges.keys():
logger.warning("%s already registered, reregistering" % (self.name,))
REGISTRY.unregister(all_gauges.pop(self.name))

REGISTRY.register(self)
all_gauges[self.name] = self
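A hedged usage sketch for the new collector; the metric name and bucket data below are invented for illustration:

```python
# Hypothetical registration of a BucketCollector. Construction alone is
# enough: __attrs_post_init__ registers it with the global REGISTRY, and
# collect() re-buckets the point-in-time data on every scrape.
forward_extremities = BucketCollector(
    "example_forward_extremities",
    lambda: {1: 10, 2: 4, 5: 1},   # observed value -> number of items
    buckets=[1, 2, 3, 5, 7, 10, "+Inf"],
)
```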
#
# Detailed CPU metrics
#

class CPUMetrics(object):
def __init__(self):
ticks_per_sec = 100
try:

@@ -237,13 +297,28 @@ gc_time = Histogram(
"python_gc_time",
"Time taken to GC (sec)",
["gen"],
buckets=[0.0025, 0.005, 0.01, 0.025, 0.05, 0.10, 0.25, 0.50, 1.00, 2.50,
5.00, 7.50, 15.00, 30.00, 45.00, 60.00],
buckets=[
0.0025,
0.005,
0.01,
0.025,
0.05,
0.10,
0.25,
0.50,
1.00,
2.50,
5.00,
7.50,
15.00,
30.00,
45.00,
60.00,
],
)

class GCCounts(object):

def collect(self):
cm = GaugeMetricFamily("python_gc_counts", "GC object counts", labels=["gen"])
for n, m in enumerate(gc.get_count()):

@@ -279,9 +354,7 @@ sent_transactions_counter = Counter("synapse_federation_client_sent_transactions
events_processed_counter = Counter("synapse_federation_client_events_processed", "")

event_processing_loop_counter = Counter(
"synapse_event_processing_loop_count",
"Event processing loop iterations",
["name"],
"synapse_event_processing_loop_count", "Event processing loop iterations", ["name"]
)

event_processing_loop_room_count = Counter(

@@ -311,7 +384,6 @@ last_ticked = time.time()

class ReactorLastSeenMetric(object):

def collect(self):
cm = GaugeMetricFamily(
"python_twisted_reactor_last_seen",

@@ -325,7 +397,6 @@ REGISTRY.register(ReactorLastSeenMetric())

def runUntilCurrentTimer(func):

@functools.wraps(func)
def f(*args, **kwargs):
now = reactor.seconds()
@@ -114,6 +114,21 @@ class EmailPusher(object):

run_as_background_process("emailpush.process", self._process)

def _pause_processing(self):
"""Used by tests to temporarily pause processing of events.

Asserts that its not currently processing.
"""
assert not self._is_processing
self._is_processing = True

def _resume_processing(self):
"""Used by tests to resume processing of events after pausing.
"""
assert self._is_processing
self._is_processing = False
self._start_processing()

@defer.inlineCallbacks
def _process(self):
# we should never get here if we are already processing

@@ -215,6 +230,10 @@ class EmailPusher(object):

@defer.inlineCallbacks
def save_last_stream_ordering_and_success(self, last_stream_ordering):
if last_stream_ordering is None:
# This happens if we haven't yet processed anything
return

self.last_stream_ordering = last_stream_ordering
yield self.store.update_pusher_last_stream_ordering_and_success(
self.app_id, self.email, self.user_id,

@@ -117,7 +117,7 @@ class Mailer(object):

link = (
self.hs.config.public_baseurl +
"_synapse/password_reset/email/submit_token"
"_matrix/client/unstable/password_reset/email/submit_token"
"?token=%s&client_secret=%s&sid=%s" %
(token, client_secret, sid)
)

@@ -162,6 +162,17 @@ def calculate_room_name(store, room_state_ids, user_id, fallback_to_members=True

def descriptor_from_member_events(member_events):
"""Get a description of the room based on the member events.

Args:
member_events (Iterable[FrozenEvent])

Returns:
str
"""

member_events = list(member_events)

if len(member_events) == 0:
return "nobody"
elif len(member_events) == 1:
@@ -60,6 +60,11 @@ class PusherPool:
def add_pusher(self, user_id, access_token, kind, app_id,
app_display_name, device_display_name, pushkey, lang, data,
profile_tag=""):
"""Creates a new pusher and adds it to the pool

Returns:
Deferred[EmailPusher|HttpPusher]
"""
time_now_msec = self.clock.time_msec()

# we try to create the pusher just to validate the config: it

@@ -103,7 +108,9 @@ class PusherPool:
last_stream_ordering=last_stream_ordering,
profile_tag=profile_tag,
)
yield self.start_pusher_by_id(app_id, pushkey, user_id)
pusher = yield self.start_pusher_by_id(app_id, pushkey, user_id)

defer.returnValue(pusher)

@defer.inlineCallbacks
def remove_pushers_by_app_id_and_pushkey_not_user(self, app_id, pushkey,

@@ -184,7 +191,11 @@ class PusherPool:

@defer.inlineCallbacks
def start_pusher_by_id(self, app_id, pushkey, user_id):
"""Look up the details for the given pusher, and start it"""
"""Look up the details for the given pusher, and start it

Returns:
Deferred[EmailPusher|HttpPusher|None]: The pusher started, if any
"""
if not self._should_start_pushers:
return

@@ -192,13 +203,16 @@ class PusherPool:
app_id, pushkey
)

p = None
pusher_dict = None
for r in resultlist:
if r['user_name'] == user_id:
p = r
pusher_dict = r

if p:
yield self._start_pusher(p)
pusher = None
if pusher_dict:
pusher = yield self._start_pusher(pusher_dict)

defer.returnValue(pusher)

@defer.inlineCallbacks
def _start_pushers(self):

@@ -224,7 +238,7 @@ class PusherPool:
pusherdict (dict):

Returns:
None
Deferred[EmailPusher|HttpPusher]
"""
try:
p = self.pusher_factory.create_pusher(pusherdict)

@@ -270,6 +284,8 @@ class PusherPool:

p.on_started(have_notifs)

defer.returnValue(p)

@defer.inlineCallbacks
def remove_pusher(self, app_id, pushkey, user_id):
appid_pushkey = "%s:%s" % (app_id, pushkey)
@@ -44,7 +44,7 @@ REQUIREMENTS = [
"canonicaljson>=1.1.3",
"signedjson>=1.0.0",
"pynacl>=1.2.1",
"idna>=2",
"idna>=2.5",

# validating SSL certs for IP addresses requires service_identity 18.1.
"service_identity>=18.1.0",

@@ -65,7 +65,7 @@ REQUIREMENTS = [
"sortedcontainers>=1.4.4",
"psutil>=2.0.0",
"pymacaroons>=0.13.0",
"msgpack>=0.5.0",
"msgpack>=0.5.2",
"phonenumbers>=8.2.0",
"six>=1.10",
# prometheus_client 0.4.0 changed the format of counter metrics

@@ -80,7 +80,7 @@ REQUIREMENTS = [
]

CONDITIONAL_REQUIREMENTS = {
"email": ["Jinja2>=2.9", "bleach>=1.4.2"],
"email": ["Jinja2>=2.9", "bleach>=1.4.3"],
"matrix-synapse-ldap3": ["matrix-synapse-ldap3>=0.1"],

# we use execute_batch, which arrived in psycopg 2.7.
@@ -17,11 +17,17 @@ import abc
import logging
import re

from six import raise_from
from six.moves import urllib

from twisted.internet import defer

from synapse.api.errors import CodeMessageException, HttpResponseException
from synapse.api.errors import (
CodeMessageException,
HttpResponseException,
RequestSendFailed,
SynapseError,
)
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.stringutils import random_string

@@ -175,6 +181,8 @@ class ReplicationEndpoint(object):
# on the master process that we should send to the client. (And
# importantly, not stack traces everywhere)
raise e.to_synapse_error()
except RequestSendFailed as e:
raise_from(SynapseError(502, "Failed to talk to master"), e)

defer.returnValue(result)

@@ -1,6 +1,6 @@
<html>
<head></head>
<body>
<p>Your password was successfully reset. You may now close this window.</p>
<p>Your email has now been validated, please return to your client to reset your password. You may now close this window.</p>
</body>
</html>
@@ -15,7 +15,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
 import re

-from six.moves import http_client

@@ -68,7 +67,13 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
     @defer.inlineCallbacks
     def on_POST(self, request):
         if self.config.email_password_reset_behaviour == "off":
-            raise SynapseError(400, "Password resets have been disabled on this server")
+            if self.config.password_resets_were_disabled_due_to_email_config:
+                logger.warn(
+                    "User password resets have been disabled due to lack of email config"
+                )
+            raise SynapseError(
+                400, "Email-based password resets have been disabled on this server",
+            )

         body = parse_json_object_from_request(request)
@@ -196,9 +201,6 @@ class MsisdnPasswordRequestTokenRestServlet(RestServlet):

     @defer.inlineCallbacks
     def on_POST(self, request):
-        if not self.config.email_password_reset_behaviour == "off":
-            raise SynapseError(400, "Password resets have been disabled on this server")
-
         body = parse_json_object_from_request(request)

         assert_params_in_dict(body, [
@@ -228,9 +230,11 @@ class MsisdnPasswordRequestTokenRestServlet(RestServlet):

 class PasswordResetSubmitTokenServlet(RestServlet):
     """Handles 3PID validation token submission"""
-    PATTERNS = [
-        re.compile("^/_synapse/password_reset/(?P<medium>[^/]*)/submit_token/*$"),
-    ]
+    PATTERNS = client_patterns(
+        "/password_reset/(?P<medium>[^/]*)/submit_token/*$",
+        releases=(),
+        unstable=True,
+    )

     def __init__(self, hs):
         """
@@ -251,6 +255,14 @@ class PasswordResetSubmitTokenServlet(RestServlet):
                 400,
                 "This medium is currently not supported for password resets",
             )
+        if self.config.email_password_reset_behaviour == "off":
+            if self.config.password_resets_were_disabled_due_to_email_config:
+                logger.warn(
+                    "User password resets have been disabled due to lack of email config"
+                )
+            raise SynapseError(
+                400, "Email-based password resets have been disabled on this server",
+            )

         sid = parse_string(request, "sid")
         client_secret = parse_string(request, "client_secret")
@@ -79,7 +79,7 @@ class AccountValiditySendMailServlet(RestServlet):
         if not self.account_validity.renew_by_email_enabled:
             raise AuthError(403, "Account renewal via email is disabled on this server.")

-        requester = yield self.auth.get_user_by_req(request)
+        requester = yield self.auth.get_user_by_req(request, allow_expired=True)
         user_id = requester.user.to_string()
         yield self.account_activity_handler.send_renewal_email_to_user(user_id)
@@ -386,8 +386,10 @@ class MediaRepository(object):
             raise SynapseError(502, "Failed to fetch remote media")

         except SynapseError:
-            logger.exception("Failed to fetch remote media %s/%s",
-                             server_name, media_id)
+            logger.warn(
+                "Failed to fetch remote media %s/%s",
+                server_name, media_id,
+            )
             raise
         except NotRetryingDestination:
             logger.warn("Not retrying destination %r", server_name)
@@ -279,23 +279,37 @@ class DataStore(
         """
         Counts the number of users who used this homeserver in the last 24 hours.
         """
+        yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24)
+        return self.runInteraction("count_daily_users", self._count_users, yesterday,)

-        def _count_users(txn):
-            yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24)
-
-            sql = """
-                SELECT COALESCE(count(*), 0) FROM (
-                    SELECT user_id FROM user_ips
-                    WHERE last_seen > ?
-                    GROUP BY user_id
-                ) u
-            """
-
-            txn.execute(sql, (yesterday,))
-            count, = txn.fetchone()
-            return count
-
-        return self.runInteraction("count_users", _count_users)
+    def count_monthly_users(self):
+        """
+        Counts the number of users who used this homeserver in the last 30 days.
+        Note this method is intended for phonehome metrics only and is different
+        from the mau figure in synapse.storage.monthly_active_users which,
+        amongst other things, includes a 3 day grace period before a user counts.
+        """
+        thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30)
+        return self.runInteraction(
+            "count_monthly_users",
+            self._count_users,
+            thirty_days_ago,
+        )
+
+    def _count_users(self, txn, time_from):
+        """
+        Returns number of users seen in the past time_from period
+        """
+        sql = """
+            SELECT COALESCE(count(*), 0) FROM (
+                SELECT user_id FROM user_ips
+                WHERE last_seen > ?
+                GROUP BY user_id
+            ) u
+        """
+        txn.execute(sql, (time_from,))
+        count, = txn.fetchone()
+        return count

     def count_r30_users(self):
         """
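The refactor above shares one `_count_users(txn, time_from)` between the daily and monthly counts; only the cut-off timestamp differs. A standalone sketch of the same query shape against an in-memory SQLite stand-in for user_ips (assumed schema, not the real table definition):

import sqlite3
import time

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE user_ips (user_id TEXT, last_seen BIGINT)")

now_ms = int(time.time() * 1000)
day_ms = 24 * 60 * 60 * 1000
conn.executemany(
    "INSERT INTO user_ips VALUES (?, ?)",
    [
        ("@a:hs", now_ms),                # seen twice today
        ("@a:hs", now_ms - 1000),
        ("@b:hs", now_ms - 10 * day_ms),  # seen ten days ago
    ],
)

def count_users(time_from):
    # GROUP BY collapses repeated rows per user; COALESCE guards the empty case.
    sql = """
        SELECT COALESCE(count(*), 0) FROM (
            SELECT user_id FROM user_ips WHERE last_seen > ? GROUP BY user_id
        ) u
    """
    return conn.execute(sql, (time_from,)).fetchone()[0]

print(count_users(now_ms - day_ms))       # daily window: 1
print(count_users(now_ms - 30 * day_ms))  # monthly window: 2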
@@ -299,12 +299,12 @@ class SQLBaseStore(object):

         def select_users_with_no_expiration_date_txn(txn):
             """Retrieves the list of registered users with no expiration date from the
-            database.
+            database, filtering out deactivated users.
             """
             sql = (
                 "SELECT users.name FROM users"
                 " LEFT JOIN account_validity ON (users.name = account_validity.user_id)"
-                " WHERE account_validity.user_id is NULL;"
+                " WHERE account_validity.user_id is NULL AND users.deactivated = 0;"
             )
             txn.execute(sql, [])
@@ -45,6 +45,10 @@ class PostgresEngine(object):
         # together. For example, version 8.1.5 will be returned as 80105
         self._version = db_conn.server_version

+        # Are we on a supported PostgreSQL version?
+        if self._version < 90500:
+            raise RuntimeError("Synapse requires PostgreSQL 9.5+ or above.")
+
         db_conn.set_isolation_level(
             self.module.extensions.ISOLATION_LEVEL_REPEATABLE_READ
         )
@@ -64,9 +68,9 @@ class PostgresEngine(object):
     @property
     def can_native_upsert(self):
         """
-        Can we use native UPSERTs? This requires PostgreSQL 9.5+.
+        Can we use native UPSERTs?
         """
-        return self._version >= 90500
+        return True

     def is_deadlock(self, error):
         if isinstance(error, self.module.DatabaseError):
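The 90500 cut-off works because psycopg2 reports server_version as a single integer with two digits per component (as the retained comment notes, 8.1.5 is returned as 80105). A tiny sketch of that pre-PostgreSQL-10 numbering:

def decode_pg_version(v):
    # Two digits per component: MMmmpp -> (major, minor, patch).
    return (v // 10000, (v // 100) % 100, v % 100)

assert decode_pg_version(90501) == (9, 5, 1)  # PostgreSQL 9.5.1
assert 90501 >= 90500                         # passes the new version check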
@@ -17,7 +17,7 @@

 import itertools
 import logging
-from collections import OrderedDict, deque, namedtuple
+from collections import Counter as c_counter, OrderedDict, deque, namedtuple
 from functools import wraps

 from six import iteritems, text_type
@@ -33,6 +33,7 @@ from synapse.api.constants import EventTypes
 from synapse.api.errors import SynapseError
 from synapse.events import EventBase  # noqa: F401
 from synapse.events.snapshot import EventContext  # noqa: F401
+from synapse.metrics import BucketCollector
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.state import StateResolutionStore
 from synapse.storage.background_updates import BackgroundUpdateStore
@@ -220,13 +221,39 @@ class EventsStore(
     EventsWorkerStore,
     BackgroundUpdateStore,
 ):
     def __init__(self, db_conn, hs):
         super(EventsStore, self).__init__(db_conn, hs)

         self._event_persist_queue = _EventPeristenceQueue()
         self._state_resolution_handler = hs.get_state_resolution_handler()

+        # Collect metrics on the number of forward extremities that exist.
+        # Counter of number of extremities to count
+        self._current_forward_extremities_amount = c_counter()
+
+        BucketCollector(
+            "synapse_forward_extremities",
+            lambda: self._current_forward_extremities_amount,
+            buckets=[1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"]
+        )
+
+        # Read the extrems every 60 minutes
+        hs.get_clock().looping_call(self._read_forward_extremities, 60 * 60 * 1000)
+
+    @defer.inlineCallbacks
+    def _read_forward_extremities(self):
+        def fetch(txn):
+            txn.execute(
+                """
+                select count(*) c from event_forward_extremities
+                group by room_id
+                """
+            )
+            return txn.fetchall()
+
+        res = yield self.runInteraction("read_forward_extremities", fetch)
+        self._current_forward_extremities_amount = c_counter(list(x[0] for x in res))

     @defer.inlineCallbacks
     def persist_events(self, events_and_contexts, backfilled=False):
         """
@@ -568,17 +595,11 @@ class EventsStore(
             )

             txn.execute(sql, batch)
-            results.extend(
-                r[0]
-                for r in txn
-                if not json.loads(r[1]).get("soft_failed")
-            )
+            results.extend(r[0] for r in txn if not json.loads(r[1]).get("soft_failed"))

         for chunk in batch_iter(event_ids, 100):
             yield self.runInteraction(
-                "_get_events_which_are_prevs",
-                _get_events_which_are_prevs_txn,
-                chunk,
+                "_get_events_which_are_prevs", _get_events_which_are_prevs_txn, chunk
             )

         defer.returnValue(results)
@@ -640,9 +661,7 @@ class EventsStore(

         for chunk in batch_iter(event_ids, 100):
             yield self.runInteraction(
-                "_get_prevs_before_rejected",
-                _get_prevs_before_rejected_txn,
-                chunk,
+                "_get_prevs_before_rejected", _get_prevs_before_rejected_txn, chunk
             )

         defer.returnValue(existing_prevs)
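A note on the c_counter trick above (standalone illustration, not Synapse code): each fetched row is "number of forward extremities in one room", and collections.Counter turns that list into a size-to-number-of-rooms mapping, which is exactly what a bucketing collector wants to read:

from collections import Counter

rows = [(1,), (1,), (3,), (7,)]        # count(*) per room_id, as fetched
current = Counter(x[0] for x in rows)  # Counter({1: 2, 3: 1, 7: 1})
print(current)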
@@ -15,6 +15,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

 import logging
+import re

 from six import iterkeys
@@ -31,6 +32,8 @@ from synapse.util.caches.descriptors import cached, cachedInlineCallbacks

+THIRTY_MINUTES_IN_MS = 30 * 60 * 1000
+
 logger = logging.getLogger(__name__)


 class RegistrationWorkerStore(SQLBaseStore):
     def __init__(self, db_conn, hs):
@@ -248,6 +251,20 @@ class RegistrationWorkerStore(SQLBaseStore):
             desc="set_renewal_mail_status",
         )

+    @defer.inlineCallbacks
+    def delete_account_validity_for_user(self, user_id):
+        """Deletes the entry for the given user in the account validity table, removing
+        their expiration date and renewal token.
+
+        Args:
+            user_id (str): ID of the user to remove from the account validity table.
+        """
+        yield self._simple_delete_one(
+            table="account_validity",
+            keyvalues={"user_id": user_id},
+            desc="delete_account_validity_for_user",
+        )
+
     @defer.inlineCallbacks
     def is_server_admin(self, user):
         res = yield self._simple_select_one_onecol(
@@ -598,11 +615,77 @@ class RegistrationStore(
             "user_threepids_grandfather", self._bg_user_threepids_grandfather,
         )

+        self.register_background_update_handler(
+            "users_set_deactivated_flag", self._backgroud_update_set_deactivated_flag,
+        )
+
         # Create a background job for culling expired 3PID validity tokens
         hs.get_clock().looping_call(
             self.cull_expired_threepid_validation_tokens, THIRTY_MINUTES_IN_MS,
         )

+    @defer.inlineCallbacks
+    def _backgroud_update_set_deactivated_flag(self, progress, batch_size):
+        """Retrieves a list of all deactivated users and sets the 'deactivated' flag to 1
+        for each of them.
+        """
+
+        last_user = progress.get("user_id", "")
+
+        def _backgroud_update_set_deactivated_flag_txn(txn):
+            txn.execute(
+                """
+                SELECT
+                    users.name,
+                    COUNT(access_tokens.token) AS count_tokens,
+                    COUNT(user_threepids.address) AS count_threepids
+                FROM users
+                    LEFT JOIN access_tokens ON (access_tokens.user_id = users.name)
+                    LEFT JOIN user_threepids ON (user_threepids.user_id = users.name)
+                WHERE (users.password_hash IS NULL OR users.password_hash = '')
+                    AND (users.appservice_id IS NULL OR users.appservice_id = '')
+                    AND users.is_guest = 0
+                    AND users.name > ?
+                GROUP BY users.name
+                ORDER BY users.name ASC
+                LIMIT ?;
+                """,
+                (last_user, batch_size),
+            )
+
+            rows = self.cursor_to_dict(txn)
+
+            if not rows:
+                return True
+
+            rows_processed_nb = 0
+
+            for user in rows:
+                if not user["count_tokens"] and not user["count_threepids"]:
+                    self.set_user_deactivated_status_txn(txn, user["user_id"], True)
+                    rows_processed_nb += 1
+
+            logger.info("Marked %d rows as deactivated", rows_processed_nb)
+
+            self._background_update_progress_txn(
+                txn, "users_set_deactivated_flag", {"user_id": rows[-1]["name"]}
+            )
+
+            if batch_size > len(rows):
+                return True
+            else:
+                return False
+
+        end = yield self.runInteraction(
+            "users_set_deactivated_flag",
+            _backgroud_update_set_deactivated_flag_txn,
+        )
+
+        if end:
+            yield self._end_background_update("users_set_deactivated_flag")
+
+        defer.returnValue(batch_size)
+
     @defer.inlineCallbacks
     def add_access_token_to_user(self, user_id, token, device_id=None):
         """Adds an access token for the given user.
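The background update above follows the usual batched-scan shape: resume from the last key stored in `progress`, take one LIMITed page per run, record progress, and finish when a short page comes back. A standalone sketch of that pattern (SQLite stand-in, hypothetical table):

import sqlite3

def run_batch(conn, last_user, batch_size):
    # One page per run; returns (new progress key, finished?).
    rows = conn.execute(
        "SELECT name FROM users WHERE name > ? ORDER BY name ASC LIMIT ?",
        (last_user, batch_size),
    ).fetchall()
    if not rows:
        return last_user, True
    return rows[-1][0], len(rows) < batch_size

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE users (name TEXT)")
conn.executemany("INSERT INTO users VALUES (?)", [("@a:hs",), ("@b:hs",), ("@c:hs",)])

progress, done = "", False
while not done:
    progress, done = run_batch(conn, progress, 2)
    print(progress, done)  # "@b:hs False", then "@c:hs True"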
@@ -1268,3 +1351,50 @@ class RegistrationStore(
             "delete_threepid_session",
             delete_threepid_session_txn,
         )

+    def set_user_deactivated_status_txn(self, txn, user_id, deactivated):
+        self._simple_update_one_txn(
+            txn=txn,
+            table="users",
+            keyvalues={"name": user_id},
+            updatevalues={"deactivated": 1 if deactivated else 0},
+        )
+        self._invalidate_cache_and_stream(
+            txn, self.get_user_deactivated_status, (user_id,),
+        )
+
+    @defer.inlineCallbacks
+    def set_user_deactivated_status(self, user_id, deactivated):
+        """Set the `deactivated` property for the provided user to the provided value.
+
+        Args:
+            user_id (str): The ID of the user to set the status for.
+            deactivated (bool): The value to set for `deactivated`.
+        """
+
+        yield self.runInteraction(
+            "set_user_deactivated_status",
+            self.set_user_deactivated_status_txn,
+            user_id, deactivated,
+        )
+
+    @cachedInlineCallbacks()
+    def get_user_deactivated_status(self, user_id):
+        """Retrieve the value for the `deactivated` property for the provided user.
+
+        Args:
+            user_id (str): The ID of the user to retrieve the status for.
+
+        Returns:
+            defer.Deferred(bool): The requested value.
+        """
+
+        res = yield self._simple_select_one_onecol(
+            table="users",
+            keyvalues={"name": user_id},
+            retcol="deactivated",
+            desc="get_user_deactivated_status",
+        )
+
+        # Convert the integer into a boolean.
+        defer.returnValue(res == 1)
@@ -0,0 +1,19 @@
+/* Copyright 2019 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ALTER TABLE users ADD deactivated SMALLINT DEFAULT 0 NOT NULL;
+
+INSERT INTO background_updates (update_name, progress_json) VALUES
+  ('users_set_deactivated_flag', '{}');
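Once this migration and the background update above have run, deactivated accounts are a direct lookup rather than an inference from missing password hashes and access tokens. A standalone SQLite sketch of the resulting query (hypothetical data):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE users (name TEXT, deactivated SMALLINT DEFAULT 0 NOT NULL)")
conn.execute("INSERT INTO users VALUES ('@kermit:hs', 1), ('@piggy:hs', 0)")
print(conn.execute("SELECT name FROM users WHERE deactivated = 1").fetchall())
# [('@kermit:hs',)]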
@@ -341,29 +341,7 @@ class SearchStore(BackgroundUpdateStore):
                 for entry in entries
             )

-            # inserts to a GIN index are normally batched up into a pending
-            # list, and then all committed together once the list gets to a
-            # certain size. The trouble with that is that postgres (pre-9.5)
-            # uses work_mem to determine the length of the list, and work_mem
-            # is typically very large.
-            #
-            # We therefore reduce work_mem while we do the insert.
-            #
-            # (postgres 9.5 uses the separate gin_pending_list_limit setting,
-            # so doesn't suffer the same problem, but changing work_mem will
-            # be harmless)
-            #
-            # Note that we don't need to worry about restoring it on
-            # exception, because exceptions will cause the transaction to be
-            # rolled back, including the effects of the SET command.
-            #
-            # Also: we use SET rather than SET LOCAL because there's lots of
-            # other stuff going on in this transaction, which want to have the
-            # normal work_mem setting.
-
-            txn.execute("SET work_mem='256kB'")
             txn.executemany(sql, args)
-            txn.execute("RESET work_mem")

         elif isinstance(self.database_engine, Sqlite3Engine):
             sql = (
@@ -69,10 +69,14 @@ def abort(message, colour=RED, stream=sys.stderr):
     sys.exit(1)


-def start(configfile):
+def start(configfile, daemonize=True):
     write("Starting ...")
     args = SYNAPSE
-    args.extend(["--daemonize", "-c", configfile])
+
+    if daemonize:
+        args.extend(["--daemonize", "-c", configfile])
+    else:
+        args.extend(["-c", configfile])

     try:
         subprocess.check_call(args)
@@ -143,12 +147,21 @@ def main():
         help="start or stop all the workers in the given directory"
         " and the main synapse process",
     )
+    parser.add_argument(
+        "--no-daemonize",
+        action="store_false",
+        help="Run synapse in the foreground for debugging. "
+        "Will work only if the daemonize option is not set in the config."
+    )

     options = parser.parse_args()

     if options.worker and options.all_processes:
         write('Cannot use "--worker" with "--all-processes"', stream=sys.stderr)
         sys.exit(1)
+    if options.no_daemonize and options.all_processes:
+        write('Cannot use "--no-daemonize" with "--all-processes"', stream=sys.stderr)
+        sys.exit(1)

     configfile = options.configfile
@@ -276,7 +289,7 @@ def main():
     # Check if synapse is already running
     if os.path.exists(pidfile) and pid_running(int(open(pidfile).read())):
         abort("synapse.app.homeserver already running")
-    start(configfile)
+    start(configfile, bool(options.no_daemonize))

     for worker in workers:
         env = os.environ.copy()
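One subtlety in the synctl change above: with action="store_false" the option defaults to True and flips to False when the flag is given, so `options.no_daemonize` effectively reads as "should we daemonize?", which is why it is passed straight into start(). A minimal standalone demonstration:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--no-daemonize", action="store_false")

print(parser.parse_args([]).no_daemonize)                  # True  -> daemonize
print(parser.parse_args(["--no-daemonize"]).no_daemonize)  # False -> foreground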
@@ -15,6 +15,7 @@

 import os

+import attr
 import pkg_resources

 from twisted.internet.defer import Deferred
@@ -24,15 +25,16 @@ from synapse.rest.client.v1 import login, room

 from tests.unittest import HomeserverTestCase

-try:
-    from synapse.push.mailer import load_jinja2_templates
-except Exception:
-    load_jinja2_templates = None

+@attr.s
+class _User(object):
+    "Helper wrapper for user ID and access token"
+    id = attr.ib()
+    token = attr.ib()
+

 class EmailPusherTests(HomeserverTestCase):

-    skip = "No Jinja installed" if not load_jinja2_templates else None
     servlets = [
         synapse.rest.admin.register_servlets_for_client_rest_resource,
         room.register_servlets,
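For context (illustration only, hypothetical values): the @attr.s decorator above generates __init__ and __repr__ for the little record type, so the tests can bundle each user's ID and access token together:

import attr

@attr.s
class User(object):  # stand-in for the test's _User
    id = attr.ib()
    token = attr.ib()

print(User(id="@user:test", token="abc"))  # User(id='@user:test', token='abc')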
@@ -77,25 +79,32 @@ class EmailPusherTests(HomeserverTestCase):

         return hs

-    def test_sends_email(self):
-
+    def prepare(self, reactor, clock, hs):
         # Register the user who gets notified
-        user_id = self.register_user("user", "pass")
-        access_token = self.login("user", "pass")
+        self.user_id = self.register_user("user", "pass")
+        self.access_token = self.login("user", "pass")

-        # Register the user who sends the message
-        other_user_id = self.register_user("otheruser", "pass")
-        other_access_token = self.login("otheruser", "pass")
+        # Register other users
+        self.others = [
+            _User(
+                id=self.register_user("otheruser1", "pass"),
+                token=self.login("otheruser1", "pass"),
+            ),
+            _User(
+                id=self.register_user("otheruser2", "pass"),
+                token=self.login("otheruser2", "pass"),
+            ),
+        ]

         # Register the pusher
         user_tuple = self.get_success(
-            self.hs.get_datastore().get_user_by_access_token(access_token)
+            self.hs.get_datastore().get_user_by_access_token(self.access_token)
         )
         token_id = user_tuple["token_id"]

-        self.get_success(
+        self.pusher = self.get_success(
             self.hs.get_pusherpool().add_pusher(
-                user_id=user_id,
+                user_id=self.user_id,
                 access_token=token_id,
                 kind="email",
                 app_id="m.email",
@@ -107,22 +116,54 @@ class EmailPusherTests(HomeserverTestCase):
             )
         )

-        # Create a room
-        room = self.helper.create_room_as(user_id, tok=access_token)
-
-        # Invite the other person
-        self.helper.invite(room=room, src=user_id, tok=access_token, targ=other_user_id)
-
-        # The other user joins
-        self.helper.join(room=room, user=other_user_id, tok=other_access_token)
+    def test_simple_sends_email(self):
+        # Create a simple room with two users
+        room = self.helper.create_room_as(self.user_id, tok=self.access_token)
+        self.helper.invite(
+            room=room, src=self.user_id, tok=self.access_token, targ=self.others[0].id,
+        )
+        self.helper.join(room=room, user=self.others[0].id, tok=self.others[0].token)

         # The other user sends some messages
-        self.helper.send(room, body="Hi!", tok=other_access_token)
-        self.helper.send(room, body="There!", tok=other_access_token)
+        self.helper.send(room, body="Hi!", tok=self.others[0].token)
+        self.helper.send(room, body="There!", tok=self.others[0].token)

         # We should get emailed about that message
+        self._check_for_mail()
+
+    def test_multiple_members_email(self):
+        # We want to test multiple notifications, so we pause processing of push
+        # while we send messages.
+        self.pusher._pause_processing()
+
+        # Create a simple room with multiple other users
+        room = self.helper.create_room_as(self.user_id, tok=self.access_token)
+
+        for other in self.others:
+            self.helper.invite(
+                room=room, src=self.user_id, tok=self.access_token, targ=other.id,
+            )
+            self.helper.join(room=room, user=other.id, tok=other.token)
+
+        # The other users send some messages
+        self.helper.send(room, body="Hi!", tok=self.others[0].token)
+        self.helper.send(room, body="There!", tok=self.others[1].token)
+        self.helper.send(room, body="There!", tok=self.others[1].token)
+
+        # Nothing should have happened yet, as we're paused.
+        assert not self.email_attempts
+
+        self.pusher._resume_processing()
+
+        # We should get emailed about those messages
+        self._check_for_mail()
+
+    def _check_for_mail(self):
+        "Check that the user receives an email notification"
+
         # Get the stream ordering before it gets sent
         pushers = self.get_success(
-            self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
+            self.hs.get_datastore().get_pushers_by(dict(user_name=self.user_id))
         )
         self.assertEqual(len(pushers), 1)
         last_stream_ordering = pushers[0]["last_stream_ordering"]
@@ -132,7 +173,7 @@ class EmailPusherTests(HomeserverTestCase):

         # It hasn't succeeded yet, so the stream ordering shouldn't have moved
         pushers = self.get_success(
-            self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
+            self.hs.get_datastore().get_pushers_by(dict(user_name=self.user_id))
         )
         self.assertEqual(len(pushers), 1)
         self.assertEqual(last_stream_ordering, pushers[0]["last_stream_ordering"])
@@ -149,7 +190,7 @@ class EmailPusherTests(HomeserverTestCase):

         # The stream ordering has increased
         pushers = self.get_success(
-            self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
+            self.hs.get_datastore().get_pushers_by(dict(user_name=self.user_id))
         )
         self.assertEqual(len(pushers), 1)
         self.assertTrue(pushers[0]["last_stream_ordering"] > last_stream_ordering)
@@ -23,15 +23,9 @@ from synapse.util.logcontext import make_deferred_yieldable

 from tests.unittest import HomeserverTestCase

-try:
-    from synapse.push.mailer import load_jinja2_templates
-except Exception:
-    load_jinja2_templates = None
-

 class HTTPPusherTests(HomeserverTestCase):

-    skip = "No Jinja installed" if not load_jinja2_templates else None
     servlets = [
         synapse.rest.admin.register_servlets_for_client_rest_resource,
         room.register_servlets,
@@ -23,14 +23,8 @@ from synapse.rest.consent import consent_resource

 from tests import unittest
 from tests.server import render

-try:
-    from synapse.push.mailer import load_jinja2_templates
-except Exception:
-    load_jinja2_templates = None
-

 class ConsentResourceTestCase(unittest.HomeserverTestCase):
-    skip = "No Jinja installed" if not load_jinja2_templates else None
     servlets = [
         synapse.rest.admin.register_servlets_for_client_rest_resource,
         room.register_servlets,
@@ -0,0 +1,286 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015-2016 OpenMarket Ltd
+# Copyright 2017-2018 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+import re
+from email.parser import Parser
+
+import pkg_resources
+
+import synapse.rest.admin
+from synapse.api.constants import LoginType
+from synapse.rest.client.v1 import login
+from synapse.rest.client.v2_alpha import account, register
+
+from tests import unittest
+
+
+class PasswordResetTestCase(unittest.HomeserverTestCase):
+
+    servlets = [
+        account.register_servlets,
+        synapse.rest.admin.register_servlets_for_client_rest_resource,
+        register.register_servlets,
+        login.register_servlets,
+    ]
+
+    def make_homeserver(self, reactor, clock):
+        config = self.default_config()
+
+        # Email config.
+        self.email_attempts = []
+
+        def sendmail(smtphost, from_addr, to_addrs, msg, **kwargs):
+            self.email_attempts.append(msg)
+            return
+
+        config["email"] = {
+            "enable_notifs": False,
+            "template_dir": os.path.abspath(
+                pkg_resources.resource_filename("synapse", "res/templates")
+            ),
+            "smtp_host": "127.0.0.1",
+            "smtp_port": 20,
+            "require_transport_security": False,
+            "smtp_user": None,
+            "smtp_pass": None,
+            "notif_from": "test@example.com",
+        }
+        config["public_baseurl"] = "https://example.com"
+
+        hs = self.setup_test_homeserver(config=config, sendmail=sendmail)
+        return hs
+
+    def prepare(self, reactor, clock, hs):
+        self.store = hs.get_datastore()
+
+    def test_basic_password_reset(self):
+        """Test basic password reset flow
+        """
+        old_password = "monkey"
+        new_password = "kangeroo"
+
+        user_id = self.register_user("kermit", old_password)
+        self.login("kermit", old_password)
+
+        email = "test@example.com"
+
+        # Add a threepid
+        self.get_success(
+            self.store.user_add_threepid(
+                user_id=user_id,
+                medium="email",
+                address=email,
+                validated_at=0,
+                added_at=0,
+            )
+        )
+
+        client_secret = "foobar"
+        session_id = self._request_token(email, client_secret)
+
+        self.assertEquals(len(self.email_attempts), 1)
+        link = self._get_link_from_email()
+
+        self._validate_token(link)
+
+        self._reset_password(new_password, session_id, client_secret)
+
+        # Assert we can log in with the new password
+        self.login("kermit", new_password)
+
+        # Assert we can't log in with the old password
+        self.attempt_wrong_password_login("kermit", old_password)
+
+    def test_cant_reset_password_without_clicking_link(self):
+        """Test that we do actually need to click the link in the email
+        """
+        old_password = "monkey"
+        new_password = "kangeroo"
+
+        user_id = self.register_user("kermit", old_password)
+        self.login("kermit", old_password)
+
+        email = "test@example.com"
+
+        # Add a threepid
+        self.get_success(
+            self.store.user_add_threepid(
+                user_id=user_id,
+                medium="email",
+                address=email,
+                validated_at=0,
+                added_at=0,
+            )
+        )
+
+        client_secret = "foobar"
+        session_id = self._request_token(email, client_secret)
+
+        self.assertEquals(len(self.email_attempts), 1)
+
+        # Attempt to reset password without clicking the link
+        self._reset_password(
+            new_password, session_id, client_secret, expected_code=401,
+        )
+
+        # Assert we can log in with the old password
+        self.login("kermit", old_password)
+
+        # Assert we can't log in with the new password
+        self.attempt_wrong_password_login("kermit", new_password)
+
+    def test_no_valid_token(self):
+        """Test that we do actually need to request a token and can't just
+        make a session up.
+        """
+        old_password = "monkey"
+        new_password = "kangeroo"
+
+        user_id = self.register_user("kermit", old_password)
+        self.login("kermit", old_password)
+
+        email = "test@example.com"
+
+        # Add a threepid
+        self.get_success(
+            self.store.user_add_threepid(
+                user_id=user_id,
+                medium="email",
+                address=email,
+                validated_at=0,
+                added_at=0,
+            )
+        )
+
+        client_secret = "foobar"
+        session_id = "weasle"
+
+        # Attempt to reset password without even requesting an email
+        self._reset_password(
+            new_password, session_id, client_secret, expected_code=401,
+        )
+
+        # Assert we can log in with the old password
+        self.login("kermit", old_password)
+
+        # Assert we can't log in with the new password
+        self.attempt_wrong_password_login("kermit", new_password)
+
+    def _request_token(self, email, client_secret):
+        request, channel = self.make_request(
+            "POST",
+            b"account/password/email/requestToken",
+            {"client_secret": client_secret, "email": email, "send_attempt": 1},
+        )
+        self.render(request)
+        self.assertEquals(200, channel.code, channel.result)
+
+        return channel.json_body["sid"]
+
+    def _validate_token(self, link):
+        # Remove the host
+        path = link.replace("https://example.com", "")
+
+        request, channel = self.make_request("GET", path, shorthand=False)
+        self.render(request)
+        self.assertEquals(200, channel.code, channel.result)
+
+    def _get_link_from_email(self):
+        assert self.email_attempts, "No emails have been sent"
+
+        raw_msg = self.email_attempts[-1].decode("UTF-8")
+        mail = Parser().parsestr(raw_msg)
+
+        text = None
+        for part in mail.walk():
+            if part.get_content_type() == "text/plain":
+                text = part.get_payload(decode=True).decode("UTF-8")
+                break
+
+        if not text:
+            self.fail("Could not find text portion of email to parse")
+
+        match = re.search(r"https://example.com\S+", text)
+        assert match, "Could not find link in email"
+
+        return match.group(0)
+
+    def _reset_password(
+        self, new_password, session_id, client_secret, expected_code=200
+    ):
+        request, channel = self.make_request(
+            "POST",
+            b"account/password",
+            {
+                "new_password": new_password,
+                "auth": {
+                    "type": LoginType.EMAIL_IDENTITY,
+                    "threepid_creds": {
+                        "client_secret": client_secret,
+                        "sid": session_id,
+                    },
+                },
+            },
+        )
+        self.render(request)
+        self.assertEquals(expected_code, channel.code, channel.result)
+
+
+class DeactivateTestCase(unittest.HomeserverTestCase):
+
+    servlets = [
+        synapse.rest.admin.register_servlets_for_client_rest_resource,
+        login.register_servlets,
+        account.register_servlets,
+    ]
+
+    def make_homeserver(self, reactor, clock):
+        hs = self.setup_test_homeserver()
+        return hs
+
+    def test_deactivate_account(self):
+        user_id = self.register_user("kermit", "test")
+        tok = self.login("kermit", "test")
+
+        request_data = json.dumps({
+            "auth": {
+                "type": "m.login.password",
+                "user": user_id,
+                "password": "test",
+            },
+            "erase": False,
+        })
+        request, channel = self.make_request(
+            "POST",
+            "account/deactivate",
+            request_data,
+            access_token=tok,
+        )
+        self.render(request)
+        self.assertEqual(request.code, 200)
+
+        store = self.hs.get_datastore()
+
+        # Check that the user has been marked as deactivated.
+        self.assertTrue(self.get_success(store.get_user_deactivated_status(user_id)))
+
+        # Check that this access token has been invalidated.
+        request, channel = self.make_request("GET", "account/whoami")
+        self.render(request)
+        self.assertEqual(request.code, 401)
Some files were not shown because too many files have changed in this diff.