Compare commits
4 Commits
hhs-9 ... paul/schem

| Author | SHA1 | Date |
|---|---|---|
|  | 01e83c9680 |  |
|  | 9705706a7f |  |
|  | 801a551da1 |  |
|  | b8906b0ea8 |  |
@@ -1,13 +0,0 @@
CI
BUILDKITE
BUILDKITE_BUILD_NUMBER
BUILDKITE_BRANCH
BUILDKITE_BUILD_NUMBER
BUILDKITE_JOB_ID
BUILDKITE_BUILD_URL
BUILDKITE_PROJECT_SLUG
BUILDKITE_COMMIT
BUILDKITE_PULL_REQUEST
BUILDKITE_TAG
CODECOV_TOKEN
TRIAL_FLAGS
@@ -1,21 +0,0 @@
version: '3.1'

services:

  postgres:
    image: postgres:9.4
    environment:
      POSTGRES_PASSWORD: postgres

  testenv:
    image: python:2.7
    depends_on:
      - postgres
    env_file: .env
    environment:
      SYNAPSE_POSTGRES_HOST: postgres
      SYNAPSE_POSTGRES_USER: postgres
      SYNAPSE_POSTGRES_PASSWORD: postgres
    working_dir: /app
    volumes:
      - ..:/app
@@ -1,21 +0,0 @@
version: '3.1'

services:

  postgres:
    image: postgres:9.5
    environment:
      POSTGRES_PASSWORD: postgres

  testenv:
    image: python:2.7
    depends_on:
      - postgres
    env_file: .env
    environment:
      SYNAPSE_POSTGRES_HOST: postgres
      SYNAPSE_POSTGRES_USER: postgres
      SYNAPSE_POSTGRES_PASSWORD: postgres
    working_dir: /app
    volumes:
      - ..:/app
@@ -1,21 +0,0 @@
version: '3.1'

services:

  postgres:
    image: postgres:9.4
    environment:
      POSTGRES_PASSWORD: postgres

  testenv:
    image: python:3.5
    depends_on:
      - postgres
    env_file: .env
    environment:
      SYNAPSE_POSTGRES_HOST: postgres
      SYNAPSE_POSTGRES_USER: postgres
      SYNAPSE_POSTGRES_PASSWORD: postgres
    working_dir: /app
    volumes:
      - ..:/app
@@ -1,21 +0,0 @@
version: '3.1'

services:

  postgres:
    image: postgres:9.5
    environment:
      POSTGRES_PASSWORD: postgres

  testenv:
    image: python:3.5
    depends_on:
      - postgres
    env_file: .env
    environment:
      SYNAPSE_POSTGRES_HOST: postgres
      SYNAPSE_POSTGRES_USER: postgres
      SYNAPSE_POSTGRES_PASSWORD: postgres
    working_dir: /app
    volumes:
      - ..:/app
@@ -1,21 +0,0 @@
version: '3.1'

services:

  postgres:
    image: postgres:11
    environment:
      POSTGRES_PASSWORD: postgres

  testenv:
    image: python:3.7
    depends_on:
      - postgres
    env_file: .env
    environment:
      SYNAPSE_POSTGRES_HOST: postgres
      SYNAPSE_POSTGRES_USER: postgres
      SYNAPSE_POSTGRES_PASSWORD: postgres
    working_dir: /app
    volumes:
      - ..:/app
@@ -1,21 +0,0 @@
version: '3.1'

services:

  postgres:
    image: postgres:9.5
    environment:
      POSTGRES_PASSWORD: postgres

  testenv:
    image: python:3.7
    depends_on:
      - postgres
    env_file: .env
    environment:
      SYNAPSE_POSTGRES_HOST: postgres
      SYNAPSE_POSTGRES_USER: postgres
      SYNAPSE_POSTGRES_PASSWORD: postgres
    working_dir: /app
    volumes:
      - ..:/app
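The six compose files above all follow the same pattern: a `postgres` service plus a `testenv` Python container that mounts the repository at `/app`. As a rough, hypothetical illustration of how one of these files could be driven by hand (the service name and mount come from the files above; the exact tox target is an assumption):

```
# Hypothetical local run of one of the compose-based test environments above.
# The compose file and "testenv" service are from the diff; the tox target is
# an assumption matched to the Python/Postgres versions in that file.
docker-compose -f .buildkite/docker-compose.py35.pg95.yaml run --rm testenv \
    bash -c "python -m pip install tox && tox -e py35-postgres"
```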
@@ -1,157 +0,0 @@
env:
  CODECOV_TOKEN: "2dd7eb9b-0eda-45fe-a47c-9b5ac040045f"

steps:
  - command:
      - "python -m pip install tox"
      - "tox -e pep8"
    label: "\U0001F9F9 PEP-8"
    plugins:
      - docker#v3.0.1:
          image: "python:3.6"

  - command:
      - "python -m pip install tox"
      - "tox -e packaging"
    label: "\U0001F9F9 packaging"
    plugins:
      - docker#v3.0.1:
          image: "python:3.6"

  - command:
      - "python -m pip install tox"
      - "tox -e check_isort"
    label: "\U0001F9F9 isort"
    plugins:
      - docker#v3.0.1:
          image: "python:3.6"

  - command:
      - "python -m pip install tox"
      - "scripts-dev/check-newsfragment"
    label: ":newspaper: Newsfile"
    branches: "!master !develop !release-*"
    plugins:
      - docker#v3.0.1:
          image: "python:3.6"
          propagate-environment: true

  - wait

  - command:
      - "python -m pip install tox"
      - "tox -e check-sampleconfig"
    label: "\U0001F9F9 check-sample-config"
    plugins:
      - docker#v3.0.1:
          image: "python:3.6"

  - command:
      - "python -m pip install tox"
      - "tox -e py27,codecov"
    label: ":python: 2.7 / SQLite"
    env:
      TRIAL_FLAGS: "-j 2"
    plugins:
      - docker#v3.0.1:
          image: "python:2.7"
          propagate-environment: true

  - command:
      - "python -m pip install tox"
      - "tox -e py35,codecov"
    label: ":python: 3.5 / SQLite"
    env:
      TRIAL_FLAGS: "-j 2"
    plugins:
      - docker#v3.0.1:
          image: "python:3.5"
          propagate-environment: true

  - command:
      - "python -m pip install tox"
      - "tox -e py36,codecov"
    label: ":python: 3.6 / SQLite"
    env:
      TRIAL_FLAGS: "-j 2"
    plugins:
      - docker#v3.0.1:
          image: "python:3.6"
          propagate-environment: true

  - command:
      - "python -m pip install tox"
      - "tox -e py37,codecov"
    label: ":python: 3.7 / SQLite"
    env:
      TRIAL_FLAGS: "-j 2"
    plugins:
      - docker#v3.0.1:
          image: "python:3.7"
          propagate-environment: true

  - label: ":python: 2.7 / :postgres: 9.4"
    env:
      TRIAL_FLAGS: "-j 4"
    command:
      - "bash -c 'python -m pip install tox && python -m tox -e py27-postgres,codecov'"
    plugins:
      - docker-compose#v2.1.0:
          run: testenv
          config:
            - .buildkite/docker-compose.py27.pg94.yaml

  - label: ":python: 2.7 / :postgres: 9.5"
    env:
      TRIAL_FLAGS: "-j 4"
    command:
      - "bash -c 'python -m pip install tox && python -m tox -e py27-postgres,codecov'"
    plugins:
      - docker-compose#v2.1.0:
          run: testenv
          config:
            - .buildkite/docker-compose.py27.pg95.yaml

  - label: ":python: 3.5 / :postgres: 9.4"
    env:
      TRIAL_FLAGS: "-j 4"
    command:
      - "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,codecov'"
    plugins:
      - docker-compose#v2.1.0:
          run: testenv
          config:
            - .buildkite/docker-compose.py35.pg94.yaml

  - label: ":python: 3.5 / :postgres: 9.5"
    env:
      TRIAL_FLAGS: "-j 4"
    command:
      - "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,codecov'"
    plugins:
      - docker-compose#v2.1.0:
          run: testenv
          config:
            - .buildkite/docker-compose.py35.pg95.yaml

  - label: ":python: 3.7 / :postgres: 9.5"
    env:
      TRIAL_FLAGS: "-j 4"
    command:
      - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'"
    plugins:
      - docker-compose#v2.1.0:
          run: testenv
          config:
            - .buildkite/docker-compose.py37.pg95.yaml

  - label: ":python: 3.7 / :postgres: 11"
    env:
      TRIAL_FLAGS: "-j 4"
    command:
      - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'"
    plugins:
      - docker-compose#v2.1.0:
          run: testenv
          config:
            - .buildkite/docker-compose.py37.pg11.yaml
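For the SQLite-only steps in the pipeline above, the docker plugin simply runs the listed commands inside a stock Python image. A hedged sketch of an approximately equivalent manual invocation (the bind-mount path and the dropping of the `codecov` factor are assumptions, not the pipeline's exact mechanism):

```
# Roughly mirrors the ":python: 3.6 / SQLite" step: same image, same tox call,
# with the checkout mounted into the container.
docker run --rm -v "$PWD":/src -w /src -e TRIAL_FLAGS="-j 2" python:3.6 \
    bash -c "python -m pip install tox && tox -e py36"
```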
@@ -1,170 +0,0 @@
version: 2
jobs:
  dockerhubuploadrelease:
    machine: true
    steps:
      - checkout
      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG}-py2 .
      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG} -t matrixdotorg/synapse:${CIRCLE_TAG}-py3 --build-arg PYTHON_VERSION=3.6 .
      - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
      - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}
      - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py2
      - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py3
  dockerhubuploadlatest:
    machine: true
    steps:
      - checkout
      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest-py2 .
      - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest -t matrixdotorg/synapse:latest-py3 --build-arg PYTHON_VERSION=3.6 .
      - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
      - run: docker push matrixdotorg/synapse:latest
      - run: docker push matrixdotorg/synapse:latest-py2
      - run: docker push matrixdotorg/synapse:latest-py3
  sytestpy2:
    docker:
      - image: matrixdotorg/sytest-synapsepy2
    working_directory: /src
    steps:
      - checkout
      - run: /synapse_sytest.sh
      - store_artifacts:
          path: /logs
          destination: logs
      - store_test_results:
          path: /logs
  sytestpy2postgres:
    docker:
      - image: matrixdotorg/sytest-synapsepy2
    working_directory: /src
    steps:
      - checkout
      - run: POSTGRES=1 /synapse_sytest.sh
      - store_artifacts:
          path: /logs
          destination: logs
      - store_test_results:
          path: /logs
  sytestpy2merged:
    docker:
      - image: matrixdotorg/sytest-synapsepy2
    working_directory: /src
    steps:
      - checkout
      - run: bash .circleci/merge_base_branch.sh
      - run: /synapse_sytest.sh
      - store_artifacts:
          path: /logs
          destination: logs
      - store_test_results:
          path: /logs
  sytestpy2postgresmerged:
    docker:
      - image: matrixdotorg/sytest-synapsepy2
    working_directory: /src
    steps:
      - checkout
      - run: bash .circleci/merge_base_branch.sh
      - run: POSTGRES=1 /synapse_sytest.sh
      - store_artifacts:
          path: /logs
          destination: logs
      - store_test_results:
          path: /logs

  sytestpy3:
    docker:
      - image: matrixdotorg/sytest-synapsepy3
    working_directory: /src
    steps:
      - checkout
      - run: /synapse_sytest.sh
      - store_artifacts:
          path: /logs
          destination: logs
      - store_test_results:
          path: /logs
  sytestpy3postgres:
    docker:
      - image: matrixdotorg/sytest-synapsepy3
    working_directory: /src
    steps:
      - checkout
      - run: POSTGRES=1 /synapse_sytest.sh
      - store_artifacts:
          path: /logs
          destination: logs
      - store_test_results:
          path: /logs
  sytestpy3merged:
    docker:
      - image: matrixdotorg/sytest-synapsepy3
    working_directory: /src
    steps:
      - checkout
      - run: bash .circleci/merge_base_branch.sh
      - run: /synapse_sytest.sh
      - store_artifacts:
          path: /logs
          destination: logs
      - store_test_results:
          path: /logs
  sytestpy3postgresmerged:
    docker:
      - image: matrixdotorg/sytest-synapsepy3
    working_directory: /src
    steps:
      - checkout
      - run: bash .circleci/merge_base_branch.sh
      - run: POSTGRES=1 /synapse_sytest.sh
      - store_artifacts:
          path: /logs
          destination: logs
      - store_test_results:
          path: /logs

workflows:
  version: 2
  build:
    jobs:
      - sytestpy2:
          filters:
            branches:
              only: /develop|master|release-.*/
      - sytestpy2postgres:
          filters:
            branches:
              only: /develop|master|release-.*/
      - sytestpy3:
          filters:
            branches:
              only: /develop|master|release-.*/
      - sytestpy3postgres:
          filters:
            branches:
              only: /develop|master|release-.*/
      - sytestpy2merged:
          filters:
            branches:
              ignore: /develop|master|release-.*/
      - sytestpy2postgresmerged:
          filters:
            branches:
              ignore: /develop|master|release-.*/
      - sytestpy3merged:
          filters:
            branches:
              ignore: /develop|master|release-.*/
      - sytestpy3postgresmerged:
          filters:
            branches:
              ignore: /develop|master|release-.*/
      - dockerhubuploadrelease:
          filters:
            tags:
              only: /v[0-9].[0-9]+.[0-9]+.*/
            branches:
              ignore: /.*/
      - dockerhubuploadlatest:
          filters:
            branches:
              only: master
@@ -1,34 +0,0 @@
#!/usr/bin/env bash

set -e

# CircleCI doesn't give CIRCLE_PR_NUMBER in the environment for non-forked PRs. Wonderful.
# In this case, we just need to do some ~shell magic~ to strip it out of the PULL_REQUEST URL.
echo 'export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-${CIRCLE_PULL_REQUEST##*/}}"' >> $BASH_ENV
source $BASH_ENV

if [[ -z "${CIRCLE_PR_NUMBER}" ]]
then
    echo "Can't figure out what the PR number is! Assuming merge target is develop."

    # It probably hasn't had a PR opened yet. Since all PRs land on develop, we
    # can probably assume it's based on it and will be merged into it.
    GITBASE="develop"
else
    # Get the reference, using the GitHub API
    GITBASE=`wget -O- https://api.github.com/repos/matrix-org/synapse/pulls/${CIRCLE_PR_NUMBER} | jq -r '.base.ref'`
fi

# Show what we are before
git --no-pager show -s

# Set up username so it can do a merge
git config --global user.email bot@matrix.org
git config --global user.name "A robot"

# Fetch and merge. If it doesn't work, it will raise due to set -e.
git fetch -u origin $GITBASE
git merge --no-edit origin/$GITBASE

# Show what we are after.
git --no-pager show -s
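The script above leans on the `${CIRCLE_PULL_REQUEST##*/}` parameter expansion to recover a PR number when CircleCI does not provide one. A minimal sketch of that expansion in isolation (the URL below is a placeholder, not a real pull request):

```
# "##*/" removes the longest prefix ending in "/", leaving the last path segment.
CIRCLE_PULL_REQUEST="https://github.com/matrix-org/synapse/pull/1234"  # placeholder
echo "${CIRCLE_PULL_REQUEST##*/}"  # prints: 1234
```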
15  .codecov.yml
@@ -1,15 +0,0 @@
comment:
  layout: "diff"

coverage:
  status:
    project:
      default:
        target: 0  # Target % coverage, can be auto. Turned off for now
        threshold: null
        base: auto
    patch:
      default:
        target: 0
        threshold: null
        base: auto
@@ -1,7 +0,0 @@
[run]
branch = True
parallel = True
include = synapse/*

[report]
precision = 2
@@ -1,9 +0,0 @@
Dockerfile
.travis.yml
.gitignore
demo/etc
tox.ini
.git/*
.tox/*
debian/matrix-synapse/
debian/matrix-synapse-*/
@@ -1,9 +0,0 @@
# EditorConfig https://EditorConfig.org

# top-most EditorConfig file
root = true

# 4 space indentation
[*.py]
indent_style = space
indent_size = 4
66  .github/ISSUE_TEMPLATE/BUG_REPORT.md  vendored
@@ -1,66 +0,0 @@
---
name: Bug report
about: Create a report to help us improve

---

<!--

**IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**:
You will likely get better support more quickly if you ask in ** #matrix:matrix.org ** ;)


This is a bug report template. By following the instructions below and
filling out the sections with your information, you will help us to get all
the necessary data to fix your issue.

You can also preview your report before submitting it. You may remove sections
that aren't relevant to your particular case.

Text between <!-- and --> marks will be invisible in the report.

-->

### Description

<!-- Describe here the problem that you are experiencing -->

### Steps to reproduce

- list the steps
- that reproduce the bug
- using hyphens as bullet points

<!--
Describe how what happens differs from what you expected.

If you can identify any relevant log snippets from _homeserver.log_, please include
those (please be careful to remove any personal or private data). Please surround them with
``` (three backticks, on a line on their own), so that they are formatted legibly.
-->

### Version information

<!-- IMPORTANT: please answer the following questions, to help us narrow down the problem -->

<!-- Was this issue identified on matrix.org or another homeserver? -->
- **Homeserver**:

If not matrix.org:

<!--
What version of Synapse is running?
You can find the Synapse version by inspecting the server headers (replace matrix.org with
your own homeserver domain):
$ curl -v https://matrix.org/_matrix/client/versions 2>&1 | grep "Server:"
-->
- **Version**:

- **Install method**:
<!-- examples: package manager/git clone/pip -->

- **Platform**:
<!--
Tell us about the environment in which your homeserver is operating
distro, hardware, if it's running in a vm/container, etc.
-->
9  .github/ISSUE_TEMPLATE/FEATURE_REQUEST.md  vendored
@@ -1,9 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project

---

**Description:**

<!-- Describe here the feature you are requesting. -->
9  .github/ISSUE_TEMPLATE/SUPPORT_REQUEST.md  vendored
@@ -1,9 +0,0 @@
---
name: Support request
about: I need support for Synapse

---

# Please ask for support in [**#matrix:matrix.org**](https://matrix.to/#/#matrix:matrix.org)

## Don't file an issue as a support request.
7  .github/PULL_REQUEST_TEMPLATE.md  vendored
@@ -1,7 +0,0 @@
### Pull Request Checklist

<!-- Please read CONTRIBUTING.rst before submitting your pull request -->

* [ ] Pull request is based on the develop branch
* [ ] Pull request includes a [changelog file](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.rst#changelog)
* [ ] Pull request includes a [sign off](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.rst#sign-off)
3  .github/SUPPORT.md  vendored
@@ -1,3 +0,0 @@
[**#matrix:matrix.org**](https://matrix.to/#/#matrix:matrix.org) is the official support room for Matrix, and can be accessed by any client from https://matrix.org/docs/projects/try-matrix-now.html

It can also be accessed via IRC bridge at irc://irc.freenode.net/matrix or on the web here: https://webchat.freenode.net/?channels=matrix
50  .gitignore  vendored
@@ -1,36 +1,26 @@
# filename patterns
*~
.*.swp
.#*
*.deb
*.egg
*.egg-info
*.lock
*.pyc
*.tac
.*.swp

_trial_temp/
_trial_temp*/
logs/
dbs/
*.egg
dist/
docs/build/
*.egg-info

# stuff that is likely to exist when you run a server locally
/*.signing.key
/*.tls.crt
/*.tls.key
/uploads
/media_store/
cmdclient_config.json
homeserver*.db

# IDEs
/.idea/
/.ropeproject/
/.vscode/
.coverage
htmlcov

# build products
/.coverage*
!/.coveragerc
/.tox
/build/
/coverage.*
/dist/
/docs/build/
/htmlcov
/pip-wheel-metadata/
demo/*.db
demo/*.log
demo/*.pid

graph/*.svg
graph/*.png
graph/*.dot

uploads
71  AUTHORS.rst
@@ -1,71 +0,0 @@
Erik Johnston <erik at matrix.org>
 * HS core
 * Federation API impl

Mark Haines <mark at matrix.org>
 * HS core
 * Crypto
 * Content repository
 * CS v2 API impl

Kegan Dougal <kegan at matrix.org>
 * HS core
 * CS v1 API impl
 * AS API impl

Paul "LeoNerd" Evans <paul at matrix.org>
 * HS core
 * Presence
 * Typing Notifications
 * Performance metrics and caching layer

Dave Baker <dave at matrix.org>
 * Push notifications
 * Auth CS v2 impl

Matthew Hodgson <matthew at matrix.org>
 * General doc & housekeeping
 * Vertobot/vertobridge matrix<->verto PoC

Emmanuel Rohee <manu at matrix.org>
 * Supporting iOS clients (testability and fallback registration)

Turned to Dust <dwinslow86 at gmail.com>
 * ArchLinux installation instructions

Brabo <brabo at riseup.net>
 * Installation instruction fixes

Ivan Shapovalov <intelfx100 at gmail.com>
 * contrib/systemd: a sample systemd unit file and a logger configuration

Eric Myhre <hash at exultant.us>
 * Fix bug where ``media_store_path`` config option was ignored by v0 content
   repository API.

Muthu Subramanian <muthu.subramanian.karunanidhi at ericsson.com>
 * Add SAML2 support for registration and login.

Steven Hammerton <steven.hammerton at openmarket.com>
 * Add CAS support for registration and login.

Mads Robin Christensen <mads at v42 dot dk>
 * CentOS 7 installation instructions.

Florent Violleau <floviolleau at gmail dot com>
 * Add Raspberry Pi installation instructions and general troubleshooting items

Niklas Riekenbrauck <nikriek at gmail dot.com>
 * Add JWT support for registration and login

Christoph Witzany <christoph at web.crofting.com>
 * Add LDAP support for authentication

Pierre Jaury <pierre at jaury.eu>
 * Docker packaging

Serban Constantin <serban.constantin at gmail dot com>
 * Small bug fix

Jason Robinson <jasonr at matrix.org>
 * Minor fixes
3379  CHANGES.md
File diff suppressed because it is too large
85  CHANGES.rst  Normal file
@@ -0,0 +1,85 @@
Changes in synapse 0.2.0 (2014-09-02)
=====================================
This update changes many configuration options, updates the
database schema and mandates SSL for server-server connections.

Homeserver:
 * Require SSL for server-server connections.
 * Add SSL listener for client-server connections.
 * Add ability to use config files.
 * Add support for kicking/banning and power levels.
 * Allow setting of room names and topics on creation.
 * Change presence to include last seen time of the user.
 * Change url path prefix to /_matrix/...
 * Bug fixes to presence.

Webclient:
 * Reskin the CSS for registration and login.
 * Various improvements to rooms CSS.
 * Support changes in client-server API.
 * Bug fixes to VOIP UI.
 * Various bug fixes to handling of changes to room member list.

Changes in synapse 0.1.2 (2014-08-29)
=====================================

Webclient:
 * Add basic call state UI for VoIP calls.

Changes in synapse 0.1.1 (2014-08-29)
=====================================

Homeserver:
 * Fix bug that caused the event stream to not notify some clients about
   changes.

Changes in synapse 0.1.0 (2014-08-29)
=====================================
Presence has been reenabled in this release.

Homeserver:
 * Update client to server API, including:
   - Use a more consistent url scheme.
   - Provide more useful information in the initial sync api.
 * Change the presence handling to be much more efficient.
 * Change the presence server to server API to not require explicit polling of
   all users who share a room with a user.
 * Fix races in the event streaming logic.

Webclient:
 * Update to use new client to server API.
 * Add basic VOIP support.
 * Add idle timers that change your status to away.
 * Add recent rooms column when viewing a room.
 * Various network efficiency improvements.
 * Add basic mobile browser support.
 * Add a settings page.

Changes in synapse 0.0.1 (2014-08-22)
=====================================
Presence has been disabled in this release due to a bug that caused the
homeserver to spam other remote homeservers.

Homeserver:
 * Completely change the database schema to support generic event types.
 * Improve presence reliability.
 * Improve reliability of joining remote rooms.
 * Fix bug where room join events were duplicated.
 * Improve initial sync API to return more information to the client.
 * Stop generating fake messages for room membership events.

Webclient:
 * Add tab completion of names.
 * Add ability to upload and send images.
 * Add profile pages.
 * Improve CSS layout of room.
 * Disambiguate identical display names.
 * Don't get remote users display names and avatars individually.
 * Use the new initial sync API to reduce number of round trips to the homeserver.
 * Change url scheme to use room aliases instead of room ids where known.
 * Increase longpoll timeout.

Changes in synapse 0.0.0 (2014-08-13)
=====================================

 * Initial alpha release
192  CONTRIBUTING.rst
@@ -1,192 +0,0 @@
Contributing code to Matrix
===========================

Everyone is welcome to contribute code to Matrix
(https://github.com/matrix-org), provided that they are willing to license
their contributions under the same license as the project itself. We follow a
simple 'inbound=outbound' model for contributions: the act of submitting an
'inbound' contribution means that the contributor agrees to license the code
under the same terms as the project's overall 'outbound' license - in our
case, this is almost always Apache Software License v2 (see LICENSE).

How to contribute
~~~~~~~~~~~~~~~~~

The preferred and easiest way to contribute changes to Matrix is to fork the
relevant project on github, and then create a pull request to ask us to pull
your changes into our repo
(https://help.github.com/articles/using-pull-requests/)

**The single biggest thing you need to know is: please base your changes on
the develop branch - /not/ master.**

We use the master branch to track the most recent release, so that folks who
blindly clone the repo and automatically check out master get something that
works. Develop is the unstable branch where all the development actually
happens: the workflow is that contributors should fork the develop branch to
make a 'feature' branch for a particular contribution, and then make a pull
request to merge this back into the matrix.org 'official' develop branch. We
use github's pull request workflow to review the contribution, and either ask
you to make any refinements needed or merge it and make them ourselves. The
changes will then land on master when we next do a release.

We use `CircleCI <https://circleci.com/gh/matrix-org>`_ and `Travis CI
<https://travis-ci.org/matrix-org/synapse>`_ for continuous integration. All
pull requests to synapse get automatically tested by Travis and CircleCI.
If your change breaks the build, this will be shown in GitHub, so please
keep an eye on the pull request for feedback.

To run unit tests in a local development environment, you can use:

- ``tox -e py27`` (requires tox to be installed by ``pip install tox``) for
  SQLite-backed Synapse on Python 2.7.
- ``tox -e py35`` for SQLite-backed Synapse on Python 3.5.
- ``tox -e py36`` for SQLite-backed Synapse on Python 3.6.
- ``tox -e py27-postgres`` for PostgreSQL-backed Synapse on Python 2.7
  (requires a running local PostgreSQL with access to create databases).
- ``./test_postgresql.sh`` for PostgreSQL-backed Synapse on Python 2.7
  (requires Docker). Entirely self-contained, recommended if you don't want to
  set up PostgreSQL yourself.

Docker images are available for running the integration tests (SyTest) locally,
see the `documentation in the SyTest repo
<https://github.com/matrix-org/sytest/blob/develop/docker/README.md>`_ for more
information.

Code style
~~~~~~~~~~

All Matrix projects have a well-defined code-style - and sometimes we've even
got as far as documenting it... For instance, synapse's code style doc lives
at https://github.com/matrix-org/synapse/tree/master/docs/code_style.rst.

Please ensure your changes match the cosmetic style of the existing project,
and **never** mix cosmetic and functional changes in the same commit, as it
makes it horribly hard to review otherwise.

Changelog
~~~~~~~~~

All changes, even minor ones, need a corresponding changelog / newsfragment
entry. These are managed by Towncrier
(https://github.com/hawkowl/towncrier).

To create a changelog entry, make a new file in the ``changelog.d``
directory named in the format of ``PRnumber.type``. The type can be
one of ``feature``, ``bugfix``, ``removal`` (also used for
deprecations), or ``misc`` (for internal-only changes).

The content of the file is your changelog entry, which can contain Markdown
formatting. The entry should end with a full stop ('.') for consistency.

Adding credits to the changelog is encouraged, we value your
contributions and would like to have you shouted out in the release notes!

For example, a fix in PR #1234 would have its changelog entry in
``changelog.d/1234.bugfix``, and contain content like "The security levels of
Florbs are now validated when received over federation. Contributed by Jane
Matrix.".

Debian changelog
----------------

Changes which affect the debian packaging files (in ``debian``) are an
exception.

In this case, you will need to add an entry to the debian changelog for the
next release. For this, run the following command::

  dch

This will make up a new version number (if there isn't already an unreleased
version in flight), and open an editor where you can add a new changelog entry.
(Our release process will ensure that the version number and maintainer name is
corrected for the release.)

If your change affects both the debian packaging *and* files outside the debian
directory, you will need both a regular newsfragment *and* an entry in the
debian changelog. (Though typically such changes should be submitted as two
separate pull requests.)

Attribution
~~~~~~~~~~~

Everyone who contributes anything to Matrix is welcome to be listed in the
AUTHORS.rst file for the project in question. Please feel free to include a
change to AUTHORS.rst in your pull request to list yourself and a short
description of the area(s) you've worked on. Also, we sometimes have swag to
give away to contributors - if you feel that Matrix-branded apparel is missing
from your life, please mail us your shipping address to matrix at matrix.org and
we'll try to fix it :)

Sign off
~~~~~~~~

In order to have a concrete record that your contribution is intentional
and you agree to license it under the same terms as the project's license, we've adopted the
same lightweight approach that the Linux Kernel
`submitting patches process <https://www.kernel.org/doc/html/latest/process/submitting-patches.html#sign-your-work-the-developer-s-certificate-of-origin>`_, Docker
(https://github.com/docker/docker/blob/master/CONTRIBUTING.md), and many other
projects use: the DCO (Developer Certificate of Origin:
http://developercertificate.org/). This is a simple declaration that you wrote
the contribution or otherwise have the right to contribute it to Matrix::

    Developer Certificate of Origin
    Version 1.1

    Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
    660 York Street, Suite 102,
    San Francisco, CA 94110 USA

    Everyone is permitted to copy and distribute verbatim copies of this
    license document, but changing it is not allowed.

    Developer's Certificate of Origin 1.1

    By making a contribution to this project, I certify that:

    (a) The contribution was created in whole or in part by me and I
        have the right to submit it under the open source license
        indicated in the file; or

    (b) The contribution is based upon previous work that, to the best
        of my knowledge, is covered under an appropriate open source
        license and I have the right under that license to submit that
        work with modifications, whether created in whole or in part
        by me, under the same open source license (unless I am
        permitted to submit under a different license), as indicated
        in the file; or

    (c) The contribution was provided directly to me by some other
        person who certified (a), (b) or (c) and I have not modified
        it.

    (d) I understand and agree that this project and the contribution
        are public and that a record of the contribution (including all
        personal information I submit with it, including my sign-off) is
        maintained indefinitely and may be redistributed consistent with
        this project or the open source license(s) involved.

If you agree to this for your contribution, then all that's needed is to
include the line in your commit or pull request comment::

    Signed-off-by: Your Name <your@email.example.org>

We accept contributions under a legally identifiable name, such as
your name on government documentation or common-law names (names
claimed by legitimate usage or repute). Unfortunately, we cannot
accept anonymous contributions at this time.

Git allows you to add this signoff automatically when using the ``-s``
flag to ``git commit``, which uses the name and email set in your
``user.name`` and ``user.email`` git configs.

Conclusion
~~~~~~~~~~

That's it! Matrix is a very open and collaborative project as you might expect
given our obsession with open communication. If we're going to successfully
matrix together all the fragmented communication technologies out there we are
reliant on contributions and collaboration from the community to do so. So
please get involved - and we hope you have as much fun hacking on Matrix as we
do!
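A minimal sketch of the changelog and sign-off workflow described in CONTRIBUTING.rst above, assuming a hypothetical PR number (1234) and placeholder entry text:

```
# Create a newsfragment named after the PR number, then commit with a DCO sign-off.
echo "Fix a typo in the installation instructions." > changelog.d/1234.bugfix
git add changelog.d/1234.bugfix
git commit -s -m "Fix a typo in the installation instructions"
```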
426  INSTALL.md
@@ -1,426 +0,0 @@
* [Installing Synapse](#installing-synapse)
  * [Installing from source](#installing-from-source)
    * [Platform-Specific Instructions](#platform-specific-instructions)
    * [Troubleshooting Installation](#troubleshooting-installation)
  * [Prebuilt packages](#prebuilt-packages)
* [Setting up Synapse](#setting-up-synapse)
  * [TLS certificates](#tls-certificates)
  * [Registering a user](#registering-a-user)
  * [Setting up a TURN server](#setting-up-a-turn-server)
  * [URL previews](#url-previews)

# Installing Synapse

## Installing from source

(Prebuilt packages are available for some platforms - see [Prebuilt packages](#prebuilt-packages).)

System requirements:

- POSIX-compliant system (tested on Linux & OS X)
- Python 3.5, 3.6, 3.7, or 2.7
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org

Synapse is written in Python but some of the libraries it uses are written in
C. So before we can install Synapse itself we need a working C compiler and the
header files for Python C extensions. See [Platform-Specific
Instructions](#platform-specific-instructions) for information on installing
these on various platforms.

To install the Synapse homeserver run:

```
mkdir -p ~/synapse
virtualenv -p python3 ~/synapse/env
source ~/synapse/env/bin/activate
pip install --upgrade pip
pip install --upgrade setuptools
pip install matrix-synapse[all]
```

This will download Synapse from [PyPI](https://pypi.org/project/matrix-synapse)
and install it, along with the python libraries it uses, into a virtual environment
under `~/synapse/env`. Feel free to pick a different directory if you
prefer.

This Synapse installation can then be later upgraded by using pip again with the
update flag:

```
source ~/synapse/env/bin/activate
pip install -U matrix-synapse[all]
```

Before you can start Synapse, you will need to generate a configuration
file. To do this, run (in your virtualenv, as before):

```
cd ~/synapse
python -m synapse.app.homeserver \
    --server-name my.domain.name \
    --config-path homeserver.yaml \
    --generate-config \
    --report-stats=[yes|no]
```

... substituting an appropriate value for `--server-name`. The server name
determines the "domain" part of user-ids for users on your server: these will
all be of the format `@user:my.domain.name`. It also determines how other
matrix servers will reach yours for Federation. For a test configuration,
set this to the hostname of your server. For a more production-ready setup, you
will probably want to specify your domain (`example.com`) rather than a
matrix-specific hostname here (in the same way that your email address is
probably `user@example.com` rather than `user@email.example.com`) - but
doing so may require more advanced setup - see [Setting up Federation](README.rst#setting-up-federation). Beware that the server name cannot be changed later.

This command will generate a config file for you that you can then customise, but it will
also generate a set of keys for you. These keys will allow your Home Server to
identify itself to other Home Servers, so don't lose or delete them. It would be
wise to back them up somewhere safe. (If, for whatever reason, you do need to
change your Home Server's keys, you may find that other Home Servers have the
old key cached. If you update the signing key, you should change the name of the
key in the `<server name>.signing.key` file (the second word) to something
different. See the
[spec](https://matrix.org/docs/spec/server_server/latest.html#retrieving-server-keys)
for more information on key management.)

You will need to give Synapse a TLS certificate before it will start - see [TLS
certificates](#tls-certificates).

To actually run your new homeserver, pick a working directory for Synapse to
run (e.g. `~/synapse`), and:

    cd ~/synapse
    source env/bin/activate
    synctl start

### Platform-Specific Instructions

#### Debian/Ubuntu/Raspbian

Installing prerequisites on Ubuntu or Debian:

```
sudo apt-get install build-essential python3-dev libffi-dev \
                     python-pip python-setuptools sqlite3 \
                     libssl-dev python-virtualenv libjpeg-dev libxslt1-dev
```

#### ArchLinux

Installing prerequisites on ArchLinux:

```
sudo pacman -S base-devel python python-pip \
               python-setuptools python-virtualenv sqlite3
```

#### CentOS/Fedora

Installing prerequisites on CentOS 7 or Fedora 25:

```
sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
                 lcms2-devel libwebp-devel tcl-devel tk-devel redhat-rpm-config \
                 python-virtualenv libffi-devel openssl-devel
sudo yum groupinstall "Development Tools"
```

#### Mac OS X

Installing prerequisites on Mac OS X:

```
xcode-select --install
sudo easy_install pip
sudo pip install virtualenv
brew install pkg-config libffi
```

#### OpenSUSE

Installing prerequisites on openSUSE:

```
sudo zypper in -t pattern devel_basis
sudo zypper in python-pip python-setuptools sqlite3 python-virtualenv \
               python-devel libffi-devel libopenssl-devel libjpeg62-devel
```

#### OpenBSD

Installing prerequisites on OpenBSD:

```
doas pkg_add python libffi py-pip py-setuptools sqlite3 py-virtualenv \
             libxslt jpeg
```

There is currently no port for OpenBSD. Additionally, OpenBSD's security
settings require a slightly more difficult installation process.

XXX: I suspect this is out of date.

1. Create a new directory in `/usr/local` called `_synapse`. Also, create a
   new user called `_synapse` and set that directory as the new user's home.
   This is required because, by default, OpenBSD only allows binaries which need
   write and execute permissions on the same memory space to be run from
   `/usr/local`.
2. `su` to the new `_synapse` user and change to their home directory.
3. Create a new virtualenv: `virtualenv -p python2.7 ~/.synapse`
4. Source the virtualenv configuration located at
   `/usr/local/_synapse/.synapse/bin/activate`. This is done in `ksh` by
   using the `.` command, rather than `bash`'s `source`.
5. Optionally, use `pip` to install `lxml`, which Synapse needs to parse
   webpages for their titles.
6. Use `pip` to install this repository: `pip install matrix-synapse`
7. Optionally, change `_synapse`'s shell to `/bin/false` to reduce the
   chance of a compromised Synapse server being used to take over your box.

After this, you may proceed with the rest of the install directions.

#### Windows

If you wish to run or develop Synapse on Windows, the Windows Subsystem For
Linux provides a Linux environment on Windows 10 which is capable of using the
Debian, Fedora, or source installation methods. More information about WSL can
be found at https://docs.microsoft.com/en-us/windows/wsl/install-win10 for
Windows 10 and https://docs.microsoft.com/en-us/windows/wsl/install-on-server
for Windows Server.

### Troubleshooting Installation

XXX a bunch of this is no longer relevant.

Synapse requires pip 8 or later, so if your OS provides too old a version you
may need to manually upgrade it:

    sudo pip install --upgrade pip

Installing may fail with `Could not find any downloads that satisfy the requirement pymacaroons-pynacl (from matrix-synapse==0.12.0)`.
You can fix this by manually upgrading pip and virtualenv:

    sudo pip install --upgrade virtualenv

You can next rerun `virtualenv -p python3 synapse` to update the virtual env.

Installing may fail during installing virtualenv with `InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning.`
You can fix this by manually installing ndg-httpsclient:

    pip install --upgrade ndg-httpsclient

Installing may fail with `mock requires setuptools>=17.1. Aborting installation`.
You can fix this by upgrading setuptools:

    pip install --upgrade setuptools

If pip crashes mid-installation for any reason (e.g. lost terminal), pip may
refuse to run until you remove the temporary installation directory it
created. To reset the installation:

    rm -rf /tmp/pip_install_matrix

pip seems to leak *lots* of memory during installation. For instance, a Linux
host with 512MB of RAM may run out of memory whilst installing Twisted. If this
happens, you will have to individually install the dependencies which are
failing, e.g.:

    pip install twisted

## Prebuilt packages

As an alternative to installing from source, prebuilt packages are available
for a number of platforms.

### Docker images and Ansible playbooks

There is an official synapse image available at
https://hub.docker.com/r/matrixdotorg/synapse which can be used with
the docker-compose file available at [contrib/docker](contrib/docker). Further information on
this including configuration options is available in the README on
hub.docker.com.

Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
Dockerfile to automate a synapse server in a single Docker image, at
https://hub.docker.com/r/avhost/docker-matrix/tags/

Slavi Pantaleev has created an Ansible playbook,
which installs the official Docker image of Matrix Synapse
along with many other Matrix-related services (Postgres database, riot-web, coturn, mxisd, SSL support, etc.).
For more details, see
https://github.com/spantaleev/matrix-docker-ansible-deploy


### Debian/Ubuntu

#### Matrix.org packages

Matrix.org provides Debian/Ubuntu packages of the latest stable version of
Synapse via https://matrix.org/packages/debian/. To use them:

```
sudo apt install -y lsb-release curl apt-transport-https
echo "deb https://matrix.org/packages/debian `lsb_release -cs` main" |
    sudo tee /etc/apt/sources.list.d/matrix-org.list
curl "https://matrix.org/packages/debian/repo-key.asc" |
    sudo apt-key add -
sudo apt update
sudo apt install matrix-synapse-py3
```

#### Downstream Debian/Ubuntu packages

For `buster` and `sid`, Synapse is available in the Debian repositories and
it should be possible to install it with simply:

```
sudo apt install matrix-synapse
```

There is also a version of `matrix-synapse` in `stretch-backports`. Please see
the [Debian documentation on
backports](https://backports.debian.org/Instructions/) for information on how
to use them.

We do not recommend using the packages in downstream Ubuntu at this time, as
they are old and suffer from known security vulnerabilities.

### Fedora

Synapse is in the Fedora repositories as `matrix-synapse`:

```
sudo dnf install matrix-synapse
```

Oleg Girko provides Fedora RPMs at
https://obs.infoserver.lv/project/monitor/matrix-synapse

### OpenSUSE

Synapse is in the OpenSUSE repositories as `matrix-synapse`:

```
sudo zypper install matrix-synapse
```

### SUSE Linux Enterprise Server

Unofficial packages are built for SLES 15 in the openSUSE:Backports:SLE-15 repository at
https://download.opensuse.org/repositories/openSUSE:/Backports:/SLE-15/standard/

### ArchLinux

The quickest way to get up and running with ArchLinux is probably with the community package
https://www.archlinux.org/packages/community/any/matrix-synapse/, which should pull in most of
the necessary dependencies.

pip may be outdated (6.0.7-1 and needs to be upgraded to 6.0.8-1):

```
sudo pip install --upgrade pip
```

If you encounter an error with lib bcrypt causing a Wrong ELF Class:
ELFCLASS32 (x64 Systems), you may need to reinstall py-bcrypt to correctly
compile it under the right architecture. (This should not be needed if
installing under virtualenv):

```
sudo pip uninstall py-bcrypt
sudo pip install py-bcrypt
```

### FreeBSD

Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from:

- Ports: `cd /usr/ports/net-im/py-matrix-synapse && make install clean`
- Packages: `pkg install py27-matrix-synapse`


### NixOS

Robin Lambertz has packaged Synapse for NixOS at:
https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-synapse.nix

# Setting up Synapse

Once you have installed synapse as above, you will need to configure it.

## TLS certificates

The default configuration exposes a single HTTP port: http://localhost:8008. It
is suitable for local testing, but for any practical use, you will either need
to enable a reverse proxy, or configure Synapse to expose an HTTPS port.

For information on using a reverse proxy, see
[docs/reverse_proxy.rst](docs/reverse_proxy.rst).

To configure Synapse to expose an HTTPS port, you will need to edit
`homeserver.yaml`, as follows:

* First, under the `listeners` section, uncomment the configuration for the
  TLS-enabled listener. (Remove the hash sign (`#`) at the start of
  each line). The relevant lines are like this:

  ```
  - port: 8448
    type: http
    tls: true
    resources:
      - names: [client, federation]
  ```
* You will also need to uncomment the `tls_certificate_path` and
  `tls_private_key_path` lines under the `TLS` section. You can either
  point these settings at an existing certificate and key, or you can
  enable Synapse's built-in ACME (Let's Encrypt) support. Instructions
  for having Synapse automatically provision and renew federation
  certificates through ACME can be found at [ACME.md](docs/ACME.md).

## Registering a user

You will need at least one user on your server in order to use a Matrix
client. Users can be registered either via a Matrix client, or via a
commandline script.

To get started, it is easiest to use the command line to register new
users. This can be done as follows:

```
$ source ~/synapse/env/bin/activate
$ synctl start # if not already running
$ register_new_matrix_user -c homeserver.yaml http://localhost:8008
New user localpart: erikj
Password:
Confirm password:
Make admin [no]:
Success!
```

This process uses a setting `registration_shared_secret` in
`homeserver.yaml`, which is shared between Synapse itself and the
`register_new_matrix_user` script. It doesn't matter what it is (a random
value is generated by `--generate-config`), but it should be kept secret, as
anyone with knowledge of it can register users on your server even if
`enable_registration` is `false`.

## Setting up a TURN server

For reliable VoIP calls to be routed via this homeserver, you MUST configure
a TURN server. See [docs/turn-howto.rst](docs/turn-howto.rst) for details.

## URL previews

Synapse includes support for previewing URLs, which is disabled by default. To
turn it on you must enable the `url_preview_enabled: True` config parameter
and explicitly specify the IP ranges that Synapse is not allowed to spider for
previewing in the `url_preview_ip_range_blacklist` configuration parameter.
This is critical from a security perspective to stop arbitrary Matrix users
spidering 'internal' URLs on your network. At the very least we recommend that
your loopback and RFC1918 IP addresses are blacklisted.

This also requires the optional lxml and netaddr python dependencies to be
installed. This in turn requires the libxml2 library to be available - on
Debian/Ubuntu this means `apt-get install libxml2-dev`, or equivalent for
your OS.
44  MANIFEST.in
@@ -1,45 +1,3 @@
include synctl
include LICENSE
include VERSION
include *.rst
include *.md
include demo/README
include demo/demo.tls.dh
include demo/*.py
include demo/*.sh

recursive-include synapse/storage/schema *.sql
recursive-include synapse/storage/schema *.py

recursive-include docs *
recursive-include scripts *
recursive-include scripts-dev *
recursive-include synapse *.pyi
recursive-include tests *.pem
recursive-include tests *.py

recursive-include synapse/res *
recursive-include synapse/static *.css
recursive-include synapse/static *.gif
recursive-include synapse/static *.html
recursive-include synapse/static *.js

exclude Dockerfile
exclude .dockerignore
exclude test_postgresql.sh
exclude .editorconfig

include pyproject.toml
recursive-include changelog.d *

prune .github
prune demo/etc
prune docker
prune .circleci
prune .coveragerc
prune debian
prune .codecov.yml
prune .buildkite

exclude jenkins*
recursive-exclude jenkins *.sh
recursive-include synapse/persistence/schema *.sql
35  MAP.rst  Normal file
@@ -0,0 +1,35 @@
Directory Structure
===================

Warning: this may be a bit stale...

::

    .
    ├── cmdclient           Basic CLI python Matrix client
    ├── demo                Scripts for running standalone Matrix demos
    ├── docs                All doc, including the draft Matrix API spec
    │   ├── client-server   The client-server Matrix API spec
    │   ├── model           Domain-specific elements of the Matrix API spec
    │   ├── server-server   The server-server model of the Matrix API spec
    │   └── sphinx          The internal API doc of the Synapse homeserver
    ├── experiments         Early experiments of using Synapse's internal APIs
    ├── graph               Visualisation of Matrix's distributed message store
    ├── synapse             The reference Matrix homeserver implementation
    │   ├── api             Common building blocks for the APIs
    │   │   ├── events      Definition of state representation Events
    │   │   └── streams     Definition of streamable Event objects
    │   ├── app             The __main__ entry point for the homeserver
    │   ├── crypto          The PKI client/server used for secure federation
    │   │   └── resource    PKI helper objects (e.g. keys)
    │   ├── federation      Server-server state replication logic
    │   ├── handlers        The main business logic of the homeserver
    │   ├── http            Wrappers around Twisted's HTTP server & client
    │   ├── rest            Servlet-style RESTful API
    │   ├── storage         Persistence subsystem (currently only sqlite3)
    │   │   └── schema      sqlite persistence schema
    │   └── util            Synapse-specific utilities
    ├── tests               Unit tests for the Synapse homeserver
    └── webclient           Basic AngularJS Matrix web client
602
README.rst
602
README.rst
@@ -1,414 +1,144 @@
|
||||
.. contents::
|
||||
|
||||
Introduction
|
||||
============
|
||||
|
||||
Matrix is an ambitious new ecosystem for open federated Instant Messaging and
|
||||
VoIP. The basics you need to know to get up and running are:
|
||||
|
||||
- Everything in Matrix happens in a room. Rooms are distributed and do not
|
||||
exist on any single server. Rooms can be located using convenience aliases
|
||||
like ``#matrix:matrix.org`` or ``#test:localhost:8448``.
|
||||
|
||||
- Matrix user IDs look like ``@matthew:matrix.org`` (although in the future
|
||||
you will normally refer to yourself and others using a third party identifier
|
||||
(3PID): email address, phone number, etc rather than manipulating Matrix user IDs)
|
||||
- Chatrooms are distributed and do not exist on any single server. Rooms
|
||||
can be found using names like ``#matrix:matrix.org`` or
|
||||
``#test:localhost:8008`` or they can be ephemeral.
|
||||
|
||||
- Matrix user IDs look like ``@matthew:matrix.org`` (although in the future
|
||||
you will normally refer to yourself and others using a 3PID: email
|
||||
address, phone number, etc rather than manipulating Matrix user IDs)
|
||||
|
||||
The overall architecture is::
|
||||
|
||||
client <----> homeserver <=====================> homeserver <----> client
|
||||
https://somewhere.org/_matrix https://elsewhere.net/_matrix
|
||||
https://matrix.org/_matrix https://mydomain.net/_matrix
|
||||
|
||||
``#matrix:matrix.org`` is the official support room for Matrix, and can be
|
||||
accessed by any client from https://matrix.org/docs/projects/try-matrix-now.html or
|
||||
via IRC bridge at irc://irc.freenode.net/matrix.
|
||||
Quick Start
|
||||
===========
|
||||
|
||||
Synapse is currently in rapid development, but as of version 0.5 we believe it
|
||||
is sufficiently stable to be run as an internet-facing service for real usage!
|
||||
To get up and running:
|
||||
|
||||
- To simply play with an **existing** homeserver you can
|
||||
just go straight to http://matrix.org/alpha.
|
||||
|
||||
- To run your own **private** homeserver on localhost:8008, install synapse
|
||||
with ``python setup.py develop --user`` and then run one with
|
||||
``python synapse/app/homeserver.py`` - you will find a webclient running
|
||||
at http://localhost:8008 (use a recent Chrome, Safari or Firefox for now,
|
||||
please...)
|
||||
|
||||
- To make the homeserver **public** and let it exchange messages with
|
||||
other homeservers and participate in the overall Matrix federation, open
|
||||
up port 8448 and run ``python synapse/app/homeserver.py --host
|
||||
machine.my.domain.name``. Then come join ``#matrix:matrix.org`` and
|
||||
say hi! :)
|
||||
|
||||
For more detailed setup instructions, please see further down this document.
|
||||
|
||||
|
||||
About Matrix
|
||||
============
|
||||
|
||||
Matrix specifies a set of pragmatic RESTful HTTP JSON APIs as an open standard,
|
||||
which handle:
|
||||
|
||||
- Creating and managing fully distributed chat rooms with no
|
||||
single points of control or failure
|
||||
- Eventually-consistent cryptographically secure synchronisation of room
|
||||
state across a global open network of federated servers and services
|
||||
- Sending and receiving extensible messages in a room with (optional)
|
||||
end-to-end encryption[1]
|
||||
- Inviting, joining, leaving, kicking, banning room members
|
||||
- Managing user accounts (registration, login, logout)
|
||||
- Using 3rd Party IDs (3PIDs) such as email addresses, phone numbers,
|
||||
Facebook accounts to authenticate, identify and discover users on Matrix.
|
||||
- Placing 1:1 VoIP and Video calls
|
||||
- Creating and managing fully distributed chat rooms with no
|
||||
single points of control or failure
|
||||
- Eventually-consistent cryptographically secure[1] synchronisation of room
|
||||
state across a global open network of federated servers and services
|
||||
- Sending and receiving extensible messages in a room with (optional)
|
||||
end-to-end encryption[2]
|
||||
- Inviting, joining, leaving, kicking, banning room members
|
||||
- Managing user accounts (registration, login, logout)
|
||||
- Using 3rd Party IDs (3PIDs) such as email addresses, phone numbers,
|
||||
Facebook accounts to authenticate, identify and discover users on Matrix.
|
||||
- Placing 1:1 VoIP and Video calls
|
||||
|
||||
These APIs are intended to be implemented on a wide range of servers, services
|
||||
and clients, letting developers build messaging and VoIP functionality on top
|
||||
of the entirely open Matrix ecosystem rather than using closed or proprietary
|
||||
and clients, letting developers build messaging and VoIP functionality on top of
|
||||
the entirely open Matrix ecosystem rather than using closed or proprietary
|
||||
solutions. The hope is for Matrix to act as the building blocks for a new
|
||||
generation of fully open and interoperable messaging and VoIP apps for the
|
||||
internet.
|
||||
|
||||
Synapse is a reference "homeserver" implementation of Matrix from the core
|
||||
development team at matrix.org, written in Python/Twisted. It is intended to
|
||||
showcase the concept of Matrix and let folks see the spec in the context of a
|
||||
codebase and let you run your own homeserver and generally help bootstrap the
|
||||
ecosystem.
|
||||
development team at matrix.org, written in Python/Twisted for clarity and
|
||||
simplicity. It is intended to showcase the concept of Matrix and let folks see
|
||||
the spec in the context of a codebase and let you run your own homeserver and
|
||||
generally help bootstrap the ecosystem.
|
||||
|
||||
In Matrix, every user runs one or more Matrix clients, which connect through to
|
||||
a Matrix homeserver. The homeserver stores all their personal chat history and
|
||||
user account information - much as a mail client connects through to an
|
||||
IMAP/SMTP server. Just like email, you can either run your own Matrix
|
||||
homeserver and control and own your own communications and history or use one
|
||||
hosted by someone else (e.g. matrix.org) - there is no single point of control
|
||||
or mandatory service provider in Matrix, unlike WhatsApp, Facebook, Hangouts,
|
||||
etc.
|
||||
a Matrix homeserver which stores all their personal chat history and user
|
||||
account information - much as a mail client connects through to an IMAP/SMTP
|
||||
server. Just like email, you can either run your own Matrix homeserver and
|
||||
control and own your own communications and history or use one hosted by someone
|
||||
else (e.g. matrix.org) - there is no single point of control or mandatory
|
||||
service provider in Matrix, unlike WhatsApp, Facebook, Hangouts, etc.
|
||||
|
||||
We'd like to invite you to join #matrix:matrix.org (via
|
||||
https://matrix.org/docs/projects/try-matrix-now.html), run a homeserver, take a look
|
||||
at the `Matrix spec <https://matrix.org/docs/spec>`_, and experiment with the
|
||||
`APIs <https://matrix.org/docs/api>`_ and `Client SDKs
|
||||
<https://matrix.org/docs/projects/try-matrix-now.html#client-sdks>`_.
|
||||
Synapse ships with two basic demo Matrix clients: webclient (a basic group chat
|
||||
web client demo implemented in AngularJS) and cmdclient (a basic Python
|
||||
commandline utility which lets you easily see what the JSON APIs are up to).
|
||||
|
||||
Thanks for using Matrix!
|
||||
We'd like to invite you to take a look at the Matrix spec, try to run a
|
||||
homeserver, and join the existing Matrix chatrooms already out there, experiment
|
||||
with the APIs and the demo clients, and let us know your thoughts at
|
||||
https://github.com/matrix-org/synapse/issues or at matrix@matrix.org.
|
||||
|
||||
[1] End-to-end encryption is currently in beta: `blog post <https://matrix.org/blog/2016/11/21/matrixs-olm-end-to-end-encryption-security-assessment-released-and-implemented-cross-platform-on-riot-at-last>`_.
|
||||
Thanks for trying Matrix!
|
||||
|
||||
[1] Cryptographic signing of messages isn't turned on yet
|
||||
|
||||
Synapse Installation
|
||||
====================
|
||||
[2] End-to-end encryption is currently in development
|
||||
|
||||
For details on how to install synapse, see `<INSTALL.md>`_.
|
||||
|
||||
Homeserver Installation
|
||||
=======================
|
||||
|
||||
Connecting to Synapse from a client
|
||||
===================================
|
||||
First, the dependencies need to be installed. Start by installing
|
||||
'python2.7-dev' and the various tools of the compiler toolchain.
|
||||
N.B. synapse requires python 2.x where x >= 7
|
||||
|
||||
The easiest way to try out your new Synapse installation is by connecting to it
|
||||
from a web client.
|
||||
Installing prerequisites on ubuntu::
|
||||
|
||||
Unless you are running a test instance of Synapse on your local machine, in
|
||||
general, you will need to enable TLS support before you can successfully
|
||||
connect from a client: see `<INSTALL.md#tls-certificates>`_.
|
||||
$ sudo apt-get install build-essential python2.7-dev libffi-dev
|
||||
|
||||
An easy way to get started is to log in or register via Riot at
|
||||
https://riot.im/app/#/login or https://riot.im/app/#/register respectively.
|
||||
You will need to change the server you are logging into from ``matrix.org``
|
||||
and instead specify a Homeserver URL of ``https://<server_name>:8448``
|
||||
(or just ``https://<server_name>`` if you are using a reverse proxy).
|
||||
(Leave the identity server as the default - see `Identity servers`_.)
|
||||
If you prefer to use another client, refer to our
|
||||
`client breakdown <https://matrix.org/docs/projects/clients-matrix>`_.
|
||||
Installing prerequisites on Mac OS X::
|
||||
|
||||
If all goes well you should at least be able to log in, create a room, and
|
||||
start sending messages.
|
||||
$ xcode-select --install
|
||||
|
||||
.. _`client-user-reg`:
|
||||
The homeserver has a number of external dependencies that are easiest
|
||||
to install by making setup.py do so, in --user mode::
|
||||
|
||||
Registering a new user from a client
|
||||
------------------------------------
|
||||
$ python setup.py develop --user
|
||||
|
||||
You'll need a version of setuptools new enough to know about git, so you
|
||||
may need to also run:
|
||||
|
||||
By default, registration of new users via Matrix clients is disabled. To enable
|
||||
it, specify ``enable_registration: true`` in ``homeserver.yaml``. (It is then
|
||||
recommended to also set up CAPTCHA - see `<docs/CAPTCHA_SETUP.rst>`_.)
|
||||
$ sudo apt-get install python-pip
|
||||
$ sudo pip install --upgrade setuptools
|
||||
|
||||
If you don't have access to github, then you may need to install ``syutil``
|
||||
manually by checking it out and running ``python setup.py develop --user`` on it
|
||||
too.
|
||||
|
||||
If you get errors about ``sodium.h`` being missing, you may also need to
|
||||
manually install a newer PyNaCl via pip as setuptools installs an old one. Or
|
||||
you can check PyNaCl out of git directly (https://github.com/pyca/pynacl) and
install it. Installing PyNaCl using pip may also work (remember to remove any
other versions installed by setuptools in, for example, ~/.local/lib).
|
||||
|
||||
Once ``enable_registration`` is set to ``true``, it is possible to register a
|
||||
user via `riot.im <https://riot.im/app/#/register>`_ or other Matrix clients.
|
||||
This will run a process of downloading and installing into your
|
||||
user's .local/lib directory all of the required dependencies that are
|
||||
missing.
|
||||
|
||||
Your new user name will be formed partly from the ``server_name`` (see
|
||||
`Configuring synapse`_), and partly from a localpart you specify when you
|
||||
create the account. Your name will take the form of::
|
||||
|
||||
@localpart:my.domain.name
|
||||
|
||||
(pronounced "at localpart on my dot domain dot name").
|
||||
|
||||
As when logging in, you will need to specify a "Custom server". Specify your
|
||||
desired ``localpart`` in the 'User name' box.
|
||||
|
||||
ACME setup
|
||||
==========
|
||||
|
||||
For details on having Synapse manage your federation TLS certificates
|
||||
automatically, please see `<docs/ACME.md>`_.
|
||||
|
||||
|
||||
Security Note
|
||||
=============
|
||||
|
||||
Matrix serves raw user generated data in some APIs - specifically the `content
|
||||
repository endpoints <https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid>`_.
|
||||
|
||||
Whilst we have tried to mitigate against possible XSS attacks (e.g.
|
||||
https://github.com/matrix-org/synapse/pull/1021) we recommend running
|
||||
matrix homeservers on a dedicated domain name, to limit any malicious user-generated
content served to web browsers via a matrix API from being able to attack webapps hosted
|
||||
on the same domain. This is particularly true of sharing a matrix webclient and
|
||||
server on the same domain.
|
||||
|
||||
See https://github.com/vector-im/riot-web/issues/1977 and
|
||||
https://developer.github.com/changes/2014-04-25-user-content-security for more details.
|
||||
|
||||
Troubleshooting
|
||||
===============
|
||||
|
||||
Running out of File Handles
|
||||
---------------------------
|
||||
|
||||
If synapse runs out of file handles, it typically fails badly - live-locking
|
||||
at 100% CPU, and/or failing to accept new TCP connections (blocking the
|
||||
connecting client). Matrix currently can legitimately use a lot of file handles,
|
||||
thanks to busy rooms like #matrix:matrix.org containing hundreds of participating
|
||||
servers. The first time a server talks in a room it will try to connect
|
||||
simultaneously to all participating servers, which could exhaust the available
|
||||
file descriptors between DNS queries & HTTPS sockets, especially if DNS is slow
|
||||
to respond. (We need to improve the routing algorithm used to be better than
|
||||
full mesh, but as of June 2017 this hasn't happened yet).
|
||||
|
||||
If you hit this failure mode, we recommend increasing the maximum number of
|
||||
open file handles to be at least 4096 (assuming a default of 1024 or 256).
|
||||
This is typically done by editing ``/etc/security/limits.conf``.
|
||||
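For example (a sketch only - you may prefer to scope this to the specific user
Synapse runs as rather than ``*``), adding the following lines to
``/etc/security/limits.conf`` raises both the soft and hard limits; a fresh
login (or service restart) is needed before the new limits apply::

    *    soft    nofile    4096
    *    hard    nofile    4096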
|
||||
Separately, Synapse may leak file handles if inbound HTTP requests get stuck
|
||||
during processing - e.g. blocked behind a lock or talking to a remote server etc.
|
||||
This is best diagnosed by matching up the 'Received request' and 'Processed request'
|
||||
log lines and looking for any 'Processed request' lines which take more than
|
||||
a few seconds to execute. Please let us know at #synapse:matrix.org if
you see this failure mode so we can help debug it.
|
||||
|
||||
Help!! Synapse eats all my RAM!
|
||||
-------------------------------
|
||||
|
||||
Synapse's architecture is quite RAM hungry currently - we deliberately
|
||||
cache a lot of recent room data and metadata in RAM in order to speed up
|
||||
common requests. We'll improve this in future, but for now the easiest
way to reduce RAM usage (at the risk of slowing things down)
is to set the almost-undocumented ``SYNAPSE_CACHE_FACTOR`` environment
variable. The default is 0.5, which can be decreased to reduce RAM usage
in memory-constrained environments, or increased if performance starts to
|
||||
degrade.
|
||||
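For example, for a manually-started Synapse you might halve the default cache
factor like this (a sketch - the variable just needs to be present in whatever
environment launches the homeserver, e.g. ``/etc/default/matrix-synapse`` for
the Debian packages)::

    export SYNAPSE_CACHE_FACTOR=0.25
    synctl restart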
|
||||
Using `libjemalloc <http://jemalloc.net/>`_ can also yield a significant
|
||||
improvement in overall memory use, and especially in terms of giving back RAM
|
||||
to the OS. To use it, the library must simply be put in the LD_PRELOAD
|
||||
environment variable when launching Synapse. On Debian, this can be done
|
||||
by installing the ``libjemalloc1`` package and adding this line to
|
||||
``/etc/default/matrix-synapse``::
|
||||
|
||||
LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1
|
||||
|
||||
This can make a significant difference on Python 2.7 - it's unclear how
|
||||
much of an improvement it provides on Python 3.x.
|
||||
|
||||
Upgrading an existing Synapse
|
||||
=============================
|
||||
|
||||
The instructions for upgrading synapse are in `UPGRADE.rst`_.
|
||||
Please check these instructions as upgrading may require extra steps for some
|
||||
versions of synapse.
|
||||
|
||||
.. _UPGRADE.rst: UPGRADE.rst
|
||||
|
||||
.. _federation:
|
||||
|
||||
Setting up Federation
|
||||
=====================
|
||||
|
||||
Federation is the process by which users on different servers can participate
|
||||
in the same room. For this to work, those other servers must be able to contact
|
||||
yours to send messages.
|
||||
|
||||
The ``server_name`` in your ``homeserver.yaml`` file determines the way that
|
||||
other servers will reach yours. By default, they will treat it as a hostname
|
||||
and try to connect to port 8448. This is easy to set up and will work with the
|
||||
default configuration, provided you set the ``server_name`` to match your
|
||||
machine's public DNS hostname, and give Synapse a TLS certificate which is
|
||||
valid for your ``server_name``.
|
||||
|
||||
For a more flexible configuration, you can set up a DNS SRV record. This allows
|
||||
you to run your server on a machine that might not have the same name as your
|
||||
domain name. For example, you might want to run your server at
|
||||
``synapse.example.com``, but have your Matrix user-ids look like
|
||||
``@user:example.com``. (A SRV record also allows you to change the port from
|
||||
the default 8448).
|
||||
|
||||
To use a SRV record, first create your SRV record and publish it in DNS. This
|
||||
should have the format ``_matrix._tcp.<yourdomain.com> <ttl> IN SRV 10 0 <port>
|
||||
<synapse.server.name>``. The DNS record should then look something like::
|
||||
|
||||
$ dig -t srv _matrix._tcp.example.com
|
||||
_matrix._tcp.example.com. 3600 IN SRV 10 0 8448 synapse.example.com.
|
||||
|
||||
Note that the server hostname cannot be an alias (CNAME record): it has to point
|
||||
directly to the server hosting the synapse instance.
|
||||
|
||||
You can then configure your homeserver to use ``<yourdomain.com>`` as the domain in
|
||||
its user-ids, by setting ``server_name``::
|
||||
|
||||
python -m synapse.app.homeserver \
|
||||
--server-name <yourdomain.com> \
|
||||
--config-path homeserver.yaml \
|
||||
--generate-config
|
||||
python -m synapse.app.homeserver --config-path homeserver.yaml
|
||||
|
||||
If you've already generated the config file, you need to edit the ``server_name``
|
||||
in your ``homeserver.yaml`` file. If you've already started Synapse and a
|
||||
database has been created, you will have to recreate the database.
|
||||
|
||||
If all goes well, you should be able to `connect to your server with a client`__,
|
||||
and then join a room via federation. (Try ``#matrix-dev:matrix.org`` as a first
|
||||
step. "Matrix HQ"'s sheer size and activity level tends to make even the
|
||||
largest boxes pause for thought.)
|
||||
|
||||
.. __: `Connecting to Synapse from a client`_
|
||||
|
||||
Troubleshooting
|
||||
---------------
|
||||
|
||||
You can use the `federation tester <https://matrix.org/federationtester>`_ to
|
||||
check if your homeserver is all set.
|
||||
|
||||
The typical failure mode with federation is that when you try to join a room,
|
||||
it is rejected with "401: Unauthorized". Generally this means that other
|
||||
servers in the room couldn't access yours. (Joining a room over federation is a
|
||||
complicated dance which requires connections in both directions).
|
||||
|
||||
So, things to check are:
|
||||
|
||||
* If you are not using a SRV record, check that your ``server_name`` (the part
|
||||
of your user-id after the ``:``) matches your hostname, and that port 8448 on
|
||||
that hostname is reachable from outside your network.
|
||||
* If you *are* using a SRV record, check that it matches your ``server_name``
|
||||
(it should be ``_matrix._tcp.<server_name>``), and that the port and hostname
|
||||
it specifies are reachable from outside your network (see the example check below).
|
||||
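As a rough manual version of these checks (a sketch - substitute your own
domain; the key endpoint is simply a convenient federation URL to probe from
outside your network)::

    $ dig -t srv _matrix._tcp.example.com
    $ curl -kv https://example.com:8448/_matrix/key/v2/server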
|
||||
Another common problem is that people on other servers can't join rooms that
|
||||
you invite them to. This can be caused by an incorrectly-configured reverse
|
||||
proxy: see `<docs/reverse_proxy.rst>`_ for instructions on how to correctly
|
||||
configure a reverse proxy.
|
||||
|
||||
Running a Demo Federation of Synapses
|
||||
-------------------------------------
|
||||
|
||||
If you want to get up and running quickly with a trio of homeservers in a
|
||||
private federation, there is a script in the ``demo`` directory. This is mainly
|
||||
useful just for development purposes. See `<demo/README>`_.
|
||||
|
||||
|
||||
Using PostgreSQL
|
||||
================
|
||||
|
||||
As of Synapse 0.9, `PostgreSQL <https://www.postgresql.org>`_ is supported as an
|
||||
alternative to the `SQLite <https://sqlite.org/>`_ database that Synapse has
|
||||
traditionally used for convenience and simplicity.
|
||||
|
||||
The advantages of Postgres include:
|
||||
|
||||
* significant performance improvements due to the superior threading and
|
||||
caching model, and a smarter query optimiser
|
||||
* allowing the DB to be run on separate hardware
|
||||
* allowing basic active/backup high-availability with a "hot spare" synapse
|
||||
pointing at the same DB master, as well as enabling DB replication in
|
||||
synapse itself.
|
||||
|
||||
For information on how to install and use PostgreSQL, please see
|
||||
`docs/postgres.rst <docs/postgres.rst>`_.
|
||||
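For illustration, the ``database`` section of ``homeserver.yaml`` for a
Postgres-backed Synapse typically looks something like this (a sketch with
placeholder credentials - `docs/postgres.rst <docs/postgres.rst>`_ remains the
authoritative reference)::

    database:
      name: psycopg2
      args:
        user: synapse_user
        password: secretpassword
        database: synapse
        host: localhost
        cp_min: 5
        cp_max: 10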
|
||||
.. _reverse-proxy:
|
||||
|
||||
Using a reverse proxy with Synapse
|
||||
==================================
|
||||
|
||||
It is recommended to put a reverse proxy such as
|
||||
`nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
|
||||
`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
|
||||
`Caddy <https://caddyserver.com/docs/proxy>`_ or
|
||||
`HAProxy <https://www.haproxy.org/>`_ in front of Synapse. One advantage of
|
||||
doing so is that it means that you can expose the default https port (443) to
|
||||
Matrix clients without needing to run Synapse with root privileges.
|
||||
|
||||
For information on configuring one, see `<docs/reverse_proxy.rst>`_.
|
||||
|
||||
Identity Servers
|
||||
================
|
||||
|
||||
Identity servers have the job of mapping email addresses and other 3rd Party
|
||||
IDs (3PIDs) to Matrix user IDs, as well as verifying the ownership of 3PIDs
|
||||
before creating that mapping.
|
||||
|
||||
**They are not where accounts or credentials are stored - these live on home
|
||||
servers. Identity Servers are just for mapping 3rd party IDs to matrix IDs.**
|
||||
|
||||
This process is very security-sensitive, as there is obvious risk of spam if it
|
||||
is too easy to sign up for Matrix accounts or harvest 3PID data. In the longer
|
||||
term, we hope to create a decentralised system to manage it (`matrix-doc #712
|
||||
<https://github.com/matrix-org/matrix-doc/issues/712>`_), but in the meantime,
|
||||
the role of managing trusted identity in the Matrix ecosystem is farmed out to
|
||||
a cluster of known trusted ecosystem partners, who run 'Matrix Identity
|
||||
Servers' such as `Sydent <https://github.com/matrix-org/sydent>`_, whose role
|
||||
is purely to authenticate and track 3PID logins and publish end-user public
|
||||
keys.
|
||||
|
||||
You can host your own copy of Sydent, but this will prevent you from reaching other
|
||||
users in the Matrix ecosystem via their email address, and prevent them finding
|
||||
you. We therefore recommend that you use one of the centralised identity servers
|
||||
at ``https://matrix.org`` or ``https://vector.im`` for now.
|
||||
|
||||
To reiterate: the Identity server will only be used if you choose to associate
|
||||
an email address with your account, or send an invite to another user via their
|
||||
email address.
|
||||
|
||||
|
||||
Password reset
|
||||
==============
|
||||
|
||||
If a user has registered an email address to their account using an identity
|
||||
server, they can request a password-reset token via clients such as Riot.
|
||||
|
||||
A manual password reset can be done via direct database access as follows.
|
||||
|
||||
First calculate the hash of the new password::
|
||||
|
||||
$ ~/synapse/env/bin/hash_password
|
||||
Password:
|
||||
Confirm password:
|
||||
$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
|
||||
|
||||
Then update the `users` table in the database::
|
||||
|
||||
UPDATE users SET password_hash='$2a$12$xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
|
||||
WHERE name='@test:test.com';
|
||||
|
||||
|
||||
Synapse Development
|
||||
===================
|
||||
|
||||
Before setting up a development environment for synapse, make sure you have the
|
||||
system dependencies (such as the python header files) installed - see
|
||||
`Installing from source <INSTALL.md#installing-from-source>`_.
|
||||
|
||||
To check out a synapse for development, clone the git repo into a working
|
||||
directory of your choice::
|
||||
|
||||
git clone https://github.com/matrix-org/synapse.git
|
||||
cd synapse
|
||||
|
||||
Synapse has a number of external dependencies that are easiest
|
||||
to install using pip and a virtualenv::
|
||||
|
||||
virtualenv -p python3 env
|
||||
source env/bin/activate
|
||||
python -m pip install -e .[all]
|
||||
|
||||
This will run a process of downloading and installing all the needed
|
||||
dependencies into a virtual env.
|
||||
|
||||
Once this is done, you may wish to run Synapse's unit tests, to
|
||||
Once this is done, you may wish to run the homeserver's unit tests, to
|
||||
check that everything is installed as it should be::
|
||||
|
||||
python -m twisted.trial tests
|
||||
$ python setup.py test
|
||||
|
||||
This should end with a 'PASSED' result::
|
||||
|
||||
@@ -416,27 +146,145 @@ This should end with a 'PASSED' result::
|
||||
|
||||
PASSED (successes=143)
|
||||
|
||||
Running the Integration Tests
|
||||
=============================
|
||||
|
||||
Synapse is accompanied by `SyTest <https://github.com/matrix-org/sytest>`_,
|
||||
a Matrix homeserver integration testing suite, which uses HTTP requests to
|
||||
access the API as a Matrix client would. It is able to run Synapse directly from
|
||||
the source tree, so installation of the server is not required.
|
||||
Upgrading an existing homeserver
|
||||
================================
|
||||
|
||||
Before upgrading an existing homeserver to a new version, please refer to
|
||||
UPGRADE.rst for any additional instructions.
|
||||
|
||||
|
||||
Setting up Federation
|
||||
=====================
|
||||
|
||||
In order for other homeservers to send messages to your server, it will need to
|
||||
be publicly visible on the internet, and they will need to know its host name.
|
||||
You have two choices here, which will influence the form of your Matrix user
|
||||
IDs:
|
||||
|
||||
1) Use the machine's own hostname as available on public DNS in the form of its
|
||||
A or AAAA records. This is easier to set up initially, perhaps for testing,
|
||||
but lacks the flexibility of SRV.
|
||||
|
||||
2) Set up a SRV record for your domain name. This requires you create a SRV
|
||||
record in DNS, but gives the flexibility to run the server on your own
|
||||
choice of TCP port, on a machine that might not be the same name as the
|
||||
domain name.
|
||||
|
||||
For the first form, simply pass the required hostname (of the machine) as the
|
||||
--host parameter::
|
||||
|
||||
$ python synapse/app/homeserver.py \
|
||||
--server-name machine.my.domain.name \
|
||||
--config-path homeserver.config \
|
||||
--generate-config
|
||||
$ python synapse/app/homeserver.py --config-path homeserver.config
|
||||
|
||||
For the second form, first create your SRV record and publish it in DNS. This
|
||||
needs to be named _matrix._tcp.YOURDOMAIN, and point at at least one hostname
|
||||
and port where the server is running. (At the current time synapse does not
|
||||
support clustering multiple servers into a single logical homeserver). The DNS
|
||||
record would then look something like::
|
||||
|
||||
_matrix._tcp IN SRV 10 0 8448 machine.my.domain.name.
|
||||
|
||||
At this point, you should then run the homeserver with the hostname of this
|
||||
SRV record, as that is the name other machines will expect it to have::
|
||||
|
||||
$ python synapse/app/homeserver.py \
|
||||
--server-name YOURDOMAIN \
|
||||
--bind-port 8448 \
|
||||
--config-path homeserver.config \
|
||||
--generate-config
|
||||
$ python synapse/app/homeserver.py --config-path homeserver.config
|
||||
|
||||
|
||||
You may additionally want to pass one or more "-v" options, in order to
|
||||
increase the verbosity of logging output; at least for initial testing.
|
||||
|
||||
For the initial alpha release, the homeserver is not speaking TLS for
|
||||
either client-server or server-server traffic for ease of debugging. We have
|
||||
also not spent any time yet getting the homeserver to run behind load balancers.
|
||||
|
||||
Running a Demo Federation of Homeservers
|
||||
----------------------------------------
|
||||
|
||||
If you want to get up and running quickly with a trio of homeservers in a
|
||||
private federation (``localhost:8080``, ``localhost:8081`` and
|
||||
``localhost:8082``), which you can then access through the webclient running at
http://localhost:8080, simply run::
|
||||
|
||||
$ demo/start.sh
|
||||
|
||||
Running The Demo Web Client
|
||||
===========================
|
||||
|
||||
The homeserver runs a web client by default at http://localhost:8080.
|
||||
|
||||
If this is the first time you have used the client from that browser (it uses
|
||||
HTML5 local storage to remember its config), you will need to log in to your
|
||||
account. If you don't yet have an account, because you've just started the
|
||||
homeserver for the first time, then you'll need to register one.
|
||||
|
||||
|
||||
Registering A New Account
|
||||
-------------------------
|
||||
|
||||
Your new user name will be formed partly from the hostname your server is
|
||||
running as, and partly from a localpart you specify when you create the
|
||||
account. Your name will take the form of::
|
||||
|
||||
@localpart:my.domain.here
|
||||
(pronounced "at localpart on my dot domain dot here")
|
||||
|
||||
Specify your desired localpart in the topmost box of the "Register for an
|
||||
account" form, and click the "Register" button. Hostnames can contain ports if
|
||||
required due to lack of SRV records (e.g. @matthew:localhost:8080 on an internal
|
||||
synapse sandbox running on localhost).
|
||||
|
||||
|
||||
Logging In To An Existing Account
|
||||
---------------------------------
|
||||
|
||||
Just enter the ``@localpart:my.domain.here`` Matrix user ID and password into
|
||||
the form and click the Login button.
|
||||
|
||||
|
||||
Identity Servers
|
||||
================
|
||||
|
||||
The job of authenticating 3PIDs and tracking which 3PIDs are associated with a
|
||||
given Matrix user is very security-sensitive, as there is obvious risk of spam
|
||||
if it is too easy to sign up for Matrix accounts or harvest 3PID data. Meanwhile
|
||||
the job of publishing the end-to-end encryption public keys for Matrix users is
|
||||
also very security-sensitive for similar reasons.
|
||||
|
||||
Therefore the role of managing trusted identity in the Matrix ecosystem is
|
||||
farmed out to a cluster of known trusted ecosystem partners, who run 'Matrix
|
||||
Identity Servers' such as ``sydent``, whose role is purely to authenticate and
|
||||
track 3PID logins and publish end-user public keys.
|
||||
|
||||
It's currently early days for identity servers as Matrix is not yet using 3PIDs
|
||||
as the primary means of identity and E2E encryption is not complete. As such,
|
||||
we're not yet running an identity server in public.
|
||||
|
||||
|
||||
Where's the spec?!
|
||||
==================
|
||||
|
||||
For now, please go spelunking in the ``docs/`` directory to find out.
|
||||
|
||||
Testing with SyTest is recommended for verifying that changes related to the
|
||||
Client-Server API are functioning correctly. See the `installation instructions
|
||||
<https://github.com/matrix-org/sytest#installing>`_ for details.
|
||||
|
||||
Building Internal API Documentation
|
||||
===================================
|
||||
|
||||
Before building internal API documentation install sphinx and
|
||||
Before building internal API documentation install spinx and
|
||||
sphinxcontrib-napoleon::
|
||||
|
||||
pip install sphinx
|
||||
pip install sphinxcontrib-napoleon
|
||||
$ pip install sphinx
|
||||
$ pip install sphinxcontrib-napoleon
|
||||
|
||||
Building internal API documentation::
|
||||
|
||||
python setup.py build_sphinx
|
||||
$ python setup.py build_sphinx
|
||||
|
||||
|
||||
374
UPGRADE.rst
374
UPGRADE.rst
@@ -1,363 +1,3 @@
|
||||
Upgrading Synapse
|
||||
=================
|
||||
|
||||
Before upgrading, check if any special steps are required to upgrade from
what you currently have installed to the current version of synapse. The extra
|
||||
instructions that may be required are listed later in this document.
|
||||
|
||||
1. If synapse was installed in a virtualenv then activate that virtualenv before
|
||||
upgrading. If synapse is installed in a virtualenv in ``~/synapse/env`` then
|
||||
run:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
source ~/synapse/env/bin/activate
|
||||
|
||||
2. If synapse was installed using pip then upgrade to the latest version by
|
||||
running:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
pip install --upgrade matrix-synapse[all]
|
||||
|
||||
# restart synapse
|
||||
synctl restart
|
||||
|
||||
|
||||
If synapse was installed using git then upgrade to the latest version by
|
||||
running:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
# Pull the latest version of the master branch.
|
||||
git pull
|
||||
|
||||
# Update synapse and its python dependencies.
|
||||
pip install --upgrade .[all]
|
||||
|
||||
# restart synapse
|
||||
./synctl restart
|
||||
|
||||
|
||||
To check whether your update was successful, you can check the Server header
|
||||
returned by the Client-Server API:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
# replace <host.name> with the hostname of your synapse homeserver.
|
||||
# You may need to specify a port (eg, :8448) if your server is not
|
||||
# configured on port 443.
|
||||
curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"
|
||||
|
||||
Upgrading to v0.99.0
|
||||
====================
|
||||
|
||||
Please be aware that, before Synapse v1.0 is released around March 2019, you
|
||||
will need to replace any self-signed certificates with those verified by a
|
||||
root CA. Information on how to do so can be found at `the ACME docs
|
||||
<docs/ACME.md>`_.
|
||||
|
||||
For more information on configuring TLS certificates see the `FAQ <docs/MSC1711_certificates_FAQ.md>`_.
|
||||
|
||||
Upgrading to v0.34.0
|
||||
====================
|
||||
|
||||
1. This release is the first to fully support Python 3. Synapse will now run on
|
||||
Python versions 3.5 or 3.6 (as well as 2.7). We recommend switching to
|
||||
Python 3, as it has been shown to give performance improvements.
|
||||
|
||||
For users who have installed Synapse into a virtualenv, we recommend doing
|
||||
this by creating a new virtualenv. For example::
|
||||
|
||||
virtualenv -p python3 ~/synapse/env3
|
||||
source ~/synapse/env3/bin/activate
|
||||
pip install matrix-synapse
|
||||
|
||||
You can then start synapse as normal, having activated the new virtualenv::
|
||||
|
||||
cd ~/synapse
|
||||
source env3/bin/activate
|
||||
synctl start
|
||||
|
||||
Users who have installed from distribution packages should see the relevant
|
||||
package documentation. See below for notes on Debian packages.
|
||||
|
||||
* When upgrading to Python 3, you **must** make sure that your log files are
|
||||
configured as UTF-8, by adding ``encoding: utf8`` to the
|
||||
``RotatingFileHandler`` configuration (if you have one) in your
|
||||
``<server>.log.config`` file. For example, if your ``log.config`` file
|
||||
contains::
|
||||
|
||||
handlers:
|
||||
file:
|
||||
class: logging.handlers.RotatingFileHandler
|
||||
formatter: precise
|
||||
filename: homeserver.log
|
||||
maxBytes: 104857600
|
||||
backupCount: 10
|
||||
filters: [context]
|
||||
console:
|
||||
class: logging.StreamHandler
|
||||
formatter: precise
|
||||
filters: [context]
|
||||
|
||||
Then you should update this to be::
|
||||
|
||||
handlers:
|
||||
file:
|
||||
class: logging.handlers.RotatingFileHandler
|
||||
formatter: precise
|
||||
filename: homeserver.log
|
||||
maxBytes: 104857600
|
||||
backupCount: 10
|
||||
filters: [context]
|
||||
encoding: utf8
|
||||
console:
|
||||
class: logging.StreamHandler
|
||||
formatter: precise
|
||||
filters: [context]
|
||||
|
||||
There is no need to revert this change if downgrading to Python 2.
|
||||
|
||||
We are also making available Debian packages which will run Synapse on
|
||||
Python 3. You can switch to these packages with ``apt-get install
|
||||
matrix-synapse-py3``, however, please read `debian/NEWS
|
||||
<https://github.com/matrix-org/synapse/blob/release-v0.34.0/debian/NEWS>`_
|
||||
before doing so. The existing ``matrix-synapse`` packages will continue to
|
||||
use Python 2 for the time being.
|
||||
|
||||
2. This release removes ``riot.im`` from the default list of trusted
|
||||
identity servers.
|
||||
|
||||
If ``riot.im`` is in your homeserver's list of
|
||||
``trusted_third_party_id_servers``, you should remove it. It was added in
|
||||
case a hypothetical future identity server was put there. If you don't
|
||||
remove it, users may be unable to deactivate their accounts.
|
||||
|
||||
3. This release no longer installs the (unmaintained) Matrix Console web client
|
||||
as part of the default installation. It is possible to re-enable it by
|
||||
installing it separately and setting the ``web_client_location`` config
|
||||
option, but please consider switching to another client.
|
||||
|
||||
Upgrading to v0.33.7
|
||||
====================
|
||||
|
||||
This release removes the example email notification templates from
|
||||
``res/templates`` (they are now internal to the python package). This should
|
||||
only affect you if you (a) deploy your Synapse instance from a git checkout or
|
||||
a github snapshot URL, and (b) have email notifications enabled.
|
||||
|
||||
If you have email notifications enabled, you should ensure that
|
||||
``email.template_dir`` is either configured to point at a directory where you
|
||||
have installed customised templates, or leave it unset to use the default
|
||||
templates.
|
||||
|
||||
Upgrading to v0.27.3
|
||||
====================
|
||||
|
||||
This release expands the anonymous usage stats sent if the opt-in
|
||||
``report_stats`` configuration is set to ``true``. We now capture RSS memory
|
||||
and CPU use at a very coarse level. This requires administrators to install
|
||||
the optional ``psutil`` python module.
|
||||
|
||||
We would appreciate it if you could assist by ensuring this module is available
|
||||
and ``report_stats`` is enabled. This will let us see if performance changes to
|
||||
synapse are having an impact on the general community.
|
||||
|
||||
Upgrading to v0.15.0
|
||||
====================
|
||||
|
||||
If you want to use the new URL previewing API (/_matrix/media/r0/preview_url)
|
||||
then you have to explicitly enable it in the config and update your
dependencies. See README.rst for details.
|
||||
|
||||
|
||||
Upgrading to v0.11.0
|
||||
====================
|
||||
|
||||
This release includes the option to send anonymous usage stats to matrix.org,
|
||||
and requires that administrators explicitly opt in or out by setting the
|
||||
``report_stats`` option to either ``true`` or ``false``.
|
||||
|
||||
We would really appreciate it if you could help our project out by reporting
|
||||
anonymized usage statistics from your homeserver. Only very basic aggregate
|
||||
data (e.g. number of users) will be reported, but it helps us to track the
|
||||
growth of the Matrix community, and helps us to make Matrix a success, as well
|
||||
as to convince other networks that they should peer with us.
|
||||
|
||||
|
||||
Upgrading to v0.9.0
|
||||
===================
|
||||
|
||||
Application services have had a breaking API change in this version.
|
||||
|
||||
They can no longer register themselves with a home server using the AS HTTP API. This
|
||||
decision was made because a compromised application service with free rein to register
|
||||
any regex in effect grants full read/write access to the home server if a regex of ``.*``
|
||||
is used. An attack where a compromised AS re-registers itself with ``.*`` was deemed too
|
||||
big of a security risk to ignore, and so the ability to register with the HS remotely has
|
||||
been removed.
|
||||
|
||||
It has been replaced by specifying a list of application service registrations in
|
||||
``homeserver.yaml``::
|
||||
|
||||
app_service_config_files: ["registration-01.yaml", "registration-02.yaml"]
|
||||
|
||||
Where ``registration-01.yaml`` looks like::
|
||||
|
||||
url: <String> # e.g. "https://my.application.service.com"
|
||||
as_token: <String>
|
||||
hs_token: <String>
|
||||
sender_localpart: <String> # This is a new field which denotes the user_id localpart when using the AS token
|
||||
namespaces:
|
||||
users:
|
||||
- exclusive: <Boolean>
|
||||
regex: <String> # e.g. "@prefix_.*"
|
||||
aliases:
|
||||
- exclusive: <Boolean>
|
||||
regex: <String>
|
||||
rooms:
|
||||
- exclusive: <Boolean>
|
||||
regex: <String>
|
||||
|
||||
Upgrading to v0.8.0
|
||||
===================
|
||||
|
||||
Servers which use captchas will need to add their public key to::
|
||||
|
||||
static/client/register/register_config.js
|
||||
|
||||
window.matrixRegistrationConfig = {
|
||||
recaptcha_public_key: "YOUR_PUBLIC_KEY"
|
||||
};
|
||||
|
||||
This is required in order to support registration fallback (typically used on
|
||||
mobile devices).
|
||||
|
||||
|
||||
Upgrading to v0.7.0
|
||||
===================
|
||||
|
||||
New dependencies are:
|
||||
|
||||
- pydenticon
|
||||
- simplejson
|
||||
- syutil
|
||||
- matrix-angular-sdk
|
||||
|
||||
To pull in these dependencies in a virtual env, run::
|
||||
|
||||
python synapse/python_dependencies.py | xargs -n 1 pip install
|
||||
|
||||
Upgrading to v0.6.0
|
||||
===================
|
||||
|
||||
To pull in new dependencies, run::
|
||||
|
||||
python setup.py develop --user
|
||||
|
||||
This update includes a change to the database schema. To upgrade you first need
|
||||
to upgrade the database by running::
|
||||
|
||||
python scripts/upgrade_db_to_v0.6.0.py <db> <server_name> <signing_key>
|
||||
|
||||
Where `<db>` is the location of the database, `<server_name>` is the
|
||||
server name as specified in the synapse configuration, and `<signing_key>` is
|
||||
the location of the signing key as specified in the synapse configuration.
|
||||
|
||||
This may take some time to complete. Failures of signatures and content hashes
|
||||
can safely be ignored.
|
||||
|
||||
|
||||
Upgrading to v0.5.1
|
||||
===================
|
||||
|
||||
Depending on precisely when you installed v0.5.0 you may have ended up with
|
||||
a stale release of the reference matrix webclient installed as a python module.
|
||||
To uninstall it and ensure you are depending on the latest module, please run::
|
||||
|
||||
$ pip uninstall syweb
|
||||
|
||||
Upgrading to v0.5.0
|
||||
===================
|
||||
|
||||
The webclient has been split out into a separate repository/package in this
|
||||
release. Before you restart your homeserver you will need to pull in the
|
||||
webclient package by running::
|
||||
|
||||
python setup.py develop --user
|
||||
|
||||
This release completely changes the database schema and so requires upgrading
|
||||
it before starting the new version of the homeserver.
|
||||
|
||||
The script "database-prepare-for-0.5.0.sh" should be used to upgrade the
|
||||
database. This will save all user information, such as logins and profiles,
|
||||
but will otherwise purge the database. This includes messages, which
rooms the home server was a member of, and room alias mappings.
|
||||
|
||||
If you would like to keep your history, please take a copy of your database
|
||||
file and ask for help in #matrix:matrix.org. The upgrade process is,
|
||||
unfortunately, non-trivial and requires human intervention to resolve any
|
||||
resulting conflicts during the upgrade process.
|
||||
|
||||
Before running the command, the homeserver should first be completely
shut down. To run it, simply specify the location of the database, e.g.:
|
||||
|
||||
./scripts/database-prepare-for-0.5.0.sh "homeserver.db"
|
||||
|
||||
Once this has successfully completed it will be safe to restart the
|
||||
homeserver. You may notice that the homeserver takes a few seconds longer to
|
||||
restart than usual as it reinitializes the database.
|
||||
|
||||
On startup of the new version, users can rejoin remote rooms either by using room
aliases or by being reinvited. Alternatively, if any other homeserver sends a
message to a room that the homeserver was previously in, the local HS will
automatically rejoin the room.
|
||||
|
||||
Upgrading to v0.4.0
|
||||
===================
|
||||
|
||||
This release needs an updated syutil version. Run::
|
||||
|
||||
python setup.py develop
|
||||
|
||||
You will also need to upgrade your configuration as the signing key format has
|
||||
changed. Run::
|
||||
|
||||
python -m synapse.app.homeserver --config-path <CONFIG> --generate-config
|
||||
|
||||
|
||||
Upgrading to v0.3.0
|
||||
===================
|
||||
|
||||
The registration API now closely matches the login API. This introduces a bit
|
||||
more backwards and forwards between the HS and the client, but this improves
|
||||
the overall flexibility of the API. You can now GET on /register to retrieve a list
|
||||
of valid registration flows. Upon choosing one, they are submitted in the same
|
||||
way as login, e.g.::
|
||||
|
||||
{
|
||||
type: m.login.password,
|
||||
user: foo,
|
||||
password: bar
|
||||
}
|
||||
|
||||
The default HS supports 2 flows, with and without Identity Server email
|
||||
authentication. Enabling captcha on the HS will add in an extra step to all
|
||||
flows: ``m.login.recaptcha`` which must be completed before you can transition
|
||||
to the next stage. There is a new login type: ``m.login.email.identity`` which
|
||||
contains the ``threepidCreds`` key which was previously sent in the original
|
||||
register request. For more information on this, see the specification.
|
||||
|
||||
Web Client
|
||||
----------
|
||||
|
||||
The VoIP specification has changed between v0.2.0 and v0.3.0. Users should
|
||||
refresh any browser tabs to get the latest web client code. Users on
|
||||
v0.2.0 of the web client will not be able to call those on v0.3.0 and
|
||||
vice versa.
|
||||
|
||||
|
||||
Upgrading to v0.2.0
|
||||
===================
|
||||
|
||||
@@ -370,7 +10,7 @@ automatically generate default config use::
|
||||
--config-path homeserver.config \
|
||||
--generate-config
|
||||
|
||||
This config can be edited if desired, for example to specify a different SSL
|
||||
This config can be edited if desired, for example to specify a different SSL
|
||||
certificate to use. Once done you can run the home server using::
|
||||
|
||||
$ python synapse/app/homeserver.py --config-path homeserver.config
|
||||
@@ -391,20 +31,20 @@ This release completely changes the database schema and so requires upgrading
|
||||
it before starting the new version of the homeserver.
|
||||
|
||||
The script "database-prepare-for-0.0.1.sh" should be used to upgrade the
|
||||
database. This will save all user information, such as logins and profiles,
|
||||
database. This will save all user information, such as logins and profiles,
|
||||
but will otherwise purge the database. This includes messages, which
|
||||
rooms the home server was a member of and room alias mappings.
|
||||
|
||||
Before running the command the homeserver should be first completely
|
||||
Before running the command the homeserver should be first completely
|
||||
shutdown. To run it, simply specify the location of the database, e.g.:
|
||||
|
||||
./scripts/database-prepare-for-0.0.1.sh "homeserver.db"
|
||||
./database-prepare-for-0.0.1.sh "homeserver.db"
|
||||
|
||||
Once this has successfully completed it will be safe to restart the
|
||||
homeserver. You may notice that the homeserver takes a few seconds longer to
|
||||
Once this has successfully completed it will be safe to restart the
|
||||
homeserver. You may notice that the homeserver takes a few seconds longer to
|
||||
restart than usual as it reinitializes the database.
|
||||
|
||||
On startup of the new version, users can either rejoin remote rooms using room
|
||||
aliases or by being reinvited. Alternatively, if any other homeserver sends a
|
||||
message to a room that the homeserver was previously in the local HS will
|
||||
message to a room that the homeserver was previously in the local HS will
|
||||
automatically rejoin the room.
|
||||
|
||||
7
WISHLIST.rst
Normal file
7
WISHLIST.rst
Normal file
@@ -0,0 +1,7 @@
|
||||
Broad-sweeping stuff which would be nice to have
|
||||
================================================
|
||||
|
||||
- Additional SQL backends beyond sqlite
|
||||
- homeserver implementation in go
|
||||
- homeserver implementation in node.js
|
||||
- client SDKs
|
||||
1
changelog.d/.gitignore
vendored
1
changelog.d/.gitignore
vendored
@@ -1 +0,0 @@
|
||||
!.gitignore
|
||||
@@ -1 +0,0 @@
|
||||
Fix attempting to paginate in rooms where server cannot see any events, to avoid unnecessarily pulling in lots of redacted events.
|
||||
@@ -1 +0,0 @@
|
||||
'event_id' is now a required parameter in federated state requests, as per the matrix spec.
|
||||
@@ -1 +0,0 @@
|
||||
Fix tightloop over connecting to replication server.
|
||||
@@ -1 +0,0 @@
|
||||
Change from TravisCI to Buildkite for CI.
|
||||
@@ -1 +0,0 @@
|
||||
Move server key queries to federation reader.
|
||||
@@ -1 +0,0 @@
|
||||
When presence is disabled don't send over replication.
|
||||
@@ -1 +0,0 @@
|
||||
Add support for /account/3pid REST endpoint to client_reader worker.
|
||||
@@ -1 +0,0 @@
|
||||
Fix parsing of Content-Disposition headers on remote media requests and URL previews.
|
||||
@@ -1 +0,0 @@
|
||||
Minor docstring fixes for MatrixFederationAgent.
|
||||
@@ -1 +0,0 @@
|
||||
Optimise EDU transmission for the federation_sender worker.
|
||||
@@ -1 +0,0 @@
|
||||
Update test_typing to use HomeserverTestCase.
|
||||
@@ -1 +0,0 @@
|
||||
Fix incorrect log about not persisting duplicate state event.
|
||||
@@ -1 +0,0 @@
|
||||
Fix v4v6 option in HAProxy example config. Contributed by Flakebi.
|
||||
@@ -1 +0,0 @@
|
||||
Include a default configuration file in the 'docs' directory.
|
||||
@@ -1 +0,0 @@
|
||||
Add support for /keys/query and /keys/changes REST endpoints to client_reader worker.
|
||||
@@ -1 +0,0 @@
|
||||
Clean up read-receipt handling.
|
||||
@@ -1 +0,0 @@
|
||||
Add some debug about processing read receipts.
|
||||
@@ -1 +0,0 @@
|
||||
Clean up some replication code.
|
||||
@@ -1,6 +1,6 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
# Copyright 2014 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -32,7 +32,7 @@ import urlparse
|
||||
import nacl.signing
|
||||
import nacl.encoding
|
||||
|
||||
from signedjson.sign import verify_signed_json, SignatureVerifyException
|
||||
from syutil.crypto.jsonsign import verify_signed_json, SignatureVerifyException
|
||||
|
||||
CONFIG_JSON = "cmdclient_config.json"
|
||||
|
||||
@@ -145,50 +145,35 @@ class SynapseCmd(cmd.Cmd):
|
||||
<noupdate> : Do not automatically clobber config values.
|
||||
"""
|
||||
args = self._parse(line, ["userid", "noupdate"])
|
||||
path = "/register"
|
||||
|
||||
password = None
|
||||
pwd = None
|
||||
pwd2 = "_"
|
||||
while pwd != pwd2:
|
||||
pwd = getpass.getpass("Type a password for this user: ")
|
||||
pwd = getpass.getpass("(Optional) Type a password for this user: ")
|
||||
if len(pwd) == 0:
|
||||
print "Not using a password for this user."
|
||||
break
|
||||
pwd2 = getpass.getpass("Retype the password: ")
|
||||
if pwd != pwd2 or len(pwd) == 0:
|
||||
if pwd != pwd2:
|
||||
print "Password mismatch."
|
||||
pwd = None
|
||||
else:
|
||||
password = pwd
|
||||
|
||||
body = {
|
||||
"type": "m.login.password"
|
||||
}
|
||||
body = {}
|
||||
if "userid" in args:
|
||||
body["user"] = args["userid"]
|
||||
body["user_id"] = args["userid"]
|
||||
if password:
|
||||
body["password"] = password
|
||||
|
||||
reactor.callFromThread(self._do_register, body,
|
||||
reactor.callFromThread(self._do_register, "POST", path, body,
|
||||
"noupdate" not in args)
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def _do_register(self, data, update_config):
|
||||
# check the registration flows
|
||||
url = self._url() + "/register"
|
||||
json_res = yield self.http_client.do_request("GET", url)
|
||||
print json.dumps(json_res, indent=4)
|
||||
|
||||
passwordFlow = None
|
||||
for flow in json_res["flows"]:
|
||||
if flow["type"] == "m.login.recaptcha" or ("stages" in flow and "m.login.recaptcha" in flow["stages"]):
|
||||
print "Unable to register: Home server requires captcha."
|
||||
return
|
||||
if flow["type"] == "m.login.password" and "stages" not in flow:
|
||||
passwordFlow = flow
|
||||
break
|
||||
|
||||
if not passwordFlow:
|
||||
return
|
||||
|
||||
json_res = yield self.http_client.do_request("POST", url, data=data)
|
||||
def _do_register(self, method, path, data, update_config):
|
||||
url = self._url() + path
|
||||
json_res = yield self.http_client.do_request(method, url, data=data)
|
||||
print json.dumps(json_res, indent=4)
|
||||
if update_config and "user_id" in json_res:
|
||||
self.config["user"] = json_res["user_id"]
|
||||
@@ -1,5 +1,5 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
# Copyright 2014 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -36,13 +36,15 @@ class HttpClient(object):
|
||||
the request body. This will be encoded as JSON.
|
||||
|
||||
Returns:
|
||||
Deferred: Succeeds when we get a 2xx HTTP response. The result
|
||||
will be the decoded JSON body.
|
||||
Deferred: Succeeds when we get *any* HTTP response.
|
||||
|
||||
The result of the deferred is a tuple of `(code, response)`,
|
||||
where `response` is a dict representing the decoded JSON body.
|
||||
"""
|
||||
pass
|
||||
|
||||
def get_json(self, url, args=None):
|
||||
""" Gets some json from the given host homeserver and path
|
||||
""" Get's some json from the given host homeserver and path
|
||||
|
||||
Args:
|
||||
url (str): The URL to GET data from.
|
||||
@@ -52,8 +54,10 @@ class HttpClient(object):
|
||||
and *not* a string.
|
||||
|
||||
Returns:
|
||||
Deferred: Succeeds when we get a 2xx HTTP response. The result
|
||||
will be the decoded JSON body.
|
||||
Deferred: Succeeds when we get *any* HTTP response.
|
||||
|
||||
The result of the deferred is a tuple of `(code, response)`,
|
||||
where `response` is a dict representing the decoded JSON body.
|
||||
"""
|
||||
pass
|
||||
|
||||
@@ -210,4 +214,4 @@ class _JsonProducer(object):
|
||||
pass
|
||||
|
||||
def stopProducing(self):
|
||||
pass
|
||||
pass
|
||||
@@ -1,10 +0,0 @@
|
||||
Community Contributions
|
||||
=======================
|
||||
|
||||
Everything in this directory is a project submitted by the community that may be useful
|
||||
to others. As such, the project maintainers cannot guarantee support, stability
|
||||
or backwards compatibility of these projects.
|
||||
|
||||
Files in this directory should *not* be relied on directly, as they may not
|
||||
continue to work or exist in future. If you wish to use any of these files then
|
||||
they should be copied to avoid them breaking from underneath you.
|
||||
@@ -1,41 +0,0 @@
|
||||
# Synapse Docker

### Automated configuration

It is recommended that you use Docker Compose to run your containers, including
this image and a Postgres server. A sample ``docker-compose.yml`` is provided,
including example labels for reverse proxying and other artifacts.

Read the section about environment variables and set at least the mandatory variables,
then run the server:

```
docker-compose up -d
```

If secrets are not specified in the environment variables, they will be generated
as part of the startup. Please ensure these secrets are kept between launches of the
Docker container, as their loss may require users to log in again.
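
As a quick sanity check (a minimal sketch; the ``synapse`` service name is taken from the sample ``docker-compose.yml`` shown further below), you can follow the container output to confirm the configuration and secrets were generated and loaded:

```
docker-compose up -d
docker-compose logs -f synapse
```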

### Manual configuration

A sample ``docker-compose.yml`` is provided, including example labels for
reverse proxying and other artifacts. The docker-compose file is an example;
please comment/uncomment the sections that are not suitable for your use case.

To use manual configuration, specify a ``SYNAPSE_CONFIG_PATH``, preferably pointing
at a persistent path. To generate a fresh ``homeserver.yaml``, simply run:

```
docker-compose run --rm -e SYNAPSE_SERVER_NAME=my.matrix.host synapse generate
```

Then, customize your configuration and run the server:

```
docker-compose up -d
```

### More information

For more information on required environment variables and mounts, see the main docker documentation at [/docker/README.md](../../docker/README.md)
@@ -1,52 +0,0 @@
|
||||
# This compose file is compatible with Compose itself; it might need some
# adjustments to run properly with `docker stack`.
|
||||
|
||||
version: '3'
|
||||
|
||||
services:
|
||||
|
||||
synapse:
|
||||
build:
|
||||
context: ../..
|
||||
dockerfile: docker/Dockerfile
|
||||
image: docker.io/matrixdotorg/synapse:latest
|
||||
# Since synapse does not retry connecting to the database, restart upon
|
||||
# failure
|
||||
restart: unless-stopped
|
||||
# See the readme for a full documentation of the environment settings
|
||||
environment:
|
||||
- SYNAPSE_SERVER_NAME=my.matrix.host
|
||||
- SYNAPSE_REPORT_STATS=no
|
||||
- SYNAPSE_ENABLE_REGISTRATION=yes
|
||||
- SYNAPSE_LOG_LEVEL=INFO
|
||||
- POSTGRES_PASSWORD=changeme
|
||||
volumes:
|
||||
# You may either store all the files in a local folder
|
||||
- ./files:/data
|
||||
# .. or you may split this between different storage points
|
||||
# - ./files:/data
|
||||
# - /path/to/ssd:/data/uploads
|
||||
# - /path/to/large_hdd:/data/media
|
||||
depends_on:
|
||||
- db
|
||||
# In order to expose Synapse, remove one of the following; you might for
|
||||
# instance expose the TLS port directly:
|
||||
ports:
|
||||
- 8448:8448/tcp
|
||||
# ... or use a reverse proxy, here is an example for traefik:
|
||||
labels:
|
||||
- traefik.enable=true
|
||||
- traefik.frontend.rule=Host:my.matrix.host
|
||||
- traefik.port=8008
|
||||
|
||||
db:
|
||||
image: docker.io/postgres:10-alpine
|
||||
# Change that password, of course!
|
||||
environment:
|
||||
- POSTGRES_USER=synapse
|
||||
- POSTGRES_PASSWORD=changeme
|
||||
volumes:
|
||||
# You may store the database tables in a local folder..
|
||||
- ./schemas:/var/lib/postgresql/data
|
||||
# .. or store them on some high performance storage for better results
|
||||
# - /path/to/ssd/storage:/var/lib/postgresql/data
|
||||
@@ -1,50 +0,0 @@
|
||||
# Example log_config file for synapse. To enable, point `log_config` to it in
|
||||
# `homeserver.yaml`, and restart synapse.
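#
# For example, in homeserver.yaml (the path below is purely illustrative):
#
#   log_config: "/etc/synapse/log_config.yaml"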
|
||||
#
|
||||
# This configuration will produce similar results to the defaults within
|
||||
# synapse, but can be edited to give more flexibility.
|
||||
|
||||
version: 1
|
||||
|
||||
formatters:
|
||||
fmt:
|
||||
format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s'
|
||||
|
||||
filters:
|
||||
context:
|
||||
(): synapse.util.logcontext.LoggingContextFilter
|
||||
request: ""
|
||||
|
||||
handlers:
|
||||
# example output to console
|
||||
console:
|
||||
class: logging.StreamHandler
|
||||
filters: [context]
|
||||
|
||||
# example output to file - to enable, edit 'root' config below.
|
||||
file:
|
||||
class: logging.handlers.RotatingFileHandler
|
||||
formatter: fmt
|
||||
filename: /var/log/synapse/homeserver.log
|
||||
maxBytes: 100000000
|
||||
backupCount: 3
|
||||
filters: [context]
|
||||
|
||||
|
||||
root:
|
||||
level: INFO
|
||||
handlers: [console] # to use file handler instead, switch to [file]
|
||||
|
||||
loggers:
|
||||
synapse:
|
||||
level: INFO
|
||||
|
||||
synapse.storage.SQL:
|
||||
# beware: increasing this to DEBUG will make synapse log sensitive
|
||||
# information such as access tokens.
|
||||
level: INFO
|
||||
|
||||
# example of enabling debugging for a component:
|
||||
#
|
||||
# synapse.federation.transport.server:
|
||||
# level: DEBUG
|
||||
@@ -1,6 +0,0 @@
|
||||
# Using the Synapse Grafana dashboard

0. Set up Prometheus and Grafana. Out of scope for this readme. Useful documentation about using Grafana with Prometheus: http://docs.grafana.org/features/datasources/prometheus/
1. Have your Prometheus scrape your Synapse (a sketch of a scrape job follows this list). https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.rst
2. Import the dashboard into Grafana. Download `synapse.json`. Import it into Grafana and select the correct Prometheus datasource. http://docs.grafana.org/reference/export_import/
3. Set up additional recording rules
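
As a minimal sketch of step 1 (the metrics path and port are assumptions about a typical Synapse metrics listener, not something this readme specifies), a Prometheus scrape job might look like:

```
# prometheus.yml fragment -- adjust the target and path to your deployment
scrape_configs:
  - job_name: "synapse"
    metrics_path: "/_synapse/metrics"   # assumed metrics listener resource
    static_configs:
      - targets: ["localhost:9092"]     # assumed host:port of the metrics listener
```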
File diff suppressed because it is too large
@@ -1,157 +0,0 @@
|
||||
# Copyright 2014-2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
import sqlite3
|
||||
import pydot
|
||||
import cgi
|
||||
import json
|
||||
import datetime
|
||||
import argparse
|
||||
|
||||
from synapse.events import FrozenEvent
|
||||
from synapse.util.frozenutils import unfreeze
|
||||
|
||||
|
||||
def make_graph(db_name, room_id, file_prefix, limit):
|
||||
conn = sqlite3.connect(db_name)
|
||||
|
||||
sql = (
|
||||
"SELECT json FROM event_json as j "
|
||||
"INNER JOIN events as e ON e.event_id = j.event_id "
|
||||
"WHERE j.room_id = ?"
|
||||
)
|
||||
|
||||
args = [room_id]
|
||||
|
||||
if limit:
|
||||
sql += (
|
||||
" ORDER BY topological_ordering DESC, stream_ordering DESC "
|
||||
"LIMIT ?"
|
||||
)
|
||||
|
||||
args.append(limit)
|
||||
|
||||
c = conn.execute(sql, args)
|
||||
|
||||
events = [FrozenEvent(json.loads(e[0])) for e in c.fetchall()]
|
||||
|
||||
events.sort(key=lambda e: e.depth)
|
||||
|
||||
node_map = {}
|
||||
state_groups = {}
|
||||
|
||||
graph = pydot.Dot(graph_name="Test")
|
||||
|
||||
for event in events:
|
||||
c = conn.execute(
|
||||
"SELECT state_group FROM event_to_state_groups "
|
||||
"WHERE event_id = ?",
|
||||
(event.event_id,)
|
||||
)
|
||||
|
||||
res = c.fetchone()
|
||||
state_group = res[0] if res else None
|
||||
|
||||
if state_group is not None:
|
||||
state_groups.setdefault(state_group, []).append(event.event_id)
|
||||
|
||||
t = datetime.datetime.fromtimestamp(
|
||||
float(event.origin_server_ts) / 1000
|
||||
).strftime('%Y-%m-%d %H:%M:%S,%f')
|
||||
|
||||
content = json.dumps(unfreeze(event.get_dict()["content"]))
|
||||
|
||||
label = (
|
||||
"<"
|
||||
"<b>%(name)s </b><br/>"
|
||||
"Type: <b>%(type)s </b><br/>"
|
||||
"State key: <b>%(state_key)s </b><br/>"
|
||||
"Content: <b>%(content)s </b><br/>"
|
||||
"Time: <b>%(time)s </b><br/>"
|
||||
"Depth: <b>%(depth)s </b><br/>"
|
||||
"State group: %(state_group)s<br/>"
|
||||
">"
|
||||
) % {
|
||||
"name": event.event_id,
|
||||
"type": event.type,
|
||||
"state_key": event.get("state_key", None),
|
||||
"content": cgi.escape(content, quote=True),
|
||||
"time": t,
|
||||
"depth": event.depth,
|
||||
"state_group": state_group,
|
||||
}
|
||||
|
||||
node = pydot.Node(
|
||||
name=event.event_id,
|
||||
label=label,
|
||||
)
|
||||
|
||||
node_map[event.event_id] = node
|
||||
graph.add_node(node)
|
||||
|
||||
for event in events:
|
||||
for prev_id, _ in event.prev_events:
|
||||
try:
|
||||
end_node = node_map[prev_id]
|
||||
except KeyError:
|
||||
end_node = pydot.Node(
|
||||
name=prev_id,
|
||||
label="<<b>%s</b>>" % (prev_id,),
|
||||
)
|
||||
|
||||
node_map[prev_id] = end_node
|
||||
graph.add_node(end_node)
|
||||
|
||||
edge = pydot.Edge(node_map[event.event_id], end_node)
|
||||
graph.add_edge(edge)
|
||||
|
||||
for group, event_ids in state_groups.items():
|
||||
if len(event_ids) <= 1:
|
||||
continue
|
||||
|
||||
cluster = pydot.Cluster(
|
||||
str(group),
|
||||
label="<State Group: %s>" % (str(group),)
|
||||
)
|
||||
|
||||
for event_id in event_ids:
|
||||
cluster.add_node(node_map[event_id])
|
||||
|
||||
graph.add_subgraph(cluster)
|
||||
|
||||
graph.write('%s.dot' % file_prefix, format='raw', prog='dot')
|
||||
graph.write_svg("%s.svg" % file_prefix, prog='dot')
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Generate a PDU graph for a given room by talking "
|
||||
"to the given homeserver to get the list of PDUs. \n"
|
||||
"Requires pydot."
|
||||
)
|
||||
parser.add_argument(
|
||||
"-p", "--prefix", dest="prefix",
|
||||
help="String to prefix output files with",
|
||||
default="graph_output"
|
||||
)
|
||||
parser.add_argument(
|
||||
"-l", "--limit",
|
||||
help="Only retrieve the last N events.",
|
||||
)
|
||||
parser.add_argument('db')
|
||||
parser.add_argument('room')
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
make_graph(args.db, args.room, args.prefix, args.limit)
|
||||
@@ -1,153 +0,0 @@
|
||||
# Copyright 2016 OpenMarket Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
import pydot
|
||||
import cgi
|
||||
import simplejson as json
|
||||
import datetime
|
||||
import argparse
|
||||
|
||||
from synapse.events import FrozenEvent
|
||||
from synapse.util.frozenutils import unfreeze
|
||||
|
||||
from six import string_types
|
||||
|
||||
|
||||
def make_graph(file_name, room_id, file_prefix, limit):
|
||||
print "Reading lines"
|
||||
with open(file_name) as f:
|
||||
lines = f.readlines()
|
||||
|
||||
print "Read lines"
|
||||
|
||||
events = [FrozenEvent(json.loads(line)) for line in lines]
|
||||
|
||||
print "Loaded events."
|
||||
|
||||
events.sort(key=lambda e: e.depth)
|
||||
|
||||
print "Sorted events"
|
||||
|
||||
if limit:
|
||||
events = events[-int(limit):]
|
||||
|
||||
node_map = {}
|
||||
|
||||
graph = pydot.Dot(graph_name="Test")
|
||||
|
||||
for event in events:
|
||||
t = datetime.datetime.fromtimestamp(
|
||||
float(event.origin_server_ts) / 1000
|
||||
).strftime('%Y-%m-%d %H:%M:%S,%f')
|
||||
|
||||
content = json.dumps(unfreeze(event.get_dict()["content"]), indent=4)
|
||||
content = content.replace("\n", "<br/>\n")
|
||||
|
||||
print content
|
||||
content = []
|
||||
for key, value in unfreeze(event.get_dict()["content"]).items():
|
||||
if value is None:
|
||||
value = "<null>"
|
||||
elif isinstance(value, string_types):
|
||||
pass
|
||||
else:
|
||||
value = json.dumps(value)
|
||||
|
||||
content.append(
|
||||
"<b>%s</b>: %s," % (
|
||||
cgi.escape(key, quote=True).encode("ascii", 'xmlcharrefreplace'),
|
||||
cgi.escape(value, quote=True).encode("ascii", 'xmlcharrefreplace'),
|
||||
)
|
||||
)
|
||||
|
||||
content = "<br/>\n".join(content)
|
||||
|
||||
print content
|
||||
|
||||
label = (
|
||||
"<"
|
||||
"<b>%(name)s </b><br/>"
|
||||
"Type: <b>%(type)s </b><br/>"
|
||||
"State key: <b>%(state_key)s </b><br/>"
|
||||
"Content: <b>%(content)s </b><br/>"
|
||||
"Time: <b>%(time)s </b><br/>"
|
||||
"Depth: <b>%(depth)s </b><br/>"
|
||||
">"
|
||||
) % {
|
||||
"name": event.event_id,
|
||||
"type": event.type,
|
||||
"state_key": event.get("state_key", None),
|
||||
"content": content,
|
||||
"time": t,
|
||||
"depth": event.depth,
|
||||
}
|
||||
|
||||
node = pydot.Node(
|
||||
name=event.event_id,
|
||||
label=label,
|
||||
)
|
||||
|
||||
node_map[event.event_id] = node
|
||||
graph.add_node(node)
|
||||
|
||||
print "Created Nodes"
|
||||
|
||||
for event in events:
|
||||
for prev_id, _ in event.prev_events:
|
||||
try:
|
||||
end_node = node_map[prev_id]
|
||||
except KeyError:
|
||||
end_node = pydot.Node(
|
||||
name=prev_id,
|
||||
label="<<b>%s</b>>" % (prev_id,),
|
||||
)
|
||||
|
||||
node_map[prev_id] = end_node
|
||||
graph.add_node(end_node)
|
||||
|
||||
edge = pydot.Edge(node_map[event.event_id], end_node)
|
||||
graph.add_edge(edge)
|
||||
|
||||
print "Created edges"
|
||||
|
||||
graph.write('%s.dot' % file_prefix, format='raw', prog='dot')
|
||||
|
||||
print "Created Dot"
|
||||
|
||||
graph.write_svg("%s.svg" % file_prefix, prog='dot')
|
||||
|
||||
print "Created svg"
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Generate a PDU graph for a given room by reading "
"from a file with line-delimited events. \n"
|
||||
"Requires pydot."
|
||||
)
|
||||
parser.add_argument(
|
||||
"-p", "--prefix", dest="prefix",
|
||||
help="String to prefix output files with",
|
||||
default="graph_output"
|
||||
)
|
||||
parser.add_argument(
|
||||
"-l", "--limit",
|
||||
help="Only retrieve the last N events.",
|
||||
)
|
||||
parser.add_argument('event_file')
|
||||
parser.add_argument('room')
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
make_graph(args.event_file, args.room, args.prefix, args.limit)
|
||||
@@ -1,260 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
"""
|
||||
This is an attempt at bridging matrix clients into a Jitsi Meet room via a Matrix
video call. It uses hard-coded XML strings over XMPP BOSH. It can display one
of the streams from the Jitsi bridge until the second lot of SDP comes down and
we set the remote SDP, at which point the stream ends. Our video never gets to
the bridge.
|
||||
|
||||
Requires:
|
||||
npm install jquery jsdom
|
||||
"""
|
||||
|
||||
import gevent
|
||||
import grequests
|
||||
from BeautifulSoup import BeautifulSoup
|
||||
import json
|
||||
import urllib
|
||||
import subprocess
|
||||
import time
|
||||
|
||||
#ACCESS_TOKEN="" #
|
||||
|
||||
MATRIXBASE = 'https://matrix.org/_matrix/client/api/v1/'
|
||||
MYUSERNAME = '@davetest:matrix.org'
|
||||
|
||||
HTTPBIND = 'https://meet.jit.si/http-bind'
|
||||
#HTTPBIND = 'https://jitsi.vuc.me/http-bind'
|
||||
#ROOMNAME = "matrix"
|
||||
ROOMNAME = "pibble"
|
||||
|
||||
HOST="guest.jit.si"
|
||||
#HOST="jitsi.vuc.me"
|
||||
|
||||
TURNSERVER="turn.guest.jit.si"
|
||||
#TURNSERVER="turn.jitsi.vuc.me"
|
||||
|
||||
ROOMDOMAIN="meet.jit.si"
|
||||
#ROOMDOMAIN="conference.jitsi.vuc.me"
|
||||
|
||||
class TrivialMatrixClient:
|
||||
def __init__(self, access_token):
|
||||
self.token = None
|
||||
self.access_token = access_token
|
||||
|
||||
def getEvent(self):
|
||||
while True:
|
||||
url = MATRIXBASE+'events?access_token='+self.access_token+"&timeout=60000"
|
||||
if self.token:
|
||||
url += "&from="+self.token
|
||||
req = grequests.get(url)
|
||||
resps = grequests.map([req])
|
||||
obj = json.loads(resps[0].content)
|
||||
print "incoming from matrix",obj
|
||||
if 'end' not in obj:
|
||||
continue
|
||||
self.token = obj['end']
|
||||
if len(obj['chunk']):
|
||||
return obj['chunk'][0]
|
||||
|
||||
def joinRoom(self, roomId):
|
||||
url = MATRIXBASE+'rooms/'+roomId+'/join?access_token='+self.access_token
|
||||
print url
|
||||
headers={ 'Content-Type': 'application/json' }
|
||||
req = grequests.post(url, headers=headers, data='{}')
|
||||
resps = grequests.map([req])
|
||||
obj = json.loads(resps[0].content)
|
||||
print "response: ",obj
|
||||
|
||||
def sendEvent(self, roomId, evType, event):
|
||||
url = MATRIXBASE+'rooms/'+roomId+'/send/'+evType+'?access_token='+self.access_token
|
||||
print url
|
||||
print json.dumps(event)
|
||||
headers={ 'Content-Type': 'application/json' }
|
||||
req = grequests.post(url, headers=headers, data=json.dumps(event))
|
||||
resps = grequests.map([req])
|
||||
obj = json.loads(resps[0].content)
|
||||
print "response: ",obj
|
||||
|
||||
|
||||
|
||||
xmppClients = {}
|
||||
|
||||
|
||||
def matrixLoop():
|
||||
while True:
|
||||
ev = matrixCli.getEvent()
|
||||
print ev
|
||||
if ev['type'] == 'm.room.member':
|
||||
print 'membership event'
|
||||
if ev['membership'] == 'invite' and ev['state_key'] == MYUSERNAME:
|
||||
roomId = ev['room_id']
|
||||
print "joining room %s" % (roomId)
|
||||
matrixCli.joinRoom(roomId)
|
||||
elif ev['type'] == 'm.room.message':
|
||||
if ev['room_id'] in xmppClients:
|
||||
print "already have a bridge for that user, ignoring"
|
||||
continue
|
||||
print "got message, connecting"
|
||||
xmppClients[ev['room_id']] = TrivialXmppClient(ev['room_id'], ev['user_id'])
|
||||
gevent.spawn(xmppClients[ev['room_id']].xmppLoop)
|
||||
elif ev['type'] == 'm.call.invite':
|
||||
print "Incoming call"
|
||||
#sdp = ev['content']['offer']['sdp']
|
||||
#print "sdp: %s" % (sdp)
|
||||
#xmppClients[ev['room_id']] = TrivialXmppClient(ev['room_id'], ev['user_id'])
|
||||
#gevent.spawn(xmppClients[ev['room_id']].xmppLoop)
|
||||
elif ev['type'] == 'm.call.answer':
|
||||
print "Call answered"
|
||||
sdp = ev['content']['answer']['sdp']
|
||||
if ev['room_id'] not in xmppClients:
|
||||
print "We didn't have a call for that room"
|
||||
continue
|
||||
# should probably check call ID too
|
||||
xmppCli = xmppClients[ev['room_id']]
|
||||
xmppCli.sendAnswer(sdp)
|
||||
elif ev['type'] == 'm.call.hangup':
|
||||
if ev['room_id'] in xmppClients:
|
||||
xmppClients[ev['room_id']].stop()
|
||||
del xmppClients[ev['room_id']]
|
||||
|
||||
class TrivialXmppClient:
|
||||
def __init__(self, matrixRoom, userId):
|
||||
self.rid = 0
|
||||
self.matrixRoom = matrixRoom
|
||||
self.userId = userId
|
||||
self.running = True
|
||||
|
||||
def stop(self):
|
||||
self.running = False
|
||||
|
||||
def nextRid(self):
|
||||
self.rid += 1
|
||||
return '%d' % (self.rid)
|
||||
|
||||
def sendIq(self, xml):
|
||||
fullXml = "<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' sid='%s'>%s</body>" % (self.nextRid(), self.sid, xml)
|
||||
#print "\t>>>%s" % (fullXml)
|
||||
return self.xmppPoke(fullXml)
|
||||
|
||||
def xmppPoke(self, xml):
|
||||
headers = {'Content-Type': 'application/xml'}
|
||||
req = grequests.post(HTTPBIND, verify=False, headers=headers, data=xml)
|
||||
resps = grequests.map([req])
|
||||
obj = BeautifulSoup(resps[0].content)
|
||||
return obj
|
||||
|
||||
def sendAnswer(self, answer):
|
||||
print "sdp from matrix client",answer
|
||||
p = subprocess.Popen(['node', 'unjingle/unjingle.js', '--sdp'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
|
||||
jingle, out_err = p.communicate(answer)
|
||||
jingle = jingle % {
|
||||
'tojid': self.callfrom,
|
||||
'action': 'session-accept',
|
||||
'initiator': self.callfrom,
|
||||
'responder': self.jid,
|
||||
'sid': self.callsid
|
||||
}
|
||||
print "answer jingle from sdp",jingle
|
||||
res = self.sendIq(jingle)
|
||||
print "reply from answer: ",res
|
||||
|
||||
self.ssrcs = {}
|
||||
jingleSoup = BeautifulSoup(jingle)
|
||||
for cont in jingleSoup.iq.jingle.findAll('content'):
|
||||
if cont.description:
|
||||
self.ssrcs[cont['name']] = cont.description['ssrc']
|
||||
print "my ssrcs:",self.ssrcs
|
||||
|
||||
gevent.joinall([
|
||||
gevent.spawn(self.advertiseSsrcs)
|
||||
])
|
||||
|
||||
def advertiseSsrcs(self):
|
||||
time.sleep(7)
|
||||
print "SSRC spammer started"
|
||||
while self.running:
|
||||
ssrcMsg = "<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>" % { 'tojid': "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid), 'nick': self.userId, 'assrc': self.ssrcs['audio'], 'vssrc': self.ssrcs['video'] }
|
||||
res = self.sendIq(ssrcMsg)
|
||||
print "reply from ssrc announce: ",res
|
||||
time.sleep(10)
|
||||
|
||||
|
||||
|
||||
def xmppLoop(self):
|
||||
self.matrixCallId = time.time()
|
||||
res = self.xmppPoke("<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' to='%s' xml:lang='en' wait='60' hold='1' content='text/xml; charset=utf-8' ver='1.6' xmpp:version='1.0' xmlns:xmpp='urn:xmpp:xbosh'/>" % (self.nextRid(), HOST))
|
||||
|
||||
print res
|
||||
self.sid = res.body['sid']
|
||||
print "sid %s" % (self.sid)
|
||||
|
||||
res = self.sendIq("<auth xmlns='urn:ietf:params:xml:ns:xmpp-sasl' mechanism='ANONYMOUS'/>")
|
||||
|
||||
res = self.xmppPoke("<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' sid='%s' to='%s' xml:lang='en' xmpp:restart='true' xmlns:xmpp='urn:xmpp:xbosh'/>" % (self.nextRid(), self.sid, HOST))
|
||||
|
||||
res = self.sendIq("<iq type='set' id='_bind_auth_2' xmlns='jabber:client'><bind xmlns='urn:ietf:params:xml:ns:xmpp-bind'/></iq>")
|
||||
print res
|
||||
|
||||
self.jid = res.body.iq.bind.jid.string
|
||||
print "jid: %s" % (self.jid)
|
||||
self.shortJid = self.jid.split('-')[0]
|
||||
|
||||
res = self.sendIq("<iq type='set' id='_session_auth_2' xmlns='jabber:client'><session xmlns='urn:ietf:params:xml:ns:xmpp-session'/></iq>")
|
||||
|
||||
#randomthing = res.body.iq['to']
|
||||
#whatsitpart = randomthing.split('-')[0]
|
||||
|
||||
#print "other random bind thing: %s" % (randomthing)
|
||||
|
||||
# advertise presence to the jitsi room, with our nick
|
||||
res = self.sendIq("<iq type='get' to='%s' xmlns='jabber:client' id='1:sendIQ'><services xmlns='urn:xmpp:extdisco:1'><service host='%s'/></services></iq><presence to='%s@%s/d98f6c40' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%s</nick></presence>" % (HOST, TURNSERVER, ROOMNAME, ROOMDOMAIN, self.userId))
|
||||
self.muc = {'users': []}
|
||||
for p in res.body.findAll('presence'):
|
||||
u = {}
|
||||
u['shortJid'] = p['from'].split('/')[1]
|
||||
if p.c and p.c.nick:
|
||||
u['nick'] = p.c.nick.string
|
||||
self.muc['users'].append(u)
|
||||
print "muc: ",self.muc
|
||||
|
||||
# wait for stuff
|
||||
while True:
|
||||
print "waiting..."
|
||||
res = self.sendIq("")
|
||||
print "got from stream: ",res
|
||||
if res.body.iq:
|
||||
jingles = res.body.iq.findAll('jingle')
|
||||
if len(jingles):
|
||||
self.callfrom = res.body.iq['from']
|
||||
self.handleInvite(jingles[0])
|
||||
elif 'type' in res.body and res.body['type'] == 'terminate':
|
||||
self.running = False
|
||||
del xmppClients[self.matrixRoom]
|
||||
return
|
||||
|
||||
def handleInvite(self, jingle):
|
||||
self.initiator = jingle['initiator']
|
||||
self.callsid = jingle['sid']
|
||||
p = subprocess.Popen(['node', 'unjingle/unjingle.js', '--jingle'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
|
||||
print "raw jingle invite",str(jingle)
|
||||
sdp, out_err = p.communicate(str(jingle))
|
||||
print "transformed remote offer sdp",sdp
|
||||
inviteEvent = {
|
||||
'offer': {
|
||||
'type': 'offer',
|
||||
'sdp': sdp
|
||||
},
|
||||
'call_id': self.matrixCallId,
|
||||
'version': 0,
|
||||
'lifetime': 30000
|
||||
}
|
||||
matrixCli.sendEvent(self.matrixRoom, 'm.call.invite', inviteEvent)
|
||||
|
||||
matrixCli = TrivialMatrixClient(ACCESS_TOKEN)
|
||||
|
||||
gevent.joinall([
|
||||
gevent.spawn(matrixLoop)
|
||||
])
|
||||
|
||||
@@ -1,188 +0,0 @@
|
||||
diff --git a/syweb/webclient/app/components/matrix/matrix-call.js b/syweb/webclient/app/components/matrix/matrix-call.js
|
||||
index 9fbfff0..dc68077 100644
|
||||
--- a/syweb/webclient/app/components/matrix/matrix-call.js
|
||||
+++ b/syweb/webclient/app/components/matrix/matrix-call.js
|
||||
@@ -16,6 +16,45 @@ limitations under the License.
|
||||
|
||||
'use strict';
|
||||
|
||||
+
|
||||
+function sendKeyframe(pc) {
|
||||
+ console.log('sendkeyframe', pc.iceConnectionState);
|
||||
+ if (pc.iceConnectionState !== 'connected') return; // safe...
|
||||
+ pc.setRemoteDescription(
|
||||
+ pc.remoteDescription,
|
||||
+ function () {
|
||||
+ pc.createAnswer(
|
||||
+ function (modifiedAnswer) {
|
||||
+ pc.setLocalDescription(
|
||||
+ modifiedAnswer,
|
||||
+ function () {
|
||||
+ // noop
|
||||
+ },
|
||||
+ function (error) {
|
||||
+ console.log('triggerKeyframe setLocalDescription failed', error);
|
||||
+ messageHandler.showError();
|
||||
+ }
|
||||
+ );
|
||||
+ },
|
||||
+ function (error) {
|
||||
+ console.log('triggerKeyframe createAnswer failed', error);
|
||||
+ messageHandler.showError();
|
||||
+ }
|
||||
+ );
|
||||
+ },
|
||||
+ function (error) {
|
||||
+ console.log('triggerKeyframe setRemoteDescription failed', error);
|
||||
+ messageHandler.showError();
|
||||
+ }
|
||||
+ );
|
||||
+}
|
||||
+
|
||||
+
|
||||
+
|
||||
+
|
||||
+
|
||||
+
|
||||
+
|
||||
var forAllVideoTracksOnStream = function(s, f) {
|
||||
var tracks = s.getVideoTracks();
|
||||
for (var i = 0; i < tracks.length; i++) {
|
||||
@@ -83,7 +122,7 @@ angular.module('MatrixCall', [])
|
||||
}
|
||||
|
||||
// FIXME: we should prevent any calls from being placed or accepted before this has finished
|
||||
- MatrixCall.getTurnServer();
|
||||
+ //MatrixCall.getTurnServer();
|
||||
|
||||
MatrixCall.CALL_TIMEOUT = 60000;
|
||||
MatrixCall.FALLBACK_STUN_SERVER = 'stun:stun.l.google.com:19302';
|
||||
@@ -132,6 +171,22 @@ angular.module('MatrixCall', [])
|
||||
pc.onsignalingstatechange = function() { self.onSignallingStateChanged(); };
|
||||
pc.onicecandidate = function(c) { self.gotLocalIceCandidate(c); };
|
||||
pc.onaddstream = function(s) { self.onAddStream(s); };
|
||||
+
|
||||
+ var datachan = pc.createDataChannel('RTCDataChannel', {
|
||||
+ reliable: false
|
||||
+ });
|
||||
+ console.log("data chan: "+datachan);
|
||||
+ datachan.onopen = function() {
|
||||
+ console.log("data channel open");
|
||||
+ };
|
||||
+ datachan.onmessage = function() {
|
||||
+ console.log("data channel message");
|
||||
+ };
|
||||
+ pc.ondatachannel = function(event) {
|
||||
+ console.log("have data channel");
|
||||
+ event.channel.binaryType = 'blob';
|
||||
+ };
|
||||
+
|
||||
return pc;
|
||||
}
|
||||
|
||||
@@ -200,6 +255,12 @@ angular.module('MatrixCall', [])
|
||||
}, this.msg.lifetime - event.age);
|
||||
};
|
||||
|
||||
+ MatrixCall.prototype.receivedInvite = function(event) {
|
||||
+ console.log("Got second invite for call "+this.call_id);
|
||||
+ this.peerConn.setRemoteDescription(new RTCSessionDescription(this.msg.offer), this.onSetRemoteDescriptionSuccess, this.onSetRemoteDescriptionError);
|
||||
+ };
|
||||
+
|
||||
+
|
||||
// perverse as it may seem, sometimes we want to instantiate a call with a hangup message
|
||||
// (because when getting the state of the room on load, events come in reverse order and
|
||||
// we want to remember that a call has been hung up)
|
||||
@@ -349,7 +410,7 @@ angular.module('MatrixCall', [])
|
||||
'mandatory': {
|
||||
'OfferToReceiveAudio': true,
|
||||
'OfferToReceiveVideo': this.type == 'video'
|
||||
- },
|
||||
+ }
|
||||
};
|
||||
this.peerConn.createAnswer(function(d) { self.createdAnswer(d); }, function(e) {}, constraints);
|
||||
// This can't be in an apply() because it's called by a predecessor call under glare conditions :(
|
||||
@@ -359,8 +420,20 @@ angular.module('MatrixCall', [])
|
||||
MatrixCall.prototype.gotLocalIceCandidate = function(event) {
|
||||
if (event.candidate) {
|
||||
console.log("Got local ICE "+event.candidate.sdpMid+" candidate: "+event.candidate.candidate);
|
||||
- this.sendCandidate(event.candidate);
|
||||
- }
|
||||
+ //this.sendCandidate(event.candidate);
|
||||
+ } else {
|
||||
+ console.log("have all candidates, sending answer");
|
||||
+ var content = {
|
||||
+ version: 0,
|
||||
+ call_id: this.call_id,
|
||||
+ answer: this.peerConn.localDescription
|
||||
+ };
|
||||
+ this.sendEventWithRetry('m.call.answer', content);
|
||||
+ var self = this;
|
||||
+ $rootScope.$apply(function() {
|
||||
+ self.state = 'connecting';
|
||||
+ });
|
||||
+ }
|
||||
}
|
||||
|
||||
MatrixCall.prototype.gotRemoteIceCandidate = function(cand) {
|
||||
@@ -418,15 +491,6 @@ angular.module('MatrixCall', [])
|
||||
console.log("Created answer: "+description);
|
||||
var self = this;
|
||||
this.peerConn.setLocalDescription(description, function() {
|
||||
- var content = {
|
||||
- version: 0,
|
||||
- call_id: self.call_id,
|
||||
- answer: self.peerConn.localDescription
|
||||
- };
|
||||
- self.sendEventWithRetry('m.call.answer', content);
|
||||
- $rootScope.$apply(function() {
|
||||
- self.state = 'connecting';
|
||||
- });
|
||||
}, function() { console.log("Error setting local description!"); } );
|
||||
};
|
||||
|
||||
@@ -448,6 +512,9 @@ angular.module('MatrixCall', [])
|
||||
$rootScope.$apply(function() {
|
||||
self.state = 'connected';
|
||||
self.didConnect = true;
|
||||
+ /*$timeout(function() {
|
||||
+ sendKeyframe(self.peerConn);
|
||||
+ }, 1000);*/
|
||||
});
|
||||
} else if (this.peerConn.iceConnectionState == 'failed') {
|
||||
this.hangup('ice_failed');
|
||||
@@ -518,6 +585,7 @@ angular.module('MatrixCall', [])
|
||||
|
||||
MatrixCall.prototype.onRemoteStreamEnded = function(event) {
|
||||
console.log("Remote stream ended");
|
||||
+ return;
|
||||
var self = this;
|
||||
$rootScope.$apply(function() {
|
||||
self.state = 'ended';
|
||||
diff --git a/syweb/webclient/app/components/matrix/matrix-phone-service.js b/syweb/webclient/app/components/matrix/matrix-phone-service.js
|
||||
index 55dbbf5..272fa27 100644
|
||||
--- a/syweb/webclient/app/components/matrix/matrix-phone-service.js
|
||||
+++ b/syweb/webclient/app/components/matrix/matrix-phone-service.js
|
||||
@@ -48,6 +48,13 @@ angular.module('matrixPhoneService', [])
|
||||
return;
|
||||
}
|
||||
|
||||
+ // do we already have an entry for this call ID?
|
||||
+ var existingEntry = matrixPhoneService.allCalls[msg.call_id];
|
||||
+ if (existingEntry) {
|
||||
+ existingEntry.receivedInvite(msg);
|
||||
+ return;
|
||||
+ }
|
||||
+
|
||||
var call = undefined;
|
||||
if (!isLive) {
|
||||
// if this event wasn't live then this call may already be over
|
||||
@@ -108,7 +115,7 @@ angular.module('matrixPhoneService', [])
|
||||
call.hangup();
|
||||
}
|
||||
} else {
|
||||
- $rootScope.$broadcast(matrixPhoneService.INCOMING_CALL_EVENT, call);
|
||||
+ $rootScope.$broadcast(matrixPhoneService.INCOMING_CALL_EVENT, call);
|
||||
}
|
||||
} else if (event.type == 'm.call.answer') {
|
||||
var call = matrixPhoneService.allCalls[msg.call_id];
|
||||
@@ -1,712 +0,0 @@
|
||||
/* jshint -W117 */
|
||||
// SDP STUFF
|
||||
function SDP(sdp) {
|
||||
this.media = sdp.split('\r\nm=');
|
||||
for (var i = 1; i < this.media.length; i++) {
|
||||
this.media[i] = 'm=' + this.media[i];
|
||||
if (i != this.media.length - 1) {
|
||||
this.media[i] += '\r\n';
|
||||
}
|
||||
}
|
||||
this.session = this.media.shift() + '\r\n';
|
||||
this.raw = this.session + this.media.join('');
|
||||
}
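// Illustrative note (not part of the original file): given a raw SDP blob,
// `new SDP(raw)` leaves the session header (v=/o=/s=/t= lines) in `this.session`,
// one string per m= section in `this.media`, and the re-joined text in `this.raw`.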
|
||||
|
||||
exports.SDP = SDP;
|
||||
|
||||
var jsdom = require("jsdom");
|
||||
var window = jsdom.jsdom().parentWindow;
|
||||
var $ = require('jquery')(window);
|
||||
|
||||
var SDPUtil = require('./strophe.jingle.sdp.util.js').SDPUtil;
|
||||
|
||||
/**
|
||||
* Returns map of MediaChannel mapped per channel idx.
|
||||
*/
|
||||
SDP.prototype.getMediaSsrcMap = function() {
|
||||
var self = this;
|
||||
var media_ssrcs = {};
|
||||
for (channelNum = 0; channelNum < self.media.length; channelNum++) {
|
||||
modified = true;
|
||||
tmp = SDPUtil.find_lines(self.media[channelNum], 'a=ssrc:');
|
||||
var type = SDPUtil.parse_mid(SDPUtil.find_line(self.media[channelNum], 'a=mid:'));
|
||||
var channel = new MediaChannel(channelNum, type);
|
||||
media_ssrcs[channelNum] = channel;
|
||||
tmp.forEach(function (line) {
|
||||
var linessrc = line.substring(7).split(' ')[0];
|
||||
// allocate new ChannelSsrc
|
||||
if(!channel.ssrcs[linessrc]) {
|
||||
channel.ssrcs[linessrc] = new ChannelSsrc(linessrc, type);
|
||||
}
|
||||
channel.ssrcs[linessrc].lines.push(line);
|
||||
});
|
||||
tmp = SDPUtil.find_lines(self.media[channelNum], 'a=ssrc-group:');
|
||||
tmp.forEach(function(line){
|
||||
var idx = line.indexOf(' ');
var semantics = line.substr(0, idx).substr(13);
|
||||
var ssrcs = line.substr(14 + semantics.length).split(' ');
|
||||
if (ssrcs.length != 0) {
|
||||
var ssrcGroup = new ChannelSsrcGroup(semantics, ssrcs);
|
||||
channel.ssrcGroups.push(ssrcGroup);
|
||||
}
|
||||
});
|
||||
}
|
||||
return media_ssrcs;
|
||||
};
|
||||
/**
|
||||
* Returns <tt>true</tt> if this SDP contains given SSRC.
|
||||
* @param ssrc the ssrc to check.
|
||||
* @returns {boolean} <tt>true</tt> if this SDP contains given SSRC.
|
||||
*/
|
||||
SDP.prototype.containsSSRC = function(ssrc) {
|
||||
var channels = this.getMediaSsrcMap();
|
||||
var contains = false;
|
||||
Object.keys(channels).forEach(function(chNumber){
|
||||
var channel = channels[chNumber];
|
||||
//console.log("Check", channel, ssrc);
|
||||
if(Object.keys(channel.ssrcs).indexOf(ssrc) != -1){
|
||||
contains = true;
|
||||
}
|
||||
});
|
||||
return contains;
|
||||
};
|
||||
|
||||
/**
|
||||
* Returns map of MediaChannel that contains only media not contained in <tt>otherSdp</tt>. Mapped by channel idx.
|
||||
* @param otherSdp the other SDP to check ssrc with.
|
||||
*/
|
||||
SDP.prototype.getNewMedia = function(otherSdp) {
|
||||
|
||||
// this could be useful in Array.prototype.
|
||||
function arrayEquals(array) {
|
||||
// if the other array is a falsy value, return
|
||||
if (!array)
|
||||
return false;
|
||||
|
||||
// compare lengths - can save a lot of time
|
||||
if (this.length != array.length)
|
||||
return false;
|
||||
|
||||
for (var i = 0, l=this.length; i < l; i++) {
|
||||
// Check if we have nested arrays
|
||||
if (this[i] instanceof Array && array[i] instanceof Array) {
|
||||
// recurse into the nested arrays
|
||||
if (!this[i].equals(array[i]))
|
||||
return false;
|
||||
}
|
||||
else if (this[i] != array[i]) {
|
||||
// Warning - two different object instances will never be equal: {x:20} != {x:20}
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
var myMedia = this.getMediaSsrcMap();
|
||||
var othersMedia = otherSdp.getMediaSsrcMap();
|
||||
var newMedia = {};
|
||||
Object.keys(othersMedia).forEach(function(channelNum) {
|
||||
var myChannel = myMedia[channelNum];
|
||||
var othersChannel = othersMedia[channelNum];
|
||||
if(!myChannel && othersChannel) {
|
||||
// Add whole channel
|
||||
newMedia[channelNum] = othersChannel;
|
||||
return;
|
||||
}
|
||||
// Look for new ssrcs across the channel
|
||||
Object.keys(othersChannel.ssrcs).forEach(function(ssrc) {
|
||||
if(Object.keys(myChannel.ssrcs).indexOf(ssrc) === -1) {
|
||||
// Allocate channel if we've found ssrc that doesn't exist in our channel
|
||||
if(!newMedia[channelNum]){
|
||||
newMedia[channelNum] = new MediaChannel(othersChannel.chNumber, othersChannel.mediaType);
|
||||
}
|
||||
newMedia[channelNum].ssrcs[ssrc] = othersChannel.ssrcs[ssrc];
|
||||
}
|
||||
});
|
||||
|
||||
// Look for new ssrc groups across the channels
|
||||
othersChannel.ssrcGroups.forEach(function(otherSsrcGroup){
|
||||
|
||||
// try to match the other ssrc-group with an ssrc-group of ours
|
||||
var matched = false;
|
||||
for (var i = 0; i < myChannel.ssrcGroups.length; i++) {
|
||||
var mySsrcGroup = myChannel.ssrcGroups[i];
|
||||
if (otherSsrcGroup.semantics == mySsrcGroup.semantics
|
||||
&& arrayEquals.apply(otherSsrcGroup.ssrcs, [mySsrcGroup.ssrcs])) {
|
||||
|
||||
matched = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!matched) {
|
||||
// Allocate channel if we've found an ssrc-group that doesn't
|
||||
// exist in our channel
|
||||
|
||||
if(!newMedia[channelNum]){
|
||||
newMedia[channelNum] = new MediaChannel(othersChannel.chNumber, othersChannel.mediaType);
|
||||
}
|
||||
newMedia[channelNum].ssrcGroups.push(otherSsrcGroup);
|
||||
}
|
||||
});
|
||||
});
|
||||
return newMedia;
|
||||
};
|
||||
|
||||
// remove iSAC and CN from SDP
|
||||
SDP.prototype.mangle = function () {
|
||||
var i, j, mline, lines, rtpmap, newdesc;
|
||||
for (i = 0; i < this.media.length; i++) {
|
||||
lines = this.media[i].split('\r\n');
|
||||
lines.pop(); // remove empty last element
|
||||
mline = SDPUtil.parse_mline(lines.shift());
|
||||
if (mline.media != 'audio')
|
||||
continue;
|
||||
newdesc = '';
|
||||
mline.fmt.length = 0;
|
||||
for (j = 0; j < lines.length; j++) {
|
||||
if (lines[j].substr(0, 9) == 'a=rtpmap:') {
|
||||
rtpmap = SDPUtil.parse_rtpmap(lines[j]);
|
||||
if (rtpmap.name == 'CN' || rtpmap.name == 'ISAC')
|
||||
continue;
|
||||
mline.fmt.push(rtpmap.id);
|
||||
newdesc += lines[j] + '\r\n';
|
||||
} else {
|
||||
newdesc += lines[j] + '\r\n';
|
||||
}
|
||||
}
|
||||
this.media[i] = SDPUtil.build_mline(mline) + '\r\n';
|
||||
this.media[i] += newdesc;
|
||||
}
|
||||
this.raw = this.session + this.media.join('');
|
||||
};
|
||||
|
||||
// remove lines matching prefix from session section
|
||||
SDP.prototype.removeSessionLines = function(prefix) {
|
||||
var self = this;
|
||||
var lines = SDPUtil.find_lines(this.session, prefix);
|
||||
lines.forEach(function(line) {
|
||||
self.session = self.session.replace(line + '\r\n', '');
|
||||
});
|
||||
this.raw = this.session + this.media.join('');
|
||||
return lines;
|
||||
}
|
||||
// remove lines matching prefix from a media section specified by mediaindex
|
||||
// TODO: non-numeric mediaindex could match mid
|
||||
SDP.prototype.removeMediaLines = function(mediaindex, prefix) {
|
||||
var self = this;
|
||||
var lines = SDPUtil.find_lines(this.media[mediaindex], prefix);
|
||||
lines.forEach(function(line) {
|
||||
self.media[mediaindex] = self.media[mediaindex].replace(line + '\r\n', '');
|
||||
});
|
||||
this.raw = this.session + this.media.join('');
|
||||
return lines;
|
||||
}
|
||||
|
||||
// add content's to a jingle element
|
||||
SDP.prototype.toJingle = function (elem, thecreator) {
|
||||
var i, j, k, mline, ssrc, rtpmap, tmp, line, lines;
|
||||
var self = this;
|
||||
// new bundle plan
|
||||
if (SDPUtil.find_line(this.session, 'a=group:')) {
|
||||
lines = SDPUtil.find_lines(this.session, 'a=group:');
|
||||
for (i = 0; i < lines.length; i++) {
|
||||
tmp = lines[i].split(' ');
|
||||
var semantics = tmp.shift().substr(8);
|
||||
elem.c('group', {xmlns: 'urn:xmpp:jingle:apps:grouping:0', semantics:semantics});
|
||||
for (j = 0; j < tmp.length; j++) {
|
||||
elem.c('content', {name: tmp[j]}).up();
|
||||
}
|
||||
elem.up();
|
||||
}
|
||||
}
|
||||
// old bundle plan, to be removed
|
||||
var bundle = [];
|
||||
if (SDPUtil.find_line(this.session, 'a=group:BUNDLE')) {
|
||||
bundle = SDPUtil.find_line(this.session, 'a=group:BUNDLE ').split(' ');
|
||||
bundle.shift();
|
||||
}
|
||||
for (i = 0; i < this.media.length; i++) {
|
||||
mline = SDPUtil.parse_mline(this.media[i].split('\r\n')[0]);
|
||||
if (!(mline.media === 'audio' ||
|
||||
mline.media === 'video' ||
|
||||
mline.media === 'application'))
|
||||
{
|
||||
continue;
|
||||
}
|
||||
if (SDPUtil.find_line(this.media[i], 'a=ssrc:')) {
|
||||
ssrc = SDPUtil.find_line(this.media[i], 'a=ssrc:').substring(7).split(' ')[0]; // take the first
|
||||
} else {
|
||||
ssrc = false;
|
||||
}
|
||||
|
||||
elem.c('content', {creator: thecreator, name: mline.media});
|
||||
if (SDPUtil.find_line(this.media[i], 'a=mid:')) {
|
||||
// prefer identifier from a=mid if present
|
||||
var mid = SDPUtil.parse_mid(SDPUtil.find_line(this.media[i], 'a=mid:'));
|
||||
elem.attrs({ name: mid });
|
||||
|
||||
// old BUNDLE plan, to be removed
|
||||
if (bundle.indexOf(mid) !== -1) {
|
||||
elem.c('bundle', {xmlns: 'http://estos.de/ns/bundle'}).up();
|
||||
bundle.splice(bundle.indexOf(mid), 1);
|
||||
}
|
||||
}
|
||||
|
||||
if (SDPUtil.find_line(this.media[i], 'a=rtpmap:').length)
|
||||
{
|
||||
elem.c('description',
|
||||
{xmlns: 'urn:xmpp:jingle:apps:rtp:1',
|
||||
media: mline.media });
|
||||
if (ssrc) {
|
||||
elem.attrs({ssrc: ssrc});
|
||||
}
|
||||
for (j = 0; j < mline.fmt.length; j++) {
|
||||
rtpmap = SDPUtil.find_line(this.media[i], 'a=rtpmap:' + mline.fmt[j]);
|
||||
elem.c('payload-type', SDPUtil.parse_rtpmap(rtpmap));
|
||||
// put any 'a=fmtp:' + mline.fmt[j] lines into <param name=foo value=bar/>
|
||||
if (SDPUtil.find_line(this.media[i], 'a=fmtp:' + mline.fmt[j])) {
|
||||
tmp = SDPUtil.parse_fmtp(SDPUtil.find_line(this.media[i], 'a=fmtp:' + mline.fmt[j]));
|
||||
for (k = 0; k < tmp.length; k++) {
|
||||
elem.c('parameter', tmp[k]).up();
|
||||
}
|
||||
}
|
||||
this.RtcpFbToJingle(i, elem, mline.fmt[j]); // XEP-0293 -- map a=rtcp-fb
|
||||
|
||||
elem.up();
|
||||
}
|
||||
if (SDPUtil.find_line(this.media[i], 'a=crypto:', this.session)) {
|
||||
elem.c('encryption', {required: 1});
|
||||
var crypto = SDPUtil.find_lines(this.media[i], 'a=crypto:', this.session);
|
||||
crypto.forEach(function(line) {
|
||||
elem.c('crypto', SDPUtil.parse_crypto(line)).up();
|
||||
});
|
||||
elem.up(); // end of encryption
|
||||
}
|
||||
|
||||
if (ssrc) {
|
||||
// new style mapping
|
||||
elem.c('source', { ssrc: ssrc, xmlns: 'urn:xmpp:jingle:apps:rtp:ssma:0' });
|
||||
// FIXME: group by ssrc and support multiple different ssrcs
|
||||
var ssrclines = SDPUtil.find_lines(this.media[i], 'a=ssrc:');
|
||||
ssrclines.forEach(function(line) {
|
||||
idx = line.indexOf(' ');
|
||||
var linessrc = line.substr(0, idx).substr(7);
|
||||
if (linessrc != ssrc) {
|
||||
elem.up();
|
||||
ssrc = linessrc;
|
||||
elem.c('source', { ssrc: ssrc, xmlns: 'urn:xmpp:jingle:apps:rtp:ssma:0' });
|
||||
}
|
||||
var kv = line.substr(idx + 1);
|
||||
elem.c('parameter');
|
||||
if (kv.indexOf(':') == -1) {
|
||||
elem.attrs({ name: kv });
|
||||
} else {
|
||||
elem.attrs({ name: kv.split(':', 2)[0] });
|
||||
elem.attrs({ value: kv.split(':', 2)[1] });
|
||||
}
|
||||
elem.up();
|
||||
});
|
||||
elem.up();
|
||||
|
||||
// old proprietary mapping, to be removed at some point
|
||||
tmp = SDPUtil.parse_ssrc(this.media[i]);
|
||||
tmp.xmlns = 'http://estos.de/ns/ssrc';
|
||||
tmp.ssrc = ssrc;
|
||||
elem.c('ssrc', tmp).up(); // ssrc is part of description
|
||||
|
||||
// XEP-0339 handle ssrc-group attributes
|
||||
var ssrc_group_lines = SDPUtil.find_lines(this.media[i], 'a=ssrc-group:');
|
||||
ssrc_group_lines.forEach(function(line) {
|
||||
idx = line.indexOf(' ');
|
||||
var semantics = line.substr(0, idx).substr(13);
|
||||
var ssrcs = line.substr(14 + semantics.length).split(' ');
|
||||
if (ssrcs.length != 0) {
|
||||
elem.c('ssrc-group', { semantics: semantics, xmlns: 'urn:xmpp:jingle:apps:rtp:ssma:0' });
|
||||
ssrcs.forEach(function(ssrc) {
|
||||
elem.c('source', { ssrc: ssrc })
|
||||
.up();
|
||||
});
|
||||
elem.up();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
if (SDPUtil.find_line(this.media[i], 'a=rtcp-mux')) {
|
||||
elem.c('rtcp-mux').up();
|
||||
}
|
||||
|
||||
// XEP-0293 -- map a=rtcp-fb:*
|
||||
this.RtcpFbToJingle(i, elem, '*');
|
||||
|
||||
// XEP-0294
|
||||
if (SDPUtil.find_line(this.media[i], 'a=extmap:')) {
|
||||
lines = SDPUtil.find_lines(this.media[i], 'a=extmap:');
|
||||
for (j = 0; j < lines.length; j++) {
|
||||
tmp = SDPUtil.parse_extmap(lines[j]);
|
||||
elem.c('rtp-hdrext', { xmlns: 'urn:xmpp:jingle:apps:rtp:rtp-hdrext:0',
|
||||
uri: tmp.uri,
|
||||
id: tmp.value });
|
||||
if (tmp.hasOwnProperty('direction')) {
|
||||
switch (tmp.direction) {
|
||||
case 'sendonly':
|
||||
elem.attrs({senders: 'responder'});
|
||||
break;
|
||||
case 'recvonly':
|
||||
elem.attrs({senders: 'initiator'});
|
||||
break;
|
||||
case 'sendrecv':
|
||||
elem.attrs({senders: 'both'});
|
||||
break;
|
||||
case 'inactive':
|
||||
elem.attrs({senders: 'none'});
|
||||
break;
|
||||
}
|
||||
}
|
||||
// TODO: handle params
|
||||
elem.up();
|
||||
}
|
||||
}
|
||||
elem.up(); // end of description
|
||||
}
|
||||
|
||||
// map ice-ufrag/pwd, dtls fingerprint, candidates
|
||||
this.TransportToJingle(i, elem);
|
||||
|
||||
if (SDPUtil.find_line(this.media[i], 'a=sendrecv', this.session)) {
|
||||
elem.attrs({senders: 'both'});
|
||||
} else if (SDPUtil.find_line(this.media[i], 'a=sendonly', this.session)) {
|
||||
elem.attrs({senders: 'initiator'});
|
||||
} else if (SDPUtil.find_line(this.media[i], 'a=recvonly', this.session)) {
|
||||
elem.attrs({senders: 'responder'});
|
||||
} else if (SDPUtil.find_line(this.media[i], 'a=inactive', this.session)) {
|
||||
elem.attrs({senders: 'none'});
|
||||
}
|
||||
if (mline.port == '0') {
|
||||
// estos hack to reject an m-line
|
||||
elem.attrs({senders: 'rejected'});
|
||||
}
|
||||
elem.up(); // end of content
|
||||
}
|
||||
elem.up();
|
||||
return elem;
|
||||
};
|
||||
|
||||
SDP.prototype.TransportToJingle = function (mediaindex, elem) {
|
||||
var i = mediaindex;
|
||||
var tmp;
|
||||
var self = this;
|
||||
elem.c('transport');
|
||||
|
||||
// XEP-0343 DTLS/SCTP
|
||||
if (SDPUtil.find_line(this.media[mediaindex], 'a=sctpmap:').length)
|
||||
{
|
||||
var sctpmap = SDPUtil.find_line(
|
||||
this.media[i], 'a=sctpmap:', self.session);
|
||||
if (sctpmap)
|
||||
{
|
||||
var sctpAttrs = SDPUtil.parse_sctpmap(sctpmap);
|
||||
elem.c('sctpmap',
|
||||
{
|
||||
xmlns: 'urn:xmpp:jingle:transports:dtls-sctp:1',
|
||||
number: sctpAttrs[0], /* SCTP port */
|
||||
protocol: sctpAttrs[1], /* protocol */
|
||||
});
|
||||
// Optional stream count attribute
|
||||
if (sctpAttrs.length > 2)
|
||||
elem.attrs({ streams: sctpAttrs[2]});
|
||||
elem.up();
|
||||
}
|
||||
}
|
||||
// XEP-0320
|
||||
var fingerprints = SDPUtil.find_lines(this.media[mediaindex], 'a=fingerprint:', this.session);
|
||||
fingerprints.forEach(function(line) {
|
||||
tmp = SDPUtil.parse_fingerprint(line);
|
||||
tmp.xmlns = 'urn:xmpp:jingle:apps:dtls:0';
|
||||
elem.c('fingerprint').t(tmp.fingerprint);
|
||||
delete tmp.fingerprint;
|
||||
line = SDPUtil.find_line(self.media[mediaindex], 'a=setup:', self.session);
|
||||
if (line) {
|
||||
tmp.setup = line.substr(8);
|
||||
}
|
||||
elem.attrs(tmp);
|
||||
elem.up(); // end of fingerprint
|
||||
});
|
||||
tmp = SDPUtil.iceparams(this.media[mediaindex], this.session);
|
||||
if (tmp) {
|
||||
tmp.xmlns = 'urn:xmpp:jingle:transports:ice-udp:1';
|
||||
elem.attrs(tmp);
|
||||
// XEP-0176
|
||||
if (SDPUtil.find_line(this.media[mediaindex], 'a=candidate:', this.session)) { // add any a=candidate lines
|
||||
var lines = SDPUtil.find_lines(this.media[mediaindex], 'a=candidate:', this.session);
|
||||
lines.forEach(function (line) {
|
||||
elem.c('candidate', SDPUtil.candidateToJingle(line)).up();
|
||||
});
|
||||
}
|
||||
}
|
||||
elem.up(); // end of transport
|
||||
}
|
||||
|
||||
SDP.prototype.RtcpFbToJingle = function (mediaindex, elem, payloadtype) { // XEP-0293
|
||||
var lines = SDPUtil.find_lines(this.media[mediaindex], 'a=rtcp-fb:' + payloadtype);
|
||||
lines.forEach(function (line) {
|
||||
var tmp = SDPUtil.parse_rtcpfb(line);
|
||||
if (tmp.type == 'trr-int') {
|
||||
elem.c('rtcp-fb-trr-int', {xmlns: 'urn:xmpp:jingle:apps:rtp:rtcp-fb:0', value: tmp.params[0]});
|
||||
elem.up();
|
||||
} else {
|
||||
elem.c('rtcp-fb', {xmlns: 'urn:xmpp:jingle:apps:rtp:rtcp-fb:0', type: tmp.type});
|
||||
if (tmp.params.length > 0) {
|
||||
elem.attrs({'subtype': tmp.params[0]});
|
||||
}
|
||||
elem.up();
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
SDP.prototype.RtcpFbFromJingle = function (elem, payloadtype) { // XEP-0293
|
||||
var media = '';
|
||||
var tmp = elem.find('>rtcp-fb-trr-int[xmlns="urn:xmpp:jingle:apps:rtp:rtcp-fb:0"]');
|
||||
if (tmp.length) {
|
||||
media += 'a=rtcp-fb:' + '*' + ' ' + 'trr-int' + ' ';
|
||||
if (tmp.attr('value')) {
|
||||
media += tmp.attr('value');
|
||||
} else {
|
||||
media += '0';
|
||||
}
|
||||
media += '\r\n';
|
||||
}
|
||||
tmp = elem.find('>rtcp-fb[xmlns="urn:xmpp:jingle:apps:rtp:rtcp-fb:0"]');
|
||||
tmp.each(function () {
|
||||
media += 'a=rtcp-fb:' + payloadtype + ' ' + $(this).attr('type');
|
||||
if ($(this).attr('subtype')) {
|
||||
media += ' ' + $(this).attr('subtype');
|
||||
}
|
||||
media += '\r\n';
|
||||
});
|
||||
return media;
|
||||
};
|
||||
|
||||
// construct an SDP from a jingle stanza
|
||||
SDP.prototype.fromJingle = function (jingle) {
|
||||
var self = this;
|
||||
this.raw = 'v=0\r\n' +
|
||||
'o=- ' + '1923518516' + ' 2 IN IP4 0.0.0.0\r\n' +// FIXME
|
||||
's=-\r\n' +
|
||||
't=0 0\r\n';
|
||||
// http://tools.ietf.org/html/draft-ietf-mmusic-sdp-bundle-negotiation-04#section-8
|
||||
if ($(jingle).find('>group[xmlns="urn:xmpp:jingle:apps:grouping:0"]').length) {
|
||||
$(jingle).find('>group[xmlns="urn:xmpp:jingle:apps:grouping:0"]').each(function (idx, group) {
|
||||
var contents = $(group).find('>content').map(function (idx, content) {
|
||||
return content.getAttribute('name');
|
||||
}).get();
|
||||
if (contents.length > 0) {
|
||||
self.raw += 'a=group:' + (group.getAttribute('semantics') || group.getAttribute('type')) + ' ' + contents.join(' ') + '\r\n';
|
||||
}
|
||||
});
|
||||
} else if ($(jingle).find('>group[xmlns="urn:ietf:rfc:5888"]').length) {
|
||||
// temporary namespace, not to be used. to be removed soon.
|
||||
$(jingle).find('>group[xmlns="urn:ietf:rfc:5888"]').each(function (idx, group) {
|
||||
var contents = $(group).find('>content').map(function (idx, content) {
|
||||
return content.getAttribute('name');
|
||||
}).get();
|
||||
if (group.getAttribute('type') !== null && contents.length > 0) {
|
||||
self.raw += 'a=group:' + group.getAttribute('type') + ' ' + contents.join(' ') + '\r\n';
|
||||
}
|
||||
});
|
||||
} else {
|
||||
// for backward compatibility, to be removed soon
|
||||
// assume all contents are in the same bundle group, can be improved upon later
|
||||
var bundle = $(jingle).find('>content').filter(function (idx, content) {
|
||||
//elem.c('bundle', {xmlns:'http://estos.de/ns/bundle'});
|
||||
return $(content).find('>bundle').length > 0;
|
||||
}).map(function (idx, content) {
|
||||
return content.getAttribute('name');
|
||||
}).get();
|
||||
if (bundle.length) {
|
||||
this.raw += 'a=group:BUNDLE ' + bundle.join(' ') + '\r\n';
|
||||
}
|
||||
}
|
||||
|
||||
this.session = this.raw;
|
||||
jingle.find('>content').each(function () {
|
||||
var m = self.jingle2media($(this));
|
||||
self.media.push(m);
|
||||
});
|
||||
|
||||
// reconstruct msid-semantic -- apparently not necessary
|
||||
/*
|
||||
var msid = SDPUtil.parse_ssrc(this.raw);
|
||||
if (msid.hasOwnProperty('mslabel')) {
|
||||
this.session += "a=msid-semantic: WMS " + msid.mslabel + "\r\n";
|
||||
}
|
||||
*/
|
||||
|
||||
this.raw = this.session + this.media.join('');
|
||||
};
|
||||
|
||||
// translate a jingle content element into an SDP media part
|
||||
SDP.prototype.jingle2media = function (content) {
|
||||
var media = '',
|
||||
desc = content.find('description'),
|
||||
ssrc = desc.attr('ssrc'),
|
||||
self = this,
|
||||
tmp;
|
||||
var sctp = content.find(
|
||||
'>transport>sctpmap[xmlns="urn:xmpp:jingle:transports:dtls-sctp:1"]');
|
||||
|
||||
tmp = { media: desc.attr('media') };
|
||||
tmp.port = '1';
|
||||
if (content.attr('senders') == 'rejected') {
|
||||
// estos hack to reject an m-line.
|
||||
tmp.port = '0';
|
||||
}
|
||||
if (content.find('>transport>fingerprint').length || desc.find('encryption').length) {
|
||||
if (sctp.length)
|
||||
tmp.proto = 'DTLS/SCTP';
|
||||
else
|
||||
tmp.proto = 'RTP/SAVPF';
|
||||
} else {
|
||||
tmp.proto = 'RTP/AVPF';
|
||||
}
|
||||
if (!sctp.length)
|
||||
{
|
||||
tmp.fmt = desc.find('payload-type').map(
|
||||
function () { return this.getAttribute('id'); }).get();
|
||||
media += SDPUtil.build_mline(tmp) + '\r\n';
|
||||
}
|
||||
else
|
||||
{
|
||||
media += 'm=application 1 DTLS/SCTP ' + sctp.attr('number') + '\r\n';
|
||||
media += 'a=sctpmap:' + sctp.attr('number') +
|
||||
' ' + sctp.attr('protocol');
|
||||
|
||||
var streamCount = sctp.attr('streams');
|
||||
if (streamCount)
|
||||
media += ' ' + streamCount + '\r\n';
|
||||
else
|
||||
media += '\r\n';
|
||||
}
|
||||
|
||||
media += 'c=IN IP4 0.0.0.0\r\n';
|
||||
if (!sctp.length)
|
||||
media += 'a=rtcp:1 IN IP4 0.0.0.0\r\n';
|
||||
//tmp = content.find('>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]');
|
||||
tmp = content.find('>bundle>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]');
|
||||
//console.log('transports: '+content.find('>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]').length);
|
||||
//console.log('bundle.transports: '+content.find('>bundle>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]').length);
|
||||
//console.log("tmp fingerprint: "+tmp.find('>fingerprint').innerHTML);
|
||||
if (tmp.length) {
|
||||
if (tmp.attr('ufrag')) {
|
||||
media += SDPUtil.build_iceufrag(tmp.attr('ufrag')) + '\r\n';
|
||||
}
|
||||
if (tmp.attr('pwd')) {
|
||||
media += SDPUtil.build_icepwd(tmp.attr('pwd')) + '\r\n';
|
||||
}
|
||||
tmp.find('>fingerprint').each(function () {
|
||||
// FIXME: check namespace at some point
|
||||
media += 'a=fingerprint:' + this.getAttribute('hash');
|
||||
media += ' ' + $(this).text();
|
||||
media += '\r\n';
|
||||
//console.log("mline "+media);
|
||||
if (this.getAttribute('setup')) {
|
||||
media += 'a=setup:' + this.getAttribute('setup') + '\r\n';
|
||||
}
|
||||
});
|
||||
}
|
||||
switch (content.attr('senders')) {
|
||||
case 'initiator':
|
||||
media += 'a=sendonly\r\n';
|
||||
break;
|
||||
case 'responder':
|
||||
media += 'a=recvonly\r\n';
|
||||
break;
|
||||
case 'none':
|
||||
media += 'a=inactive\r\n';
|
||||
break;
|
||||
case 'both':
|
||||
media += 'a=sendrecv\r\n';
|
||||
break;
|
||||
}
|
||||
media += 'a=mid:' + content.attr('name') + '\r\n';
|
||||
/*if (content.attr('name') == 'video') {
|
||||
media += 'a=x-google-flag:conference' + '\r\n';
|
||||
}*/
|
||||
|
||||
// <description><rtcp-mux/></description>
|
||||
// see http://code.google.com/p/libjingle/issues/detail?id=309 -- no spec though
|
||||
// and http://mail.jabber.org/pipermail/jingle/2011-December/001761.html
|
||||
if (desc.find('rtcp-mux').length) {
|
||||
media += 'a=rtcp-mux\r\n';
|
||||
}
|
||||
|
||||
if (desc.find('encryption').length) {
|
||||
desc.find('encryption>crypto').each(function () {
|
||||
media += 'a=crypto:' + this.getAttribute('tag');
|
||||
media += ' ' + this.getAttribute('crypto-suite');
|
||||
media += ' ' + this.getAttribute('key-params');
|
||||
if (this.getAttribute('session-params')) {
|
||||
media += ' ' + this.getAttribute('session-params');
|
||||
}
|
||||
media += '\r\n';
|
||||
});
|
||||
}
|
||||
desc.find('payload-type').each(function () {
|
||||
media += SDPUtil.build_rtpmap(this) + '\r\n';
|
||||
if ($(this).find('>parameter').length) {
|
||||
media += 'a=fmtp:' + this.getAttribute('id') + ' ';
|
||||
media += $(this).find('parameter').map(function () { return (this.getAttribute('name') ? (this.getAttribute('name') + '=') : '') + this.getAttribute('value'); }).get().join('; ');
|
||||
media += '\r\n';
|
||||
}
|
||||
// xep-0293
|
||||
media += self.RtcpFbFromJingle($(this), this.getAttribute('id'));
|
||||
});
|
||||
|
||||
// xep-0293
|
||||
media += self.RtcpFbFromJingle(desc, '*');
|
||||
|
||||
// xep-0294
|
||||
tmp = desc.find('>rtp-hdrext[xmlns="urn:xmpp:jingle:apps:rtp:rtp-hdrext:0"]');
|
||||
tmp.each(function () {
|
||||
media += 'a=extmap:' + this.getAttribute('id') + ' ' + this.getAttribute('uri') + '\r\n';
|
||||
});
|
||||
|
||||
content.find('>bundle>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]>candidate').each(function () {
|
||||
media += SDPUtil.candidateFromJingle(this);
|
||||
});
|
||||
|
||||
// XEP-0339 handle ssrc-group attributes
|
||||
tmp = content.find('description>ssrc-group[xmlns="urn:xmpp:jingle:apps:rtp:ssma:0"]').each(function() {
|
||||
var semantics = this.getAttribute('semantics');
|
||||
var ssrcs = $(this).find('>source').map(function() {
|
||||
return this.getAttribute('ssrc');
|
||||
}).get();
|
||||
|
||||
if (ssrcs.length != 0) {
|
||||
media += 'a=ssrc-group:' + semantics + ' ' + ssrcs.join(' ') + '\r\n';
|
||||
}
|
||||
});
|
||||
|
||||
tmp = content.find('description>source[xmlns="urn:xmpp:jingle:apps:rtp:ssma:0"]');
|
||||
tmp.each(function () {
|
||||
var ssrc = this.getAttribute('ssrc');
|
||||
$(this).find('>parameter').each(function () {
|
||||
media += 'a=ssrc:' + ssrc + ' ' + this.getAttribute('name');
|
||||
if (this.getAttribute('value') && this.getAttribute('value').length)
|
||||
media += ':' + this.getAttribute('value');
|
||||
media += '\r\n';
|
||||
});
|
||||
});
|
||||
|
||||
if (tmp.length === 0) {
|
||||
// fallback to proprietary mapping of a=ssrc lines
|
||||
tmp = content.find('description>ssrc[xmlns="http://estos.de/ns/ssrc"]');
|
||||
if (tmp.length) {
|
||||
media += 'a=ssrc:' + ssrc + ' cname:' + tmp.attr('cname') + '\r\n';
|
||||
media += 'a=ssrc:' + ssrc + ' msid:' + tmp.attr('msid') + '\r\n';
|
||||
media += 'a=ssrc:' + ssrc + ' mslabel:' + tmp.attr('mslabel') + '\r\n';
|
||||
media += 'a=ssrc:' + ssrc + ' label:' + tmp.attr('label') + '\r\n';
|
||||
}
|
||||
}
|
||||
return media;
|
||||
};
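The switch on content.attr('senders') earlier in this function maps the Jingle `senders` attribute onto an SDP direction attribute. A minimal sketch of the same mapping as a lookup table (illustrative only, not part of the original file):

```javascript
// Illustrative sketch: the senders-to-direction mapping used by the switch
// statement above, expressed as a plain lookup. Unknown values produce no
// direction line, matching the original behaviour.
var SENDERS_TO_DIRECTION = {
    initiator: 'a=sendonly',
    responder: 'a=recvonly',
    none: 'a=inactive',
    both: 'a=sendrecv'
};

function directionLineForSenders(senders) {
    var attr = SENDERS_TO_DIRECTION[senders];
    return attr ? attr + '\r\n' : '';
}
```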
|
||||
|
||||
@@ -1,408 +0,0 @@
|
||||
/**
|
||||
* Contains utility classes used in SDP class.
|
||||
*
|
||||
*/
|
||||
|
||||
/**
|
||||
* Class holds a=ssrc lines and media type a=mid
|
||||
* @param ssrc synchronization source identifier number (a=ssrc lines from SDP)
|
||||
* @param type media type, e.g. "audio" or "video" (a=mid from SDP)
|
||||
* @constructor
|
||||
*/
|
||||
function ChannelSsrc(ssrc, type) {
|
||||
this.ssrc = ssrc;
|
||||
this.type = type;
|
||||
this.lines = [];
|
||||
}
|
||||
|
||||
/**
|
||||
* Class holds a=ssrc-group: lines
|
||||
* @param semantics
|
||||
* @param ssrcs
|
||||
* @constructor
|
||||
*/
|
||||
function ChannelSsrcGroup(semantics, ssrcs, line) {
|
||||
this.semantics = semantics;
|
||||
this.ssrcs = ssrcs;
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper class representing a media channel. It is a container for ChannelSsrc objects and holds the channel index and media type.
|
||||
* @param channelNumber channel idx in SDP media array.
|
||||
* @param mediaType media type(a=mid)
|
||||
* @constructor
|
||||
*/
|
||||
function MediaChannel(channelNumber, mediaType) {
|
||||
/**
|
||||
* SDP channel number
|
||||
* @type {*}
|
||||
*/
|
||||
this.chNumber = channelNumber;
|
||||
/**
|
||||
* Channel media type (a=mid)
|
||||
* @type {*}
|
||||
*/
|
||||
this.mediaType = mediaType;
|
||||
/**
|
||||
* The map of ssrc numbers to ChannelSsrc objects.
|
||||
*/
|
||||
this.ssrcs = {};
|
||||
|
||||
/**
|
||||
* The array of ChannelSsrcGroup objects.
|
||||
* @type {Array}
|
||||
*/
|
||||
this.ssrcGroups = [];
|
||||
}
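A minimal sketch of how these helper classes might be populated; all identifiers and literal values below are invented for illustration and do not appear in the original file:

```javascript
// Hypothetical usage sketch: one MediaChannel for the audio m-line, holding a
// single ChannelSsrc and an ssrc-group. Values are made up for illustration.
var audioChannel = new MediaChannel(0, 'audio');

var ssrc = new ChannelSsrc('1234', 'audio');
ssrc.lines.push('a=ssrc:1234 cname:example-cname');
audioChannel.ssrcs[ssrc.ssrc] = ssrc;

audioChannel.ssrcGroups.push(new ChannelSsrcGroup('FID', ['1234', '5678']));
```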
|
||||
|
||||
SDPUtil = {
|
||||
iceparams: function (mediadesc, sessiondesc) {
|
||||
var data = null;
|
||||
if (SDPUtil.find_line(mediadesc, 'a=ice-ufrag:', sessiondesc) &&
|
||||
SDPUtil.find_line(mediadesc, 'a=ice-pwd:', sessiondesc)) {
|
||||
data = {
|
||||
ufrag: SDPUtil.parse_iceufrag(SDPUtil.find_line(mediadesc, 'a=ice-ufrag:', sessiondesc)),
|
||||
pwd: SDPUtil.parse_icepwd(SDPUtil.find_line(mediadesc, 'a=ice-pwd:', sessiondesc))
|
||||
};
|
||||
}
|
||||
return data;
|
||||
},
|
||||
parse_iceufrag: function (line) {
|
||||
return line.substring(12);
|
||||
},
|
||||
build_iceufrag: function (frag) {
|
||||
return 'a=ice-ufrag:' + frag;
|
||||
},
|
||||
parse_icepwd: function (line) {
|
||||
return line.substring(10);
|
||||
},
|
||||
build_icepwd: function (pwd) {
|
||||
return 'a=ice-pwd:' + pwd;
|
||||
},
|
||||
parse_mid: function (line) {
|
||||
return line.substring(6);
|
||||
},
|
||||
parse_mline: function (line) {
|
||||
var parts = line.substring(2).split(' '),
|
||||
data = {};
|
||||
data.media = parts.shift();
|
||||
data.port = parts.shift();
|
||||
data.proto = parts.shift();
|
||||
if (parts[parts.length - 1] === '') { // trailing whitespace
|
||||
parts.pop();
|
||||
}
|
||||
data.fmt = parts;
|
||||
return data;
|
||||
},
|
||||
build_mline: function (mline) {
|
||||
return 'm=' + mline.media + ' ' + mline.port + ' ' + mline.proto + ' ' + mline.fmt.join(' ');
|
||||
},
|
||||
parse_rtpmap: function (line) {
|
||||
var parts = line.substring(9).split(' '),
|
||||
data = {};
|
||||
data.id = parts.shift();
|
||||
parts = parts[0].split('/');
|
||||
data.name = parts.shift();
|
||||
data.clockrate = parts.shift();
|
||||
data.channels = parts.length ? parts.shift() : '1';
|
||||
return data;
|
||||
},
|
||||
/**
|
||||
* Parses SDP line "a=sctpmap:..." and extracts the SCTP port, protocol and optional stream count from it.
|
||||
* @param line eg. "a=sctpmap:5000 webrtc-datachannel"
|
||||
* @returns [SCTP port number, protocol, streams]
|
||||
*/
|
||||
parse_sctpmap: function (line)
|
||||
{
|
||||
var parts = line.substring(10).split(' ');
|
||||
var sctpPort = parts[0];
|
||||
var protocol = parts[1];
|
||||
// Stream count is optional
|
||||
var streamCount = parts.length > 2 ? parts[2] : null;
|
||||
return [sctpPort, protocol, streamCount]; // [port, protocol, stream count]
|
||||
},
|
||||
build_rtpmap: function (el) {
|
||||
var line = 'a=rtpmap:' + el.getAttribute('id') + ' ' + el.getAttribute('name') + '/' + el.getAttribute('clockrate');
|
||||
if (el.getAttribute('channels') && el.getAttribute('channels') != '1') {
|
||||
line += '/' + el.getAttribute('channels');
|
||||
}
|
||||
return line;
|
||||
},
|
||||
parse_crypto: function (line) {
|
||||
var parts = line.substring(9).split(' '),
|
||||
data = {};
|
||||
data.tag = parts.shift();
|
||||
data['crypto-suite'] = parts.shift();
|
||||
data['key-params'] = parts.shift();
|
||||
if (parts.length) {
|
||||
data['session-params'] = parts.join(' ');
|
||||
}
|
||||
return data;
|
||||
},
|
||||
parse_fingerprint: function (line) { // RFC 4572
|
||||
var parts = line.substring(14).split(' '),
|
||||
data = {};
|
||||
data.hash = parts.shift();
|
||||
data.fingerprint = parts.shift();
|
||||
// TODO assert that fingerprint satisfies 2UHEX *(":" 2UHEX) ?
|
||||
return data;
|
||||
},
|
||||
parse_fmtp: function (line) {
|
||||
var parts = line.split(' '),
|
||||
i, key, value,
|
||||
data = [];
|
||||
parts.shift();
|
||||
parts = parts.join(' ').split(';');
|
||||
for (i = 0; i < parts.length; i++) {
|
||||
key = parts[i].split('=')[0];
|
||||
while (key.length && key[0] == ' ') {
|
||||
key = key.substring(1);
|
||||
}
|
||||
value = parts[i].split('=')[1];
|
||||
if (key && value) {
|
||||
data.push({name: key, value: value});
|
||||
} else if (key) {
|
||||
// rfc 4733 (DTMF) style stuff
|
||||
data.push({name: '', value: key});
|
||||
}
|
||||
}
|
||||
return data;
|
||||
},
|
||||
parse_icecandidate: function (line) {
|
||||
var candidate = {},
|
||||
elems = line.split(' ');
|
||||
candidate.foundation = elems[0].substring(12);
|
||||
candidate.component = elems[1];
|
||||
candidate.protocol = elems[2].toLowerCase();
|
||||
candidate.priority = elems[3];
|
||||
candidate.ip = elems[4];
|
||||
candidate.port = elems[5];
|
||||
// elems[6] => "typ"
|
||||
candidate.type = elems[7];
|
||||
candidate.generation = 0; // default value, may be overwritten below
|
||||
for (var i = 8; i < elems.length; i += 2) {
|
||||
switch (elems[i]) {
|
||||
case 'raddr':
|
||||
candidate['rel-addr'] = elems[i + 1];
|
||||
break;
|
||||
case 'rport':
|
||||
candidate['rel-port'] = elems[i + 1];
|
||||
break;
|
||||
case 'generation':
|
||||
candidate.generation = elems[i + 1];
|
||||
break;
|
||||
case 'tcptype':
|
||||
candidate.tcptype = elems[i + 1];
|
||||
break;
|
||||
default: // TODO
|
||||
console.log('parse_icecandidate not translating "' + elems[i] + '" = "' + elems[i + 1] + '"');
|
||||
}
|
||||
}
|
||||
candidate.network = '1';
|
||||
candidate.id = Math.random().toString(36).substr(2, 10); // not applicable to SDP -- FIXME: should be unique, not just random
|
||||
return candidate;
|
||||
},
|
||||
build_icecandidate: function (cand) {
|
||||
var line = ['a=candidate:' + cand.foundation, cand.component, cand.protocol, cand.priority, cand.ip, cand.port, 'typ', cand.type].join(' ');
|
||||
line += ' ';
|
||||
switch (cand.type) {
|
||||
case 'srflx':
|
||||
case 'prflx':
|
||||
case 'relay':
|
||||
if (cand.hasOwnProperty('rel-addr') && cand.hasOwnProperty('rel-port')) {
|
||||
line += 'raddr';
|
||||
line += ' ';
|
||||
line += cand['rel-addr'];
|
||||
line += ' ';
|
||||
line += 'rport';
|
||||
line += ' ';
|
||||
line += cand['rel-port'];
|
||||
line += ' ';
|
||||
}
|
||||
break;
|
||||
}
|
||||
if (cand.hasOwnProperty('tcptype')) {
|
||||
line += 'tcptype';
|
||||
line += ' ';
|
||||
line += cand.tcptype;
|
||||
line += ' ';
|
||||
}
|
||||
line += 'generation';
|
||||
line += ' ';
|
||||
line += cand.hasOwnProperty('generation') ? cand.generation : '0';
|
||||
return line;
|
||||
},
|
||||
parse_ssrc: function (desc) {
|
||||
// proprietary mapping of a=ssrc lines
|
||||
// TODO: see "Jingle RTP Source Description" by Juberti and P. Thatcher on google docs
|
||||
// and parse according to that
|
||||
var lines = desc.split('\r\n'),
|
||||
data = {};
|
||||
for (var i = 0; i < lines.length; i++) {
|
||||
if (lines[i].substring(0, 7) == 'a=ssrc:') {
|
||||
var idx = lines[i].indexOf(' ');
|
||||
data[lines[i].substr(idx + 1).split(':', 2)[0]] = lines[i].substr(idx + 1).split(':', 2)[1];
|
||||
}
|
||||
}
|
||||
return data;
|
||||
},
|
||||
parse_rtcpfb: function (line) {
|
||||
var parts = line.substr(10).split(' ');
|
||||
var data = {};
|
||||
data.pt = parts.shift();
|
||||
data.type = parts.shift();
|
||||
data.params = parts;
|
||||
return data;
|
||||
},
|
||||
parse_extmap: function (line) {
|
||||
var parts = line.substr(9).split(' ');
|
||||
var data = {};
|
||||
data.value = parts.shift();
|
||||
if (data.value.indexOf('/') != -1) {
|
||||
data.direction = data.value.substr(data.value.indexOf('/') + 1);
|
||||
data.value = data.value.substr(0, data.value.indexOf('/'));
|
||||
} else {
|
||||
data.direction = 'both';
|
||||
}
|
||||
data.uri = parts.shift();
|
||||
data.params = parts;
|
||||
return data;
|
||||
},
|
||||
find_line: function (haystack, needle, sessionpart) {
|
||||
var lines = haystack.split('\r\n');
|
||||
for (var i = 0; i < lines.length; i++) {
|
||||
if (lines[i].substring(0, needle.length) == needle) {
|
||||
return lines[i];
|
||||
}
|
||||
}
|
||||
if (!sessionpart) {
|
||||
return false;
|
||||
}
|
||||
// search session part
|
||||
lines = sessionpart.split('\r\n');
|
||||
for (var j = 0; j < lines.length; j++) {
|
||||
if (lines[j].substring(0, needle.length) == needle) {
|
||||
return lines[j];
|
||||
}
|
||||
}
|
||||
return false;
|
||||
},
|
||||
find_lines: function (haystack, needle, sessionpart) {
|
||||
var lines = haystack.split('\r\n'),
|
||||
needles = [];
|
||||
for (var i = 0; i < lines.length; i++) {
|
||||
if (lines[i].substring(0, needle.length) == needle)
|
||||
needles.push(lines[i]);
|
||||
}
|
||||
if (needles.length || !sessionpart) {
|
||||
return needles;
|
||||
}
|
||||
// search session part
|
||||
lines = sessionpart.split('\r\n');
|
||||
for (var j = 0; j < lines.length; j++) {
|
||||
if (lines[j].substring(0, needle.length) == needle) {
|
||||
needles.push(lines[j]);
|
||||
}
|
||||
}
|
||||
return needles;
|
||||
},
|
||||
candidateToJingle: function (line) {
|
||||
// a=candidate:2979166662 1 udp 2113937151 192.168.2.100 57698 typ host generation 0
|
||||
// <candidate component=... foundation=... generation=... id=... ip=... network=... port=... priority=... protocol=... type=.../>
|
||||
if (line.indexOf('candidate:') === 0) {
|
||||
line = 'a=' + line;
|
||||
} else if (line.substring(0, 12) != 'a=candidate:') {
|
||||
console.log('parseCandidate called with a line that is not a candidate line');
|
||||
console.log(line);
|
||||
return null;
|
||||
}
|
||||
if (line.substring(line.length - 2) == '\r\n') // chomp it
|
||||
line = line.substring(0, line.length - 2);
|
||||
var candidate = {},
|
||||
elems = line.split(' '),
|
||||
i;
|
||||
if (elems[6] != 'typ') {
|
||||
console.log('did not find typ in the right place');
|
||||
console.log(line);
|
||||
return null;
|
||||
}
|
||||
candidate.foundation = elems[0].substring(12);
|
||||
candidate.component = elems[1];
|
||||
candidate.protocol = elems[2].toLowerCase();
|
||||
candidate.priority = elems[3];
|
||||
candidate.ip = elems[4];
|
||||
candidate.port = elems[5];
|
||||
// elems[6] => "typ"
|
||||
candidate.type = elems[7];
|
||||
|
||||
candidate.generation = '0'; // default, may be overwritten below
|
||||
for (i = 8; i < elems.length; i += 2) {
|
||||
switch (elems[i]) {
|
||||
case 'raddr':
|
||||
candidate['rel-addr'] = elems[i + 1];
|
||||
break;
|
||||
case 'rport':
|
||||
candidate['rel-port'] = elems[i + 1];
|
||||
break;
|
||||
case 'generation':
|
||||
candidate.generation = elems[i + 1];
|
||||
break;
|
||||
case 'tcptype':
|
||||
candidate.tcptype = elems[i + 1];
|
||||
break;
|
||||
default: // TODO
|
||||
console.log('not translating "' + elems[i] + '" = "' + elems[i + 1] + '"');
|
||||
}
|
||||
}
|
||||
candidate.network = '1';
|
||||
candidate.id = Math.random().toString(36).substr(2, 10); // not applicable to SDP -- FIXME: should be unique, not just random
|
||||
return candidate;
|
||||
},
|
||||
candidateFromJingle: function (cand) {
|
||||
var line = 'a=candidate:';
|
||||
line += cand.getAttribute('foundation');
|
||||
line += ' ';
|
||||
line += cand.getAttribute('component');
|
||||
line += ' ';
|
||||
line += cand.getAttribute('protocol'); //.toUpperCase(); // chrome M23 doesn't like this
|
||||
line += ' ';
|
||||
line += cand.getAttribute('priority');
|
||||
line += ' ';
|
||||
line += cand.getAttribute('ip');
|
||||
line += ' ';
|
||||
line += cand.getAttribute('port');
|
||||
line += ' ';
|
||||
line += 'typ';
|
||||
line += ' ' + cand.getAttribute('type');
|
||||
line += ' ';
|
||||
switch (cand.getAttribute('type')) {
|
||||
case 'srflx':
|
||||
case 'prflx':
|
||||
case 'relay':
|
||||
if (cand.getAttribute('rel-addr') && cand.getAttribute('rel-port')) {
|
||||
line += 'raddr';
|
||||
line += ' ';
|
||||
line += cand.getAttribute('rel-addr');
|
||||
line += ' ';
|
||||
line += 'rport';
|
||||
line += ' ';
|
||||
line += cand.getAttribute('rel-port');
|
||||
line += ' ';
|
||||
}
|
||||
break;
|
||||
}
|
||||
if (cand.getAttribute('protocol').toLowerCase() == 'tcp') {
|
||||
line += 'tcptype';
|
||||
line += ' ';
|
||||
line += cand.getAttribute('tcptype');
|
||||
line += ' ';
|
||||
}
|
||||
line += 'generation';
|
||||
line += ' ';
|
||||
line += cand.getAttribute('generation') || '0';
|
||||
return line + '\r\n';
|
||||
}
|
||||
};
|
||||
|
||||
exports.SDPUtil = SDPUtil;
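Since these helpers operate on plain SDP strings, a short usage sketch may help; the sample lines and the module path are assumptions for illustration only:

```javascript
// Hypothetical usage sketch of the parsers above (sample lines are invented;
// the require path is an assumption).
var SDPUtil = require('./strophe.jingle.sdp.util.js').SDPUtil;

var mline = SDPUtil.parse_mline('m=audio 1 RTP/SAVPF 111 103');
// -> { media: 'audio', port: '1', proto: 'RTP/SAVPF', fmt: ['111', '103'] }
SDPUtil.build_mline(mline); // -> 'm=audio 1 RTP/SAVPF 111 103'

SDPUtil.parse_rtpmap('a=rtpmap:111 opus/48000/2');
// -> { id: '111', name: 'opus', clockrate: '48000', channels: '2' }

SDPUtil.parse_fmtp('a=fmtp:111 minptime=10; useinbandfec=1');
// -> [ { name: 'minptime', value: '10' }, { name: 'useinbandfec', value: '1' } ]

SDPUtil.parse_sctpmap('a=sctpmap:5000 webrtc-datachannel 1024');
// -> [ '5000', 'webrtc-datachannel', '1024' ]

SDPUtil.candidateToJingle(
    'candidate:2979166662 1 udp 2113937151 192.168.2.100 57698 typ host generation 0');
// -> { foundation: '2979166662', component: '1', protocol: 'udp', type: 'host', ... }
```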
|
||||
|
||||
@@ -1,254 +0,0 @@
|
||||
/**
|
||||
* Wrapper for built-in http.js to emulate the browser XMLHttpRequest object.
|
||||
*
|
||||
* This can be used with JS designed for browsers to improve reuse of code and
|
||||
* allow the use of existing libraries.
|
||||
*
|
||||
* Usage: include("XMLHttpRequest.js") and use XMLHttpRequest per W3C specs.
|
||||
*
|
||||
* @todo SSL Support
|
||||
* @author Dan DeFelippi <dan@driverdan.com>
|
||||
* @license MIT
|
||||
*/
|
||||
|
||||
var Url = require("url")
|
||||
,sys = require("util");
|
||||
|
||||
exports.XMLHttpRequest = function() {
|
||||
/**
|
||||
* Private variables
|
||||
*/
|
||||
var self = this;
|
||||
var http = require('http');
|
||||
var https = require('https');
|
||||
|
||||
// Holds http.js objects
|
||||
var client;
|
||||
var request;
|
||||
var response;
|
||||
|
||||
// Request settings
|
||||
var settings = {};
|
||||
|
||||
// Set some default headers
|
||||
var defaultHeaders = {
|
||||
"User-Agent": "node.js",
|
||||
"Accept": "*/*",
|
||||
};
|
||||
|
||||
var headers = defaultHeaders;
|
||||
|
||||
/**
|
||||
* Constants
|
||||
*/
|
||||
this.UNSENT = 0;
|
||||
this.OPENED = 1;
|
||||
this.HEADERS_RECEIVED = 2;
|
||||
this.LOADING = 3;
|
||||
this.DONE = 4;
|
||||
|
||||
/**
|
||||
* Public vars
|
||||
*/
|
||||
// Current state
|
||||
this.readyState = this.UNSENT;
|
||||
|
||||
// default ready state change handler in case one is not set or is set late
|
||||
this.onreadystatechange = function() {};
|
||||
|
||||
// Result & response
|
||||
this.responseText = "";
|
||||
this.responseXML = "";
|
||||
this.status = null;
|
||||
this.statusText = null;
|
||||
|
||||
/**
|
||||
* Open the connection. Currently supports local server requests.
|
||||
*
|
||||
* @param string method Connection method (eg GET, POST)
|
||||
* @param string url URL for the connection.
|
||||
* @param boolean async Asynchronous connection. Default is true.
|
||||
* @param string user Username for basic authentication (optional)
|
||||
* @param string password Password for basic authentication (optional)
|
||||
*/
|
||||
this.open = function(method, url, async, user, password) {
|
||||
settings = {
|
||||
"method": method,
|
||||
"url": url,
|
||||
"async": async || null,
|
||||
"user": user || null,
|
||||
"password": password || null
|
||||
};
|
||||
|
||||
this.abort();
|
||||
|
||||
setState(this.OPENED);
|
||||
};
|
||||
|
||||
/**
|
||||
* Sets a header for the request.
|
||||
*
|
||||
* @param string header Header name
|
||||
* @param string value Header value
|
||||
*/
|
||||
this.setRequestHeader = function(header, value) {
|
||||
headers[header] = value;
|
||||
};
|
||||
|
||||
/**
|
||||
* Gets a header from the server response.
|
||||
*
|
||||
* @param string header Name of header to get.
|
||||
* @return string Text of the header or null if it doesn't exist.
|
||||
*/
|
||||
this.getResponseHeader = function(header) {
|
||||
if (this.readyState > this.OPENED && response.headers[header]) {
|
||||
return header + ": " + response.headers[header];
|
||||
}
|
||||
|
||||
return null;
|
||||
};
|
||||
|
||||
/**
|
||||
* Gets all the response headers.
|
||||
*
|
||||
* @return string
|
||||
*/
|
||||
this.getAllResponseHeaders = function() {
|
||||
if (this.readyState < this.HEADERS_RECEIVED) {
|
||||
throw "INVALID_STATE_ERR: Headers have not been received.";
|
||||
}
|
||||
var result = "";
|
||||
|
||||
for (var i in response.headers) {
|
||||
result += i + ": " + response.headers[i] + "\r\n";
|
||||
}
|
||||
return result.substr(0, result.length - 2);
|
||||
};
|
||||
|
||||
/**
|
||||
* Sends the request to the server.
|
||||
*
|
||||
* @param string data Optional data to send as request body.
|
||||
*/
|
||||
this.send = function(data) {
|
||||
if (this.readyState != this.OPENED) {
|
||||
throw "INVALID_STATE_ERR: connection must be opened before send() is called";
|
||||
}
|
||||
|
||||
var ssl = false;
|
||||
var url = Url.parse(settings.url);
|
||||
|
||||
// Determine the server
|
||||
switch (url.protocol) {
|
||||
case 'https:':
|
||||
ssl = true;
|
||||
// SSL & non-SSL both need host, no break here.
|
||||
case 'http:':
|
||||
var host = url.hostname;
|
||||
break;
|
||||
|
||||
case undefined:
|
||||
case '':
|
||||
var host = "localhost";
|
||||
break;
|
||||
|
||||
default:
|
||||
throw "Protocol not supported.";
|
||||
}
|
||||
|
||||
// Default to port 80. If accessing localhost on another port be sure
|
||||
// to use http://localhost:port/path
|
||||
var port = url.port || (ssl ? 443 : 80);
|
||||
// Add query string if one is used
|
||||
var uri = url.pathname + (url.search ? url.search : '');
|
||||
|
||||
// Set the Host header or the server may reject the request
|
||||
this.setRequestHeader("Host", host);
|
||||
|
||||
// Set content length header
|
||||
if (settings.method == "GET" || settings.method == "HEAD") {
|
||||
data = null;
|
||||
} else if (data) {
|
||||
this.setRequestHeader("Content-Length", Buffer.byteLength(data));
|
||||
|
||||
if (!headers["Content-Type"]) {
|
||||
this.setRequestHeader("Content-Type", "text/plain;charset=UTF-8");
|
||||
}
|
||||
}
|
||||
|
||||
// Use the proper protocol
|
||||
var doRequest = ssl ? https.request : http.request;
|
||||
|
||||
var options = {
|
||||
host: host,
|
||||
port: port,
|
||||
path: uri,
|
||||
method: settings.method,
|
||||
headers: headers,
|
||||
agent: false
|
||||
};
|
||||
|
||||
var req = doRequest(options, function(res) {
|
||||
response = res;
|
||||
response.setEncoding("utf8");
|
||||
|
||||
setState(self.HEADERS_RECEIVED);
|
||||
self.status = response.statusCode;
|
||||
|
||||
response.on('data', function(chunk) {
|
||||
// Make sure there's some data
|
||||
if (chunk) {
|
||||
self.responseText += chunk;
|
||||
}
|
||||
setState(self.LOADING);
|
||||
});
|
||||
|
||||
response.on('end', function() {
|
||||
setState(self.DONE);
|
||||
});
|
||||
|
||||
response.on('error', function(error) {
|
||||
self.handleError(error);
|
||||
});
|
||||
}).on('error', function(error) {
|
||||
self.handleError(error);
|
||||
});
|
||||
|
||||
req.setHeader("Connection", "Close");
|
||||
|
||||
// Node 0.4 and later won't accept empty data. Make sure it's needed.
|
||||
if (data) {
|
||||
req.write(data);
|
||||
}
|
||||
|
||||
req.end();
|
||||
};
|
||||
|
||||
this.handleError = function(error) {
|
||||
this.status = 503;
|
||||
this.statusText = error;
|
||||
this.responseText = error.stack;
|
||||
setState(this.DONE);
|
||||
};
|
||||
|
||||
/**
|
||||
* Aborts a request.
|
||||
*/
|
||||
this.abort = function() {
|
||||
headers = defaultHeaders;
|
||||
this.readyState = this.UNSENT;
|
||||
this.responseText = "";
|
||||
this.responseXML = "";
|
||||
};
|
||||
|
||||
/**
|
||||
* Changes readyState and calls onreadystatechange.
|
||||
*
|
||||
* @param int state New state
|
||||
*/
|
||||
var setState = function(state) {
|
||||
self.readyState = state;
|
||||
self.onreadystatechange();
|
||||
}
|
||||
};
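A minimal usage sketch of the wrapper; the module path, URL and port are assumptions for illustration:

```javascript
// Hypothetical usage sketch (require path and URL are assumptions).
var XMLHttpRequest = require('./XMLHttpRequest.js').XMLHttpRequest;

var xhr = new XMLHttpRequest();
xhr.onreadystatechange = function () {
    if (xhr.readyState === xhr.DONE) {
        console.log(xhr.status, xhr.responseText);
    }
};
xhr.open('GET', 'http://localhost:8008/_matrix/client/versions');
xhr.send();
```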
|
||||
@@ -1,83 +0,0 @@
|
||||
// This code was written by Tyler Akins and has been placed in the
|
||||
// public domain. It would be nice if you left this header intact.
|
||||
// Base64 code from Tyler Akins -- http://rumkin.com
|
||||
|
||||
var Base64 = (function () {
|
||||
var keyStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
|
||||
|
||||
var obj = {
|
||||
/**
|
||||
* Encodes a string in base64
|
||||
* @param {String} input The string to encode in base64.
|
||||
*/
|
||||
encode: function (input) {
|
||||
var output = "";
|
||||
var chr1, chr2, chr3;
|
||||
var enc1, enc2, enc3, enc4;
|
||||
var i = 0;
|
||||
|
||||
do {
|
||||
chr1 = input.charCodeAt(i++);
|
||||
chr2 = input.charCodeAt(i++);
|
||||
chr3 = input.charCodeAt(i++);
|
||||
|
||||
enc1 = chr1 >> 2;
|
||||
enc2 = ((chr1 & 3) << 4) | (chr2 >> 4);
|
||||
enc3 = ((chr2 & 15) << 2) | (chr3 >> 6);
|
||||
enc4 = chr3 & 63;
|
||||
|
||||
if (isNaN(chr2)) {
|
||||
enc3 = enc4 = 64;
|
||||
} else if (isNaN(chr3)) {
|
||||
enc4 = 64;
|
||||
}
|
||||
|
||||
output = output + keyStr.charAt(enc1) + keyStr.charAt(enc2) +
|
||||
keyStr.charAt(enc3) + keyStr.charAt(enc4);
|
||||
} while (i < input.length);
|
||||
|
||||
return output;
|
||||
},
|
||||
|
||||
/**
|
||||
* Decodes a base64 string.
|
||||
* @param {String} input The string to decode.
|
||||
*/
|
||||
decode: function (input) {
|
||||
var output = "";
|
||||
var chr1, chr2, chr3;
|
||||
var enc1, enc2, enc3, enc4;
|
||||
var i = 0;
|
||||
|
||||
// remove all characters that are not A-Z, a-z, 0-9, +, /, or =
|
||||
input = input.replace(/[^A-Za-z0-9\+\/\=]/g, '');
|
||||
|
||||
do {
|
||||
enc1 = keyStr.indexOf(input.charAt(i++));
|
||||
enc2 = keyStr.indexOf(input.charAt(i++));
|
||||
enc3 = keyStr.indexOf(input.charAt(i++));
|
||||
enc4 = keyStr.indexOf(input.charAt(i++));
|
||||
|
||||
chr1 = (enc1 << 2) | (enc2 >> 4);
|
||||
chr2 = ((enc2 & 15) << 4) | (enc3 >> 2);
|
||||
chr3 = ((enc3 & 3) << 6) | enc4;
|
||||
|
||||
output = output + String.fromCharCode(chr1);
|
||||
|
||||
if (enc3 != 64) {
|
||||
output = output + String.fromCharCode(chr2);
|
||||
}
|
||||
if (enc4 != 64) {
|
||||
output = output + String.fromCharCode(chr3);
|
||||
}
|
||||
} while (i < input.length);
|
||||
|
||||
return output;
|
||||
}
|
||||
};
|
||||
|
||||
return obj;
|
||||
})();
|
||||
|
||||
// Nodify
|
||||
exports.Base64 = Base64;
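A minimal usage sketch; the module path is an assumption. Note that this implementation works on 8-bit character codes, so it is only safe for ASCII/Latin-1 input:

```javascript
// Hypothetical usage sketch (require path is an assumption).
var Base64 = require('./base64.js').Base64;

var encoded = Base64.encode('Hello');  // 'SGVsbG8='
var decoded = Base64.decode(encoded);  // 'Hello'
console.log(encoded, decoded);
```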
|
||||
@@ -1,279 +0,0 @@
|
||||
/*
|
||||
* A JavaScript implementation of the RSA Data Security, Inc. MD5 Message
|
||||
* Digest Algorithm, as defined in RFC 1321.
|
||||
* Version 2.1 Copyright (C) Paul Johnston 1999 - 2002.
|
||||
* Other contributors: Greg Holt, Andrew Kepert, Ydnar, Lostinet
|
||||
* Distributed under the BSD License
|
||||
* See http://pajhome.org.uk/crypt/md5 for more info.
|
||||
*/
|
||||
|
||||
var MD5 = (function () {
|
||||
/*
|
||||
* Configurable variables. You may need to tweak these to be compatible with
|
||||
* the server-side, but the defaults work in most cases.
|
||||
*/
|
||||
var hexcase = 0; /* hex output format. 0 - lowercase; 1 - uppercase */
|
||||
var b64pad = ""; /* base-64 pad character. "=" for strict RFC compliance */
|
||||
var chrsz = 8; /* bits per input character. 8 - ASCII; 16 - Unicode */
|
||||
|
||||
/*
|
||||
* Add integers, wrapping at 2^32. This uses 16-bit operations internally
|
||||
* to work around bugs in some JS interpreters.
|
||||
*/
|
||||
var safe_add = function (x, y) {
|
||||
var lsw = (x & 0xFFFF) + (y & 0xFFFF);
|
||||
var msw = (x >> 16) + (y >> 16) + (lsw >> 16);
|
||||
return (msw << 16) | (lsw & 0xFFFF);
|
||||
};
|
||||
|
||||
/*
|
||||
* Bitwise rotate a 32-bit number to the left.
|
||||
*/
|
||||
var bit_rol = function (num, cnt) {
|
||||
return (num << cnt) | (num >>> (32 - cnt));
|
||||
};
|
||||
|
||||
/*
|
||||
* Convert a string to an array of little-endian words
|
||||
* If chrsz is ASCII, characters >255 have their hi-byte silently ignored.
|
||||
*/
|
||||
var str2binl = function (str) {
|
||||
var bin = [];
|
||||
var mask = (1 << chrsz) - 1;
|
||||
for(var i = 0; i < str.length * chrsz; i += chrsz)
|
||||
{
|
||||
bin[i>>5] |= (str.charCodeAt(i / chrsz) & mask) << (i%32);
|
||||
}
|
||||
return bin;
|
||||
};
|
||||
|
||||
/*
|
||||
* Convert an array of little-endian words to a string
|
||||
*/
|
||||
var binl2str = function (bin) {
|
||||
var str = "";
|
||||
var mask = (1 << chrsz) - 1;
|
||||
for(var i = 0; i < bin.length * 32; i += chrsz)
|
||||
{
|
||||
str += String.fromCharCode((bin[i>>5] >>> (i % 32)) & mask);
|
||||
}
|
||||
return str;
|
||||
};
|
||||
|
||||
/*
|
||||
* Convert an array of little-endian words to a hex string.
|
||||
*/
|
||||
var binl2hex = function (binarray) {
|
||||
var hex_tab = hexcase ? "0123456789ABCDEF" : "0123456789abcdef";
|
||||
var str = "";
|
||||
for(var i = 0; i < binarray.length * 4; i++)
|
||||
{
|
||||
str += hex_tab.charAt((binarray[i>>2] >> ((i%4)*8+4)) & 0xF) +
|
||||
hex_tab.charAt((binarray[i>>2] >> ((i%4)*8 )) & 0xF);
|
||||
}
|
||||
return str;
|
||||
};
|
||||
|
||||
/*
|
||||
* Convert an array of little-endian words to a base-64 string
|
||||
*/
|
||||
var binl2b64 = function (binarray) {
|
||||
var tab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
|
||||
var str = "";
|
||||
var triplet, j;
|
||||
for(var i = 0; i < binarray.length * 4; i += 3)
|
||||
{
|
||||
triplet = (((binarray[i >> 2] >> 8 * ( i %4)) & 0xFF) << 16) |
|
||||
(((binarray[i+1 >> 2] >> 8 * ((i+1)%4)) & 0xFF) << 8 ) |
|
||||
((binarray[i+2 >> 2] >> 8 * ((i+2)%4)) & 0xFF);
|
||||
for(j = 0; j < 4; j++)
|
||||
{
|
||||
if(i * 8 + j * 6 > binarray.length * 32) { str += b64pad; }
|
||||
else { str += tab.charAt((triplet >> 6*(3-j)) & 0x3F); }
|
||||
}
|
||||
}
|
||||
return str;
|
||||
};
|
||||
|
||||
/*
|
||||
* These functions implement the four basic operations the algorithm uses.
|
||||
*/
|
||||
var md5_cmn = function (q, a, b, x, s, t) {
|
||||
return safe_add(bit_rol(safe_add(safe_add(a, q),safe_add(x, t)), s),b);
|
||||
};
|
||||
|
||||
var md5_ff = function (a, b, c, d, x, s, t) {
|
||||
return md5_cmn((b & c) | ((~b) & d), a, b, x, s, t);
|
||||
};
|
||||
|
||||
var md5_gg = function (a, b, c, d, x, s, t) {
|
||||
return md5_cmn((b & d) | (c & (~d)), a, b, x, s, t);
|
||||
};
|
||||
|
||||
var md5_hh = function (a, b, c, d, x, s, t) {
|
||||
return md5_cmn(b ^ c ^ d, a, b, x, s, t);
|
||||
};
|
||||
|
||||
var md5_ii = function (a, b, c, d, x, s, t) {
|
||||
return md5_cmn(c ^ (b | (~d)), a, b, x, s, t);
|
||||
};
|
||||
|
||||
/*
|
||||
* Calculate the MD5 of an array of little-endian words, and a bit length
|
||||
*/
|
||||
var core_md5 = function (x, len) {
|
||||
/* append padding */
|
||||
x[len >> 5] |= 0x80 << ((len) % 32);
|
||||
x[(((len + 64) >>> 9) << 4) + 14] = len;
|
||||
|
||||
var a = 1732584193;
|
||||
var b = -271733879;
|
||||
var c = -1732584194;
|
||||
var d = 271733878;
|
||||
|
||||
var olda, oldb, oldc, oldd;
|
||||
for (var i = 0; i < x.length; i += 16)
|
||||
{
|
||||
olda = a;
|
||||
oldb = b;
|
||||
oldc = c;
|
||||
oldd = d;
|
||||
|
||||
a = md5_ff(a, b, c, d, x[i+ 0], 7 , -680876936);
|
||||
d = md5_ff(d, a, b, c, x[i+ 1], 12, -389564586);
|
||||
c = md5_ff(c, d, a, b, x[i+ 2], 17, 606105819);
|
||||
b = md5_ff(b, c, d, a, x[i+ 3], 22, -1044525330);
|
||||
a = md5_ff(a, b, c, d, x[i+ 4], 7 , -176418897);
|
||||
d = md5_ff(d, a, b, c, x[i+ 5], 12, 1200080426);
|
||||
c = md5_ff(c, d, a, b, x[i+ 6], 17, -1473231341);
|
||||
b = md5_ff(b, c, d, a, x[i+ 7], 22, -45705983);
|
||||
a = md5_ff(a, b, c, d, x[i+ 8], 7 , 1770035416);
|
||||
d = md5_ff(d, a, b, c, x[i+ 9], 12, -1958414417);
|
||||
c = md5_ff(c, d, a, b, x[i+10], 17, -42063);
|
||||
b = md5_ff(b, c, d, a, x[i+11], 22, -1990404162);
|
||||
a = md5_ff(a, b, c, d, x[i+12], 7 , 1804603682);
|
||||
d = md5_ff(d, a, b, c, x[i+13], 12, -40341101);
|
||||
c = md5_ff(c, d, a, b, x[i+14], 17, -1502002290);
|
||||
b = md5_ff(b, c, d, a, x[i+15], 22, 1236535329);
|
||||
|
||||
a = md5_gg(a, b, c, d, x[i+ 1], 5 , -165796510);
|
||||
d = md5_gg(d, a, b, c, x[i+ 6], 9 , -1069501632);
|
||||
c = md5_gg(c, d, a, b, x[i+11], 14, 643717713);
|
||||
b = md5_gg(b, c, d, a, x[i+ 0], 20, -373897302);
|
||||
a = md5_gg(a, b, c, d, x[i+ 5], 5 , -701558691);
|
||||
d = md5_gg(d, a, b, c, x[i+10], 9 , 38016083);
|
||||
c = md5_gg(c, d, a, b, x[i+15], 14, -660478335);
|
||||
b = md5_gg(b, c, d, a, x[i+ 4], 20, -405537848);
|
||||
a = md5_gg(a, b, c, d, x[i+ 9], 5 , 568446438);
|
||||
d = md5_gg(d, a, b, c, x[i+14], 9 , -1019803690);
|
||||
c = md5_gg(c, d, a, b, x[i+ 3], 14, -187363961);
|
||||
b = md5_gg(b, c, d, a, x[i+ 8], 20, 1163531501);
|
||||
a = md5_gg(a, b, c, d, x[i+13], 5 , -1444681467);
|
||||
d = md5_gg(d, a, b, c, x[i+ 2], 9 , -51403784);
|
||||
c = md5_gg(c, d, a, b, x[i+ 7], 14, 1735328473);
|
||||
b = md5_gg(b, c, d, a, x[i+12], 20, -1926607734);
|
||||
|
||||
a = md5_hh(a, b, c, d, x[i+ 5], 4 , -378558);
|
||||
d = md5_hh(d, a, b, c, x[i+ 8], 11, -2022574463);
|
||||
c = md5_hh(c, d, a, b, x[i+11], 16, 1839030562);
|
||||
b = md5_hh(b, c, d, a, x[i+14], 23, -35309556);
|
||||
a = md5_hh(a, b, c, d, x[i+ 1], 4 , -1530992060);
|
||||
d = md5_hh(d, a, b, c, x[i+ 4], 11, 1272893353);
|
||||
c = md5_hh(c, d, a, b, x[i+ 7], 16, -155497632);
|
||||
b = md5_hh(b, c, d, a, x[i+10], 23, -1094730640);
|
||||
a = md5_hh(a, b, c, d, x[i+13], 4 , 681279174);
|
||||
d = md5_hh(d, a, b, c, x[i+ 0], 11, -358537222);
|
||||
c = md5_hh(c, d, a, b, x[i+ 3], 16, -722521979);
|
||||
b = md5_hh(b, c, d, a, x[i+ 6], 23, 76029189);
|
||||
a = md5_hh(a, b, c, d, x[i+ 9], 4 , -640364487);
|
||||
d = md5_hh(d, a, b, c, x[i+12], 11, -421815835);
|
||||
c = md5_hh(c, d, a, b, x[i+15], 16, 530742520);
|
||||
b = md5_hh(b, c, d, a, x[i+ 2], 23, -995338651);
|
||||
|
||||
a = md5_ii(a, b, c, d, x[i+ 0], 6 , -198630844);
|
||||
d = md5_ii(d, a, b, c, x[i+ 7], 10, 1126891415);
|
||||
c = md5_ii(c, d, a, b, x[i+14], 15, -1416354905);
|
||||
b = md5_ii(b, c, d, a, x[i+ 5], 21, -57434055);
|
||||
a = md5_ii(a, b, c, d, x[i+12], 6 , 1700485571);
|
||||
d = md5_ii(d, a, b, c, x[i+ 3], 10, -1894986606);
|
||||
c = md5_ii(c, d, a, b, x[i+10], 15, -1051523);
|
||||
b = md5_ii(b, c, d, a, x[i+ 1], 21, -2054922799);
|
||||
a = md5_ii(a, b, c, d, x[i+ 8], 6 , 1873313359);
|
||||
d = md5_ii(d, a, b, c, x[i+15], 10, -30611744);
|
||||
c = md5_ii(c, d, a, b, x[i+ 6], 15, -1560198380);
|
||||
b = md5_ii(b, c, d, a, x[i+13], 21, 1309151649);
|
||||
a = md5_ii(a, b, c, d, x[i+ 4], 6 , -145523070);
|
||||
d = md5_ii(d, a, b, c, x[i+11], 10, -1120210379);
|
||||
c = md5_ii(c, d, a, b, x[i+ 2], 15, 718787259);
|
||||
b = md5_ii(b, c, d, a, x[i+ 9], 21, -343485551);
|
||||
|
||||
a = safe_add(a, olda);
|
||||
b = safe_add(b, oldb);
|
||||
c = safe_add(c, oldc);
|
||||
d = safe_add(d, oldd);
|
||||
}
|
||||
return [a, b, c, d];
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
* Calculate the HMAC-MD5, of a key and some data
|
||||
*/
|
||||
var core_hmac_md5 = function (key, data) {
|
||||
var bkey = str2binl(key);
|
||||
if(bkey.length > 16) { bkey = core_md5(bkey, key.length * chrsz); }
|
||||
|
||||
var ipad = new Array(16), opad = new Array(16);
|
||||
for(var i = 0; i < 16; i++)
|
||||
{
|
||||
ipad[i] = bkey[i] ^ 0x36363636;
|
||||
opad[i] = bkey[i] ^ 0x5C5C5C5C;
|
||||
}
|
||||
|
||||
var hash = core_md5(ipad.concat(str2binl(data)), 512 + data.length * chrsz);
|
||||
return core_md5(opad.concat(hash), 512 + 128);
|
||||
};
|
||||
|
||||
var obj = {
|
||||
/*
|
||||
* These are the functions you'll usually want to call.
|
||||
* They take string arguments and return either hex or base-64 encoded
|
||||
* strings.
|
||||
*/
|
||||
hexdigest: function (s) {
|
||||
return binl2hex(core_md5(str2binl(s), s.length * chrsz));
|
||||
},
|
||||
|
||||
b64digest: function (s) {
|
||||
return binl2b64(core_md5(str2binl(s), s.length * chrsz));
|
||||
},
|
||||
|
||||
hash: function (s) {
|
||||
return binl2str(core_md5(str2binl(s), s.length * chrsz));
|
||||
},
|
||||
|
||||
hmac_hexdigest: function (key, data) {
|
||||
return binl2hex(core_hmac_md5(key, data));
|
||||
},
|
||||
|
||||
hmac_b64digest: function (key, data) {
|
||||
return binl2b64(core_hmac_md5(key, data));
|
||||
},
|
||||
|
||||
hmac_hash: function (key, data) {
|
||||
return binl2str(core_hmac_md5(key, data));
|
||||
},
|
||||
|
||||
/*
|
||||
* Perform a simple self-test to see if the VM is working
|
||||
*/
|
||||
test: function () {
|
||||
return MD5.hexdigest("abc") === "900150983cd24fb0d6963f7d28e17f72";
|
||||
}
|
||||
};
|
||||
|
||||
return obj;
|
||||
})();
|
||||
|
||||
// Nodify
|
||||
exports.MD5 = MD5;
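A minimal usage sketch; the module path is an assumption, and the expected digest for "abc" is the value checked by the built-in self-test above:

```javascript
// Hypothetical usage sketch (require path is an assumption).
var MD5 = require('./md5.js').MD5;

console.log(MD5.hexdigest('abc')); // '900150983cd24fb0d6963f7d28e17f72'
console.log(MD5.test());           // true when the implementation is working

// HMAC-MD5 of a message under a key:
console.log(MD5.hmac_hexdigest('key', 'message'));
```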
|
||||
File diff suppressed because it is too large
@@ -1,48 +0,0 @@
|
||||
var strophe = require("./strophe/strophe.js").Strophe;
|
||||
|
||||
var Strophe = strophe.Strophe;
|
||||
var $iq = strophe.$iq;
|
||||
var $msg = strophe.$msg;
|
||||
var $build = strophe.$build;
|
||||
var $pres = strophe.$pres;
|
||||
|
||||
var jsdom = require("jsdom");
|
||||
var window = jsdom.jsdom().parentWindow;
|
||||
var $ = require('jquery')(window);
|
||||
|
||||
var stropheJingle = require("./strophe.jingle.sdp.js");
|
||||
|
||||
|
||||
var input = '';
|
||||
|
||||
process.stdin.on('readable', function() {
|
||||
var chunk = process.stdin.read();
|
||||
if (chunk !== null) {
|
||||
input += chunk;
|
||||
}
|
||||
});
|
||||
|
||||
process.stdin.on('end', function() {
|
||||
if (process.argv[2] == '--jingle') {
|
||||
var elem = $(input);
|
||||
// app does:
|
||||
// sess.setRemoteDescription($(iq).find('>jingle'), 'offer');
|
||||
//console.log(elem.find('>content'));
|
||||
var sdp = new stropheJingle.SDP('');
|
||||
sdp.fromJingle(elem);
|
||||
console.log(sdp.raw);
|
||||
} else if (process.argv[2] == '--sdp') {
|
||||
var sdp = new stropheJingle.SDP(input);
|
||||
var accept = $iq({to: '%(tojid)s',
|
||||
type: 'set'})
|
||||
.c('jingle', {xmlns: 'urn:xmpp:jingle:1',
|
||||
//action: 'session-accept',
|
||||
action: '%(action)s',
|
||||
initiator: '%(initiator)s',
|
||||
responder: '%(responder)s',
|
||||
sid: '%(sid)s' });
|
||||
sdp.toJingle(accept, 'responder');
|
||||
console.log(Strophe.serialize(accept));
|
||||
}
|
||||
});
|
||||
|
||||
@@ -1,44 +0,0 @@
|
||||
This directory contains some sample monitoring config for using the
|
||||
'Prometheus' monitoring server against synapse.
|
||||
|
||||
To use it, first install prometheus by following the instructions at
|
||||
|
||||
http://prometheus.io/
|
||||
|
||||
### for Prometheus v1
|
||||
|
||||
Add a new job to the main prometheus.conf file:
|
||||
|
||||
```yaml
|
||||
job: {
|
||||
name: "synapse"
|
||||
|
||||
target_group: {
|
||||
target: "http://SERVER.LOCATION.HERE:PORT/_synapse/metrics"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### for Prometheus v2
|
||||
Add a new job to the main prometheus.yml file:
|
||||
|
||||
```yaml
|
||||
- job_name: "synapse"
|
||||
metrics_path: "/_synapse/metrics"
|
||||
# when endpoint uses https:
|
||||
scheme: "https"
|
||||
|
||||
static_configs:
|
||||
- targets: ['SERVER.LOCATION:PORT']
|
||||
```
|
||||
|
||||
To use `synapse.rules` add
|
||||
|
||||
```yaml
|
||||
rule_files:
|
||||
- "/PATH/TO/synapse-v2.rules"
|
||||
```
|
||||
|
||||
Metrics are disabled by default when running synapse; they must be enabled
|
||||
with the 'enable-metrics' option, either in the synapse config file or as a
|
||||
command-line option.
|
||||
@@ -1,395 +0,0 @@
|
||||
{{ template "head" . }}
|
||||
|
||||
{{ template "prom_content_head" . }}
|
||||
<h1>System Resources</h1>
|
||||
|
||||
<h3>CPU</h3>
|
||||
<div id="process_resource_utime"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#process_resource_utime"),
|
||||
expr: "rate(process_cpu_seconds_total[2m]) * 100",
|
||||
name: "[[job]]",
|
||||
min: 0,
|
||||
max: 100,
|
||||
renderer: "line",
|
||||
height: 150,
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "%",
|
||||
yTitle: "CPU Usage"
|
||||
})
|
||||
</script>
|
||||
|
||||
<h3>Memory</h3>
|
||||
<div id="process_resource_maxrss"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#process_resource_maxrss"),
|
||||
expr: "process_psutil_rss:max",
|
||||
name: "Maxrss",
|
||||
min: 0,
|
||||
renderer: "line",
|
||||
height: 150,
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yUnits: "bytes",
|
||||
yTitle: "Usage"
|
||||
})
|
||||
</script>
|
||||
|
||||
<h3>File descriptors</h3>
|
||||
<div id="process_fds"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#process_fds"),
|
||||
expr: "process_open_fds{job='synapse'}",
|
||||
name: "FDs",
|
||||
min: 0,
|
||||
renderer: "line",
|
||||
height: 150,
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "",
|
||||
yTitle: "Descriptors"
|
||||
})
|
||||
</script>
|
||||
|
||||
<h1>Reactor</h1>
|
||||
|
||||
<h3>Total reactor time</h3>
|
||||
<div id="reactor_total_time"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#reactor_total_time"),
|
||||
expr: "rate(python_twisted_reactor_tick_time:total[2m]) / 1000",
|
||||
name: "time",
|
||||
max: 1,
|
||||
min: 0,
|
||||
renderer: "area",
|
||||
height: 150,
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "s/s",
|
||||
yTitle: "Usage"
|
||||
})
|
||||
</script>
|
||||
|
||||
<h3>Average reactor tick time</h3>
|
||||
<div id="reactor_average_time"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#reactor_average_time"),
|
||||
expr: "rate(python_twisted_reactor_tick_time:total[2m]) / rate(python_twisted_reactor_tick_time:count[2m]) / 1000",
|
||||
name: "time",
|
||||
min: 0,
|
||||
renderer: "line",
|
||||
height: 150,
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "s",
|
||||
yTitle: "Time"
|
||||
})
|
||||
</script>
|
||||
|
||||
<h3>Pending calls per tick</h3>
|
||||
<div id="reactor_pending_calls"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#reactor_pending_calls"),
|
||||
expr: "rate(python_twisted_reactor_pending_calls:total[30s])/rate(python_twisted_reactor_pending_calls:count[30s])",
|
||||
name: "calls",
|
||||
min: 0,
|
||||
renderer: "line",
|
||||
height: 150,
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yTitle: "Pending Cals"
|
||||
})
|
||||
</script>
|
||||
|
||||
<h1>Storage</h1>
|
||||
|
||||
<h3>Queries</h3>
|
||||
<div id="synapse_storage_query_time"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_storage_query_time"),
|
||||
expr: "rate(synapse_storage_query_time:count[2m])",
|
||||
name: "[[verb]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yUnits: "queries/s",
|
||||
yTitle: "Queries"
|
||||
})
|
||||
</script>
|
||||
|
||||
<h3>Transactions</h3>
|
||||
<div id="synapse_storage_transaction_time"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_storage_transaction_time"),
|
||||
expr: "rate(synapse_storage_transaction_time:count[2m])",
|
||||
name: "[[desc]]",
|
||||
min: 0,
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yUnits: "txn/s",
|
||||
yTitle: "Transactions"
|
||||
})
|
||||
</script>
|
||||
|
||||
<h3>Transaction execution time</h3>
|
||||
<div id="synapse_storage_transactions_time_msec"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_storage_transactions_time_msec"),
|
||||
expr: "rate(synapse_storage_transaction_time:total[2m]) / 1000",
|
||||
name: "[[desc]]",
|
||||
min: 0,
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "s/s",
|
||||
yTitle: "Usage"
|
||||
})
|
||||
</script>
|
||||
|
||||
<h3>Database scheduling latency</h3>
|
||||
<div id="synapse_storage_schedule_time"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_storage_schedule_time"),
|
||||
expr: "rate(synapse_storage_schedule_time:total[2m]) / 1000",
|
||||
name: "Total latency",
|
||||
min: 0,
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "s/s",
|
||||
yTitle: "Usage"
|
||||
})
|
||||
</script>
|
||||
|
||||
<h3>Cache hit ratio</h3>
|
||||
<div id="synapse_cache_ratio"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_cache_ratio"),
|
||||
expr: "rate(synapse_util_caches_cache:total[2m]) * 100",
|
||||
name: "[[name]]",
|
||||
min: 0,
|
||||
max: 100,
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yUnits: "%",
|
||||
yTitle: "Percentage"
|
||||
})
|
||||
</script>
|
||||
|
||||
<h3>Cache size</h3>
|
||||
<div id="synapse_cache_size"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_cache_size"),
|
||||
expr: "synapse_util_caches_cache:size",
|
||||
name: "[[name]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yUnits: "",
|
||||
yTitle: "Items"
|
||||
})
|
||||
</script>
|
||||
|
||||
<h1>Requests</h1>
|
||||
|
||||
<h3>Requests by Servlet</h3>
|
||||
<div id="synapse_http_server_request_count_servlet"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_http_server_request_count_servlet"),
|
||||
expr: "rate(synapse_http_server_request_count:servlet[2m])",
|
||||
name: "[[servlet]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "req/s",
|
||||
yTitle: "Requests"
|
||||
})
|
||||
</script>
|
||||
<h4> (without <tt>EventStreamRestServlet</tt> or <tt>SyncRestServlet</tt>)</h4>
|
||||
<div id="synapse_http_server_request_count_servlet_minus_events"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_http_server_request_count_servlet_minus_events"),
|
||||
expr: "rate(synapse_http_server_request_count:servlet{servlet!=\"EventStreamRestServlet\", servlet!=\"SyncRestServlet\"}[2m])",
|
||||
name: "[[servlet]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "req/s",
|
||||
yTitle: "Requests"
|
||||
})
|
||||
</script>
|
||||
|
||||
<h3>Average response times</h3>
|
||||
<div id="synapse_http_server_response_time_avg"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_http_server_response_time_avg"),
|
||||
expr: "rate(synapse_http_server_response_time_seconds[2m]) / rate(synapse_http_server_response_count[2m]) / 1000",
|
||||
name: "[[servlet]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "s/req",
|
||||
yTitle: "Response time"
|
||||
})
|
||||
</script>
|
||||
|
||||
<h3>All responses by code</h3>
|
||||
<div id="synapse_http_server_responses"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_http_server_responses"),
|
||||
expr: "rate(synapse_http_server_responses[2m])",
|
||||
name: "[[method]] / [[code]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "req/s",
|
||||
yTitle: "Requests"
|
||||
})
|
||||
</script>
|
||||
|
||||
<h3>Error responses by code</h3>
|
||||
<div id="synapse_http_server_responses_err"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_http_server_responses_err"),
|
||||
expr: "rate(synapse_http_server_responses{code=~\"[45]..\"}[2m])",
|
||||
name: "[[method]] / [[code]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "req/s",
|
||||
yTitle: "Requests"
|
||||
})
|
||||
</script>
|
||||
|
||||
|
||||
<h3>CPU Usage</h3>
|
||||
<div id="synapse_http_server_response_ru_utime"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_http_server_response_ru_utime"),
|
||||
expr: "rate(synapse_http_server_response_ru_utime_seconds[2m])",
|
||||
name: "[[servlet]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "s/s",
|
||||
yTitle: "CPU Usage"
|
||||
})
|
||||
</script>
|
||||
|
||||
|
||||
<h3>DB Usage</h3>
|
||||
<div id="synapse_http_server_response_db_txn_duration"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_http_server_response_db_txn_duration"),
|
||||
expr: "rate(synapse_http_server_response_db_txn_duration_seconds[2m])",
|
||||
name: "[[servlet]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "s/s",
|
||||
yTitle: "DB Usage"
|
||||
})
|
||||
</script>
|
||||
|
||||
|
||||
<h3>Average event send times</h3>
|
||||
<div id="synapse_http_server_send_time_avg"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_http_server_send_time_avg"),
|
||||
expr: "rate(synapse_http_server_response_time_second{servlet='RoomSendEventRestServlet'}[2m]) / rate(synapse_http_server_response_count{servlet='RoomSendEventRestServlet'}[2m]) / 1000",
|
||||
name: "[[servlet]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "s/req",
|
||||
yTitle: "Response time"
|
||||
})
|
||||
</script>
|
||||
|
||||
<h1>Federation</h1>
|
||||
|
||||
<h3>Sent Messages</h3>
|
||||
<div id="synapse_federation_client_sent"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_federation_client_sent"),
|
||||
expr: "rate(synapse_federation_client_sent[2m])",
|
||||
name: "[[type]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "req/s",
|
||||
yTitle: "Requests"
|
||||
})
|
||||
</script>
|
||||
|
||||
<h3>Received Messages</h3>
|
||||
<div id="synapse_federation_server_received"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_federation_server_received"),
|
||||
expr: "rate(synapse_federation_server_received[2m])",
|
||||
name: "[[type]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "req/s",
|
||||
yTitle: "Requests"
|
||||
})
|
||||
</script>
|
||||
|
||||
<h3>Pending</h3>
|
||||
<div id="synapse_federation_transaction_queue_pending"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_federation_transaction_queue_pending"),
|
||||
expr: "synapse_federation_transaction_queue_pending",
|
||||
name: "[[type]]",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yUnits: "",
|
||||
yTitle: "Units"
|
||||
})
|
||||
</script>
|
||||
|
||||
<h1>Clients</h1>
|
||||
|
||||
<h3>Notifiers</h3>
|
||||
<div id="synapse_notifier_listeners"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_notifier_listeners"),
|
||||
expr: "synapse_notifier_listeners",
|
||||
name: "listeners",
|
||||
min: 0,
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yUnits: "",
|
||||
yTitle: "Listeners"
|
||||
})
|
||||
</script>
|
||||
|
||||
<h3>Notified Events</h3>
|
||||
<div id="synapse_notifier_notified_events"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#synapse_notifier_notified_events"),
|
||||
expr: "rate(synapse_notifier_notified_events[2m])",
|
||||
name: "events",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize,
|
||||
yUnits: "events/s",
|
||||
yTitle: "Event rate"
|
||||
})
|
||||
</script>
|
||||
|
||||
{{ template "prom_content_tail" . }}
|
||||
|
||||
{{ template "tail" }}
|
||||
@@ -1,21 +0,0 @@
|
||||
synapse_federation_transaction_queue_pendingEdus:total = sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)
|
||||
synapse_federation_transaction_queue_pendingPdus:total = sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)
|
||||
|
||||
synapse_http_server_request_count:method{servlet=""} = sum(synapse_http_server_request_count) by (method)
|
||||
synapse_http_server_request_count:servlet{method=""} = sum(synapse_http_server_request_count) by (servlet)
|
||||
|
||||
synapse_http_server_request_count:total{servlet=""} = sum(synapse_http_server_request_count:by_method) by (servlet)
|
||||
|
||||
synapse_cache:hit_ratio_5m = rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])
|
||||
synapse_cache:hit_ratio_30s = rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])
|
||||
|
||||
synapse_federation_client_sent{type="EDU"} = synapse_federation_client_sent_edus + 0
|
||||
synapse_federation_client_sent{type="PDU"} = synapse_federation_client_sent_pdu_destinations:count + 0
|
||||
synapse_federation_client_sent{type="Query"} = sum(synapse_federation_client_sent_queries) by (job)
|
||||
|
||||
synapse_federation_server_received{type="EDU"} = synapse_federation_server_received_edus + 0
|
||||
synapse_federation_server_received{type="PDU"} = synapse_federation_server_received_pdus + 0
|
||||
synapse_federation_server_received{type="Query"} = sum(synapse_federation_server_received_queries) by (job)
|
||||
|
||||
synapse_federation_transaction_queue_pending{type="EDU"} = synapse_federation_transaction_queue_pending_edus + 0
|
||||
synapse_federation_transaction_queue_pending{type="PDU"} = synapse_federation_transaction_queue_pending_pdus + 0
|
||||
@@ -1,60 +0,0 @@
|
||||
groups:
|
||||
- name: synapse
|
||||
rules:
|
||||
- record: "synapse_federation_transaction_queue_pendingEdus:total"
|
||||
expr: "sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)"
|
||||
- record: "synapse_federation_transaction_queue_pendingPdus:total"
|
||||
expr: "sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)"
|
||||
- record: 'synapse_http_server_request_count:method'
|
||||
labels:
|
||||
servlet: ""
|
||||
expr: "sum(synapse_http_server_request_count) by (method)"
|
||||
- record: 'synapse_http_server_request_count:servlet'
|
||||
labels:
|
||||
method: ""
|
||||
expr: 'sum(synapse_http_server_request_count) by (servlet)'
|
||||
|
||||
- record: 'synapse_http_server_request_count:total'
|
||||
labels:
|
||||
servlet: ""
|
||||
expr: 'sum(synapse_http_server_request_count:by_method) by (servlet)'
|
||||
|
||||
- record: 'synapse_cache:hit_ratio_5m'
|
||||
expr: 'rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])'
|
||||
- record: 'synapse_cache:hit_ratio_30s'
|
||||
expr: 'rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])'
|
||||
|
||||
- record: 'synapse_federation_client_sent'
|
||||
labels:
|
||||
type: "EDU"
|
||||
expr: 'synapse_federation_client_sent_edus + 0'
|
||||
- record: 'synapse_federation_client_sent'
|
||||
labels:
|
||||
type: "PDU"
|
||||
expr: 'synapse_federation_client_sent_pdu_destinations:count + 0'
|
||||
- record: 'synapse_federation_client_sent'
|
||||
labels:
|
||||
type: "Query"
|
||||
expr: 'sum(synapse_federation_client_sent_queries) by (job)'
|
||||
|
||||
- record: 'synapse_federation_server_received'
|
||||
labels:
|
||||
type: "EDU"
|
||||
expr: 'synapse_federation_server_received_edus + 0'
|
||||
- record: 'synapse_federation_server_received'
|
||||
labels:
|
||||
type: "PDU"
|
||||
expr: 'synapse_federation_server_received_pdus + 0'
|
||||
- record: 'synapse_federation_server_received'
|
||||
labels:
|
||||
type: "Query"
|
||||
expr: 'sum(synapse_federation_server_received_queries) by (job)'
|
||||
|
||||
- record: 'synapse_federation_transaction_queue_pending'
|
||||
labels:
|
||||
type: "EDU"
|
||||
expr: 'synapse_federation_transaction_queue_pending_edus + 0'
|
||||
- record: 'synapse_federation_transaction_queue_pending'
|
||||
labels:
|
||||
type: "PDU"
|
||||
expr: 'synapse_federation_transaction_queue_pending_pdus + 0'
|
||||
@@ -1,16 +0,0 @@
|
||||
Purge history API examples
|
||||
==========================
|
||||
|
||||
# `purge_history.sh`
|
||||
|
||||
A bash script that uses the [purge history API](/docs/admin_api/README.rst) to
|
||||
purge all messages in a list of rooms up to a certain event. You can select a
|
||||
timeframe or a number of messages that you want to keep in the room.
|
||||
|
||||
Just configure the variables DOMAIN, ADMIN, ROOMS_ARRAY and TIME at the top of
|
||||
the script.
|
||||
|
||||
# `purge_remote_media.sh`
|
||||
|
||||
A bash file, that uses the [purge history API](/docs/admin_api/README.rst) to
|
||||
purge all old cached remote media.
|
||||
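For reference, the API calls that `purge_history.sh` below wraps look roughly like this (a condensed sketch; the server name, room ID, event ID, purge ID and token are placeholders):

# Start a purge up to (and including) a given event, deleting local events as well.
curl -s -H "Authorization: Bearer <ACCESS_TOKEN>" \
     -X POST -d '{"delete_local_events": true}' \
     "yourserver.tld:8008/_matrix/client/r0/admin/purge_history/<ROOM_ID>/<EVENT_ID>"

# Poll with the purge_id returned above until the status is no longer "active".
curl -s -H "Authorization: Bearer <ACCESS_TOKEN>" \
     "yourserver.tld:8008/_matrix/client/r0/admin/purge_history_status/<PURGE_ID>"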
@@ -1,141 +0,0 @@
#!/bin/bash

# this script will use the api:
# https://github.com/matrix-org/synapse/blob/master/docs/admin_api/purge_history_api.rst
#
# It will purge all messages in a list of rooms up to a certain event

###################################################################################################
# define your domain and admin user
###################################################################################################
# your server's domain:
DOMAIN=yourserver.tld
# add this user as admin in your home server:
ADMIN="@you_admin_username:$DOMAIN"

API_URL="$DOMAIN:8008/_matrix/client/r0"

###################################################################################################
# choose the rooms to prune old messages from (add a free comment at the end)
###################################################################################################
# the room_id's you can get e.g. from your Riot client's "View Source" button on each message
ROOMS_ARRAY=(
'!DgvjtOljKujDBrxyHk:matrix.org#riot:matrix.org'
'!QtykxKocfZaZOUrTwp:matrix.org#Matrix HQ'
)

# ALTERNATIVELY:
# you can select all the rooms that are not encrypted and loop over the result:
# SELECT room_id FROM rooms WHERE room_id NOT IN (SELECT DISTINCT room_id FROM events WHERE type ='m.room.encrypted')
# or
# select all rooms with at least 100 members:
# SELECT q.room_id FROM (select count(*) as numberofusers, room_id FROM current_state_events WHERE type ='m.room.member'
# GROUP BY room_id) AS q LEFT JOIN room_aliases a ON q.room_id=a.room_id WHERE q.numberofusers > 100 ORDER BY numberofusers desc

###################################################################################################
# evaluate the EVENT_ID before which should be pruned
###################################################################################################
# choose a time before which the messages should be pruned:
TIME='12 months ago'
# ALTERNATIVELY:
# a certain time:
# TIME='2016-08-31 23:59:59'

# creates a timestamp from the given time string:
UNIX_TIMESTAMP=$(date +%s%3N --date='TZ="UTC+2" '"$TIME")

# ALTERNATIVELY:
# prune all messages that are older than 1000 messages ago:
# LAST_MESSAGES=1000
# SQL_GET_EVENT="SELECT event_id from events WHERE type='m.room.message' AND room_id ='$ROOM' ORDER BY received_ts DESC LIMIT 1 offset $(($LAST_MESSAGES - 1))"

# ALTERNATIVELY:
# select the EVENT_ID manually:
#EVENT_ID='$1471814088343495zpPNI:matrix.org' # an example event from 21st of Aug 2016 by Matthew

###################################################################################################
# make the admin user a server admin in the database with
###################################################################################################
# psql -A -t --dbname=synapse -c "UPDATE users SET admin=1 WHERE name LIKE '$ADMIN'"

###################################################################################################
# database function
###################################################################################################
sql (){
  # for sqlite3:
  #sqlite3 homeserver.db "pragma busy_timeout=20000;$1" | awk '{print $2}'
  # for postgres:
  psql -A -t --dbname=synapse -c "$1" | grep -v 'Pager'
}

###################################################################################################
# get an access token
###################################################################################################
# for example externally by watching Riot in your browser's network inspector
# or internally on the server locally, use this:
TOKEN=$(sql "SELECT token FROM access_tokens WHERE user_id='$ADMIN' ORDER BY id DESC LIMIT 1")
AUTH="Authorization: Bearer $TOKEN"

###################################################################################################
# check, if your TOKEN works. For example this works:
###################################################################################################
# $ curl --header "$AUTH" "$API_URL/rooms/$ROOM/state/m.room.power_levels"

###################################################################################################
# finally start pruning the room:
###################################################################################################
POSTDATA='{"delete_local_events":"true"}' # this will really delete local events, so the messages in the room really disappear unless they are restored by remote federation

for ROOM in "${ROOMS_ARRAY[@]}"; do
    echo "########################################### $(date) ################# "
    echo "pruning room: $ROOM ..."
    ROOM=${ROOM%#*}
    #set -x
    echo "check for alias in db..."
    # for postgres:
    sql "SELECT * FROM room_aliases WHERE room_id='$ROOM'"
    echo "get event..."
    # for postgres:
    EVENT_ID=$(sql "SELECT event_id FROM events WHERE type='m.room.message' AND received_ts<'$UNIX_TIMESTAMP' AND room_id='$ROOM' ORDER BY received_ts DESC LIMIT 1;")
    if [ "$EVENT_ID" == "" ]; then
        echo "no event $TIME"
    else
        echo "event: $EVENT_ID"
        SLEEP=2
        set -x
        # call purge
        OUT=$(curl --header "$AUTH" -s -d $POSTDATA -X POST "$API_URL/admin/purge_history/$ROOM/$EVENT_ID")
        PURGE_ID=$(echo "$OUT" |grep purge_id|cut -d'"' -f4 )
        if [ "$PURGE_ID" == "" ]; then
            # probably the history purge is already in progress for $ROOM
            : "continuing with next room"
        else
            while : ; do
                # get status of purge and sleep longer each time if still active
                sleep $SLEEP
                STATUS=$(curl --header "$AUTH" -s -X GET "$API_URL/admin/purge_history_status/$PURGE_ID" |grep status|cut -d'"' -f4)
                : "$ROOM --> Status: $STATUS"
                [[ "$STATUS" == "active" ]] || break
                SLEEP=$((SLEEP + 1))
            done
        fi
        set +x
        sleep 1
    fi
done


###################################################################################################
# additionally
###################################################################################################
# to benefit from pruning large amounts of data, you need to call VACUUM to free the unused space.
# This can take a very long time (hours) and the client has to be stopped while you do so:
# $ synctl stop
# $ sqlite3 -line homeserver.db "vacuum;"
# $ synctl start

# This could be set, so you don't need to prune every time after deleting some rows:
# $ sqlite3 homeserver.db "PRAGMA auto_vacuum = FULL;"
# be cautious, it could make the database somewhat slow if there are a lot of deletions

exit
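The VACUUM advice above only shows the sqlite variant; for the postgres path used by the script's sql() helper, the equivalent clean-up would be along these lines (a sketch, not part of the script):

# Reclaim disk space on postgres after a large purge; this takes locks, so stop Synapse first.
# synctl stop
# psql -A -t --dbname=synapse -c "VACUUM FULL;"
# synctl start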
@@ -1,54 +0,0 @@
#!/bin/bash

DOMAIN=yourserver.tld
# add this user as admin in your home server:
ADMIN="@you_admin_username:$DOMAIN"

API_URL="$DOMAIN:8008/_matrix/client/r0"

# choose a time before which the messages should be pruned:
# TIME='2016-08-31 23:59:59'
TIME='12 months ago'

# creates a timestamp from the given time string:
UNIX_TIMESTAMP=$(date +%s%3N --date='TZ="UTC+2" '"$TIME")


###################################################################################################
# database function
###################################################################################################
sql (){
  # for sqlite3:
  #sqlite3 homeserver.db "pragma busy_timeout=20000;$1" | awk '{print $2}'
  # for postgres:
  psql -A -t --dbname=synapse -c "$1" | grep -v 'Pager'
}

###############################################################################
# make the admin user a server admin in the database with
###############################################################################
# sql "UPDATE users SET admin=1 WHERE name LIKE '$ADMIN'"

###############################################################################
# get an access token
###############################################################################
# for example externally by watching Riot in your browser's network inspector
# or internally on the server locally, use this:
TOKEN=$(sql "SELECT token FROM access_tokens WHERE user_id='$ADMIN' ORDER BY id DESC LIMIT 1")

###############################################################################
# check, if your TOKEN works. For example this works:
###############################################################################
# curl --header "Authorization: Bearer $TOKEN" "$API_URL/rooms/$ROOM/state/m.room.power_levels"

###############################################################################
# optional check size before
###############################################################################
# echo calculate used storage before ...
# du -shc ../.synapse/media_store/*

###############################################################################
# finally start pruning media:
###############################################################################
set -x # for debugging the generated string
curl --header "Authorization: Bearer $TOKEN" -v -X POST "$API_URL/admin/purge_media_cache/?before_ts=$UNIX_TIMESTAMP"
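To run this clean-up regularly, a cron entry along these lines could be used (a sketch; the install path and log file are assumptions):

# Purge remote media older than the configured TIME every Sunday at 04:00.
# 0 4 * * 0  /opt/synapse-scripts/purge_remote_media.sh >> /var/log/purge_remote_media.log 2>&1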
@@ -1,93 +0,0 @@
#!/usr/bin/env python
from argparse import ArgumentParser
import json
import requests
import sys
import urllib

def _mkurl(template, kws):
    for key in kws:
        template = template.replace(key, kws[key])
    return template

def main(hs, room_id, access_token, user_id_prefix, why):
    if not why:
        why = "Automated kick."
    print "Kicking members on %s in room %s matching %s" % (hs, room_id, user_id_prefix)
    room_state_url = _mkurl(
        "$HS/_matrix/client/api/v1/rooms/$ROOM/state?access_token=$TOKEN",
        {
            "$HS": hs,
            "$ROOM": room_id,
            "$TOKEN": access_token
        }
    )
    print "Getting room state => %s" % room_state_url
    res = requests.get(room_state_url)
    print "HTTP %s" % res.status_code
    state_events = res.json()
    if "error" in state_events:
        print "FATAL"
        print state_events
        return

    kick_list = []
    room_name = room_id
    for event in state_events:
        if not event["type"] == "m.room.member":
            if event["type"] == "m.room.name":
                room_name = event["content"].get("name")
            continue
        if not event["content"].get("membership") == "join":
            continue
        if event["state_key"].startswith(user_id_prefix):
            kick_list.append(event["state_key"])

    if len(kick_list) == 0:
        print "No user IDs match the prefix '%s'" % user_id_prefix
        return

    print "The following user IDs will be kicked from %s" % room_name
    for uid in kick_list:
        print uid
    doit = raw_input("Continue? [Y]es\n")
    if len(doit) > 0 and doit.lower() == 'y':
        print "Kicking members..."
        # encode them all
        kick_list = [urllib.quote(uid) for uid in kick_list]
        for uid in kick_list:
            kick_url = _mkurl(
                "$HS/_matrix/client/api/v1/rooms/$ROOM/state/m.room.member/$UID?access_token=$TOKEN",
                {
                    "$HS": hs,
                    "$UID": uid,
                    "$ROOM": room_id,
                    "$TOKEN": access_token
                }
            )
            kick_body = {
                "membership": "leave",
                "reason": why
            }
            print "Kicking %s" % uid
            res = requests.put(kick_url, data=json.dumps(kick_body))
            if res.status_code != 200:
                print "ERROR: HTTP %s" % res.status_code
                if res.json().get("error"):
                    print "ERROR: JSON %s" % res.json()


if __name__ == "__main__":
    parser = ArgumentParser("Kick members in a room matching a certain user ID prefix.")
    parser.add_argument("-u","--user-id",help="The user ID prefix e.g. '@irc_'")
    parser.add_argument("-t","--token",help="Your access_token")
    parser.add_argument("-r","--room",help="The room ID to kick members in")
    parser.add_argument("-s","--homeserver",help="The base HS url e.g. http://matrix.org")
    parser.add_argument("-w","--why",help="Reason for the kick. Optional.")
    args = parser.parse_args()
    if not args.room or not args.token or not args.user_id or not args.homeserver:
        parser.print_help()
        sys.exit(1)
    else:
        main(args.homeserver, args.room, args.token, args.user_id, args.why)
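A typical invocation of the script above might look like this (the filename and all values are placeholders, derived only from the argparse options the script defines):

# Kick every joined member whose user ID starts with '@irc_' from the given room.
python kick_users.py -s http://matrix.org -t <ACCESS_TOKEN> -r '!someroom:matrix.org' -u '@irc_'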
@@ -1,25 +0,0 @@
version: 1

# In systemd's journal, loglevel is implicitly stored, so let's omit it
# from the message text.
formatters:
    journal_fmt:
        format: '%(name)s: [%(request)s] %(message)s'

filters:
    context:
        (): synapse.util.logcontext.LoggingContextFilter
        request: ""

handlers:
    journal:
        class: systemd.journal.JournalHandler
        formatter: journal_fmt
        filters: [context]
        SYSLOG_IDENTIFIER: synapse

root:
    level: INFO
    handlers: [journal]

disable_existing_loggers: False
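Once Synapse is started with this log config, the output can be inspected with journalctl; a minimal sketch:

# Follow Synapse's log output; the identifier matches SYSLOG_IDENTIFIER above.
journalctl -f SYSLOG_IDENTIFIER=synapse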
@@ -1,31 +0,0 @@
# Example systemd configuration file for synapse. Copy into
# /etc/systemd/system/, update the paths if necessary, then:
#
# systemctl enable matrix-synapse
# systemctl start matrix-synapse
#
# This assumes that Synapse has been installed in a virtualenv in
# /opt/synapse/env.
#
# **NOTE:** This is an example service file that may change in the future. If you
# wish to use this please copy rather than symlink it.

[Unit]
Description=Synapse Matrix homeserver

[Service]
Type=simple
Restart=on-abort

User=synapse
Group=nogroup

WorkingDirectory=/opt/synapse
ExecStart=/opt/synapse/env/bin/python -m synapse.app.homeserver --config-path=/opt/synapse/homeserver.yaml

# adjust the cache factor if necessary
# Environment=SYNAPSE_CACHE_FACTOR=2.0

[Install]
WantedBy=multi-user.target
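If the cache factor (or any other environment variable) needs changing without editing the unit file itself, a systemd drop-in override is one option; a sketch, assuming the unit was installed as matrix-synapse.service:

# Creates/edits an override file under /etc/systemd/system/matrix-synapse.service.d/
sudo systemctl edit matrix-synapse
# ...then add, for example:
#   [Service]
#   Environment=SYNAPSE_CACHE_FACTOR=2.0
sudo systemctl daemon-reload && sudo systemctl restart matrix-synapse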
2
contrib/vertobot/.gitignore
vendored
@@ -1,2 +0,0 @@
vucbot.yaml
vertobot.yaml
@@ -1,309 +0,0 @@
|
||||
#!/usr/bin/env perl
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
use 5.010; # //
|
||||
use IO::Socket::SSL qw(SSL_VERIFY_NONE);
|
||||
use IO::Async::Loop;
|
||||
use Net::Async::WebSocket::Client;
|
||||
use Net::Async::Matrix 0.11_002;
|
||||
use JSON;
|
||||
use YAML;
|
||||
use Data::UUID;
|
||||
use Getopt::Long;
|
||||
use Data::Dumper;
|
||||
|
||||
binmode STDOUT, ":encoding(UTF-8)";
|
||||
binmode STDERR, ":encoding(UTF-8)";
|
||||
|
||||
my $loop = IO::Async::Loop->new;
|
||||
# Net::Async::HTTP + SSL + IO::Poll doesn't play well. See
|
||||
# https://rt.cpan.org/Ticket/Display.html?id=93107
|
||||
ref $loop eq "IO::Async::Loop::Poll" and
|
||||
warn "Using SSL with IO::Poll causes known memory-leaks!!\n";
|
||||
|
||||
GetOptions(
|
||||
'C|config=s' => \my $CONFIG,
|
||||
'eval-from=s' => \my $EVAL_FROM,
|
||||
) or exit 1;
|
||||
|
||||
if( defined $EVAL_FROM ) {
|
||||
# An emergency 'eval() this file' hack
|
||||
$SIG{HUP} = sub {
|
||||
my $code = do {
|
||||
open my $fh, "<", $EVAL_FROM or warn( "Cannot read - $!" ), return;
|
||||
local $/; <$fh>
|
||||
};
|
||||
|
||||
eval $code or warn "Cannot eval() - $@";
|
||||
};
|
||||
}
|
||||
|
||||
defined $CONFIG or die "Must supply --config\n";
|
||||
|
||||
my %CONFIG = %{ YAML::LoadFile( $CONFIG ) };
|
||||
|
||||
my %MATRIX_CONFIG = %{ $CONFIG{matrix} };
|
||||
# No harm in always applying this
|
||||
$MATRIX_CONFIG{SSL_verify_mode} = SSL_VERIFY_NONE;
|
||||
|
||||
# Track every Room object, so we can ->leave them all on shutdown
|
||||
my %bot_matrix_rooms;
|
||||
|
||||
my $bridgestate = {};
|
||||
my $roomid_by_callid = {};
|
||||
|
||||
my $bot_verto = Net::Async::WebSocket::Client->new(
|
||||
on_frame => sub {
|
||||
my ( $self, $frame ) = @_;
|
||||
warn "[Verto] receiving $frame";
|
||||
on_verto_json($frame);
|
||||
},
|
||||
);
|
||||
$loop->add( $bot_verto );
|
||||
|
||||
my $sessid = lc new Data::UUID->create_str();
|
||||
|
||||
my $bot_matrix = Net::Async::Matrix->new(
|
||||
%MATRIX_CONFIG,
|
||||
on_log => sub { warn "log: @_\n" },
|
||||
on_invite => sub {
|
||||
my ($matrix, $invite) = @_;
|
||||
warn "[Matrix] invited to: " . $invite->{room_id} . " by " . $invite->{inviter} . "\n";
|
||||
|
||||
$matrix->join_room( $invite->{room_id} )->get;
|
||||
},
|
||||
on_room_new => sub {
|
||||
my ($matrix, $room) = @_;
|
||||
|
||||
warn "[Matrix] have a room ID: " . $room->room_id . "\n";
|
||||
|
||||
$bot_matrix_rooms{$room->room_id} = $room;
|
||||
|
||||
# log in to verto on behalf of this room
|
||||
$bridgestate->{$room->room_id}->{sessid} = $sessid;
|
||||
|
||||
$room->configure(
|
||||
on_message => \&on_room_message,
|
||||
);
|
||||
|
||||
my $f = send_verto_json_request("login", {
|
||||
'login' => $CONFIG{'verto-dialog-params'}{'login'},
|
||||
'passwd' => $CONFIG{'verto-config'}{'passwd'},
|
||||
'sessid' => $sessid,
|
||||
});
|
||||
$matrix->adopt_future($f);
|
||||
|
||||
# we deliberately don't paginate the room, as we only care about
|
||||
# new calls
|
||||
},
|
||||
on_unknown_event => \&on_unknown_event,
|
||||
on_error => sub {
|
||||
print STDERR "Matrix failure: @_\n";
|
||||
},
|
||||
);
|
||||
$loop->add( $bot_matrix );
|
||||
|
||||
sub on_unknown_event
|
||||
{
|
||||
my ($matrix, $event) = @_;
|
||||
print Dumper($event);
|
||||
|
||||
my $room_id = $event->{room_id};
|
||||
my %dp = %{$CONFIG{'verto-dialog-params'}};
|
||||
$dp{callID} = $bridgestate->{$room_id}->{callid};
|
||||
|
||||
if ($event->{type} eq 'm.call.invite') {
|
||||
$bridgestate->{$room_id}->{matrix_callid} = $event->{content}->{call_id};
|
||||
$bridgestate->{$room_id}->{callid} = lc new Data::UUID->create_str();
|
||||
$bridgestate->{$room_id}->{offer} = $event->{content}->{offer}->{sdp};
|
||||
$bridgestate->{$room_id}->{gathered_candidates} = 0;
|
||||
$roomid_by_callid->{ $bridgestate->{$room_id}->{callid} } = $room_id;
|
||||
# no trickle ICE in verto apparently
|
||||
}
|
||||
elsif ($event->{type} eq 'm.call.candidates') {
|
||||
# XXX: compare call IDs
|
||||
if (!$bridgestate->{$room_id}->{gathered_candidates}) {
|
||||
$bridgestate->{$room_id}->{gathered_candidates} = 1;
|
||||
my $offer = $bridgestate->{$room_id}->{offer};
|
||||
my $candidate_block = {
|
||||
audio => '',
|
||||
video => '',
|
||||
};
|
||||
foreach (@{$event->{content}->{candidates}}) {
|
||||
if ($_->{sdpMid}) {
|
||||
$candidate_block->{$_->{sdpMid}} .= "a=" . $_->{candidate} . "\r\n";
|
||||
}
|
||||
else {
|
||||
$candidate_block->{audio} .= "a=" . $_->{candidate} . "\r\n";
|
||||
$candidate_block->{video} .= "a=" . $_->{candidate} . "\r\n";
|
||||
}
|
||||
}
|
||||
|
||||
# XXX: assumes audio comes first
|
||||
#$offer =~ s/(a=rtcp-mux[\r\n]+)/$1$candidate_block->{audio}/;
|
||||
#$offer =~ s/(a=rtcp-mux[\r\n]+)/$1$candidate_block->{video}/;
|
||||
|
||||
$offer =~ s/(m=video)/$candidate_block->{audio}$1/;
|
||||
$offer =~ s/(.$)/$1\n$candidate_block->{video}$1/;
|
||||
|
||||
my $f = send_verto_json_request("verto.invite", {
|
||||
"sdp" => $offer,
|
||||
"dialogParams" => \%dp,
|
||||
"sessid" => $bridgestate->{$room_id}->{sessid},
|
||||
});
|
||||
$matrix->adopt_future($f);
|
||||
}
|
||||
else {
|
||||
# ignore them, as no trickle ICE, although we might as well
|
||||
# batch them up
|
||||
# foreach (@{$event->{content}->{candidates}}) {
|
||||
# push @{$bridgestate->{$room_id}->{candidates}}, $_;
|
||||
# }
|
||||
}
|
||||
}
|
||||
elsif ($event->{type} eq 'm.call.hangup') {
|
||||
if ($bridgestate->{$room_id}->{matrix_callid} eq $event->{content}->{call_id}) {
|
||||
my $f = send_verto_json_request("verto.bye", {
|
||||
"dialogParams" => \%dp,
|
||||
"sessid" => $bridgestate->{$room_id}->{sessid},
|
||||
});
|
||||
$matrix->adopt_future($f);
|
||||
}
|
||||
else {
|
||||
warn "Ignoring unrecognised callid: ".$event->{content}->{call_id};
|
||||
}
|
||||
}
|
||||
else {
|
||||
warn "Unhandled event: $event->{type}";
|
||||
}
|
||||
}
|
||||
|
||||
sub on_room_message
|
||||
{
|
||||
my ($room, $from, $content) = @_;
|
||||
my $room_id = $room->room_id;
|
||||
warn "[Matrix] in $room_id: $from: " . $content->{body} . "\n";
|
||||
}
|
||||
|
||||
Future->needs_all(
|
||||
$bot_matrix->login( %{ $CONFIG{"matrix-bot"} } )->then( sub {
|
||||
$bot_matrix->start;
|
||||
}),
|
||||
|
||||
$bot_verto->connect(
|
||||
%{ $CONFIG{"verto-bot"} },
|
||||
on_connect_error => sub { die "Cannot connect to verto - $_[-1]" },
|
||||
on_resolve_error => sub { die "Cannot resolve to verto - $_[-1]" },
|
||||
)->on_done( sub {
|
||||
warn("[Verto] connected to websocket");
|
||||
}),
|
||||
)->get;
|
||||
|
||||
$loop->attach_signal(
|
||||
PIPE => sub { warn "pipe\n" }
|
||||
);
|
||||
$loop->attach_signal(
|
||||
INT => sub { $loop->stop },
|
||||
);
|
||||
$loop->attach_signal(
|
||||
TERM => sub { $loop->stop },
|
||||
);
|
||||
|
||||
eval {
|
||||
$loop->run;
|
||||
} or my $e = $@;
|
||||
|
||||
# When the bot gets shut down, have it leave the rooms so it's clear to observers
|
||||
# that it is no longer running.
|
||||
# if( $CONFIG{"leave-on-shutdown"} // 1 ) {
|
||||
# print STDERR "Removing bot from Matrix rooms...\n";
|
||||
# Future->wait_all( map { $_->leave->else_done() } values %bot_matrix_rooms )->get;
|
||||
# }
|
||||
# else {
|
||||
# print STDERR "Leaving bot users in Matrix rooms.\n";
|
||||
# }
|
||||
|
||||
die $e if $e;
|
||||
|
||||
exit 0;
|
||||
|
||||
{
|
||||
my $json_id;
|
||||
my $requests;
|
||||
|
||||
sub send_verto_json_request
|
||||
{
|
||||
$json_id ||= 1;
|
||||
|
||||
my ($method, $params) = @_;
|
||||
my $json = {
|
||||
jsonrpc => "2.0",
|
||||
method => $method,
|
||||
params => $params,
|
||||
id => $json_id,
|
||||
};
|
||||
my $text = JSON->new->encode( $json );
|
||||
warn "[Verto] sending $text";
|
||||
$bot_verto->send_frame ( $text );
|
||||
my $request = $loop->new_future;
|
||||
$requests->{$json_id} = $request;
|
||||
$json_id++;
|
||||
return $request;
|
||||
}
|
||||
|
||||
sub send_verto_json_response
|
||||
{
|
||||
my ($result, $id) = @_;
|
||||
my $json = {
|
||||
jsonrpc => "2.0",
|
||||
result => $result,
|
||||
id => $id,
|
||||
};
|
||||
my $text = JSON->new->encode( $json );
|
||||
warn "[Verto] sending $text";
|
||||
$bot_verto->send_frame ( $text );
|
||||
}
|
||||
|
||||
sub on_verto_json
|
||||
{
|
||||
my $json = JSON->new->decode( $_[0] );
|
||||
if ($json->{method}) {
|
||||
if (($json->{method} eq 'verto.answer' && $json->{params}->{sdp}) ||
|
||||
$json->{method} eq 'verto.media') {
|
||||
|
||||
my $room_id = $roomid_by_callid->{$json->{params}->{callID}};
|
||||
my $room = $bot_matrix_rooms{$room_id};
|
||||
|
||||
if ($json->{params}->{sdp}) {
|
||||
# HACK HACK HACK HACK
|
||||
$room->_do_POST_json( "/send/m.call.answer", {
|
||||
call_id => $bridgestate->{$room_id}->{matrix_callid},
|
||||
version => 0,
|
||||
answer => {
|
||||
sdp => $json->{params}->{sdp},
|
||||
type => "answer",
|
||||
},
|
||||
})->then( sub {
|
||||
send_verto_json_response( {
|
||||
method => $json->{method},
|
||||
}, $json->{id});
|
||||
})->get;
|
||||
}
|
||||
}
|
||||
else {
|
||||
warn ("[Verto] unhandled method: " . $json->{method});
|
||||
send_verto_json_response( {
|
||||
method => $json->{method},
|
||||
}, $json->{id});
|
||||
}
|
||||
}
|
||||
elsif ($json->{result}) {
|
||||
$requests->{$json->{id}}->done($json->{result});
|
||||
}
|
||||
elsif ($json->{error}) {
|
||||
$requests->{$json->{id}}->fail($json->{error}->{message}, $json->{error});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,493 +0,0 @@
|
||||
#!/usr/bin/env perl
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
use 5.010; # //
|
||||
use IO::Socket::SSL qw(SSL_VERIFY_NONE);
|
||||
use IO::Async::Loop;
|
||||
use Net::Async::WebSocket::Client;
|
||||
use Net::Async::HTTP;
|
||||
use Net::Async::HTTP::Server;
|
||||
use JSON;
|
||||
use YAML;
|
||||
use Data::UUID;
|
||||
use Getopt::Long;
|
||||
use Data::Dumper;
|
||||
use URI::Encode qw(uri_encode uri_decode);
|
||||
|
||||
binmode STDOUT, ":encoding(UTF-8)";
|
||||
binmode STDERR, ":encoding(UTF-8)";
|
||||
|
||||
my $msisdn_to_matrix = {
|
||||
'447417892400' => '@matthew:matrix.org',
|
||||
};
|
||||
|
||||
my $matrix_to_msisdn = {};
|
||||
foreach (keys %$msisdn_to_matrix) {
|
||||
$matrix_to_msisdn->{$msisdn_to_matrix->{$_}} = $_;
|
||||
}
|
||||
|
||||
|
||||
my $loop = IO::Async::Loop->new;
|
||||
# Net::Async::HTTP + SSL + IO::Poll doesn't play well. See
|
||||
# https://rt.cpan.org/Ticket/Display.html?id=93107
|
||||
# ref $loop eq "IO::Async::Loop::Poll" and
|
||||
# warn "Using SSL with IO::Poll causes known memory-leaks!!\n";
|
||||
|
||||
GetOptions(
|
||||
'C|config=s' => \my $CONFIG,
|
||||
'eval-from=s' => \my $EVAL_FROM,
|
||||
) or exit 1;
|
||||
|
||||
if( defined $EVAL_FROM ) {
|
||||
# An emergency 'eval() this file' hack
|
||||
$SIG{HUP} = sub {
|
||||
my $code = do {
|
||||
open my $fh, "<", $EVAL_FROM or warn( "Cannot read - $!" ), return;
|
||||
local $/; <$fh>
|
||||
};
|
||||
|
||||
eval $code or warn "Cannot eval() - $@";
|
||||
};
|
||||
}
|
||||
|
||||
defined $CONFIG or die "Must supply --config\n";
|
||||
|
||||
my %CONFIG = %{ YAML::LoadFile( $CONFIG ) };
|
||||
|
||||
my %MATRIX_CONFIG = %{ $CONFIG{matrix} };
|
||||
# No harm in always applying this
|
||||
$MATRIX_CONFIG{SSL_verify_mode} = SSL_VERIFY_NONE;
|
||||
|
||||
my $bridgestate = {};
|
||||
my $roomid_by_callid = {};
|
||||
|
||||
my $sessid = lc new Data::UUID->create_str();
|
||||
my $as_token = $CONFIG{"matrix-bot"}->{as_token};
|
||||
my $hs_domain = $CONFIG{"matrix-bot"}->{domain};
|
||||
|
||||
my $http = Net::Async::HTTP->new();
|
||||
$loop->add( $http );
|
||||
|
||||
sub create_virtual_user
|
||||
{
|
||||
my ($localpart) = @_;
|
||||
my ( $response ) = $http->do_request(
|
||||
method => "POST",
|
||||
uri => URI->new(
|
||||
$CONFIG{"matrix"}->{server}.
|
||||
"/_matrix/client/api/v1/register?".
|
||||
"access_token=$as_token&user_id=$localpart"
|
||||
),
|
||||
content_type => "application/json",
|
||||
content => <<EOT
|
||||
{
|
||||
"type": "m.login.application_service",
|
||||
"user": "$localpart"
|
||||
}
|
||||
EOT
|
||||
)->get;
|
||||
warn $response->as_string if ($response->code != 200);
|
||||
}
|
||||
|
||||
my $http_server = Net::Async::HTTP::Server->new(
|
||||
on_request => sub {
|
||||
my $self = shift;
|
||||
my ( $req ) = @_;
|
||||
|
||||
my $response;
|
||||
my $path = uri_decode($req->path);
|
||||
warn("request: $path");
|
||||
if ($path =~ m#/users/\@(\+.*)#) {
|
||||
# when queried about virtual users, auto-create them in the HS
|
||||
my $localpart = $1;
|
||||
create_virtual_user($localpart);
|
||||
$response = HTTP::Response->new( 200 );
|
||||
$response->add_content('{}');
|
||||
$response->content_type( "application/json" );
|
||||
}
|
||||
elsif ($path =~ m#/transactions/(.*)#) {
|
||||
my $event = JSON->new->decode($req->body);
|
||||
print Dumper($event);
|
||||
|
||||
my $room_id = $event->{room_id};
|
||||
my %dp = %{$CONFIG{'verto-dialog-params'}};
|
||||
$dp{callID} = $bridgestate->{$room_id}->{callid};
|
||||
|
||||
if ($event->{type} eq 'm.room.membership') {
|
||||
my $membership = $event->{content}->{membership};
|
||||
my $state_key = $event->{state_key};
|
||||
my $room_id = $event->{state_id};
|
||||
|
||||
if ($membership eq 'invite') {
|
||||
# autojoin invites
|
||||
my ( $response ) = $http->do_request(
|
||||
method => "POST",
|
||||
uri => URI->new(
|
||||
$CONFIG{"matrix"}->{server}.
|
||||
"/_matrix/client/api/v1/rooms/$room_id/join?".
|
||||
"access_token=$as_token&user_id=$state_key"
|
||||
),
|
||||
content_type => "application/json",
|
||||
content => "{}",
|
||||
)->get;
|
||||
warn $response->as_string if ($response->code != 200);
|
||||
}
|
||||
}
|
||||
elsif ($event->{type} eq 'm.call.invite') {
|
||||
my $room_id = $event->{room_id};
|
||||
$bridgestate->{$room_id}->{matrix_callid} = $event->{content}->{call_id};
|
||||
$bridgestate->{$room_id}->{callid} = lc new Data::UUID->create_str();
|
||||
$bridgestate->{$room_id}->{sessid} = $sessid;
|
||||
# $bridgestate->{$room_id}->{offer} = $event->{content}->{offer}->{sdp};
|
||||
my $offer = $event->{content}->{offer}->{sdp};
|
||||
# $bridgestate->{$room_id}->{gathered_candidates} = 0;
|
||||
$roomid_by_callid->{ $bridgestate->{$room_id}->{callid} } = $room_id;
|
||||
# no trickle ICE in verto apparently
|
||||
|
||||
my $f = send_verto_json_request("verto.invite", {
|
||||
"sdp" => $offer,
|
||||
"dialogParams" => \%dp,
|
||||
"sessid" => $bridgestate->{$room_id}->{sessid},
|
||||
});
|
||||
$self->adopt_future($f);
|
||||
}
|
||||
# elsif ($event->{type} eq 'm.call.candidates') {
|
||||
# # XXX: this could fire for both matrix->verto and verto->matrix calls
|
||||
# # and races as it collects candidates. much better to just turn off
|
||||
# # candidate gathering in the webclient entirely for now
|
||||
#
|
||||
# my $room_id = $event->{room_id};
|
||||
# # XXX: compare call IDs
|
||||
# if (!$bridgestate->{$room_id}->{gathered_candidates}) {
|
||||
# $bridgestate->{$room_id}->{gathered_candidates} = 1;
|
||||
# my $offer = $bridgestate->{$room_id}->{offer};
|
||||
# my $candidate_block = "";
|
||||
# foreach (@{$event->{content}->{candidates}}) {
|
||||
# $candidate_block .= "a=" . $_->{candidate} . "\r\n";
|
||||
# }
|
||||
# # XXX: collate using the right m= line - for now assume audio call
|
||||
# $offer =~ s/(a=rtcp.*[\r\n]+)/$1$candidate_block/;
|
||||
#
|
||||
# my $f = send_verto_json_request("verto.invite", {
|
||||
# "sdp" => $offer,
|
||||
# "dialogParams" => \%dp,
|
||||
# "sessid" => $bridgestate->{$room_id}->{sessid},
|
||||
# });
|
||||
# $self->adopt_future($f);
|
||||
# }
|
||||
# else {
|
||||
# # ignore them, as no trickle ICE, although we might as well
|
||||
# # batch them up
|
||||
# # foreach (@{$event->{content}->{candidates}}) {
|
||||
# # push @{$bridgestate->{$room_id}->{candidates}}, $_;
|
||||
# # }
|
||||
# }
|
||||
# }
|
||||
elsif ($event->{type} eq 'm.call.answer') {
|
||||
# grab the answer and relay it to verto as a verto.answer
|
||||
my $room_id = $event->{room_id};
|
||||
|
||||
my $answer = $event->{content}->{answer}->{sdp};
|
||||
my $f = send_verto_json_request("verto.answer", {
|
||||
"sdp" => $answer,
|
||||
"dialogParams" => \%dp,
|
||||
"sessid" => $bridgestate->{$room_id}->{sessid},
|
||||
});
|
||||
$self->adopt_future($f);
|
||||
}
|
||||
elsif ($event->{type} eq 'm.call.hangup') {
|
||||
my $room_id = $event->{room_id};
|
||||
if ($bridgestate->{$room_id}->{matrix_callid} eq $event->{content}->{call_id}) {
|
||||
my $f = send_verto_json_request("verto.bye", {
|
||||
"dialogParams" => \%dp,
|
||||
"sessid" => $bridgestate->{$room_id}->{sessid},
|
||||
});
|
||||
$self->adopt_future($f);
|
||||
}
|
||||
else {
|
||||
warn "Ignoring unrecognised callid: ".$event->{content}->{call_id};
|
||||
}
|
||||
}
|
||||
else {
|
||||
warn "Unhandled event: $event->{type}";
|
||||
}
|
||||
|
||||
$response = HTTP::Response->new( 200 );
|
||||
$response->add_content('{}');
|
||||
$response->content_type( "application/json" );
|
||||
}
|
||||
else {
|
||||
warn "Unhandled path: $path";
|
||||
$response = HTTP::Response->new( 404 );
|
||||
}
|
||||
|
||||
$req->respond( $response );
|
||||
},
|
||||
);
|
||||
$loop->add( $http_server );
|
||||
|
||||
$http_server->listen(
|
||||
addr => { family => "inet", socktype => "stream", port => 8009 },
|
||||
on_listen_error => sub { die "Cannot listen - $_[-1]\n" },
|
||||
);
|
||||
|
||||
my $bot_verto = Net::Async::WebSocket::Client->new(
|
||||
on_frame => sub {
|
||||
my ( $self, $frame ) = @_;
|
||||
warn "[Verto] receiving $frame";
|
||||
on_verto_json($frame);
|
||||
},
|
||||
);
|
||||
$loop->add( $bot_verto );
|
||||
|
||||
my $verto_connecting = $loop->new_future;
|
||||
$bot_verto->connect(
|
||||
%{ $CONFIG{"verto-bot"} },
|
||||
on_connected => sub {
|
||||
warn("[Verto] connected to websocket");
|
||||
if (not $verto_connecting->is_done) {
|
||||
$verto_connecting->done($bot_verto);
|
||||
|
||||
send_verto_json_request("login", {
|
||||
'login' => $CONFIG{'verto-dialog-params'}{'login'},
|
||||
'passwd' => $CONFIG{'verto-config'}{'passwd'},
|
||||
'sessid' => $sessid,
|
||||
});
|
||||
}
|
||||
},
|
||||
on_connect_error => sub { die "Cannot connect to verto - $_[-1]" },
|
||||
on_resolve_error => sub { die "Cannot resolve to verto - $_[-1]" },
|
||||
);
|
||||
|
||||
# die Dumper($verto_connecting);
|
||||
|
||||
my $as_url = $CONFIG{"matrix-bot"}->{as_url};
|
||||
|
||||
Future->needs_all(
|
||||
$http->do_request(
|
||||
method => "POST",
|
||||
uri => URI->new( $CONFIG{"matrix"}->{server}."/_matrix/appservice/v1/register" ),
|
||||
content_type => "application/json",
|
||||
content => <<EOT
|
||||
{
|
||||
"as_token": "$as_token",
|
||||
"url": "$as_url",
|
||||
"namespaces": { "users": [ { "regex": "\@\\\\+.*", "exclusive": false } ] }
|
||||
}
|
||||
EOT
|
||||
)->then( sub{
|
||||
my ($response) = (@_);
|
||||
warn $response->as_string if ($response->code != 200);
|
||||
return Future->done;
|
||||
}),
|
||||
$verto_connecting,
|
||||
)->get;
|
||||
|
||||
$loop->attach_signal(
|
||||
PIPE => sub { warn "pipe\n" }
|
||||
);
|
||||
$loop->attach_signal(
|
||||
INT => sub { $loop->stop },
|
||||
);
|
||||
$loop->attach_signal(
|
||||
TERM => sub { $loop->stop },
|
||||
);
|
||||
|
||||
eval {
|
||||
$loop->run;
|
||||
} or my $e = $@;
|
||||
|
||||
die $e if $e;
|
||||
|
||||
exit 0;
|
||||
|
||||
{
|
||||
my $json_id;
|
||||
my $requests;
|
||||
|
||||
sub send_verto_json_request
|
||||
{
|
||||
$json_id ||= 1;
|
||||
|
||||
my ($method, $params) = @_;
|
||||
my $json = {
|
||||
jsonrpc => "2.0",
|
||||
method => $method,
|
||||
params => $params,
|
||||
id => $json_id,
|
||||
};
|
||||
my $text = JSON->new->encode( $json );
|
||||
warn "[Verto] sending $text";
|
||||
$bot_verto->send_frame ( $text );
|
||||
my $request = $loop->new_future;
|
||||
$requests->{$json_id} = $request;
|
||||
$json_id++;
|
||||
return $request;
|
||||
}
|
||||
|
||||
sub send_verto_json_response
|
||||
{
|
||||
my ($result, $id) = @_;
|
||||
my $json = {
|
||||
jsonrpc => "2.0",
|
||||
result => $result,
|
||||
id => $id,
|
||||
};
|
||||
my $text = JSON->new->encode( $json );
|
||||
warn "[Verto] sending $text";
|
||||
$bot_verto->send_frame ( $text );
|
||||
}
|
||||
|
||||
sub on_verto_json
|
||||
{
|
||||
my $json = JSON->new->decode( $_[0] );
|
||||
if ($json->{method}) {
|
||||
if (($json->{method} eq 'verto.answer' && $json->{params}->{sdp}) ||
|
||||
$json->{method} eq 'verto.media') {
|
||||
|
||||
my $caller = $json->{dialogParams}->{caller_id_number};
|
||||
my $callee = $json->{dialogParams}->{destination_number};
|
||||
my $caller_user = '@+' . $caller . ':' . $hs_domain;
|
||||
my $callee_user = $msisdn_to_matrix->{$callee} || warn "unrecognised callee: $callee";
|
||||
my $room_id = $roomid_by_callid->{$json->{params}->{callID}};
|
||||
|
||||
if ($json->{params}->{sdp}) {
|
||||
$http->do_request(
|
||||
method => "POST",
|
||||
uri => URI->new(
|
||||
$CONFIG{"matrix"}->{server}.
|
||||
"/_matrix/client/api/v1/send/m.call.answer?".
|
||||
"access_token=$as_token&user_id=$caller_user"
|
||||
),
|
||||
content_type => "application/json",
|
||||
content => JSON->new->encode({
|
||||
call_id => $bridgestate->{$room_id}->{matrix_callid},
|
||||
version => 0,
|
||||
answer => {
|
||||
sdp => $json->{params}->{sdp},
|
||||
type => "answer",
|
||||
},
|
||||
}),
|
||||
)->then( sub {
|
||||
send_verto_json_response( {
|
||||
method => $json->{method},
|
||||
}, $json->{id});
|
||||
})->get;
|
||||
}
|
||||
}
|
||||
elsif ($json->{method} eq 'verto.invite') {
|
||||
my $caller = $json->{dialogParams}->{caller_id_number};
|
||||
my $callee = $json->{dialogParams}->{destination_number};
|
||||
my $caller_user = '@+' . $caller . ':' . $hs_domain;
|
||||
my $callee_user = $msisdn_to_matrix->{$callee} || warn "unrecognised callee: $callee";
|
||||
|
||||
my $alias = ($caller lt $callee) ? ($caller.'-'.$callee) : ($callee.'-'.$caller);
|
||||
my $room_id;
|
||||
|
||||
# create a virtual user for the caller if needed.
|
||||
create_virtual_user($caller);
|
||||
|
||||
# create a room of form #peer-peer and invite the callee
|
||||
$http->do_request(
|
||||
method => "POST",
|
||||
uri => URI->new(
|
||||
$CONFIG{"matrix"}->{server}.
|
||||
"/_matrix/client/api/v1/createRoom?".
|
||||
"access_token=$as_token&user_id=$caller_user"
|
||||
),
|
||||
content_type => "application/json",
|
||||
content => JSON->new->encode({
|
||||
room_alias_name => $alias,
|
||||
invite => [ $callee_user ],
|
||||
}),
|
||||
)->then( sub {
|
||||
my ( $response ) = @_;
|
||||
my $resp = JSON->new->decode($response->content);
|
||||
$room_id = $resp->{room_id};
|
||||
$roomid_by_callid->{$json->{params}->{callID}} = $room_id;
|
||||
})->get;
|
||||
|
||||
# join it
|
||||
my ($response) = $http->do_request(
|
||||
method => "POST",
|
||||
uri => URI->new(
|
||||
$CONFIG{"matrix"}->{server}.
|
||||
"/_matrix/client/api/v1/join/$room_id?".
|
||||
"access_token=$as_token&user_id=$caller_user"
|
||||
),
|
||||
content_type => "application/json",
|
||||
content => '{}',
|
||||
)->get;
|
||||
|
||||
$bridgestate->{$room_id}->{matrix_callid} = lc new Data::UUID->create_str();
|
||||
$bridgestate->{$room_id}->{callid} = $json->{dialogParams}->{callID};
|
||||
$bridgestate->{$room_id}->{sessid} = $sessid;
|
||||
|
||||
# put the m.call.invite in there
|
||||
$http->do_request(
|
||||
method => "POST",
|
||||
uri => URI->new(
|
||||
$CONFIG{"matrix"}->{server}.
|
||||
"/_matrix/client/api/v1/send/m.call.invite?".
|
||||
"access_token=$as_token&user_id=$caller_user"
|
||||
),
|
||||
content_type => "application/json",
|
||||
content => JSON->new->encode({
|
||||
call_id => $bridgestate->{$room_id}->{matrix_callid},
|
||||
version => 0,
|
||||
answer => {
|
||||
sdp => $json->{params}->{sdp},
|
||||
type => "offer",
|
||||
},
|
||||
}),
|
||||
)->then( sub {
|
||||
# acknowledge the verto
|
||||
send_verto_json_response( {
|
||||
method => $json->{method},
|
||||
}, $json->{id});
|
||||
})->get;
|
||||
}
|
||||
elsif ($json->{method} eq 'verto.bye') {
|
||||
my $caller = $json->{dialogParams}->{caller_id_number};
|
||||
my $callee = $json->{dialogParams}->{destination_number};
|
||||
my $caller_user = '@+' . $caller . ':' . $hs_domain;
|
||||
my $callee_user = $msisdn_to_matrix->{$callee} || warn "unrecognised callee: $callee";
|
||||
my $room_id = $roomid_by_callid->{$json->{params}->{callID}};
|
||||
|
||||
# put the m.call.hangup into the room
|
||||
$http->do_request(
|
||||
method => "POST",
|
||||
uri => URI->new(
|
||||
$CONFIG{"matrix"}->{server}.
|
||||
"/_matrix/client/api/v1/send/m.call.hangup?".
|
||||
"access_token=$as_token&user_id=$caller_user"
|
||||
),
|
||||
content_type => "application/json",
|
||||
content => JSON->new->encode({
|
||||
call_id => $bridgestate->{$room_id}->{matrix_callid},
|
||||
version => 0,
|
||||
}),
|
||||
)->then( sub {
|
||||
# acknowledge the verto
|
||||
send_verto_json_response( {
|
||||
method => $json->{method},
|
||||
}, $json->{id});
|
||||
})->get;
|
||||
}
|
||||
else {
|
||||
warn ("[Verto] unhandled method: " . $json->{method});
|
||||
send_verto_json_response( {
|
||||
method => $json->{method},
|
||||
}, $json->{id});
|
||||
}
|
||||
}
|
||||
elsif ($json->{result}) {
|
||||
$requests->{$json->{id}}->done($json->{result});
|
||||
}
|
||||
elsif ($json->{error}) {
|
||||
$requests->{$json->{id}}->fail($json->{error}->{message}, $json->{error});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,32 +0,0 @@
# Generic Matrix connection params
matrix:
    server: 'matrix.org'
    SSL: 1

# Bot-user connection details
matrix-bot:
    user_id: '@vertobot:matrix.org'
    password: ''
    domain: 'matrix.org'
    as_url: 'http://localhost:8009'
    as_token: 'vertobot123'

verto-bot:
    host: webrtc.freeswitch.org
    service: 8081
    url: "ws://webrtc.freeswitch.org:8081/"

verto-config:
    passwd: 1234

verto-dialog-params:
    useVideo: false
    useStereo: false
    tag: "webcam"
    login: "1008@webrtc.freeswitch.org"
    destination_number: "9664"
    caller_id_name: "FreeSWITCH User"
    caller_id_number: "1008"
    callID: ""
    remote_caller_id_name: "Outbound Call"
    remote_caller_id_number: "9664"
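Both bot scripts above load a file like this via their --config option; a typical invocation might be (the script filename here is a placeholder, since the diff does not show it):

# Run the verto bridge bot against the config above.
perl verto-bot.pl --config config.yaml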
@@ -1,14 +0,0 @@
requires 'parent', 0;
requires 'Future', '>= 0.29';
requires 'Net::Async::Matrix', '>= 0.11_002';
requires 'Net::Async::Matrix::Utils';
requires 'Net::Async::WebSocket::Protocol', 0;
requires 'Data::UUID', 0;
requires 'IO::Async', '>= 0.63';
requires 'IO::Async::SSL', 0;
requires 'IO::Socket::SSL', 0;
requires 'YAML', 0;
requires 'JSON', 0;
requires 'Getopt::Long', 0;
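The cpanfile above can be handed to cpanminus to install the bot's Perl dependencies; a sketch, assuming cpanm is available:

# Install the dependencies listed in ./cpanfile into the current Perl environment.
cpanm --installdeps .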
@@ -1,207 +0,0 @@
|
||||
# JSON is shown in *reverse* chronological order.
|
||||
# Send v. Receive is implicit.
|
||||
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 7,
|
||||
"result": {
|
||||
"callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
|
||||
"message": "CALL ENDED",
|
||||
"causeCode": 16,
|
||||
"cause": "NORMAL_CLEARING",
|
||||
"sessid": "03a11060-3e14-23b6-c620-51b892c52983"
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"method": "verto.bye",
|
||||
"params": {
|
||||
"dialogParams": {
|
||||
"useVideo": false,
|
||||
"useStereo": true,
|
||||
"tag": "webcam",
|
||||
"login": "1008@webrtc.freeswitch.org",
|
||||
"destination_number": "9664",
|
||||
"caller_id_name": "FreeSWITCH User",
|
||||
"caller_id_number": "1008",
|
||||
"callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
|
||||
"remote_caller_id_name": "Outbound Call",
|
||||
"remote_caller_id_number": "9664"
|
||||
},
|
||||
"sessid": "03a11060-3e14-23b6-c620-51b892c52983"
|
||||
},
|
||||
"id": 7
|
||||
}
|
||||
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 6,
|
||||
"result": {
|
||||
"callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
|
||||
"action": "toggleHold",
|
||||
"holdState": "active",
|
||||
"sessid": "03a11060-3e14-23b6-c620-51b892c52983"
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"method": "verto.modify",
|
||||
"params": {
|
||||
"action": "toggleHold",
|
||||
"dialogParams": {
|
||||
"useVideo": false,
|
||||
"useStereo": true,
|
||||
"tag": "webcam",
|
||||
"login": "1008@webrtc.freeswitch.org",
|
||||
"destination_number": "9664",
|
||||
"caller_id_name": "FreeSWITCH User",
|
||||
"caller_id_number": "1008",
|
||||
"callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
|
||||
"remote_caller_id_name": "Outbound Call",
|
||||
"remote_caller_id_number": "9664"
|
||||
},
|
||||
"sessid": "03a11060-3e14-23b6-c620-51b892c52983"
|
||||
},
|
||||
"id": 6
|
||||
}
|
||||
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 5,
|
||||
"result": {
|
||||
"callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
|
||||
"action": "toggleHold",
|
||||
"holdState": "held",
|
||||
"sessid": "03a11060-3e14-23b6-c620-51b892c52983"
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"method": "verto.modify",
|
||||
"params": {
|
||||
"action": "toggleHold",
|
||||
"dialogParams": {
|
||||
"useVideo": false,
|
||||
"useStereo": true,
|
||||
"tag": "webcam",
|
||||
"login": "1008@webrtc.freeswitch.org",
|
||||
"destination_number": "9664",
|
||||
"caller_id_name": "FreeSWITCH User",
|
||||
"caller_id_number": "1008",
|
||||
"callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
|
||||
"remote_caller_id_name": "Outbound Call",
|
||||
"remote_caller_id_number": "9664"
|
||||
},
|
||||
"sessid": "03a11060-3e14-23b6-c620-51b892c52983"
|
||||
},
|
||||
"id": 5
|
||||
}
|
||||
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 349819,
|
||||
"result": {
|
||||
"method": "verto.answer"
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 349819,
|
||||
"method": "verto.answer",
|
||||
"params": {
|
||||
"callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
|
||||
"sdp": "v=0\no=FreeSWITCH 1417101432 1417101433 IN IP4 209.105.235.10\ns=FreeSWITCH\nc=IN IP4 209.105.235.10\nt=0 0\na=msid-semantic: WMS jA3rmwLVwUq1iE6TYEYHeLk2YTUlh1Vq\nm=audio 30134 RTP/SAVPF 111 126\na=rtpmap:111 opus/48000/2\na=fmtp:111 minptime=10; stereo=1\na=rtpmap:126 telephone-event/8000\na=silenceSupp:off - - - -\na=ptime:20\na=sendrecv\na=fingerprint:sha-256 F8:72:18:E9:72:89:99:22:5B:F8:B6:C6:C6:0D:C5:9B:B2:FB:BC:CA:8D:AB:13:8A:66:E1:37:38:A0:16:AA:41\na=rtcp-mux\na=rtcp:30134 IN IP4 209.105.235.10\na=ssrc:210967934 cname:rOIEajpw4FocakWY\na=ssrc:210967934 msid:jA3rmwLVwUq1iE6TYEYHeLk2YTUlh1Vq a0\na=ssrc:210967934 mslabel:jA3rmwLVwUq1iE6TYEYHeLk2YTUlh1Vq\na=ssrc:210967934 label:jA3rmwLVwUq1iE6TYEYHeLk2YTUlh1Vqa0\na=ice-ufrag:OKwTmGLapwmxn7OF\na=ice-pwd:MmaMwq8rVmtWxfLbQ7U2Ew3T\na=candidate:2372654928 1 udp 659136 209.105.235.10 30134 typ host generation 0\n"
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 4,
|
||||
"result": {
|
||||
"message": "CALL CREATED",
|
||||
"callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
|
||||
"sessid": "03a11060-3e14-23b6-c620-51b892c52983"
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"method": "verto.invite",
|
||||
"params": {
|
||||
"sdp": "v=0\r\no=- 1381685806032722557 2 IN IP4 127.0.0.1\r\ns=-\r\nt=0 0\r\na=group:BUNDLE audio\r\na=msid-semantic: WMS 6OOMyGAyJakjwaOOBtV7WcBCCuIW6PpuXsNg\r\nm=audio 63088 RTP/SAVPF 111 103 104 0 8 106 105 13 126\r\nc=IN IP4 81.138.8.249\r\na=rtcp:63088 IN IP4 81.138.8.249\r\na=candidate:460398169 1 udp 2122260223 10.10.79.10 49945 typ host generation 0\r\na=candidate:460398169 2 udp 2122260223 10.10.79.10 49945 typ host generation 0\r\na=candidate:3460887983 1 udp 2122194687 192.168.1.64 63088 typ host generation 0\r\na=candidate:3460887983 2 udp 2122194687 192.168.1.64 63088 typ host generation 0\r\na=candidate:945327227 1 udp 1685987071 81.138.8.249 63088 typ srflx raddr 192.168.1.64 rport 63088 generation 0\r\na=candidate:945327227 2 udp 1685987071 81.138.8.249 63088 typ srflx raddr 192.168.1.64 rport 63088 generation 0\r\na=candidate:1441981097 1 tcp 1518280447 10.10.79.10 0 typ host tcptype active generation 0\r\na=candidate:1441981097 2 tcp 1518280447 10.10.79.10 0 typ host tcptype active generation 0\r\na=candidate:2160789855 1 tcp 1518214911 192.168.1.64 0 typ host tcptype active generation 0\r\na=candidate:2160789855 2 tcp 1518214911 192.168.1.64 0 typ host tcptype active generation 0\r\na=ice-ufrag:cP4qeRhn0LpcpA88\r\na=ice-pwd:fREmgSkXsDLGUUH1bwfrBQhW\r\na=ice-options:google-ice\r\na=fingerprint:sha-256 AF:35:64:1B:62:8A:EF:27:AE:2B:88:2E:FE:78:29:0B:08:DA:64:6C:DE:02:57:E3:EE:B1:D7:86:B8:36:8F:B0\r\na=setup:actpass\r\na=mid:audio\r\na=extmap:1 urn:ietf:params:rtp-hdrext:ssrc-audio-level\r\na=extmap:3 http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time\r\na=sendrecv\r\na=rtcp-mux\r\na=rtpmap:111 opus/48000/2\r\na=fmtp:111 minptime=10; stereo=1\r\na=rtpmap:103 ISAC/16000\r\na=rtpmap:104 ISAC/32000\r\na=rtpmap:0 PCMU/8000\r\na=rtpmap:8 PCMA/8000\r\na=rtpmap:106 CN/32000\r\na=rtpmap:105 CN/16000\r\na=rtpmap:13 CN/8000\r\na=rtpmap:126 telephone-event/8000\r\na=maxptime:60\r\na=ssrc:558827154 cname:vdKHBNqa17t2gmE3\r\na=ssrc:558827154 msid:6OOMyGAyJakjwaOOBtV7WcBCCuIW6PpuXsNg bf1303fb-9833-4d7d-b9e4-b32cfe04acc3\r\na=ssrc:558827154 mslabel:6OOMyGAyJakjwaOOBtV7WcBCCuIW6PpuXsNg\r\na=ssrc:558827154 label:bf1303fb-9833-4d7d-b9e4-b32cfe04acc3\r\n",
|
||||
"dialogParams": {
|
||||
"useVideo": false,
|
||||
"useStereo": true,
|
||||
"tag": "webcam",
|
||||
"login": "1008@webrtc.freeswitch.org",
|
||||
"destination_number": "9664",
|
||||
"caller_id_name": "FreeSWITCH User",
|
||||
"caller_id_number": "1008",
|
||||
"callID": "12795aa6-2a8d-84ee-ce63-2e82ffe825ef",
|
||||
"remote_caller_id_name": "Outbound Call",
|
||||
"remote_caller_id_number": "9664"
|
||||
},
|
||||
"sessid": "03a11060-3e14-23b6-c620-51b892c52983"
|
||||
},
|
||||
"id": 4
|
||||
}
|
||||
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 3,
|
||||
"result": {
|
||||
"message": "logged in",
|
||||
"sessid": "03a11060-3e14-23b6-c620-51b892c52983"
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"error": {
|
||||
"code": -32000,
|
||||
"message": "Authentication Required"
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"method": "login",
|
||||
"params": {
|
||||
"login": "1008@webrtc.freeswitch.org",
|
||||
"passwd": "1234",
|
||||
"sessid": "03a11060-3e14-23b6-c620-51b892c52983"
|
||||
},
|
||||
"id": 3
|
||||
}
|
||||
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 2,
|
||||
"error": {
|
||||
"code": -32000,
|
||||
"message": "Authentication Required"
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"method": "login",
|
||||
"params": {
|
||||
"sessid": "03a11060-3e14-23b6-c620-51b892c52983"
|
||||
},
|
||||
"id": 1
|
||||
}
|
||||
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"method": "login",
|
||||
"params": {
|
||||
"sessid": "03a11060-3e14-23b6-c620-51b892c52983"
|
||||
},
|
||||
"id": 2
|
||||
}
|
||||
21
database-prepare-for-0.0.1.sh
Executable file
@@ -0,0 +1,21 @@
#!/bin/bash

# This will prepare a synapse database for running with v0.0.1 of synapse.
# It will store all the user information, but will *delete* all messages and
# room data.

set -e

cp "$1" "$1.bak"

DUMP=$(sqlite3 "$1" << 'EOF'
.dump users
.dump access_tokens
.dump presence
.dump profiles
EOF
)

rm "$1"

sqlite3 "$1" <<< "$DUMP"
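The script takes the path to the sqlite database as its only argument, for example:

# Back up homeserver.db and strip it down to users, access_tokens, presence and profiles.
./database-prepare-for-0.0.1.sh homeserver.db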
7
debian/.gitignore
vendored
@@ -1,7 +0,0 @@
/matrix-synapse-py3.*.debhelper
/matrix-synapse-py3.debhelper.log
/matrix-synapse-py3.substvars
/matrix-synapse-*/
/files
/debhelper-build-stamp
/.debhelper
32
debian/NEWS
vendored
@@ -1,32 +0,0 @@
matrix-synapse-py3 (0.34.0) stable; urgency=medium

  matrix-synapse-py3 is intended as a drop-in replacement for the existing
  matrix-synapse package. When the package is installed, matrix-synapse will be
  automatically uninstalled. The replacement should be relatively seamless,
  however, please note the following important differences to matrix-synapse:

  * Most importantly, the matrix-synapse service now runs under Python 3 rather
    than Python 2.7.

  * Synapse is installed into its own virtualenv (in /opt/venvs/matrix-synapse)
    instead of using the system python libraries. (This may mean that you can
    remove a number of old dependencies with `apt autoremove`).

  * If you have previously manually installed any custom python extensions
    (such as matrix-synapse-rest-auth) into the system python directories, you
    will need to reinstall them in the new virtualenv. Please consult the
    documentation of the relevant extensions for further details.

  matrix-synapse-py3 will take over responsibility for the existing
  configuration files, including the matrix-synapse systemd service.

  Beware, however, that `apt purge matrix-synapse` will *disable* the
  matrix-synapse service (so that it will not be started on reboot), even
  though that service is no longer being provided by the matrix-synapse
  package. It can be re-enabled with `systemctl enable matrix-synapse`.

  The matrix.org team will continue to provide Python 2 `matrix-synapse`
  packages for the next couple of releases, to allow time for system
  administrators to test the new packages.

 -- Richard van der Hoff <richard@matrix.org> Wed, 19 Dec 2018 14:00:00 +0000
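For the note above about custom python extensions, reinstalling into the new virtualenv amounts to using the venv's own pip; a sketch (the package name is a placeholder):

# Install a custom extension into the matrix-synapse-py3 virtualenv.
sudo /opt/venvs/matrix-synapse/bin/pip install <your-extension>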
91
debian/build_virtualenv
vendored
@@ -1,91 +0,0 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# runs dh_virtualenv to build the virtualenv in the build directory,
|
||||
# and then runs the trial tests against the installed synapse.
|
||||
|
||||
set -e
|
||||
|
||||
export DH_VIRTUALENV_INSTALL_ROOT=/opt/venvs
|
||||
|
||||
# make sure that the virtualenv links to the specific version of python, by
|
||||
# dereferencing the python3 symlink.
|
||||
#
|
||||
# Otherwise, if somebody tries to install (say) the stretch package on buster,
|
||||
# they will get a confusing error about "No module named 'synapse'", because
|
||||
# python won't look in the right directory. At least this way, the error will
|
||||
# be a *bit* more obvious.
|
||||
#
|
||||
SNAKE=`readlink -e /usr/bin/python3`
|
||||
|
||||
# try to set the CFLAGS so any compiled C extensions are compiled with the most
|
||||
# generic x64 instructions possible, so that compiling it on a new Intel chip
|
||||
# doesn't enable features not available on older ones or AMD.
|
||||
#
|
||||
# TODO: add similar things for non-amd64, or figure out a more generic way to
|
||||
# do this.
|
||||
|
||||
case `dpkg-architecture -q DEB_HOST_ARCH` in
|
||||
amd64)
|
||||
export CFLAGS=-march=x86-64
|
||||
;;
|
||||
esac
|
||||
|
||||
# Use --builtin-venv to use the better `venv` module from CPython 3.4+ rather
|
||||
# than the 2/3 compatible `virtualenv`.
|
||||
|
||||
dh_virtualenv \
|
||||
--install-suffix "matrix-synapse" \
|
||||
--builtin-venv \
|
||||
--setuptools \
|
||||
--python "$SNAKE" \
|
||||
--upgrade-pip \
|
||||
--preinstall="lxml" \
|
||||
--preinstall="mock" \
|
||||
--extra-pip-arg="--no-cache-dir" \
|
||||
--extra-pip-arg="--compile" \
|
||||
--extras="all"
|
||||
|
||||
PACKAGE_BUILD_DIR="debian/matrix-synapse-py3"
|
||||
VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse"
|
||||
TARGET_PYTHON="${VIRTUALENV_DIR}/bin/python"
|
||||
|
||||
# we copy the tests to a temporary directory so that we can put them on the
|
||||
# PYTHONPATH without putting the uninstalled synapse on the pythonpath.
|
||||
tmpdir=`mktemp -d`
|
||||
trap "rm -r $tmpdir" EXIT
|
||||
|
||||
cp -r tests "$tmpdir"
|
||||
|
||||
PYTHONPATH="$tmpdir" \
|
||||
"${TARGET_PYTHON}" -B -m twisted.trial --reporter=text -j2 tests
|
||||
|
||||
# build the config file
|
||||
"${TARGET_PYTHON}" -B "${VIRTUALENV_DIR}/bin/generate_config" \
|
||||
--config-dir="/etc/matrix-synapse" \
|
||||
--data-dir="/var/lib/matrix-synapse" |
|
||||
perl -pe '
|
||||
# tweak the paths to the tls certs and signing keys
|
||||
/^tls_.*_path:/ and s/SERVERNAME/homeserver/;
|
||||
/^signing_key_path:/ and s/SERVERNAME/homeserver/;
|
||||
|
||||
# tweak the pid file location
|
||||
/^pid_file:/ and s#:.*#: "/var/run/matrix-synapse.pid"#;
|
||||
|
||||
# tweak the path to the log config
|
||||
/^log_config:/ and s/SERVERNAME\.log\.config/log.yaml/;
|
||||
|
||||
# tweak the path to the media store
|
||||
/^media_store_path:/ and s#/media_store#/media#;
|
||||
|
||||
# remove the server_name setting, which is set in a separate file
|
||||
/^server_name:/ and $_ = "#\n# This is set in /etc/matrix-synapse/conf.d/server_name.yaml for Debian installations.\n# $_";
|
||||
|
||||
# remove the report_stats setting, which is set in a separate file
|
||||
/^# report_stats:/ and $_ = "";
|
||||
|
||||
' > "${PACKAGE_BUILD_DIR}/etc/matrix-synapse/homeserver.yaml"
|
||||
|
||||
|
||||
# add a dependency on the right version of python to substvars.
|
||||
PYPKG=`basename $SNAKE`
|
||||
echo "synapse:pydepends=$PYPKG" >> debian/matrix-synapse-py3.substvars
|
||||
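For orientation, a minimal sketch of how this script is normally invoked; the debian/rules hook is not part of this diff, so the override target below is an assumption rather than a quote:

    # assumed wiring in debian/rules (not shown in this diff):
    #   override_dh_virtualenv:
    #       ./debian/build_virtualenv
    # a regular package build then exercises it:
    dpkg-buildpackage -us -uc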
698 debian/changelog vendored
@@ -1,698 +0,0 @@
|
||||
matrix-synapse-py3 (0.99.2) stable; urgency=medium
|
||||
|
||||
* Fix overwriting of config settings on upgrade.
|
||||
* New synapse release 0.99.2.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Fri, 01 Mar 2019 10:55:08 +0000
|
||||
|
||||
matrix-synapse-py3 (0.99.1.1) stable; urgency=medium
|
||||
|
||||
* New synapse release 0.99.1.1
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Thu, 14 Feb 2019 17:19:44 +0000
|
||||
|
||||
matrix-synapse-py3 (0.99.1) stable; urgency=medium
|
||||
|
||||
[ Damjan Georgievski ]
|
||||
* Added ExecReload= in service unit file to send a HUP signal
|
||||
|
||||
[ Synapse Packaging team ]
|
||||
* New synapse release 0.99.1
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Thu, 14 Feb 2019 14:12:26 +0000
|
||||
|
||||
matrix-synapse-py3 (0.99.0) stable; urgency=medium
|
||||
|
||||
* New synapse release 0.99.0
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Tue, 5 Feb 2019 18:25:00 +0000
|
||||
|
||||
matrix-synapse-py3 (0.34.1.1++1) stable; urgency=medium
|
||||
|
||||
* Update conflicts specifications to allow smoother transition from matrix-synapse.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Sat, 12 Jan 2019 12:58:35 +0000
|
||||
|
||||
matrix-synapse-py3 (0.34.1.1) stable; urgency=high
|
||||
|
||||
* New synapse release 0.34.1.1
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Thu, 10 Jan 2019 15:04:52 +0000
|
||||
|
||||
matrix-synapse-py3 (0.34.1+1) stable; urgency=medium
|
||||
|
||||
* Remove 'Breaks: matrix-synapse-ldap3'. (matrix-synapse-py3 includes
|
||||
the matrix-synapse-ldap3 python files, which makes the
|
||||
matrix-synapse-ldap3 debian package redundant but not broken.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Wed, 09 Jan 2019 15:30:00 +0000
|
||||
|
||||
matrix-synapse-py3 (0.34.1) stable; urgency=medium
|
||||
|
||||
* New synapse release 0.34.1.
|
||||
* Update Conflicts specifications to allow installation alongside our
|
||||
matrix-synapse transitional package.
|
||||
|
||||
-- Synapse Packaging team <packages@matrix.org> Wed, 09 Jan 2019 14:52:24 +0000
|
||||
|
||||
matrix-synapse-py3 (0.34.0) stable; urgency=medium
|
||||
|
||||
* New synapse release 0.34.0.
|
||||
* Synapse is now installed into a Python 3 virtual environment with
|
||||
up-to-date dependencies.
|
||||
* The matrix-synapse service will now be restarted when the package is
|
||||
upgraded.
|
||||
(Fixes https://github.com/matrix-org/package-synapse-debian/issues/18)
|
||||
|
||||
-- Synapse packaging team <packages@matrix.org> Wed, 19 Dec 2018 14:00:00 +0000
|
||||
|
||||
matrix-synapse (0.33.9-1matrix1) stretch; urgency=medium
|
||||
|
||||
[ Erik Johnston ]
|
||||
* Remove dependency on python-pydenticon
|
||||
|
||||
[ Richard van der Hoff ]
|
||||
* New upstream version 0.33.9
|
||||
* Refresh patches for 0.33.9
|
||||
|
||||
-- Richard van der Hoff <richard@matrix.org> Tue, 20 Nov 2018 10:26:05 +0000
|
||||
|
||||
matrix-synapse (0.33.8-1) stretch; urgency=medium
|
||||
|
||||
* New upstream version 0.33.8
|
||||
|
||||
-- Erik Johnston <erik@matrix.org> Thu, 01 Nov 2018 14:33:26 +0000
|
||||
|
||||
matrix-synapse (0.33.7-1matrix1) stretch; urgency=medium
|
||||
|
||||
* New upstream version 0.33.7
|
||||
|
||||
-- Richard van der Hoff <richard@matrix.org> Thu, 18 Oct 2018 16:18:26 +0100
|
||||
|
||||
matrix-synapse (0.33.6-1matrix1) stretch; urgency=medium
|
||||
|
||||
* Imported Upstream version 0.33.6
|
||||
* Remove redundant explicit dep on python-bcrypt
|
||||
* Run the tests during build
|
||||
* Add dependency on python-attr 16.0
|
||||
* Refresh patches for 0.33.6
|
||||
|
||||
-- Richard van der Hoff <richard@matrix.org> Thu, 04 Oct 2018 14:40:29 +0100
|
||||
|
||||
matrix-synapse (0.33.5.1-1matrix1) stretch; urgency=medium
|
||||
|
||||
* Imported Upstream version 0.33.5.1
|
||||
|
||||
-- Richard van der Hoff <richard@matrix.org> Mon, 24 Sep 2018 18:20:51 +0100
|
||||
|
||||
matrix-synapse (0.33.5-1matrix1) stretch; urgency=medium
|
||||
|
||||
* Imported Upstream version 0.33.5
|
||||
|
||||
-- Richard van der Hoff <richard@matrix.org> Mon, 24 Sep 2018 16:06:23 +0100
|
||||
|
||||
matrix-synapse (0.33.4-1mx1) stretch; urgency=medium
|
||||
|
||||
* Imported Upstream version 0.33.4
|
||||
* Avoid telling people to install packages with pip
|
||||
(fixes https://github.com/matrix-org/synapse/issues/3743)
|
||||
|
||||
-- Richard van der Hoff <richard@matrix.org> Fri, 07 Sep 2018 14:06:17 +0100
|
||||
|
||||
matrix-synapse (0.33.3.1-1mx1) stretch; urgency=critical
|
||||
|
||||
[ Richard van der Hoff ]
|
||||
* Imported Upstream version 0.33.3.1
|
||||
|
||||
-- Richard van der Hoff <richard@matrix.org> Thu, 06 Sep 2018 11:20:37 +0100
|
||||
|
||||
matrix-synapse (0.33.3-2) stretch; urgency=medium
|
||||
|
||||
* We now require python-twisted 17.1.0 or later
|
||||
* Add recommendations for python-psycopg2 and python-lxml
|
||||
|
||||
-- Richard van der Hoff <richard@matrix.org> Thu, 23 Aug 2018 19:04:08 +0100
|
||||
|
||||
matrix-synapse (0.33.3-1) jessie; urgency=medium
|
||||
|
||||
* New upstream version 0.33.3
|
||||
|
||||
-- Richard van der Hoff <richard@matrix.org> Wed, 22 Aug 2018 14:50:30 +0100
|
||||
|
||||
matrix-synapse (0.33.2-1) jessie; urgency=medium
|
||||
|
||||
* New upstream version 0.33.2
|
||||
|
||||
-- Richard van der Hoff <richard@matrix.org> Thu, 09 Aug 2018 15:40:42 +0100
|
||||
|
||||
matrix-synapse (0.33.1-1) jessie; urgency=medium
|
||||
|
||||
* New upstream version 0.33.1
|
||||
|
||||
-- Erik Johnston <erik@matrix.org> Thu, 02 Aug 2018 15:52:19 +0100
|
||||
|
||||
matrix-synapse (0.33.0-1) jessie; urgency=medium
|
||||
|
||||
* New upstream version 0.33.0
|
||||
|
||||
-- Richard van der Hoff <richard@matrix.org> Thu, 19 Jul 2018 13:38:41 +0100
|
||||
|
||||
matrix-synapse (0.32.1-1) jessie; urgency=medium
|
||||
|
||||
* New upstream version 0.32.1
|
||||
|
||||
-- Richard van der Hoff <richard@matrix.org> Fri, 06 Jul 2018 17:16:29 +0100
|
||||
|
||||
matrix-synapse (0.32.0-1) jessie; urgency=medium
|
||||
|
||||
* New upstream version 0.32.0
|
||||
|
||||
-- Erik Johnston <erik@matrix.org> Fri, 06 Jul 2018 15:34:06 +0100
|
||||
|
||||
matrix-synapse (0.31.2-1) jessie; urgency=high
|
||||
|
||||
* New upstream version 0.31.2
|
||||
|
||||
-- Richard van der Hoff <richard@matrix.org> Thu, 14 Jun 2018 16:49:07 +0100
|
||||
|
||||
matrix-synapse (0.31.1-1) jessie; urgency=medium
|
||||
|
||||
* New upstream version 0.31.1
|
||||
* Require python-prometheus-client >= 0.0.14
|
||||
|
||||
-- Richard van der Hoff <richard@matrix.org> Fri, 08 Jun 2018 16:11:55 +0100
|
||||
|
||||
matrix-synapse (0.31.0-1) jessie; urgency=medium
|
||||
|
||||
* New upstream version 0.31.0
|
||||
|
||||
-- Richard van der Hoff <richard@matrix.org> Wed, 06 Jun 2018 17:23:10 +0100
|
||||
|
||||
matrix-synapse (0.30.0-1) jessie; urgency=medium
|
||||
|
||||
[ Michael Kaye ]
|
||||
* update homeserver.yaml to be somewhat more modern.
|
||||
|
||||
[ Erik Johnston ]
|
||||
* New upstream version 0.30.0
|
||||
|
||||
-- Erik Johnston <erik@matrix.org> Thu, 24 May 2018 16:43:16 +0100
|
||||
|
||||
matrix-synapse (0.29.0-1) jessie; urgency=medium
|
||||
|
||||
* New upstream version 0.29.0
|
||||
|
||||
-- Erik Johnston <erik@matrix.org> Wed, 16 May 2018 17:43:06 +0100
|
||||
|
||||
matrix-synapse (0.28.1-1) jessie; urgency=medium
|
||||
|
||||
* New upstream version 0.28.1
|
||||
|
||||
-- Erik Johnston <erik@matrix.org> Tue, 01 May 2018 19:21:39 +0100
|
||||
|
||||
matrix-synapse (0.28.0-1) jessie; urgency=medium
|
||||
|
||||
* New upstream 0.28.0
|
||||
|
||||
-- Erik Johnston <erik@matrix.org> Fri, 27 Apr 2018 13:15:49 +0100
|
||||
|
||||
matrix-synapse (0.27.4-1) jessie; urgency=medium
|
||||
|
||||
* Bump canonicaljson version
|
||||
* New upstream 0.27.4
|
||||
|
||||
-- Erik Johnston <erik@matrix.org> Fri, 13 Apr 2018 13:37:47 +0100
|
||||
|
||||
matrix-synapse (0.27.3-1) jessie; urgency=medium
|
||||
|
||||
* Report stats should default to off
|
||||
* Refresh patches
|
||||
* New upstream 0.27.3
|
||||
|
||||
-- Erik Johnston <erik@matrix.org> Wed, 11 Apr 2018 11:43:47 +0100
|
||||
|
||||
matrix-synapse (0.27.2-1) jessie; urgency=medium
|
||||
|
||||
* New upstream version 0.27.2
|
||||
|
||||
-- Erik Johnston <erik@matrix.org> Mon, 26 Mar 2018 16:41:57 +0100
|
||||
|
||||
matrix-synapse (0.27.1-1) jessie; urgency=medium
|
||||
|
||||
* New upstream version 0.27.1
|
||||
|
||||
-- Erik Johnston <erik@matrix.org> Mon, 26 Mar 2018 16:22:03 +0100
|
||||
|
||||
matrix-synapse (0.27.0-2) jessie; urgency=medium
|
||||
|
||||
* Fix bcrypt dependency
|
||||
|
||||
-- Erik Johnston <erik@matrix.org> Mon, 26 Mar 2018 16:00:26 +0100
|
||||
|
||||
matrix-synapse (0.27.0-1) jessie; urgency=medium
|
||||
|
||||
* New upstream version 0.27.0
|
||||
|
||||
-- Erik Johnston <erik@matrix.org> Mon, 26 Mar 2018 15:07:52 +0100
|
||||
|
||||
matrix-synapse (0.26.1-1) jessie; urgency=medium
|
||||
|
||||
* Ignore RC
|
||||
* New upstream version 0.26.1
|
||||
|
||||
-- Erik Johnston <erik@matrix.org> Fri, 16 Mar 2018 00:40:08 +0000
|
||||
|
||||
matrix-synapse (0.26.0-1) jessie; urgency=medium
|
||||
|
||||
[ Richard van der Hoff ]
|
||||
* Remove `level` for `file` log handler
|
||||
|
||||
[ Erik Johnston ]
|
||||
|
||||
-- Erik Johnston <erik@matrix.org> Fri, 05 Jan 2018 11:21:26 +0000
|
||||
|
||||
matrix-synapse (0.25.1-1) jessie; urgency=medium
|
||||
|
||||
* New upstream version 0.25.1
|
||||
|
||||
-- Erik Johnston <erik@matrix.org> Mon, 20 Nov 2017 10:05:37 +0000
|
||||
|
||||
matrix-synapse (0.25.0-1) jessie; urgency=medium
|
||||
|
||||
* New upstream version 0.25.0
|
||||
|
||||
-- Erik Johnston <erik@matrix.org> Wed, 15 Nov 2017 11:36:32 +0000
|
||||
|
||||
matrix-synapse (0.24.1-1) jessie; urgency=medium
|
||||
|
||||
* New upstream version 0.24.1
|
||||
|
||||
-- Erik Johnston <erik@matrix.org> Tue, 24 Oct 2017 15:05:03 +0100
|
||||
|
||||
matrix-synapse (0.24.0-1) jessie; urgency=medium
|
||||
|
||||
* New upstream version 0.24.0
|
||||
|
||||
-- Erik Johnston <erik@matrix.org> Mon, 23 Oct 2017 14:11:46 +0100
|
||||
|
||||
matrix-synapse (0.23.1-1) xenial; urgency=medium
|
||||
|
||||
* Imported upstream version 0.23.1
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Thu, 05 Oct 2017 15:28:25 +0100
|
||||
|
||||
matrix-synapse (0.23.0-1) jessie; urgency=medium
|
||||
|
||||
* Fix patch after refactor
|
||||
* Add patch to remove requirement on affinity package
|
||||
* refresh webclient patch
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Mon, 02 Oct 2017 15:34:57 +0100
|
||||
|
||||
matrix-synapse (0.22.1-1) jessie; urgency=medium
|
||||
|
||||
* Imported Upstream version 0.22.1
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Thu, 06 Jul 2017 18:14:13 +0100
|
||||
|
||||
matrix-synapse (0.22.0-1) jessie; urgency=medium
|
||||
|
||||
* Imported upstream version 0.22.0
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Thu, 06 Jul 2017 10:47:45 +0100
|
||||
|
||||
matrix-synapse (0.21.1-1) jessie; urgency=medium
|
||||
|
||||
* Imported upstream version 0.21.1
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Thu, 15 Jun 2017 13:31:13 +0100
|
||||
|
||||
matrix-synapse (0.21.0-1) jessie; urgency=medium
|
||||
|
||||
* Imported upstream version 0.21.0
|
||||
* Update patches
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Thu, 18 May 2017 14:16:54 +0100
|
||||
|
||||
matrix-synapse (0.20.0-2) jessie; urgency=medium
|
||||
|
||||
* Depend on python-jsonschema
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Wed, 12 Apr 2017 10:41:46 +0100
|
||||
|
||||
matrix-synapse (0.20.0-1) jessie; urgency=medium
|
||||
|
||||
* Imported upstream version 0.20.0
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Tue, 11 Apr 2017 12:58:26 +0100
|
||||
|
||||
matrix-synapse (0.19.3-1) jessie; urgency=medium
|
||||
|
||||
* Imported upstream version 0.19.3
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Tue, 21 Mar 2017 13:45:41 +0000
|
||||
|
||||
matrix-synapse (0.19.2-1) jessie; urgency=medium
|
||||
|
||||
[ Sunil Mohan Adapa ]
|
||||
* Bump standards version to 3.9.8
|
||||
* Add debian/copyright file
|
||||
* Don't ignore errors in debian/config
|
||||
* Reformat depenedencies in debian/control
|
||||
* Internationalize strings in template file
|
||||
* Update package description
|
||||
* Add lsb-base as dependency
|
||||
* Update questions for debconf style
|
||||
* Add man pages for all binaries
|
||||
|
||||
[ Erik Johnston ]
|
||||
* Imported upstream version 0.19.2
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Tue, 21 Feb 2017 13:55:00 +0000
|
||||
|
||||
matrix-synapse (0.19.1-1) jessie; urgency=medium
|
||||
|
||||
* Imported upstream version 0.19.1
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Thu, 09 Feb 2017 11:53:27 +0000
|
||||
|
||||
matrix-synapse (0.19.0-1) jessie; urgency=medium
|
||||
|
||||
This build requires python-twisted 0.19.0, which may need to be installed
|
||||
from backports.
|
||||
|
||||
[ Bryce Chidester ]
|
||||
* Add EnvironmentFile to the systemd service
|
||||
* Create matrix-synapse.default
|
||||
|
||||
[ Erik Johnston ]
|
||||
* Imported upstream version 0.19.0
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Sat, 04 Feb 2017 09:58:29 +0000
|
||||
|
||||
matrix-synapse (0.18.7-1) trusty; urgency=medium
|
||||
|
||||
* Imported Upstream version 0.18.4
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Mon, 09 Jan 2017 15:10:21 +0000
|
||||
|
||||
matrix-synapse (0.18.5-1) trusty; urgency=medium
|
||||
|
||||
* Imported Upstream version 0.18.5
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Fri, 16 Dec 2016 10:51:59 +0000
|
||||
|
||||
matrix-synapse (0.18.4-1) trusty; urgency=medium
|
||||
|
||||
* Imported Upstream version 0.18.4
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Tue, 22 Nov 2016 10:33:41 +0000
|
||||
|
||||
matrix-synapse (0.18.3-1) trusty; urgency=medium
|
||||
|
||||
* Imported Upstream version 0.18.3
|
||||
* Remove upstreamed ldap3 patch
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Tue, 08 Nov 2016 15:01:49 +0000
|
||||
|
||||
matrix-synapse (0.18.2-2) trusty; urgency=high
|
||||
|
||||
* Patch ldap3 support to workaround differences in python-ldap3 0.9,
|
||||
bug allowed unauthorized logins if ldap3 0.9 was used.
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Tue, 08 Nov 2016 13:48:09 +0000
|
||||
|
||||
matrix-synapse (0.18.2-1) trusty; urgency=medium
|
||||
|
||||
* Imported Upstream version 0.18.2
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Tue, 01 Nov 2016 13:30:45 +0000
|
||||
|
||||
matrix-synapse (0.18.1-1) trusty; urgency=medium
|
||||
|
||||
* Imported Upstream version 0.18.1
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Wed, 05 Oct 2016 14:52:53 +0100
|
||||
|
||||
matrix-synapse (0.18.0-1) trusty; urgency=medium
|
||||
|
||||
* Imported Upstream version 0.18.0
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Mon, 19 Sep 2016 17:38:48 +0100
|
||||
|
||||
matrix-synapse (0.17.3-1) trusty; urgency=medium
|
||||
|
||||
* Imported Upstream version 0.17.3
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Fri, 09 Sep 2016 11:18:18 +0100
|
||||
|
||||
matrix-synapse (0.17.2-1) trusty; urgency=medium
|
||||
|
||||
* Imported Upstream version 0.17.2
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Thu, 08 Sep 2016 15:37:14 +0100
|
||||
|
||||
matrix-synapse (0.17.1-1) trusty; urgency=medium
|
||||
|
||||
* Imported Upstream version 0.17.1
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Wed, 24 Aug 2016 15:11:29 +0100
|
||||
|
||||
matrix-synapse (0.17.0-1) trusty; urgency=medium
|
||||
|
||||
* Imported Upstream version 0.17.0
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Mon, 08 Aug 2016 13:56:15 +0100
|
||||
|
||||
matrix-synapse (0.16.1-r1-1) trusty; urgency=medium
|
||||
|
||||
* Imported Upstream version 0.16.1-r1
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Fri, 08 Jul 2016 16:47:35 +0100
|
||||
|
||||
matrix-synapse (0.16.1-2) trusty; urgency=critical
|
||||
|
||||
* Apply security patch
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Fri, 08 Jul 2016 11:05:27 +0100
|
||||
|
||||
matrix-synapse (0.16.1-1) trusty; urgency=medium
|
||||
|
||||
* New upstream release
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Tue, 21 Jun 2016 14:56:48 +0100
|
||||
|
||||
matrix-synapse (0.16.0-3) trusty; urgency=medium
|
||||
|
||||
* Don't require strict nacl==0.3.0 requirement
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Mon, 20 Jun 2016 13:24:22 +0100
|
||||
|
||||
matrix-synapse (0.16.0-2) trusty; urgency=medium
|
||||
|
||||
* Also change the permissions of /etc/matrix-synapse
|
||||
* Add apt webclient instructions
|
||||
* Fix up patches
|
||||
* Update default homeserver.yaml
|
||||
* Add patch
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Fri, 10 Jun 2016 14:06:20 +0100
|
||||
|
||||
matrix-synapse (0.16.0-1) trusty; urgency=medium
|
||||
|
||||
[ David A Roberts ]
|
||||
* systemd
|
||||
|
||||
[ Erik Johnston ]
|
||||
* Fixup postinst and matrix-synapse.service
|
||||
* Handle email optional deps
|
||||
* New upstream release
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Thu, 09 Jun 2016 16:17:01 +0100
|
||||
|
||||
matrix-synapse (0.14.0-1) trusty; urgency=medium
|
||||
|
||||
* Remove saml2 module requirements
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Wed, 30 Mar 2016 14:31:17 +0100
|
||||
|
||||
matrix-synapse (0.13.3-1) trusty; urgency=medium
|
||||
|
||||
* New upstream release
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Thu, 11 Feb 2016 16:35:39 +0000
|
||||
|
||||
matrix-synapse (0.13.2-1) trusty; urgency=medium
|
||||
|
||||
* New upstream release
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Thu, 11 Feb 2016 11:01:16 +0000
|
||||
|
||||
matrix-synapse (0.13.0-1) trusty; urgency=medium
|
||||
|
||||
* New upstream release
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Wed, 10 Feb 2016 16:34:39 +0000
|
||||
|
||||
matrix-synapse (0.12.0-2) trusty; urgency=medium
|
||||
|
||||
* Don't default `registerion_shared_secret` config option
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Wed, 06 Jan 2016 16:34:02 +0000
|
||||
|
||||
matrix-synapse (0.12.0-1) stable; urgency=medium
|
||||
|
||||
* Imported Upstream version 0.12.0
|
||||
|
||||
-- Mark Haines <mark@matrix.org> Mon, 04 Jan 2016 15:38:33 +0000
|
||||
|
||||
matrix-synapse (0.11.1-1) unstable; urgency=medium
|
||||
|
||||
* Imported Upstream version 0.11.1
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Fri, 20 Nov 2015 17:56:52 +0000
|
||||
|
||||
matrix-synapse (0.11.0-r2-1) stable; urgency=medium
|
||||
|
||||
* Imported Upstream version 0.11.0-r2
|
||||
* Add gbp.conf
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Thu, 19 Nov 2015 13:52:36 +0000
|
||||
|
||||
matrix-synapse (0.11.0-1) wheezy; urgency=medium
|
||||
|
||||
* Fix dependencies.
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Tue, 17 Nov 2015 16:28:06 +0000
|
||||
|
||||
matrix-synapse (0.11.0-0) wheezy; urgency=medium
|
||||
|
||||
* New upstream release
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Tue, 17 Nov 2015 16:03:01 +0000
|
||||
|
||||
matrix-synapse (0.10.0-2) wheezy; urgency=medium
|
||||
|
||||
* Rebuild for wheezy.
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Fri, 04 Sep 2015 14:21:03 +0100
|
||||
|
||||
matrix-synapse (0.10.0-1) trusty; urgency=medium
|
||||
|
||||
* New upstream release
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Thu, 03 Sep 2015 10:08:34 +0100
|
||||
|
||||
matrix-synapse (0.10.0~rc6-3) trusty; urgency=medium
|
||||
|
||||
* Create log directory.
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Wed, 02 Sep 2015 17:49:07 +0100
|
||||
|
||||
matrix-synapse (0.10.0~rc6-2) trusty; urgency=medium
|
||||
|
||||
* Add patch to work around upstream bug in config directory handling.
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Wed, 02 Sep 2015 17:42:42 +0100
|
||||
|
||||
matrix-synapse (0.10.0~rc6-1) trusty; urgency=medium
|
||||
|
||||
* New upstream release
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Wed, 02 Sep 2015 17:21:21 +0100
|
||||
|
||||
matrix-synapse (0.10.0~rc5-3) trusty; urgency=medium
|
||||
|
||||
* Update init script to work.
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Fri, 28 Aug 2015 10:51:56 +0100
|
||||
|
||||
matrix-synapse (0.10.0~rc5-2) trusty; urgency=medium
|
||||
|
||||
* Fix where python files are installed.
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Thu, 27 Aug 2015 11:55:39 +0100
|
||||
|
||||
matrix-synapse (0.10.0~rc5-1) trusty; urgency=medium
|
||||
|
||||
* New upstream release
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Thu, 27 Aug 2015 11:26:54 +0100
|
||||
|
||||
matrix-synapse (0.10.0~rc4-1) trusty; urgency=medium
|
||||
|
||||
* New upstream version.
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Thu, 27 Aug 2015 10:29:31 +0100
|
||||
|
||||
matrix-synapse (0.10.0~rc3-7) trusty; urgency=medium
|
||||
|
||||
* Add debian/watch
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Wed, 26 Aug 2015 17:57:08 +0100
|
||||
|
||||
matrix-synapse (0.10.0~rc3-6) trusty; urgency=medium
|
||||
|
||||
* Deps.
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Wed, 26 Aug 2015 17:07:13 +0100
|
||||
|
||||
matrix-synapse (0.10.0~rc3-5) trusty; urgency=medium
|
||||
|
||||
* Deps.
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Wed, 26 Aug 2015 16:18:02 +0100
|
||||
|
||||
matrix-synapse (0.10.0~rc3-4) trusty; urgency=medium
|
||||
|
||||
* More deps.
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Wed, 26 Aug 2015 14:09:27 +0100
|
||||
|
||||
matrix-synapse (0.10.0~rc3-3) trusty; urgency=medium
|
||||
|
||||
* Update deps.
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Wed, 26 Aug 2015 13:49:20 +0100
|
||||
|
||||
matrix-synapse (0.10.0~rc3-2) trusty; urgency=medium
|
||||
|
||||
* Add more deps.
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Wed, 26 Aug 2015 13:25:45 +0100
|
||||
|
||||
matrix-synapse (0.10.0~rc3-1) trusty; urgency=medium
|
||||
|
||||
* New upstream release
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Tue, 25 Aug 2015 17:52:33 +0100
|
||||
|
||||
matrix-synapse (0.9.3-1~trusty1) trusty; urgency=medium
|
||||
|
||||
* Rebuild for trusty.
|
||||
|
||||
-- Erik Johnston <erikj@matrix.org> Thu, 20 Aug 2015 15:05:43 +0100
|
||||
|
||||
matrix-synapse (0.9.3-1) wheezy; urgency=medium
|
||||
|
||||
* New upstream release
|
||||
* Create a user, "matrix-synapse", to run as
|
||||
* Log to /var/log/matrix-synapse/ directory
|
||||
* Override the way synapse looks for the angular SDK (syweb) so it finds the
|
||||
packaged one
|
||||
|
||||
-- Paul "LeoNerd" Evans <paul@matrix.org> Fri, 07 Aug 2015 15:32:12 +0100
|
||||
|
||||
matrix-synapse (0.9.2-2) wheezy; urgency=medium
|
||||
|
||||
* Supply a default config file
|
||||
* Create directory in /var/lib
|
||||
* Use debconf to ask the user for the server name at installation time
|
||||
|
||||
-- Paul "LeoNerd" Evans <paul@matrix.org> Thu, 06 Aug 2015 15:28:00 +0100
|
||||
|
||||
matrix-synapse (0.9.2-1) wheezy; urgency=low
|
||||
|
||||
* source package automatically created by stdeb 0.8.2
|
||||
|
||||
-- Paul "LeoNerd" Evans <paul@matrix.org> Fri, 12 Jun 2015 14:32:03 +0100
|
||||
1 debian/compat vendored
@@ -1 +0,0 @@
9
40 debian/control vendored
@@ -1,40 +0,0 @@
Source: matrix-synapse-py3
Section: contrib/python
Priority: extra
Maintainer: Synapse Packaging team <packages@matrix.org>
Build-Depends:
 debhelper (>= 9),
 dh-systemd,
 dh-virtualenv (>= 1.1),
 lsb-release,
 python3-dev,
 python3,
 python3-setuptools,
 python3-pip,
 python3-venv,
 tar,
Standards-Version: 3.9.8
Homepage: https://github.com/matrix-org/synapse

Package: matrix-synapse-py3
Architecture: amd64
Provides: matrix-synapse
Conflicts:
 matrix-synapse (<< 0.34.0.1-0matrix2),
 matrix-synapse (>= 0.34.0.1-1),
Pre-Depends: dpkg (>= 1.16.1)
Depends:
 adduser,
 debconf,
 python3-distutils|libpython3-stdlib (<< 3.6),
 ${misc:Depends},
 ${synapse:pydepends},
# some of our scripts use perl, but none of them are important,
# so we put perl:Depends in Suggests rather than Depends.
Suggests:
 sqlite3,
 ${perl:Depends},
Description: Open federated Instant Messaging and VoIP server
 Matrix is an ambitious new ecosystem for open federated Instant
 Messaging and VoIP. Synapse is a reference Matrix server
 implementation.
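Given the stanza above, installing the resulting package on an amd64 system would look roughly like this, assuming it has already been published to an apt repository (repository setup is outside this diff):

    sudo apt update
    sudo apt install matrix-synapse-py3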
118 debian/copyright vendored
@@ -1,118 +0,0 @@
|
||||
Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
|
||||
Upstream-Name: synapse
|
||||
Source: https://github.com/matrix-org/synapse
|
||||
|
||||
Files: *
|
||||
Copyright: 2014-2017, OpenMarket Ltd, 2017-2018 New Vector Ltd
|
||||
License: Apache-2.0
|
||||
|
||||
Files: synapse/config/saml2.py
|
||||
Copyright: 2015, Ericsson
|
||||
License: Apache-2.0
|
||||
|
||||
Files: synapse/config/jwt.py
|
||||
Copyright: 2015, Niklas Riekenbrauck
|
||||
License: Apache-2.0
|
||||
|
||||
Files: synapse/config/workers.py
|
||||
Copyright: 2016, matrix.org
|
||||
License: Apache-2.0
|
||||
|
||||
Files: synapse/config/repository.py
|
||||
Copyright: 2014-2015, matrix.org
|
||||
License: Apache-2.0
|
||||
|
||||
Files: contrib/jitsimeetbridge/unjingle/strophe/base64.js
|
||||
Copyright: Public Domain (Tyler Akins http://rumkin.com)
|
||||
License: public-domain
|
||||
This code was written by Tyler Akins and has been placed in the
|
||||
public domain. It would be nice if you left this header intact.
|
||||
Base64 code from Tyler Akins -- http://rumkin.com
|
||||
|
||||
Files: contrib/jitsimeetbridge/unjingle/strophe/md5.js
|
||||
Copyright: 1999-2002, Paul Johnston & Contributors
|
||||
License: BSD-3-clause
|
||||
|
||||
Files: contrib/jitsimeetbridge/unjingle/strophe/strophe.js
|
||||
Copyright: 2006-2008, OGG, LLC
|
||||
License: Expat
|
||||
|
||||
Files: contrib/jitsimeetbridge/unjingle/strophe/XMLHttpRequest.js
|
||||
Copyright: 2010 passive.ly LLC
|
||||
License: Expat
|
||||
|
||||
Files: contrib/jitsimeetbridge/unjingle/*.js
|
||||
Copyright: 2014 Jitsi
|
||||
License: Apache-2.0
|
||||
|
||||
Files: debian/*
|
||||
Copyright: 2016-2017, Erik Johnston <erik@matrix.org>
|
||||
2017, Rahul De <rahulde@swecha.net>
|
||||
2017, Sunil Mohan Adapa <sunil@medhas.org>
|
||||
License: Apache-2.0
|
||||
|
||||
License: Apache-2.0
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
.
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
.
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
.
|
||||
On Debian systems, the full text of the Apache License version
|
||||
2.0 can be found in the file
|
||||
`/usr/share/common-licenses/Apache-2.0'.
|
||||
|
||||
License: BSD-3-clause
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
.
|
||||
Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following
|
||||
disclaimer. Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following
|
||||
disclaimer in the documentation and/or other materials provided with
|
||||
the distribution.
|
||||
.
|
||||
Neither the name of the author nor the names of its contributors may
|
||||
be used to endorse or promote products derived from this software
|
||||
without specific prior written permission.
|
||||
.
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
License: Expat
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
.
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
.
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
3 debian/dirs vendored
@@ -1,3 +0,0 @@
etc/matrix-synapse
var/lib/matrix-synapse
var/log/matrix-synapse
90 debian/hash_password.1 vendored
@@ -1,90 +0,0 @@
|
||||
.\" generated with Ronn/v0.7.3
|
||||
.\" http://github.com/rtomayko/ronn/tree/0.7.3
|
||||
.
|
||||
.TH "HASH_PASSWORD" "1" "February 2017" "" ""
|
||||
.
|
||||
.SH "NAME"
|
||||
\fBhash_password\fR \- Calculate the hash of a new password, so that passwords can be reset
|
||||
.
|
||||
.SH "SYNOPSIS"
|
||||
\fBhash_password\fR [\fB\-p\fR|\fB\-\-password\fR [password]] [\fB\-c\fR|\fB\-\-config\fR \fIfile\fR]
|
||||
.
|
||||
.SH "DESCRIPTION"
|
||||
\fBhash_password\fR calculates the hash of a supplied password using bcrypt\.
|
||||
.
|
||||
.P
|
||||
\fBhash_password\fR takes a password as an parameter either on the command line or the \fBSTDIN\fR if not supplied\.
|
||||
.
|
||||
.P
|
||||
It accepts an YAML file which can be used to specify parameters like the number of rounds for bcrypt and password_config section having the pepper value used for the hashing\. By default \fBbcrypt_rounds\fR is set to \fB10\fR\.
|
||||
.
|
||||
.P
|
||||
The hashed password is written on the \fBSTDOUT\fR\.
|
||||
.
|
||||
.SH "FILES"
|
||||
A sample YAML file accepted by \fBhash_password\fR is described below:
|
||||
.
|
||||
.P
|
||||
bcrypt_rounds: 17 password_config: pepper: "random hashing pepper"
|
||||
.
|
||||
.SH "OPTIONS"
|
||||
.
|
||||
.TP
|
||||
\fB\-p\fR, \fB\-\-password\fR
|
||||
Read the password form the command line if [password] is supplied\. If not, prompt the user and read the password form the \fBSTDIN\fR\. It is not recommended to type the password on the command line directly\. Use the STDIN instead\.
|
||||
.
|
||||
.TP
|
||||
\fB\-c\fR, \fB\-\-config\fR
|
||||
Read the supplied YAML \fIfile\fR containing the options \fBbcrypt_rounds\fR and the \fBpassword_config\fR section containing the \fBpepper\fR value\.
|
||||
.
|
||||
.SH "EXAMPLES"
|
||||
Hash from the command line:
|
||||
.
|
||||
.IP "" 4
|
||||
.
|
||||
.nf
|
||||
|
||||
$ hash_password \-p "p@ssw0rd"
|
||||
$2b$12$VJNqWQYfsWTEwcELfoSi4Oa8eA17movHqqi8\.X8fWFpum7SxZ9MFe
|
||||
.
|
||||
.fi
|
||||
.
|
||||
.IP "" 0
|
||||
.
|
||||
.P
|
||||
Hash from the STDIN:
|
||||
.
|
||||
.IP "" 4
|
||||
.
|
||||
.nf
|
||||
|
||||
$ hash_password
|
||||
Password:
|
||||
Confirm password:
|
||||
$2b$12$AszlvfmJl2esnyhmn8m/kuR2tdXgROWtWxnX\.rcuAbM8ErLoUhybG
|
||||
.
|
||||
.fi
|
||||
.
|
||||
.IP "" 0
|
||||
.
|
||||
.P
|
||||
Using a config file:
|
||||
.
|
||||
.IP "" 4
|
||||
.
|
||||
.nf
|
||||
|
||||
$ hash_password \-c config\.yml
|
||||
Password:
|
||||
Confirm password:
|
||||
$2b$12$CwI\.wBNr\.w3kmiUlV3T5s\.GT2wH7uebDCovDrCOh18dFedlANK99O
|
||||
.
|
||||
.fi
|
||||
.
|
||||
.IP "" 0
|
||||
.
|
||||
.SH "COPYRIGHT"
|
||||
This man page was written by Rahul De <\fIrahulde@swecha\.net\fR> for Debian GNU/Linux distribution\.
|
||||
.
|
||||
.SH "SEE ALSO"
|
||||
synctl(1), synapse_port_db(1), register_new_matrix_user(1)
|
||||
69 debian/hash_password.ronn vendored
@@ -1,69 +0,0 @@
hash_password(1) -- Calculate the hash of a new password, so that passwords can be reset
========================================================================================

## SYNOPSIS

`hash_password` [`-p`|`--password` [password]] [`-c`|`--config` <file>]

## DESCRIPTION

**hash_password** calculates the hash of a supplied password using bcrypt.

`hash_password` takes a password as an parameter either on the command line
or the `STDIN` if not supplied.

It accepts an YAML file which can be used to specify parameters like the
number of rounds for bcrypt and password_config section having the pepper
value used for the hashing. By default `bcrypt_rounds` is set to **10**.

The hashed password is written on the `STDOUT`.

## FILES

A sample YAML file accepted by `hash_password` is described below:

    bcrypt_rounds: 17
    password_config:
      pepper: "random hashing pepper"

## OPTIONS

  * `-p`, `--password`:
    Read the password form the command line if [password] is supplied.
    If not, prompt the user and read the password form the `STDIN`.
    It is not recommended to type the password on the command line
    directly. Use the STDIN instead.

  * `-c`, `--config`:
    Read the supplied YAML <file> containing the options `bcrypt_rounds`
    and the `password_config` section containing the `pepper` value.

## EXAMPLES

Hash from the command line:

    $ hash_password -p "p@ssw0rd"
    $2b$12$VJNqWQYfsWTEwcELfoSi4Oa8eA17movHqqi8.X8fWFpum7SxZ9MFe

Hash from the STDIN:

    $ hash_password
    Password:
    Confirm password:
    $2b$12$AszlvfmJl2esnyhmn8m/kuR2tdXgROWtWxnX.rcuAbM8ErLoUhybG

Using a config file:

    $ hash_password -c config.yml
    Password:
    Confirm password:
    $2b$12$CwI.wBNr.w3kmiUlV3T5s.GT2wH7uebDCovDrCOh18dFedlANK99O

## COPYRIGHT

This man page was written by Rahul De <<rahulde@swecha.net>>
for Debian GNU/Linux distribution.

## SEE ALSO

synctl(1), synapse_port_db(1), register_new_matrix_user(1)
2 debian/install vendored
@@ -1,2 +0,0 @@
debian/log.yaml etc/matrix-synapse
debian/manage_debconf.pl /opt/venvs/matrix-synapse/lib/
36 debian/log.yaml vendored
@@ -1,36 +0,0 @@
version: 1

formatters:
    precise:
        format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s'

filters:
    context:
        (): synapse.util.logcontext.LoggingContextFilter
        request: ""

handlers:
    file:
        class: logging.handlers.RotatingFileHandler
        formatter: precise
        filename: /var/log/matrix-synapse/homeserver.log
        maxBytes: 104857600
        backupCount: 10
        filters: [context]
        encoding: utf8
    console:
        class: logging.StreamHandler
        formatter: precise
        level: WARN

loggers:
    synapse:
        level: INFO

    synapse.storage.SQL:
        level: INFO

root:
    level: INFO
    handlers: [file, console]
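For reference, the perl tweaks in debian/build_virtualenv above rewrite the generated homeserver.yaml to point at this file; on an installed system that can be spot-checked like so (illustrative only, assuming the default paths used elsewhere in this packaging):

    grep '^log_config' /etc/matrix-synapse/homeserver.yaml
    # expected: log_config: "/etc/matrix-synapse/log.yaml"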
130 debian/manage_debconf.pl vendored
@@ -1,130 +0,0 @@
|
||||
#!/usr/bin/perl
|
||||
#
|
||||
# Interface between our config files and the debconf database.
|
||||
#
|
||||
# Usage:
|
||||
#
|
||||
# manage_debconf.pl <action>
|
||||
#
|
||||
# where <action> can be:
|
||||
#
|
||||
# read: read the configuration from the yaml into debconf
|
||||
# update: update the yaml config according to the debconf database
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use Debconf::Client::ConfModule (qw/get set/);
|
||||
|
||||
# map from the name of a setting in our .yaml file to the relevant debconf
|
||||
# setting.
|
||||
my %MAPPINGS=(
|
||||
server_name => 'matrix-synapse/server-name',
|
||||
report_stats => 'matrix-synapse/report-stats',
|
||||
);
|
||||
|
||||
# enable debug if dpkg --debug
|
||||
my $DEBUG = $ENV{DPKG_MAINTSCRIPT_DEBUG};
|
||||
|
||||
sub read_config {
|
||||
my @files = @_;
|
||||
|
||||
foreach my $file (@files) {
|
||||
print STDERR "reading $file\n" if $DEBUG;
|
||||
|
||||
open my $FH, "<", $file or next;
|
||||
|
||||
# rudimentary parsing which (a) avoids having to depend on a yaml library,
|
||||
# and (b) is tolerant of yaml errors
|
||||
while($_ = <$FH>) {
|
||||
while (my ($setting, $debconf) = each %MAPPINGS) {
|
||||
$setting = quotemeta $setting;
|
||||
if(/^${setting}\s*:(.*)$/) {
|
||||
my $val = $1;
|
||||
|
||||
# remove leading/trailing whitespace
|
||||
$val =~ s/^\s*//;
|
||||
$val =~ s/\s*$//;
|
||||
|
||||
# remove surrounding quotes
|
||||
if ($val =~ /^"(.*)"$/ || $val =~ /^'(.*)'$/) {
|
||||
$val = $1;
|
||||
}
|
||||
|
||||
print STDERR ">> $debconf = $val\n" if $DEBUG;
|
||||
set($debconf, $val);
|
||||
}
|
||||
}
|
||||
}
|
||||
close $FH;
|
||||
}
|
||||
}
|
||||
|
||||
sub update_config {
|
||||
my @files = @_;
|
||||
|
||||
my %substs = ();
|
||||
while (my ($setting, $debconf) = each %MAPPINGS) {
|
||||
my @res = get($debconf);
|
||||
$substs{$setting} = $res[1] if $res[0] == 0;
|
||||
}
|
||||
|
||||
foreach my $file (@files) {
|
||||
print STDERR "checking $file\n" if $DEBUG;
|
||||
|
||||
open my $FH, "<", $file or next;
|
||||
|
||||
my $updated = 0;
|
||||
|
||||
# read the whole file into memory
|
||||
my @lines = <$FH>;
|
||||
|
||||
while (my ($setting, $val) = each %substs) {
|
||||
$setting = quotemeta $setting;
|
||||
|
||||
map {
|
||||
if (/^${setting}\s*:\s*(.*)\s*$/) {
|
||||
my $current = $1;
|
||||
if ($val ne $current) {
|
||||
$_ = "${setting}: $val\n";
|
||||
$updated = 1;
|
||||
}
|
||||
}
|
||||
} @lines;
|
||||
}
|
||||
close $FH;
|
||||
|
||||
next unless $updated;
|
||||
|
||||
print STDERR "updating $file\n" if $DEBUG;
|
||||
open $FH, ">", $file or die "unable to update $file";
|
||||
print $FH @lines;
|
||||
close $FH;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
my $cmd = $ARGV[0];
|
||||
|
||||
my $read = 0;
|
||||
my $update = 0;
|
||||
|
||||
if (not $cmd) {
|
||||
die "must specify a command to perform\n";
|
||||
} elsif ($cmd eq 'read') {
|
||||
$read = 1;
|
||||
} elsif ($cmd eq 'update') {
|
||||
$update = 1;
|
||||
} else {
|
||||
die "unknown command '$cmd'\n";
|
||||
}
|
||||
|
||||
my @files = (
|
||||
"/etc/matrix-synapse/homeserver.yaml",
|
||||
glob("/etc/matrix-synapse/conf.d/*.yaml"),
|
||||
);
|
||||
|
||||
if ($read) {
|
||||
read_config(@files);
|
||||
} elsif ($update) {
|
||||
update_config(@files);
|
||||
}
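The script above is installed to /opt/venvs/matrix-synapse/lib/ (see debian/install). A minimal sketch of how a maintainer script might drive it, assuming the usual debconf flow; the actual config/postinst scripts are not shown in this diff:

    . /usr/share/debconf/confmodule

    # push the current yaml settings into the debconf database
    perl /opt/venvs/matrix-synapse/lib/manage_debconf.pl read

    # ...ask the matrix-synapse/server-name and report-stats questions here...

    # write any changed answers back into the yaml config
    perl /opt/venvs/matrix-synapse/lib/manage_debconf.pl update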
Some files were not shown because too many files have changed in this diff.