Merge branch 'develop' into madlittlemods/11850-migrate-to-opentelemetry

Conflicts:
	.github/workflows/tests.yml
	poetry.lock
	synapse/storage/schema/__init__.py
Commit b86869feef by Eric Eastwood, 2022-09-20 18:00:28 -05:00
108 changed files with 2155 additions and 1053 deletions


@ -1,31 +0,0 @@
#!/usr/bin/env python
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import psycopg2
# a very simple replacement for `psql`, to make up for the lack of the postgres client
# libraries in the synapse docker image.
# We use "postgres" as a database because it's bound to exist and the "synapse" one
# doesn't exist yet.
db_conn = psycopg2.connect(
user="postgres", host="localhost", password="postgres", dbname="postgres"
)
db_conn.autocommit = True
cur = db_conn.cursor()
for c in sys.argv[1:]:
    cur.execute(c)


@ -32,7 +32,7 @@ else
fi
# Create the PostgreSQL database.
poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
psql -c "CREATE DATABASE synapse"
# Port the SQLite database to postgres so we can check the command works against postgres
echo "+++ Port SQLite3 database to postgres"


@ -2,27 +2,27 @@
#
# Test script for 'synapse_port_db'.
# - configures synapse and a postgres server.
# - runs the port script on a prepopulated test sqlite db
# - also runs it against an new sqlite db
# - runs the port script on a prepopulated test sqlite db. Checks that the
# return code is zero.
# - reruns the port script on the same sqlite db, targeting the same postgres db.
# Checks that the return code is zero.
# - runs the port script against a new sqlite db. Checks the return code is zero.
#
# Expects Synapse to have been already installed with `poetry install --extras postgres`.
# Expects `poetry` to be available on the `PATH`.
set -xe
set -xe -o pipefail
cd "$(dirname "$0")/../.."
echo "--- Generate the signing key"
# Generate the server's signing key.
poetry run synapse_homeserver --generate-keys -c .ci/sqlite-config.yaml
echo "--- Prepare test database"
# Make sure the SQLite3 database is using the latest schema and has no pending background update.
# Make sure the SQLite3 database is using the latest schema and has no pending background updates.
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# Create the PostgreSQL database.
poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse"
psql -c "CREATE DATABASE synapse"
echo "+++ Run synapse_port_db against test database"
# TODO: this invocation of synapse_port_db (and others below) used to be prepended with `coverage run`,
@ -45,9 +45,23 @@ rm .ci/test_db.db
poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates
# re-create the PostgreSQL database.
poetry run .ci/scripts/postgres_exec.py \
"DROP DATABASE synapse" \
"CREATE DATABASE synapse"
psql \
-c "DROP DATABASE synapse" \
-c "CREATE DATABASE synapse"
echo "+++ Run synapse_port_db against empty database"
poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml
echo "--- Create a brand new postgres database from schema"
cp .ci/postgres-config.yaml .ci/postgres-config-unported.yaml
sed -i -e 's/database: synapse/database: synapse_unported/' .ci/postgres-config-unported.yaml
psql -c "CREATE DATABASE synapse_unported"
poetry run update_synapse_database --database-config .ci/postgres-config-unported.yaml --run-background-updates
echo "+++ Comparing ported schema with unported schema"
# Ignore the tables that portdb creates. (Should it tidy them up when the porting is completed?)
psql synapse -c "DROP TABLE port_from_sqlite3;"
pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner synapse_unported > unported.sql
pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner synapse > ported.sql
# By default, `diff` returns zero if there are no changes and nonzero otherwise;
# because `pipefail` is set above, a nonzero exit from `diff` fails this script
# even though its output is piped through `tee`.
diff -u unported.sql ported.sql | tee schema_diff


@ -11,5 +11,6 @@
!build_rust.py
rust/target
synapse/*.so
**/__pycache__


@ -32,9 +32,11 @@ jobs:
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- run: pip install .
- run: scripts-dev/generate_sample_config.sh --check
- run: scripts-dev/config-lint.sh
- uses: matrix-org/setup-python-poetry@v1
with:
extras: "all"
- run: poetry run scripts-dev/generate_sample_config.sh --check
- run: poetry run scripts-dev/config-lint.sh
check-schema-delta:
runs-on: ubuntu-latest
@ -44,92 +46,91 @@ jobs:
- run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
- run: scripts-dev/check_schema_delta.py --force-colors
# lint:
# uses: "matrix-org/backend-meta/.github/workflows/python-poetry-ci.yml@v1"
# with:
# typechecking-extras: "all"
lint:
uses: "matrix-org/backend-meta/.github/workflows/python-poetry-ci.yml@v1"
with:
typechecking-extras: "all"
# lint-crlf:
# runs-on: ubuntu-latest
# steps:
# - uses: actions/checkout@v2
# - name: Check line endings
# run: scripts-dev/check_line_terminators.sh
lint-crlf:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Check line endings
run: scripts-dev/check_line_terminators.sh
# lint-newsfile:
# if: ${{ github.base_ref == 'develop' || contains(github.base_ref, 'release-') }}
# runs-on: ubuntu-latest
# steps:
# - uses: actions/checkout@v2
# with:
# ref: ${{ github.event.pull_request.head.sha }}
# fetch-depth: 0
# - uses: actions/setup-python@v2
# - run: "pip install 'towncrier>=18.6.0rc1'"
# - run: scripts-dev/check-newsfragment.sh
# env:
# PULL_REQUEST_NUMBER: ${{ github.event.number }}
lint-newsfile:
if: ${{ github.base_ref == 'develop' || contains(github.base_ref, 'release-') }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
- uses: actions/setup-python@v2
- run: "pip install 'towncrier>=18.6.0rc1'"
- run: scripts-dev/check-newsfragment.sh
env:
PULL_REQUEST_NUMBER: ${{ github.event.number }}
# lint-pydantic:
# runs-on: ubuntu-latest
# steps:
# - uses: actions/checkout@v2
# with:
# ref: ${{ github.event.pull_request.head.sha }}
# fetch-depth: 0
# - uses: matrix-org/setup-python-poetry@v1
# with:
# extras: "all"
# - run: poetry run scripts-dev/check_pydantic_models.py
lint-pydantic:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
ref: ${{ github.event.pull_request.head.sha }}
- uses: matrix-org/setup-python-poetry@v1
with:
extras: "all"
- run: poetry run scripts-dev/check_pydantic_models.py
# lint-clippy:
# runs-on: ubuntu-latest
# needs: changes
# if: ${{ needs.changes.outputs.rust == 'true' }}
lint-clippy:
runs-on: ubuntu-latest
needs: changes
if: ${{ needs.changes.outputs.rust == 'true' }}
# steps:
# - uses: actions/checkout@v2
steps:
- uses: actions/checkout@v2
# - name: Install Rust
# uses: actions-rs/toolchain@v1
# with:
# toolchain: 1.61.0
# override: true
# components: clippy
# - uses: Swatinem/rust-cache@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: 1.61.0
override: true
components: clippy
- uses: Swatinem/rust-cache@v2
# - run: cargo clippy
- run: cargo clippy
# lint-rustfmt:
# runs-on: ubuntu-latest
# needs: changes
# if: ${{ needs.changes.outputs.rust == 'true' }}
lint-rustfmt:
runs-on: ubuntu-latest
needs: changes
if: ${{ needs.changes.outputs.rust == 'true' }}
# steps:
# - uses: actions/checkout@v2
steps:
- uses: actions/checkout@v2
# - name: Install Rust
# uses: actions-rs/toolchain@v1
# with:
# toolchain: 1.61.0
# override: true
# components: rustfmt
# - uses: Swatinem/rust-cache@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: 1.61.0
override: true
components: rustfmt
- uses: Swatinem/rust-cache@v2
# - run: cargo fmt --check
- run: cargo fmt --check
# Dummy step to gate other tests on without repeating the whole list
linting-done:
if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
# needs:
# - lint
# - lint-crlf
# - lint-newsfile
# - lint-pydantic
# - check-sampleconfig
# - check-schema-delta
# - lint-clippy
# - lint-rustfmt
needs:
- lint
- lint-crlf
- lint-newsfile
- lint-pydantic
- check-sampleconfig
- check-schema-delta
- lint-clippy
- lint-rustfmt
runs-on: ubuntu-latest
steps:
- run: "true"
@ -361,18 +362,22 @@ jobs:
steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- run: sudo apt-get -qq install xmlsec1 postgresql-client
- uses: matrix-org/setup-python-poetry@v1
with:
extras: "postgres"
- run: .ci/scripts/test_export_data_command.sh
env:
PGHOST: localhost
PGUSER: postgres
PGPASSWORD: postgres
PGDATABASE: postgres
portdb:
if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-latest
env:
TOP: ${{ github.workspace }}
strategy:
matrix:
include:
@ -398,12 +403,27 @@ jobs:
steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- run: sudo apt-get -qq install xmlsec1 postgresql-client
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.python-version }}
extras: "postgres"
- run: .ci/scripts/test_synapse_port_db.sh
id: run_tester_script
env:
PGHOST: localhost
PGUSER: postgres
PGPASSWORD: postgres
PGDATABASE: postgres
- name: "Upload schema differences"
uses: actions/upload-artifact@v3
if: ${{ failure() && !cancelled() && steps.run_tester_script.outcome == 'failure' }}
with:
name: Schema dumps
path: |
unported.sql
ported.sql
schema_diff
complement:
if: "${{ !failure() && !cancelled() }}"

.rustfmt.toml (new file)

@ -0,0 +1 @@
group_imports = "StdExternalCrate"


@ -1,3 +1,92 @@
Synapse 1.68.0rc1 (2022-09-20)
==============================
Please note that Synapse will now refuse to start if configured to use a version of SQLite earlier than 3.27.
In addition, please note that installing Synapse from a source checkout now requires a recent Rust compiler.
Those using packages will not be affected; on most platforms, installing with `pip install matrix-synapse` will likewise be unaffected.
See the [upgrade notes](https://matrix-org.github.io/synapse/v1.68/upgrade.html#upgrading-to-v1670).
Features
--------
- Keep track of when we fail to process a pulled event over federation so we can intelligently back off in the future. ([\#13589](https://github.com/matrix-org/synapse/issues/13589), [\#13814](https://github.com/matrix-org/synapse/issues/13814))
- Add an [admin API endpoint to fetch messages within a particular window of time](https://matrix-org.github.io/synapse/v1.68/admin_api/rooms.html#room-messages-api). ([\#13672](https://github.com/matrix-org/synapse/issues/13672))
- Add an [admin API endpoint to find a user based on their external ID in an auth provider](https://matrix-org.github.io/synapse/v1.68/admin_api/user_admin_api.html#find-a-user-based-on-their-id-in-an-auth-provider). ([\#13810](https://github.com/matrix-org/synapse/issues/13810))
- Cancel the processing of key query requests when they time out. ([\#13680](https://github.com/matrix-org/synapse/issues/13680))
- Improve validation of request bodies for the following client-server API endpoints: [`/account/3pid/msisdn/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidmsisdnrequesttoken), [`/org.matrix.msc3720/account_status`](https://github.com/matrix-org/matrix-spec-proposals/blob/babolivier/user_status/proposals/3720-account-status.md#post-_matrixclientv1account_status), [`/account/3pid/add`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidadd), [`/account/3pid/bind`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidbind), [`/account/3pid/delete`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3piddelete) and [`/account/3pid/unbind`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidunbind). ([\#13687](https://github.com/matrix-org/synapse/issues/13687), [\#13736](https://github.com/matrix-org/synapse/issues/13736))
- Document the timestamp when a user accepts the consent, if [consent tracking](https://matrix-org.github.io/synapse/latest/consent_tracking.html) is used. ([\#13741](https://github.com/matrix-org/synapse/issues/13741))
- Add a `listeners[x].request_id_header` configuration option to specify which request header to extract and use as the request ID in order to correlate requests from a reverse proxy. ([\#13801](https://github.com/matrix-org/synapse/issues/13801))
Bugfixes
--------
- Fix a bug introduced in Synapse v1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`). ([\#13506](https://github.com/matrix-org/synapse/issues/13506))
- Fix a long-standing bug where previously rejected events could end up in room state because they pass auth checks given the current state of the room. ([\#13723](https://github.com/matrix-org/synapse/issues/13723))
- Fix a long-standing bug where Synapse fails to start if a signing key file contains an empty line. ([\#13738](https://github.com/matrix-org/synapse/issues/13738))
- Fix a long-standing bug where Synapse would fail to handle malformed user IDs or room aliases gracefully in certain cases. ([\#13746](https://github.com/matrix-org/synapse/issues/13746))
- Fix a long-standing bug where device lists would remain cached when remote users left and rejoined the last room shared with the local homeserver. ([\#13749](https://github.com/matrix-org/synapse/issues/13749), [\#13826](https://github.com/matrix-org/synapse/issues/13826))
- Fix a long-standing bug that could cause stale caches in some rare cases on the first startup of Synapse with replication. ([\#13766](https://github.com/matrix-org/synapse/issues/13766))
- Fix a long-standing spec compliance bug where Synapse would accept a trailing slash on the end of `/get_missing_events` federation requests. ([\#13789](https://github.com/matrix-org/synapse/issues/13789))
- Delete associated data from `event_failed_pull_attempts`, `insertion_events`, and `insertion_event_extremities` when purging the room. ([\#13825](https://github.com/matrix-org/synapse/issues/13825))
Improved Documentation
----------------------
- Note that `libpq` is required on ARM-based Macs. ([\#13480](https://github.com/matrix-org/synapse/issues/13480))
- Fix a mistake in the config manual: the `event_cache_size` _is_ scaled by `caches.global_factor`. The documentation was incorrect since Synapse v1.22.0. ([\#13726](https://github.com/matrix-org/synapse/issues/13726))
- Fix a typo in the documentation for the login ratelimiting configuration. ([\#13727](https://github.com/matrix-org/synapse/issues/13727))
- Define Synapse's compatibility policy for SQLite versions. ([\#13728](https://github.com/matrix-org/synapse/issues/13728))
- Add docs for common fix of deleting the `matrix_synapse.egg-info/` directory for fixing Python dependency problems. ([\#13785](https://github.com/matrix-org/synapse/issues/13785))
- Update request log format documentation to mention the format used when the authenticated user is controlling another user. ([\#13794](https://github.com/matrix-org/synapse/issues/13794))
Deprecations and Removals
-------------------------
- Synapse will now refuse to start if configured to use SQLite < 3.27. ([\#13760](https://github.com/matrix-org/synapse/issues/13760))
- Don't include redundant `prev_state` in new events. Contributed by Denis Kariakin (@dakariakin). ([\#13791](https://github.com/matrix-org/synapse/issues/13791))
Internal Changes
----------------
- Add a stub Rust crate. ([\#12595](https://github.com/matrix-org/synapse/issues/12595), [\#13734](https://github.com/matrix-org/synapse/issues/13734), [\#13735](https://github.com/matrix-org/synapse/issues/13735), [\#13743](https://github.com/matrix-org/synapse/issues/13743), [\#13763](https://github.com/matrix-org/synapse/issues/13763), [\#13769](https://github.com/matrix-org/synapse/issues/13769), [\#13778](https://github.com/matrix-org/synapse/issues/13778))
- Bump the minimum dependency of `matrix_common` to 1.3.0 to make use of the `MXCUri` class. Use `MXCUri` to simplify media retention test code. ([\#13162](https://github.com/matrix-org/synapse/issues/13162))
- Add and populate the `event_stream_ordering` column on the `receipts` table for future optimisation of push action processing. Contributed by Nick @ Beeper (@fizzadar). ([\#13703](https://github.com/matrix-org/synapse/issues/13703))
- Rename the `EventFormatVersions` enum values so that they line up with room version numbers. ([\#13706](https://github.com/matrix-org/synapse/issues/13706))
- Update trial old deps CI to use Poetry 1.2.0. ([\#13707](https://github.com/matrix-org/synapse/issues/13707), [\#13725](https://github.com/matrix-org/synapse/issues/13725))
- Add experimental configuration option to allow disabling legacy Prometheus metric names. ([\#13714](https://github.com/matrix-org/synapse/issues/13714), [\#13717](https://github.com/matrix-org/synapse/issues/13717), [\#13718](https://github.com/matrix-org/synapse/issues/13718))
- Fix typechecking with latest types-jsonschema. ([\#13724](https://github.com/matrix-org/synapse/issues/13724))
- Strip number suffix from instance name to consolidate services that traces are spread over. ([\#13729](https://github.com/matrix-org/synapse/issues/13729))
- Instrument `get_metadata_for_events` for understandable traces in Jaeger. ([\#13730](https://github.com/matrix-org/synapse/issues/13730))
- Remove old queries to join room memberships to current state events. Contributed by Nick @ Beeper (@fizzadar). ([\#13745](https://github.com/matrix-org/synapse/issues/13745))
- Avoid raising an error due to malformed user IDs in `get_current_hosts_in_room`. Malformed user IDs cannot currently join a room, so this error would not be hit. ([\#13748](https://github.com/matrix-org/synapse/issues/13748))
- Update the docstrings for `get_users_in_room` and `get_current_hosts_in_room` to explain the impact of partial state. ([\#13750](https://github.com/matrix-org/synapse/issues/13750))
- Use an additional database query when persisting receipts. ([\#13752](https://github.com/matrix-org/synapse/issues/13752))
- Preparatory work for storing thread IDs for notifications and receipts. ([\#13753](https://github.com/matrix-org/synapse/issues/13753))
- Re-type hint some collections as read-only. ([\#13754](https://github.com/matrix-org/synapse/issues/13754))
- Remove unused Prometheus recording rules from `synapse-v2.rules` and add comments describing where the rest are used. ([\#13756](https://github.com/matrix-org/synapse/issues/13756))
- Add a check for editable installs if the Rust library needs rebuilding. ([\#13759](https://github.com/matrix-org/synapse/issues/13759))
- Tag traces with the instance name to be able to easily jump into the right logs and filter traces by instance. ([\#13761](https://github.com/matrix-org/synapse/issues/13761))
- Concurrently fetch room push actions when calculating badge counts. Contributed by Nick @ Beeper (@fizzadar). ([\#13765](https://github.com/matrix-org/synapse/issues/13765))
- Update the script which makes full schema dumps. ([\#13770](https://github.com/matrix-org/synapse/issues/13770))
- Deduplicate `is_server_notices_room`. ([\#13780](https://github.com/matrix-org/synapse/issues/13780))
- Simplify the dependency DAG in the tests workflow. ([\#13784](https://github.com/matrix-org/synapse/issues/13784))
- Remove an old, incorrect migration file. ([\#13788](https://github.com/matrix-org/synapse/issues/13788))
- Remove unused method in `synapse.api.auth.Auth`. ([\#13795](https://github.com/matrix-org/synapse/issues/13795))
- Fix a memory leak when running the unit tests. ([\#13798](https://github.com/matrix-org/synapse/issues/13798))
- Use partial indices on SQLite. ([\#13802](https://github.com/matrix-org/synapse/issues/13802))
- Check that portdb generates the same postgres schema as that in the source tree. ([\#13808](https://github.com/matrix-org/synapse/issues/13808))
- Fix Docker build when the Rust .so has been built locally first. ([\#13811](https://github.com/matrix-org/synapse/issues/13811))
- Complement: Initialise the Postgres database directly inside the target image instead of the base Postgres image to fix building using Buildah. ([\#13819](https://github.com/matrix-org/synapse/issues/13819))
- Support providing an index predicate clause when doing upserts. ([\#13822](https://github.com/matrix-org/synapse/issues/13822))
- Minor speedups to linting in CI. ([\#13827](https://github.com/matrix-org/synapse/issues/13827))
Synapse 1.67.0 (2022-09-13)
===========================


@ -1 +0,0 @@
Add a stub Rust crate.


@ -1 +0,0 @@
Note that `libpq` is required on ARM-based Macs.


@ -1 +0,0 @@
Fix a bug introduced in Synapse v1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`).


@ -1 +0,0 @@
Keep track of when we attempt to backfill an event but fail, so we can intelligently back off in the future.


@ -1 +0,0 @@
Add admin APIs to fetch messages within a particular window of time.


@ -1 +0,0 @@
Cancel the processing of key query requests when they time out.


@ -1 +0,0 @@
Improve validation of request bodies for the following client-server API endpoints: [`/account/3pid/msisdn/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidmsisdnrequesttoken) and [`/org.matrix.msc3720/account_status`](https://github.com/matrix-org/matrix-spec-proposals/blob/babolivier/user_status/proposals/3720-account-status.md#post-_matrixclientv1account_status).


@ -1 +0,0 @@
Add & populate `event_stream_ordering` column on receipts table for future optimisation of push action processing. Contributed by Nick @ Beeper (@fizzadar).


@ -1 +0,0 @@
Rename the `EventFormatVersions` enum values so that they line up with room version numbers.


@ -1 +0,0 @@
Update trial old deps CI to use poetry 1.2.0.


@ -1 +0,0 @@
Add experimental configuration option to allow disabling legacy Prometheus metric names.


@ -1 +0,0 @@
Add experimental configuration option to allow disabling legacy Prometheus metric names.


@ -1 +0,0 @@
Add experimental configuration option to allow disabling legacy Prometheus metric names.


@ -1 +0,0 @@
Fix typechecking with latest types-jsonschema.


@ -1 +0,0 @@
Update trial old deps CI to use poetry 1.2.0.


@ -1 +0,0 @@
Fix a mistake in the config manual: the `event_cache_size` _is_ scaled by `caches.global_factor`. The documentation was incorrect since Synapse 1.22.


@ -1 +0,0 @@
Fix a typo in the documentation for the login ratelimiting configuration.


@ -1 +0,0 @@
Define Synapse's compatibility policy for SQLite versions.


@ -1 +0,0 @@
Strip number suffix from instance name to consolidate services that traces are spread over.


@ -1 +0,0 @@
Instrument `get_metadata_for_events` for understandable traces in Jaeger.


@ -1 +0,0 @@
Add a stub Rust crate.


@ -1 +0,0 @@
Add a stub Rust crate.


@ -1 +0,0 @@
Fix a bug where Synapse fails to start if a signing key file contains an empty line.


@ -1 +0,0 @@
Document the timestamp when a user accepts the consent, if [consent tracking](https://matrix-org.github.io/synapse/latest/consent_tracking.html) is used.


@ -1 +0,0 @@
Add a stub Rust crate.


@ -1 +0,0 @@
Remove old queries to join room memberships to current state events. Contributed by Nick @ Beeper (@fizzadar).


@ -1 +0,0 @@
Fix a long-standing bug where Synapse would fail to handle malformed user IDs or room aliases gracefully in certain cases.


@ -1 +0,0 @@
Avoid raising an error due to malformed user IDs in `get_current_hosts_in_room`. Malformed user IDs cannot currently join a room, so this error would not be hit.


@ -1 +0,0 @@
Fix a long-standing bug where device lists would remain cached when remote users left and rejoined the last room shared with the local homeserver.


@ -1 +0,0 @@
Update the docstrings for `get_users_in_room` and `get_current_hosts_in_room` to explain the impact of partial state.


@ -1 +0,0 @@
Use an additional database query when persisting receipts.


@ -1 +0,0 @@
Preparatory work for storing thread IDs for notifications and receipts.


@ -1 +0,0 @@
Re-type hint some collections as read-only.


@ -1 +0,0 @@
Remove unused Prometheus recording rules from `synapse-v2.rules` and add comments describing where the rest are used.


@ -1 +0,0 @@
Add a check for editable installs if the Rust library needs rebuilding.


@ -1 +0,0 @@
Synapse will now refuse to start if configured to use SQLite < 3.27.


@ -1 +0,0 @@
Tag traces with the instance name to be able to easily jump into the right logs and filter traces by instance.


@ -1 +0,0 @@
Add a stub Rust crate.


@ -1 +0,0 @@
Concurrently fetch room push actions when calculating badge counts. Contributed by Nick @ Beeper (@fizzadar).


@ -1 +0,0 @@
Fix a long-standing bug where the `cache_invalidation_stream_seq` sequence would begin at 1 instead of 2.

changelog.d/13768.misc (new file)

@ -0,0 +1 @@
Port push rules to using Rust.


@ -1 +0,0 @@
Add a stub Rust crate.


@ -1 +0,0 @@
Add a stub Rust crate.


@ -1 +0,0 @@
Deduplicate `is_server_notices_room`.


@ -1 +0,0 @@
Simplify the dependency DAG in the tests workflow.


@ -1 +0,0 @@
Remove an old, incorrect migration file.


@ -1 +0,0 @@
Fix a long-standing spec compliance bug where Synapse would accept a trailing slash on the end of `/get_missing_events` federation requests.


@ -1 +0,0 @@
Remove unused method in `synapse.api.auth.Auth`.


@ -1 +0,0 @@
Fix a memory leak when running the unit tests.


@ -1 +0,0 @@
Use partial indices on SQLite.


@ -0,0 +1 @@
Remove the `complete_sso_login` method from the Module API which was deprecated in Synapse 1.13.0.

debian/changelog

@ -1,3 +1,9 @@
matrix-synapse-py3 (1.68.0~rc1) stable; urgency=medium
* New Synapse release 1.68.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 20 Sep 2022 11:18:20 +0100
matrix-synapse-py3 (1.67.0) stable; urgency=medium
* New Synapse release 1.67.0.


@ -31,7 +31,9 @@ ARG PYTHON_VERSION=3.9
###
### Stage 0: generate requirements.txt
###
FROM docker.io/python:${PYTHON_VERSION}-slim as requirements
# We hardcode the use of Debian bullseye here because the upstream default could
# change, and other Dockerfiles used for testing expect bullseye.
FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as requirements
# RUN --mount is specific to buildkit and is documented at
# https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount.
@ -76,7 +78,7 @@ RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
###
### Stage 1: builder
###
FROM docker.io/python:${PYTHON_VERSION}-slim as builder
FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as builder
# install the OS build deps
RUN \
@ -137,7 +139,7 @@ RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
### Stage 2: runtime
###
FROM docker.io/python:${PYTHON_VERSION}-slim
FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye
LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse'
LABEL org.opencontainers.image.documentation='https://github.com/matrix-org/synapse/blob/master/docker/README.md'


@ -17,7 +17,17 @@ ARG SYNAPSE_VERSION=latest
# the same debian version as Synapse's docker image (so the versions of the
# shared libraries match).
FROM postgres:13-bullseye AS postgres_base
# now build the final image, based on the Synapse image.
FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION
# copy the postgres installation over from the image we built above
RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
COPY --from=postgres:13-bullseye /usr/lib/postgresql /usr/lib/postgresql
COPY --from=postgres:13-bullseye /usr/share/postgresql /usr/share/postgresql
RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql
ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
ENV PGDATA=/var/lib/postgresql/data
# initialise the database cluster in /var/lib/postgresql
RUN gosu postgres initdb --locale=C --encoding=UTF-8 --auth-host password
@ -25,18 +35,6 @@ FROM postgres:13-bullseye AS postgres_base
RUN echo "ALTER USER postgres PASSWORD 'somesecret'" | gosu postgres postgres --single
RUN echo "CREATE DATABASE synapse" | gosu postgres postgres --single
# now build the final image, based on the Synapse image.
FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION
# copy the postgres installation over from the image we built above
RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
COPY --from=postgres_base /var/lib/postgresql /var/lib/postgresql
COPY --from=postgres_base /usr/lib/postgresql /usr/lib/postgresql
COPY --from=postgres_base /usr/share/postgresql /usr/share/postgresql
RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql
ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
ENV PGDATA=/var/lib/postgresql/data
# Extend the shared homeserver config to disable rate-limiting,
# set Complement's static shared secret, enable registration, amongst other
# tweaks to get Synapse ready for testing.


@ -1155,3 +1155,41 @@ GET /_synapse/admin/v1/username_available?username=$localpart
The request and response format is the same as the
[/_matrix/client/r0/register/available](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) API.
### Find a user based on their ID in an auth provider
The API is:
```
GET /_synapse/admin/v1/auth_providers/$provider/users/$external_id
```
When a user matches the given ID for the given provider, an HTTP code `200` with a response body like the following is returned:
```json
{
"user_id": "@hello:example.org"
}
```
**Parameters**
The following parameters should be set in the URL:
- `provider` - The ID of the authentication provider, as advertised by the [`GET /_matrix/client/v3/login`](https://spec.matrix.org/latest/client-server-api/#get_matrixclientv3login) API in the `m.login.sso` authentication method.
- `external_id` - The user ID from the authentication provider. Usually corresponds to the `sub` claim for OIDC providers, or to the `uid` attestation for SAML2 providers.
The `external_id` may have characters that are not URL-safe (typically `/`, `:` or `@`), so it is advised to URL-encode those parameters.
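For example, a minimal sketch in Python of calling this endpoint with a percent-encoded `external_id`; the homeserver URL, access token, provider ID and external ID below are hypothetical values for illustration:

```python
import json
import urllib.parse
import urllib.request

# Hypothetical values; substitute your own homeserver and admin token.
base_url = "https://synapse.example.org"
admin_token = "<admin access token>"
provider = "oidc-github"
external_id = "user/123"  # contains '/', so it must be percent-encoded

# safe="" makes quote() encode '/' as %2F instead of leaving it as a
# path separator.
path = "/_synapse/admin/v1/auth_providers/{}/users/{}".format(
    urllib.parse.quote(provider, safe=""),
    urllib.parse.quote(external_id, safe=""),
)

req = urllib.request.Request(
    base_url + path,
    headers={"Authorization": "Bearer " + admin_token},
)
with urllib.request.urlopen(req) as resp:
    print(json.load(resp))  # e.g. {"user_id": "@hello:example.org"}
```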
**Errors**
Returns a `404` HTTP status code if no user was found, with a response body like this:
```json
{
"errcode":"M_NOT_FOUND",
"error":"User not found"
}
```
_Added in Synapse 1.68.0._


@ -126,6 +126,23 @@ context of poetry's venv, without having to run `poetry shell` beforehand.
poetry install --extras all --remove-untracked
```
## ...delete everything and start over from scratch?
```shell
# Stop the current virtualenv if active
$ deactivate
# Remove all of the files from the current environment.
# Don't worry, even though it says "all", this will only
# remove the Poetry virtualenvs for the current project.
$ poetry env remove --all
# Reactivate Poetry shell to create the virtualenv again
$ poetry shell
# Install everything again
$ poetry install --extras all
```
## ...run a command in the `poetry` virtualenv?
Use `poetry run cmd args` when you need the python virtualenv context.
@ -256,6 +273,16 @@ from PyPI. (This is what makes poetry seem slow when doing the first
`poetry install`.) Try `poetry cache list` and `poetry cache clear --all
<name of cache>` to see if that fixes things.
## Remove outdated egg-info
Delete the `matrix_synapse.egg-info/` directory from the root of your Synapse
install.
This stores some cached information about dependencies and often conflicts with
letting Poetry do the right thing.
## Try `--verbose` or `--dry-run` arguments.
Sometimes useful to see what poetry's internal logic is.


@ -45,6 +45,10 @@ listens to traffic on localhost. (Do not change `bind_addresses` to `127.0.0.1`
when using a containerized Synapse, as that will prevent it from responding
to proxied traffic.)
Optionally, you can also set
[`request_id_header`](../usage/configuration/config_documentation.md#listeners)
so that the server extracts and re-uses the same request ID format that the
reverse proxy is using.
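For example, nginx can attach its built-in `$request_id` to each proxied request; the `X-Request-Id` header name here is an arbitrary illustrative choice, and the matching Synapse listener would then set `request_id_header: "X-Request-Id"` (see the configuration manual):

```
proxy_set_header X-Request-Id $request_id;
```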
## Reverse-proxy configuration examples


@ -89,6 +89,13 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```
# Upgrading to v1.68.0
As announced in the upgrade notes for v1.67.0, Synapse now requires a SQLite
version of 3.27.0 or higher if SQLite is in use, and source checkouts of Synapse
now require a recent Rust compiler.
# Upgrading to v1.67.0
## Direct TCP replication is no longer supported: migrate to Redis
@ -125,7 +132,7 @@ From the next major release (v1.68.0) Synapse will require SQLite 3.27.0 or
higher. Synapse v1.67.0 will be the last major release supporting SQLite
versions 3.22 to 3.26.
Those using docker images or Debian packages from Matrix.org will not be
Those using Docker images or Debian packages from Matrix.org will not be
affected. If you have installed from source, you should check the version of
SQLite used by Python with:
@ -135,6 +142,7 @@ python -c "import sqlite3; print(sqlite3.sqlite_version)"
If this is too old, refer to your distribution for advice on upgrading.
# Upgrading to v1.66.0
## Delegation of email validation no longer supported


@ -12,14 +12,14 @@ See the following for how to decode the dense data available from the default lo
| Part | Explanation |
| ----- | ------------ |
| AAAA | Timestamp request was logged (not recieved) |
| AAAA | Timestamp request was logged (not received) |
| BBBB | Logger name (`synapse.access.(http\|https).<tag>`, where 'tag' is defined in the `listeners` config section, normally the port) |
| CCCC | Line number in code |
| DDDD | Log Level |
| EEEE | Request Identifier (This identifier is shared by related log lines)|
| FFFF | Source IP (Or X-Forwarded-For if enabled) |
| GGGG | Server Port |
| HHHH | Federated Server or Local User making request (blank if unauthenticated or not supplied) |
| HHHH | Federated Server or Local User making request (blank if unauthenticated or not supplied).<br/>If this is of the form `@aaa:example.com|@bbb:example.com`, then that means that `@aaa:example.com` is authenticated but they are controlling `@bbb:example.com`, e.g. if `aaa` is controlling `bbb` [via the admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#login-as-a-user). |
| IIII | Total Time to process the request |
| JJJJ | Time to send response over network once generated (this may be negative if the socket is closed before the response is generated)|
| KKKK | Userland CPU time |


@ -435,7 +435,16 @@ Sub-options for each listener include:
* `tls`: set to true to enable TLS for this listener. Will use the TLS key/cert specified in tls_private_key_path / tls_certificate_path.
* `x_forwarded`: Only valid for an 'http' listener. Set to true to use the X-Forwarded-For header as the client IP. Useful when Synapse is
behind a reverse-proxy.
behind a [reverse-proxy](../../reverse_proxy.md).
* `request_id_header`: The header extracted from each incoming request that is
used as the basis for the request ID. The request ID is used in
[logs](../administration/request_log.md#request-log-format) and tracing to
correlate and match up requests. When unset, Synapse will automatically
generate sequential request IDs. This option is useful when Synapse is behind
a [reverse-proxy](../../reverse_proxy.md); see the sketch after this list.
_Added in Synapse 1.68.0._
* `resources`: Only valid for an 'http' listener. A list of resources to host
on this port. Sub-options for each resource are:
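A minimal sketch of a listener using the `request_id_header` option described above; the port, header name and resource list are illustrative values only:

```yaml
listeners:
  - port: 8008
    type: http
    x_forwarded: true
    # Must match the header the reverse proxy sets on each request.
    request_id_header: "X-Request-Id"
    resources:
      - names: [client, federation]
```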

poetry.lock (generated)

@ -535,11 +535,11 @@ python-versions = ">=3.7"
[[package]]
name = "matrix-common"
version = "1.2.1"
version = "1.3.0"
description = "Common utilities for Synapse, Sydent and Sygnal"
category = "main"
optional = false
python-versions = ">=3.6"
python-versions = ">=3.7"
[package.dependencies]
attrs = "*"
@ -1680,7 +1680,7 @@ url_preview = ["lxml"]
[metadata]
lock-version = "1.1"
python-versions = "^3.7.1"
content-hash = "d821a8abd8f3109e323e8128cb3aac63753794bb4f07d8dc35aed299230d8cad"
content-hash = "8697b449d4c7eb1eee9e10d5ff030d2a973576c5a9c8ad64fed9337489d5b37a"
[metadata.files]
attrs = [
@ -2217,8 +2217,8 @@ markupsafe = [
{file = "MarkupSafe-2.1.0.tar.gz", hash = "sha256:80beaf63ddfbc64a0452b841d8036ca0611e049650e20afcb882f5d3c266d65f"},
]
matrix-common = [
{file = "matrix_common-1.2.1-py3-none-any.whl", hash = "sha256:946709c405944a0d4b1d73207b77eb064b6dbfc5d70a69471320b06d8ce98b20"},
{file = "matrix_common-1.2.1.tar.gz", hash = "sha256:a99dcf02a6bd95b24a5a61b354888a2ac92bf2b4b839c727b8dd9da2cdfa3853"},
{file = "matrix_common-1.3.0-py3-none-any.whl", hash = "sha256:524e2785b9b03be4d15f3a8a6b857c5b6af68791ffb1b9918f0ad299abc4db20"},
{file = "matrix_common-1.3.0.tar.gz", hash = "sha256:62e121cccd9f243417b57ec37a76dc44aeb198a7a5c67afd6b8275992ff2abd1"},
]
matrix-synapse-ldap3 = [
{file = "matrix-synapse-ldap3-0.2.2.tar.gz", hash = "sha256:b388d95693486eef69adaefd0fd9e84463d52fe17b0214a00efcaa669b73cb74"},


@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml"
[tool.poetry]
name = "matrix-synapse"
version = "1.67.0"
version = "1.68.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@ -164,7 +164,7 @@ typing-extensions = ">=3.10.0.1"
cryptography = ">=3.4.7"
# ijson 3.1.4 fixes a bug with "." in property names
ijson = ">=3.1.4"
matrix-common = "^1.2.1"
matrix-common = "^1.3.0"
# We need packaging.requirements.Requirement, added in 16.1.
packaging = ">=16.1"
# At the time of writing, we only use functions from the version `importlib.metadata`


@ -18,7 +18,15 @@ crate-type = ["cdylib"]
name = "synapse.synapse_rust"
[dependencies]
pyo3 = { version = "0.16.5", features = ["extension-module", "macros", "abi3", "abi3-py37"] }
anyhow = "1.0.63"
lazy_static = "1.4.0"
log = "0.4.17"
pyo3 = { version = "0.17.1", features = ["extension-module", "macros", "anyhow", "abi3", "abi3-py37"] }
pyo3-log = "0.7.0"
pythonize = "0.17.0"
regex = "1.6.0"
serde = { version = "1.0.144", features = ["derive"] }
serde_json = "1.0.85"
[build-dependencies]
blake2 = "0.10.4"


@ -1,5 +1,7 @@
use pyo3::prelude::*;
pub mod push;
/// Returns the hash of all the rust source files at the time it was compiled.
///
/// Used by python to detect if the rust library is outdated.
@ -17,8 +19,13 @@ fn sum_as_string(a: usize, b: usize) -> PyResult<String> {
/// The entry point for defining the Python module.
#[pymodule]
fn synapse_rust(_py: Python<'_>, m: &PyModule) -> PyResult<()> {
fn synapse_rust(py: Python<'_>, m: &PyModule) -> PyResult<()> {
pyo3_log::init();
m.add_function(wrap_pyfunction!(sum_as_string, m)?)?;
m.add_function(wrap_pyfunction!(get_rust_file_digest, m)?)?;
push::register_module(py, m)?;
Ok(())
}

rust/src/push/base_rules.rs (new file)

@ -0,0 +1,335 @@
// Copyright 2022 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Contains the definitions of the "base" push rules.
use std::borrow::Cow;
use std::collections::HashMap;
use lazy_static::lazy_static;
use serde_json::Value;
use super::KnownCondition;
use crate::push::Action;
use crate::push::Condition;
use crate::push::EventMatchCondition;
use crate::push::PushRule;
use crate::push::SetTweak;
use crate::push::TweakValue;
const HIGHLIGHT_ACTION: Action = Action::SetTweak(SetTweak {
set_tweak: Cow::Borrowed("highlight"),
value: None,
other_keys: Value::Null,
});
const HIGHLIGHT_FALSE_ACTION: Action = Action::SetTweak(SetTweak {
set_tweak: Cow::Borrowed("highlight"),
value: Some(TweakValue::Other(Value::Bool(false))),
other_keys: Value::Null,
});
const SOUND_ACTION: Action = Action::SetTweak(SetTweak {
set_tweak: Cow::Borrowed("sound"),
value: Some(TweakValue::String(Cow::Borrowed("default"))),
other_keys: Value::Null,
});
const RING_ACTION: Action = Action::SetTweak(SetTweak {
set_tweak: Cow::Borrowed("sound"),
value: Some(TweakValue::String(Cow::Borrowed("ring"))),
other_keys: Value::Null,
});
pub const BASE_PREPEND_OVERRIDE_RULES: &[PushRule] = &[PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.master"),
priority_class: 5,
conditions: Cow::Borrowed(&[]),
actions: Cow::Borrowed(&[Action::DontNotify]),
default: true,
default_enabled: false,
}];
pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.suppress_notices"),
priority_class: 5,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("content.msgtype"),
pattern: Some(Cow::Borrowed("m.notice")),
pattern_type: None,
},
))]),
actions: Cow::Borrowed(&[Action::DontNotify]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.invite_for_me"),
priority_class: 5,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.room.member")),
pattern_type: None,
})),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("content.membership"),
pattern: Some(Cow::Borrowed("invite")),
pattern_type: None,
})),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("state_key"),
pattern: None,
pattern_type: Some(Cow::Borrowed("user_id")),
})),
]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION, SOUND_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.member_event"),
priority_class: 5,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.room.member")),
pattern_type: None,
},
))]),
actions: Cow::Borrowed(&[Action::DontNotify]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.contains_display_name"),
priority_class: 5,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::ContainsDisplayName)]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.roomnotif"),
priority_class: 5,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::SenderNotificationPermission {
key: Cow::Borrowed("room"),
}),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("content.body"),
pattern: Some(Cow::Borrowed("@room")),
pattern_type: None,
})),
]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.tombstone"),
priority_class: 5,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.room.tombstone")),
pattern_type: None,
})),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("state_key"),
pattern: Some(Cow::Borrowed("")),
pattern_type: None,
})),
]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.reaction"),
priority_class: 5,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.reaction")),
pattern_type: None,
},
))]),
actions: Cow::Borrowed(&[Action::DontNotify]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/override/.org.matrix.msc3786.rule.room.server_acl"),
priority_class: 5,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.room.server_acl")),
pattern_type: None,
})),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("state_key"),
pattern: Some(Cow::Borrowed("")),
pattern_type: None,
})),
]),
actions: Cow::Borrowed(&[]),
default: true,
default_enabled: true,
},
];
pub const BASE_APPEND_CONTENT_RULES: &[PushRule] = &[PushRule {
rule_id: Cow::Borrowed("global/content/.m.rule.contains_user_name"),
priority_class: 4,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("content.body"),
pattern: None,
pattern_type: Some(Cow::Borrowed("user_localpart")),
},
))]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]),
default: true,
default_enabled: true,
}];
pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
PushRule {
rule_id: Cow::Borrowed("global/underride/.m.rule.call"),
priority_class: 1,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.call.invite")),
pattern_type: None,
},
))]),
actions: Cow::Borrowed(&[Action::Notify, RING_ACTION, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.m.rule.room_one_to_one"),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.room.message")),
pattern_type: None,
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
}),
]),
actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.m.rule.encrypted_room_one_to_one"),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.room.encrypted")),
pattern_type: None,
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
}),
]),
actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.org.matrix.msc3772.thread_reply"),
priority_class: 1,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::RelationMatch {
rel_type: Cow::Borrowed("m.thread"),
sender: None,
sender_type: Some(Cow::Borrowed("user_id")),
})]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.m.rule.message"),
priority_class: 1,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.room.message")),
pattern_type: None,
},
))]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.m.rule.encrypted"),
priority_class: 1,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.room.encrypted")),
pattern_type: None,
},
))]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.im.vector.jitsi"),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("im.vector.modular.widgets")),
pattern_type: None,
})),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("content.type"),
pattern: Some(Cow::Borrowed("jitsi")),
pattern_type: None,
})),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("state_key"),
pattern: Some(Cow::Borrowed("*")),
pattern_type: None,
})),
]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
];
lazy_static! {
pub static ref BASE_RULES_BY_ID: HashMap<&'static str, &'static PushRule> =
BASE_PREPEND_OVERRIDE_RULES
.iter()
.chain(BASE_APPEND_OVERRIDE_RULES.iter())
.chain(BASE_APPEND_CONTENT_RULES.iter())
.chain(BASE_APPEND_UNDERRIDE_RULES.iter())
.map(|rule| { (&*rule.rule_id, rule) })
.collect();
}

rust/src/push/mod.rs (new file)

@ -0,0 +1,502 @@
// Copyright 2022 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! An implementation of Matrix push rules.
//!
//! The `Cow<_>` type is used extensively within this module to allow creating
//! the base rules as constants (in Rust constants can't require explicit
//! allocation atm).
//!
//! ---
//!
//! Push rules is the system used to determine which events trigger a push (and a
//! bump in notification counts).
//!
//! This consists of a list of "push rules" for each user, where a push rule is a
//! pair of "conditions" and "actions". When a user receives an event Synapse
//! iterates over the list of push rules until it finds one where all the conditions
//! match the event, at which point "actions" describe the outcome (e.g. notify,
//! highlight, etc).
//!
//! Push rules are split up into 5 different "kinds" (aka "priority classes"), which
//! are run in order:
//! 1. Override — highest priority rules, e.g. always ignore notices
//! 2. Content — content specific rules, e.g. @ notifications
//! 3. Room — per room rules, e.g. enable/disable notifications for all messages
//! in a room
//! 4. Sender — per sender rules, e.g. never notify for messages from a given
//! user
//! 5. Underride — the lowest priority "default" rules, e.g. notify for every
//! message.
//!
//! The set of "base rules" are the list of rules that every user has by default. A
//! user can modify their copy of the push rules in one of three ways:
//!
//! 1. Adding a new push rule of a certain kind
//! 2. Changing the actions of a base rule
//! 3. Enabling/disabling a base rule.
//!
//! The base rules are split into whether they come before or after a particular
//! kind, so the order of push rule evaluation would be: base rules for before
//! "override" kind, user defined "override" rules, base rules after "override"
//! kind, etc, etc.
use std::borrow::Cow;
use std::collections::{BTreeMap, HashMap, HashSet};
use anyhow::{Context, Error};
use log::warn;
use pyo3::prelude::*;
use pythonize::pythonize;
use serde::de::Error as _;
use serde::{Deserialize, Serialize};
use serde_json::Value;
mod base_rules;
/// Called when registering modules with python.
pub fn register_module(py: Python<'_>, m: &PyModule) -> PyResult<()> {
let child_module = PyModule::new(py, "push")?;
child_module.add_class::<PushRule>()?;
child_module.add_class::<PushRules>()?;
child_module.add_class::<FilteredPushRules>()?;
child_module.add_function(wrap_pyfunction!(get_base_rule_ids, m)?)?;
m.add_submodule(child_module)?;
// We need to manually add the module to sys.modules to make `from
// synapse.synapse_rust import push` work.
py.import("sys")?
.getattr("modules")?
.set_item("synapse.synapse_rust.push", child_module)?;
Ok(())
}
#[pyfunction]
fn get_base_rule_ids() -> HashSet<&'static str> {
base_rules::BASE_RULES_BY_ID.keys().copied().collect()
}
/// A single push rule for a user.
#[derive(Debug, Clone)]
#[pyclass(frozen)]
pub struct PushRule {
/// A unique ID for this rule
pub rule_id: Cow<'static, str>,
/// The "kind" of push rule this is (see `PRIORITY_CLASS_MAP` in Python)
#[pyo3(get)]
pub priority_class: i32,
/// The conditions that must all match for actions to be applied
pub conditions: Cow<'static, [Condition]>,
/// The actions to apply if all conditions are met
pub actions: Cow<'static, [Action]>,
/// Whether this is a base rule
#[pyo3(get)]
pub default: bool,
/// Whether this is enabled by default
#[pyo3(get)]
pub default_enabled: bool,
}
#[pymethods]
impl PushRule {
#[staticmethod]
pub fn from_db(
rule_id: String,
priority_class: i32,
conditions: &str,
actions: &str,
) -> Result<PushRule, Error> {
let conditions = serde_json::from_str(conditions).context("parsing conditions")?;
let actions = serde_json::from_str(actions).context("parsing actions")?;
Ok(PushRule {
rule_id: Cow::Owned(rule_id),
priority_class,
conditions,
actions,
default: false,
default_enabled: true,
})
}
#[getter]
fn rule_id(&self) -> &str {
&self.rule_id
}
#[getter]
fn actions(&self) -> Vec<Action> {
self.actions.clone().into_owned()
}
#[getter]
fn conditions(&self) -> Vec<Condition> {
self.conditions.clone().into_owned()
}
fn __repr__(&self) -> String {
format!(
"<PushRule rule_id={}, conditions={:?}, actions={:?}>",
self.rule_id, self.conditions, self.actions
)
}
}
/// The "action" Synapse should perform for a matching push rule.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Action {
DontNotify,
Notify,
Coalesce,
SetTweak(SetTweak),
// An unrecognized custom action.
Unknown(Value),
}
impl IntoPy<PyObject> for Action {
fn into_py(self, py: Python<'_>) -> PyObject {
// When we pass the `Action` struct to Python we want it to be converted
// to a dict. We use `pythonize`, which converts the struct using the
// `serde` serialization.
pythonize(py, &self).expect("valid action")
}
}
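// For illustration: with the `Serialize` impl below, `Action::Notify` crosses
// into Python as the string "notify", while a `SetTweak` action such as
// HIGHLIGHT_ACTION becomes a dict like {"set_tweak": "highlight"}, the same
// JSON shapes that `PushRule::from_db` parses back out of the database.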
/// The body of a `SetTweak` push action.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
pub struct SetTweak {
set_tweak: Cow<'static, str>,
#[serde(skip_serializing_if = "Option::is_none")]
value: Option<TweakValue>,
// This picks up any other fields that may have been added by clients.
// These get added when we convert the `Action` to a python object.
#[serde(flatten)]
other_keys: Value,
}
/// The value of a `set_tweak`.
///
/// We need this (rather than using `TweakValue` directly) so that we can use
/// `&'static str` in the value when defining the constant base rules.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
#[serde(untagged)]
pub enum TweakValue {
String(Cow<'static, str>),
Other(Value),
}
impl Serialize for Action {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
match self {
Action::DontNotify => serializer.serialize_str("dont_notify"),
Action::Notify => serializer.serialize_str("notify"),
Action::Coalesce => serializer.serialize_str("coalesce"),
Action::SetTweak(tweak) => tweak.serialize(serializer),
Action::Unknown(value) => value.serialize(serializer),
}
}
}
/// Simple helper class for deserializing Action from JSON.
#[derive(Deserialize)]
#[serde(untagged)]
enum ActionDeserializeHelper {
Str(String),
SetTweak(SetTweak),
Unknown(Value),
}
impl<'de> Deserialize<'de> for Action {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let helper: ActionDeserializeHelper = Deserialize::deserialize(deserializer)?;
match helper {
ActionDeserializeHelper::Str(s) => match &*s {
"dont_notify" => Ok(Action::DontNotify),
"notify" => Ok(Action::Notify),
"coalesce" => Ok(Action::Coalesce),
_ => Err(D::Error::custom("unrecognized action")),
},
ActionDeserializeHelper::SetTweak(set_tweak) => Ok(Action::SetTweak(set_tweak)),
ActionDeserializeHelper::Unknown(value) => Ok(Action::Unknown(value)),
}
}
}
/// A condition used in push rules to match against an event.
///
/// We need this split as `serde` doesn't give us the ability to have a
/// "catchall" variant in tagged enums.
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(untagged)]
pub enum Condition {
/// A recognized condition that we can match against
Known(KnownCondition),
/// An unrecognized condition that we ignore.
Unknown(Value),
}
/// The set of "known" conditions that we can handle.
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(rename_all = "snake_case")]
#[serde(tag = "kind")]
pub enum KnownCondition {
EventMatch(EventMatchCondition),
ContainsDisplayName,
RoomMemberCount {
#[serde(skip_serializing_if = "Option::is_none")]
is: Option<Cow<'static, str>>,
},
SenderNotificationPermission {
key: Cow<'static, str>,
},
#[serde(rename = "org.matrix.msc3772.relation_match")]
RelationMatch {
rel_type: Cow<'static, str>,
#[serde(skip_serializing_if = "Option::is_none")]
sender: Option<Cow<'static, str>>,
#[serde(skip_serializing_if = "Option::is_none")]
sender_type: Option<Cow<'static, str>>,
},
}
impl IntoPy<PyObject> for Condition {
fn into_py(self, py: Python<'_>) -> PyObject {
pythonize(py, &self).expect("valid condition")
}
}
/// The body of a [`Condition::EventMatch`]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct EventMatchCondition {
key: Cow<'static, str>,
#[serde(skip_serializing_if = "Option::is_none")]
pattern: Option<Cow<'static, str>>,
#[serde(skip_serializing_if = "Option::is_none")]
pattern_type: Option<Cow<'static, str>>,
}
/// The collection of push rules for a user.
#[derive(Debug, Clone, Default)]
#[pyclass(frozen)]
struct PushRules {
/// Custom push rules that override a base rule.
overridden_base_rules: HashMap<Cow<'static, str>, PushRule>,
/// Custom rules that come between the prepend/append override base rules.
override_rules: Vec<PushRule>,
/// Custom rules that come before the base content rules.
content: Vec<PushRule>,
/// Custom rules that come before the base room rules.
room: Vec<PushRule>,
/// Custom rules that come before the base sender rules.
sender: Vec<PushRule>,
/// Custom rules that come before the base underride rules.
underride: Vec<PushRule>,
}
#[pymethods]
impl PushRules {
#[new]
fn new(rules: Vec<PushRule>) -> PushRules {
let mut push_rules: PushRules = Default::default();
for rule in rules {
if let Some(&o) = base_rules::BASE_RULES_BY_ID.get(&*rule.rule_id) {
push_rules.overridden_base_rules.insert(
rule.rule_id.clone(),
PushRule {
actions: rule.actions.clone(),
..o.clone()
},
);
continue;
}
match rule.priority_class {
5 => push_rules.override_rules.push(rule),
4 => push_rules.content.push(rule),
3 => push_rules.room.push(rule),
2 => push_rules.sender.push(rule),
1 => push_rules.underride.push(rule),
_ => {
warn!(
"Unrecognized priority class for rule {}: {}",
rule.rule_id, rule.priority_class
);
}
}
}
push_rules
}
/// Returns the list of all rules, including base rules, in the order in
/// which they should be executed.
fn rules(&self) -> Vec<PushRule> {
self.iter().cloned().collect()
}
}
impl PushRules {
/// Iterates over all the rules, including base rules, in the order in
/// which they should be executed.
pub fn iter(&self) -> impl Iterator<Item = &PushRule> {
base_rules::BASE_PREPEND_OVERRIDE_RULES
.iter()
.chain(self.override_rules.iter())
.chain(base_rules::BASE_APPEND_OVERRIDE_RULES.iter())
.chain(self.content.iter())
.chain(base_rules::BASE_APPEND_CONTENT_RULES.iter())
.chain(self.room.iter())
.chain(self.sender.iter())
.chain(self.underride.iter())
.chain(base_rules::BASE_APPEND_UNDERRIDE_RULES.iter())
.map(|rule| {
self.overridden_base_rules
.get(&*rule.rule_id)
.unwrap_or(rule)
})
}
}
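To illustrate the interleaving above from the Python side (hypothetical custom rule ID; assumes the extension is built): a custom override rule (priority class 5) lands after the prepended base override rules and before the appended ones.

from synapse.synapse_rust.push import PushRule, PushRules

custom = PushRule.from_db("custom_rule", 5, "[]", '["notify"]')
ids = [r.rule_id for r in PushRules([custom]).rules()]
# ".m.rule.master" is a prepended base override rule, so it comes first.
assert ids.index("global/override/.m.rule.master") < ids.index("custom_rule")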
/// A wrapper around `PushRules` that checks the enabled state of rules and
/// filters out disabled experimental rules.
#[derive(Debug, Clone, Default)]
#[pyclass(frozen)]
pub struct FilteredPushRules {
push_rules: PushRules,
enabled_map: BTreeMap<String, bool>,
msc3786_enabled: bool,
msc3772_enabled: bool,
}
#[pymethods]
impl FilteredPushRules {
#[new]
fn py_new(
push_rules: PushRules,
enabled_map: BTreeMap<String, bool>,
msc3786_enabled: bool,
msc3772_enabled: bool,
) -> Self {
Self {
push_rules,
enabled_map,
msc3786_enabled,
msc3772_enabled,
}
}
/// Returns the list of all rules and their enabled state, including base
/// rules, in the order in which they should be executed.
fn rules(&self) -> Vec<(PushRule, bool)> {
self.iter().map(|(r, e)| (r.clone(), e)).collect()
}
}
impl FilteredPushRules {
/// Iterates over all the rules and their enabled state, including base
/// rules, in the order in which they should be executed.
fn iter(&self) -> impl Iterator<Item = (&PushRule, bool)> {
self.push_rules
.iter()
.filter(|rule| {
// Ignore disabled experimental push rules
if !self.msc3786_enabled
&& rule.rule_id == "global/override/.org.matrix.msc3786.rule.room.server_acl"
{
return false;
}
if !self.msc3772_enabled
&& rule.rule_id == "global/underride/.org.matrix.msc3772.thread_reply"
{
return false;
}
true
})
.map(|r| {
let enabled = *self
.enabled_map
.get(&*r.rule_id)
.unwrap_or(&r.default_enabled);
(r, enabled)
})
}
}
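A sketch of the filtering from Python: with `msc3772_enabled=False` the experimental thread-reply base rule is dropped from iteration entirely, while every other rule is paired with its enabled state.

from synapse.synapse_rust.push import FilteredPushRules, PushRules

filtered = FilteredPushRules(
    PushRules([]),  # no custom rules; base rules still apply
    {},             # no per-rule enable/disable overrides
    msc3786_enabled=False,
    msc3772_enabled=False,
)
rule_ids = [rule.rule_id for rule, enabled in filtered.rules()]
assert "global/underride/.org.matrix.msc3772.thread_reply" not in rule_ids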
#[test]
fn test_serialize_condition() {
let condition = Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: "content.body".into(),
pattern: Some("coffee".into()),
pattern_type: None,
}));
let json = serde_json::to_string(&condition).unwrap();
assert_eq!(
json,
r#"{"kind":"event_match","key":"content.body","pattern":"coffee"}"#
)
}
#[test]
fn test_deserialize_condition() {
let json = r#"{"kind":"event_match","key":"content.body","pattern":"coffee"}"#;
let _: Condition = serde_json::from_str(json).unwrap();
}
#[test]
fn test_deserialize_custom_condition() {
let json = r#"{"kind":"custom_tag"}"#;
let condition: Condition = serde_json::from_str(json).unwrap();
assert!(matches!(condition, Condition::Unknown(_)));
let new_json = serde_json::to_string(&condition).unwrap();
assert_eq!(json, new_json);
}
#[test]
fn test_deserialize_action() {
let _: Action = serde_json::from_str(r#""notify""#).unwrap();
let _: Action = serde_json::from_str(r#""dont_notify""#).unwrap();
let _: Action = serde_json::from_str(r#""coalesce""#).unwrap();
let _: Action = serde_json::from_str(r#"{"set_tweak": "highlight"}"#).unwrap();
}
#[test]
fn test_custom_action() {
let json = r#"{"some_custom":"action_fields"}"#;
let action: Action = serde_json::from_str(json).unwrap();
assert!(matches!(action, Action::Unknown(_)));
let new_json = serde_json::to_string(&action).unwrap();
assert_eq!(json, new_json);
}

View file

@ -2,23 +2,16 @@
#
# This script generates SQL files for creating a brand new Synapse DB with the latest
# schema, on both SQLite3 and Postgres.
#
# It does so by having Synapse generate an up-to-date SQLite DB, then running
# synapse_port_db to convert it to Postgres. It then dumps the contents of both.
export PGHOST="localhost"
POSTGRES_DB_NAME="synapse_full_schema.$$"
SQLITE_SCHEMA_FILE="schema.sql.sqlite"
SQLITE_ROWS_FILE="rows.sql.sqlite"
POSTGRES_SCHEMA_FILE="full.sql.postgres"
POSTGRES_ROWS_FILE="rows.sql.postgres"
POSTGRES_MAIN_DB_NAME="synapse_full_schema_main.$$"
POSTGRES_COMMON_DB_NAME="synapse_full_schema_common.$$"
POSTGRES_STATE_DB_NAME="synapse_full_schema_state.$$"
REQUIRED_DEPS=("matrix-synapse" "psycopg2")
usage() {
echo
echo "Usage: $0 -p <postgres_username> -o <path> [-c] [-n] [-h]"
echo "Usage: $0 -p <postgres_username> -o <path> [-c] [-n <schema number>] [-h]"
echo
echo "-p <postgres_username>"
echo " Username to connect to local postgres instance. The password will be requested"
@ -27,11 +20,16 @@ usage() {
echo " CI mode. Prints every command that the script runs."
echo "-o <path>"
echo " Directory to output full schema files to."
echo "-n <schema number>"
echo " Schema number for the new snapshot. Used to set the location of files within "
echo " the output directory, mimicking that of synapse/storage/schemas."
echo " Defaults to 9999."
echo "-h"
echo " Display this help text."
}
while getopts "p:co:h" opt; do
SCHEMA_NUMBER="9999"
while getopts "p:co:hn:" opt; do
case $opt in
p)
export PGUSER=$OPTARG
@ -48,6 +46,9 @@ while getopts "p:co:h" opt; do
usage
exit
;;
n)
SCHEMA_NUMBER="$OPTARG"
;;
\?)
echo "ERROR: Invalid option: -$OPTARG" >&2
usage
@ -95,12 +96,21 @@ cd "$(dirname "$0")/.."
TMPDIR=$(mktemp -d)
KEY_FILE=$TMPDIR/test.signing.key # default Synapse signing key path
SQLITE_CONFIG=$TMPDIR/sqlite.conf
SQLITE_DB=$TMPDIR/homeserver.db
SQLITE_MAIN_DB=$TMPDIR/main.db
SQLITE_STATE_DB=$TMPDIR/state.db
SQLITE_COMMON_DB=$TMPDIR/common.db
POSTGRES_CONFIG=$TMPDIR/postgres.conf
# Ensure these files are deleted on script exit
# TODO: the trap should also drop the temp postgres DB
trap 'rm -rf $TMPDIR' EXIT
cleanup() {
echo "Cleaning up temporary sqlite database and config files..."
rm -r "$TMPDIR"
echo "Cleaning up temporary Postgres database..."
dropdb --if-exists "$POSTGRES_COMMON_DB_NAME"
dropdb --if-exists "$POSTGRES_MAIN_DB_NAME"
dropdb --if-exists "$POSTGRES_STATE_DB_NAME"
}
trap 'cleanup' EXIT
cat > "$SQLITE_CONFIG" <<EOF
server_name: "test"
@ -110,10 +120,22 @@ macaroon_secret_key: "abcde"
report_stats: false
database:
name: "sqlite3"
args:
database: "$SQLITE_DB"
databases:
common:
name: "sqlite3"
data_stores: []
args:
database: "$SQLITE_COMMON_DB"
main:
name: "sqlite3"
data_stores: ["main"]
args:
database: "$SQLITE_MAIN_DB"
state:
name: "sqlite3"
data_stores: ["state"]
args:
database: "$SQLITE_STATE_DB"
# Suppress the key server warning.
trusted_key_servers: []
@ -127,13 +149,32 @@ macaroon_secret_key: "abcde"
report_stats: false
database:
name: "psycopg2"
args:
user: "$PGUSER"
host: "$PGHOST"
password: "$PGPASSWORD"
database: "$POSTGRES_DB_NAME"
databases:
common:
name: "psycopg2"
data_stores: []
args:
user: "$PGUSER"
host: "$PGHOST"
password: "$PGPASSWORD"
database: "$POSTGRES_COMMON_DB_NAME"
main:
name: "psycopg2"
data_stores: ["main"]
args:
user: "$PGUSER"
host: "$PGHOST"
password: "$PGPASSWORD"
database: "$POSTGRES_MAIN_DB_NAME"
state:
name: "psycopg2"
data_stores: ["state"]
args:
user: "$PGUSER"
host: "$PGHOST"
password: "$PGPASSWORD"
database: "$POSTGRES_STATE_DB_NAME"
# Suppress the key server warning.
trusted_key_servers: []
@ -148,33 +189,76 @@ echo "Running db background jobs..."
synapse/_scripts/update_synapse_database.py --database-config "$SQLITE_CONFIG" --run-background-updates
# Create the PostgreSQL database.
echo "Creating postgres database..."
createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_DB_NAME"
echo "Creating postgres databases..."
createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_COMMON_DB_NAME"
createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_MAIN_DB_NAME"
createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_STATE_DB_NAME"
echo "Running db background jobs..."
synapse/_scripts/update_synapse_database.py --database-config "$POSTGRES_CONFIG" --run-background-updates
# Delete schema_version, applied_schema_deltas and applied_module_schemas tables
# Also delete any shadow tables from fts4
echo "Dropping unwanted db tables..."
SQL="
# Some common tables are created and updated by Synapse itself and do not belong in the
# schema.
DROP_APP_MANAGED_TABLES="
DROP TABLE schema_version;
DROP TABLE schema_compat_version;
DROP TABLE applied_schema_deltas;
DROP TABLE applied_module_schemas;
"
sqlite3 "$SQLITE_DB" <<< "$SQL"
psql "$POSTGRES_DB_NAME" -w <<< "$SQL"
# Other common tables are not created by Synapse and do belong in the schema.
# TODO: we could derive DROP_COMMON_TABLES from the dump of the common-only DB. But
# since there's only one table there, I haven't bothered to do so.
DROP_COMMON_TABLES="$DROP_APP_MANAGED_TABLES
DROP TABLE background_updates;
"
echo "Dumping SQLite3 schema to '$OUTPUT_DIR/$SQLITE_SCHEMA_FILE' and '$OUTPUT_DIR/$SQLITE_ROWS_FILE'..."
sqlite3 "$SQLITE_DB" ".schema --indent" > "$OUTPUT_DIR/$SQLITE_SCHEMA_FILE"
sqlite3 "$SQLITE_DB" ".dump --data-only --nosys" > "$OUTPUT_DIR/$SQLITE_ROWS_FILE"
sqlite3 "$SQLITE_COMMON_DB" <<< "$DROP_APP_MANAGED_TABLES"
sqlite3 "$SQLITE_MAIN_DB" <<< "$DROP_COMMON_TABLES"
sqlite3 "$SQLITE_STATE_DB" <<< "$DROP_COMMON_TABLES"
psql "$POSTGRES_COMMON_DB_NAME" -w <<< "$DROP_APP_MANAGED_TABLES"
psql "$POSTGRES_MAIN_DB_NAME" -w <<< "$DROP_COMMON_TABLES"
psql "$POSTGRES_STATE_DB_NAME" -w <<< "$DROP_COMMON_TABLES"
echo "Dumping Postgres schema to '$OUTPUT_DIR/$POSTGRES_SCHEMA_FILE' and '$OUTPUT_DIR/$POSTGRES_ROWS_FILE'..."
pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner "$POSTGRES_DB_NAME" | sed -e '/^$/d' -e '/^--/d' -e 's/public\.//g' -e '/^SET /d' -e '/^SELECT /d' > "$OUTPUT_DIR/$POSTGRES_SCHEMA_FILE"
pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owner "$POSTGRES_DB_NAME" | sed -e '/^$/d' -e '/^--/d' -e 's/public\.//g' -e '/^SET /d' -e '/^SELECT /d' > "$OUTPUT_DIR/$POSTGRES_ROWS_FILE"
# For Reasons(TM), SQLite's `.schema` also dumps out "shadow tables", the implementation
# details behind full text search tables. Omit these from the dumps.
echo "Cleaning up temporary Postgres database..."
dropdb $POSTGRES_DB_NAME
sqlite3 "$SQLITE_MAIN_DB" <<< "
DROP TABLE event_search_content;
DROP TABLE event_search_segments;
DROP TABLE event_search_segdir;
DROP TABLE event_search_docsize;
DROP TABLE event_search_stat;
DROP TABLE user_directory_search_content;
DROP TABLE user_directory_search_segments;
DROP TABLE user_directory_search_segdir;
DROP TABLE user_directory_search_docsize;
DROP TABLE user_directory_search_stat;
"
echo "Dumping SQLite3 schema..."
mkdir -p "$OUTPUT_DIR/"{common,main,state}"/full_schema/$SCHEMA_NUMBER"
sqlite3 "$SQLITE_COMMON_DB" ".schema --indent" > "$OUTPUT_DIR/common/full_schema/$SCHEMA_NUMBER/full.sql.sqlite"
sqlite3 "$SQLITE_COMMON_DB" ".dump --data-only --nosys" >> "$OUTPUT_DIR/common/full_schema/$SCHEMA_NUMBER/full.sql.sqlite"
sqlite3 "$SQLITE_MAIN_DB" ".schema --indent" > "$OUTPUT_DIR/main/full_schema/$SCHEMA_NUMBER/full.sql.sqlite"
sqlite3 "$SQLITE_MAIN_DB" ".dump --data-only --nosys" >> "$OUTPUT_DIR/main/full_schema/$SCHEMA_NUMBER/full.sql.sqlite"
sqlite3 "$SQLITE_STATE_DB" ".schema --indent" > "$OUTPUT_DIR/state/full_schema/$SCHEMA_NUMBER/full.sql.sqlite"
sqlite3 "$SQLITE_STATE_DB" ".dump --data-only --nosys" >> "$OUTPUT_DIR/state/full_schema/$SCHEMA_NUMBER/full.sql.sqlite"
cleanup_pg_schema() {
sed -e '/^$/d' -e '/^--/d' -e 's/public\.//g' -e '/^SET /d' -e '/^SELECT /d'
}
echo "Dumping Postgres schema..."
pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner "$POSTGRES_COMMON_DB_NAME" | cleanup_pg_schema > "$OUTPUT_DIR/common/full_schema/$SCHEMA_NUMBER/full.sql.postgres"
pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owner "$POSTGRES_COMMON_DB_NAME" | cleanup_pg_schema >> "$OUTPUT_DIR/common/full_schema/$SCHEMA_NUMBER/full.sql.postgres"
pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner "$POSTGRES_MAIN_DB_NAME" | cleanup_pg_schema > "$OUTPUT_DIR/main/full_schema/$SCHEMA_NUMBER/full.sql.postgres"
pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owner "$POSTGRES_MAIN_DB_NAME" | cleanup_pg_schema >> "$OUTPUT_DIR/main/full_schema/$SCHEMA_NUMBER/full.sql.postgres"
pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner "$POSTGRES_STATE_DB_NAME" | cleanup_pg_schema > "$OUTPUT_DIR/state/full_schema/$SCHEMA_NUMBER/full.sql.postgres"
pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owner "$POSTGRES_STATE_DB_NAME" | cleanup_pg_schema >> "$OUTPUT_DIR/state/full_schema/$SCHEMA_NUMBER/full.sql.postgres"
echo "Done! Files dumped to: $OUTPUT_DIR"

View file

@ -0,0 +1,37 @@
from typing import Any, Collection, Dict, Mapping, Sequence, Tuple, Union
from synapse.types import JsonDict
class PushRule:
@property
def rule_id(self) -> str: ...
@property
def priority_class(self) -> int: ...
@property
def conditions(self) -> Sequence[Mapping[str, str]]: ...
@property
def actions(self) -> Sequence[Union[Mapping[str, Any], str]]: ...
@property
def default(self) -> bool: ...
@property
def default_enabled(self) -> bool: ...
@staticmethod
def from_db(
rule_id: str, priority_class: int, conditions: str, actions: str
) -> "PushRule": ...
class PushRules:
def __init__(self, rules: Collection[PushRule]): ...
def rules(self) -> Collection[PushRule]: ...
class FilteredPushRules:
def __init__(
self,
push_rules: PushRules,
enabled_map: Dict[str, bool],
msc3786_enabled: bool,
msc3772_enabled: bool,
): ...
def rules(self) -> Collection[Tuple[PushRule, bool]]: ...
def get_base_rule_ids() -> Collection[str]: ...

View file

@ -48,10 +48,13 @@ class MockHomeserver(HomeServer):
def run_background_updates(hs: HomeServer) -> None:
store = hs.get_datastores().main
main = hs.get_datastores().main
state = hs.get_datastores().state
async def run_background_updates() -> None:
await store.db_pool.updates.run_background_updates(sleep=False)
await main.db_pool.updates.run_background_updates(sleep=False)
if state:
await state.db_pool.updates.run_background_updates(sleep=False)
# Stop the reactor to exit the script once every background update is run.
reactor.stop()
@ -97,8 +100,11 @@ def main() -> None:
# Load, process and sanity-check the config.
hs_config = yaml.safe_load(args.database_config)
if "database" not in hs_config:
sys.stderr.write("The configuration file must have a 'database' section.\n")
if "database" not in hs_config and "databases" not in hs_config:
sys.stderr.write(
"The configuration file must have a 'database' or 'databases' section. "
"See https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#database"
)
sys.exit(4)
config = HomeServerConfig()

View file

@ -206,6 +206,7 @@ class HttpListenerConfig:
resources: List[HttpResourceConfig] = attr.Factory(list)
additional_resources: Dict[str, dict] = attr.Factory(dict)
tag: Optional[str] = None
request_id_header: Optional[str] = None
@attr.s(slots=True, frozen=True, auto_attribs=True)
@ -520,9 +521,11 @@ class ServerConfig(Config):
):
raise ConfigError("allowed_avatar_mimetypes must be a list")
self.listeners = [
parse_listener_def(i, x) for i, x in enumerate(config.get("listeners", []))
]
listeners = config.get("listeners", [])
if not isinstance(listeners, list):
raise ConfigError("Expected a list", ("listeners",))
self.listeners = [parse_listener_def(i, x) for i, x in enumerate(listeners)]
# no_tls is not really supported any more, but let's grandfather it in
# here.
@ -889,6 +892,9 @@ def read_gc_thresholds(
def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
"""parse a listener config from the config file"""
if not isinstance(listener, dict):
raise ConfigError("Expected a dictionary", ("listeners", str(num)))
listener_type = listener["type"]
# Raise a helpful error if direct TCP replication is still configured.
if listener_type == "replication":
@ -928,6 +934,7 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
resources=resources,
additional_resources=listener.get("additional_resources", {}),
tag=listener.get("tag"),
request_id_header=listener.get("request_id_header"),
)
return ListenerConfig(port, bind_addresses, listener_type, tls, http_config)

View file

@ -167,7 +167,6 @@ class EventBuilder:
"content": self.content,
"unsigned": self.unsigned,
"depth": depth,
"prev_state": [],
}
if self.is_state():

View file

@ -906,9 +906,6 @@ class FederationClient(FederationBase):
# The protoevent received over the JSON wire may not have all
# the required fields. Lets just gloss over that because
# there's some we never care about
if "prev_state" not in pdu_dict:
pdu_dict["prev_state"] = []
ev = builder.create_local_event_from_event_dict(
self._clock,
self.hostname,

View file

@ -63,7 +63,6 @@ from synapse.http.server import finish_request, respond_with_html
from synapse.http.site import SynapseRequest
from synapse.logging.context import defer_to_thread
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.roommember import ProfileInfo
from synapse.types import JsonDict, Requester, UserID
from synapse.util import stringutils as stringutils
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
@ -1687,41 +1686,10 @@ class AuthHandler:
respond_with_html(request, 403, self._sso_account_deactivated_template)
return
profile = await self.store.get_profileinfo(
user_profile_data = await self.store.get_profileinfo(
UserID.from_string(registered_user_id).localpart
)
self._complete_sso_login(
registered_user_id,
auth_provider_id,
request,
client_redirect_url,
extra_attributes,
new_user=new_user,
user_profile_data=profile,
auth_provider_session_id=auth_provider_session_id,
)
def _complete_sso_login(
self,
registered_user_id: str,
auth_provider_id: str,
request: Request,
client_redirect_url: str,
extra_attributes: Optional[JsonDict] = None,
new_user: bool = False,
user_profile_data: Optional[ProfileInfo] = None,
auth_provider_session_id: Optional[str] = None,
) -> None:
"""
The synchronous portion of complete_sso_login.
This exists purely for backwards compatibility of synapse.module_api.ModuleApi.
"""
if user_profile_data is None:
user_profile_data = ProfileInfo(None, None)
# Store any extra attributes which will be passed in the login response.
# Note that this is per-user so it may overwrite a previous value, this
# is considered OK since the newest SSO attributes should be most valid.

View file

@ -188,18 +188,21 @@ class E2eKeysHandler:
)
invalid_cached_users = cached_users - valid_cached_users
if invalid_cached_users:
# Fix up results. If we get here, there is either a bug in device
# list tracking, or we hit the race mentioned above.
# Fix up results. If we get here, it means there was either a bug in
# device list tracking, or we hit the race mentioned above.
# TODO: In practice, this path is hit fairly often in existing
# deployments when clients query the keys of departed remote
# users. A background update to mark the appropriate device
# lists as unsubscribed is needed.
# https://github.com/matrix-org/synapse/issues/13651
# Note that this currently introduces a failure mode when clients
# are trying to decrypt old messages from a remote user whose
# homeserver is no longer available. We may want to consider falling
# back to the cached data when we fail to retrieve a device list
# over federation for such remote users.
user_ids_not_in_cache.update(invalid_cached_users)
for invalid_user_id in invalid_cached_users:
remote_results.pop(invalid_user_id)
# This log message may be removed if it turns out it's almost
# entirely triggered by races.
logger.error(
"Devices for %s were cached, but the server no longer shares "
"any rooms with them. The cached device lists are stale.",
invalid_cached_users,
)
for user_id, devices in remote_results.items():
user_devices = results.setdefault(user_id, {})

View file

@ -866,6 +866,11 @@ class FederationEventHandler:
event.room_id, event_id, str(err)
)
return
except Exception as exc:
await self._store.record_event_failed_pull_attempt(
event.room_id, event_id, str(exc)
)
raise exc
try:
try:
@ -908,6 +913,11 @@ class FederationEventHandler:
logger.warning("Pulled event %s failed history check.", event_id)
else:
raise
except Exception as exc:
await self._store.record_event_failed_pull_attempt(
event.room_id, event_id, str(exc)
)
raise exc
@trace
async def _compute_event_context_with_maybe_missing_prevs(

View file

@ -16,14 +16,17 @@ from typing import TYPE_CHECKING, List, Optional, Union
import attr
from synapse.api.errors import SynapseError, UnrecognizedRequestError
from synapse.push.baserules import BASE_RULE_IDS
from synapse.storage.push_rule import RuleNotFoundException
from synapse.synapse_rust.push import get_base_rule_ids
from synapse.types import JsonDict
if TYPE_CHECKING:
from synapse.server import HomeServer
BASE_RULE_IDS = get_base_rule_ids()
@attr.s(slots=True, frozen=True, auto_attribs=True)
class RuleSpec:
scope: str

View file

@ -72,10 +72,12 @@ class SynapseRequest(Request):
site: "SynapseSite",
*args: Any,
max_request_body_size: int = 1024,
request_id_header: Optional[str] = None,
**kw: Any,
):
super().__init__(channel, *args, **kw)
self._max_request_body_size = max_request_body_size
self.request_id_header = request_id_header
self.synapse_site = site
self.reactor = site.reactor
self._channel = channel # this is used by the tests
@ -172,7 +174,14 @@ class SynapseRequest(Request):
self._tracing_span = span
def get_request_id(self) -> str:
return "%s-%i" % (self.get_method(), self.request_seq)
request_id_value = None
if self.request_id_header:
request_id_value = self.getHeader(self.request_id_header)
if request_id_value is None:
request_id_value = str(self.request_seq)
return "%s-%s" % (self.get_method(), request_id_value)
def get_redacted_uri(self) -> str:
"""Gets the redacted URI associated with the request (or placeholder if the URI
@ -619,12 +628,15 @@ class SynapseSite(Site):
proxied = config.http_options.x_forwarded
request_class = XForwardedForRequest if proxied else SynapseRequest
request_id_header = config.http_options.request_id_header
def request_factory(channel: HTTPChannel, queued: bool) -> Request:
return request_class(
channel,
self,
max_request_body_size=max_request_body_size,
queued=queued,
request_id_header=request_id_header,
)
self.requestFactory = request_factory # type: ignore
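The new `request_id_header` option changes how request IDs are derived for logging; a minimal standalone sketch of the logic (hypothetical helper, not Synapse code):

from typing import Optional

def derive_request_id(method: str, request_seq: int, header_value: Optional[str]) -> str:
    # With request_id_header configured (e.g. "X-Request-Id" behind a proxy),
    # the upstream ID replaces the per-connection sequence number.
    return "%s-%s" % (method, header_value if header_value is not None else request_seq)

assert derive_request_id("GET", 7, None) == "GET-7"
assert derive_request_id("GET", 7, "abc123") == "GET-abc123"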

View file

@ -836,31 +836,6 @@ class ModuleApi:
self._store.db_pool.runInteraction(desc, func, *args, **kwargs) # type: ignore[arg-type]
)
def complete_sso_login(
self, registered_user_id: str, request: SynapseRequest, client_redirect_url: str
) -> None:
"""Complete a SSO login by redirecting the user to a page to confirm whether they
want their access token sent to `client_redirect_url`, or redirect them to that
URL with a token directly if the URL matches with one of the whitelisted clients.
This is deprecated in favor of complete_sso_login_async.
Added in Synapse v1.11.1.
Args:
registered_user_id: The MXID that has been registered as a previous step
of this SSO login.
request: The request to respond to.
client_redirect_url: The URL to which to offer to redirect the user (or to
redirect them directly if whitelisted).
"""
self._auth_handler._complete_sso_login(
registered_user_id,
"<unknown>",
request,
client_redirect_url,
)
async def complete_sso_login_async(
self,
registered_user_id: str,

View file

@ -1,583 +0,0 @@
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2017 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Push rules is the system used to determine which events trigger a push (and a
bump in notification counts).
This consists of a list of "push rules" for each user, where a push rule is a
pair of "conditions" and "actions". When a user receives an event Synapse
iterates over the list of push rules until it finds one where all the conditions
match the event, at which point "actions" describe the outcome (e.g. notify,
highlight, etc).
Push rules are split up into 5 different "kinds" (aka "priority classes"), which
are run in order:
1. Override: highest priority rules, e.g. always ignore notices
2. Content: content-specific rules, e.g. @ notifications
3. Room: per-room rules, e.g. enable/disable notifications for all messages
in a room
4. Sender: per-sender rules, e.g. never notify for messages from a given
user
5. Underride: the lowest priority "default" rules, e.g. notify for every
message.
The set of "base rules" are the list of rules that every user has by default. A
user can modify their copy of the push rules in one of three ways:
1. Adding a new push rule of a certain kind
2. Changing the actions of a base rule
3. Enabling/disabling a base rule.
The base rules are split into whether they come before or after a particular
kind, so the order of push rule evaluation would be: base rules for before
"override" kind, user defined "override" rules, base rules after "override"
kind, etc, etc.
"""
import itertools
import logging
from typing import Dict, Iterator, List, Mapping, Sequence, Tuple, Union
import attr
from synapse.config.experimental import ExperimentalConfig
from synapse.push.rulekinds import PRIORITY_CLASS_MAP
logger = logging.getLogger(__name__)
@attr.s(auto_attribs=True, slots=True, frozen=True)
class PushRule:
"""A push rule
Attributes:
rule_id: a unique ID for this rule
priority_class: what "kind" of push rule this is (see
`PRIORITY_CLASS_MAP` for mapping between int and kind)
conditions: the sequence of conditions that all need to match
actions: the actions to apply if all conditions are met
default: is this a base rule?
default_enabled: is this enabled by default?
"""
rule_id: str
priority_class: int
conditions: Sequence[Mapping[str, str]]
actions: Sequence[Union[str, Mapping]]
default: bool = False
default_enabled: bool = True
@attr.s(auto_attribs=True, slots=True, frozen=True, weakref_slot=False)
class PushRules:
"""A collection of push rules for an account.
Can be iterated over, producing push rules in priority order.
"""
# A mapping from rule ID to push rule that overrides a base rule. These will
# be returned instead of the base rule.
overriden_base_rules: Dict[str, PushRule] = attr.Factory(dict)
# The following stores the custom push rules at each priority class.
#
# We keep these separate (rather than combining into one big list) to avoid
# copying the base rules around all the time.
override: List[PushRule] = attr.Factory(list)
content: List[PushRule] = attr.Factory(list)
room: List[PushRule] = attr.Factory(list)
sender: List[PushRule] = attr.Factory(list)
underride: List[PushRule] = attr.Factory(list)
def __iter__(self) -> Iterator[PushRule]:
# When iterating over the push rules we need to return the base rules
# interspersed at the correct spots.
for rule in itertools.chain(
BASE_PREPEND_OVERRIDE_RULES,
self.override,
BASE_APPEND_OVERRIDE_RULES,
self.content,
BASE_APPEND_CONTENT_RULES,
self.room,
self.sender,
self.underride,
BASE_APPEND_UNDERRIDE_RULES,
):
# Check if a base rule has been overridden by a custom rule. If so
# return that instead.
override_rule = self.overriden_base_rules.get(rule.rule_id)
if override_rule:
yield override_rule
else:
yield rule
def __len__(self) -> int:
# The length is mostly used by caches to get a sense of "size" / amount
# of memory this object is using, so we only count the number of custom
# rules.
return (
len(self.overriden_base_rules)
+ len(self.override)
+ len(self.content)
+ len(self.room)
+ len(self.sender)
+ len(self.underride)
)
@attr.s(auto_attribs=True, slots=True, frozen=True, weakref_slot=False)
class FilteredPushRules:
"""A wrapper around `PushRules` that filters out disabled experimental push
rules, and includes the "enabled" state for each rule when iterated over.
"""
push_rules: PushRules
enabled_map: Dict[str, bool]
experimental_config: ExperimentalConfig
def __iter__(self) -> Iterator[Tuple[PushRule, bool]]:
for rule in self.push_rules:
if not _is_experimental_rule_enabled(
rule.rule_id, self.experimental_config
):
continue
enabled = self.enabled_map.get(rule.rule_id, rule.default_enabled)
yield rule, enabled
def __len__(self) -> int:
return len(self.push_rules)
DEFAULT_EMPTY_PUSH_RULES = PushRules()
def compile_push_rules(rawrules: List[PushRule]) -> PushRules:
"""Given a set of custom push rules return a `PushRules` instance (which
includes the base rules).
"""
if not rawrules:
# Fast path to avoid allocating empty lists when there are no custom
# rules for the user.
return DEFAULT_EMPTY_PUSH_RULES
rules = PushRules()
for rule in rawrules:
# We need to decide which bucket each custom push rule goes into.
# If it has the same ID as a base rule then it overrides that...
overriden_base_rule = BASE_RULES_BY_ID.get(rule.rule_id)
if overriden_base_rule:
rules.overriden_base_rules[rule.rule_id] = attr.evolve(
overriden_base_rule, actions=rule.actions
)
continue
# ... otherwise it gets added to the appropriate priority class bucket
collection: List[PushRule]
if rule.priority_class == 5:
collection = rules.override
elif rule.priority_class == 4:
collection = rules.content
elif rule.priority_class == 3:
collection = rules.room
elif rule.priority_class == 2:
collection = rules.sender
elif rule.priority_class == 1:
collection = rules.underride
elif rule.priority_class <= 0:
logger.info(
"Got rule with priority class less than zero, but doesn't override a base rule: %s",
rule,
)
continue
else:
# We log and continue here so as not to break event sending
logger.error("Unknown priority class: %", rule.priority_class)
continue
collection.append(rule)
return rules
def _is_experimental_rule_enabled(
rule_id: str, experimental_config: ExperimentalConfig
) -> bool:
"""Used by `FilteredPushRules` to filter out experimental rules when they
have not been enabled.
"""
if (
rule_id == "global/override/.org.matrix.msc3786.rule.room.server_acl"
and not experimental_config.msc3786_enabled
):
return False
if (
rule_id == "global/underride/.org.matrix.msc3772.thread_reply"
and not experimental_config.msc3772_enabled
):
return False
return True
BASE_APPEND_CONTENT_RULES = [
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["content"],
rule_id="global/content/.m.rule.contains_user_name",
conditions=[
{
"kind": "event_match",
"key": "content.body",
# Match the localpart of the requester's MXID.
"pattern_type": "user_localpart",
}
],
actions=[
"notify",
{"set_tweak": "sound", "value": "default"},
{"set_tweak": "highlight"},
],
)
]
BASE_PREPEND_OVERRIDE_RULES = [
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["override"],
rule_id="global/override/.m.rule.master",
default_enabled=False,
conditions=[],
actions=["dont_notify"],
)
]
BASE_APPEND_OVERRIDE_RULES = [
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["override"],
rule_id="global/override/.m.rule.suppress_notices",
conditions=[
{
"kind": "event_match",
"key": "content.msgtype",
"pattern": "m.notice",
"_cache_key": "_suppress_notices",
}
],
actions=["dont_notify"],
),
# NB. .m.rule.invite_for_me must be higher prio than .m.rule.member_event
# otherwise invites will be matched by .m.rule.member_event
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["override"],
rule_id="global/override/.m.rule.invite_for_me",
conditions=[
{
"kind": "event_match",
"key": "type",
"pattern": "m.room.member",
"_cache_key": "_member",
},
{
"kind": "event_match",
"key": "content.membership",
"pattern": "invite",
"_cache_key": "_invite_member",
},
# Match the requester's MXID.
{"kind": "event_match", "key": "state_key", "pattern_type": "user_id"},
],
actions=[
"notify",
{"set_tweak": "sound", "value": "default"},
{"set_tweak": "highlight", "value": False},
],
),
# Will we sometimes want to know about people joining and leaving?
# Perhaps: if so, this could be expanded upon. Seems the most usual case
# is that we don't though. We add this override rule so that even if
# the room rule is set to notify, we don't get notifications about
# join/leave/avatar/displayname events.
# See also: https://matrix.org/jira/browse/SYN-607
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["override"],
rule_id="global/override/.m.rule.member_event",
conditions=[
{
"kind": "event_match",
"key": "type",
"pattern": "m.room.member",
"_cache_key": "_member",
}
],
actions=["dont_notify"],
),
# This was changed from underride to override so it's closer in priority
# to the content rules where the user name highlight rule lives. This
# way a room rule is lower priority than both but a custom override rule
# is higher priority than both.
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["override"],
rule_id="global/override/.m.rule.contains_display_name",
conditions=[{"kind": "contains_display_name"}],
actions=[
"notify",
{"set_tweak": "sound", "value": "default"},
{"set_tweak": "highlight"},
],
),
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["override"],
rule_id="global/override/.m.rule.roomnotif",
conditions=[
{
"kind": "event_match",
"key": "content.body",
"pattern": "@room",
"_cache_key": "_roomnotif_content",
},
{
"kind": "sender_notification_permission",
"key": "room",
"_cache_key": "_roomnotif_pl",
},
],
actions=["notify", {"set_tweak": "highlight", "value": True}],
),
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["override"],
rule_id="global/override/.m.rule.tombstone",
conditions=[
{
"kind": "event_match",
"key": "type",
"pattern": "m.room.tombstone",
"_cache_key": "_tombstone",
},
{
"kind": "event_match",
"key": "state_key",
"pattern": "",
"_cache_key": "_tombstone_statekey",
},
],
actions=["notify", {"set_tweak": "highlight", "value": True}],
),
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["override"],
rule_id="global/override/.m.rule.reaction",
conditions=[
{
"kind": "event_match",
"key": "type",
"pattern": "m.reaction",
"_cache_key": "_reaction",
}
],
actions=["dont_notify"],
),
# XXX: This is an experimental rule that is only enabled if msc3786_enabled
# is enabled, if it is not the rule gets filtered out in _load_rules() in
# PushRulesWorkerStore
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["override"],
rule_id="global/override/.org.matrix.msc3786.rule.room.server_acl",
conditions=[
{
"kind": "event_match",
"key": "type",
"pattern": "m.room.server_acl",
"_cache_key": "_room_server_acl",
},
{
"kind": "event_match",
"key": "state_key",
"pattern": "",
"_cache_key": "_room_server_acl_state_key",
},
],
actions=[],
),
]
BASE_APPEND_UNDERRIDE_RULES = [
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["underride"],
rule_id="global/underride/.m.rule.call",
conditions=[
{
"kind": "event_match",
"key": "type",
"pattern": "m.call.invite",
"_cache_key": "_call",
}
],
actions=[
"notify",
{"set_tweak": "sound", "value": "ring"},
{"set_tweak": "highlight", "value": False},
],
),
# XXX: once m.direct is standardised everywhere, we should use it to detect
# a DM from the user's perspective rather than this heuristic.
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["underride"],
rule_id="global/underride/.m.rule.room_one_to_one",
conditions=[
{"kind": "room_member_count", "is": "2", "_cache_key": "member_count"},
{
"kind": "event_match",
"key": "type",
"pattern": "m.room.message",
"_cache_key": "_message",
},
],
actions=[
"notify",
{"set_tweak": "sound", "value": "default"},
{"set_tweak": "highlight", "value": False},
],
),
# XXX: this is going to fire for events which aren't m.room.messages
# but are encrypted (e.g. m.call.*)...
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["underride"],
rule_id="global/underride/.m.rule.encrypted_room_one_to_one",
conditions=[
{"kind": "room_member_count", "is": "2", "_cache_key": "member_count"},
{
"kind": "event_match",
"key": "type",
"pattern": "m.room.encrypted",
"_cache_key": "_encrypted",
},
],
actions=[
"notify",
{"set_tweak": "sound", "value": "default"},
{"set_tweak": "highlight", "value": False},
],
),
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["underride"],
rule_id="global/underride/.org.matrix.msc3772.thread_reply",
conditions=[
{
"kind": "org.matrix.msc3772.relation_match",
"rel_type": "m.thread",
# Match the requester's MXID.
"sender_type": "user_id",
}
],
actions=["notify", {"set_tweak": "highlight", "value": False}],
),
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["underride"],
rule_id="global/underride/.m.rule.message",
conditions=[
{
"kind": "event_match",
"key": "type",
"pattern": "m.room.message",
"_cache_key": "_message",
}
],
actions=["notify", {"set_tweak": "highlight", "value": False}],
),
# XXX: this is going to fire for events which aren't m.room.messages
# but are encrypted (e.g. m.call.*)...
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["underride"],
rule_id="global/underride/.m.rule.encrypted",
conditions=[
{
"kind": "event_match",
"key": "type",
"pattern": "m.room.encrypted",
"_cache_key": "_encrypted",
}
],
actions=["notify", {"set_tweak": "highlight", "value": False}],
),
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["underride"],
rule_id="global/underride/.im.vector.jitsi",
conditions=[
{
"kind": "event_match",
"key": "type",
"pattern": "im.vector.modular.widgets",
"_cache_key": "_type_modular_widgets",
},
{
"kind": "event_match",
"key": "content.type",
"pattern": "jitsi",
"_cache_key": "_content_type_jitsi",
},
{
"kind": "event_match",
"key": "state_key",
"pattern": "*",
"_cache_key": "_is_state_event",
},
],
actions=["notify", {"set_tweak": "highlight", "value": False}],
),
]
BASE_RULE_IDS = set()
BASE_RULES_BY_ID: Dict[str, PushRule] = {}
for r in BASE_APPEND_CONTENT_RULES:
BASE_RULE_IDS.add(r.rule_id)
BASE_RULES_BY_ID[r.rule_id] = r
for r in BASE_PREPEND_OVERRIDE_RULES:
BASE_RULE_IDS.add(r.rule_id)
BASE_RULES_BY_ID[r.rule_id] = r
for r in BASE_APPEND_OVERRIDE_RULES:
BASE_RULE_IDS.add(r.rule_id)
BASE_RULES_BY_ID[r.rule_id] = r
for r in BASE_APPEND_UNDERRIDE_RULES:
BASE_RULE_IDS.add(r.rule_id)
BASE_RULES_BY_ID[r.rule_id] = r

View file

@ -37,11 +37,11 @@ from synapse.events.snapshot import EventContext
from synapse.state import POWER_KEY
from synapse.storage.databases.main.roommember import EventIdMembership
from synapse.storage.state import StateFilter
from synapse.synapse_rust.push import FilteredPushRules, PushRule
from synapse.util.caches import register_cache
from synapse.util.metrics import measure_func
from synapse.visibility import filter_event_for_clients_with_state
from .baserules import FilteredPushRules, PushRule
from .push_rule_evaluator import PushRuleEvaluatorForEvent
if TYPE_CHECKING:
@ -280,7 +280,8 @@ class BulkPushRuleEvaluator:
thread_id = "main"
if relation:
relations = await self._get_mutual_relations(
relation.parent_id, itertools.chain(*rules_by_user.values())
relation.parent_id,
itertools.chain(*(r.rules() for r in rules_by_user.values())),
)
if relation.rel_type == RelationTypes.THREAD:
thread_id = relation.parent_id
@ -333,7 +334,7 @@ class BulkPushRuleEvaluator:
# current user, it'll be added to the dict later.
actions_by_user[uid] = []
for rule, enabled in rules:
for rule, enabled in rules.rules():
if not enabled:
continue

View file

@ -16,10 +16,9 @@ import copy
from typing import Any, Dict, List, Optional
from synapse.push.rulekinds import PRIORITY_CLASS_INVERSE_MAP, PRIORITY_CLASS_MAP
from synapse.synapse_rust.push import FilteredPushRules, PushRule
from synapse.types import UserID
from .baserules import FilteredPushRules, PushRule
def format_push_rules_for_user(
user: UserID, ruleslist: FilteredPushRules
@ -34,7 +33,7 @@ def format_push_rules_for_user(
rules["global"] = _add_empty_priority_class_arrays(rules["global"])
for r, enabled in ruleslist:
for r, enabled in ruleslist.rules():
template_name = _priority_class_to_template_name(r.priority_class)
rulearray = rules["global"][template_name]

View file

@ -80,6 +80,7 @@ from synapse.rest.admin.users import (
SearchUsersRestServlet,
ShadowBanRestServlet,
UserAdminServlet,
UserByExternalId,
UserMembershipRestServlet,
UserRegisterServlet,
UserRestServletV2,
@ -275,6 +276,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
ListDestinationsRestServlet(hs).register(http_server)
RoomMessagesRestServlet(hs).register(http_server)
RoomTimestampToEventRestServlet(hs).register(http_server)
UserByExternalId(hs).register(http_server)
# Some servlets only get registered for the main process.
if hs.config.worker.worker_app is None:

View file

@ -1156,3 +1156,30 @@ class AccountDataRestServlet(RestServlet):
"rooms": by_room_data,
},
}
class UserByExternalId(RestServlet):
"""Find a user based on an external ID from an auth provider"""
PATTERNS = admin_patterns(
"/auth_providers/(?P<provider>[^/]*)/users/(?P<external_id>[^/]*)"
)
def __init__(self, hs: "HomeServer"):
self._auth = hs.get_auth()
self._store = hs.get_datastores().main
async def on_GET(
self,
request: SynapseRequest,
provider: str,
external_id: str,
) -> Tuple[int, JsonDict]:
await assert_requester_is_admin(self._auth, request)
user_id = await self._store.get_user_by_external_id(provider, external_id)
if user_id is None:
raise NotFoundError("User not found")
return HTTPStatus.OK, {"user_id": user_id}
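Usage sketch for the new endpoint (the `/_synapse/admin/v1` prefix comes from `admin_patterns`; host, provider and token below are placeholders):

import requests

resp = requests.get(
    "https://homeserver.example/_synapse/admin/v1/auth_providers/oidc/users/alice",
    headers={"Authorization": "Bearer <admin_access_token>"},
)
# 200 -> {"user_id": "@alice:example.com"}; 404 if no mapping exists.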

View file

@ -19,6 +19,7 @@ from typing import TYPE_CHECKING, List, Optional, Tuple
from urllib.parse import urlparse
from pydantic import StrictBool, StrictStr, constr
from typing_extensions import Literal
from twisted.web.server import Request
@ -43,6 +44,7 @@ from synapse.metrics import threepid_send_requests
from synapse.push.mailer import Mailer
from synapse.rest.client.models import (
AuthenticationData,
ClientSecretStr,
EmailRequestTokenBody,
MsisdnRequestTokenBody,
)
@ -627,6 +629,11 @@ class ThreepidAddRestServlet(RestServlet):
self.auth = hs.get_auth()
self.auth_handler = hs.get_auth_handler()
class PostBody(RequestBodyModel):
auth: Optional[AuthenticationData] = None
client_secret: ClientSecretStr
sid: StrictStr
@interactive_auth_handler
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
if not self.hs.config.registration.enable_3pid_changes:
@ -636,22 +643,17 @@ class ThreepidAddRestServlet(RestServlet):
requester = await self.auth.get_user_by_req(request)
user_id = requester.user.to_string()
body = parse_json_object_from_request(request)
assert_params_in_dict(body, ["client_secret", "sid"])
sid = body["sid"]
client_secret = body["client_secret"]
assert_valid_client_secret(client_secret)
body = parse_and_validate_json_object_from_request(request, self.PostBody)
await self.auth_handler.validate_user_via_ui_auth(
requester,
request,
body,
body.dict(exclude_unset=True),
"add a third-party identifier to your account",
)
validation_session = await self.identity_handler.validate_threepid_session(
client_secret, sid
body.client_secret, body.sid
)
if validation_session:
await self.auth_handler.add_threepid(
@ -676,23 +678,20 @@ class ThreepidBindRestServlet(RestServlet):
self.identity_handler = hs.get_identity_handler()
self.auth = hs.get_auth()
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
body = parse_json_object_from_request(request)
class PostBody(RequestBodyModel):
client_secret: ClientSecretStr
id_access_token: StrictStr
id_server: StrictStr
sid: StrictStr
assert_params_in_dict(
body, ["id_server", "sid", "id_access_token", "client_secret"]
)
id_server = body["id_server"]
sid = body["sid"]
id_access_token = body["id_access_token"]
client_secret = body["client_secret"]
assert_valid_client_secret(client_secret)
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
body = parse_and_validate_json_object_from_request(request, self.PostBody)
requester = await self.auth.get_user_by_req(request)
user_id = requester.user.to_string()
await self.identity_handler.bind_threepid(
client_secret, sid, user_id, id_server, id_access_token
body.client_secret, body.sid, user_id, body.id_server, body.id_access_token
)
return 200, {}
@ -708,23 +707,27 @@ class ThreepidUnbindRestServlet(RestServlet):
self.auth = hs.get_auth()
self.datastore = self.hs.get_datastores().main
class PostBody(RequestBodyModel):
address: StrictStr
id_server: Optional[StrictStr] = None
medium: Literal["email", "msisdn"]
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
"""Unbind the given 3pid from a specific identity server, or identity servers that are
known to have this 3pid bound
"""
requester = await self.auth.get_user_by_req(request)
body = parse_json_object_from_request(request)
assert_params_in_dict(body, ["medium", "address"])
medium = body.get("medium")
address = body.get("address")
id_server = body.get("id_server")
body = parse_and_validate_json_object_from_request(request, self.PostBody)
# Attempt to unbind the threepid from an identity server. If id_server is None, try to
# unbind from all identity servers this threepid has been added to in the past
result = await self.identity_handler.try_unbind_threepid(
requester.user.to_string(),
{"address": address, "medium": medium, "id_server": id_server},
{
"address": body.address,
"medium": body.medium,
"id_server": body.id_server,
},
)
return 200, {"id_server_unbind_result": "success" if result else "no-support"}
@ -738,21 +741,25 @@ class ThreepidDeleteRestServlet(RestServlet):
self.auth = hs.get_auth()
self.auth_handler = hs.get_auth_handler()
class PostBody(RequestBodyModel):
address: StrictStr
id_server: Optional[StrictStr] = None
medium: Literal["email", "msisdn"]
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
if not self.hs.config.registration.enable_3pid_changes:
raise SynapseError(
400, "3PID changes are disabled on this server", Codes.FORBIDDEN
)
body = parse_json_object_from_request(request)
assert_params_in_dict(body, ["medium", "address"])
body = parse_and_validate_json_object_from_request(request, self.PostBody)
requester = await self.auth.get_user_by_req(request)
user_id = requester.user.to_string()
try:
ret = await self.auth_handler.delete_threepid(
user_id, body["medium"], body["address"], body.get("id_server")
user_id, body.medium, body.address, body.id_server
)
except Exception:
# NB. This endpoint should succeed if there is nothing to

View file

@ -36,18 +36,20 @@ class AuthenticationData(RequestBodyModel):
type: Optional[StrictStr] = None
class ThreePidRequestTokenBody(RequestBodyModel):
if TYPE_CHECKING:
client_secret: StrictStr
else:
# See also assert_valid_client_secret()
client_secret: constr(
regex="[0-9a-zA-Z.=_-]", # noqa: F722
min_length=0,
max_length=255,
strict=True,
)
if TYPE_CHECKING:
ClientSecretStr = StrictStr
else:
# See also assert_valid_client_secret()
ClientSecretStr = constr(
regex="[0-9a-zA-Z.=_-]", # noqa: F722
min_length=1,
max_length=255,
strict=True,
)
class ThreepidRequestTokenBody(RequestBodyModel):
client_secret: ClientSecretStr
id_server: Optional[StrictStr]
id_access_token: Optional[StrictStr]
next_link: Optional[StrictStr]
@ -62,7 +64,7 @@ class ThreePidRequestTokenBody(RequestBodyModel):
return token
class EmailRequestTokenBody(ThreePidRequestTokenBody):
class EmailRequestTokenBody(ThreepidRequestTokenBody):
email: StrictStr
# Canonicalise the email address. The addresses are all stored canonicalised
@ -80,6 +82,6 @@ else:
ISO3116_1_Alpha_2 = constr(regex="[A-Z]{2}", strict=True)
class MsisdnRequestTokenBody(ThreePidRequestTokenBody):
class MsisdnRequestTokenBody(ThreepidRequestTokenBody):
country: ISO3116_1_Alpha_2
phone_number: StrictStr
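A sketch of how these request models behave at runtime (standalone pydantic; the model mirrors the `PostBody` classes in the account servlets rather than importing Synapse):

from typing import Optional

from pydantic import BaseModel, StrictStr, ValidationError
from typing_extensions import Literal

class PostBody(BaseModel):  # stand-in for a RequestBodyModel subclass
    address: StrictStr
    id_server: Optional[StrictStr] = None
    medium: Literal["email", "msisdn"]

PostBody(address="a@example.com", medium="email")  # validates fine
try:
    PostBody(address="a@example.com", medium="pigeon")  # rejected: bad medium
except ValidationError:
    pass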

View file

@ -19,6 +19,8 @@ import shutil
from io import BytesIO
from typing import IO, TYPE_CHECKING, Dict, List, Optional, Set, Tuple
from matrix_common.types.mxc_uri import MXCUri
import twisted.internet.error
import twisted.web.http
from twisted.internet.defer import Deferred
@ -186,7 +188,7 @@ class MediaRepository:
content: IO,
content_length: int,
auth_user: UserID,
) -> str:
) -> MXCUri:
"""Store uploaded content for a local user and return the mxc URL
Args:
@ -219,7 +221,7 @@ class MediaRepository:
await self._generate_thumbnails(None, media_id, media_id, media_type)
return "mxc://%s/%s" % (self.server_name, media_id)
return MXCUri(self.server_name, media_id)
async def get_local_media(
self, request: SynapseRequest, media_id: str, name: Optional[str]

View file

@ -101,6 +101,8 @@ class UploadResource(DirectServeJsonResource):
# the default 404, as that would just be confusing.
raise SynapseError(400, "Bad content")
logger.info("Uploaded content with URI %r", content_uri)
logger.info("Uploaded content with URI '%s'", content_uri)
respond_with_json(request, 200, {"content_uri": content_uri}, send_cors=True)
respond_with_json(
request, 200, {"content_uri": str(content_uri)}, send_cors=True
)
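For reference, `MXCUri` (from the `matrix-common` package) is a small value type; the change above just defers stringification to the response edge. Illustrative values:

from matrix_common.types.mxc_uri import MXCUri

uri = MXCUri("example.com", "AbCdEfGh")
assert str(uri) == "mxc://example.com/AbCdEfGh"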

View file

@ -577,6 +577,21 @@ async def _iterative_auth_checks(
if ev.rejected_reason is None:
auth_events[key] = event_map[ev_id]
if event.rejected_reason is not None:
# Do not admit previously rejected events into state.
# TODO: This isn't spec compliant. Events that were previously rejected due
# to failing auth checks at their state, but pass auth checks during
# state resolution should be accepted. Synapse does not handle the
# change of rejection status well, so we preserve the previous
# rejection status for now.
#
# Note that events rejected for non-state reasons, such as having the
# wrong auth events, should remain rejected.
#
# https://spec.matrix.org/v1.2/rooms/v9/#rejected-events
# https://github.com/matrix-org/synapse/issues/13797
continue
try:
event_auth.check_state_dependent_auth_rules(
event,

View file

@ -285,7 +285,10 @@ class BackgroundUpdater:
back_to_back_failures = 0
try:
logger.info("Starting background schema updates")
logger.info(
"Starting background schema updates for database %s",
self._database_name,
)
while self.enabled:
try:
result = await self.do_next_background_update(sleep)
@ -533,6 +536,7 @@ class BackgroundUpdater:
index_name: name of index to add
table: table to add index to
columns: columns/expressions to include in index
where_clause: A WHERE clause to specify a partial unique index.
unique: true to make a UNIQUE index
psql_only: true to only create this index on psql databases (useful
for virtual sqlite tables)

View file

@ -1187,6 +1187,7 @@ class DatabasePool:
keyvalues: Dict[str, Any],
values: Dict[str, Any],
insertion_values: Optional[Dict[str, Any]] = None,
where_clause: Optional[str] = None,
lock: bool = True,
) -> bool:
"""
@ -1199,6 +1200,7 @@ class DatabasePool:
keyvalues: The unique key columns and their new values
values: The nonunique columns and their new values
insertion_values: additional key/values to use only when inserting
where_clause: An index predicate to apply to the upsert.
lock: True to lock the table when doing the upsert. Unused when performing
a native upsert.
Returns:
@ -1209,7 +1211,12 @@ class DatabasePool:
if table not in self._unsafe_to_upsert_tables:
return self.simple_upsert_txn_native_upsert(
txn, table, keyvalues, values, insertion_values=insertion_values
txn,
table,
keyvalues,
values,
insertion_values=insertion_values,
where_clause=where_clause,
)
else:
return self.simple_upsert_txn_emulated(
@ -1218,6 +1225,7 @@ class DatabasePool:
keyvalues,
values,
insertion_values=insertion_values,
where_clause=where_clause,
lock=lock,
)
@ -1228,6 +1236,7 @@ class DatabasePool:
keyvalues: Dict[str, Any],
values: Dict[str, Any],
insertion_values: Optional[Dict[str, Any]] = None,
where_clause: Optional[str] = None,
lock: bool = True,
) -> bool:
"""
@ -1236,6 +1245,7 @@ class DatabasePool:
keyvalues: The unique key columns and their new values
values: The nonunique columns and their new values
insertion_values: additional key/values to use only when inserting
where_clause: An index predicate to apply to the upsert.
lock: True to lock the table when doing the upsert.
Returns:
Returns True if a row was inserted or updated (i.e. if `values` is
@ -1255,14 +1265,17 @@ class DatabasePool:
else:
return "%s = ?" % (key,)
# Generate a where clause for each keyvalue and optionally append the
# provided index predicate.
where = [_getwhere(k) for k in keyvalues]
if where_clause:
where.append(where_clause)
if not values:
# If `values` is empty, then all of the values we care about are in
# the unique key, so there is nothing to UPDATE. We can just do a
# SELECT instead to see if it exists.
sql = "SELECT 1 FROM %s WHERE %s" % (
table,
" AND ".join(_getwhere(k) for k in keyvalues),
)
sql = "SELECT 1 FROM %s WHERE %s" % (table, " AND ".join(where))
sqlargs = list(keyvalues.values())
txn.execute(sql, sqlargs)
if txn.fetchall():
@ -1273,7 +1286,7 @@ class DatabasePool:
sql = "UPDATE %s SET %s WHERE %s" % (
table,
", ".join("%s = ?" % (k,) for k in values),
" AND ".join(_getwhere(k) for k in keyvalues),
" AND ".join(where),
)
sqlargs = list(values.values()) + list(keyvalues.values())
@ -1303,6 +1316,7 @@ class DatabasePool:
keyvalues: Dict[str, Any],
values: Dict[str, Any],
insertion_values: Optional[Dict[str, Any]] = None,
where_clause: Optional[str] = None,
) -> bool:
"""
Use the native UPSERT functionality in PostgreSQL.
@ -1312,6 +1326,7 @@ class DatabasePool:
keyvalues: The unique key columns and their new values
values: The nonunique columns and their new values
insertion_values: additional key/values to use only when inserting
where_clause: An index predicate to apply to the upsert.
Returns:
Returns True if a row was inserted or updated (i.e. if `values` is
@ -1327,11 +1342,12 @@ class DatabasePool:
allvalues.update(values)
latter = "UPDATE SET " + ", ".join(k + "=EXCLUDED." + k for k in values)
sql = ("INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) DO %s") % (
sql = "INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) %s DO %s" % (
table,
", ".join(k for k in allvalues),
", ".join("?" for _ in allvalues),
", ".join(k for k in keyvalues),
f"WHERE {where_clause}" if where_clause else "",
latter,
)
txn.execute(sql, list(allvalues.values()))

View file

@ -419,6 +419,7 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
"event_forward_extremities",
"event_push_actions",
"event_search",
"event_failed_pull_attempts",
"partial_state_events",
"events",
"federation_inbound_events_staging",
@ -441,6 +442,10 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore):
"e2e_room_keys",
"event_push_summary",
"pusher_throttle",
"insertion_events",
"insertion_event_extremities",
"insertion_event_edges",
"batch_events",
"room_account_data",
"room_tags",
# "rooms" happens last, to keep the foreign keys in the other tables

Some files were not shown because too many files have changed in this diff.