From 06953bc193530780a7686b2aee9632a4ed1d604f Mon Sep 17 00:00:00 2001
From: Erik Johnston <erik@matrix.org>
Date: Thu, 6 Jun 2024 17:10:58 +0100
Subject: [PATCH 01/16] Always return OTK counts (#17275)

Broke in https://github.com/element-hq/synapse/pull/17215
---
 changelog.d/17275.bugfix |  1 +
 synapse/handlers/sync.py | 33 +++++++++++++++++++++++++++++----
 2 files changed, 30 insertions(+), 4 deletions(-)
 create mode 100644 changelog.d/17275.bugfix

diff --git a/changelog.d/17275.bugfix b/changelog.d/17275.bugfix
new file mode 100644
index 0000000000..eb522bb997
--- /dev/null
+++ b/changelog.d/17275.bugfix
@@ -0,0 +1 @@
+Fix bug where OTKs were not always included in `/sync` response when using workers.
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 1d7d9dfdd0..6389c51b1c 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -285,7 +285,11 @@ class SyncResult:
         )
 
     @staticmethod
-    def empty(next_batch: StreamToken) -> "SyncResult":
+    def empty(
+        next_batch: StreamToken,
+        device_one_time_keys_count: JsonMapping,
+        device_unused_fallback_key_types: List[str],
+    ) -> "SyncResult":
         "Return a new empty result"
         return SyncResult(
             next_batch=next_batch,
@@ -297,8 +301,8 @@ class SyncResult:
             archived=[],
             to_device=[],
             device_lists=DeviceListUpdates(),
-            device_one_time_keys_count={},
-            device_unused_fallback_key_types=[],
+            device_one_time_keys_count=device_one_time_keys_count,
+            device_unused_fallback_key_types=device_unused_fallback_key_types,
         )
 
 
@@ -523,7 +527,28 @@ class SyncHandler:
                 logger.warning(
                     "Timed out waiting for worker to catch up. Returning empty response"
                 )
-                return SyncResult.empty(since_token)
+                device_id = sync_config.device_id
+                one_time_keys_count: JsonMapping = {}
+                unused_fallback_key_types: List[str] = []
+                if device_id:
+                    user_id = sync_config.user.to_string()
+                    # TODO: We should have a way to let clients differentiate between the states of:
+                    #   * no change in OTK count since the provided since token
+                    #   * the server has zero OTKs left for this device
+                    # Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298
+                    one_time_keys_count = await self.store.count_e2e_one_time_keys(
+                        user_id, device_id
+                    )
+                    unused_fallback_key_types = list(
+                        await self.store.get_e2e_unused_fallback_key_types(
+                            user_id, device_id
+                        )
+                    )
+
+                cache_context.should_cache = False  # Don't cache empty responses
+                return SyncResult.empty(
+                    since_token, one_time_keys_count, unused_fallback_key_types
+                )
 
         # If we've spent significant time waiting to catch up, take it off
         # the timeout.

From b84e31375b0ca80970a6c56e3e0e86a5c63af025 Mon Sep 17 00:00:00 2001
From: Erik Johnston <erik@matrix.org>
Date: Mon, 10 Jun 2024 15:55:42 +0100
Subject: [PATCH 02/16] Update changelog

---
 changelog.d/17275.bugfix | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/changelog.d/17275.bugfix b/changelog.d/17275.bugfix
index eb522bb997..04e8ab5eff 100644
--- a/changelog.d/17275.bugfix
+++ b/changelog.d/17275.bugfix
@@ -1 +1 @@
-Fix bug where OTKs were not always included in `/sync` response when using workers.
+Fix bug where OTKs were not always included in `/sync` response when using workers. Introduced in v1.109.0rc1.

From 8c4937b216309eec62f9262f23162878fa3b772c Mon Sep 17 00:00:00 2001
From: Erik Johnston <erik@matrix.org>
Date: Mon, 10 Jun 2024 15:56:57 +0100
Subject: [PATCH 03/16] Fix bug where device lists would break sync (#17292)

If the stream ID in the unconverted table is ahead of the device lists
ID gen, then it can break all /sync requests that had an ID from ahead
of the table.
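To illustrate the failure mode before getting to the fix: a rough sketch
(hypothetical code, not Synapse's actual `MultiWriterIdGenerator`) of how a
multi-writer stream ID generator derives its current position at startup by
scanning every table that records stream IDs. The table list and column
names below are illustrative only.

```python
import sqlite3
from typing import List


def current_position(conn: sqlite3.Connection, tables: List[str]) -> int:
    """Return the highest stream_id seen across all registered tables."""
    max_seen = 0
    for table in tables:
        # Each table that persists stream IDs contributes to the
        # generator's starting position.
        (value,) = conn.execute(
            f"SELECT COALESCE(MAX(stream_id), 0) FROM {table}"
        ).fetchone()
        max_seen = max(max_seen, value)
    return max_seen


# If `device_lists_changes_converted_stream_position` is left out of
# `tables`, a stream ID stored there can be ahead of the computed
# position, and /sync requests carrying such an ID never appear to
# have "caught up".
```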
The fix is to make sure we add the unconverted table to the list of tables we check at start up. Broke in https://github.com/element-hq/synapse/pull/17229 --- changelog.d/17292.bugfix | 1 + synapse/storage/databases/main/devices.py | 28 +++++++++++++------ .../05_add_instance_names_converted_pos.sql | 16 +++++++++++ 3 files changed, 36 insertions(+), 9 deletions(-) create mode 100644 changelog.d/17292.bugfix create mode 100644 synapse/storage/schema/main/delta/85/05_add_instance_names_converted_pos.sql diff --git a/changelog.d/17292.bugfix b/changelog.d/17292.bugfix new file mode 100644 index 0000000000..c067a98ce8 --- /dev/null +++ b/changelog.d/17292.bugfix @@ -0,0 +1 @@ +Fix bug where `/sync` could get stuck due to edge case in device lists handling. Introduced in v1.109.0rc1. diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 1c771e48f7..40187496e2 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -108,6 +108,11 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore): ("device_lists_outbound_pokes", "instance_name", "stream_id"), ("device_lists_changes_in_room", "instance_name", "stream_id"), ("device_lists_remote_pending", "instance_name", "stream_id"), + ( + "device_lists_changes_converted_stream_position", + "instance_name", + "stream_id", + ), ], sequence_name="device_lists_sequence", writers=["master"], @@ -2394,15 +2399,16 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): `FALSE` have not been converted. """ - return cast( - Tuple[int, str], - await self.db_pool.simple_select_one( - table="device_lists_changes_converted_stream_position", - keyvalues={}, - retcols=["stream_id", "room_id"], - desc="get_device_change_last_converted_pos", - ), + # There should be only one row in this table, though we want to + # future-proof ourselves for when we have multiple rows (one for each + # instance). So to handle that case we take the minimum of all rows. + rows = await self.db_pool.simple_select_list( + table="device_lists_changes_converted_stream_position", + keyvalues={}, + retcols=["stream_id", "room_id"], + desc="get_device_change_last_converted_pos", ) + return cast(Tuple[int, str], min(rows)) async def set_device_change_last_converted_pos( self, @@ -2417,6 +2423,10 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): await self.db_pool.simple_update_one( table="device_lists_changes_converted_stream_position", keyvalues={}, - updatevalues={"stream_id": stream_id, "room_id": room_id}, + updatevalues={ + "stream_id": stream_id, + "instance_name": self._instance_name, + "room_id": room_id, + }, desc="set_device_change_last_converted_pos", ) diff --git a/synapse/storage/schema/main/delta/85/05_add_instance_names_converted_pos.sql b/synapse/storage/schema/main/delta/85/05_add_instance_names_converted_pos.sql new file mode 100644 index 0000000000..c3f2b6a1dd --- /dev/null +++ b/synapse/storage/schema/main/delta/85/05_add_instance_names_converted_pos.sql @@ -0,0 +1,16 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2024 New Vector, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. 
+-- +-- See the GNU Affero General Public License for more details: +-- . + +-- Add `instance_name` columns to stream tables to allow them to be used with +-- `MultiWriterIdGenerator` +ALTER TABLE device_lists_changes_converted_stream_position ADD COLUMN instance_name TEXT; From 491365f19997c2ca1c42d54242cc11c5d8d0e52d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Jun 2024 10:47:03 +0100 Subject: [PATCH 04/16] Bump types-pillow from 10.2.0.20240423 to 10.2.0.20240520 (#17285) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 942e26701d..54674cc7d9 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2844,13 +2844,13 @@ files = [ [[package]] name = "types-pillow" -version = "10.2.0.20240423" +version = "10.2.0.20240520" description = "Typing stubs for Pillow" optional = false python-versions = ">=3.8" files = [ - {file = "types-Pillow-10.2.0.20240423.tar.gz", hash = "sha256:696e68b9b6a58548fc307a8669830469237c5b11809ddf978ac77fafa79251cd"}, - {file = "types_Pillow-10.2.0.20240423-py3-none-any.whl", hash = "sha256:bd12923093b96c91d523efcdb66967a307f1a843bcfaf2d5a529146c10a9ced3"}, + {file = "types-Pillow-10.2.0.20240520.tar.gz", hash = "sha256:130b979195465fa1e1676d8e81c9c7c30319e8e95b12fae945e8f0d525213107"}, + {file = "types_Pillow-10.2.0.20240520-py3-none-any.whl", hash = "sha256:33c36494b380e2a269bb742181bea5d9b00820367822dbd3760f07210a1da23d"}, ] [[package]] From 9e59d18022f20ef8f6c9f9644a59fdc4e08d9cbf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Jun 2024 10:50:03 +0100 Subject: [PATCH 05/16] Bump dawidd6/action-download-artifact from 3.1.4 to 5 (#17289) --- .github/workflows/docs-pr-netlify.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docs-pr-netlify.yaml b/.github/workflows/docs-pr-netlify.yaml index 277083ded3..a724816392 100644 --- a/.github/workflows/docs-pr-netlify.yaml +++ b/.github/workflows/docs-pr-netlify.yaml @@ -14,7 +14,7 @@ jobs: # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess: - name: 📥 Download artifact - uses: dawidd6/action-download-artifact@09f2f74827fd3a8607589e5ad7f9398816f540fe # v3.1.4 + uses: dawidd6/action-download-artifact@deb3bb83256a78589fef6a7b942e5f2573ad7c13 # v5 with: workflow: docs-pr.yaml run_id: ${{ github.event.workflow_run.id }} From 863578bfcf0136ccf527d13533198fc707e5b604 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Jun 2024 10:50:13 +0100 Subject: [PATCH 06/16] Bump regex from 1.10.4 to 1.10.5 (#17290) --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e3e63fc205..7472e16291 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -444,9 +444,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.4" +version = "1.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" +checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f" dependencies = [ "aho-corasick", "memchr", From a8069e9739fdef8c7200d0d33a38b85100398136 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Tue, 11 Jun 2024 15:22:21 +0200 Subject: [PATCH 07/16] 1.109.0rc2 
---
 CHANGES.md               | 10 ++++++++++
 changelog.d/17275.bugfix |  1 -
 changelog.d/17292.bugfix |  1 -
 debian/changelog         |  6 ++++++
 pyproject.toml           |  2 +-
 5 files changed, 17 insertions(+), 3 deletions(-)
 delete mode 100644 changelog.d/17275.bugfix
 delete mode 100644 changelog.d/17292.bugfix

diff --git a/CHANGES.md b/CHANGES.md
index 092dbdbf2d..3116e1b2a8 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,13 @@
+# Synapse 1.109.0rc2 (2024-06-11)
+
+### Bugfixes
+
+- Fix bug where OTKs were not always included in `/sync` response when using workers. Introduced in v1.109.0rc1. ([\#17275](https://github.com/element-hq/synapse/issues/17275))
+- Fix bug where `/sync` could get stuck due to edge case in device lists handling. Introduced in v1.109.0rc1. ([\#17292](https://github.com/element-hq/synapse/issues/17292))
+
+
+
+
 # Synapse 1.109.0rc1 (2024-06-04)
 
 ### Features
diff --git a/changelog.d/17275.bugfix b/changelog.d/17275.bugfix
deleted file mode 100644
index 04e8ab5eff..0000000000
--- a/changelog.d/17275.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where OTKs were not always included in `/sync` response when using workers. Introduced in v1.109.0rc1.
diff --git a/changelog.d/17292.bugfix b/changelog.d/17292.bugfix
deleted file mode 100644
index c067a98ce8..0000000000
--- a/changelog.d/17292.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix bug where `/sync` could get stuck due to edge case in device lists handling. Introduced in v1.109.0rc1.
diff --git a/debian/changelog b/debian/changelog
index 927248bdab..ac2536749d 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.109.0~rc2) stable; urgency=medium
+
+  * New synapse release 1.109.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 11 Jun 2024 13:20:17 +0000
+
 matrix-synapse-py3 (1.109.0~rc1) stable; urgency=medium
 
  * New Synapse release 1.109.0rc1.
diff --git a/pyproject.toml b/pyproject.toml
index 8cc99b8cba..f4f7f70603 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust"
 
 [tool.poetry]
 name = "matrix-synapse"
-version = "1.109.0rc1"
+version = "1.109.0rc2"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "AGPL-3.0-or-later"

From e6816babf6ce37cca4fbd9d67e5b2a0b06f65d1a Mon Sep 17 00:00:00 2001
From: Quentin Gliech <quenting@element.io>
Date: Tue, 11 Jun 2024 15:39:30 +0200
Subject: [PATCH 08/16] CHANGES.md: s/OTKs/one-time-keys/

---
 CHANGES.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGES.md b/CHANGES.md
index 3116e1b2a8..61c6170c62 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -2,7 +2,7 @@
 
 ### Bugfixes
 
-- Fix bug where OTKs were not always included in `/sync` response when using workers. Introduced in v1.109.0rc1. ([\#17275](https://github.com/element-hq/synapse/issues/17275))
+- Fix bug where one-time-keys were not always included in `/sync` response when using workers. Introduced in v1.109.0rc1. ([\#17275](https://github.com/element-hq/synapse/issues/17275))
 - Fix bug where `/sync` could get stuck due to edge case in device lists handling. Introduced in v1.109.0rc1.
([\#17292](https://github.com/element-hq/synapse/issues/17292)) From d0f90bd04e1d83983de5e8327acd214b9a2b8d43 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 12 Jun 2024 10:52:14 +0100 Subject: [PATCH 09/16] Bump jinja2 from 3.1.3 to 3.1.4 (#17287) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 54674cc7d9..76463d493c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -912,13 +912,13 @@ trio = ["async_generator", "trio"] [[package]] name = "jinja2" -version = "3.1.3" +version = "3.1.4" description = "A very fast and expressive template engine." optional = false python-versions = ">=3.7" files = [ - {file = "Jinja2-3.1.3-py3-none-any.whl", hash = "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa"}, - {file = "Jinja2-3.1.3.tar.gz", hash = "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90"}, + {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"}, + {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"}, ] [package.dependencies] From 0edf1cacf72c2307b0a3611b6fbef1033cbc2b2a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 12 Jun 2024 10:52:26 +0100 Subject: [PATCH 10/16] Bump types-jsonschema from 4.21.0.20240311 to 4.22.0.20240610 (#17288) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 76463d493c..028a11f527 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2808,13 +2808,13 @@ files = [ [[package]] name = "types-jsonschema" -version = "4.21.0.20240311" +version = "4.22.0.20240610" description = "Typing stubs for jsonschema" optional = false python-versions = ">=3.8" files = [ - {file = "types-jsonschema-4.21.0.20240311.tar.gz", hash = "sha256:f7165ce70abd91df490c73b089873afd2899c5e56430ee495b64f851ad01f287"}, - {file = "types_jsonschema-4.21.0.20240311-py3-none-any.whl", hash = "sha256:e872f5661513824edf9698f73a66c9c114713d93eab58699bd0532e7e6db5750"}, + {file = "types-jsonschema-4.22.0.20240610.tar.gz", hash = "sha256:f82ab9fe756e3a2642ea9712c46b403ce61eb380b939b696cff3252af42f65b0"}, + {file = "types_jsonschema-4.22.0.20240610-py3-none-any.whl", hash = "sha256:89996b9bd1928f820a0e252b2844be21cd2e55d062b6fa1048d88453006ad89e"}, ] [package.dependencies] From f1c4dfb08b530f2bfaf9c6723ce69ccd231a3370 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Wed, 12 Jun 2024 04:27:46 -0600 Subject: [PATCH 11/16] Add report room API (MSC4151) (#17270) https://github.com/matrix-org/matrix-spec-proposals/pull/4151 This is intended to be enabled by default for immediate use. When FCP is complete, the unstable endpoint will be dropped and stable endpoint supported instead - no backwards compatibility is expected for the unstable endpoint. 
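For illustration, a client can exercise the new endpoint with a single
authenticated POST whose JSON body carries a string `reason`. A sketch using
the `requests` library follows; the homeserver URL, access token, and room ID
are placeholders:

```python
import requests
from urllib.parse import quote

# Placeholder values for illustration only.
HOMESERVER = "https://matrix.example.org"
ACCESS_TOKEN = "syt_example_access_token"
ROOM_ID = "!abcdefghij:example.org"

# Report the room via the unstable MSC4151 endpoint; a successful
# request returns HTTP 200 with an empty JSON object.
response = requests.post(
    f"{HOMESERVER}/_matrix/client/unstable/org.matrix.msc4151"
    f"/rooms/{quote(ROOM_ID, safe='')}/report",
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
    json={"reason": "unwanted content"},
    timeout=10,
)
response.raise_for_status()
print(response.json())  # {}
```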
--- changelog.d/17270.feature | 1 + synapse/config/experimental.py | 3 + synapse/rest/__init__.py | 4 +- .../client/{report_event.py => reporting.py} | 57 +++++++++++- synapse/rest/client/versions.py | 2 + synapse/storage/databases/main/room.py | 32 +++++++ .../main/delta/85/06_add_room_reports.sql | 20 ++++ tests/rest/admin/test_event_reports.py | 6 +- ...test_report_event.py => test_reporting.py} | 93 ++++++++++++++++++- 9 files changed, 210 insertions(+), 8 deletions(-) create mode 100644 changelog.d/17270.feature rename synapse/rest/client/{report_event.py => reporting.py} (65%) create mode 100644 synapse/storage/schema/main/delta/85/06_add_room_reports.sql rename tests/rest/client/{test_report_event.py => test_reporting.py} (64%) diff --git a/changelog.d/17270.feature b/changelog.d/17270.feature new file mode 100644 index 0000000000..4ea5e7be85 --- /dev/null +++ b/changelog.d/17270.feature @@ -0,0 +1 @@ +Add support for the unstable [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151) report room API. diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 75fe6d7b24..5fe5b951dd 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -443,3 +443,6 @@ class ExperimentalConfig(Config): self.msc3916_authenticated_media_enabled = experimental.get( "msc3916_authenticated_media_enabled", False ) + + # MSC4151: Report room API (Client-Server API) + self.msc4151_enabled: bool = experimental.get("msc4151_enabled", False) diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index 534dc0e276..0024ccf708 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -53,7 +53,7 @@ from synapse.rest.client import ( register, relations, rendezvous, - report_event, + reporting, room, room_keys, room_upgrade_rest_servlet, @@ -128,7 +128,7 @@ class ClientRestResource(JsonResource): tags.register_servlets(hs, client_resource) account_data.register_servlets(hs, client_resource) if is_main_process: - report_event.register_servlets(hs, client_resource) + reporting.register_servlets(hs, client_resource) openid.register_servlets(hs, client_resource) notifications.register_servlets(hs, client_resource) devices.register_servlets(hs, client_resource) diff --git a/synapse/rest/client/report_event.py b/synapse/rest/client/reporting.py similarity index 65% rename from synapse/rest/client/report_event.py rename to synapse/rest/client/reporting.py index 447281931e..a95b83b14d 100644 --- a/synapse/rest/client/report_event.py +++ b/synapse/rest/client/reporting.py @@ -23,17 +23,28 @@ import logging from http import HTTPStatus from typing import TYPE_CHECKING, Tuple +from synapse._pydantic_compat import HAS_PYDANTIC_V2 from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError from synapse.http.server import HttpServer -from synapse.http.servlet import RestServlet, parse_json_object_from_request +from synapse.http.servlet import ( + RestServlet, + parse_and_validate_json_object_from_request, + parse_json_object_from_request, +) from synapse.http.site import SynapseRequest from synapse.types import JsonDict +from synapse.types.rest import RequestBodyModel from ._base import client_patterns if TYPE_CHECKING: from synapse.server import HomeServer +if TYPE_CHECKING or HAS_PYDANTIC_V2: + from pydantic.v1 import StrictStr +else: + from pydantic import StrictStr + logger = logging.getLogger(__name__) @@ -95,5 +106,49 @@ class ReportEventRestServlet(RestServlet): return 200, {} +class 
ReportRoomRestServlet(RestServlet):
+    # https://github.com/matrix-org/matrix-spec-proposals/pull/4151
+    PATTERNS = client_patterns(
+        "/org.matrix.msc4151/rooms/(?P<room_id>[^/]*)/report$",
+        releases=[],
+        v1=False,
+        unstable=True,
+    )
+
+    def __init__(self, hs: "HomeServer"):
+        super().__init__()
+        self.hs = hs
+        self.auth = hs.get_auth()
+        self.clock = hs.get_clock()
+        self.store = hs.get_datastores().main
+
+    class PostBody(RequestBodyModel):
+        reason: StrictStr
+
+    async def on_POST(
+        self, request: SynapseRequest, room_id: str
+    ) -> Tuple[int, JsonDict]:
+        requester = await self.auth.get_user_by_req(request)
+        user_id = requester.user.to_string()
+
+        body = parse_and_validate_json_object_from_request(request, self.PostBody)
+
+        room = await self.store.get_room(room_id)
+        if room is None:
+            raise NotFoundError("Room does not exist")
+
+        await self.store.add_room_report(
+            room_id=room_id,
+            user_id=user_id,
+            reason=body.reason,
+            received_ts=self.clock.time_msec(),
+        )
+
+        return 200, {}
+
+
 def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
     ReportEventRestServlet(hs).register(http_server)
+
+    if hs.config.experimental.msc4151_enabled:
+        ReportRoomRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index 56de6906d0..f428158139 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -149,6 +149,8 @@ class VersionsRestServlet(RestServlet):
                             is not None
                         )
                     ),
+                    # MSC4151: Report room API (Client-Server API)
+                    "org.matrix.msc4151": self.config.experimental.msc4151_enabled,
                 },
             },
         )
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index 616c941687..b8a71c803e 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -2207,6 +2207,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
         super().__init__(database, db_conn, hs)
 
         self._event_reports_id_gen = IdGenerator(db_conn, "event_reports", "id")
+        self._room_reports_id_gen = IdGenerator(db_conn, "room_reports", "id")
 
         self._instance_name = hs.get_instance_name()
 
@@ -2416,6 +2417,37 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
         )
         return next_id
 
+    async def add_room_report(
+        self,
+        room_id: str,
+        user_id: str,
+        reason: str,
+        received_ts: int,
+    ) -> int:
+        """Add a room report
+
+        Args:
+            room_id: The room ID being reported.
+            user_id: User who reports the room.
+            reason: Description that the user specifies.
+            received_ts: Time when the user submitted the report (milliseconds).
+        Returns:
+            Id of the room report.
+        """
+        next_id = self._room_reports_id_gen.get_next()
+        await self.db_pool.simple_insert(
+            table="room_reports",
+            values={
+                "id": next_id,
+                "received_ts": received_ts,
+                "room_id": room_id,
+                "user_id": user_id,
+                "reason": reason,
+            },
+            desc="add_room_report",
+        )
+        return next_id
+
     async def block_room(self, room_id: str, user_id: str) -> None:
         """Marks the room as blocked.
 
diff --git a/synapse/storage/schema/main/delta/85/06_add_room_reports.sql b/synapse/storage/schema/main/delta/85/06_add_room_reports.sql
new file mode 100644
index 0000000000..f7b45276cf
--- /dev/null
+++ b/synapse/storage/schema/main/delta/85/06_add_room_reports.sql
@@ -0,0 +1,20 @@
+--
+-- This file is licensed under the Affero General Public License (AGPL) version 3.
+-- +-- Copyright (C) 2024 New Vector, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- . + +CREATE TABLE room_reports ( + id BIGINT NOT NULL PRIMARY KEY, + received_ts BIGINT NOT NULL, + room_id TEXT NOT NULL, + user_id TEXT NOT NULL, + reason TEXT NOT NULL +); diff --git a/tests/rest/admin/test_event_reports.py b/tests/rest/admin/test_event_reports.py index a0f978911a..feb410a11d 100644 --- a/tests/rest/admin/test_event_reports.py +++ b/tests/rest/admin/test_event_reports.py @@ -24,7 +24,7 @@ from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin from synapse.api.errors import Codes -from synapse.rest.client import login, report_event, room +from synapse.rest.client import login, reporting, room from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util import Clock @@ -37,7 +37,7 @@ class EventReportsTestCase(unittest.HomeserverTestCase): synapse.rest.admin.register_servlets, login.register_servlets, room.register_servlets, - report_event.register_servlets, + reporting.register_servlets, ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: @@ -453,7 +453,7 @@ class EventReportDetailTestCase(unittest.HomeserverTestCase): synapse.rest.admin.register_servlets, login.register_servlets, room.register_servlets, - report_event.register_servlets, + reporting.register_servlets, ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: diff --git a/tests/rest/client/test_report_event.py b/tests/rest/client/test_reporting.py similarity index 64% rename from tests/rest/client/test_report_event.py rename to tests/rest/client/test_reporting.py index 5903771e52..009deb9cb0 100644 --- a/tests/rest/client/test_report_event.py +++ b/tests/rest/client/test_reporting.py @@ -22,7 +22,7 @@ from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin -from synapse.rest.client import login, report_event, room +from synapse.rest.client import login, reporting, room from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util import Clock @@ -35,7 +35,7 @@ class ReportEventTestCase(unittest.HomeserverTestCase): synapse.rest.admin.register_servlets, login.register_servlets, room.register_servlets, - report_event.register_servlets, + reporting.register_servlets, ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: @@ -139,3 +139,92 @@ class ReportEventTestCase(unittest.HomeserverTestCase): "POST", self.report_path, data, access_token=self.other_user_tok ) self.assertEqual(response_status, channel.code, msg=channel.result["body"]) + + +class ReportRoomTestCase(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + room.register_servlets, + reporting.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.other_user = self.register_user("user", "pass") + self.other_user_tok = self.login("user", "pass") + + self.room_id = self.helper.create_room_as( + self.other_user, tok=self.other_user_tok, is_public=True + ) + self.report_path = ( + f"/_matrix/client/unstable/org.matrix.msc4151/rooms/{self.room_id}/report" + ) + + 
@unittest.override_config( + { + "experimental_features": {"msc4151_enabled": True}, + } + ) + def test_reason_str(self) -> None: + data = {"reason": "this makes me sad"} + self._assert_status(200, data) + + @unittest.override_config( + { + "experimental_features": {"msc4151_enabled": True}, + } + ) + def test_no_reason(self) -> None: + data = {"not_reason": "for typechecking"} + self._assert_status(400, data) + + @unittest.override_config( + { + "experimental_features": {"msc4151_enabled": True}, + } + ) + def test_reason_nonstring(self) -> None: + data = {"reason": 42} + self._assert_status(400, data) + + @unittest.override_config( + { + "experimental_features": {"msc4151_enabled": True}, + } + ) + def test_reason_null(self) -> None: + data = {"reason": None} + self._assert_status(400, data) + + @unittest.override_config( + { + "experimental_features": {"msc4151_enabled": True}, + } + ) + def test_cannot_report_nonexistent_room(self) -> None: + """ + Tests that we don't accept event reports for rooms which do not exist. + """ + channel = self.make_request( + "POST", + "/_matrix/client/unstable/org.matrix.msc4151/rooms/!bloop:example.org/report", + {"reason": "i am very sad"}, + access_token=self.other_user_tok, + shorthand=False, + ) + self.assertEqual(404, channel.code, msg=channel.result["body"]) + self.assertEqual( + "Room does not exist", + channel.json_body["error"], + msg=channel.result["body"], + ) + + def _assert_status(self, response_status: int, data: JsonDict) -> None: + channel = self.make_request( + "POST", + self.report_path, + data, + access_token=self.other_user_tok, + shorthand=False, + ) + self.assertEqual(response_status, channel.code, msg=channel.result["body"]) From 5db3eec5bcd3bc2b92c44e784264dfb5abaf89f6 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Thu, 13 Jun 2024 15:49:00 +0200 Subject: [PATCH 12/16] Clarify that MSC4151 is enabled on matrix.org (#17296) This clarifies in the comments that the MSC is being used in matrix.org See #17270 --- changelog.d/17296.feature | 1 + synapse/rest/client/reporting.py | 10 +++++++++- 2 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17296.feature diff --git a/changelog.d/17296.feature b/changelog.d/17296.feature new file mode 100644 index 0000000000..4ea5e7be85 --- /dev/null +++ b/changelog.d/17296.feature @@ -0,0 +1 @@ +Add support for the unstable [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151) report room API. diff --git a/synapse/rest/client/reporting.py b/synapse/rest/client/reporting.py index a95b83b14d..4eee53e5a8 100644 --- a/synapse/rest/client/reporting.py +++ b/synapse/rest/client/reporting.py @@ -107,7 +107,15 @@ class ReportEventRestServlet(RestServlet): class ReportRoomRestServlet(RestServlet): - # https://github.com/matrix-org/matrix-spec-proposals/pull/4151 + """This endpoint lets clients report a room for abuse. + + Whilst MSC4151 is not yet merged, this unstable endpoint is enabled on matrix.org + for content moderation purposes, and therefore backwards compatibility should be + carefully considered when changing anything on this endpoint. 
+ + More details on the MSC: https://github.com/matrix-org/matrix-spec-proposals/pull/4151 + """ + PATTERNS = client_patterns( "/org.matrix.msc4151/rooms/(?P[^/]*)/report$", releases=[], From c6eb99c87861c9184be38107dcdf972bad6e1cf0 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 13 Jun 2024 15:50:10 +0100 Subject: [PATCH 13/16] Bump `mypy` from 1.8.0 to 1.9.0 (#17297) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- changelog.d/17297.misc | 1 + poetry.lock | 56 +++++++++++++++---------------- tests/push/test_email.py | 37 +++++++++++++++----- tests/rest/client/test_account.py | 28 +++++++++++++--- 4 files changed, 82 insertions(+), 40 deletions(-) create mode 100644 changelog.d/17297.misc diff --git a/changelog.d/17297.misc b/changelog.d/17297.misc new file mode 100644 index 0000000000..7ec351d2c1 --- /dev/null +++ b/changelog.d/17297.misc @@ -0,0 +1 @@ +Bump `mypy` from 1.8.0 to 1.9.0. \ No newline at end of file diff --git a/poetry.lock b/poetry.lock index 028a11f527..7b169ceb6e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1384,38 +1384,38 @@ files = [ [[package]] name = "mypy" -version = "1.8.0" +version = "1.9.0" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" files = [ - {file = "mypy-1.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:485a8942f671120f76afffff70f259e1cd0f0cfe08f81c05d8816d958d4577d3"}, - {file = "mypy-1.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:df9824ac11deaf007443e7ed2a4a26bebff98d2bc43c6da21b2b64185da011c4"}, - {file = "mypy-1.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2afecd6354bbfb6e0160f4e4ad9ba6e4e003b767dd80d85516e71f2e955ab50d"}, - {file = "mypy-1.8.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8963b83d53ee733a6e4196954502b33567ad07dfd74851f32be18eb932fb1cb9"}, - {file = "mypy-1.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:e46f44b54ebddbeedbd3d5b289a893219065ef805d95094d16a0af6630f5d410"}, - {file = "mypy-1.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:855fe27b80375e5c5878492f0729540db47b186509c98dae341254c8f45f42ae"}, - {file = "mypy-1.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4c886c6cce2d070bd7df4ec4a05a13ee20c0aa60cb587e8d1265b6c03cf91da3"}, - {file = "mypy-1.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d19c413b3c07cbecf1f991e2221746b0d2a9410b59cb3f4fb9557f0365a1a817"}, - {file = "mypy-1.8.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9261ed810972061388918c83c3f5cd46079d875026ba97380f3e3978a72f503d"}, - {file = "mypy-1.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:51720c776d148bad2372ca21ca29256ed483aa9a4cdefefcef49006dff2a6835"}, - {file = "mypy-1.8.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:52825b01f5c4c1c4eb0db253ec09c7aa17e1a7304d247c48b6f3599ef40db8bd"}, - {file = "mypy-1.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f5ac9a4eeb1ec0f1ccdc6f326bcdb464de5f80eb07fb38b5ddd7b0de6bc61e55"}, - {file = "mypy-1.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afe3fe972c645b4632c563d3f3eff1cdca2fa058f730df2b93a35e3b0c538218"}, - {file = "mypy-1.8.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:42c6680d256ab35637ef88891c6bd02514ccb7e1122133ac96055ff458f93fc3"}, - {file = "mypy-1.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:720a5ca70e136b675af3af63db533c1c8c9181314d207568bbe79051f122669e"}, - {file = 
"mypy-1.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:028cf9f2cae89e202d7b6593cd98db6759379f17a319b5faf4f9978d7084cdc6"}, - {file = "mypy-1.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4e6d97288757e1ddba10dd9549ac27982e3e74a49d8d0179fc14d4365c7add66"}, - {file = "mypy-1.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f1478736fcebb90f97e40aff11a5f253af890c845ee0c850fe80aa060a267c6"}, - {file = "mypy-1.8.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42419861b43e6962a649068a61f4a4839205a3ef525b858377a960b9e2de6e0d"}, - {file = "mypy-1.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:2b5b6c721bd4aabaadead3a5e6fa85c11c6c795e0c81a7215776ef8afc66de02"}, - {file = "mypy-1.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5c1538c38584029352878a0466f03a8ee7547d7bd9f641f57a0f3017a7c905b8"}, - {file = "mypy-1.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ef4be7baf08a203170f29e89d79064463b7fc7a0908b9d0d5114e8009c3a259"}, - {file = "mypy-1.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7178def594014aa6c35a8ff411cf37d682f428b3b5617ca79029d8ae72f5402b"}, - {file = "mypy-1.8.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ab3c84fa13c04aeeeabb2a7f67a25ef5d77ac9d6486ff33ded762ef353aa5592"}, - {file = "mypy-1.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:99b00bc72855812a60d253420d8a2eae839b0afa4938f09f4d2aa9bb4654263a"}, - {file = "mypy-1.8.0-py3-none-any.whl", hash = "sha256:538fd81bb5e430cc1381a443971c0475582ff9f434c16cd46d2c66763ce85d9d"}, - {file = "mypy-1.8.0.tar.gz", hash = "sha256:6ff8b244d7085a0b425b56d327b480c3b29cafbd2eff27316a004f9a7391ae07"}, + {file = "mypy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f8a67616990062232ee4c3952f41c779afac41405806042a8126fe96e098419f"}, + {file = "mypy-1.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d357423fa57a489e8c47b7c85dfb96698caba13d66e086b412298a1a0ea3b0ed"}, + {file = "mypy-1.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49c87c15aed320de9b438ae7b00c1ac91cd393c1b854c2ce538e2a72d55df150"}, + {file = "mypy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:48533cdd345c3c2e5ef48ba3b0d3880b257b423e7995dada04248725c6f77374"}, + {file = "mypy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:4d3dbd346cfec7cb98e6cbb6e0f3c23618af826316188d587d1c1bc34f0ede03"}, + {file = "mypy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:653265f9a2784db65bfca694d1edd23093ce49740b2244cde583aeb134c008f3"}, + {file = "mypy-1.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a3c007ff3ee90f69cf0a15cbcdf0995749569b86b6d2f327af01fd1b8aee9dc"}, + {file = "mypy-1.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2418488264eb41f69cc64a69a745fad4a8f86649af4b1041a4c64ee61fc61129"}, + {file = "mypy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:68edad3dc7d70f2f17ae4c6c1b9471a56138ca22722487eebacfd1eb5321d612"}, + {file = "mypy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:85ca5fcc24f0b4aeedc1d02f93707bccc04733f21d41c88334c5482219b1ccb3"}, + {file = "mypy-1.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aceb1db093b04db5cd390821464504111b8ec3e351eb85afd1433490163d60cd"}, + {file = "mypy-1.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0235391f1c6f6ce487b23b9dbd1327b4ec33bb93934aa986efe8a9563d9349e6"}, + {file = "mypy-1.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:d4d5ddc13421ba3e2e082a6c2d74c2ddb3979c39b582dacd53dd5d9431237185"}, + {file = "mypy-1.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:190da1ee69b427d7efa8aa0d5e5ccd67a4fb04038c380237a0d96829cb157913"}, + {file = "mypy-1.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:fe28657de3bfec596bbeef01cb219833ad9d38dd5393fc649f4b366840baefe6"}, + {file = "mypy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e54396d70be04b34f31d2edf3362c1edd023246c82f1730bbf8768c28db5361b"}, + {file = "mypy-1.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5e6061f44f2313b94f920e91b204ec600982961e07a17e0f6cd83371cb23f5c2"}, + {file = "mypy-1.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a10926e5473c5fc3da8abb04119a1f5811a236dc3a38d92015cb1e6ba4cb9e"}, + {file = "mypy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b685154e22e4e9199fc95f298661deea28aaede5ae16ccc8cbb1045e716b3e04"}, + {file = "mypy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:5d741d3fc7c4da608764073089e5f58ef6352bedc223ff58f2f038c2c4698a89"}, + {file = "mypy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:587ce887f75dd9700252a3abbc9c97bbe165a4a630597845c61279cf32dfbf02"}, + {file = "mypy-1.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f88566144752999351725ac623471661c9d1cd8caa0134ff98cceeea181789f4"}, + {file = "mypy-1.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61758fabd58ce4b0720ae1e2fea5cfd4431591d6d590b197775329264f86311d"}, + {file = "mypy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e49499be624dead83927e70c756970a0bc8240e9f769389cdf5714b0784ca6bf"}, + {file = "mypy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:571741dc4194b4f82d344b15e8837e8c5fcc462d66d076748142327626a1b6e9"}, + {file = "mypy-1.9.0-py3-none-any.whl", hash = "sha256:a260627a570559181a9ea5de61ac6297aa5af202f06fd7ab093ce74e7181e43e"}, + {file = "mypy-1.9.0.tar.gz", hash = "sha256:3cc5da0127e6a478cddd906068496a97a7618a21ce9b54bde5bf7e539c7af974"}, ] [package.dependencies] diff --git a/tests/push/test_email.py b/tests/push/test_email.py index c927a73fa6..e0aab1c046 100644 --- a/tests/push/test_email.py +++ b/tests/push/test_email.py @@ -205,8 +205,24 @@ class EmailPusherTests(HomeserverTestCase): # Multipart: plain text, base 64 encoded; html, base 64 encoded multipart_msg = email.message_from_bytes(msg) - txt = multipart_msg.get_payload()[0].get_payload(decode=True).decode() - html = multipart_msg.get_payload()[1].get_payload(decode=True).decode() + + # Extract the text (non-HTML) portion of the multipart Message, + # as a Message. + txt_message = multipart_msg.get_payload(i=0) + assert isinstance(txt_message, email.message.Message) + + # Extract the actual bytes from the Message object, and decode them to a `str`. + txt_bytes = txt_message.get_payload(decode=True) + assert isinstance(txt_bytes, bytes) + txt = txt_bytes.decode() + + # Do the same for the HTML portion of the multipart Message. 
+ html_message = multipart_msg.get_payload(i=1) + assert isinstance(html_message, email.message.Message) + html_bytes = html_message.get_payload(decode=True) + assert isinstance(html_bytes, bytes) + html = html_bytes.decode() + self.assertIn("/_synapse/client/unsubscribe", txt) self.assertIn("/_synapse/client/unsubscribe", html) @@ -347,12 +363,17 @@ class EmailPusherTests(HomeserverTestCase): # That email should contain the room's avatar msg: bytes = args[5] # Multipart: plain text, base 64 encoded; html, base 64 encoded - html = ( - email.message_from_bytes(msg) - .get_payload()[1] - .get_payload(decode=True) - .decode() - ) + + # Extract the html Message object from the Multipart Message. + # We need the asserts to convince mypy that this is OK. + html_message = email.message_from_bytes(msg).get_payload(i=1) + assert isinstance(html_message, email.message.Message) + + # Extract the `bytes` from the html Message object, and decode to a `str`. + html = html_message.get_payload(decode=True) + assert isinstance(html, bytes) + html = html.decode() + self.assertIn("_matrix/media/v1/thumbnail/DUMMY_MEDIA_ID", html) def test_empty_room(self) -> None: diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py index 992421ffe2..a85ea994de 100644 --- a/tests/rest/client/test_account.py +++ b/tests/rest/client/test_account.py @@ -427,13 +427,23 @@ class PasswordResetTestCase(unittest.HomeserverTestCase): text = None for part in mail.walk(): if part.get_content_type() == "text/plain": - text = part.get_payload(decode=True).decode("UTF-8") + text = part.get_payload(decode=True) + if text is not None: + # According to the logic table in `get_payload`, we know that + # the result of `get_payload` will be `bytes`, but mypy doesn't + # know this and complains. Thus, we assert the type. + assert isinstance(text, bytes) + text = text.decode("UTF-8") + break if not text: self.fail("Could not find text portion of email to parse") - assert text is not None + # `text` must be a `str`, after being decoded and determined just above + # to not be `None` or an empty `str`. + assert isinstance(text, str) + match = re.search(r"https://example.com\S+", text) assert match, "Could not find link in email" @@ -1209,13 +1219,23 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): text = None for part in mail.walk(): if part.get_content_type() == "text/plain": - text = part.get_payload(decode=True).decode("UTF-8") + text = part.get_payload(decode=True) + if text is not None: + # According to the logic table in `get_payload`, we know that + # the result of `get_payload` will be `bytes`, but mypy doesn't + # know this and complains. Thus, we assert the type. + assert isinstance(text, bytes) + text = text.decode("UTF-8") + break if not text: self.fail("Could not find text portion of email to parse") - assert text is not None + # `text` must be a `str`, after being decoded and determined just above + # to not be `None` or an empty `str`. + assert isinstance(text, str) + match = re.search(r"https://example.com\S+", text) assert match, "Could not find link in email" From ebdce69f6af3863c9db2c00d6f78eae7ec9433f5 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 13 Jun 2024 11:00:52 -0500 Subject: [PATCH 14/16] Fix `get_last_event_in_room_before_stream_ordering(...)` finding the wrong last event (#17295) PR where this was introduced: https://github.com/matrix-org/synapse/pull/14817 ### What does this affect? 
`get_last_event_in_room_before_stream_ordering(...)` is used in Sync v2
in a lot of different state calculations.

`get_last_event_in_room_before_stream_ordering(...)` is also used in
`/rooms/{roomId}/members`
---
 changelog.d/17295.bugfix                 |   1 +
 synapse/storage/databases/main/stream.py |  30 ++-
 tests/storage/test_stream.py             | 269 ++++++++++++++++++++++-
 3 files changed, 289 insertions(+), 11 deletions(-)
 create mode 100644 changelog.d/17295.bugfix

diff --git a/changelog.d/17295.bugfix b/changelog.d/17295.bugfix
new file mode 100644
index 0000000000..4484253bb8
--- /dev/null
+++ b/changelog.d/17295.bugfix
@@ -0,0 +1 @@
+Fix edge case in `/sync` returning the wrong state when using sharded event persisters.
diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py
index 7ab6003f61..61373f0bfb 100644
--- a/synapse/storage/databases/main/stream.py
+++ b/synapse/storage/databases/main/stream.py
@@ -914,12 +914,23 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         def get_last_event_in_room_before_stream_ordering_txn(
             txn: LoggingTransaction,
         ) -> Optional[str]:
-            # We need to handle the fact that the stream tokens can be vector
-            # clocks. We do this by getting all rows between the minimum and
-            # maximum stream ordering in the token, plus one row less than the
-            # minimum stream ordering. We then filter the results against the
-            # token and return the first row that matches.
+            # We're looking for the closest event at or before the token. We need to
+            # handle the fact that the stream token can be a vector clock (with an
+            # `instance_map`) and events can be persisted on different instances
+            # (sharded event persisters). The first subquery handles the events that
+            # would be within the vector clock and gets all rows between the minimum and
+            # maximum stream ordering in the token which need to be filtered against the
+            # `instance_map`. The second subquery handles the "before" case and finds
+            # the first row before the token. We then filter out any results past the
+            # token's vector clock and return the first row that matches.
+            min_stream = end_token.stream
+            max_stream = end_token.get_max_stream_pos()
+
+            # We use `union all` because we don't need any of the deduplication logic
+            # (`union` is really a union + distinct). `UNION ALL` does preserve the
+            # ordering of the operand queries but there is no actual guarantee that it
+            # has this behavior in all scenarios so we need the extra `ORDER BY` at the
+            # bottom.
sql = """ SELECT * FROM ( SELECT instance_name, stream_ordering, topological_ordering, event_id @@ -931,7 +942,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): AND rejections.event_id IS NULL ORDER BY stream_ordering DESC ) AS a - UNION + UNION ALL SELECT * FROM ( SELECT instance_name, stream_ordering, topological_ordering, event_id FROM events @@ -943,15 +954,16 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ORDER BY stream_ordering DESC LIMIT 1 ) AS b + ORDER BY stream_ordering DESC """ txn.execute( sql, ( room_id, - end_token.stream, - end_token.get_max_stream_pos(), + min_stream, + max_stream, room_id, - end_token.stream, + min_stream, ), ) diff --git a/tests/storage/test_stream.py b/tests/storage/test_stream.py index 2029cd9c68..ee34baf46f 100644 --- a/tests/storage/test_stream.py +++ b/tests/storage/test_stream.py @@ -19,7 +19,10 @@ # # -from typing import List +import logging +from typing import List, Tuple + +from immutabledict import immutabledict from twisted.test.proto_helpers import MemoryReactor @@ -28,11 +31,13 @@ from synapse.api.filtering import Filter from synapse.rest import admin from synapse.rest.client import login, room from synapse.server import HomeServer -from synapse.types import JsonDict +from synapse.types import JsonDict, PersistedEventPosition, RoomStreamToken from synapse.util import Clock from tests.unittest import HomeserverTestCase +logger = logging.getLogger(__name__) + class PaginationTestCase(HomeserverTestCase): """ @@ -268,3 +273,263 @@ class PaginationTestCase(HomeserverTestCase): } chunk = self._filter_messages(filter) self.assertEqual(chunk, [self.event_id_1, self.event_id_2, self.event_id_none]) + + +class GetLastEventInRoomBeforeStreamOrderingTestCase(HomeserverTestCase): + """ + Test `get_last_event_in_room_before_stream_ordering(...)` + """ + + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + self.event_sources = hs.get_event_sources() + + def _update_persisted_instance_name_for_event( + self, event_id: str, instance_name: str + ) -> None: + """ + Update the `instance_name` that persisted the the event in the database. + """ + return self.get_success( + self.store.db_pool.simple_update_one( + "events", + keyvalues={"event_id": event_id}, + updatevalues={"instance_name": instance_name}, + ) + ) + + def _send_event_on_instance( + self, instance_name: str, room_id: str, access_token: str + ) -> Tuple[JsonDict, PersistedEventPosition]: + """ + Send an event in a room and mimic that it was persisted by a specific + instance/worker. 
+ """ + event_response = self.helper.send( + room_id, f"{instance_name} message", tok=access_token + ) + + self._update_persisted_instance_name_for_event( + event_response["event_id"], instance_name + ) + + event_pos = self.get_success( + self.store.get_position_for_event(event_response["event_id"]) + ) + + return event_response, event_pos + + def test_before_room_created(self) -> None: + """ + Test that no event is returned if we are using a token before the room was even created + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + before_room_token = self.event_sources.get_current_token() + + room_id = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True) + + last_event = self.get_success( + self.store.get_last_event_in_room_before_stream_ordering( + room_id=room_id, + end_token=before_room_token.room_key, + ) + ) + + self.assertIsNone(last_event) + + def test_after_room_created(self) -> None: + """ + Test that an event is returned if we are using a token after the room was created + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True) + + after_room_token = self.event_sources.get_current_token() + + last_event = self.get_success( + self.store.get_last_event_in_room_before_stream_ordering( + room_id=room_id, + end_token=after_room_token.room_key, + ) + ) + + self.assertIsNotNone(last_event) + + def test_activity_in_other_rooms(self) -> None: + """ + Test to make sure that the last event in the room is returned even if the + `stream_ordering` has advanced from activity in other rooms. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True) + event_response = self.helper.send(room_id1, "target!", tok=user1_tok) + # Create another room to advance the stream_ordering + self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True) + + after_room_token = self.event_sources.get_current_token() + + last_event = self.get_success( + self.store.get_last_event_in_room_before_stream_ordering( + room_id=room_id1, + end_token=after_room_token.room_key, + ) + ) + + # Make sure it's the event we expect (which also means we know it's from the + # correct room) + self.assertEqual(last_event, event_response["event_id"]) + + def test_activity_after_token_has_no_effect(self) -> None: + """ + Test to make sure we return the last event before the token even if there is + activity after it. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True) + event_response = self.helper.send(room_id1, "target!", tok=user1_tok) + + after_room_token = self.event_sources.get_current_token() + + # Send some events after the token + self.helper.send(room_id1, "after1", tok=user1_tok) + self.helper.send(room_id1, "after2", tok=user1_tok) + + last_event = self.get_success( + self.store.get_last_event_in_room_before_stream_ordering( + room_id=room_id1, + end_token=after_room_token.room_key, + ) + ) + + # Make sure it's the last event before the token + self.assertEqual(last_event, event_response["event_id"]) + + def test_last_event_within_sharded_token(self) -> None: + """ + Test to make sure we can find the last event that that is *within* the sharded + token (a token that has an `instance_map` and looks like + `m{min_pos}~{writer1}.{pos1}~{writer2}.{pos2}`). We are specifically testing + that we can find an event within the tokens minimum and instance + `stream_ordering`. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True) + event_response1, event_pos1 = self._send_event_on_instance( + "worker1", room_id1, user1_tok + ) + event_response2, event_pos2 = self._send_event_on_instance( + "worker1", room_id1, user1_tok + ) + event_response3, event_pos3 = self._send_event_on_instance( + "worker1", room_id1, user1_tok + ) + + # Create another room to advance the `stream_ordering` on the same worker + # so we can sandwich event3 in the middle of the token + room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True) + event_response4, event_pos4 = self._send_event_on_instance( + "worker1", room_id2, user1_tok + ) + + # Assemble a token that encompasses event1 -> event4 on worker1 + end_token = RoomStreamToken( + stream=event_pos2.stream, + instance_map=immutabledict({"worker1": event_pos4.stream}), + ) + + # Send some events after the token + self.helper.send(room_id1, "after1", tok=user1_tok) + self.helper.send(room_id1, "after2", tok=user1_tok) + + last_event = self.get_success( + self.store.get_last_event_in_room_before_stream_ordering( + room_id=room_id1, + end_token=end_token, + ) + ) + + # Should find closest event at/before the token in room1 + self.assertEqual( + last_event, + event_response3["event_id"], + f"We expected {event_response3['event_id']} but saw {last_event} which corresponds to " + + str( + { + "event1": event_response1["event_id"], + "event2": event_response2["event_id"], + "event3": event_response3["event_id"], + } + ), + ) + + def test_last_event_before_sharded_token(self) -> None: + """ + Test to make sure we can find the last event that is *before* the sharded token + (a token that has an `instance_map` and looks like + `m{min_pos}~{writer1}.{pos1}~{writer2}.{pos2}`). 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True) + event_response1, event_pos1 = self._send_event_on_instance( + "worker1", room_id1, user1_tok + ) + event_response2, event_pos2 = self._send_event_on_instance( + "worker1", room_id1, user1_tok + ) + + # Create another room to advance the `stream_ordering` on the same worker + room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True) + event_response3, event_pos3 = self._send_event_on_instance( + "worker1", room_id2, user1_tok + ) + event_response4, event_pos4 = self._send_event_on_instance( + "worker1", room_id2, user1_tok + ) + + # Assemble a token that encompasses event3 -> event4 on worker1 + end_token = RoomStreamToken( + stream=event_pos3.stream, + instance_map=immutabledict({"worker1": event_pos4.stream}), + ) + + # Send some events after the token + self.helper.send(room_id1, "after1", tok=user1_tok) + self.helper.send(room_id1, "after2", tok=user1_tok) + + last_event = self.get_success( + self.store.get_last_event_in_room_before_stream_ordering( + room_id=room_id1, + end_token=end_token, + ) + ) + + # Should find closest event at/before the token in room1 + self.assertEqual( + last_event, + event_response2["event_id"], + f"We expected {event_response2['event_id']} but saw {last_event} which corresponds to " + + str( + { + "event1": event_response1["event_id"], + "event2": event_response2["event_id"], + } + ), + ) From 8c58eb7f17bdc697e653c7920edab42ee36f975b Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 13 Jun 2024 11:32:50 -0500 Subject: [PATCH 15/16] Add `event.internal_metadata.instance_name` (#17300) Add `event.internal_metadata.instance_name` (the worker instance that persisted the event) to go alongside the existing `event.internal_metadata.stream_ordering`. `instance_name` is useful to properly compare and query for events with a token since you need to compare both the `stream_ordering` and `instance_name` against the vector clock/`instance_map` in the `RoomStreamToken`. This is pre-requisite work and may be used in https://github.com/element-hq/synapse/pull/17293 Adding `event.internal_metadata.instance_name` was first mentioned in the initial Sliding Sync PR while pairing with @erikjohnston, see https://github.com/element-hq/synapse/pull/17187/commits/09609cb0dbca3a4cfd9fbf90cc962e765ec469c0#diff-5cd773fb307aa754bd3948871ba118b1ef0303f4d72d42a2d21e38242bf4e096R405-R410 --- changelog.d/17300.misc | 1 + rust/src/events/internal_metadata.rs | 3 +++ synapse/events/utils.py | 2 ++ synapse/handlers/message.py | 1 + synapse/storage/databases/main/events.py | 1 + synapse/storage/databases/main/events_worker.py | 16 ++++++++++------ synapse/synapse_rust/events.pyi | 2 ++ tests/events/test_utils.py | 3 +++ tests/replication/storage/test_events.py | 10 +++++++--- tests/storage/test_event_chain.py | 1 + 10 files changed, 31 insertions(+), 9 deletions(-) create mode 100644 changelog.d/17300.misc diff --git a/changelog.d/17300.misc b/changelog.d/17300.misc new file mode 100644 index 0000000000..cdc40bb2e5 --- /dev/null +++ b/changelog.d/17300.misc @@ -0,0 +1 @@ +Expose the worker instance that persisted the event on `event.internal_metadata.instance_name`. 
diff --git a/rust/src/events/internal_metadata.rs b/rust/src/events/internal_metadata.rs
index 63774fbd54..ad87825f16 100644
--- a/rust/src/events/internal_metadata.rs
+++ b/rust/src/events/internal_metadata.rs
@@ -204,6 +204,8 @@ pub struct EventInternalMetadata {
 /// The stream ordering of this event. None, until it has been persisted.
 #[pyo3(get, set)]
 stream_ordering: Option<NonZeroI64>,
+ #[pyo3(get, set)]
+ instance_name: Option<String>,

 /// whether this event is an outlier (ie, whether we have the state at that
 /// point in the DAG)
@@ -232,6 +234,7 @@ impl EventInternalMetadata {
 Ok(EventInternalMetadata {
 data,
 stream_ordering: None,
+ instance_name: None,
 outlier: false,
 })
 }
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index 0772472312..b997d82d71 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -90,6 +90,7 @@ def prune_event(event: EventBase) -> EventBase:
 pruned_event.internal_metadata.stream_ordering = (
 event.internal_metadata.stream_ordering
 )
+ pruned_event.internal_metadata.instance_name = event.internal_metadata.instance_name
 pruned_event.internal_metadata.outlier = event.internal_metadata.outlier

 # Mark the event as redacted
@@ -116,6 +117,7 @@ def clone_event(event: EventBase) -> EventBase:
 new_event.internal_metadata.stream_ordering = (
 event.internal_metadata.stream_ordering
 )
+ new_event.internal_metadata.instance_name = event.internal_metadata.instance_name
 new_event.internal_metadata.outlier = event.internal_metadata.outlier

 return new_event
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index de5bd44a5f..721ef04f41 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -1551,6 +1551,7 @@ class EventCreationHandler:
 # stream_ordering entry manually (as it was persisted on
 # another worker).
 event.internal_metadata.stream_ordering = stream_id
+ event.internal_metadata.instance_name = writer_instance

 return event

diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index f1bd85aa27..66428e6c8e 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -207,6 +207,7 @@ class PersistEventsStore:
 async with stream_ordering_manager as stream_orderings:
 for (event, _), stream in zip(events_and_contexts, stream_orderings):
 event.internal_metadata.stream_ordering = stream
+ event.internal_metadata.instance_name = self._instance_name

 await self.db_pool.runInteraction(
 "persist_events",
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index c06c44deb1..e264d36f02 100644
--- a/synapse/storage/databases/main/events_worker.py
+++ b/synapse/storage/databases/main/events_worker.py
@@ -156,6 +156,7 @@ class _EventRow:
 event_id: str
 stream_ordering: int
+ instance_name: str
 json: str
 internal_metadata: str
 format_version: Optional[int]
@@ -1354,6 +1355,7 @@ class EventsWorkerStore(SQLBaseStore):
 rejected_reason=rejected_reason,
 )
 original_ev.internal_metadata.stream_ordering = row.stream_ordering
+ original_ev.internal_metadata.instance_name = row.instance_name
 original_ev.internal_metadata.outlier = row.outlier

 # Consistency check: if the content of the event has been modified in the
@@ -1439,6 +1441,7 @@
 SELECT
 e.event_id,
 e.stream_ordering,
+ e.instance_name,
 ej.internal_metadata,
 ej.json,
 ej.format_version,
@@ -1462,13 +1465,14 @@
 event_dict[event_id] = _EventRow(
 event_id=event_id,
 stream_ordering=row[1],
- internal_metadata=row[2],
- json=row[3],
- format_version=row[4],
- room_version_id=row[5],
- rejected_reason=row[6],
+ instance_name=row[2],
+ internal_metadata=row[3],
+ json=row[4],
+ format_version=row[5],
+ room_version_id=row[6],
+ rejected_reason=row[7],
 redactions=[],
- outlier=bool(row[7]), # This is an int in SQLite3
+ outlier=bool(row[8]), # This is an int in SQLite3
 )

 # check for redactions
diff --git a/synapse/synapse_rust/events.pyi b/synapse/synapse_rust/events.pyi
index 69837617f5..1682d0d151 100644
--- a/synapse/synapse_rust/events.pyi
+++ b/synapse/synapse_rust/events.pyi
@@ -19,6 +19,8 @@ class EventInternalMetadata:
 stream_ordering: Optional[int]
 """the stream ordering of this event. None, until it has been persisted."""

+ instance_name: Optional[str]
+ """the instance name of the server that persisted this event.
None, until it has been persisted."""

 outlier: bool
 """whether this event is an outlier (ie, whether we have the state at that
diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py
index d5ac66a6ed..30f8787758 100644
--- a/tests/events/test_utils.py
+++ b/tests/events/test_utils.py
@@ -625,6 +625,8 @@ class CloneEventTestCase(stdlib_unittest.TestCase):
 )
 original.internal_metadata.stream_ordering = 1234
 self.assertEqual(original.internal_metadata.stream_ordering, 1234)
+ original.internal_metadata.instance_name = "worker1"
+ self.assertEqual(original.internal_metadata.instance_name, "worker1")

 cloned = clone_event(original)
 cloned.unsigned["b"] = 3
@@ -632,6 +634,7 @@
 self.assertEqual(original.unsigned, {"a": 1, "b": 2})
 self.assertEqual(cloned.unsigned, {"a": 1, "b": 3})
 self.assertEqual(cloned.internal_metadata.stream_ordering, 1234)
+ self.assertEqual(cloned.internal_metadata.instance_name, "worker1")
 self.assertEqual(cloned.internal_metadata.txn_id, "txn")

diff --git a/tests/replication/storage/test_events.py b/tests/replication/storage/test_events.py
index 4e41a1c912..a56f1e2d5d 100644
--- a/tests/replication/storage/test_events.py
+++ b/tests/replication/storage/test_events.py
@@ -141,6 +141,7 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase):
 self.persist(type="m.room.create", key="", creator=USER_ID)
 self.check("get_invited_rooms_for_local_user", [USER_ID_2], [])
 event = self.persist(type="m.room.member", key=USER_ID_2, membership="invite")
+ assert event.internal_metadata.instance_name is not None
 assert event.internal_metadata.stream_ordering is not None
 self.replicate()
@@ -155,7 +156,7 @@
 "invite",
 event.event_id,
 PersistedEventPosition(
- self.hs.get_instance_name(),
+ event.internal_metadata.instance_name,
 event.internal_metadata.stream_ordering,
 ),
 RoomVersions.V1.identifier,
@@ -232,11 +233,12 @@
 j2 = self.persist(
 type="m.room.member", sender=USER_ID_2, key=USER_ID_2, membership="join"
 )
+ assert j2.internal_metadata.instance_name is not None
 assert j2.internal_metadata.stream_ordering is not None
 self.replicate()

 expected_pos = PersistedEventPosition(
- "master", j2.internal_metadata.stream_ordering
+ j2.internal_metadata.instance_name, j2.internal_metadata.stream_ordering
 )
 self.check(
 "get_rooms_for_user_with_stream_ordering",
@@ -288,6 +290,7 @@
 msg, msgctx = self.build_event()
 self.get_success(self.persistance.persist_events([(j2, j2ctx), (msg, msgctx)]))
 self.replicate()
+ assert j2.internal_metadata.instance_name is not None
 assert j2.internal_metadata.stream_ordering is not None

 event_source = RoomEventSource(self.hs)
@@ -329,7 +332,8 @@
 # If we're joined, we expect the position to be the latest position in the
 # joined_rooms list.
 if membership_changes:
 expected_pos = PersistedEventPosition(
- "master", j2.internal_metadata.stream_ordering
+ j2.internal_metadata.instance_name,
+ j2.internal_metadata.stream_ordering,
 )
 self.assertEqual(
 joined_rooms,
diff --git a/tests/storage/test_event_chain.py b/tests/storage/test_event_chain.py
index 27d5b0125f..81feb3ec29 100644
--- a/tests/storage/test_event_chain.py
+++ b/tests/storage/test_event_chain.py
@@ -431,6 +431,7 @@ class EventChainStoreTestCase(HomeserverTestCase):

 for e in events:
 e.internal_metadata.stream_ordering = self._next_stream_ordering
+ e.internal_metadata.instance_name = self.hs.get_instance_name()
 self._next_stream_ordering += 1

 def _persist(txn: LoggingTransaction) -> None:

From 8aaff851b1f1dbf74482282e70194a69d13ea584 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Thu, 13 Jun 2024 11:36:57 -0500
Subject: [PATCH 16/16] Fix `newly_left` rooms not appearing if we returned
 early (Sliding Sync) (#17301)

Fix `newly_left` rooms not appearing if we returned early when
`membership_snapshot_token.is_before_or_eq(to_token.room_key)`.

Introduced in https://github.com/element-hq/synapse/pull/17187 (part of Sliding Sync)

The tests didn't catch it because they had a small typo in them: `room_id1` vs `room_id2`.

Found while working on https://github.com/element-hq/synapse/pull/17293
---
 changelog.d/17301.bugfix | 1 +
 synapse/handlers/sliding_sync.py | 26 +++++++++++++-------------
 tests/handlers/test_sliding_sync.py | 2 +-
 3 files changed, 15 insertions(+), 14 deletions(-)
 create mode 100644 changelog.d/17301.bugfix

diff --git a/changelog.d/17301.bugfix b/changelog.d/17301.bugfix
new file mode 100644
index 0000000000..50383cb4a4
--- /dev/null
+++ b/changelog.d/17301.bugfix
@@ -0,0 +1 @@
+Add initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
diff --git a/synapse/handlers/sliding_sync.py b/synapse/handlers/sliding_sync.py
index 1c37f83a2b..de4f33abb8 100644
--- a/synapse/handlers/sliding_sync.py
+++ b/synapse/handlers/sliding_sync.py
@@ -275,12 +275,6 @@ class SlidingSyncHandler:
 instance_map=immutabledict(instance_to_max_stream_ordering_map),
 )

- # If our `to_token` is already the same or ahead of the latest room membership
- # for the user, we can just straight-up return the room list (nothing has
- # changed)
- if membership_snapshot_token.is_before_or_eq(to_token.room_key):
- return sync_room_id_set
-
 # Since we fetched the users room list at some point in time after the from/to
 # tokens, we need to revert/rewind some membership changes to match the point in
 # time of the `to_token`. In particular, we need to make these fixups:
@@ -300,14 +294,20 @@ class SlidingSyncHandler:

 # 1) Fetch membership changes that fall in the range from `to_token` up to
 # `membership_snapshot_token`
- membership_change_events_after_to_token = (
- await self.store.get_membership_changes_for_user(
- user_id,
- from_key=to_token.room_key,
- to_key=membership_snapshot_token,
- excluded_rooms=self.rooms_to_exclude_globally,
+ #
+ # If our `to_token` is already the same or ahead of the latest room membership
+ # for the user, we don't need to do any "2)" fix-ups and can just straight-up
+ # use the room list from the snapshot as a base (nothing has changed)
+ membership_change_events_after_to_token = []
+ if not membership_snapshot_token.is_before_or_eq(to_token.room_key):
+ membership_change_events_after_to_token = (
+ await self.store.get_membership_changes_for_user(
+ user_id,
+ from_key=to_token.room_key,
+ to_key=membership_snapshot_token,
+ excluded_rooms=self.rooms_to_exclude_globally,
+ )
 )
- )

 # 1) Assemble a list of the last membership events in some given ranges. Someone
 # could have left and joined multiple times during the given range but we only
diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py
index 5c27474b96..41ceb517f0 100644
--- a/tests/handlers/test_sliding_sync.py
+++ b/tests/handlers/test_sliding_sync.py
@@ -326,7 +326,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):

 # Leave during the from_token/to_token range (newly_left)
 room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
- self.helper.leave(room_id1, user1_id, tok=user1_tok)
+ self.helper.leave(room_id2, user1_id, tok=user1_tok)

 after_room2_token = self.event_sources.get_current_token()
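The sliding_sync.py change above is easiest to see as a control-flow fix: the
old early return skipped everything below it, including the `newly_left`
handling. A minimal sketch of the corrected shape, using hypothetical names
rather than Synapse's actual API:

    # Illustrative sketch only: gate the rewind work on the token comparison
    # instead of returning early, so the newly_left step always runs.
    from typing import Set


    def room_ids_for_user(
        snapshot_rooms: Set[str],
        newly_left_rooms: Set[str],  # rooms left within the from/to token range
        snapshot_pos: int,
        to_pos: int,
    ) -> Set[str]:
        rooms = set(snapshot_rooms)
        if snapshot_pos > to_pos:
            # Snapshot is ahead of `to_token`: revert memberships that changed
            # after `to_token` here (elided in this sketch).
            pass
        # This step runs either way; an early `return snapshot_rooms` above
        # is exactly what used to skip it.
        rooms |= newly_left_rooms
        return rooms


    # With the old early return, the newly-left room would have been dropped:
    assert room_ids_for_user({"!a:x"}, {"!left:x"}, snapshot_pos=3, to_pos=5) == {"!a:x", "!left:x"}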