Merge branch 'release-v1.79' into develop

Commit a1c9869394 by David Robertson, 2023-03-13 18:35:21 +00:00
GPG key ID: 903ECE108A39DEDD (no known key found for this signature in database)
11 changed files with 271 additions and 44 deletions

CHANGES.md

@@ -1,3 +1,19 @@
+Synapse 1.79.0rc2 (2023-03-13)
+==============================
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.79.0rc1 where attempting to register a `on_remove_user_third_party_identifier` module API callback would be a no-op. ([\#15227](https://github.com/matrix-org/synapse/issues/15227))
+- Fix a rare bug introduced in Synapse 1.73 where events could remain unsent to other homeservers after a faster-join to a room. ([\#15248](https://github.com/matrix-org/synapse/issues/15248))
+
+Internal Changes
+----------------
+
+- Refactor `filter_events_for_server`. ([\#15240](https://github.com/matrix-org/synapse/issues/15240))
+
+
 Synapse 1.79.0rc1 (2023-03-07)
 ==============================
@@ -47,7 +63,7 @@ Improved Documentation
 Deprecations and Removals
 -------------------------
 
-- Deprecate the `on_threepid_bind` module callback, to be replaced by [`on_add_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_add_user_third_party_identifier). See [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.79/docs/upgrade.md#upgrading-to-v1790). ([\#15044]
+- Deprecate the `on_threepid_bind` module callback, to be replaced by [`on_add_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_add_user_third_party_identifier). See [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.79/docs/upgrade.md#upgrading-to-v1790). ([\#15044](https://github.com/matrix-org/synapse/issues/15044))
 - Remove the unspecced `room_alias` field from the [`/createRoom`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3createroom) response. ([\#15093](https://github.com/matrix-org/synapse/issues/15093))
 - Remove the unspecced `PUT` on the `/knock/{roomIdOrAlias}` endpoint. ([\#15189](https://github.com/matrix-org/synapse/issues/15189))
 - Remove the undocumented and unspecced `type` parameter to the `/thumbnail` endpoint. ([\#15137](https://github.com/matrix-org/synapse/issues/15137))

debian/changelog

@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.79.0~rc2) stable; urgency=medium
+
+  * New Synapse release 1.79.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Mon, 13 Mar 2023 12:54:21 +0000
+
 matrix-synapse-py3 (1.79.0~rc1) stable; urgency=medium
 
   * New Synapse release 1.79.0rc1.

pyproject.toml

@@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml"
 
 [tool.poetry]
 name = "matrix-synapse"
-version = "1.79.0rc1"
+version = "1.79.0rc2"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "Apache-2.0"

synapse/events/third_party_rules.py

@@ -247,6 +247,11 @@ class ThirdPartyEventRules:
                 on_add_user_third_party_identifier
             )
 
+        if on_remove_user_third_party_identifier is not None:
+            self._on_remove_user_third_party_identifier_callbacks.append(
+                on_remove_user_third_party_identifier
+            )
+
     async def check_event_allowed(
         self,
         event: EventBase,
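For context, the registration path this fixes is the one modules reach through `ModuleApi.register_third_party_rules_callbacks`. Below is a minimal sketch of a module exercising it, assuming the documented `(user_id, medium, address)` callback signature; the module class and its logging are illustrative, not part of this commit:

```python
import logging

from synapse.module_api import ModuleApi

logger = logging.getLogger(__name__)


class ExampleThirdPartyIdentifierModule:
    """Hypothetical module: observes 3PIDs being removed from accounts."""

    def __init__(self, config: dict, api: ModuleApi):
        # Before this fix, passing on_remove_user_third_party_identifier
        # here was silently a no-op (#15227).
        api.register_third_party_rules_callbacks(
            on_remove_user_third_party_identifier=self.on_remove_3pid,
        )

    async def on_remove_3pid(self, user_id: str, medium: str, address: str) -> None:
        logger.info("3PID %s/%s removed from user %s", medium, address, user_id)
```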

synapse/federation/sender/per_destination_queue.py

@@ -497,8 +497,8 @@ class PerDestinationQueue:
             #
             # Note: `catchup_pdus` will have exactly one PDU per room.
             for pdu in catchup_pdus:
-                # The PDU from the DB will be the last PDU in the room from
-                # *this server* that wasn't sent to the remote. However, other
+                # The PDU from the DB will be the newest PDU in the room from
+                # *this server* that we tried---but were unable---to send to the remote. However, other
                 # servers may have sent lots of events since then, and we want
                 # to try and tell the remote only about the *latest* events in
                 # the room. This is so that it doesn't get inundated by events
@@ -516,6 +516,11 @@ class PerDestinationQueue:
                 # If the event is in the extremities, then great! We can just
                 # use that without having to do further checks.
                 room_catchup_pdus = [pdu]
+            elif await self._store.is_partial_state_room(pdu.room_id):
+                # We can't be sure which events the destination should
+                # see using only partial state. Avoid doing so, and just retry
+                # sending the newest PDU the remote is missing from us.
+                room_catchup_pdus = [pdu]
             else:
                 # If not, fetch the extremities and figure out which we can
                 # send.
@@ -547,6 +552,8 @@ class PerDestinationQueue:
                     self._server_name,
                     new_pdus,
                     redact=False,
+                    filter_out_erased_senders=True,
+                    filter_out_remote_partial_state_events=True,
                 )
 
                 # If we've filtered out all the extremities, fall back to
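Taken together, the catch-up loop now chooses what to send per room in three branches. A condensed paraphrase follows (a sketch, not the real method; `room_extremity_ids` stands in for the extremity lookup the loop already performs):

```python
from typing import Collection, List

from synapse.events import EventBase


async def choose_room_catchup_pdus(
    self, pdu: EventBase, room_extremity_ids: Collection[str]
) -> List[EventBase]:
    """Sketch of the branch structure above; attribute names follow the diff."""
    if pdu.event_id in room_extremity_ids:
        # Our newest unsent PDU is still a forward extremity: send it as-is.
        return [pdu]
    elif await self._store.is_partial_state_room(pdu.room_id):
        # Partial state: we cannot tell which events the destination should
        # see, so just retry the newest PDU it is missing from us.
        return [pdu]
    else:
        # Full state: fetch the extremities and filter them with
        # filter_events_for_server(..., filter_out_erased_senders=True,
        # filter_out_remote_partial_state_events=True) before sending.
        ...
```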

synapse/handlers/federation.py

@@ -392,7 +392,7 @@ class FederationHandler:
                 get_prev_content=False,
             )
 
-            # We set `check_history_visibility_only` as we might otherwise get false
+            # We unset `filter_out_erased_senders` as we might otherwise get false
             # positives from users having been erased.
             filtered_extremities = await filter_events_for_server(
                 self._storage_controllers,
@@ -400,7 +400,8 @@ class FederationHandler:
                 self.server_name,
                 events_to_check,
                 redact=False,
-                check_history_visibility_only=True,
+                filter_out_erased_senders=False,
+                filter_out_remote_partial_state_events=False,
             )
             if filtered_extremities:
                 extremities_to_request.append(bp.event_id)
@@ -1331,7 +1332,13 @@ class FederationHandler:
         )
 
         events = await filter_events_for_server(
-            self._storage_controllers, origin, self.server_name, events
+            self._storage_controllers,
+            origin,
+            self.server_name,
+            events,
+            redact=True,
+            filter_out_erased_senders=True,
+            filter_out_remote_partial_state_events=True,
         )
 
         return events
@@ -1362,7 +1369,13 @@ class FederationHandler:
         await self._event_auth_handler.assert_host_in_room(event.room_id, origin)
 
         events = await filter_events_for_server(
-            self._storage_controllers, origin, self.server_name, [event]
+            self._storage_controllers,
+            origin,
+            self.server_name,
+            [event],
+            redact=True,
+            filter_out_erased_senders=True,
+            filter_out_remote_partial_state_events=True,
         )
         event = events[0]
         return event
@@ -1390,7 +1403,13 @@ class FederationHandler:
         )
 
         missing_events = await filter_events_for_server(
-            self._storage_controllers, origin, self.server_name, missing_events
+            self._storage_controllers,
+            origin,
+            self.server_name,
+            missing_events,
+            redact=True,
+            filter_out_erased_senders=True,
+            filter_out_remote_partial_state_events=True,
         )
 
         return missing_events
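Every call site above now spells out what the old `check_history_visibility_only` flag bundled together: passing `True` was equivalent to disabling both of the new filters, and the old default to enabling both. A hypothetical shim makes the mapping explicit:

```python
from typing import List, Sequence

from synapse.events import EventBase
from synapse.storage.controllers import StorageControllers
from synapse.visibility import filter_events_for_server


async def filter_events_for_server_compat(
    storage: StorageControllers,
    target_server_name: str,
    local_server_name: str,
    events: Sequence[EventBase],
    redact: bool = True,
    check_history_visibility_only: bool = False,
) -> List[EventBase]:
    """Hypothetical shim: the old signature expressed via the new one."""
    return await filter_events_for_server(
        storage,
        target_server_name,
        local_server_name,
        events,
        redact=redact,
        # The old flag switched both extra filters off at once; the new
        # keyword-only arguments force each call site to choose explicitly.
        filter_out_erased_senders=not check_history_visibility_only,
        filter_out_remote_partial_state_events=not check_history_visibility_only,
    )
```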

synapse/visibility.py

@@ -14,7 +14,17 @@
 # limitations under the License.
 import logging
 from enum import Enum, auto
-from typing import Collection, Dict, FrozenSet, List, Optional, Tuple
+from typing import (
+    Collection,
+    Dict,
+    FrozenSet,
+    List,
+    Mapping,
+    Optional,
+    Sequence,
+    Set,
+    Tuple,
+)
 
 import attr
 from typing_extensions import Final
@@ -565,29 +575,43 @@ async def filter_events_for_server(
     storage: StorageControllers,
     target_server_name: str,
     local_server_name: str,
-    events: List[EventBase],
-    redact: bool = True,
-    check_history_visibility_only: bool = False,
+    events: Sequence[EventBase],
+    *,
+    redact: bool,
+    filter_out_erased_senders: bool,
+    filter_out_remote_partial_state_events: bool,
 ) -> List[EventBase]:
-    """Filter a list of events based on whether given server is allowed to
+    """Filter a list of events based on whether the target server is allowed to
     see them.
 
+    For a fully stated room, the target server is allowed to see an event E if:
+      - the state at E has world readable or shared history vis, OR
+      - the state at E says that the target server is in the room.
+
+    For a partially stated room, the target server is allowed to see E if:
+      - E was created by this homeserver, AND:
+          - the partial state at E has world readable or shared history vis, OR
+          - the partial state at E says that the target server is in the room.
+
+    TODO: state before or state after?
+
     Args:
         storage
-        server_name
+        target_server_name
+        local_server_name
         events
-        redact: Whether to return a redacted version of the event, or
-            to filter them out entirely.
-        check_history_visibility_only: Whether to only check the
-            history visibility, rather than things like if the sender has been
+        redact: Controls what to do with events which have been filtered out.
+            If True, include their redacted forms; if False, omit them entirely.
+        filter_out_erased_senders: If true, also filter out events whose sender has been
            erased. This is used e.g. during pagination to decide whether to
            backfill or not.
+        filter_out_remote_partial_state_events: If True, also filter out events in
+            partial state rooms created by other homeservers.
 
     Returns
         The filtered events.
     """
 
-    def is_sender_erased(event: EventBase, erased_senders: Dict[str, bool]) -> bool:
+    def is_sender_erased(event: EventBase, erased_senders: Mapping[str, bool]) -> bool:
         if erased_senders and erased_senders[event.sender]:
             logger.info("Sender of %s has been erased, redacting", event.event_id)
             return True
@@ -616,7 +640,7 @@ async def filter_events_for_server(
         # server has no users in the room: redact
         return False
 
-    if not check_history_visibility_only:
+    if filter_out_erased_senders:
         erased_senders = await storage.main.are_users_erased(e.sender for e in events)
     else:
         # We don't want to check whether users are erased, which is equivalent
@@ -631,15 +655,15 @@ async def filter_events_for_server(
     # otherwise a room could be fully joined after we retrieve those, which would then bypass
     # this check but would base the filtering on an outdated view of the membership events.
 
-    partial_state_invisible_events = set()
-    if not check_history_visibility_only:
+    partial_state_invisible_event_ids: Set[str] = set()
+    if filter_out_remote_partial_state_events:
         for e in events:
             sender_domain = get_domain_from_id(e.sender)
             if (
                 sender_domain != local_server_name
                 and await storage.main.is_partial_state_room(e.room_id)
             ):
-                partial_state_invisible_events.add(e)
+                partial_state_invisible_event_ids.add(e.event_id)
 
     # Let's check to see if all the events have a history visibility
     # of "shared" or "world_readable". If that's the case then we don't
@@ -658,17 +682,20 @@ async def filter_events_for_server(
         target_server_name,
     )
 
-    to_return = []
-    for e in events:
+    def include_event_in_output(e: EventBase) -> bool:
         erased = is_sender_erased(e, erased_senders)
         visible = check_event_is_visible(
             event_to_history_vis[e.event_id], event_to_memberships.get(e.event_id, {})
         )
-        if e in partial_state_invisible_events:
+
+        if e.event_id in partial_state_invisible_event_ids:
             visible = False
 
-        if visible and not erased:
+        return visible and not erased
+
+    to_return = []
+    for e in events:
+        if include_event_in_output(e):
             to_return.append(e)
         elif redact:
             to_return.append(prune_event(e))
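Restated outside the diff, the rule in the new docstring reduces to a small predicate. The helper below is illustrative only; the real logic lives in `check_event_is_visible` and the partial-state loop above:

```python
def target_may_see_event(
    event_is_local: bool,          # was the event created by this homeserver?
    room_has_partial_state: bool,  # is the room only partially stated here?
    history_visibility: str,       # history visibility at the event
    target_server_in_room: bool,   # per the (possibly partial) state at the event
) -> bool:
    """Illustrative restatement of the visibility rule in the new docstring."""
    state_allows = (
        history_visibility in ("shared", "world_readable") or target_server_in_room
    )
    if room_has_partial_state:
        # Partially stated room: only events created by this homeserver,
        # and only if the partial state we hold already allows them.
        return event_is_local and state_allows
    return state_allows
```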

tests/federation/test_federation_catch_up.py

@@ -1,4 +1,5 @@
-from typing import Callable, List, Optional, Tuple
+from typing import Callable, Collection, List, Optional, Tuple
+from unittest import mock
 from unittest.mock import Mock
 
 from twisted.test.proto_helpers import MemoryReactor
@@ -500,3 +501,87 @@ class FederationCatchUpTestCases(FederatingHomeserverTestCase):
         self.assertEqual(len(sent_pdus), 1)
         self.assertEqual(sent_pdus[0].event_id, event_2.event_id)
         self.assertFalse(per_dest_queue._catching_up)
+
+    def test_catch_up_is_not_blocked_by_remote_event_in_partial_state_room(
+        self,
+    ) -> None:
+        """Detects (part of?) https://github.com/matrix-org/synapse/issues/15220."""
+        # ARRANGE:
+        # - a local user (u1)
+        # - a room which contains u1 and two remote users, @u2:host2 and @u3:other
+        # - events in that room such that
+        #   - history visibility is restricted
+        #   - u1 sent message events e1 and e2
+        #   - afterwards, u3 sent a remote event e3
+        # - catchup to begin for host2; last successfully sent event was e1
+        per_dest_queue, sent_pdus = self.make_fake_destination_queue()
+
+        self.register_user("u1", "you the one")
+        u1_token = self.login("u1", "you the one")
+        room = self.helper.create_room_as("u1", tok=u1_token)
+        self.helper.send_state(
+            room_id=room,
+            event_type="m.room.history_visibility",
+            body={"history_visibility": "joined"},
+            tok=u1_token,
+        )
+        self.get_success(
+            event_injection.inject_member_event(self.hs, room, "@u2:host2", "join")
+        )
+        self.get_success(
+            event_injection.inject_member_event(self.hs, room, "@u3:other", "join")
+        )
+
+        # create some events
+        event_id_1 = self.helper.send(room, "hello", tok=u1_token)["event_id"]
+        event_id_2 = self.helper.send(room, "world", tok=u1_token)["event_id"]
+        # pretend that u3 changes their displayname
+        event_id_3 = self.get_success(
+            event_injection.inject_member_event(self.hs, room, "@u3:other", "join")
+        ).event_id
+
+        # destination_rooms should already be populated, but let us pretend that we already
+        # sent (successfully) up to and including event id 1
+        event_1 = self.get_success(self.hs.get_datastores().main.get_event(event_id_1))
+        assert event_1.internal_metadata.stream_ordering is not None
+        self.get_success(
+            self.hs.get_datastores().main.set_destination_last_successful_stream_ordering(
+                "host2", event_1.internal_metadata.stream_ordering
+            )
+        )
+
+        # also fetch event 2 so we can compare its stream ordering to the sender's
+        # last_successful_stream_ordering later
+        event_2 = self.get_success(self.hs.get_datastores().main.get_event(event_id_2))
+
+        # Mock event 3 as having partial state
+        self.get_success(
+            event_injection.mark_event_as_partial_state(self.hs, event_id_3, room)
+        )
+
+        # Fail the test if we block on full state for event 3.
+        async def mock_await_full_state(event_ids: Collection[str]) -> None:
+            if event_id_3 in event_ids:
+                raise AssertionError("Tried to await full state for event_id_3")
+
+        # ACT
+        with mock.patch.object(
+            self.hs.get_storage_controllers().state._partial_state_events_tracker,
+            "await_full_state",
+            mock_await_full_state,
+        ):
+            self.get_success(per_dest_queue._catch_up_transmission_loop())
+
+        # ASSERT
+        # We should have:
+        # - not sent event 3: it's not ours, and the room is partial stated
+        # - fallen back to sending event 2: it's the most recent event in the room
+        #   we tried to send to host2
+        # - completed catch-up
+        self.assertEqual(len(sent_pdus), 1)
+        self.assertEqual(sent_pdus[0].event_id, event_id_2)
+        self.assertFalse(per_dest_queue._catching_up)
+        self.assertEqual(
+            per_dest_queue._last_successful_stream_ordering,
+            event_2.internal_metadata.stream_ordering,
+        )

tests/rest/client/test_third_party_rules.py

@@ -941,18 +941,16 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
         just before associating and removing a 3PID to/from an account.
         """
         # Pretend to be a Synapse module and register both callbacks as mocks.
-        third_party_rules = self.hs.get_third_party_event_rules()
         on_add_user_third_party_identifier_callback_mock = Mock(
             return_value=make_awaitable(None)
         )
         on_remove_user_third_party_identifier_callback_mock = Mock(
             return_value=make_awaitable(None)
         )
-        third_party_rules._on_threepid_bind_callbacks.append(
-            on_add_user_third_party_identifier_callback_mock
-        )
-        third_party_rules._on_threepid_bind_callbacks.append(
-            on_remove_user_third_party_identifier_callback_mock
+        third_party_rules = self.hs.get_third_party_event_rules()
+        third_party_rules.register_third_party_rules_callbacks(
+            on_add_user_third_party_identifier=on_add_user_third_party_identifier_callback_mock,
+            on_remove_user_third_party_identifier=on_remove_user_third_party_identifier_callback_mock,
         )
 
         # Register an admin user.
@@ -1008,12 +1006,12 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
         when a user is deactivated and their third-party ID associations are deleted.
         """
         # Pretend to be a Synapse module and register both callbacks as mocks.
-        third_party_rules = self.hs.get_third_party_event_rules()
         on_remove_user_third_party_identifier_callback_mock = Mock(
             return_value=make_awaitable(None)
         )
-        third_party_rules._on_threepid_bind_callbacks.append(
-            on_remove_user_third_party_identifier_callback_mock
+        third_party_rules = self.hs.get_third_party_event_rules()
+        third_party_rules.register_third_party_rules_callbacks(
+            on_remove_user_third_party_identifier=on_remove_user_third_party_identifier_callback_mock,
         )
 
         # Register an admin user.
@@ -1039,6 +1037,9 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase):
         )
         self.assertEqual(channel.code, 200, channel.json_body)
 
+        # Check that the mock was not called on the act of adding a third-party ID.
+        on_remove_user_third_party_identifier_callback_mock.assert_not_called()
+
         # Now deactivate the user.
         channel = self.make_request(
             "PUT",

tests/test_utils/event_injection.py

@@ -102,3 +102,34 @@ async def create_event(
     context = await unpersisted_context.persist(event)
 
     return event, context
+
+
+async def mark_event_as_partial_state(
+    hs: synapse.server.HomeServer,
+    event_id: str,
+    room_id: str,
+) -> None:
+    """
+    (Falsely) mark an event as having partial state.
+
+    Naughty, but occasionally useful when checking that partial state doesn't
+    block something from happening.
+
+    If the event already has partial state, this insert will fail (event_id is unique
+    in this table).
+    """
+    store = hs.get_datastores().main
+    await store.db_pool.simple_upsert(
+        table="partial_state_rooms",
+        keyvalues={"room_id": room_id},
+        values={},
+        insertion_values={"room_id": room_id},
+    )
+
+    await store.db_pool.simple_insert(
+        table="partial_state_events",
+        values={
+            "room_id": room_id,
+            "event_id": event_id,
+        },
+    )
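A short usage sketch, mirroring how the new catch-up test drives this helper from inside a `HomeserverTestCase` method (the `event_id` and `room_id` values are assumed to exist):

```python
# Falsely mark an existing event as having partial state.
self.get_success(
    event_injection.mark_event_as_partial_state(self.hs, event_id, room_id)
)
# The room now reports partial state without any real faster-join having
# happened, so code paths guarded by is_partial_state_room will trigger.
assert self.get_success(
    self.hs.get_datastores().main.is_partial_state_room(room_id)
)
```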

tests/test_visibility.py

@@ -63,7 +63,13 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
         filtered = self.get_success(
             filter_events_for_server(
-                self._storage_controllers, "test_server", "hs", events_to_filter
+                self._storage_controllers,
+                "test_server",
+                "hs",
+                events_to_filter,
+                redact=True,
+                filter_out_erased_senders=True,
+                filter_out_remote_partial_state_events=True,
             )
         )
@@ -85,7 +91,13 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
         self.assertEqual(
             self.get_success(
                 filter_events_for_server(
-                    self._storage_controllers, "remote_hs", "hs", [outlier]
+                    self._storage_controllers,
+                    "remote_hs",
+                    "hs",
+                    [outlier],
+                    redact=True,
+                    filter_out_erased_senders=True,
+                    filter_out_remote_partial_state_events=True,
                 )
             ),
             [outlier],
@@ -96,7 +108,13 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
         filtered = self.get_success(
             filter_events_for_server(
-                self._storage_controllers, "remote_hs", "local_hs", [outlier, evt]
+                self._storage_controllers,
+                "remote_hs",
+                "local_hs",
+                [outlier, evt],
+                redact=True,
+                filter_out_erased_senders=True,
+                filter_out_remote_partial_state_events=True,
             )
         )
         self.assertEqual(len(filtered), 2, f"expected 2 results, got: {filtered}")
@@ -108,7 +126,13 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
         # be redacted)
         filtered = self.get_success(
             filter_events_for_server(
-                self._storage_controllers, "other_server", "local_hs", [outlier, evt]
+                self._storage_controllers,
+                "other_server",
+                "local_hs",
+                [outlier, evt],
+                redact=True,
+                filter_out_erased_senders=True,
+                filter_out_remote_partial_state_events=True,
             )
         )
         self.assertEqual(filtered[0], outlier)
@@ -143,7 +167,13 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
         # ... and the filtering happens.
         filtered = self.get_success(
             filter_events_for_server(
-                self._storage_controllers, "test_server", "local_hs", events_to_filter
+                self._storage_controllers,
+                "test_server",
+                "local_hs",
+                events_to_filter,
+                redact=True,
+                filter_out_erased_senders=True,
+                filter_out_remote_partial_state_events=True,
             )
         )