Compare commits

...

19 commits

Author SHA1 Message Date
Michael Hollister 3b3baafd1d
Merge 9716d53627 into 2c36a679ae 2024-06-14 10:21:43 +01:00
Richard van der Hoff 2c36a679ae
Include user membership on events (#17282)
MSC4115 has now completed FCP, so we can enable it by default and switch
to the stable identifier.
2024-06-13 21:45:54 +00:00
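As a minimal illustration of what this change means for clients, assuming a hypothetical event payload: the requesting user's own membership now appears under the stable `unsigned.membership` key (previously `io.element.msc4115.membership`).

```python
# Hypothetical event as served to a client with MSC4115 enabled by default.
# "unsigned.membership" is the *requesting* user's membership at this event,
# under the stable key rather than "io.element.msc4115.membership".
event = {
    "type": "m.room.message",
    "room_id": "!hypothetical:example.org",
    "sender": "@alice:example.org",
    "content": {"msgtype": "m.text", "body": "hello"},
    "unsigned": {
        "membership": "join",
    },
}

assert event["unsigned"]["membership"] == "join"
```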
Eric Eastwood c12ee0d5ba
Add is_dm filtering to Sliding Sync /sync (#17277)
Based on [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): Sliding Sync
2024-06-13 13:56:58 -05:00
Eric Eastwood 8aaff851b1
Fix newly_left rooms not appearing if we returned early (Sliding Sync) (#17301)
Fix `newly_left` rooms not appearing if we returned early when `membership_snapshot_token.is_before_or_eq(to_token.room_key)`. 

Introduced in https://github.com/element-hq/synapse/pull/17187 (part of Sliding Sync)

The tests didn't catch it because they contained a small typo: `room_id1` vs `room_id2`.

Found while working on https://github.com/element-hq/synapse/pull/17293
2024-06-13 11:36:57 -05:00
Eric Eastwood 8c58eb7f17
Add event.internal_metadata.instance_name (#17300)
Add `event.internal_metadata.instance_name` (the worker instance that persisted the event) to go alongside the existing `event.internal_metadata.stream_ordering`.

`instance_name` is useful to properly compare and query for events with a token since you need to compare both the `stream_ordering` and `instance_name` against the vector clock/`instance_map` in the `RoomStreamToken`.

This is pre-requisite work and may be used in https://github.com/element-hq/synapse/pull/17293

Adding `event.internal_metadata.instance_name` was first mentioned in the initial Sliding Sync PR while pairing with @erikjohnston, see 09609cb0db (diff-5cd773fb307aa754bd3948871ba118b1ef0303f4d72d42a2d21e38242bf4e096R405-R410)
2024-06-13 11:32:50 -05:00
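The commit above explains that an event's position can only be compared against a `RoomStreamToken` by looking at both its `stream_ordering` and the worker that persisted it. A minimal, self-contained sketch of that comparison (a simplified model of the token's vector clock, not Synapse's actual classes):

```python
from dataclasses import dataclass, field
from typing import Dict


@dataclass
class RoomStreamTokenSketch:
    """Simplified model of a RoomStreamToken: a default stream position plus
    per-writer positions in `instance_map` (the vector clock)."""

    stream: int
    instance_map: Dict[str, int] = field(default_factory=dict)

    def stream_pos_for_instance(self, instance_name: str) -> int:
        # Writers not present in the map fall back to the default position.
        return self.instance_map.get(instance_name, self.stream)


def persisted_at_or_before(
    instance_name: str, stream_ordering: int, token: RoomStreamTokenSketch
) -> bool:
    """Whether an event persisted by `instance_name` at `stream_ordering`
    falls at or before the given token."""
    return stream_ordering <= token.stream_pos_for_instance(instance_name)


# One writer ("persister-2") has advanced to position 100; the default is 90.
token = RoomStreamTokenSketch(stream=90, instance_map={"persister-2": 100})

# The same stream_ordering gives different answers depending on which worker
# persisted the event, which is why `instance_name` must be stored alongside
# `stream_ordering`.
assert persisted_at_or_before("persister-2", 95, token)
assert not persisted_at_or_before("persister-1", 95, token)
```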
Eric Eastwood ebdce69f6a
Fix get_last_event_in_room_before_stream_ordering(...) finding the wrong last event (#17295)
PR where this was introduced: https://github.com/matrix-org/synapse/pull/14817

### What does this affect?

`get_last_event_in_room_before_stream_ordering(...)` is used in Sync v2 in many different state calculations.

`get_last_event_in_room_before_stream_ordering(...)` is also used in `/rooms/{roomId}/members`.
2024-06-13 11:00:52 -05:00
Andrew Morgan c6eb99c878
Bump mypy from 1.8.0 to 1.9.0 (#17297)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-13 15:50:10 +01:00
Quentin Gliech 5db3eec5bc
Clarify that MSC4151 is enabled on matrix.org (#17296)
This clarifies in the comments that the MSC is in use on matrix.org.

See #17270
2024-06-13 13:49:00 +00:00
Michael Hollister 9716d53627 Docs grammar fix 2024-05-30 22:59:32 -05:00
Michael Hollister 66d3244860 Updated changelog description 2024-05-30 22:50:59 -05:00
Michael Hollister 46ddc1d893 Reverted schema compat version 2024-05-30 22:46:18 -05:00
Michael Hollister 6b12d3ec6c Improved wording of documentation and config option naming 2024-05-30 22:45:38 -05:00
Michael Hollister 89d8b32af7
Improved wording of presence tracking documentation
Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
2024-05-30 15:24:30 -05:00
Michael Hollister 8386b98e11
Merge branch 'element-hq:develop' into michael/presence-enhancements 2024-05-02 15:40:44 -05:00
Michael Hollister cccb26f206 Removed duplicate license comment block 2024-05-01 14:22:28 -05:00
Michael Hollister fbbd8ed6be Added documentation for configuring new presence tracking options 2024-05-01 14:20:17 -05:00
Michael Hollister a38f805ff3 Updating DB schema to 86 due to rebase with develop 2024-05-01 13:44:12 -05:00
Michael Hollister c893e5a577 Added missing type hints to DB queries 2024-05-01 13:37:47 -05:00
Michael Hollister 16a21470dd Added presence update on change of profile information and config flags for selective presence tracking
Signed-off-by: Michael Hollister <michael@futo.org>
2024-05-01 12:14:56 -05:00
56 changed files with 1049 additions and 137 deletions

View file

@ -0,0 +1 @@
Added presence tracking of user profile updates and config flags for disabling user activity tracking. Contributed by @Michael-Hollister.

View file

@ -0,0 +1 @@
Add `is_dm` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.

View file

@ -0,0 +1 @@
Include user membership in events served to clients, per MSC4115.

1
changelog.d/17295.bugfix Normal file
View file

@ -0,0 +1 @@
Fix edge case in `/sync` returning the wrong state when using sharded event persisters.

View file

@ -0,0 +1 @@
Add support for the unstable [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151) report room API.

1
changelog.d/17297.misc Normal file
View file

@ -0,0 +1 @@
Bump `mypy` from 1.8.0 to 1.9.0.

1
changelog.d/17300.misc Normal file
View file

@ -0,0 +1 @@
Expose the worker instance that persisted the event on `event.internal_metadata.instance_name`.

1
changelog.d/17301.bugfix Normal file
View file

@ -0,0 +1 @@
Add initial implementation of an experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.

View file

@ -105,8 +105,6 @@ experimental_features:
# Expose a room summary for public rooms
msc3266_enabled: true
msc4115_membership_on_events: true
server_notices:
system_mxid_localpart: _server
system_mxid_display_name: "Server Alert"

View file

@ -246,6 +246,8 @@ Example configuration:
```yaml
presence:
enabled: false
local_activity_tracking: true
remote_activity_tracking: true
```
`enabled` can also be set to a special value of "untracked" which ignores updates
@ -254,6 +256,21 @@ received via clients and federation, while still accepting updates from the
*The "untracked" option was added in Synapse 1.96.0.*
The presence handler can be resource intensive when server-side tracking of user activity is
enabled. Below are some additional configuration options which may help improve the performance
of the presence feature without outright disabling it (an illustrative sketch of how these
settings combine follows this list):
* `local_activity_tracking` (default: enabled): Whether the server tracks a user's activity
when syncing or fetching events. If disabled, the server will not automatically update the
user's presence activity when the /sync or /events endpoints are called. Note that client
applications can still update their presence by calling the presence /status endpoint.
* `remote_activity_tracking` (default: enabled): Whether the server accepts presence
EDUs from remote servers that contain only user activity updates. If disabled, the server
will not process these EDUs. However, if a presence EDU contains an update to any of the
`status_msg`, `displayname`, or `avatar_url` fields, then the server will accept it.

If the presence `enabled` field is set to "untracked", both of these options act as if
set to `false`.
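The sketch referenced above, in plain Python (a simplified model of the documented behaviour, not Synapse's configuration code):

```python
# Illustrative only: the effective behaviour of the two options above, given
# the `enabled` setting (True, False, or the special value "untracked").
def effective_activity_tracking(
    enabled,
    local_activity_tracking=True,
    remote_activity_tracking=True,
):
    # Presence is only fully tracked when enabled and not "untracked".
    track_presence = bool(enabled) and enabled != "untracked"
    if not track_presence:
        # "untracked" (or presence disabled) makes both options act as false.
        return {"local": False, "remote": False}
    return {
        "local": local_activity_tracking,
        "remote": remote_activity_tracking,
    }


assert effective_activity_tracking(True) == {"local": True, "remote": True}
assert effective_activity_tracking(True, local_activity_tracking=False)["local"] is False
assert effective_activity_tracking("untracked") == {"local": False, "remote": False}
```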
---
### `require_auth_for_profile_requests`

56
poetry.lock generated
View file

@ -1384,38 +1384,38 @@ files = [
[[package]]
name = "mypy"
version = "1.8.0"
version = "1.9.0"
description = "Optional static typing for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "mypy-1.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:485a8942f671120f76afffff70f259e1cd0f0cfe08f81c05d8816d958d4577d3"},
{file = "mypy-1.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:df9824ac11deaf007443e7ed2a4a26bebff98d2bc43c6da21b2b64185da011c4"},
{file = "mypy-1.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2afecd6354bbfb6e0160f4e4ad9ba6e4e003b767dd80d85516e71f2e955ab50d"},
{file = "mypy-1.8.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8963b83d53ee733a6e4196954502b33567ad07dfd74851f32be18eb932fb1cb9"},
{file = "mypy-1.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:e46f44b54ebddbeedbd3d5b289a893219065ef805d95094d16a0af6630f5d410"},
{file = "mypy-1.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:855fe27b80375e5c5878492f0729540db47b186509c98dae341254c8f45f42ae"},
{file = "mypy-1.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4c886c6cce2d070bd7df4ec4a05a13ee20c0aa60cb587e8d1265b6c03cf91da3"},
{file = "mypy-1.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d19c413b3c07cbecf1f991e2221746b0d2a9410b59cb3f4fb9557f0365a1a817"},
{file = "mypy-1.8.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9261ed810972061388918c83c3f5cd46079d875026ba97380f3e3978a72f503d"},
{file = "mypy-1.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:51720c776d148bad2372ca21ca29256ed483aa9a4cdefefcef49006dff2a6835"},
{file = "mypy-1.8.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:52825b01f5c4c1c4eb0db253ec09c7aa17e1a7304d247c48b6f3599ef40db8bd"},
{file = "mypy-1.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f5ac9a4eeb1ec0f1ccdc6f326bcdb464de5f80eb07fb38b5ddd7b0de6bc61e55"},
{file = "mypy-1.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afe3fe972c645b4632c563d3f3eff1cdca2fa058f730df2b93a35e3b0c538218"},
{file = "mypy-1.8.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:42c6680d256ab35637ef88891c6bd02514ccb7e1122133ac96055ff458f93fc3"},
{file = "mypy-1.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:720a5ca70e136b675af3af63db533c1c8c9181314d207568bbe79051f122669e"},
{file = "mypy-1.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:028cf9f2cae89e202d7b6593cd98db6759379f17a319b5faf4f9978d7084cdc6"},
{file = "mypy-1.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4e6d97288757e1ddba10dd9549ac27982e3e74a49d8d0179fc14d4365c7add66"},
{file = "mypy-1.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f1478736fcebb90f97e40aff11a5f253af890c845ee0c850fe80aa060a267c6"},
{file = "mypy-1.8.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42419861b43e6962a649068a61f4a4839205a3ef525b858377a960b9e2de6e0d"},
{file = "mypy-1.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:2b5b6c721bd4aabaadead3a5e6fa85c11c6c795e0c81a7215776ef8afc66de02"},
{file = "mypy-1.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5c1538c38584029352878a0466f03a8ee7547d7bd9f641f57a0f3017a7c905b8"},
{file = "mypy-1.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ef4be7baf08a203170f29e89d79064463b7fc7a0908b9d0d5114e8009c3a259"},
{file = "mypy-1.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7178def594014aa6c35a8ff411cf37d682f428b3b5617ca79029d8ae72f5402b"},
{file = "mypy-1.8.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ab3c84fa13c04aeeeabb2a7f67a25ef5d77ac9d6486ff33ded762ef353aa5592"},
{file = "mypy-1.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:99b00bc72855812a60d253420d8a2eae839b0afa4938f09f4d2aa9bb4654263a"},
{file = "mypy-1.8.0-py3-none-any.whl", hash = "sha256:538fd81bb5e430cc1381a443971c0475582ff9f434c16cd46d2c66763ce85d9d"},
{file = "mypy-1.8.0.tar.gz", hash = "sha256:6ff8b244d7085a0b425b56d327b480c3b29cafbd2eff27316a004f9a7391ae07"},
{file = "mypy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f8a67616990062232ee4c3952f41c779afac41405806042a8126fe96e098419f"},
{file = "mypy-1.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d357423fa57a489e8c47b7c85dfb96698caba13d66e086b412298a1a0ea3b0ed"},
{file = "mypy-1.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49c87c15aed320de9b438ae7b00c1ac91cd393c1b854c2ce538e2a72d55df150"},
{file = "mypy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:48533cdd345c3c2e5ef48ba3b0d3880b257b423e7995dada04248725c6f77374"},
{file = "mypy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:4d3dbd346cfec7cb98e6cbb6e0f3c23618af826316188d587d1c1bc34f0ede03"},
{file = "mypy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:653265f9a2784db65bfca694d1edd23093ce49740b2244cde583aeb134c008f3"},
{file = "mypy-1.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a3c007ff3ee90f69cf0a15cbcdf0995749569b86b6d2f327af01fd1b8aee9dc"},
{file = "mypy-1.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2418488264eb41f69cc64a69a745fad4a8f86649af4b1041a4c64ee61fc61129"},
{file = "mypy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:68edad3dc7d70f2f17ae4c6c1b9471a56138ca22722487eebacfd1eb5321d612"},
{file = "mypy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:85ca5fcc24f0b4aeedc1d02f93707bccc04733f21d41c88334c5482219b1ccb3"},
{file = "mypy-1.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aceb1db093b04db5cd390821464504111b8ec3e351eb85afd1433490163d60cd"},
{file = "mypy-1.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0235391f1c6f6ce487b23b9dbd1327b4ec33bb93934aa986efe8a9563d9349e6"},
{file = "mypy-1.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4d5ddc13421ba3e2e082a6c2d74c2ddb3979c39b582dacd53dd5d9431237185"},
{file = "mypy-1.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:190da1ee69b427d7efa8aa0d5e5ccd67a4fb04038c380237a0d96829cb157913"},
{file = "mypy-1.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:fe28657de3bfec596bbeef01cb219833ad9d38dd5393fc649f4b366840baefe6"},
{file = "mypy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e54396d70be04b34f31d2edf3362c1edd023246c82f1730bbf8768c28db5361b"},
{file = "mypy-1.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5e6061f44f2313b94f920e91b204ec600982961e07a17e0f6cd83371cb23f5c2"},
{file = "mypy-1.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a10926e5473c5fc3da8abb04119a1f5811a236dc3a38d92015cb1e6ba4cb9e"},
{file = "mypy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b685154e22e4e9199fc95f298661deea28aaede5ae16ccc8cbb1045e716b3e04"},
{file = "mypy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:5d741d3fc7c4da608764073089e5f58ef6352bedc223ff58f2f038c2c4698a89"},
{file = "mypy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:587ce887f75dd9700252a3abbc9c97bbe165a4a630597845c61279cf32dfbf02"},
{file = "mypy-1.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f88566144752999351725ac623471661c9d1cd8caa0134ff98cceeea181789f4"},
{file = "mypy-1.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61758fabd58ce4b0720ae1e2fea5cfd4431591d6d590b197775329264f86311d"},
{file = "mypy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e49499be624dead83927e70c756970a0bc8240e9f769389cdf5714b0784ca6bf"},
{file = "mypy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:571741dc4194b4f82d344b15e8837e8c5fcc462d66d076748142327626a1b6e9"},
{file = "mypy-1.9.0-py3-none-any.whl", hash = "sha256:a260627a570559181a9ea5de61ac6297aa5af202f06fd7ab093ce74e7181e43e"},
{file = "mypy-1.9.0.tar.gz", hash = "sha256:3cc5da0127e6a478cddd906068496a97a7618a21ce9b54bde5bf7e539c7af974"},
]
[package.dependencies]

View file

@ -204,6 +204,8 @@ pub struct EventInternalMetadata {
/// The stream ordering of this event. None, until it has been persisted.
#[pyo3(get, set)]
stream_ordering: Option<NonZeroI64>,
#[pyo3(get, set)]
instance_name: Option<String>,
/// whether this event is an outlier (ie, whether we have the state at that
/// point in the DAG)
@ -232,6 +234,7 @@ impl EventInternalMetadata {
Ok(EventInternalMetadata {
data,
stream_ordering: None,
instance_name: None,
outlier: false,
})
}

View file

@ -223,7 +223,6 @@ test_packages=(
./tests/msc3930
./tests/msc3902
./tests/msc3967
./tests/msc4115
)
# Enable dirty runs, so tests will reuse the same container where possible.

View file

@ -238,7 +238,7 @@ class EventUnsignedContentFields:
"""Fields found inside the 'unsigned' data on events"""
# Requesting user's membership, per MSC4115
MSC4115_MEMBERSHIP: Final = "io.element.msc4115.membership"
MEMBERSHIP: Final = "membership"
class RoomTypes:

View file

@ -83,6 +83,8 @@ class UserPresenceState:
last_user_sync_ts: int
status_msg: Optional[str]
currently_active: bool
displayname: Optional[str]
avatar_url: Optional[str]
def as_dict(self) -> JsonDict:
return attr.asdict(self)
@ -101,4 +103,6 @@ class UserPresenceState:
last_user_sync_ts=0,
status_msg=None,
currently_active=False,
displayname=None,
avatar_url=None,
)

View file

@ -436,10 +436,6 @@ class ExperimentalConfig(Config):
("experimental", "msc4108_delegation_endpoint"),
)
self.msc4115_membership_on_events = experimental.get(
"msc4115_membership_on_events", False
)
self.msc3916_authenticated_media_enabled = experimental.get(
"msc3916_authenticated_media_enabled", False
)

View file

@ -384,6 +384,16 @@ class ServerConfig(Config):
# Whether to internally track presence, requires that presence is enabled,
self.track_presence = self.presence_enabled and presence_enabled != "untracked"
# Disabling server-side presence tracking
self.presence_local_activity_tracking = presence_config.get(
"local_activity_tracking", True
)
# Disabling federation presence tracking
self.presence_remote_activity_tracking = presence_config.get(
"remote_activity_tracking", True
)
# Custom presence router module
# This is the legacy way of configuring it (the config should now be put in the modules section)
self.presence_router_module_class = None

View file

@ -90,6 +90,7 @@ def prune_event(event: EventBase) -> EventBase:
pruned_event.internal_metadata.stream_ordering = (
event.internal_metadata.stream_ordering
)
pruned_event.internal_metadata.instance_name = event.internal_metadata.instance_name
pruned_event.internal_metadata.outlier = event.internal_metadata.outlier
# Mark the event as redacted
@ -116,6 +117,7 @@ def clone_event(event: EventBase) -> EventBase:
new_event.internal_metadata.stream_ordering = (
event.internal_metadata.stream_ordering
)
new_event.internal_metadata.instance_name = event.internal_metadata.instance_name
new_event.internal_metadata.outlier = event.internal_metadata.outlier
return new_event

View file

@ -1425,9 +1425,30 @@ class FederationHandlerRegistry:
self._edu_type_to_instance[edu_type] = instance_names
async def on_edu(self, edu_type: str, origin: str, content: dict) -> None:
"""Passes an EDU to a registered handler if one exists
This potentially modifies the `content` dict for `m.presence` EDUs when
presence `remote_activity_tracking` is disabled.
Args:
edu_type: The type of the incoming EDU to process
origin: The server we received the event from
content: The content of the EDU
"""
if not self.config.server.track_presence and edu_type == EduTypes.PRESENCE:
return
if (
not self.config.server.presence_remote_activity_tracking
and edu_type == EduTypes.PRESENCE
):
filtered_edus = []
for e in content["push"]:
# Process only profile presence updates to reduce resource impact
if "status_msg" in e or "displayname" in e or "avatar_url" in e:
filtered_edus.append(e)
content["push"] = filtered_edus
# Check if we have a handler on this instance
handler = self.edu_handlers.get(edu_type)
if handler:
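
For context on the filtering above, an `m.presence` federation EDU carries a list of per-user updates under `content["push"]`. A sketch with hypothetical values (the `displayname`/`avatar_url` fields are the extension introduced on this branch, not part of the current federation spec):

```python
# Illustrative m.presence EDU content. With remote_activity_tracking disabled,
# entries that carry only activity data are dropped, while entries containing
# profile fields are kept (mirroring the condition in on_edu above).
edu_content = {
    "push": [
        {
            # Activity-only update: filtered out.
            "user_id": "@bob:remote.example",
            "presence": "online",
            "last_active_ago": 5000,
            "currently_active": True,
        },
        {
            # Contains profile fields: kept.
            "user_id": "@carol:remote.example",
            "presence": "online",
            "status_msg": "In a meeting",
            "displayname": "Carol",
            "avatar_url": "mxc://remote.example/abc123",
        },
    ]
}

kept = [
    e
    for e in edu_content["push"]
    if "status_msg" in e or "displayname" in e or "avatar_url" in e
]
assert [e["user_id"] for e in kept] == ["@carol:remote.example"]
```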

View file

@ -42,7 +42,6 @@ class AdminHandler:
self._device_handler = hs.get_device_handler()
self._storage_controllers = hs.get_storage_controllers()
self._state_storage_controller = self._storage_controllers.state
self._hs_config = hs.config
self._msc3866_enabled = hs.config.experimental.msc3866.enabled
async def get_whois(self, user: UserID) -> JsonMapping:
@ -215,7 +214,6 @@ class AdminHandler:
self._storage_controllers,
user_id,
events,
msc4115_membership_on_events=self._hs_config.experimental.msc4115_membership_on_events,
)
writer.write_events(room_id, events)

View file

@ -148,7 +148,6 @@ class EventHandler:
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
self._config = hs.config
async def get_event(
self,
@ -194,7 +193,6 @@ class EventHandler:
user.to_string(),
[event],
is_peeking=is_peeking,
msc4115_membership_on_events=self._config.experimental.msc4115_membership_on_events,
)
if not filtered:

View file

@ -224,7 +224,6 @@ class InitialSyncHandler:
self._storage_controllers,
user_id,
messages,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
start_token = now_token.copy_and_replace(StreamKeyType.ROOM, token)
@ -383,7 +382,6 @@ class InitialSyncHandler:
requester.user.to_string(),
messages,
is_peeking=is_peeking,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
start_token = StreamToken.START.copy_and_replace(StreamKeyType.ROOM, token)
@ -498,7 +496,6 @@ class InitialSyncHandler:
requester.user.to_string(),
messages,
is_peeking=is_peeking,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
start_token = now_token.copy_and_replace(StreamKeyType.ROOM, token)

View file

@ -1551,6 +1551,7 @@ class EventCreationHandler:
# stream_ordering entry manually (as it was persisted on
# another worker).
event.internal_metadata.stream_ordering = stream_id
event.internal_metadata.instance_name = writer_instance
return event

View file

@ -623,7 +623,6 @@ class PaginationHandler:
user_id,
events,
is_peeking=(member_event_id is None),
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
# if after the filter applied there are no more events

View file

@ -201,6 +201,9 @@ class BasePresenceHandler(abc.ABC):
self._presence_enabled = hs.config.server.presence_enabled
self._track_presence = hs.config.server.track_presence
self._presence_local_activity_tracking = (
hs.config.server.presence_local_activity_tracking
)
self._federation = None
if hs.should_send_federation():
@ -451,6 +454,8 @@ class BasePresenceHandler(abc.ABC):
state = {
"presence": current_presence_state.state,
"status_message": current_presence_state.status_msg,
"displayname": current_presence_state.displayname,
"avatar_url": current_presence_state.avatar_url,
}
# Copy the presence state to the tip of the presence stream.
@ -579,7 +584,11 @@ class WorkerPresenceHandler(BasePresenceHandler):
Called by the sync and events servlets to record that a user has connected to
this worker and is waiting for some events.
"""
if not affect_presence or not self._track_presence:
if (
not affect_presence
or not self._track_presence
or not self._presence_local_activity_tracking
):
return _NullContextManager()
# Note that this causes last_active_ts to be incremented which is not
@ -648,6 +657,8 @@ class WorkerPresenceHandler(BasePresenceHandler):
row.last_user_sync_ts,
row.status_msg,
row.currently_active,
row.displayname,
row.avatar_url,
)
for row in rows
]
@ -1140,7 +1151,11 @@ class PresenceHandler(BasePresenceHandler):
client that is being used by a user.
presence_state: The presence state indicated in the sync request
"""
if not affect_presence or not self._track_presence:
if (
not affect_presence
or not self._track_presence
or not self._presence_local_activity_tracking
):
return _NullContextManager()
curr_sync = self._user_device_to_num_current_syncs.get((user_id, device_id), 0)
@ -1340,6 +1355,8 @@ class PresenceHandler(BasePresenceHandler):
new_fields["status_msg"] = push.get("status_msg", None)
new_fields["currently_active"] = push.get("currently_active", False)
new_fields["displayname"] = push.get("displayname", None)
new_fields["avatar_url"] = push.get("avatar_url", None)
prev_state = await self.current_state_for_user(user_id)
updates.append(prev_state.copy_and_replace(**new_fields))
@ -1369,6 +1386,8 @@ class PresenceHandler(BasePresenceHandler):
the `state` dict.
"""
status_msg = state.get("status_msg", None)
displayname = state.get("displayname", None)
avatar_url = state.get("avatar_url", None)
presence = state["presence"]
if presence not in self.VALID_PRESENCE:
@ -1414,6 +1433,8 @@ class PresenceHandler(BasePresenceHandler):
else:
# Syncs do not override the status message.
new_fields["status_msg"] = status_msg
new_fields["displayname"] = displayname
new_fields["avatar_url"] = avatar_url
await self._update_states(
[prev_state.copy_and_replace(**new_fields)], force_notify=force_notify
@ -1634,6 +1655,8 @@ class PresenceHandler(BasePresenceHandler):
if state.state != PresenceState.OFFLINE
or now - state.last_active_ts < 7 * 24 * 60 * 60 * 1000
or state.status_msg is not None
or state.displayname is not None
or state.avatar_url is not None
]
await self._federation_queue.send_presence_to_destinations(
@ -1668,6 +1691,14 @@ def should_notify(
notify_reason_counter.labels(user_location, "status_msg_change").inc()
return True
if old_state.displayname != new_state.displayname:
notify_reason_counter.labels(user_location, "displayname_change").inc()
return True
if old_state.avatar_url != new_state.avatar_url:
notify_reason_counter.labels(user_location, "avatar_url_change").inc()
return True
if old_state.state != new_state.state:
notify_reason_counter.labels(user_location, "state_change").inc()
state_transition_counter.labels(
@ -1725,6 +1756,8 @@ def format_user_presence_state(
* status_msg: Optional. Included if `status_msg` is set on `state`. The user's
status.
* currently_active: Optional. Included only if `state.state` is "online".
* displayname: Optional. The current display name for this user, if any.
* avatar_url: Optional. The current avatar URL for this user, if any.
Example:
@ -1733,7 +1766,9 @@ def format_user_presence_state(
"user_id": "@alice:example.com",
"last_active_ago": 16783813918,
"status_msg": "Hello world!",
"currently_active": True
"currently_active": True,
"displayname": "Alice",
"avatar_url": "mxc://localhost/wefuiwegh8742w"
}
"""
content: JsonDict = {"presence": state.state}
@ -1745,6 +1780,10 @@ def format_user_presence_state(
content["status_msg"] = state.status_msg
if state.state == PresenceState.ONLINE:
content["currently_active"] = state.currently_active
if state.displayname:
content["displayname"] = state.displayname
if state.avatar_url:
content["avatar_url"] = state.avatar_url
return content

View file

@ -202,6 +202,19 @@ class ProfileHandler:
if propagate:
await self._update_join_states(requester, target_user)
if self.hs.config.server.track_presence:
presence_handler = self.hs.get_presence_handler()
current_presence_state = await presence_handler.get_state(target_user)
state = {
"presence": current_presence_state.state,
"status_message": current_presence_state.status_msg,
"displayname": new_displayname,
"avatar_url": current_presence_state.avatar_url,
}
await presence_handler.set_state(target_user, requester.device_id, state)
async def get_avatar_url(self, target_user: UserID) -> Optional[str]:
if self.hs.is_mine(target_user):
try:
@ -295,6 +308,19 @@ class ProfileHandler:
if propagate:
await self._update_join_states(requester, target_user)
if self.hs.config.server.track_presence:
presence_handler = self.hs.get_presence_handler()
current_presence_state = await presence_handler.get_state(target_user)
state = {
"presence": current_presence_state.state,
"status_message": current_presence_state.status_msg,
"displayname": current_presence_state.displayname,
"avatar_url": new_avatar_url,
}
await presence_handler.set_state(target_user, requester.device_id, state)
@cached()
async def check_avatar_size_and_mime_type(self, mxc: str) -> bool:
"""Check that the size and content type of the avatar at the given MXC URI are

View file

@ -95,7 +95,6 @@ class RelationsHandler:
self._event_handler = hs.get_event_handler()
self._event_serializer = hs.get_event_client_serializer()
self._event_creation_handler = hs.get_event_creation_handler()
self._config = hs.config
async def get_relations(
self,
@ -164,7 +163,6 @@ class RelationsHandler:
user_id,
events,
is_peeking=(member_event_id is None),
msc4115_membership_on_events=self._config.experimental.msc4115_membership_on_events,
)
# The relations returned for the requested event do include their
@ -610,7 +608,6 @@ class RelationsHandler:
user_id,
events,
is_peeking=(member_event_id is None),
msc4115_membership_on_events=self._config.experimental.msc4115_membership_on_events,
)
aggregations = await self.get_bundled_aggregations(

View file

@ -1476,7 +1476,6 @@ class RoomContextHandler:
user.to_string(),
events,
is_peeking=is_peeking,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
event = await self.store.get_event(

View file

@ -483,7 +483,6 @@ class SearchHandler:
self._storage_controllers,
user.to_string(),
filtered_events,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
events.sort(key=lambda e: -rank_map[e.event_id])
@ -585,7 +584,6 @@ class SearchHandler:
self._storage_controllers,
user.to_string(),
filtered_events,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
room_events.extend(events)
@ -673,14 +671,12 @@ class SearchHandler:
self._storage_controllers,
user.to_string(),
res.events_before,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
events_after = await filter_events_for_client(
self._storage_controllers,
user.to_string(),
res.events_after,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
context: JsonDict = {

View file

@ -22,7 +22,7 @@ from typing import TYPE_CHECKING, AbstractSet, Dict, List, Optional
from immutabledict import immutabledict
from synapse.api.constants import Membership
from synapse.api.constants import AccountDataTypes, Membership
from synapse.events import EventBase
from synapse.types import Requester, RoomStreamToken, StreamToken, UserID
from synapse.types.handlers import OperationType, SlidingSyncConfig, SlidingSyncResult
@ -69,9 +69,19 @@ class SlidingSyncHandler:
from_token: Optional[StreamToken] = None,
timeout_ms: int = 0,
) -> SlidingSyncResult:
"""Get the sync for a client if we have new data for it now. Otherwise
"""
Get the sync for a client if we have new data for it now. Otherwise
wait for new data to arrive on the server. If the timeout expires, then
return an empty sync result.
Args:
requester: The user making the request
sync_config: Sync configuration
from_token: The point in the stream to sync from. Token of the end of the
previous batch. May be `None` if this is the initial sync request.
timeout_ms: The time in milliseconds to wait for new data to arrive. If 0,
we will return immediately, but there might not be any new data, so we just
return an empty response.
"""
# If the user is not part of the mau group, then check that limits have
# not been exceeded (if not part of the group by this point, almost certain
@ -143,6 +153,14 @@ class SlidingSyncHandler:
"""
Generates the response body of a Sliding Sync result, represented as a
`SlidingSyncResult`.
We fetch data according to the token range (> `from_token` and <= `to_token`).
Args:
sync_config: Sync configuration
to_token: The point in the stream to sync up to.
from_token: The point in the stream to sync from. Token of the end of the
previous batch. May be `None` if this is the initial sync request.
"""
user_id = sync_config.user.to_string()
app_service = self.store.get_app_service_by_user_id(user_id)
@ -163,11 +181,12 @@ class SlidingSyncHandler:
lists: Dict[str, SlidingSyncResult.SlidingWindowList] = {}
if sync_config.lists:
for list_key, list_config in sync_config.lists.items():
# TODO: Apply filters
#
# TODO: Exclude partially stated rooms unless the `required_state` has
# `["m.room.member", "$LAZY"]`
# Apply filters
filtered_room_ids = room_id_set
if list_config.filters is not None:
filtered_room_ids = await self.filter_rooms(
sync_config.user, room_id_set, list_config.filters, to_token
)
# TODO: Apply sorts
sorted_room_ids = sorted(filtered_room_ids)
@ -217,6 +236,12 @@ class SlidingSyncHandler:
`forgotten` flag to the `room_memberships` table in Synapse. There isn't a way
to tell when a room was forgotten at the moment so we can't factor it into the
from/to range.
Args:
user: User to fetch rooms for
to_token: The token to fetch rooms up to.
from_token: The point in the stream to sync from.
"""
user_id = user.to_string()
@ -275,12 +300,6 @@ class SlidingSyncHandler:
instance_map=immutabledict(instance_to_max_stream_ordering_map),
)
# If our `to_token` is already the same or ahead of the latest room membership
# for the user, we can just straight-up return the room list (nothing has
# changed)
if membership_snapshot_token.is_before_or_eq(to_token.room_key):
return sync_room_id_set
# Since we fetched the users room list at some point in time after the from/to
# tokens, we need to revert/rewind some membership changes to match the point in
# time of the `to_token`. In particular, we need to make these fixups:
@ -300,14 +319,20 @@ class SlidingSyncHandler:
# 1) Fetch membership changes that fall in the range from `to_token` up to
# `membership_snapshot_token`
membership_change_events_after_to_token = (
await self.store.get_membership_changes_for_user(
user_id,
from_key=to_token.room_key,
to_key=membership_snapshot_token,
excluded_rooms=self.rooms_to_exclude_globally,
#
# If our `to_token` is already the same or ahead of the latest room membership
# for the user, we don't need to do any "2)" fix-ups and can just straight-up
# use the room list from the snapshot as a base (nothing has changed)
membership_change_events_after_to_token = []
if not membership_snapshot_token.is_before_or_eq(to_token.room_key):
membership_change_events_after_to_token = (
await self.store.get_membership_changes_for_user(
user_id,
from_key=to_token.room_key,
to_key=membership_snapshot_token,
excluded_rooms=self.rooms_to_exclude_globally,
)
)
)
# 1) Assemble a list of the last membership events in some given ranges. Someone
# could have left and joined multiple times during the given range but we only
@ -439,3 +464,84 @@ class SlidingSyncHandler:
sync_room_id_set.add(room_id)
return sync_room_id_set
async def filter_rooms(
self,
user: UserID,
room_id_set: AbstractSet[str],
filters: SlidingSyncConfig.SlidingSyncList.Filters,
to_token: StreamToken,
) -> AbstractSet[str]:
"""
Filter rooms based on the sync request.
Args:
user: User to filter rooms for
room_id_set: Set of room IDs to filter down
filters: Filters to apply
to_token: We filter based on the state of the room at this token
"""
user_id = user.to_string()
# TODO: Apply filters
#
# TODO: Exclude partially stated rooms unless the `required_state` has
# `["m.room.member", "$LAZY"]`
filtered_room_id_set = set(room_id_set)
# Filter for Direct-Message (DM) rooms
if filters.is_dm is not None:
# We're using global account data (`m.direct`) instead of checking for
# `is_direct` on membership events because that property only appears for
# the invitee membership event (doesn't show up for the inviter). Account
# data is set by the client so it needs to be scrutinized.
#
# We're unable to take `to_token` into account for global account data since
# we only keep track of the latest account data for the user.
dm_map = await self.store.get_global_account_data_by_type_for_user(
user_id, AccountDataTypes.DIRECT
)
# Flatten out the map
dm_room_id_set = set()
if dm_map:
for room_ids in dm_map.values():
# Account data should be a list of room IDs. Ignore anything else
if isinstance(room_ids, list):
for room_id in room_ids:
if isinstance(room_id, str):
dm_room_id_set.add(room_id)
if filters.is_dm:
# Only DM rooms please
filtered_room_id_set = filtered_room_id_set.intersection(dm_room_id_set)
else:
# Only non-DM rooms please
filtered_room_id_set = filtered_room_id_set.difference(dm_room_id_set)
if filters.spaces:
raise NotImplementedError()
if filters.is_encrypted:
raise NotImplementedError()
if filters.is_invite:
raise NotImplementedError()
if filters.room_types:
raise NotImplementedError()
if filters.not_room_types:
raise NotImplementedError()
if filters.room_name_like:
raise NotImplementedError()
if filters.tags:
raise NotImplementedError()
if filters.not_tags:
raise NotImplementedError()
return filtered_room_id_set

View file

@ -844,7 +844,6 @@ class SyncHandler:
sync_config.user.to_string(),
recents,
always_include_ids=current_state_ids,
msc4115_membership_on_events=self.hs_config.experimental.msc4115_membership_on_events,
)
log_kv({"recents_after_visibility_filtering": len(recents)})
else:
@ -930,7 +929,6 @@ class SyncHandler:
sync_config.user.to_string(),
loaded_recents,
always_include_ids=current_state_ids,
msc4115_membership_on_events=self.hs_config.experimental.msc4115_membership_on_events,
)
loaded_recents = []

View file

@ -721,7 +721,6 @@ class Notifier:
user.to_string(),
new_events,
is_peeking=is_peeking,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
elif keyname == StreamKeyType.PRESENCE:
now = self.clock.time_msec()

View file

@ -532,7 +532,6 @@ class Mailer:
self._storage_controllers,
user_id,
results.events_before,
msc4115_membership_on_events=self.hs.config.experimental.msc4115_membership_on_events,
)
the_events.append(notif_event)

View file

@ -330,6 +330,8 @@ class PresenceStream(_StreamFromIdGen):
last_user_sync_ts: int
status_msg: str
currently_active: bool
displayname: str
avatar_url: str
NAME = "presence"
ROW_TYPE = PresenceStreamRow

View file

@ -107,7 +107,15 @@ class ReportEventRestServlet(RestServlet):
class ReportRoomRestServlet(RestServlet):
# https://github.com/matrix-org/matrix-spec-proposals/pull/4151
"""This endpoint lets clients report a room for abuse.
Whilst MSC4151 is not yet merged, this unstable endpoint is enabled on matrix.org
for content moderation purposes, and therefore backwards compatibility should be
carefully considered when changing anything on this endpoint.
More details on the MSC: https://github.com/matrix-org/matrix-spec-proposals/pull/4151
"""
PATTERNS = client_patterns(
"/org.matrix.msc4151/rooms/(?P<room_id>[^/]*)/report$",
releases=[],
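
For reference, a sketch of how a client might call this unstable endpoint, assuming a hypothetical homeserver, access token, and room ID; the request body shape (`reason`) is taken from MSC4151 and should be checked against the MSC before relying on it:

```python
import json
from urllib.parse import quote

# Hypothetical values; replace with your homeserver, token, and room.
homeserver = "https://matrix.example.org"
access_token = "syt_exampletoken"
room_id = "!abcdef:example.org"

# Unstable prefix, since PATTERNS above uses releases=[] (MSC4151 is unmerged).
url = (
    f"{homeserver}/_matrix/client/unstable/org.matrix.msc4151"
    f"/rooms/{quote(room_id)}/report"
)
body = {"reason": "Spam and abusive content"}  # assumed body shape per MSC4151

# e.g. with the `requests` library:
#   import requests
#   resp = requests.post(
#       url,
#       headers={"Authorization": f"Bearer {access_token}"},
#       data=json.dumps(body),
#   )
print(url)
print(json.dumps(body))
```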

View file

@ -207,6 +207,7 @@ class PersistEventsStore:
async with stream_ordering_manager as stream_orderings:
for (event, _), stream in zip(events_and_contexts, stream_orderings):
event.internal_metadata.stream_ordering = stream
event.internal_metadata.instance_name = self._instance_name
await self.db_pool.runInteraction(
"persist_events",

View file

@ -156,6 +156,7 @@ class _EventRow:
event_id: str
stream_ordering: int
instance_name: str
json: str
internal_metadata: str
format_version: Optional[int]
@ -1354,6 +1355,7 @@ class EventsWorkerStore(SQLBaseStore):
rejected_reason=rejected_reason,
)
original_ev.internal_metadata.stream_ordering = row.stream_ordering
original_ev.internal_metadata.instance_name = row.instance_name
original_ev.internal_metadata.outlier = row.outlier
# Consistency check: if the content of the event has been modified in the
@ -1439,6 +1441,7 @@ class EventsWorkerStore(SQLBaseStore):
SELECT
e.event_id,
e.stream_ordering,
e.instance_name,
ej.internal_metadata,
ej.json,
ej.format_version,
@ -1462,13 +1465,14 @@ class EventsWorkerStore(SQLBaseStore):
event_dict[event_id] = _EventRow(
event_id=event_id,
stream_ordering=row[1],
internal_metadata=row[2],
json=row[3],
format_version=row[4],
room_version_id=row[5],
rejected_reason=row[6],
instance_name=row[2],
internal_metadata=row[3],
json=row[4],
format_version=row[5],
room_version_id=row[6],
rejected_reason=row[7],
redactions=[],
outlier=bool(row[7]), # This is an int in SQLite3
outlier=bool(row[8]), # This is an int in SQLite3
)
# check for redactions

View file

@ -174,6 +174,8 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore)
"last_user_sync_ts",
"status_msg",
"currently_active",
"displayname",
"avatar_url",
"instance_name",
),
values=[
@ -186,6 +188,8 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore)
state.last_user_sync_ts,
state.status_msg,
state.currently_active,
state.displayname,
state.avatar_url,
self._instance_name,
)
for stream_id, state in zip(stream_orderings, presence_states)
@ -225,7 +229,8 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore)
sql = """
SELECT stream_id, user_id, state, last_active_ts,
last_federation_update_ts, last_user_sync_ts,
status_msg, currently_active
status_msg, currently_active, displayname,
avatar_url
FROM presence_stream
WHERE ? < stream_id AND stream_id <= ?
ORDER BY stream_id ASC
@ -264,7 +269,19 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore)
# TODO All these columns are nullable, but we don't expect that:
# https://github.com/matrix-org/synapse/issues/16467
rows = cast(
List[Tuple[str, str, int, int, int, Optional[str], Union[int, bool]]],
List[
Tuple[
str,
str,
int,
int,
int,
Optional[str],
Union[int, bool],
Optional[str],
Optional[str],
]
],
await self.db_pool.simple_select_many_batch(
table="presence_stream",
column="user_id",
@ -278,6 +295,8 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore)
"last_user_sync_ts",
"status_msg",
"currently_active",
"displayname",
"avatar_url",
),
desc="get_presence_for_users",
),
@ -292,8 +311,10 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore)
last_user_sync_ts=last_user_sync_ts,
status_msg=status_msg,
currently_active=bool(currently_active),
displayname=displayname,
avatar_url=avatar_url,
)
for user_id, state, last_active_ts, last_federation_update_ts, last_user_sync_ts, status_msg, currently_active in rows
for user_id, state, last_active_ts, last_federation_update_ts, last_user_sync_ts, status_msg, currently_active, displayname, avatar_url, in rows
}
async def should_user_receive_full_presence_with_token(
@ -403,7 +424,19 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore)
# TODO All these columns are nullable, but we don't expect that:
# https://github.com/matrix-org/synapse/issues/16467
rows = cast(
List[Tuple[str, str, int, int, int, Optional[str], Union[int, bool]]],
List[
Tuple[
str,
str,
int,
int,
int,
Optional[str],
Union[int, bool],
Optional[str],
Optional[str],
]
],
await self.db_pool.runInteraction(
"get_presence_for_all_users",
self.db_pool.simple_select_list_paginate_txn,
@ -420,6 +453,8 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore)
"last_user_sync_ts",
"status_msg",
"currently_active",
"displayname",
"avatar_url",
),
order_direction="ASC",
),
@ -433,6 +468,8 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore)
last_user_sync_ts,
status_msg,
currently_active,
displayname,
avatar_url,
) in rows:
users_to_state[user_id] = UserPresenceState(
user_id=user_id,
@ -442,6 +479,8 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore)
last_user_sync_ts=last_user_sync_ts,
status_msg=status_msg,
currently_active=bool(currently_active),
displayname=displayname,
avatar_url=avatar_url,
)
# We've run out of updates to query
@ -464,7 +503,8 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore)
# query.
sql = (
"SELECT user_id, state, last_active_ts, last_federation_update_ts,"
" last_user_sync_ts, status_msg, currently_active FROM presence_stream"
" last_user_sync_ts, status_msg, currently_active, displayname, avatar_url "
" FROM presence_stream"
" WHERE state != ?"
)
@ -482,8 +522,10 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore)
last_user_sync_ts=last_user_sync_ts,
status_msg=status_msg,
currently_active=bool(currently_active),
displayname=displayname,
avatar_url=avatar_url,
)
for user_id, state, last_active_ts, last_federation_update_ts, last_user_sync_ts, status_msg, currently_active in rows
for user_id, state, last_active_ts, last_federation_update_ts, last_user_sync_ts, status_msg, currently_active, displayname, avatar_url, in rows
]
def take_presence_startup_info(self) -> List[UserPresenceState]:

View file

@ -914,12 +914,23 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
def get_last_event_in_room_before_stream_ordering_txn(
txn: LoggingTransaction,
) -> Optional[str]:
# We need to handle the fact that the stream tokens can be vector
# clocks. We do this by getting all rows between the minimum and
# maximum stream ordering in the token, plus one row less than the
# minimum stream ordering. We then filter the results against the
# token and return the first row that matches.
# We're looking for the closest event at or before the token. We need to
# handle the fact that the stream token can be a vector clock (with an
# `instance_map`) and events can be persisted on different instances
# (sharded event persisters). The first subquery handles the events that
# would be within the vector clock and gets all rows between the minimum and
# maximum stream ordering in the token which need to be filtered against the
# `instance_map`. The second subquery handles the "before" case and finds
# the first row before the token. We then filter out any results past the
# token's vector clock and return the first row that matches.
min_stream = end_token.stream
max_stream = end_token.get_max_stream_pos()
# We use `union all` because we don't need any of the deduplication logic
# (`union` is really a union + distinct). `UNION ALL` does preserve the
ordering of the operand queries but there is no actual guarantee that it
# has this behavior in all scenarios so we need the extra `ORDER BY` at the
# bottom.
sql = """
SELECT * FROM (
SELECT instance_name, stream_ordering, topological_ordering, event_id
@ -931,7 +942,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
AND rejections.event_id IS NULL
ORDER BY stream_ordering DESC
) AS a
UNION
UNION ALL
SELECT * FROM (
SELECT instance_name, stream_ordering, topological_ordering, event_id
FROM events
@ -943,15 +954,16 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
ORDER BY stream_ordering DESC
LIMIT 1
) AS b
ORDER BY stream_ordering DESC
"""
txn.execute(
sql,
(
room_id,
end_token.stream,
end_token.get_max_stream_pos(),
min_stream,
max_stream,
room_id,
end_token.stream,
min_stream,
),
)

View file

@ -19,7 +19,7 @@
#
#
SCHEMA_VERSION = 85 # remember to update the list below when updating
SCHEMA_VERSION = 86 # remember to update the list below when updating
"""Represents the expectations made by the codebase about the database schema
This should be incremented whenever the codebase changes its requirements on the
@ -139,6 +139,9 @@ Changes in SCHEMA_VERSION = 84
Changes in SCHEMA_VERSION = 85
- Add a column `suspended` to the `users` table
Changes in SCHEMA_VERSION = 86
- Added displayname and avatar_url columns to presence_stream
"""

View file

@ -0,0 +1,15 @@
--
-- This file is licensed under the Affero General Public License (AGPL) version 3.
--
-- Copyright (C) 2024 New Vector, Ltd
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as
-- published by the Free Software Foundation, either version 3 of the
-- License, or (at your option) any later version.
--
-- See the GNU Affero General Public License for more details:
-- <https://www.gnu.org/licenses/agpl-3.0.html>.
ALTER TABLE presence_stream ADD COLUMN displayname TEXT;
ALTER TABLE presence_stream ADD COLUMN avatar_url TEXT;

View file

@ -19,6 +19,8 @@ class EventInternalMetadata:
stream_ordering: Optional[int]
"""the stream ordering of this event. None, until it has been persisted."""
instance_name: Optional[str]
"""the instance name of the server that persisted this event. None, until it has been persisted."""
outlier: bool
"""whether this event is an outlier (ie, whether we have the state at that

View file

@ -238,6 +238,53 @@ class SlidingSyncBody(RequestBodyModel):
"""
class Filters(RequestBodyModel):
"""
All fields are applied with AND operators, hence if `is_dm: True` and
`is_encrypted: True` then only Encrypted DM rooms will be returned. The
absence of fields implies no filter on that criteria: it does NOT imply
`False`. These fields may be expanded through use of extensions.
Attributes:
is_dm: Flag which only returns rooms present (or not) in the DM section
of account data. If unset, both DM rooms and non-DM rooms are returned.
If False, only non-DM rooms are returned. If True, only DM rooms are
returned.
spaces: Filter the room based on the space they belong to according to
`m.space.child` state events. If multiple spaces are present, a room can
be part of any one of the listed spaces (OR'd). The server will inspect
the `m.space.child` state events for the JOINED space room IDs given.
Servers MUST NOT navigate subspaces. It is up to the client to give a
complete list of spaces to navigate. Only rooms directly mentioned as
`m.space.child` events in these spaces will be returned. Unknown spaces
or spaces the user is not joined to will be ignored.
is_encrypted: Flag which only returns rooms which have an
`m.room.encryption` state event. If unset, both encrypted and
unencrypted rooms are returned. If `False`, only unencrypted rooms are
returned. If `True`, only encrypted rooms are returned.
is_invite: Flag which only returns rooms the user is currently invited
to. If unset, both invited and joined rooms are returned. If `False`, no
invited rooms are returned. If `True`, only invited rooms are returned.
room_types: If specified, only rooms where the `m.room.create` event has
a `type` matching one of the strings in this array will be returned. If
this field is unset, all rooms are returned regardless of type. This can
be used to get the initial set of spaces for an account. For rooms which
do not have a room type, use `null`/`None` to include them.
not_room_types: Same as `room_types` but inverted. This can be used to
filter out spaces from the room list. If a type is in both `room_types`
and `not_room_types`, then `not_room_types` wins and they are not included
in the result.
room_name_like: Filter the room name. Case-insensitive partial matching
e.g. 'foo' matches 'abFooab'. The term 'like' is inspired by SQL 'LIKE',
and the text here is similar to '%foo%'.
tags: Filter the room based on its room tags. If multiple tags are
present, a room can have any one of the listed tags (OR'd).
not_tags: Filter the room based on its room tags. Takes priority over
`tags`. For example, a room with tags A and B with filters `tags: [A]`
`not_tags: [B]` would NOT be included because `not_tags` takes priority over
`tags`. This filter is useful if your rooms list does NOT include the
list of favourite rooms again.
"""
is_dm: Optional[StrictBool] = None
spaces: Optional[List[StrictStr]] = None
is_encrypted: Optional[StrictBool] = None
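
As an illustration of how these filters appear in a request, a fragment of a Sliding Sync request body (shape following MSC3575, with a hypothetical list key; of the filters documented above, only `is_dm` is implemented by the handler on this branch):

```python
# Illustrative Sliding Sync request fragment; field names follow MSC3575 and
# the Filters model above. The other filters still raise NotImplementedError.
sliding_sync_request = {
    "lists": {
        "dm-list": {  # hypothetical list key chosen by the client
            "ranges": [[0, 19]],
            "required_state": [["m.room.member", "$LAZY"]],
            "timeline_limit": 1,
            "filters": {
                "is_dm": True,  # only rooms marked as DMs in m.direct
            },
        }
    }
}

assert sliding_sync_request["lists"]["dm-list"]["filters"]["is_dm"] is True
```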

View file

@ -82,7 +82,6 @@ async def filter_events_for_client(
is_peeking: bool = False,
always_include_ids: FrozenSet[str] = frozenset(),
filter_send_to_client: bool = True,
msc4115_membership_on_events: bool = False,
) -> List[EventBase]:
"""
Check which events a user is allowed to see. If the user can see the event but its
@ -101,12 +100,10 @@ async def filter_events_for_client(
filter_send_to_client: Whether we're checking an event that's going to be
sent to a client. This might not always be the case since this function can
also be called to check whether a user can see the state at a given point.
msc4115_membership_on_events: Whether to include the requesting user's
membership in the "unsigned" data, per MSC4115.
Returns:
The filtered events. If `msc4115_membership_on_events` is true, the `unsigned`
data is annotated with the membership state of `user_id` at each event.
The filtered events. The `unsigned` data is annotated with the membership state
of `user_id` at each event.
"""
# Filter out events that have been soft failed so that we don't relay them
# to clients.
@ -159,9 +156,6 @@ async def filter_events_for_client(
if filtered is None:
return None
if not msc4115_membership_on_events:
return filtered
# Annotate the event with the user's membership after the event.
#
# Normally we just look in `state_after_event`, but if the event is an outlier
@ -186,7 +180,7 @@ async def filter_events_for_client(
# Copy the event before updating the unsigned data: this shouldn't be persisted
# to the cache!
cloned = clone_event(filtered)
cloned.unsigned[EventUnsignedContentFields.MSC4115_MEMBERSHIP] = user_membership
cloned.unsigned[EventUnsignedContentFields.MEMBERSHIP] = user_membership
return cloned

View file

@ -450,6 +450,8 @@ class FilteringTestCase(unittest.HomeserverTestCase):
last_user_sync_ts=0,
status_msg=None,
currently_active=False,
displayname=None,
avatar_url=None,
),
]
@ -478,6 +480,8 @@ class FilteringTestCase(unittest.HomeserverTestCase):
last_user_sync_ts=0,
status_msg=None,
currently_active=False,
displayname=None,
avatar_url=None,
),
]

View file

@ -625,6 +625,8 @@ class CloneEventTestCase(stdlib_unittest.TestCase):
)
original.internal_metadata.stream_ordering = 1234
self.assertEqual(original.internal_metadata.stream_ordering, 1234)
original.internal_metadata.instance_name = "worker1"
self.assertEqual(original.internal_metadata.instance_name, "worker1")
cloned = clone_event(original)
cloned.unsigned["b"] = 3
@ -632,6 +634,7 @@ class CloneEventTestCase(stdlib_unittest.TestCase):
self.assertEqual(original.unsigned, {"a": 1, "b": 2})
self.assertEqual(cloned.unsigned, {"a": 1, "b": 3})
self.assertEqual(cloned.internal_metadata.stream_ordering, 1234)
self.assertEqual(cloned.internal_metadata.instance_name, "worker1")
self.assertEqual(cloned.internal_metadata.txn_id, "txn")

View file

@ -366,6 +366,8 @@ class PresenceUpdateTestCase(unittest.HomeserverTestCase):
last_user_sync_ts=1,
status_msg="I'm online!",
currently_active=True,
displayname=None,
avatar_url=None,
)
presence_states.append(presence_state)
@ -718,6 +720,8 @@ class PresenceHandlerInitTestCase(unittest.HomeserverTestCase):
last_user_sync_ts=now,
status_msg=None,
currently_active=True,
displayname=None,
avatar_url=None,
)
]
)

View file

@ -22,8 +22,9 @@ from unittest.mock import patch
from twisted.test.proto_helpers import MemoryReactor
from synapse.api.constants import EventTypes, JoinRules, Membership
from synapse.api.constants import AccountDataTypes, EventTypes, JoinRules, Membership
from synapse.api.room_versions import RoomVersions
from synapse.handlers.sliding_sync import SlidingSyncConfig
from synapse.rest import admin
from synapse.rest.client import knock, login, room
from synapse.server import HomeServer
@ -326,7 +327,7 @@ class GetSyncRoomIdsForUserTestCase(HomeserverTestCase):
# Leave during the from_token/to_token range (newly_left)
room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok)
self.helper.leave(room_id1, user1_id, tok=user1_tok)
self.helper.leave(room_id2, user1_id, tok=user1_tok)
after_room2_token = self.event_sources.get_current_token()
@ -1116,3 +1117,130 @@ class GetSyncRoomIdsForUserEventShardTestCase(BaseMultiWorkerStreamTestCase):
room_id3,
},
)
class FilterRoomsTestCase(HomeserverTestCase):
"""
Tests Sliding Sync handler `filter_rooms()` to make sure it includes/excludes rooms
correctly.
"""
servlets = [
admin.register_servlets,
knock.register_servlets,
login.register_servlets,
room.register_servlets,
]
def default_config(self) -> JsonDict:
config = super().default_config()
# Enable sliding sync
config["experimental_features"] = {"msc3575_enabled": True}
return config
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.sliding_sync_handler = self.hs.get_sliding_sync_handler()
self.store = self.hs.get_datastores().main
self.event_sources = hs.get_event_sources()
def _create_dm_room(
self,
inviter_user_id: str,
inviter_tok: str,
invitee_user_id: str,
invitee_tok: str,
) -> str:
"""
Helper to create a DM room as the "inviter" and invite the "invitee" user to the room. The
"invitee" user also will join the room. The `m.direct` account data will be set
for both users.
"""
# Create a room and send an invite the other user
room_id = self.helper.create_room_as(
inviter_user_id,
is_public=False,
tok=inviter_tok,
)
self.helper.invite(
room_id,
src=inviter_user_id,
targ=invitee_user_id,
tok=inviter_tok,
extra_data={"is_direct": True},
)
# Person that was invited joins the room
self.helper.join(room_id, invitee_user_id, tok=invitee_tok)
# Mimic the client setting the room as a direct message in the global account
# data
self.get_success(
self.store.add_account_data_for_user(
invitee_user_id,
AccountDataTypes.DIRECT,
{inviter_user_id: [room_id]},
)
)
self.get_success(
self.store.add_account_data_for_user(
inviter_user_id,
AccountDataTypes.DIRECT,
{invitee_user_id: [room_id]},
)
)
return room_id
def test_filter_dm_rooms(self) -> None:
"""
Test `filter.is_dm` for DM rooms
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
# Create a normal room
room_id = self.helper.create_room_as(
user1_id,
is_public=False,
tok=user1_tok,
)
# Create a DM room
dm_room_id = self._create_dm_room(
inviter_user_id=user1_id,
inviter_tok=user1_tok,
invitee_user_id=user2_id,
invitee_tok=user2_tok,
)
after_rooms_token = self.event_sources.get_current_token()
# Try with `is_dm=True`
truthy_filtered_room_ids = self.get_success(
self.sliding_sync_handler.filter_rooms(
UserID.from_string(user1_id),
{room_id, dm_room_id},
SlidingSyncConfig.SlidingSyncList.Filters(
is_dm=True,
),
after_rooms_token,
)
)
self.assertEqual(truthy_filtered_room_ids, {dm_room_id})
# Try with `is_dm=False`
falsy_filtered_room_ids = self.get_success(
self.sliding_sync_handler.filter_rooms(
UserID.from_string(user1_id),
{room_id, dm_room_id},
SlidingSyncConfig.SlidingSyncList.Filters(
is_dm=False,
),
after_rooms_token,
)
)
self.assertEqual(falsy_filtered_room_ids, {room_id})
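As an aside, the `m.direct` global account data written by `_create_dm_room` above (stored under `AccountDataTypes.DIRECT`) is just a map from the other user's ID to the DM room IDs shared with them. A minimal sketch of what the helper leaves behind, with placeholder user and room IDs:

# Hypothetical content of user1's `m.direct` account data after the helper runs;
# the user and room IDs are placeholders, not values taken from the tests.
m_direct_for_user1 = {
    "@user2:test": ["!dm_room:test"],
}
# user2 gets the mirror image, keyed on user1's ID.
m_direct_for_user2 = {
    "@user1:test": ["!dm_room:test"],
}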


@@ -205,8 +205,24 @@ class EmailPusherTests(HomeserverTestCase):
# Multipart: plain text, base 64 encoded; html, base 64 encoded
multipart_msg = email.message_from_bytes(msg)
txt = multipart_msg.get_payload()[0].get_payload(decode=True).decode()
html = multipart_msg.get_payload()[1].get_payload(decode=True).decode()
# Extract the text (non-HTML) portion of the multipart Message,
# as a Message.
txt_message = multipart_msg.get_payload(i=0)
assert isinstance(txt_message, email.message.Message)
# Extract the actual bytes from the Message object, and decode them to a `str`.
txt_bytes = txt_message.get_payload(decode=True)
assert isinstance(txt_bytes, bytes)
txt = txt_bytes.decode()
# Do the same for the HTML portion of the multipart Message.
html_message = multipart_msg.get_payload(i=1)
assert isinstance(html_message, email.message.Message)
html_bytes = html_message.get_payload(decode=True)
assert isinstance(html_bytes, bytes)
html = html_bytes.decode()
self.assertIn("/_synapse/client/unsubscribe", txt)
self.assertIn("/_synapse/client/unsubscribe", html)
@@ -347,12 +363,17 @@ class EmailPusherTests(HomeserverTestCase):
# That email should contain the room's avatar
msg: bytes = args[5]
# Multipart: plain text, base 64 encoded; html, base 64 encoded
html = (
email.message_from_bytes(msg)
.get_payload()[1]
.get_payload(decode=True)
.decode()
)
# Extract the html Message object from the Multipart Message.
# We need the asserts to convince mypy that this is OK.
html_message = email.message_from_bytes(msg).get_payload(i=1)
assert isinstance(html_message, email.message.Message)
# Extract the `bytes` from the html Message object, and decode to a `str`.
html = html_message.get_payload(decode=True)
assert isinstance(html, bytes)
html = html.decode()
self.assertIn("_matrix/media/v1/thumbnail/DUMMY_MEDIA_ID", html)
def test_empty_room(self) -> None:


@@ -141,6 +141,7 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase):
self.persist(type="m.room.create", key="", creator=USER_ID)
self.check("get_invited_rooms_for_local_user", [USER_ID_2], [])
event = self.persist(type="m.room.member", key=USER_ID_2, membership="invite")
assert event.internal_metadata.instance_name is not None
assert event.internal_metadata.stream_ordering is not None
self.replicate()
@@ -155,7 +156,7 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase):
"invite",
event.event_id,
PersistedEventPosition(
self.hs.get_instance_name(),
event.internal_metadata.instance_name,
event.internal_metadata.stream_ordering,
),
RoomVersions.V1.identifier,
@@ -232,11 +233,12 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase):
j2 = self.persist(
type="m.room.member", sender=USER_ID_2, key=USER_ID_2, membership="join"
)
assert j2.internal_metadata.instance_name is not None
assert j2.internal_metadata.stream_ordering is not None
self.replicate()
expected_pos = PersistedEventPosition(
"master", j2.internal_metadata.stream_ordering
j2.internal_metadata.instance_name, j2.internal_metadata.stream_ordering
)
self.check(
"get_rooms_for_user_with_stream_ordering",
@@ -288,6 +290,7 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase):
msg, msgctx = self.build_event()
self.get_success(self.persistance.persist_events([(j2, j2ctx), (msg, msgctx)]))
self.replicate()
assert j2.internal_metadata.instance_name is not None
assert j2.internal_metadata.stream_ordering is not None
event_source = RoomEventSource(self.hs)
@@ -329,7 +332,8 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase):
# joined_rooms list.
if membership_changes:
expected_pos = PersistedEventPosition(
"master", j2.internal_metadata.stream_ordering
j2.internal_metadata.instance_name,
j2.internal_metadata.stream_ordering,
)
self.assertEqual(
joined_rooms,


@@ -427,13 +427,23 @@ class PasswordResetTestCase(unittest.HomeserverTestCase):
text = None
for part in mail.walk():
if part.get_content_type() == "text/plain":
text = part.get_payload(decode=True).decode("UTF-8")
text = part.get_payload(decode=True)
if text is not None:
# According to the logic table in `get_payload`, we know that
# the result of `get_payload` will be `bytes`, but mypy doesn't
# know this and complains. Thus, we assert the type.
assert isinstance(text, bytes)
text = text.decode("UTF-8")
break
if not text:
self.fail("Could not find text portion of email to parse")
assert text is not None
# `text` must be a `str`, after being decoded and determined just above
# to not be `None` or an empty `str`.
assert isinstance(text, str)
match = re.search(r"https://example.com\S+", text)
assert match, "Could not find link in email"
@@ -1209,13 +1219,23 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase):
text = None
for part in mail.walk():
if part.get_content_type() == "text/plain":
text = part.get_payload(decode=True).decode("UTF-8")
text = part.get_payload(decode=True)
if text is not None:
# According to the logic table in `get_payload`, we know that
# the result of `get_payload` will be `bytes`, but mypy doesn't
# know this and complains. Thus, we assert the type.
assert isinstance(text, bytes)
text = text.decode("UTF-8")
break
if not text:
self.fail("Could not find text portion of email to parse")
assert text is not None
# `text` must be a `str`, after being decoded and determined just above
# to not be `None` or an empty `str`.
assert isinstance(text, str)
match = re.search(r"https://example.com\S+", text)
assert match, "Could not find link in email"


@@ -167,7 +167,6 @@ class RetentionTestCase(unittest.HomeserverTestCase):
storage_controllers,
self.user_id,
events,
msc4115_membership_on_events=True,
)
)


@@ -27,6 +27,7 @@ from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.api.constants import (
AccountDataTypes,
EventContentFields,
EventTypes,
ReceiptTypes,
@@ -1226,10 +1227,59 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
return config
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.sync_endpoint = "/_matrix/client/unstable/org.matrix.msc3575/sync"
self.store = hs.get_datastores().main
self.event_sources = hs.get_event_sources()
def _create_dm_room(
self,
inviter_user_id: str,
inviter_tok: str,
invitee_user_id: str,
invitee_tok: str,
) -> str:
"""
Helper to create a DM room as the "inviter" and invite the "invitee" user to the
room. The "invitee" user will also join the room. The `m.direct` account data
will be set for both users.
"""
# Create a room and send an invite to the other user
room_id = self.helper.create_room_as(
inviter_user_id,
is_public=False,
tok=inviter_tok,
)
self.helper.invite(
room_id,
src=inviter_user_id,
targ=invitee_user_id,
tok=inviter_tok,
extra_data={"is_direct": True},
)
# Person that was invited joins the room
self.helper.join(room_id, invitee_user_id, tok=invitee_tok)
# Mimic the client setting the room as a direct message in the global account
# data
self.get_success(
self.store.add_account_data_for_user(
invitee_user_id,
AccountDataTypes.DIRECT,
{inviter_user_id: [room_id]},
)
)
self.get_success(
self.store.add_account_data_for_user(
inviter_user_id,
AccountDataTypes.DIRECT,
{invitee_user_id: [room_id]},
)
)
return room_id
def test_sync_list(self) -> None:
"""
Test that room IDs show up in the Sliding Sync lists
@@ -1336,3 +1386,80 @@ class SlidingSyncTestCase(unittest.HomeserverTestCase):
self.assertEqual(
channel.json_body["next_pos"], future_position_token_serialized
)
def test_filter_list(self) -> None:
"""
Test that filters apply to lists
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
# Create a DM room
dm_room_id = self._create_dm_room(
inviter_user_id=user1_id,
inviter_tok=user1_tok,
invitee_user_id=user2_id,
invitee_tok=user2_tok,
)
# Create a normal room
room_id = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
# Make the Sliding Sync request
channel = self.make_request(
"POST",
self.sync_endpoint,
{
"lists": {
"dms": {
"ranges": [[0, 99]],
"sort": ["by_recency"],
"required_state": [],
"timeline_limit": 1,
"filters": {"is_dm": True},
},
"foo-list": {
"ranges": [[0, 99]],
"sort": ["by_recency"],
"required_state": [],
"timeline_limit": 1,
"filters": {"is_dm": False},
},
}
},
access_token=user1_tok,
)
self.assertEqual(channel.code, 200, channel.json_body)
# Make sure it has the lists we requested
self.assertListEqual(
list(channel.json_body["lists"].keys()),
["dms", "foo-list"],
channel.json_body["lists"].keys(),
)
# Make sure each list includes the room we expect
self.assertListEqual(
list(channel.json_body["lists"]["dms"]["ops"]),
[
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [dm_room_id],
}
],
list(channel.json_body["lists"]["dms"]),
)
self.assertListEqual(
list(channel.json_body["lists"]["foo-list"]["ops"]),
[
{
"op": "SYNC",
"range": [0, 99],
"room_ids": [room_id],
}
],
list(channel.json_body["lists"]["foo-list"]),
)


@@ -431,6 +431,7 @@ class EventChainStoreTestCase(HomeserverTestCase):
for e in events:
e.internal_metadata.stream_ordering = self._next_stream_ordering
e.internal_metadata.instance_name = self.hs.get_instance_name()
self._next_stream_ordering += 1
def _persist(txn: LoggingTransaction) -> None:


@@ -19,7 +19,10 @@
#
#
from typing import List
import logging
from typing import List, Tuple
from immutabledict import immutabledict
from twisted.test.proto_helpers import MemoryReactor
@@ -28,11 +31,13 @@ from synapse.api.filtering import Filter
from synapse.rest import admin
from synapse.rest.client import login, room
from synapse.server import HomeServer
from synapse.types import JsonDict
from synapse.types import JsonDict, PersistedEventPosition, RoomStreamToken
from synapse.util import Clock
from tests.unittest import HomeserverTestCase
logger = logging.getLogger(__name__)
class PaginationTestCase(HomeserverTestCase):
"""
@@ -268,3 +273,263 @@ class PaginationTestCase(HomeserverTestCase):
}
chunk = self._filter_messages(filter)
self.assertEqual(chunk, [self.event_id_1, self.event_id_2, self.event_id_none])
class GetLastEventInRoomBeforeStreamOrderingTestCase(HomeserverTestCase):
"""
Test `get_last_event_in_room_before_stream_ordering(...)`
"""
servlets = [
admin.register_servlets,
room.register_servlets,
login.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.event_sources = hs.get_event_sources()
def _update_persisted_instance_name_for_event(
self, event_id: str, instance_name: str
) -> None:
"""
Update the `instance_name` that persisted the event in the database.
"""
return self.get_success(
self.store.db_pool.simple_update_one(
"events",
keyvalues={"event_id": event_id},
updatevalues={"instance_name": instance_name},
)
)
def _send_event_on_instance(
self, instance_name: str, room_id: str, access_token: str
) -> Tuple[JsonDict, PersistedEventPosition]:
"""
Send an event in a room and mimic that it was persisted by a specific
instance/worker.
"""
event_response = self.helper.send(
room_id, f"{instance_name} message", tok=access_token
)
self._update_persisted_instance_name_for_event(
event_response["event_id"], instance_name
)
event_pos = self.get_success(
self.store.get_position_for_event(event_response["event_id"])
)
return event_response, event_pos
def test_before_room_created(self) -> None:
"""
Test that no event is returned if we are using a token before the room was even created
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
before_room_token = self.event_sources.get_current_token()
room_id = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
last_event = self.get_success(
self.store.get_last_event_in_room_before_stream_ordering(
room_id=room_id,
end_token=before_room_token.room_key,
)
)
self.assertIsNone(last_event)
def test_after_room_created(self) -> None:
"""
Test that an event is returned if we are using a token after the room was created
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
room_id = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
after_room_token = self.event_sources.get_current_token()
last_event = self.get_success(
self.store.get_last_event_in_room_before_stream_ordering(
room_id=room_id,
end_token=after_room_token.room_key,
)
)
self.assertIsNotNone(last_event)
def test_activity_in_other_rooms(self) -> None:
"""
Test to make sure that the last event in the room is returned even if the
`stream_ordering` has advanced from activity in other rooms.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
event_response = self.helper.send(room_id1, "target!", tok=user1_tok)
# Create another room to advance the stream_ordering
self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
after_room_token = self.event_sources.get_current_token()
last_event = self.get_success(
self.store.get_last_event_in_room_before_stream_ordering(
room_id=room_id1,
end_token=after_room_token.room_key,
)
)
# Make sure it's the event we expect (which also means we know it's from the
# correct room)
self.assertEqual(last_event, event_response["event_id"])
def test_activity_after_token_has_no_effect(self) -> None:
"""
Test to make sure we return the last event before the token even if there is
activity after it.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
event_response = self.helper.send(room_id1, "target!", tok=user1_tok)
after_room_token = self.event_sources.get_current_token()
# Send some events after the token
self.helper.send(room_id1, "after1", tok=user1_tok)
self.helper.send(room_id1, "after2", tok=user1_tok)
last_event = self.get_success(
self.store.get_last_event_in_room_before_stream_ordering(
room_id=room_id1,
end_token=after_room_token.room_key,
)
)
# Make sure it's the last event before the token
self.assertEqual(last_event, event_response["event_id"])
def test_last_event_within_sharded_token(self) -> None:
"""
Test to make sure we can find the last event that is *within* the sharded
token (a token that has an `instance_map` and looks like
`m{min_pos}~{writer1}.{pos1}~{writer2}.{pos2}`). We are specifically testing
that we can find an event whose `stream_ordering` falls between the token's
minimum position and its per-instance position.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
event_response1, event_pos1 = self._send_event_on_instance(
"worker1", room_id1, user1_tok
)
event_response2, event_pos2 = self._send_event_on_instance(
"worker1", room_id1, user1_tok
)
event_response3, event_pos3 = self._send_event_on_instance(
"worker1", room_id1, user1_tok
)
# Create another room to advance the `stream_ordering` on the same worker
# so we can sandwich event3 in the middle of the token
room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
event_response4, event_pos4 = self._send_event_on_instance(
"worker1", room_id2, user1_tok
)
# Assemble a token that encompasses event1 -> event4 on worker1
end_token = RoomStreamToken(
stream=event_pos2.stream,
instance_map=immutabledict({"worker1": event_pos4.stream}),
)
# Send some events after the token
self.helper.send(room_id1, "after1", tok=user1_tok)
self.helper.send(room_id1, "after2", tok=user1_tok)
last_event = self.get_success(
self.store.get_last_event_in_room_before_stream_ordering(
room_id=room_id1,
end_token=end_token,
)
)
# Should find closest event at/before the token in room1
self.assertEqual(
last_event,
event_response3["event_id"],
f"We expected {event_response3['event_id']} but saw {last_event} which corresponds to "
+ str(
{
"event1": event_response1["event_id"],
"event2": event_response2["event_id"],
"event3": event_response3["event_id"],
}
),
)
def test_last_event_before_sharded_token(self) -> None:
"""
Test to make sure we can find the last event that is *before* the sharded token
(a token that has an `instance_map` and looks like
`m{min_pos}~{writer1}.{pos1}~{writer2}.{pos2}`).
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
room_id1 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
event_response1, event_pos1 = self._send_event_on_instance(
"worker1", room_id1, user1_tok
)
event_response2, event_pos2 = self._send_event_on_instance(
"worker1", room_id1, user1_tok
)
# Create another room to advance the `stream_ordering` on the same worker
room_id2 = self.helper.create_room_as(user1_id, tok=user1_tok, is_public=True)
event_response3, event_pos3 = self._send_event_on_instance(
"worker1", room_id2, user1_tok
)
event_response4, event_pos4 = self._send_event_on_instance(
"worker1", room_id2, user1_tok
)
# Assemble a token that encompasses event3 -> event4 on worker1
end_token = RoomStreamToken(
stream=event_pos3.stream,
instance_map=immutabledict({"worker1": event_pos4.stream}),
)
# Send some events after the token
self.helper.send(room_id1, "after1", tok=user1_tok)
self.helper.send(room_id1, "after2", tok=user1_tok)
last_event = self.get_success(
self.store.get_last_event_in_room_before_stream_ordering(
room_id=room_id1,
end_token=end_token,
)
)
# Should find closest event at/before the token in room1
self.assertEqual(
last_event,
event_response2["event_id"],
f"We expected {event_response2['event_id']} but saw {last_event} which corresponds to "
+ str(
{
"event1": event_response1["event_id"],
"event2": event_response2["event_id"],
}
),
)
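To make the sharded-token shape used by the two tests above a little more concrete, here is a minimal sketch of the kind of token they assemble, with made-up positions (the semantics in the comments follow the docstrings above rather than anything new):

from immutabledict import immutabledict
from synapse.types import RoomStreamToken

# Hypothetical positions: "worker1" is known to be at stream position 5, while
# the minimum position across all other writers is 2.
end_token = RoomStreamToken(
    stream=2,
    instance_map=immutabledict({"worker1": 5}),
)
# Following the `m{min_pos}~{writer1}.{pos1}` format from the docstrings, this
# token reads as `m2~worker1.5`: an event counts as being before the token if
# its stream_ordering is <= 2 for an unlisted writer, or <= 5 when it was
# persisted by "worker1".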


@@ -336,7 +336,6 @@ class FilterEventsForClientTestCase(HomeserverTestCase):
self.hs.get_storage_controllers(),
"@joiner:test",
events_to_filter,
msc4115_membership_on_events=True,
)
)
resident_filtered_events = self.get_success(
@@ -344,7 +343,6 @@ class FilterEventsForClientTestCase(HomeserverTestCase):
self.hs.get_storage_controllers(),
"@resident:test",
events_to_filter,
msc4115_membership_on_events=True,
)
)
@@ -357,7 +355,7 @@ class FilterEventsForClientTestCase(HomeserverTestCase):
self.assertEqual(
["join", "join", "leave"],
[
e.unsigned[EventUnsignedContentFields.MSC4115_MEMBERSHIP]
e.unsigned[EventUnsignedContentFields.MEMBERSHIP]
for e in joiner_filtered_events
],
)
@@ -379,7 +377,7 @@ class FilterEventsForClientTestCase(HomeserverTestCase):
self.assertEqual(
["join", "join", "join", "join", "join"],
[
e.unsigned[EventUnsignedContentFields.MSC4115_MEMBERSHIP]
e.unsigned[EventUnsignedContentFields.MEMBERSHIP]
for e in resident_filtered_events
],
)
@@ -441,7 +439,6 @@ class FilterEventsOutOfBandEventsForClientTestCase(
self.hs.get_storage_controllers(),
"@user:test",
[invite_event, reject_event],
msc4115_membership_on_events=True,
)
)
self.assertEqual(
@@ -451,7 +448,7 @@ class FilterEventsOutOfBandEventsForClientTestCase(
self.assertEqual(
["invite", "leave"],
[
e.unsigned[EventUnsignedContentFields.MSC4115_MEMBERSHIP]
e.unsigned[EventUnsignedContentFields.MEMBERSHIP]
for e in filtered_events
],
)
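Since these assertions now read the stable field, the events the test inspects carry the requesting user's membership under `unsigned`. A sketch of the relevant slice of such a serialized event, assuming `EventUnsignedContentFields.MEMBERSHIP` resolves to the stable `membership` key from MSC4115 (all other values are illustrative):

# Hypothetical serialized event as a client would see it after filtering.
filtered_event_json = {
    "type": "m.room.member",
    "state_key": "@user:test",
    "content": {"membership": "invite"},
    "unsigned": {
        # The *requesting* user's membership in the room at this event.
        "membership": "invite",
    },
}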
@@ -463,7 +460,6 @@ class FilterEventsOutOfBandEventsForClientTestCase(
self.hs.get_storage_controllers(),
"@other:test",
[invite_event, reject_event],
msc4115_membership_on_events=True,
)
),
[],