Merge remote-tracking branch 'origin/release-v1.62' into develop

Patrick Cloke 2022-06-30 13:27:24 -04:00
commit b0366853ca
6 changed files with 64 additions and 33 deletions

changelog.d/13140.bugfix (new file)

@@ -0,0 +1 @@
+Fix unread counts for users on large servers. Introduced in v1.62.0rc1.

changelog.d/13141.bugfix (new file)

@@ -0,0 +1 @@
+Fix DB performance when deleting old push notifications. Introduced in v1.62.0rc1.

synapse/_scripts/synapse_port_db.py

@@ -58,9 +58,7 @@ from synapse.storage.databases.main.client_ips import ClientIpBackgroundUpdateStore
 from synapse.storage.databases.main.deviceinbox import DeviceInboxBackgroundUpdateStore
 from synapse.storage.databases.main.devices import DeviceBackgroundUpdateStore
 from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyBackgroundStore
-from synapse.storage.databases.main.event_push_actions import (
-    EventPushActionsWorkerStore,
-)
+from synapse.storage.databases.main.event_push_actions import EventPushActionsStore
 from synapse.storage.databases.main.events_bg_updates import (
     EventsBackgroundUpdatesStore,
 )
@@ -202,7 +200,7 @@ R = TypeVar("R")
 class Store(
-    EventPushActionsWorkerStore,
+    EventPushActionsStore,
     ClientIpBackgroundUpdateStore,
     DeviceInboxBackgroundUpdateStore,
    DeviceBackgroundUpdateStore,

synapse/storage/databases/main/event_push_actions.py

@@ -864,18 +864,20 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBaseStore):
         limit = 100

-        min_stream_id = self.db_pool.simple_select_one_onecol_txn(
+        min_receipts_stream_id = self.db_pool.simple_select_one_onecol_txn(
             txn,
             table="event_push_summary_last_receipt_stream_id",
             keyvalues={},
             retcol="stream_id",
         )
+        max_receipts_stream_id = self._receipts_id_gen.get_current_token()

         sql = """
             SELECT r.stream_id, r.room_id, r.user_id, e.stream_ordering
             FROM receipts_linearized AS r
             INNER JOIN events AS e USING (event_id)
-            WHERE r.stream_id > ? AND user_id LIKE ?
+            WHERE ? < r.stream_id AND r.stream_id <= ? AND user_id LIKE ?
             ORDER BY r.stream_id ASC
             LIMIT ?
         """
@@ -887,13 +889,21 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBaseStore):
         txn.execute(
             sql,
             (
-                min_stream_id,
+                min_receipts_stream_id,
+                max_receipts_stream_id,
                 user_filter,
                 limit,
             ),
         )
         rows = txn.fetchall()

+        old_rotate_stream_ordering = self.db_pool.simple_select_one_onecol_txn(
+            txn,
+            table="event_push_summary_stream_ordering",
+            keyvalues={},
+            retcol="stream_ordering",
+        )
+
         # For each new read receipt we delete push actions from before it and
         # recalculate the summary.
         for _, room_id, user_id, stream_ordering in rows:
@@ -912,13 +922,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBaseStore):
                 (room_id, user_id, stream_ordering),
             )

-            old_rotate_stream_ordering = self.db_pool.simple_select_one_onecol_txn(
-                txn,
-                table="event_push_summary_stream_ordering",
-                keyvalues={},
-                retcol="stream_ordering",
-            )
-
             notif_count, unread_count = self._get_notif_unread_count_for_user_room(
                 txn, room_id, user_id, stream_ordering, old_rotate_stream_ordering
             )
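
Hoisting that `event_push_summary_stream_ordering` lookup out of the per-receipt loop is a loop-invariant read: the value is the same for every row in the batch, so reading it once saves one SELECT per receipt. A minimal sketch of the pattern, with illustrative callables standing in for the real store methods:

from typing import Callable, Iterable, Tuple

def recalc_summaries(
    receipts: Iterable[Tuple[str, str, int]],  # (room_id, user_id, stream_ordering)
    read_threshold: Callable[[], int],  # stands in for the one-row SELECT above
    recalc: Callable[[str, str, int, int], None],
) -> None:
    # One query per batch instead of one per processed receipt.
    threshold = read_threshold()
    for room_id, user_id, stream_ordering in receipts:
        recalc(room_id, user_id, stream_ordering, threshold)
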
@@ -937,18 +940,19 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBaseStore):
         # We always update `event_push_summary_last_receipt_stream_id` to
         # ensure that we don't rescan the same receipts for remote users.
-        #
-        # This requires repeatable read to be safe, as we need the
-        # `MAX(stream_id)` to not include any new rows that have been committed
-        # since the start of the transaction (since those rows won't have been
-        # returned by the query above). Alternatively we could query the max
-        # stream ID at the start of the transaction and bound everything by
-        # that.
-        txn.execute(
-            """
-            UPDATE event_push_summary_last_receipt_stream_id
-            SET stream_id = (SELECT COALESCE(MAX(stream_id), 0) FROM receipts_linearized)
-            """
-        )
+        upper_limit = max_receipts_stream_id
+        if len(rows) >= limit:
+            # If we pulled out a limited number of rows we only update the
+            # position to the last receipt we processed, so we continue
+            # processing the rest next iteration.
+            upper_limit = rows[-1][0]
+
+        self.db_pool.simple_update_txn(
+            txn,
+            table="event_push_summary_last_receipt_stream_id",
+            keyvalues={},
+            updatevalues={"stream_id": upper_limit},
+        )

         return len(rows) < limit
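
The rewritten update bounds everything by `max_receipts_stream_id`, a token snapshotted before the SELECT, instead of recomputing `MAX(stream_id)` at commit time, which is why the old repeatable-read caveat disappears; it also advances the persisted position only as far as the batch actually got, making the scan resumable. A self-contained sketch of that position-advancing rule (made-up tuples, not the real schema):

from typing import List, Tuple

def advance_position(
    rows: List[Tuple[int, str, str, int]],  # (stream_id, room_id, user_id, stream_ordering)
    max_receipts_stream_id: int,
    limit: int,
) -> Tuple[int, bool]:
    if len(rows) >= limit:
        # Full batch: more receipts may remain in range, so only advance to
        # the last one processed and let the next run pick up the rest.
        return rows[-1][0], False
    # Short batch: everything up to the snapshot has been handled.
    return max_receipts_stream_id, True

# A full batch (limit hit) stops at the last processed receipt:
assert advance_position([(5, "!a", "@u", 10), (7, "!a", "@u", 12)], 9, 2) == (7, False)
# A short batch means the scan has caught up to the snapshot:
assert advance_position([(8, "!a", "@u", 13)], 9, 2) == (9, True)
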
@@ -1199,6 +1203,16 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
             where_clause="highlight=1",
         )

+        # Add index to make deleting old push actions faster.
+        self.db_pool.updates.register_background_index_update(
+            "event_push_actions_stream_highlight_index",
+            index_name="event_push_actions_stream_highlight_index",
+            table="event_push_actions",
+            columns=["highlight", "stream_ordering"],
+            where_clause="highlight=0",
+            psql_only=True,
+        )
+
     async def get_push_actions_for_user(
         self,
         user_id: str,
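
Since the update is registered with `psql_only=True`, only PostgreSQL deployments build the index; Synapse's background updater creates such indexes with `CREATE INDEX CONCURRENTLY` so the table stays usable while the index builds. The resulting DDL should look roughly like this sketch (illustrative, not the exact statement emitted):

# Partial index over non-highlight rows, keyed by stream position, so bulk
# deletes of old push actions can seek instead of scanning the whole table.
EXPECTED_INDEX_DDL = """
    CREATE INDEX CONCURRENTLY event_push_actions_stream_highlight_index
        ON event_push_actions (highlight, stream_ordering)
        WHERE highlight = 0
"""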

synapse/storage/schema/main/delta/72/02event_push_actions_index.sql (new file)

@@ -0,0 +1,19 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Add an index to `event_push_actions` to make deleting old non-highlight push
+-- actions faster.
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+  (7202, 'event_push_actions_stream_highlight_index', '{}');
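
The delta only queues the update in the `background_updates` table (by Synapse convention the ordering 7202 encodes schema version 72, delta 02); the `CREATE INDEX` itself is issued later by the updater registered in `event_push_actions.py`. The cleanup it speeds up deletes already-summarised, non-highlight push actions by stream position, a query shape roughly like this sketch (not Synapse's exact DELETE):

# Illustrative query shape served by (highlight, stream_ordering) WHERE highlight=0.
CLEANUP_SQL = """
    DELETE FROM event_push_actions
    WHERE highlight = 0 AND stream_ordering <= ?
"""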

tests/storage/test_event_push_actions.py

@@ -136,15 +136,12 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
             last_read_stream_ordering[0] = stream
             self.get_success(
-                self.store.db_pool.runInteraction(
-                    "",
-                    self.store._insert_linearized_receipt_txn,
+                self.store.insert_receipt(
                     room_id,
                     "m.read",
-                    user_id,
-                    f"$test{stream}:example.com",
-                    {},
-                    stream,
+                    user_id=user_id,
+                    event_ids=[f"$test{stream}:example.com"],
+                    data={},
                 )
             )
@@ -168,6 +165,7 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
         _inject_actions(6, PlAIN_NOTIF)
         _rotate(7)
+        _assert_counts(1, 0)

         self.get_success(
             self.store.db_pool.simple_delete(