This repository was archived by the owner on Apr 26, 2024. It is now read-only.

Commit a8da046

Invalidate the get_users_in_room{_with_profile} caches only when necessary. (#11878)
The `get_users_in_room` and `get_users_in_room_with_profiles` caches are now invalidated only when the membership of a room changes, rather than on every state change in the room.
1 parent 41818cd commit a8da046
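
In plain terms, the two membership caches are now dropped only when `members_changed` is non-empty, while the state-keyed caches are still dropped on every state change. Below is a minimal, self-contained sketch of that behaviour; the dict-based `caches` structure and the `invalidate_state_caches` function are illustrative stand-ins for Synapse's real cache machinery (the actual change is in `_invalidate_state_caches`, shown in the diff further down), not its API.

```python
from typing import Collection, Dict, Tuple

# Each named cache is modelled here as a plain dict keyed by a (room_id,) tuple.
CacheDict = Dict[Tuple[str, ...], object]


def invalidate_state_caches(
    caches: Dict[str, CacheDict],
    room_id: str,
    members_changed: Collection[str],
) -> None:
    """Clear the state-based caches for room_id after a state change."""
    # Membership-dependent caches: cleared only when membership actually changed.
    if members_changed:
        caches.setdefault("get_users_in_room", {}).pop((room_id,), None)
        caches.setdefault("get_users_in_room_with_profiles", {}).pop((room_id,), None)

    # Caches keyed on arbitrary room state: always cleared.
    caches.setdefault("get_room_summary", {}).pop((room_id,), None)
    caches.setdefault("get_current_state_ids", {}).pop((room_id,), None)


# Example: a topic change touches room state but not membership, so the
# membership caches survive; a join or leave would clear them as well.
caches: Dict[str, CacheDict] = {
    "get_users_in_room": {("!room:example.org",): ["@alice:example.org"]}
}
invalidate_state_caches(caches, "!room:example.org", members_changed=[])
assert ("!room:example.org",) in caches["get_users_in_room"]
```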

3 files changed: 20 additions & 8 deletions

changelog.d/11878.misc

Lines changed: 1 addition & 0 deletions
```diff
@@ -0,0 +1 @@
+Do not needlessly clear the `get_users_in_room` and `get_users_in_room_with_profiles` caches when any room state changes.
```

synapse/storage/_base.py

Lines changed: 8 additions & 3 deletions
```diff
@@ -57,7 +57,7 @@ def process_replication_rows(
         pass
 
     def _invalidate_state_caches(
-        self, room_id: str, members_changed: Iterable[str]
+        self, room_id: str, members_changed: Collection[str]
     ) -> None:
         """Invalidates caches that are based on the current state, but does
         not stream invalidations down replication.
@@ -66,11 +66,16 @@ def _invalidate_state_caches(
             room_id: Room where state changed
             members_changed: The user_ids of members that have changed
         """
+        # If there were any membership changes, purge the appropriate caches.
         for host in {get_domain_from_id(u) for u in members_changed}:
             self._attempt_to_invalidate_cache("is_host_joined", (room_id, host))
+        if members_changed:
+            self._attempt_to_invalidate_cache("get_users_in_room", (room_id,))
+            self._attempt_to_invalidate_cache(
+                "get_users_in_room_with_profiles", (room_id,)
+            )
 
-        self._attempt_to_invalidate_cache("get_users_in_room", (room_id,))
-        self._attempt_to_invalidate_cache("get_users_in_room_with_profiles", (room_id,))
+        # Purge other caches based on room state.
         self._attempt_to_invalidate_cache("get_room_summary", (room_id,))
         self._attempt_to_invalidate_cache("get_current_state_ids", (room_id,))
```
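
Both branches above funnel through `_attempt_to_invalidate_cache`. As a hedged sketch of what a helper of that shape plausibly does (an assumption for illustration, not a copy of Synapse's implementation): look the cache up by name on the store and invalidate the given key, skipping stores that do not define that cache.

```python
from typing import Any, Tuple


class StoreSketch:
    """Illustrative store with named caches; the lookup-by-name behaviour is an
    assumption about helpers of this shape, not Synapse's actual code."""

    def _attempt_to_invalidate_cache(self, cache_name: str, key: Tuple[Any, ...]) -> None:
        # Assumed behaviour: caches hang off the store as attributes named after
        # the cached function; stores lacking a given cache are silently skipped.
        cache = getattr(self, cache_name, None)
        if cache is None:
            return
        cache.invalidate(key)
```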

synapse/storage/databases/main/cache.py

Lines changed: 11 additions & 5 deletions
```diff
@@ -15,7 +15,7 @@
 
 import itertools
 import logging
-from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple
+from typing import TYPE_CHECKING, Any, Collection, Iterable, List, Optional, Tuple
 
 from synapse.api.constants import EventTypes
 from synapse.replication.tcp.streams import BackfillStream, CachesStream
@@ -25,7 +25,11 @@
     EventsStreamEventRow,
 )
 from synapse.storage._base import SQLBaseStore
-from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
+from synapse.storage.database import (
+    DatabasePool,
+    LoggingDatabaseConnection,
+    LoggingTransaction,
+)
 from synapse.storage.engines import PostgresEngine
 from synapse.util.iterutils import batch_iter
 
@@ -236,16 +240,18 @@ def _invalidate_all_cache_and_stream(self, txn, cache_func):
         txn.call_after(cache_func.invalidate_all)
         self._send_invalidation_to_replication(txn, cache_func.__name__, None)
 
-    def _invalidate_state_caches_and_stream(self, txn, room_id, members_changed):
+    def _invalidate_state_caches_and_stream(
+        self, txn: LoggingTransaction, room_id: str, members_changed: Collection[str]
+    ) -> None:
         """Special case invalidation of caches based on current state.
 
         We special case this so that we can batch the cache invalidations into a
         single replication poke.
 
         Args:
             txn
-            room_id (str): Room where state changed
-            members_changed (iterable[str]): The user_ids of members that have changed
+            room_id: Room where state changed
+            members_changed: The user_ids of members that have changed
         """
         txn.call_after(self._invalidate_state_caches, room_id, members_changed)
```
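The docstring explains why everything is routed through the transaction object: invalidations are queued with `txn.call_after` so they run once, after the transaction commits, which lets them be batched into a single replication poke. Below is a minimal sketch of that deferral pattern under stated assumptions; `TransactionSketch` is a hypothetical stand-in for `LoggingTransaction`, not its real interface.

```python
from typing import Any, Callable, List


class TransactionSketch:
    """Hypothetical stand-in for a transaction with call_after semantics."""

    def __init__(self) -> None:
        self._after_callbacks: List[Callable[[], None]] = []

    def call_after(self, fn: Callable[..., None], *args: Any) -> None:
        # Queue the call rather than running it immediately.
        self._after_callbacks.append(lambda: fn(*args))

    def commit(self) -> None:
        # Only after a successful commit do the queued invalidations run.
        for callback in self._after_callbacks:
            callback()
        self._after_callbacks.clear()


txn = TransactionSketch()
txn.call_after(print, "invalidate state caches for", "!room:example.org")
txn.commit()  # runs the queued call once, after the "transaction" commits
```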
