From 17886d2603112531d4eda459d312f84d0d677652 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 15 Dec 2021 10:40:52 +0000 Subject: [PATCH 001/474] Add experimental support for MSC3202: allowing application services to masquerade as specific devices. (#11538) --- changelog.d/11538.feature | 1 + synapse/api/auth.py | 86 +++++++++++++++++++---- synapse/config/experimental.py | 5 ++ synapse/storage/databases/main/devices.py | 20 ++++++ tests/api/test_auth.py | 64 +++++++++++++++++ 5 files changed, 162 insertions(+), 14 deletions(-) create mode 100644 changelog.d/11538.feature diff --git a/changelog.d/11538.feature b/changelog.d/11538.feature new file mode 100644 index 000000000000..b6229e2b4522 --- /dev/null +++ b/changelog.d/11538.feature @@ -0,0 +1 @@ +Add experimental support for MSC3202: allowing application services to masquerade as specific devices. \ No newline at end of file diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 44883c6663ff..0bf58dff08a5 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -155,7 +155,11 @@ async def get_user_by_req( access_token = self.get_access_token_from_request(request) - user_id, app_service = await self._get_appservice_user_id(request) + ( + user_id, + device_id, + app_service, + ) = await self._get_appservice_user_id_and_device_id(request) if user_id and app_service: if ip_addr and self._track_appservice_user_ips: await self.store.insert_client_ip( @@ -163,16 +167,22 @@ async def get_user_by_req( access_token=access_token, ip=ip_addr, user_agent=user_agent, - device_id="dummy-device", # stubbed + device_id="dummy-device" + if device_id is None + else device_id, # stubbed ) - requester = create_requester(user_id, app_service=app_service) + requester = create_requester( + user_id, app_service=app_service, device_id=device_id + ) request.requester = user_id if user_id in self._force_tracing_for_users: opentracing.force_tracing() opentracing.set_tag("authenticated_entity", user_id) opentracing.set_tag("user_id", user_id) + if device_id is not None: + opentracing.set_tag("device_id", device_id) opentracing.set_tag("appservice_id", app_service.id) return requester @@ -274,33 +284,81 @@ async def validate_appservice_can_control_user_id( 403, "Application service has not registered this user (%s)" % user_id ) - async def _get_appservice_user_id( + async def _get_appservice_user_id_and_device_id( self, request: Request - ) -> Tuple[Optional[str], Optional[ApplicationService]]: + ) -> Tuple[Optional[str], Optional[str], Optional[ApplicationService]]: + """ + Given a request, reads the request parameters to determine: + - whether it's an application service that's making this request + - what user the application service should be treated as controlling + (the user_id URI parameter allows an application service to masquerade + any applicable user in its namespace) + - what device the application service should be treated as controlling + (the device_id[^1] URI parameter allows an application service to masquerade + as any device that exists for the relevant user) + + [^1] Unstable and provided by MSC3202. + Must use `org.matrix.msc3202.device_id` in place of `device_id` for now. + + Returns: + 3-tuple of + (user ID?, device ID?, application service?) 
+ + Postconditions: + - If an application service is returned, so is a user ID + - A user ID is never returned without an application service + - A device ID is never returned without a user ID or an application service + - The returned application service, if present, is permitted to control the + returned user ID. + - The returned device ID, if present, has been checked to be a valid device ID + for the returned user ID. + """ + DEVICE_ID_ARG_NAME = b"org.matrix.msc3202.device_id" + app_service = self.store.get_app_service_by_token( self.get_access_token_from_request(request) ) if app_service is None: - return None, None + return None, None, None if app_service.ip_range_whitelist: ip_address = IPAddress(request.getClientIP()) if ip_address not in app_service.ip_range_whitelist: - return None, None + return None, None, None # This will always be set by the time Twisted calls us. assert request.args is not None - if b"user_id" not in request.args: - return app_service.sender, app_service + if b"user_id" in request.args: + effective_user_id = request.args[b"user_id"][0].decode("utf8") + await self.validate_appservice_can_control_user_id( + app_service, effective_user_id + ) + else: + effective_user_id = app_service.sender - user_id = request.args[b"user_id"][0].decode("utf8") - await self.validate_appservice_can_control_user_id(app_service, user_id) + effective_device_id: Optional[str] = None - if app_service.sender == user_id: - return app_service.sender, app_service + if ( + self.hs.config.experimental.msc3202_device_masquerading_enabled + and DEVICE_ID_ARG_NAME in request.args + ): + effective_device_id = request.args[DEVICE_ID_ARG_NAME][0].decode("utf8") + # We only just set this so it can't be None! + assert effective_device_id is not None + device_opt = await self.store.get_device( + effective_user_id, effective_device_id + ) + if device_opt is None: + # For now, use 400 M_EXCLUSIVE if the device doesn't exist. + # This is an open thread of discussion on MSC3202 as of 2021-12-09. + raise AuthError( + 400, + f"Application service trying to use a device that doesn't exist ('{effective_device_id}' for {effective_user_id})", + Codes.EXCLUSIVE, + ) - return user_id, app_service + return effective_user_id, effective_device_id, app_service async def get_user_by_access_token( self, diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index d78a15097c87..678c78d56512 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -49,3 +49,8 @@ def read_config(self, config: JsonDict, **kwargs): # MSC3030 (Jump to date API endpoint) self.msc3030_enabled: bool = experimental.get("msc3030_enabled", False) + + # The portion of MSC3202 which is related to device masquerading. + self.msc3202_device_masquerading_enabled: bool = experimental.get( + "msc3202_device_masquerading", False + ) diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 393259998857..273adb61fdae 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -128,6 +128,26 @@ async def get_device( allow_none=True, ) + async def get_device_opt( + self, user_id: str, device_id: str + ) -> Optional[Dict[str, Any]]: + """Retrieve a device. Only returns devices that are not marked as + hidden. + + Args: + user_id: The ID of the user which owns the device + device_id: The ID of the device to retrieve + Returns: + A dict containing the device information, or None if the device does not exist. 
+ """ + return await self.db_pool.simple_select_one( + table="devices", + keyvalues={"user_id": user_id, "device_id": device_id, "hidden": False}, + retcols=("user_id", "device_id", "display_name"), + desc="get_device", + allow_none=True, + ) + async def get_devices_by_user(self, user_id: str) -> Dict[str, Dict[str, str]]: """Retrieve all of a user's registered devices. Only returns devices that are not marked as hidden. diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index 3aa9ba3c43ac..a2dfa1ed0507 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -31,6 +31,7 @@ from tests import unittest from tests.test_utils import simple_async_mock +from tests.unittest import override_config from tests.utils import mock_getRawHeaders @@ -210,6 +211,69 @@ def test_get_user_by_req_appservice_valid_token_bad_user_id(self): request.requestHeaders.getRawHeaders = mock_getRawHeaders() self.get_failure(self.auth.get_user_by_req(request), AuthError) + @override_config({"experimental_features": {"msc3202_device_masquerading": True}}) + def test_get_user_by_req_appservice_valid_token_valid_device_id(self): + """ + Tests that when an application service passes the device_id URL parameter + with the ID of a valid device for the user in question, + the requester instance tracks that device ID. + """ + masquerading_user_id = b"@doppelganger:matrix.org" + masquerading_device_id = b"DOPPELDEVICE" + app_service = Mock( + token="foobar", url="a_url", sender=self.test_user, ip_range_whitelist=None + ) + app_service.is_interested_in_user = Mock(return_value=True) + self.store.get_app_service_by_token = Mock(return_value=app_service) + # This just needs to return a truth-y value. + self.store.get_user_by_id = simple_async_mock({"is_guest": False}) + self.store.get_user_by_access_token = simple_async_mock(None) + # This also needs to just return a truth-y value + self.store.get_device = simple_async_mock({"hidden": False}) + + request = Mock(args={}) + request.getClientIP.return_value = "127.0.0.1" + request.args[b"access_token"] = [self.test_token] + request.args[b"user_id"] = [masquerading_user_id] + request.args[b"org.matrix.msc3202.device_id"] = [masquerading_device_id] + request.requestHeaders.getRawHeaders = mock_getRawHeaders() + requester = self.get_success(self.auth.get_user_by_req(request)) + self.assertEquals( + requester.user.to_string(), masquerading_user_id.decode("utf8") + ) + self.assertEquals(requester.device_id, masquerading_device_id.decode("utf8")) + + @override_config({"experimental_features": {"msc3202_device_masquerading": True}}) + def test_get_user_by_req_appservice_valid_token_invalid_device_id(self): + """ + Tests that when an application service passes the device_id URL parameter + with an ID that is not a valid device ID for the user in question, + the request fails with the appropriate error code. + """ + masquerading_user_id = b"@doppelganger:matrix.org" + masquerading_device_id = b"NOT_A_REAL_DEVICE_ID" + app_service = Mock( + token="foobar", url="a_url", sender=self.test_user, ip_range_whitelist=None + ) + app_service.is_interested_in_user = Mock(return_value=True) + self.store.get_app_service_by_token = Mock(return_value=app_service) + # This just needs to return a truth-y value. 
+ self.store.get_user_by_id = simple_async_mock({"is_guest": False}) + self.store.get_user_by_access_token = simple_async_mock(None) + # This also needs to just return a falsey value + self.store.get_device = simple_async_mock(None) + + request = Mock(args={}) + request.getClientIP.return_value = "127.0.0.1" + request.args[b"access_token"] = [self.test_token] + request.args[b"user_id"] = [masquerading_user_id] + request.args[b"org.matrix.msc3202.device_id"] = [masquerading_device_id] + request.requestHeaders.getRawHeaders = mock_getRawHeaders() + + failure = self.get_failure(self.auth.get_user_by_req(request), AuthError) + self.assertEquals(failure.value.code, 400) + self.assertEquals(failure.value.errcode, Codes.EXCLUSIVE) + def test_get_user_from_macaroon(self): self.store.get_user_by_access_token = simple_async_mock( TokenLookupResult(user_id="@baldrick:matrix.org", device_id="device") From 323151b787cbc511919dd54a3acea76dc2ad0603 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 15 Dec 2021 11:10:02 -0500 Subject: [PATCH 002/474] Convert EventStreamResult to attrs. (#11574) --- changelog.d/11574.misc | 1 + synapse/handlers/events.py | 7 ++++--- synapse/notifier.py | 25 +++++++++++++++++++------ 3 files changed, 24 insertions(+), 9 deletions(-) create mode 100644 changelog.d/11574.misc diff --git a/changelog.d/11574.misc b/changelog.d/11574.misc new file mode 100644 index 000000000000..2b090a3780d0 --- /dev/null +++ b/changelog.d/11574.misc @@ -0,0 +1 @@ +Convert `EventStreamResult` from a `namedtuple` to `attrs` to improve type hints. diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 32b0254c5f08..afed80ba14c0 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -79,13 +79,14 @@ async def get_stream( # thundering herds on restart. timeout = random.randint(int(timeout * 0.9), int(timeout * 1.1)) - events, tokens = await self.notifier.get_events_for( + stream_result = await self.notifier.get_events_for( auth_user, pagin_config, timeout, is_guest=is_guest, explicit_room_id=room_id, ) + events = stream_result.events time_now = self.clock.time_msec() @@ -128,8 +129,8 @@ async def get_stream( chunk = { "chunk": chunks, - "start": await tokens[0].to_string(self.store), - "end": await tokens[1].to_string(self.store), + "start": await stream_result.start_token.to_string(self.store), + "end": await stream_result.end_token.to_string(self.store), } return chunk diff --git a/synapse/notifier.py b/synapse/notifier.py index 60e5409895e6..bbabdb05875c 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -13,7 +13,6 @@ # limitations under the License. 
import logging -from collections import namedtuple from typing import ( Awaitable, Callable, @@ -44,7 +43,13 @@ from synapse.logging.utils import log_function from synapse.metrics import LaterGauge from synapse.streams.config import PaginationConfig -from synapse.types import PersistedEventPosition, RoomStreamToken, StreamToken, UserID +from synapse.types import ( + JsonDict, + PersistedEventPosition, + RoomStreamToken, + StreamToken, + UserID, +) from synapse.util.async_helpers import ObservableDeferred, timeout_deferred from synapse.util.metrics import Measure from synapse.visibility import filter_events_for_client @@ -178,7 +183,12 @@ def new_listener(self, token: StreamToken) -> _NotificationListener: return _NotificationListener(self.notify_deferred.observe()) -class EventStreamResult(namedtuple("EventStreamResult", ("events", "tokens"))): +@attr.s(slots=True, frozen=True, auto_attribs=True) +class EventStreamResult: + events: List[Union[JsonDict, EventBase]] + start_token: StreamToken + end_token: StreamToken + def __bool__(self): return bool(self.events) @@ -582,9 +592,12 @@ async def check_for_updates( before_token: StreamToken, after_token: StreamToken ) -> EventStreamResult: if after_token == before_token: - return EventStreamResult([], (from_token, from_token)) + return EventStreamResult([], from_token, from_token) - events: List[EventBase] = [] + # The events fetched from each source are a JsonDict, EventBase, or + # UserPresenceState, but see below for UserPresenceState being + # converted to JsonDict. + events: List[Union[JsonDict, EventBase]] = [] end_token = from_token for name, source in self.event_sources.sources.get_sources(): @@ -623,7 +636,7 @@ async def check_for_updates( events.extend(new_events) end_token = end_token.copy_and_replace(keyname, new_key) - return EventStreamResult(events, (from_token, end_token)) + return EventStreamResult(events, from_token, end_token) user_id_for_stream = user.to_string() if is_peeking: From f901f8b70eef7c1ac62b68587c0d6cd2e1e0febe Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 15 Dec 2021 12:00:50 -0500 Subject: [PATCH 003/474] Require Collections as the parameters for simple_* methods. (#11580) Instead of Iterable since the generators are not allowed due to the potential for their re-use. --- changelog.d/11580.misc | 1 + synapse/storage/database.py | 28 +++++++++--------------- synapse/storage/databases/main/pusher.py | 23 +------------------ 3 files changed, 12 insertions(+), 40 deletions(-) create mode 100644 changelog.d/11580.misc diff --git a/changelog.d/11580.misc b/changelog.d/11580.misc new file mode 100644 index 000000000000..2c48e22de0dd --- /dev/null +++ b/changelog.d/11580.misc @@ -0,0 +1 @@ +Add some safety checks that storage functions are used correctly. diff --git a/synapse/storage/database.py b/synapse/storage/database.py index a219999f155f..2cacc7dd6c5d 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -55,6 +55,7 @@ from synapse.storage.background_updates import BackgroundUpdater from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine from synapse.storage.types import Connection, Cursor +from synapse.util.iterutils import batch_iter if TYPE_CHECKING: from synapse.server import HomeServer @@ -986,7 +987,7 @@ async def simple_insert_many_values( self, table: str, keys: Collection[str], - values: Iterable[Iterable[Any]], + values: Collection[Collection[Any]], desc: str, ) -> None: """Executes an INSERT query on the named table. 
@@ -1427,7 +1428,7 @@ async def simple_select_one( self, table: str, keyvalues: Dict[str, Any], - retcols: Iterable[str], + retcols: Collection[str], allow_none: Literal[False] = False, desc: str = "simple_select_one", ) -> Dict[str, Any]: @@ -1438,7 +1439,7 @@ async def simple_select_one( self, table: str, keyvalues: Dict[str, Any], - retcols: Iterable[str], + retcols: Collection[str], allow_none: Literal[True] = True, desc: str = "simple_select_one", ) -> Optional[Dict[str, Any]]: @@ -1448,7 +1449,7 @@ async def simple_select_one( self, table: str, keyvalues: Dict[str, Any], - retcols: Iterable[str], + retcols: Collection[str], allow_none: bool = False, desc: str = "simple_select_one", ) -> Optional[Dict[str, Any]]: @@ -1618,7 +1619,7 @@ async def simple_select_list( self, table: str, keyvalues: Optional[Dict[str, Any]], - retcols: Iterable[str], + retcols: Collection[str], desc: str = "simple_select_list", ) -> List[Dict[str, Any]]: """Executes a SELECT query on the named table, which may return zero or @@ -1681,7 +1682,7 @@ async def simple_select_many_batch( table: str, column: str, iterable: Iterable[Any], - retcols: Iterable[str], + retcols: Collection[str], keyvalues: Optional[Dict[str, Any]] = None, desc: str = "simple_select_many_batch", batch_size: int = 100, @@ -1704,16 +1705,7 @@ async def simple_select_many_batch( results: List[Dict[str, Any]] = [] - if not iterable: - return results - - # iterables can not be sliced, so convert it to a list first - it_list = list(iterable) - - chunks = [ - it_list[i : i + batch_size] for i in range(0, len(it_list), batch_size) - ] - for chunk in chunks: + for chunk in batch_iter(iterable, batch_size): rows = await self.runInteraction( desc, self.simple_select_many_txn, @@ -1853,7 +1845,7 @@ def simple_select_one_txn( txn: LoggingTransaction, table: str, keyvalues: Dict[str, Any], - retcols: Iterable[str], + retcols: Collection[str], allow_none: bool = False, ) -> Optional[Dict[str, Any]]: select_sql = "SELECT %s FROM %s WHERE %s" % ( @@ -2146,7 +2138,7 @@ async def simple_search_list( table: str, term: Optional[str], col: str, - retcols: Iterable[str], + retcols: Collection[str], desc="simple_search_list", ) -> Optional[List[Dict[str, Any]]]: """Executes a SELECT query on the named table, which may return zero or diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py index b73ce53c9156..7ab681ed6f5f 100644 --- a/synapse/storage/databases/main/pusher.py +++ b/synapse/storage/databases/main/pusher.py @@ -22,7 +22,7 @@ from synapse.storage.util.id_generators import StreamIdGenerator from synapse.types import JsonDict from synapse.util import json_encoder -from synapse.util.caches.descriptors import cached, cachedList +from synapse.util.caches.descriptors import cached if TYPE_CHECKING: from synapse.server import HomeServer @@ -196,27 +196,6 @@ async def get_if_user_has_pusher(self, user_id: str): # This only exists for the cachedList decorator raise NotImplementedError() - @cachedList( - cached_method_name="get_if_user_has_pusher", - list_name="user_ids", - num_args=1, - ) - async def get_if_users_have_pushers( - self, user_ids: Iterable[str] - ) -> Dict[str, bool]: - rows = await self.db_pool.simple_select_many_batch( - table="pushers", - column="user_name", - iterable=user_ids, - retcols=["user_name"], - desc="get_if_users_have_pushers", - ) - - result = {user_id: False for user_id in user_ids} - result.update({r["user_name"]: True for r in rows}) - - return result - async def 
update_pusher_last_stream_ordering( self, app_id, pushkey, user_id, last_stream_ordering ) -> None: From c7fe32edb4da58a262ec37ce6fde7bef95920872 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Wed, 15 Dec 2021 18:00:48 +0000 Subject: [PATCH 004/474] Add type hints to `synapse/storage/databases/main/room.py` (#11575) --- changelog.d/11575.misc | 1 + mypy.ini | 4 +- synapse/handlers/room_member.py | 6 +- synapse/storage/databases/main/__init__.py | 1 - synapse/storage/databases/main/room.py | 173 ++++++++++++--------- 5 files changed, 108 insertions(+), 77 deletions(-) create mode 100644 changelog.d/11575.misc diff --git a/changelog.d/11575.misc b/changelog.d/11575.misc new file mode 100644 index 000000000000..d451940bf216 --- /dev/null +++ b/changelog.d/11575.misc @@ -0,0 +1 @@ +Add missing type hints to storage classes. diff --git a/mypy.ini b/mypy.ini index e38ad635aafb..cbe1e8302cf4 100644 --- a/mypy.ini +++ b/mypy.ini @@ -37,7 +37,6 @@ exclude = (?x) |synapse/storage/databases/main/purge_events.py |synapse/storage/databases/main/push_rule.py |synapse/storage/databases/main/receipts.py - |synapse/storage/databases/main/room.py |synapse/storage/databases/main/roommember.py |synapse/storage/databases/main/search.py |synapse/storage/databases/main/state.py @@ -205,6 +204,9 @@ disallow_untyped_defs = True [mypy-synapse.storage.databases.main.events_worker] disallow_untyped_defs = True +[mypy-synapse.storage.databases.main.room] +disallow_untyped_defs = True + [mypy-synapse.storage.databases.main.room_batch] disallow_untyped_defs = True diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 447e3ce5713b..6aa910dd10f9 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -1020,7 +1020,7 @@ async def transfer_room_state_on_room_upgrade( # Add new room to the room directory if the old room was there # Remove old room from the room directory old_room = await self.store.get_room(old_room_id) - if old_room and old_room["is_public"]: + if old_room is not None and old_room["is_public"]: await self.store.set_room_is_public(old_room_id, False) await self.store.set_room_is_public(room_id, True) @@ -1031,7 +1031,9 @@ async def transfer_room_state_on_room_upgrade( local_group_ids = await self.store.get_local_groups_for_room(old_room_id) for group_id in local_group_ids: # Add new the new room to those groups - await self.store.add_room_to_group(group_id, room_id, old_room["is_public"]) + await self.store.add_room_to_group( + group_id, room_id, old_room is not None and old_room["is_public"] + ) # Remove the old room from those groups await self.store.remove_room_from_group(group_id, old_room_id) diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 716b25dd349a..a594223fc6ca 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -149,7 +149,6 @@ def __init__( ], ) - self._event_reports_id_gen = IdGenerator(db_conn, "event_reports", "id") self._push_rule_id_gen = IdGenerator(db_conn, "push_rules", "id") self._push_rules_enable_id_gen = IdGenerator(db_conn, "push_rules_enable", "id") self._group_updates_id_gen = StreamIdGenerator( diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 6cf6cc8484ba..4472335af901 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -17,7 +17,7 @@ import logging from abc import 
abstractmethod from enum import Enum -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple +from typing import TYPE_CHECKING, Any, Awaitable, Dict, List, Optional, Tuple, cast from synapse.api.constants import EventContentFields, EventTypes, JoinRules from synapse.api.errors import StoreError @@ -29,8 +29,9 @@ LoggingDatabaseConnection, LoggingTransaction, ) -from synapse.storage.databases.main.search import SearchStore +from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore from synapse.storage.types import Cursor +from synapse.storage.util.id_generators import IdGenerator from synapse.types import JsonDict, ThirdPartyInstanceID from synapse.util import json_encoder from synapse.util.caches.descriptors import cached @@ -75,7 +76,7 @@ class RoomSortOrder(Enum): STATE_EVENTS = "state_events" -class RoomWorkerStore(SQLBaseStore): +class RoomWorkerStore(CacheInvalidationWorkerStore): def __init__( self, database: DatabasePool, @@ -92,7 +93,7 @@ async def store_room( room_creator_user_id: str, is_public: bool, room_version: RoomVersion, - ): + ) -> None: """Stores a room. Args: @@ -120,7 +121,7 @@ async def store_room( logger.error("store_room with room_id=%s failed: %s", room_id, e) raise StoreError(500, "Problem creating room.") - async def get_room(self, room_id: str) -> dict: + async def get_room(self, room_id: str) -> Optional[Dict[str, Any]]: """Retrieve a room. Args: @@ -145,7 +146,9 @@ async def get_room_with_stats(self, room_id: str) -> Optional[Dict[str, Any]]: A dict containing the room information, or None if the room is unknown. """ - def get_room_with_stats_txn(txn, room_id): + def get_room_with_stats_txn( + txn: LoggingTransaction, room_id: str + ) -> Optional[Dict[str, Any]]: sql = """ SELECT room_id, state.name, state.canonical_alias, curr.joined_members, curr.local_users_in_room AS joined_local_members, rooms.room_version AS version, @@ -194,7 +197,7 @@ async def count_public_rooms( ignore_non_federatable: If true filters out non-federatable rooms """ - def _count_public_rooms_txn(txn): + def _count_public_rooms_txn(txn: LoggingTransaction) -> int: query_args = [] if network_tuple: @@ -235,7 +238,7 @@ def _count_public_rooms_txn(txn): } txn.execute(sql, query_args) - return txn.fetchone()[0] + return cast(Tuple[int], txn.fetchone())[0] return await self.db_pool.runInteraction( "count_public_rooms", _count_public_rooms_txn @@ -244,11 +247,11 @@ def _count_public_rooms_txn(txn): async def get_room_count(self) -> int: """Retrieve the total number of rooms.""" - def f(txn): + def f(txn: LoggingTransaction) -> int: sql = "SELECT count(*) FROM rooms" txn.execute(sql) - row = txn.fetchone() - return row[0] or 0 + row = cast(Tuple[int], txn.fetchone()) + return row[0] return await self.db_pool.runInteraction("get_rooms", f) @@ -260,7 +263,7 @@ async def get_largest_public_rooms( bounds: Optional[Tuple[int, str]], forwards: bool, ignore_non_federatable: bool = False, - ): + ) -> List[Dict[str, Any]]: """Gets the largest public rooms (where largest is in terms of joined members, as tracked in the statistics table). @@ -381,7 +384,9 @@ async def get_largest_public_rooms( LIMIT ? 
""" - def _get_largest_public_rooms_txn(txn): + def _get_largest_public_rooms_txn( + txn: LoggingTransaction, + ) -> List[Dict[str, Any]]: txn.execute(sql, query_args) results = self.db_pool.cursor_to_dict(txn) @@ -444,7 +449,7 @@ async def get_rooms_paginate( """ # Filter room names by a string where_statement = "" - search_pattern = [] + search_pattern: List[object] = [] if search_term: where_statement = """ WHERE LOWER(state.name) LIKE ? @@ -552,7 +557,9 @@ async def get_rooms_paginate( where_statement, ) - def _get_rooms_paginate_txn(txn): + def _get_rooms_paginate_txn( + txn: LoggingTransaction, + ) -> Tuple[List[Dict[str, Any]], int]: # Add the search term into the WHERE clause # and execute the data query txn.execute(info_sql, search_pattern + [limit, start]) @@ -584,7 +591,7 @@ def _get_rooms_paginate_txn(txn): # Add the search term into the WHERE clause if present txn.execute(count_sql, search_pattern) - room_count = txn.fetchone() + room_count = cast(Tuple[int], txn.fetchone()) return rooms, room_count[0] return await self.db_pool.runInteraction( @@ -629,7 +636,7 @@ async def set_ratelimit_for_user( burst_count: How many actions that can be performed before being limited. """ - def set_ratelimit_txn(txn): + def set_ratelimit_txn(txn: LoggingTransaction) -> None: self.db_pool.simple_upsert_txn( txn, table="ratelimit_override", @@ -652,7 +659,7 @@ async def delete_ratelimit_for_user(self, user_id: str) -> None: user_id: user ID of the user """ - def delete_ratelimit_txn(txn): + def delete_ratelimit_txn(txn: LoggingTransaction) -> None: row = self.db_pool.simple_select_one_txn( txn, table="ratelimit_override", @@ -676,7 +683,7 @@ def delete_ratelimit_txn(txn): await self.db_pool.runInteraction("delete_ratelimit", delete_ratelimit_txn) @cached() - async def get_retention_policy_for_room(self, room_id): + async def get_retention_policy_for_room(self, room_id: str) -> Dict[str, int]: """Get the retention policy for a given room. If no retention policy has been found for this room, returns a policy defined @@ -685,13 +692,15 @@ async def get_retention_policy_for_room(self, room_id): configuration). Args: - room_id (str): The ID of the room to get the retention policy of. + room_id: The ID of the room to get the retention policy of. Returns: - dict[int, int]: "min_lifetime" and "max_lifetime" for this room. + A dict containing "min_lifetime" and "max_lifetime" for this room. """ - def get_retention_policy_for_room_txn(txn): + def get_retention_policy_for_room_txn( + txn: LoggingTransaction, + ) -> List[Dict[str, Optional[int]]]: txn.execute( """ SELECT min_lifetime, max_lifetime FROM room_retention @@ -716,19 +725,23 @@ def get_retention_policy_for_room_txn(txn): "max_lifetime": self.config.retention.retention_default_max_lifetime, } - row = ret[0] + min_lifetime = ret[0]["min_lifetime"] + max_lifetime = ret[0]["max_lifetime"] # If one of the room's policy's attributes isn't defined, use the matching # attribute from the default policy. # The default values will be None if no default policy has been defined, or if one # of the attributes is missing from the default policy. 
- if row["min_lifetime"] is None: - row["min_lifetime"] = self.config.retention.retention_default_min_lifetime + if min_lifetime is None: + min_lifetime = self.config.retention.retention_default_min_lifetime - if row["max_lifetime"] is None: - row["max_lifetime"] = self.config.retention.retention_default_max_lifetime + if max_lifetime is None: + max_lifetime = self.config.retention.retention_default_max_lifetime - return row + return { + "min_lifetime": min_lifetime, + "max_lifetime": max_lifetime, + } async def get_media_mxcs_in_room(self, room_id: str) -> Tuple[List[str], List[str]]: """Retrieves all the local and remote media MXC URIs in a given room @@ -740,7 +753,9 @@ async def get_media_mxcs_in_room(self, room_id: str) -> Tuple[List[str], List[st The local and remote media as a lists of the media IDs. """ - def _get_media_mxcs_in_room_txn(txn): + def _get_media_mxcs_in_room_txn( + txn: LoggingTransaction, + ) -> Tuple[List[str], List[str]]: local_mxcs, remote_mxcs = self._get_media_mxcs_in_room_txn(txn, room_id) local_media_mxcs = [] remote_media_mxcs = [] @@ -766,7 +781,7 @@ async def quarantine_media_ids_in_room( logger.info("Quarantining media in room: %s", room_id) - def _quarantine_media_in_room_txn(txn): + def _quarantine_media_in_room_txn(txn: LoggingTransaction) -> int: local_mxcs, remote_mxcs = self._get_media_mxcs_in_room_txn(txn, room_id) return self._quarantine_media_txn( txn, local_mxcs, remote_mxcs, quarantined_by @@ -776,13 +791,11 @@ def _quarantine_media_in_room_txn(txn): "quarantine_media_in_room", _quarantine_media_in_room_txn ) - def _get_media_mxcs_in_room_txn(self, txn, room_id): + def _get_media_mxcs_in_room_txn( + self, txn: LoggingTransaction, room_id: str + ) -> Tuple[List[str], List[Tuple[str, str]]]: """Retrieves all the local and remote media MXC URIs in a given room - Args: - txn (cursor) - room_id (str) - Returns: The local and remote media as a lists of tuples where the key is the hostname and the value is the media ID. @@ -850,7 +863,7 @@ async def quarantine_media_by_id( logger.info("Quarantining media: %s/%s", server_name, media_id) is_local = server_name == self.config.server.server_name - def _quarantine_media_by_id_txn(txn): + def _quarantine_media_by_id_txn(txn: LoggingTransaction) -> int: local_mxcs = [media_id] if is_local else [] remote_mxcs = [(server_name, media_id)] if not is_local else [] @@ -872,7 +885,7 @@ async def quarantine_media_ids_by_user( quarantined_by: The ID of the user who made the quarantine request """ - def _quarantine_media_by_user_txn(txn): + def _quarantine_media_by_user_txn(txn: LoggingTransaction) -> int: local_media_ids = self._get_media_ids_by_user_txn(txn, user_id) return self._quarantine_media_txn(txn, local_media_ids, [], quarantined_by) @@ -880,7 +893,9 @@ def _quarantine_media_by_user_txn(txn): "quarantine_media_by_user", _quarantine_media_by_user_txn ) - def _get_media_ids_by_user_txn(self, txn, user_id: str, filter_quarantined=True): + def _get_media_ids_by_user_txn( + self, txn: LoggingTransaction, user_id: str, filter_quarantined: bool = True + ) -> List[str]: """Retrieves local media IDs by a given user Args: @@ -909,7 +924,7 @@ def _get_media_ids_by_user_txn(self, txn, user_id: str, filter_quarantined=True) def _quarantine_media_txn( self, - txn, + txn: LoggingTransaction, local_mxcs: List[str], remote_mxcs: List[Tuple[str, str]], quarantined_by: Optional[str], @@ -937,12 +952,15 @@ def _quarantine_media_txn( # set quarantine if quarantined_by is not None: sql += "AND safe_from_quarantine = ?" 
- rows = [(quarantined_by, media_id, False) for media_id in local_mxcs] + txn.executemany( + sql, [(quarantined_by, media_id, False) for media_id in local_mxcs] + ) # remove from quarantine else: - rows = [(quarantined_by, media_id) for media_id in local_mxcs] + txn.executemany( + sql, [(quarantined_by, media_id) for media_id in local_mxcs] + ) - txn.executemany(sql, rows) # Note that a rowcount of -1 can be used to indicate no rows were affected. total_media_quarantined = txn.rowcount if txn.rowcount > 0 else 0 @@ -960,7 +978,7 @@ def _quarantine_media_txn( async def get_rooms_for_retention_period_in_range( self, min_ms: Optional[int], max_ms: Optional[int], include_null: bool = False - ) -> Dict[str, dict]: + ) -> Dict[str, Dict[str, Optional[int]]]: """Retrieves all of the rooms within the given retention range. Optionally includes the rooms which don't have a retention policy. @@ -980,7 +998,9 @@ async def get_rooms_for_retention_period_in_range( "min_lifetime" (int|None), and "max_lifetime" (int|None). """ - def get_rooms_for_retention_period_in_range_txn(txn): + def get_rooms_for_retention_period_in_range_txn( + txn: LoggingTransaction, + ) -> Dict[str, Dict[str, Optional[int]]]: range_conditions = [] args = [] @@ -1067,8 +1087,6 @@ def __init__( ): super().__init__(database, db_conn, hs) - self.config = hs.config - self.db_pool.updates.register_background_update_handler( "insert_room_retention", self._background_insert_retention, @@ -1099,7 +1117,9 @@ def __init__( self._background_populate_rooms_creator_column, ) - async def _background_insert_retention(self, progress, batch_size): + async def _background_insert_retention( + self, progress: JsonDict, batch_size: int + ) -> int: """Retrieves a list of all rooms within a range and inserts an entry for each of them into the room_retention table. NULLs the property's columns if missing from the retention event in the room's @@ -1109,7 +1129,7 @@ async def _background_insert_retention(self, progress, batch_size): last_room = progress.get("room_id", "") - def _background_insert_retention_txn(txn): + def _background_insert_retention_txn(txn: LoggingTransaction) -> bool: txn.execute( """ SELECT state.room_id, state.event_id, events.json @@ -1168,15 +1188,17 @@ def _background_insert_retention_txn(txn): return batch_size async def _background_add_rooms_room_version_column( - self, progress: dict, batch_size: int - ): + self, progress: JsonDict, batch_size: int + ) -> int: """Background update to go and add room version information to `rooms` table from `current_state_events` table. 
""" last_room_id = progress.get("room_id", "") - def _background_add_rooms_room_version_column_txn(txn: LoggingTransaction): + def _background_add_rooms_room_version_column_txn( + txn: LoggingTransaction, + ) -> bool: sql = """ SELECT room_id, json FROM current_state_events INNER JOIN event_json USING (room_id, event_id) @@ -1237,7 +1259,7 @@ def _background_add_rooms_room_version_column_txn(txn: LoggingTransaction): return batch_size async def _remove_tombstoned_rooms_from_directory( - self, progress, batch_size + self, progress: JsonDict, batch_size: int ) -> int: """Removes any rooms with tombstone events from the room directory @@ -1247,7 +1269,7 @@ async def _remove_tombstoned_rooms_from_directory( last_room = progress.get("room_id", "") - def _get_rooms(txn): + def _get_rooms(txn: LoggingTransaction) -> List[str]: txn.execute( """ SELECT room_id @@ -1285,7 +1307,7 @@ def _get_rooms(txn): return len(rooms) @abstractmethod - def set_room_is_public(self, room_id, is_public): + def set_room_is_public(self, room_id: str, is_public: bool) -> Awaitable[None]: # this will need to be implemented if a background update is performed with # existing (tombstoned, public) rooms in the database. # @@ -1332,7 +1354,7 @@ async def _background_populate_room_depth_min_depth2( 32-bit integer field. """ - def process(txn: Cursor) -> int: + def process(txn: LoggingTransaction) -> int: last_room = progress.get("last_room", "") txn.execute( """ @@ -1389,15 +1411,17 @@ def process(txn: Cursor) -> None: return 0 async def _background_populate_rooms_creator_column( - self, progress: dict, batch_size: int - ): + self, progress: JsonDict, batch_size: int + ) -> int: """Background update to go and add creator information to `rooms` table from `current_state_events` table. """ last_room_id = progress.get("room_id", "") - def _background_populate_rooms_creator_column_txn(txn: LoggingTransaction): + def _background_populate_rooms_creator_column_txn( + txn: LoggingTransaction, + ) -> bool: sql = """ SELECT room_id, json FROM event_json INNER JOIN rooms AS room USING (room_id) @@ -1448,7 +1472,7 @@ def _background_populate_rooms_creator_column_txn(txn: LoggingTransaction): return batch_size -class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore): +class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore): def __init__( self, database: DatabasePool, @@ -1457,11 +1481,11 @@ def __init__( ): super().__init__(database, db_conn, hs) - self.config = hs.config + self._event_reports_id_gen = IdGenerator(db_conn, "event_reports", "id") async def upsert_room_on_join( self, room_id: str, room_version: RoomVersion, auth_events: List[EventBase] - ): + ) -> None: """Ensure that the room is stored in the table Called when we join a room over federation, and overwrites any room version @@ -1507,7 +1531,7 @@ async def upsert_room_on_join( async def maybe_store_room_on_outlier_membership( self, room_id: str, room_version: RoomVersion - ): + ) -> None: """ When we receive an invite or any other event over federation that may relate to a room we are not in, store the version of the room if we don't already know the room version. @@ -1547,8 +1571,8 @@ async def set_room_is_public(self, room_id: str, is_public: bool) -> None: self.hs.get_notifier().on_new_replication_data() async def set_room_is_public_appservice( - self, room_id, appservice_id, network_id, is_public - ): + self, room_id: str, appservice_id: str, network_id: str, is_public: bool + ) -> None: """Edit the appservice/network specific public room list. 
Each appservice can have a number of published room lists associated @@ -1557,11 +1581,10 @@ async def set_room_is_public_appservice( network. Args: - room_id (str) - appservice_id (str) - network_id (str) - is_public (bool): Whether to publish or unpublish the room from the - list. + room_id + appservice_id + network_id + is_public: Whether to publish or unpublish the room from the list. """ if is_public: @@ -1626,7 +1649,9 @@ async def get_event_report(self, report_id: int) -> Optional[Dict[str, Any]]: event_report: json list of information from event report """ - def _get_event_report_txn(txn, report_id): + def _get_event_report_txn( + txn: LoggingTransaction, report_id: int + ) -> Optional[Dict[str, Any]]: sql = """ SELECT @@ -1698,9 +1723,11 @@ async def get_event_reports_paginate( count: total number of event reports matching the filter criteria """ - def _get_event_reports_paginate_txn(txn): + def _get_event_reports_paginate_txn( + txn: LoggingTransaction, + ) -> Tuple[List[Dict[str, Any]], int]: filters = [] - args = [] + args: List[object] = [] if user_id: filters.append("er.user_id LIKE ?") @@ -1724,7 +1751,7 @@ def _get_event_reports_paginate_txn(txn): where_clause ) txn.execute(sql, args) - count = txn.fetchone()[0] + count = cast(Tuple[int], txn.fetchone())[0] sql = """ SELECT From 43f5cc7adc02a05ba4075b8aab3b479bda67f441 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 16 Dec 2021 11:25:37 -0600 Subject: [PATCH 005/474] Add MSC2716 and MSC3030 to `/versions` -> `unstable_features` (#11582) As suggested in https://github.com/matrix-org/matrix-react-sdk/pull/7372#discussion_r769523369 --- changelog.d/11582.misc | 1 + synapse/config/experimental.py | 2 +- synapse/rest/client/versions.py | 4 ++++ 3 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11582.misc diff --git a/changelog.d/11582.misc b/changelog.d/11582.misc new file mode 100644 index 000000000000..a0291f64e271 --- /dev/null +++ b/changelog.d/11582.misc @@ -0,0 +1 @@ +Add [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) and [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) to `/versions` -> `unstable_features` to detect server support. 
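The changelog entry above is the client-visible summary of this patch: both experimental flags are surfaced in the `unstable_features` map of the `/versions` endpoint so that clients can feature-detect server support. As a rough illustration of that detection (not part of the patch itself; the homeserver URL is a placeholder, while the flag names come from the `versions.py` hunk below):

    import json
    from urllib.request import urlopen

    HOMESERVER = "https://matrix.example.org"  # placeholder homeserver URL

    # /versions advertises optional server features under "unstable_features".
    with urlopen(f"{HOMESERVER}/_matrix/client/versions") as resp:
        versions = json.load(resp)

    unstable = versions.get("unstable_features", {})
    print("MSC2716 (importing historical messages):", unstable.get("org.matrix.msc2716", False))
    print("MSC3030 (jump to date):", unstable.get("org.matrix.msc3030", False))

Both flags report False unless the corresponding `msc2716_enabled` / `msc3030_enabled` options are set in the experimental config, as the `experimental.py` hunk below shows.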
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 678c78d56512..dbaeb1091861 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -32,7 +32,7 @@ def read_config(self, config: JsonDict, **kwargs): # MSC3026 (busy presence state) self.msc3026_enabled: bool = experimental.get("msc3026_enabled", False) - # MSC2716 (backfill existing history) + # MSC2716 (importing historical messages) self.msc2716_enabled: bool = experimental.get("msc2716_enabled", False) # MSC2285 (hidden read receipts) diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 8d888f456520..2290c57c126e 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -93,6 +93,10 @@ def on_GET(self, request: Request) -> Tuple[int, JsonDict]: "org.matrix.msc3026.busy_presence": self.config.experimental.msc3026_enabled, # Supports receiving hidden read receipts as per MSC2285 "org.matrix.msc2285": self.config.experimental.msc2285_enabled, + # Adds support for importing historical messages as per MSC2716 + "org.matrix.msc2716": self.config.experimental.msc2716_enabled, + # Adds support for jump to date endpoints (/timestamp_to_event) as per MSC3030 + "org.matrix.msc3030": self.config.experimental.msc3030_enabled, }, }, ) From 1847d027e67e41f4cba132b854fb865055998d16 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Thu, 16 Dec 2021 20:59:35 +0100 Subject: [PATCH 006/474] Add type hints to `synapse/storage/databases/main/transactions.py` (#11589) --- changelog.d/11589.misc | 1 + mypy.ini | 4 +- .../storage/databases/main/transactions.py | 49 ++++++++++--------- 3 files changed, 29 insertions(+), 25 deletions(-) create mode 100644 changelog.d/11589.misc diff --git a/changelog.d/11589.misc b/changelog.d/11589.misc new file mode 100644 index 000000000000..8e405b922674 --- /dev/null +++ b/changelog.d/11589.misc @@ -0,0 +1 @@ +Add missing type hints to storage classes. \ No newline at end of file diff --git a/mypy.ini b/mypy.ini index cbe1e8302cf4..c546487bdb4f 100644 --- a/mypy.ini +++ b/mypy.ini @@ -41,7 +41,6 @@ exclude = (?x) |synapse/storage/databases/main/search.py |synapse/storage/databases/main/state.py |synapse/storage/databases/main/stats.py - |synapse/storage/databases/main/transactions.py |synapse/storage/databases/main/user_directory.py |synapse/storage/schema/ @@ -216,6 +215,9 @@ disallow_untyped_defs = True [mypy-synapse.storage.databases.main.state_deltas] disallow_untyped_defs = True +[mypy-synapse.storage.databases.main.transactions] +disallow_untyped_defs = True + [mypy-synapse.storage.databases.main.user_erasure_store] disallow_untyped_defs = True diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index 54b41513ee13..6c299cafa511 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -13,9 +13,8 @@ # limitations under the License. 
import logging -from collections import namedtuple from enum import Enum -from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple +from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, cast import attr from canonicaljson import encode_canonical_json @@ -39,16 +38,6 @@ logger = logging.getLogger(__name__) -_TransactionRow = namedtuple( - "_TransactionRow", - ("id", "transaction_id", "destination", "ts", "response_code", "response_json"), -) - -_UpdateTransactionRow = namedtuple( - "_TransactionRow", ("response_code", "response_json") -) - - class DestinationSortOrder(Enum): """Enum to define the sorting method used when returning destinations.""" @@ -91,7 +80,7 @@ async def _cleanup_transactions(self) -> None: now = self._clock.time_msec() month_ago = now - 30 * 24 * 60 * 60 * 1000 - def _cleanup_transactions_txn(txn): + def _cleanup_transactions_txn(txn: LoggingTransaction) -> None: txn.execute("DELETE FROM received_transactions WHERE ts < ?", (month_ago,)) await self.db_pool.runInteraction( @@ -121,7 +110,9 @@ async def get_received_txn_response( origin, ) - def _get_received_txn_response(self, txn, transaction_id, origin): + def _get_received_txn_response( + self, txn: LoggingTransaction, transaction_id: str, origin: str + ) -> Optional[Tuple[int, JsonDict]]: result = self.db_pool.simple_select_one_txn( txn, table="received_transactions", @@ -196,7 +187,7 @@ async def get_destination_retry_timings( return result def _get_destination_retry_timings( - self, txn, destination: str + self, txn: LoggingTransaction, destination: str ) -> Optional[DestinationRetryTimings]: result = self.db_pool.simple_select_one_txn( txn, @@ -231,7 +222,7 @@ async def set_destination_retry_timings( """ if self.database_engine.can_native_upsert: - return await self.db_pool.runInteraction( + await self.db_pool.runInteraction( "set_destination_retry_timings", self._set_destination_retry_timings_native, destination, @@ -241,7 +232,7 @@ async def set_destination_retry_timings( db_autocommit=True, # Safe as its a single upsert ) else: - return await self.db_pool.runInteraction( + await self.db_pool.runInteraction( "set_destination_retry_timings", self._set_destination_retry_timings_emulated, destination, @@ -251,8 +242,13 @@ async def set_destination_retry_timings( ) def _set_destination_retry_timings_native( - self, txn, destination, failure_ts, retry_last_ts, retry_interval - ): + self, + txn: LoggingTransaction, + destination: str, + failure_ts: Optional[int], + retry_last_ts: int, + retry_interval: int, + ) -> None: assert self.database_engine.can_native_upsert # Upsert retry time interval if retry_interval is zero (i.e. 
we're @@ -282,8 +278,13 @@ def _set_destination_retry_timings_native( ) def _set_destination_retry_timings_emulated( - self, txn, destination, failure_ts, retry_last_ts, retry_interval - ): + self, + txn: LoggingTransaction, + destination: str, + failure_ts: Optional[int], + retry_last_ts: int, + retry_interval: int, + ) -> None: self.database_engine.lock_table(txn, "destinations") # We need to be careful here as the data may have changed from under us @@ -393,7 +394,7 @@ async def set_destination_last_successful_stream_ordering( last_successful_stream_ordering: the stream_ordering of the most recent successfully-sent PDU """ - return await self.db_pool.simple_upsert( + await self.db_pool.simple_upsert( "destinations", keyvalues={"destination": destination}, values={"last_successful_stream_ordering": last_successful_stream_ordering}, @@ -534,7 +535,7 @@ def get_destinations_paginate_txn( else: order = "ASC" - args = [] + args: List[object] = [] where_statement = "" if destination: args.extend(["%" + destination.lower() + "%"]) @@ -543,7 +544,7 @@ def get_destinations_paginate_txn( sql_base = f"FROM destinations {where_statement} " sql = f"SELECT COUNT(*) as total_destinations {sql_base}" txn.execute(sql, args) - count = txn.fetchone()[0] + count = cast(Tuple[int], txn.fetchone())[0] sql = f""" SELECT destination, retry_last_ts, retry_interval, failure_ts, From 8428ef66c73efcc01cdbe05ac8bc1c99c4c20a20 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Thu, 16 Dec 2021 20:59:56 +0100 Subject: [PATCH 007/474] Add type hints to `synapse/tests/rest/admin` (#11590) --- changelog.d/11590.misc | 1 + mypy.ini | 3 + tests/rest/admin/test_background_updates.py | 3 +- tests/rest/admin/test_federation.py | 33 ++++++----- tests/rest/admin/test_media.py | 4 +- tests/rest/admin/test_registration_tokens.py | 25 +++++--- tests/rest/admin/test_room.py | 62 ++++++++++---------- 7 files changed, 74 insertions(+), 57 deletions(-) create mode 100644 changelog.d/11590.misc diff --git a/changelog.d/11590.misc b/changelog.d/11590.misc new file mode 100644 index 000000000000..40e01194df19 --- /dev/null +++ b/changelog.d/11590.misc @@ -0,0 +1 @@ +Add type hints to `synapse/tests/rest/admin`. 
\ No newline at end of file diff --git a/mypy.ini b/mypy.ini index c546487bdb4f..c2cb71e0c036 100644 --- a/mypy.ini +++ b/mypy.ini @@ -242,6 +242,9 @@ disallow_untyped_defs = True [mypy-tests.storage.test_user_directory] disallow_untyped_defs = True +[mypy-tests.rest.admin.*] +disallow_untyped_defs = True + [mypy-tests.rest.client.test_directory] disallow_untyped_defs = True diff --git a/tests/rest/admin/test_background_updates.py b/tests/rest/admin/test_background_updates.py index 4d152c0d66c2..1e3fe9c62c00 100644 --- a/tests/rest/admin/test_background_updates.py +++ b/tests/rest/admin/test_background_updates.py @@ -23,6 +23,7 @@ from synapse.rest.client import login from synapse.server import HomeServer from synapse.storage.background_updates import BackgroundUpdater +from synapse.types import JsonDict from synapse.util import Clock from tests import unittest @@ -96,7 +97,7 @@ def test_invalid_parameter(self) -> None: def _register_bg_update(self) -> None: "Adds a bg update but doesn't start it" - async def _fake_update(progress, batch_size) -> int: + async def _fake_update(progress: JsonDict, batch_size: int) -> int: await self.clock.sleep(0.2) return batch_size diff --git a/tests/rest/admin/test_federation.py b/tests/rest/admin/test_federation.py index d1cd5b075157..742f19425729 100644 --- a/tests/rest/admin/test_federation.py +++ b/tests/rest/admin/test_federation.py @@ -16,11 +16,14 @@ from parameterized import parameterized +from twisted.test.proto_helpers import MemoryReactor + import synapse.rest.admin from synapse.api.errors import Codes from synapse.rest.client import login from synapse.server import HomeServer from synapse.types import JsonDict +from synapse.util import Clock from tests import unittest @@ -31,7 +34,7 @@ class FederationTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def prepare(self, reactor, clock, hs: HomeServer): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastore() self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") @@ -44,7 +47,7 @@ def prepare(self, reactor, clock, hs: HomeServer): ("/_synapse/admin/v1/federation/destinations/dummy",), ] ) - def test_requester_is_no_admin(self, url: str): + def test_requester_is_no_admin(self, url: str) -> None: """ If the user is not a server admin, an error 403 is returned. """ @@ -62,7 +65,7 @@ def test_requester_is_no_admin(self, url: str): self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) - def test_invalid_parameter(self): + def test_invalid_parameter(self) -> None: """ If parameters are invalid, an error is returned. 
""" @@ -117,7 +120,7 @@ def test_invalid_parameter(self): self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) - def test_limit(self): + def test_limit(self) -> None: """ Testing list of destinations with limit """ @@ -137,7 +140,7 @@ def test_limit(self): self.assertEqual(channel.json_body["next_token"], "5") self._check_fields(channel.json_body["destinations"]) - def test_from(self): + def test_from(self) -> None: """ Testing list of destinations with a defined starting point (from) """ @@ -157,7 +160,7 @@ def test_from(self): self.assertNotIn("next_token", channel.json_body) self._check_fields(channel.json_body["destinations"]) - def test_limit_and_from(self): + def test_limit_and_from(self) -> None: """ Testing list of destinations with a defined starting point and limit """ @@ -177,7 +180,7 @@ def test_limit_and_from(self): self.assertEqual(len(channel.json_body["destinations"]), 10) self._check_fields(channel.json_body["destinations"]) - def test_next_token(self): + def test_next_token(self) -> None: """ Testing that `next_token` appears at the right place """ @@ -238,7 +241,7 @@ def test_next_token(self): self.assertEqual(len(channel.json_body["destinations"]), 1) self.assertNotIn("next_token", channel.json_body) - def test_list_all_destinations(self): + def test_list_all_destinations(self) -> None: """ List all destinations. """ @@ -259,7 +262,7 @@ def test_list_all_destinations(self): # Check that all fields are available self._check_fields(channel.json_body["destinations"]) - def test_order_by(self): + def test_order_by(self) -> None: """ Testing order list with parameter `order_by` """ @@ -268,7 +271,7 @@ def _order_test( expected_destination_list: List[str], order_by: Optional[str], dir: Optional[str] = None, - ): + ) -> None: """Request the list of destinations in a certain order. Assert that order is what we expect @@ -358,13 +361,13 @@ def _order_test( [dest[0][0], dest[2][0], dest[1][0]], "last_successful_stream_ordering", "b" ) - def test_search_term(self): + def test_search_term(self) -> None: """Test that searching for a destination works correctly""" def _search_test( expected_destination: Optional[str], search_term: str, - ): + ) -> None: """Search for a destination and check that the returned destinationis a match Args: @@ -410,7 +413,7 @@ def _search_test( _search_test(None, "foo") _search_test(None, "bar") - def test_get_single_destination(self): + def test_get_single_destination(self) -> None: """ Get one specific destinations. 
""" @@ -429,7 +432,7 @@ def test_get_single_destination(self): # convert channel.json_body into a List self._check_fields([channel.json_body]) - def _create_destinations(self, number_destinations: int): + def _create_destinations(self, number_destinations: int) -> None: """Create a number of destinations Args: @@ -442,7 +445,7 @@ def _create_destinations(self, number_destinations: int): self.store.set_destination_last_successful_stream_ordering(dest, 100) ) - def _check_fields(self, content: List[JsonDict]): + def _check_fields(self, content: List[JsonDict]) -> None: """Checks that the expected destination attributes are present in content Args: diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py index 3f727788cec8..86aff7575c9a 100644 --- a/tests/rest/admin/test_media.py +++ b/tests/rest/admin/test_media.py @@ -580,7 +580,9 @@ def _create_media(self) -> str: return server_and_media_id - def _access_media(self, server_and_media_id, expect_success=True) -> None: + def _access_media( + self, server_and_media_id: str, expect_success: bool = True + ) -> None: """ Try to access a media and check the result """ diff --git a/tests/rest/admin/test_registration_tokens.py b/tests/rest/admin/test_registration_tokens.py index 350a62dda672..81f3ac7f0448 100644 --- a/tests/rest/admin/test_registration_tokens.py +++ b/tests/rest/admin/test_registration_tokens.py @@ -14,6 +14,7 @@ import random import string from http import HTTPStatus +from typing import Optional from twisted.test.proto_helpers import MemoryReactor @@ -42,21 +43,27 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.url = "/_synapse/admin/v1/registration_tokens" - def _new_token(self, **kwargs) -> str: + def _new_token( + self, + token: Optional[str] = None, + uses_allowed: Optional[int] = None, + pending: int = 0, + completed: int = 0, + expiry_time: Optional[int] = None, + ) -> str: """Helper function to create a token.""" - token = kwargs.get( - "token", - "".join(random.choices(string.ascii_letters, k=8)), - ) + if token is None: + token = "".join(random.choices(string.ascii_letters, k=8)) + self.get_success( self.store.db_pool.simple_insert( "registration_tokens", { "token": token, - "uses_allowed": kwargs.get("uses_allowed", None), - "pending": kwargs.get("pending", 0), - "completed": kwargs.get("completed", 0), - "expiry_time": kwargs.get("expiry_time", None), + "uses_allowed": uses_allowed, + "pending": pending, + "completed": completed, + "expiry_time": expiry_time, }, ) ) diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index 22f9aa62346a..d2c8781cd4b9 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -66,7 +66,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: ) self.url = "/_synapse/admin/v1/rooms/%s" % self.room_id - def test_requester_is_no_admin(self): + def test_requester_is_no_admin(self) -> None: """ If the user is not a server admin, an error HTTPStatus.FORBIDDEN is returned. 
""" @@ -81,7 +81,7 @@ def test_requester_is_no_admin(self): self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) - def test_room_does_not_exist(self): + def test_room_does_not_exist(self) -> None: """ Check that unknown rooms/server return 200 """ @@ -96,7 +96,7 @@ def test_room_does_not_exist(self): self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) - def test_room_is_not_valid(self): + def test_room_is_not_valid(self) -> None: """ Check that invalid room names, return an error HTTPStatus.BAD_REQUEST. """ @@ -115,7 +115,7 @@ def test_room_is_not_valid(self): channel.json_body["error"], ) - def test_new_room_user_does_not_exist(self): + def test_new_room_user_does_not_exist(self) -> None: """ Tests that the user ID must be from local server but it does not have to exist. """ @@ -133,7 +133,7 @@ def test_new_room_user_does_not_exist(self): self.assertIn("failed_to_kick_users", channel.json_body) self.assertIn("local_aliases", channel.json_body) - def test_new_room_user_is_not_local(self): + def test_new_room_user_is_not_local(self) -> None: """ Check that only local users can create new room to move members. """ @@ -151,7 +151,7 @@ def test_new_room_user_is_not_local(self): channel.json_body["error"], ) - def test_block_is_not_bool(self): + def test_block_is_not_bool(self) -> None: """ If parameter `block` is not boolean, return an error """ @@ -166,7 +166,7 @@ def test_block_is_not_bool(self): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual(Codes.BAD_JSON, channel.json_body["errcode"]) - def test_purge_is_not_bool(self): + def test_purge_is_not_bool(self) -> None: """ If parameter `purge` is not boolean, return an error """ @@ -181,7 +181,7 @@ def test_purge_is_not_bool(self): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual(Codes.BAD_JSON, channel.json_body["errcode"]) - def test_purge_room_and_block(self): + def test_purge_room_and_block(self) -> None: """Test to purge a room and block it. Members will not be moved to a new room and will not receive a message. """ @@ -212,7 +212,7 @@ def test_purge_room_and_block(self): self._is_blocked(self.room_id, expect=True) self._has_no_members(self.room_id) - def test_purge_room_and_not_block(self): + def test_purge_room_and_not_block(self) -> None: """Test to purge a room and do not block it. Members will not be moved to a new room and will not receive a message. """ @@ -243,7 +243,7 @@ def test_purge_room_and_not_block(self): self._is_blocked(self.room_id, expect=False) self._has_no_members(self.room_id) - def test_block_room_and_not_purge(self): + def test_block_room_and_not_purge(self) -> None: """Test to block a room without purging it. Members will not be moved to a new room and will not receive a message. The room will not be purged. @@ -299,7 +299,7 @@ def test_block_unknown_room(self, purge: bool) -> None: self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self._is_blocked(room_id) - def test_shutdown_room_consent(self): + def test_shutdown_room_consent(self) -> None: """Test that we can shutdown rooms with local users who have not yet accepted the privacy policy. This used to fail when we tried to force part the user from the old room. 
@@ -351,7 +351,7 @@ def test_shutdown_room_consent(self): self._is_purged(self.room_id) self._has_no_members(self.room_id) - def test_shutdown_room_block_peek(self): + def test_shutdown_room_block_peek(self) -> None: """Test that a world_readable room can no longer be peeked into after it has been shut down. Members will be moved to a new room and will receive a message. @@ -400,7 +400,7 @@ def test_shutdown_room_block_peek(self): # Assert we can no longer peek into the room self._assert_peek(self.room_id, expect_code=HTTPStatus.FORBIDDEN) - def _is_blocked(self, room_id, expect=True): + def _is_blocked(self, room_id: str, expect: bool = True) -> None: """Assert that the room is blocked or not""" d = self.store.is_room_blocked(room_id) if expect: @@ -408,17 +408,17 @@ def _is_blocked(self, room_id, expect=True): else: self.assertIsNone(self.get_success(d)) - def _has_no_members(self, room_id): + def _has_no_members(self, room_id: str) -> None: """Assert there is now no longer anyone in the room""" users_in_room = self.get_success(self.store.get_users_in_room(room_id)) self.assertEqual([], users_in_room) - def _is_member(self, room_id, user_id): + def _is_member(self, room_id: str, user_id: str) -> None: """Test that user is member of the room""" users_in_room = self.get_success(self.store.get_users_in_room(room_id)) self.assertIn(user_id, users_in_room) - def _is_purged(self, room_id): + def _is_purged(self, room_id: str) -> None: """Test that the following tables have been purged of all rows related to the room.""" for table in PURGE_TABLES: count = self.get_success( @@ -432,7 +432,7 @@ def _is_purged(self, room_id): self.assertEqual(count, 0, msg=f"Rows not purged in {table}") - def _assert_peek(self, room_id, expect_code): + def _assert_peek(self, room_id: str, expect_code: int) -> None: """Assert that the admin user can (or cannot) peek into the room.""" url = "rooms/%s/initialSync" % (room_id,) @@ -492,7 +492,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: ("GET", "/_synapse/admin/v2/rooms/delete_status/%s"), ] ) - def test_requester_is_no_admin(self, method: str, url: str): + def test_requester_is_no_admin(self, method: str, url: str) -> None: """ If the user is not a server admin, an error HTTPStatus.FORBIDDEN is returned. """ @@ -507,7 +507,7 @@ def test_requester_is_no_admin(self, method: str, url: str): self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) - def test_room_does_not_exist(self): + def test_room_does_not_exist(self) -> None: """ Check that unknown rooms/server return 200 @@ -544,7 +544,7 @@ def test_room_does_not_exist(self): ("GET", "/_synapse/admin/v2/rooms/%s/delete_status"), ] ) - def test_room_is_not_valid(self, method: str, url: str): + def test_room_is_not_valid(self, method: str, url: str) -> None: """ Check that invalid room names, return an error HTTPStatus.BAD_REQUEST. """ @@ -562,7 +562,7 @@ def test_room_is_not_valid(self, method: str, url: str): channel.json_body["error"], ) - def test_new_room_user_does_not_exist(self): + def test_new_room_user_does_not_exist(self) -> None: """ Tests that the user ID must be from local server but it does not have to exist. 
""" @@ -580,7 +580,7 @@ def test_new_room_user_does_not_exist(self): self._test_result(delete_id, self.other_user, expect_new_room=True) - def test_new_room_user_is_not_local(self): + def test_new_room_user_is_not_local(self) -> None: """ Check that only local users can create new room to move members. """ @@ -598,7 +598,7 @@ def test_new_room_user_is_not_local(self): channel.json_body["error"], ) - def test_block_is_not_bool(self): + def test_block_is_not_bool(self) -> None: """ If parameter `block` is not boolean, return an error """ @@ -613,7 +613,7 @@ def test_block_is_not_bool(self): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual(Codes.BAD_JSON, channel.json_body["errcode"]) - def test_purge_is_not_bool(self): + def test_purge_is_not_bool(self) -> None: """ If parameter `purge` is not boolean, return an error """ @@ -628,7 +628,7 @@ def test_purge_is_not_bool(self): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual(Codes.BAD_JSON, channel.json_body["errcode"]) - def test_delete_expired_status(self): + def test_delete_expired_status(self) -> None: """Test that the task status is removed after expiration.""" # first task, do not purge, that we can create a second task @@ -699,7 +699,7 @@ def test_delete_expired_status(self): self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) - def test_delete_same_room_twice(self): + def test_delete_same_room_twice(self) -> None: """Test that the call for delete a room at second time gives an exception.""" body = {"new_room_user_id": self.admin_user} @@ -743,7 +743,7 @@ def test_delete_same_room_twice(self): expect_new_room=True, ) - def test_purge_room_and_block(self): + def test_purge_room_and_block(self) -> None: """Test to purge a room and block it. Members will not be moved to a new room and will not receive a message. """ @@ -774,7 +774,7 @@ def test_purge_room_and_block(self): self._is_blocked(self.room_id, expect=True) self._has_no_members(self.room_id) - def test_purge_room_and_not_block(self): + def test_purge_room_and_not_block(self) -> None: """Test to purge a room and do not block it. Members will not be moved to a new room and will not receive a message. """ @@ -805,7 +805,7 @@ def test_purge_room_and_not_block(self): self._is_blocked(self.room_id, expect=False) self._has_no_members(self.room_id) - def test_block_room_and_not_purge(self): + def test_block_room_and_not_purge(self) -> None: """Test to block a room without purging it. Members will not be moved to a new room and will not receive a message. The room will not be purged. @@ -838,7 +838,7 @@ def test_block_room_and_not_purge(self): self._is_blocked(self.room_id, expect=True) self._has_no_members(self.room_id) - def test_shutdown_room_consent(self): + def test_shutdown_room_consent(self) -> None: """Test that we can shutdown rooms with local users who have not yet accepted the privacy policy. This used to fail when we tried to force part the user from the old room. @@ -899,7 +899,7 @@ def test_shutdown_room_consent(self): self._is_purged(self.room_id) self._has_no_members(self.room_id) - def test_shutdown_room_block_peek(self): + def test_shutdown_room_block_peek(self) -> None: """Test that a world_readable room can no longer be peeked into after it has been shut down. Members will be moved to a new room and will receive a message. 
From 8ad39438fafef653d019e3214037c96257507e55 Mon Sep 17 00:00:00 2001 From: Shay Date: Mon, 20 Dec 2021 04:18:09 -0800 Subject: [PATCH 008/474] Add opentracing types (#11603) --- changelog.d/11603.misc | 1 + mypy.ini | 3 --- setup.py | 1 + synapse/logging/opentracing.py | 24 +++++++++++++++--------- synapse/logging/scopecontextmanager.py | 2 +- 5 files changed, 18 insertions(+), 13 deletions(-) create mode 100644 changelog.d/11603.misc diff --git a/changelog.d/11603.misc b/changelog.d/11603.misc new file mode 100644 index 000000000000..def24afb8d23 --- /dev/null +++ b/changelog.d/11603.misc @@ -0,0 +1 @@ +Add opentracing type stubs and fix associated mypy errors. \ No newline at end of file diff --git a/mypy.ini b/mypy.ini index c2cb71e0c036..3279c9bb2167 100644 --- a/mypy.ini +++ b/mypy.ini @@ -308,9 +308,6 @@ ignore_missing_imports = True [mypy-netaddr] ignore_missing_imports = True -[mypy-opentracing] -ignore_missing_imports = True - [mypy-parameterized.*] ignore_missing_imports = True diff --git a/setup.py b/setup.py index 2c6fb9aacb45..812459074ae1 100755 --- a/setup.py +++ b/setup.py @@ -107,6 +107,7 @@ def exec_file(path_segments): "mypy-zope==0.3.2", "types-bleach>=4.1.0", "types-jsonschema>=3.2.0", + "types-opentracing>=2.4.2", "types-Pillow>=8.3.4", "types-pyOpenSSL>=20.0.7", "types-PyYAML>=5.4.10", diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 20d23a426064..5d93ab07f181 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -222,8 +222,8 @@ class _DummyTagNames: tags = opentracing.tags except ImportError: - opentracing = None - tags = _DummyTagNames + opentracing = None # type: ignore[assignment] + tags = _DummyTagNames # type: ignore[assignment] try: from jaeger_client import Config as JaegerConfig @@ -366,7 +366,7 @@ def init_tracer(hs: "HomeServer"): global opentracing if not hs.config.tracing.opentracer_enabled: # We don't have a tracer - opentracing = None + opentracing = None # type: ignore[assignment] return if not opentracing or not JaegerConfig: @@ -452,7 +452,7 @@ def start_active_span( """ if opentracing is None: - return noop_context_manager() + return noop_context_manager() # type: ignore[unreachable] return opentracing.tracer.start_active_span( operation_name, @@ -477,7 +477,7 @@ def start_active_span_follows_from( forced, the new span will also have tracing forced. """ if opentracing is None: - return noop_context_manager() + return noop_context_manager() # type: ignore[unreachable] references = [opentracing.follows_from(context) for context in contexts] scope = start_active_span(operation_name, references=references) @@ -514,7 +514,7 @@ def start_active_span_from_request( # Also, twisted uses byte arrays while opentracing expects strings. 
if opentracing is None: - return noop_context_manager() + return noop_context_manager() # type: ignore[unreachable] header_dict = { k.decode(): v[0].decode() for k, v in request.requestHeaders.getAllRawHeaders() @@ -553,7 +553,7 @@ def start_active_span_from_edu( references = references or [] if opentracing is None: - return noop_context_manager() + return noop_context_manager() # type: ignore[unreachable] carrier = json_decoder.decode(edu_content.get("context", "{}")).get( "opentracing", {} @@ -594,18 +594,21 @@ def active_span(): @ensure_active_span("set a tag") def set_tag(key, value): """Sets a tag on the active span""" + assert opentracing.tracer.active_span is not None opentracing.tracer.active_span.set_tag(key, value) @ensure_active_span("log") def log_kv(key_values, timestamp=None): """Log to the active span""" + assert opentracing.tracer.active_span is not None opentracing.tracer.active_span.log_kv(key_values, timestamp) @ensure_active_span("set the traces operation name") def set_operation_name(operation_name): """Sets the operation name of the active span""" + assert opentracing.tracer.active_span is not None opentracing.tracer.active_span.set_operation_name(operation_name) @@ -674,6 +677,7 @@ def inject_header_dict( span = opentracing.tracer.active_span carrier: Dict[str, str] = {} + assert span is not None opentracing.tracer.inject(span.context, opentracing.Format.HTTP_HEADERS, carrier) for key, value in carrier.items(): @@ -716,6 +720,7 @@ def get_active_span_text_map(destination=None): return {} carrier: Dict[str, str] = {} + assert opentracing.tracer.active_span is not None opentracing.tracer.inject( opentracing.tracer.active_span.context, opentracing.Format.TEXT_MAP, carrier ) @@ -731,6 +736,7 @@ def active_span_context_as_string(): """ carrier: Dict[str, str] = {} if opentracing: + assert opentracing.tracer.active_span is not None opentracing.tracer.inject( opentracing.tracer.active_span.context, opentracing.Format.TEXT_MAP, carrier ) @@ -773,7 +779,7 @@ def trace(func=None, opname=None): def decorator(func): if opentracing is None: - return func + return func # type: ignore[unreachable] _opname = opname if opname else func.__name__ @@ -864,7 +870,7 @@ def trace_servlet(request: "SynapseRequest", extract_context: bool = False): """ if opentracing is None: - yield + yield # type: ignore[unreachable] return request_tags = { diff --git a/synapse/logging/scopecontextmanager.py b/synapse/logging/scopecontextmanager.py index b1e8e08fe96f..db8ca2c0497b 100644 --- a/synapse/logging/scopecontextmanager.py +++ b/synapse/logging/scopecontextmanager.py @@ -71,7 +71,7 @@ def activate(self, span, finish_on_close): if not ctx: # We don't want this scope to affect. logger.error("Tried to activate scope outside of loggingcontext") - return Scope(None, span) + return Scope(None, span) # type: ignore[arg-type] elif ctx.scope is not None: # We want the logging scope to look exactly the same so we give it # a blank suffix From 7a7ca8f2263f8750b8ff2c18b9aefaf4a3626235 Mon Sep 17 00:00:00 2001 From: V02460 Date: Mon, 20 Dec 2021 16:34:46 +0100 Subject: [PATCH 009/474] Use mock from standard library (#11588) Instead of the backported version. 
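For context, `AsyncMock` has shipped in `unittest.mock` since Python 3.8, which is why the backported `mock` package (previously needed for Python 3.6) is now redundant. A small self-contained sketch of the standard-library replacement:

import asyncio
from unittest.mock import AsyncMock

# Awaiting the mock returns its configured value, and await-specific
# assertion helpers are available on it.
mock_fetch = AsyncMock(return_value=42)

async def main() -> None:
    assert await mock_fetch() == 42
    mock_fetch.assert_awaited_once()

asyncio.run(main())
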
--- changelog.d/11588.removal | 1 + setup.py | 4 +--- tests/storage/test_background_update.py | 17 ++++++++--------- 3 files changed, 10 insertions(+), 12 deletions(-) create mode 100644 changelog.d/11588.removal diff --git a/changelog.d/11588.removal b/changelog.d/11588.removal new file mode 100644 index 000000000000..f781021e11c7 --- /dev/null +++ b/changelog.d/11588.removal @@ -0,0 +1 @@ +Replace `mock` package by its standard library version. diff --git a/setup.py b/setup.py index 812459074ae1..e113da6782d6 100755 --- a/setup.py +++ b/setup.py @@ -120,9 +120,7 @@ def exec_file(path_segments): # Tests assume that all optional dependencies are installed. # # parameterized_class decorator was introduced in parameterized 0.7.0 -# -# We use `mock` library as that backports `AsyncMock` to Python 3.6 -CONDITIONAL_REQUIREMENTS["test"] = ["parameterized>=0.7.0", "mock>=4.0.0"] +CONDITIONAL_REQUIREMENTS["test"] = ["parameterized>=0.7.0"] CONDITIONAL_REQUIREMENTS["dev"] = ( CONDITIONAL_REQUIREMENTS["lint"] diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index d77c001506c6..6156dfac4e58 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -12,15 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Use backported mock for AsyncMock support on Python 3.6. -from mock import Mock +from unittest.mock import Mock from twisted.internet.defer import Deferred, ensureDeferred from synapse.storage.background_updates import BackgroundUpdater from tests import unittest -from tests.test_utils import make_awaitable +from tests.test_utils import make_awaitable, simple_async_mock class BackgroundUpdateTestCase(unittest.HomeserverTestCase): @@ -116,14 +115,14 @@ def prepare(self, reactor, clock, homeserver): ) # Mock out the AsyncContextManager - self._update_ctx_manager = Mock(spec=["__aenter__", "__aexit__"]) - self._update_ctx_manager.__aenter__ = Mock( - return_value=make_awaitable(None), - ) - self._update_ctx_manager.__aexit__ = Mock(return_value=make_awaitable(None)) + class MockCM: + __aenter__ = simple_async_mock(return_value=None) + __aexit__ = simple_async_mock(return_value=None) + + self._update_ctx_manager = MockCM # Mock out the `update_handler` callback - self._on_update = Mock(return_value=self._update_ctx_manager) + self._on_update = Mock(return_value=self._update_ctx_manager()) # Define a default batch size value that's not the same as the internal default # value (100). From 3e0cfd447e17658a937fe62555db9e968f00b15b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 20 Dec 2021 11:00:13 -0500 Subject: [PATCH 010/474] Return JSON errors for unknown resources under /matrix/client. (#11602) Instead of returning 404 errors with HTML bodies when an unknown prefix was requested (e.g. /matrix/client/v1 before Synapse v1.49.0). --- changelog.d/11602.bugfix | 1 + synapse/app/homeserver.py | 9 ++------- synapse/http/server.py | 6 +++--- 3 files changed, 6 insertions(+), 10 deletions(-) create mode 100644 changelog.d/11602.bugfix diff --git a/changelog.d/11602.bugfix b/changelog.d/11602.bugfix new file mode 100644 index 000000000000..e0dfbf1a1520 --- /dev/null +++ b/changelog.d/11602.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug that some unknown endpoints would return HTML error pages instead of JSON `M_UNRECOGNIZED` errors. 
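Before the main diff, a minimal sketch of the behaviour being introduced (assuming Twisted's standard web APIs; this is not the actual Synapse resource class): unknown paths answer with the spec's JSON `M_UNRECOGNIZED` error body rather than Twisted's default HTML 404 page:

import json

from twisted.web import resource
from twisted.web.server import Request

class JsonUnrecognizedResource(resource.Resource):
    """Catch-all resource returning a JSON M_UNRECOGNIZED error (a sketch)."""

    isLeaf = True

    def render(self, request: Request) -> bytes:
        request.setResponseCode(404)
        request.setHeader(b"Content-Type", b"application/json")
        return json.dumps(
            {"errcode": "M_UNRECOGNIZED", "error": "Unrecognized request"}
        ).encode("utf-8")
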
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index dd76e0732108..177ce040e857 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -27,6 +27,7 @@ import synapse.config.logger from synapse import events from synapse.api.urls import ( + CLIENT_API_PREFIX, FEDERATION_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_R0_PREFIX, @@ -192,13 +193,7 @@ def _configure_named_resource( resources.update( { - "/_matrix/client/api/v1": client_resource, - "/_matrix/client/r0": client_resource, - "/_matrix/client/v1": client_resource, - "/_matrix/client/v3": client_resource, - "/_matrix/client/unstable": client_resource, - "/_matrix/client/v2_alpha": client_resource, - "/_matrix/client/versions": client_resource, + CLIENT_API_PREFIX: client_resource, "/.well-known": well_known_resource(self), "/_synapse/admin": AdminRestResource(self), **build_synapse_client_resource_tree(self), diff --git a/synapse/http/server.py b/synapse/http/server.py index 4fd5660a0819..7bbbe7648b8f 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -530,7 +530,7 @@ class RootRedirect(resource.Resource): """Redirects the root '/' path to another path.""" def __init__(self, path: str): - resource.Resource.__init__(self) + super().__init__() self.url = path def render_GET(self, request: Request) -> bytes: @@ -539,7 +539,7 @@ def render_GET(self, request: Request) -> bytes: def getChild(self, name: str, request: Request) -> resource.Resource: if len(name) == 0: return self # select ourselves as the child to render - return resource.Resource.getChild(self, name, request) + return super().getChild(name, request) class OptionsResource(resource.Resource): @@ -556,7 +556,7 @@ def render_OPTIONS(self, request: Request) -> bytes: def getChildWithDefault(self, path: str, request: Request) -> resource.Resource: if request.method == b"OPTIONS": return self # select ourselves as the child to render - return resource.Resource.getChildWithDefault(self, path, request) + return super().getChildWithDefault(path, request) class RootOptionsRedirectResource(OptionsResource, RootRedirect): From 76aa5537ad4f41cad130862a230c1f4cc4bfcbcf Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 20 Dec 2021 16:33:35 +0000 Subject: [PATCH 011/474] Disable aggregation bundling on `/sync` responses (#11583) * Disable aggregation bundling on `/sync` responses A partial revert of #11478. This turns out to have had a significant CPU impact on initial-sync handling. For now, let's disable it, until we find a more efficient way of achieving this. * Fix tests. Co-authored-by: Patrick Cloke --- changelog.d/11583.bugfix | 1 + synapse/rest/client/sync.py | 10 +++++++++- tests/rest/client/test_relations.py | 10 +++++----- 3 files changed, 15 insertions(+), 6 deletions(-) create mode 100644 changelog.d/11583.bugfix diff --git a/changelog.d/11583.bugfix b/changelog.d/11583.bugfix new file mode 100644 index 000000000000..d2ed113e2128 --- /dev/null +++ b/changelog.d/11583.bugfix @@ -0,0 +1 @@ +Fix a performance regression in `/sync` handling, introduced in 1.49.0. diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 88e4f5e0630f..7f5846d38934 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -522,7 +522,15 @@ def serialize(events: Iterable[EventBase]) -> Awaitable[List[JsonDict]]: time_now=time_now, # Don't bother to bundle aggregations if the timeline is unlimited, # as clients will have all the necessary information. 
- bundle_aggregations=room.timeline.limited, + # bundle_aggregations=room.timeline.limited, + # + # richvdh 2021-12-15: disable this temporarily as it has too high an + # overhead for initialsyncs. We need to figure out a way that the + # bundling can be done *before* the events are stored in the + # SyncResponseCache so that this part can be synchronous. + # + # Ensure to re-enable the test at tests/rest/client/test_relations.py::RelationsTestCase.test_bundled_aggregations. + bundle_aggregations=False, token_id=token_id, event_format=event_formatter, only_event_fields=only_fields, diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index 397c12c2a6c5..1b58b73136c5 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -574,11 +574,11 @@ def _find_and_assert_event(events): assert_bundle(channel.json_body["event"]["unsigned"].get("m.relations")) # Request sync. - channel = self.make_request("GET", "/sync", access_token=self.user_token) - self.assertEquals(200, channel.code, channel.json_body) - room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] - self.assertTrue(room_timeline["limited"]) - _find_and_assert_event(room_timeline["events"]) + # channel = self.make_request("GET", "/sync", access_token=self.user_token) + # self.assertEquals(200, channel.code, channel.json_body) + # room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] + # self.assertTrue(room_timeline["limited"]) + # _find_and_assert_event(room_timeline["events"]) # Note that /relations is tested separately in test_aggregation_get_event_for_thread # since it needs different data configured. From 60fa4935b5d3ee26f9ebb4b25ec74bed26d3c98d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 20 Dec 2021 17:45:03 +0000 Subject: [PATCH 012/474] Improve opentracing for incoming HTTP requests (#11618) * remove `start_active_span_from_request` Instead, pull out a separate function, `span_context_from_request`, to extract the parent span, which we can then pass into `start_active_span` as normal. This seems to be clearer all round. * Remove redundant tags from `incoming-federation-request` These are all wrapped up inside a parent span generated in AsyncResource, so there's no point duplicating all the tags that are set there. * Leave request spans open until the request completes It may take some time for the response to be encoded into JSON, and that JSON to be streamed back to the client, and really we want that inside the top-level span, so let's hand responsibility for closure to the SynapseRequest. * opentracing logs for HTTP request events * changelog --- changelog.d/11618.misc | 1 + synapse/federation/transport/server/_base.py | 39 ++++------- synapse/http/site.py | 30 ++++++++- synapse/logging/opentracing.py | 68 +++++++------------- 4 files changed, 65 insertions(+), 73 deletions(-) create mode 100644 changelog.d/11618.misc diff --git a/changelog.d/11618.misc b/changelog.d/11618.misc new file mode 100644 index 000000000000..4076b30bf7c1 --- /dev/null +++ b/changelog.d/11618.misc @@ -0,0 +1 @@ +Improve opentracing support for incoming HTTP requests. 
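The key change the commit message describes can be sketched as follows (assuming a configured `opentracing` tracer; everything except `opentracing.follows_from` and `start_active_span` is illustrative): the remote caller's span context is extracted from the request separately and then attached as a follows-from reference, instead of being buried inside a single opaque helper:

import opentracing

def handle_incoming_request(remote_context: "opentracing.SpanContext") -> None:
    # Link the new server-side span to the remote caller's span with a
    # follows-from reference, rather than making it a strict child.
    ref = opentracing.follows_from(remote_context)
    with opentracing.tracer.start_active_span(
        "incoming-federation-request", references=[ref]
    ):
        ...  # handle the request inside the linked span
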
diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py index dc39e3537bf6..da1fbf8b6361 100644 --- a/synapse/federation/transport/server/_base.py +++ b/synapse/federation/transport/server/_base.py @@ -22,13 +22,11 @@ from synapse.http.server import HttpServer, ServletCallback from synapse.http.servlet import parse_json_object_from_request from synapse.http.site import SynapseRequest -from synapse.logging import opentracing from synapse.logging.context import run_in_background from synapse.logging.opentracing import ( - SynapseTags, - start_active_span, - start_active_span_from_request, - tags, + set_tag, + span_context_from_request, + start_active_span_follows_from, whitelisted_homeserver, ) from synapse.server import HomeServer @@ -279,30 +277,19 @@ async def new_func( logger.warning("authenticate_request failed: %s", e) raise - request_tags = { - SynapseTags.REQUEST_ID: request.get_request_id(), - tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER, - tags.HTTP_METHOD: request.get_method(), - tags.HTTP_URL: request.get_redacted_uri(), - tags.PEER_HOST_IPV6: request.getClientIP(), - "authenticated_entity": origin, - "servlet_name": request.request_metrics.name, - } - - # Only accept the span context if the origin is authenticated - # and whitelisted + # update the active opentracing span with the authenticated entity + set_tag("authenticated_entity", origin) + + # if the origin is authenticated and whitelisted, link to its span context + context = None if origin and whitelisted_homeserver(origin): - scope = start_active_span_from_request( - request, "incoming-federation-request", tags=request_tags - ) - else: - scope = start_active_span( - "incoming-federation-request", tags=request_tags - ) + context = span_context_from_request(request) - with scope: - opentracing.inject_response_headers(request.responseHeaders) + scope = start_active_span_follows_from( + "incoming-federation-request", contexts=(context,) if context else () + ) + with scope: if origin and self.RATELIMIT: with ratelimiter.ratelimit(origin) as d: await d diff --git a/synapse/http/site.py b/synapse/http/site.py index 9f68d7e19174..80f7a2ff58e5 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -14,7 +14,7 @@ import contextlib import logging import time -from typing import Any, Generator, Optional, Tuple, Union +from typing import TYPE_CHECKING, Any, Generator, Optional, Tuple, Union import attr from zope.interface import implementer @@ -35,6 +35,9 @@ ) from synapse.types import Requester +if TYPE_CHECKING: + import opentracing + logger = logging.getLogger(__name__) _next_request_seq = 0 @@ -81,6 +84,10 @@ def __init__( # server name, for client requests this is the Requester object. self._requester: Optional[Union[Requester, str]] = None + # An opentracing span for this request. Will be closed when the request is + # completely processed. + self._opentracing_span: "Optional[opentracing.Span]" = None + # we can't yet create the logcontext, as we don't know the method. self.logcontext: Optional[LoggingContext] = None @@ -148,6 +155,13 @@ def requester(self, value: Union[Requester, str]) -> None: # If there's no authenticated entity, it was the requester. 
self.logcontext.request.authenticated_entity = authenticated_entity or requester + def set_opentracing_span(self, span: "opentracing.Span") -> None: + """attach an opentracing span to this request + + Doing so will cause the span to be closed when we finish processing the request + """ + self._opentracing_span = span + def get_request_id(self) -> str: return "%s-%i" % (self.get_method(), self.request_seq) @@ -286,6 +300,9 @@ async def handle_request(request): self._processing_finished_time = time.time() self._is_processing = False + if self._opentracing_span: + self._opentracing_span.log_kv({"event": "finished processing"}) + # if we've already sent the response, log it now; otherwise, we wait for the # response to be sent. if self.finish_time is not None: @@ -299,6 +316,8 @@ def finish(self) -> None: """ self.finish_time = time.time() Request.finish(self) + if self._opentracing_span: + self._opentracing_span.log_kv({"event": "response sent"}) if not self._is_processing: assert self.logcontext is not None with PreserveLoggingContext(self.logcontext): @@ -333,6 +352,11 @@ def connectionLost(self, reason: Union[Failure, Exception]) -> None: with PreserveLoggingContext(self.logcontext): logger.info("Connection from client lost before response was sent") + if self._opentracing_span: + self._opentracing_span.log_kv( + {"event": "client connection lost", "reason": str(reason.value)} + ) + if not self._is_processing: self._finished_processing() @@ -421,6 +445,10 @@ def _finished_processing(self) -> None: usage.evt_db_fetch_count, ) + # complete the opentracing span, if any. + if self._opentracing_span: + self._opentracing_span.finish() + try: self.request_metrics.stop(self.finish_time, self.code, self.sentLength) except Exception as e: diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 5d93ab07f181..63642906151e 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -173,6 +173,7 @@ def set_fates(clotho, lachesis, atropos, father="Zues", mother="Themis"): import attr from twisted.internet import defer +from twisted.web.http import Request from twisted.web.http_headers import Headers from synapse.config import ConfigError @@ -490,48 +491,6 @@ def start_active_span_follows_from( return scope -def start_active_span_from_request( - request, - operation_name, - references=None, - tags=None, - start_time=None, - ignore_active_span=False, - finish_on_close=True, -): - """ - Extracts a span context from a Twisted Request. - args: - headers (twisted.web.http.Request) - - For the other args see opentracing.tracer - - returns: - span_context (opentracing.span.SpanContext) - """ - # Twisted encodes the values as lists whereas opentracing doesn't. - # So, we take the first item in the list. - # Also, twisted uses byte arrays while opentracing expects strings. 
- - if opentracing is None: - return noop_context_manager() # type: ignore[unreachable] - - header_dict = { - k.decode(): v[0].decode() for k, v in request.requestHeaders.getAllRawHeaders() - } - context = opentracing.tracer.extract(opentracing.Format.HTTP_HEADERS, header_dict) - - return opentracing.tracer.start_active_span( - operation_name, - child_of=context, - references=references, - tags=tags, - start_time=start_time, - ignore_active_span=ignore_active_span, - finish_on_close=finish_on_close, - ) - - def start_active_span_from_edu( edu_content, operation_name, @@ -743,6 +702,20 @@ def active_span_context_as_string(): return json_encoder.encode(carrier) +def span_context_from_request(request: Request) -> "Optional[opentracing.SpanContext]": + """Extract an opentracing context from the headers on an HTTP request + + This is useful when we have received an HTTP request from another part of our + system, and want to link our spans to those of the remote system. + """ + if not opentracing: + return None + header_dict = { + k.decode(): v[0].decode() for k, v in request.requestHeaders.getAllRawHeaders() + } + return opentracing.tracer.extract(opentracing.Format.HTTP_HEADERS, header_dict) + + @only_if_tracing def span_context_from_string(carrier): """ @@ -882,10 +855,13 @@ def trace_servlet(request: "SynapseRequest", extract_context: bool = False): } request_name = request.request_metrics.name - if extract_context: - scope = start_active_span_from_request(request, request_name) - else: - scope = start_active_span(request_name) + context = span_context_from_request(request) if extract_context else None + + # we configure the scope not to finish the span immediately on exit, and instead + # pass the span into the SynapseRequest, which will finish it once we've finished + # sending the response to the client. + scope = start_active_span(request_name, child_of=context, finish_on_close=False) + request.set_opentracing_span(scope.span) with scope: inject_response_headers(request.responseHeaders) From c3e38b88f221971fdafbca1283cc628bd439efd3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 20 Dec 2021 18:12:08 +0000 Subject: [PATCH 013/474] Improve opentracing support for `ResponseCache` (#11607) This adds some opentracing annotations to ResponseCache, to make it easier to see what's going on; in particular, it adds a link back to the initial trace which is actually doing the work of generating the response. --- changelog.d/11607.misc | 1 + synapse/handlers/room.py | 2 +- synapse/util/async_helpers.py | 22 +++- synapse/util/caches/response_cache.py | 127 ++++++++++++++++------- tests/util/caches/test_response_cache.py | 45 ++++++-- 5 files changed, 149 insertions(+), 48 deletions(-) create mode 100644 changelog.d/11607.misc diff --git a/changelog.d/11607.misc b/changelog.d/11607.misc new file mode 100644 index 000000000000..e82f46776364 --- /dev/null +++ b/changelog.d/11607.misc @@ -0,0 +1 @@ +Improve opentracing support for requests which use a `ResponseCache`. 
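As a usage sketch of the cache being instrumented here (only `ResponseCache` and `wrap` come from the patch; `clock`, the cache name, and `_compute_summary` are illustrative placeholders): all concurrent callers for a given key await the same in-flight calculation, and with this change the waiters' spans gain a follows-from link back to the span that actually did the work:

from synapse.util.caches.response_cache import ResponseCache

# `clock` is assumed to be the homeserver's synapse.util.Clock instance;
# the callback and key below are placeholders.
cache: "ResponseCache[str]" = ResponseCache(clock, "room_summary", timeout_ms=30_000)

async def get_room_summary(room_id: str) -> dict:
    # The first caller for a given room_id runs _compute_summary; later
    # callers wait on (and trace-link to) the same deferred result.
    return await cache.wrap(room_id, _compute_summary, room_id)
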
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index ead2198e14fe..b9c1cbffa5c5 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -172,7 +172,7 @@ async def upgrade_room( user_id = requester.user.to_string() # Check if this room is already being upgraded by another person - for key in self._upgrade_response_cache.pending_result_cache: + for key in self._upgrade_response_cache.keys(): if key[0] == old_room_id and key[1] != user_id: # Two different people are trying to upgrade the same room. # Send the second an error. diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index bde99ea8787b..150a04b53e17 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import abc import collections import inspect import itertools @@ -57,7 +58,26 @@ _T = TypeVar("_T") -class ObservableDeferred(Generic[_T]): +class AbstractObservableDeferred(Generic[_T], metaclass=abc.ABCMeta): + """Abstract base class defining the consumer interface of ObservableDeferred""" + + __slots__ = () + + @abc.abstractmethod + def observe(self) -> "defer.Deferred[_T]": + """Add a new observer for this ObservableDeferred + + This returns a brand new deferred that is resolved when the underlying + deferred is resolved. Interacting with the returned deferred does not + effect the underlying deferred. + + Note that the returned Deferred doesn't follow the Synapse logcontext rules - + you will probably want to `make_deferred_yieldable` it. + """ + ... + + +class ObservableDeferred(Generic[_T], AbstractObservableDeferred[_T]): """Wraps a deferred object so that we can add observer deferreds. These observer deferreds do not affect the callback chain of the original deferred. diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py index 88ccf443377c..a3eb5f741bfc 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py @@ -12,19 +12,37 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import Any, Awaitable, Callable, Dict, Generic, Optional, TypeVar +from typing import ( + TYPE_CHECKING, + Any, + Awaitable, + Callable, + Dict, + Generic, + Iterable, + Optional, + TypeVar, +) import attr from twisted.internet import defer from synapse.logging.context import make_deferred_yieldable, run_in_background +from synapse.logging.opentracing import ( + active_span, + start_active_span, + start_active_span_follows_from, +) from synapse.util import Clock -from synapse.util.async_helpers import ObservableDeferred +from synapse.util.async_helpers import AbstractObservableDeferred, ObservableDeferred from synapse.util.caches import register_cache logger = logging.getLogger(__name__) +if TYPE_CHECKING: + import opentracing + # the type of the key in the cache KV = TypeVar("KV") @@ -54,6 +72,20 @@ class ResponseCacheContext(Generic[KV]): """ +@attr.s(auto_attribs=True) +class ResponseCacheEntry: + result: AbstractObservableDeferred + """The (possibly incomplete) result of the operation. + + Note that we continue to store an ObservableDeferred even after the operation + completes (rather than switching to an immediate value), since that makes it + easier to cache Failure results. 
+ """ + + opentracing_span_context: "Optional[opentracing.SpanContext]" + """The opentracing span which generated/is generating the result""" + + class ResponseCache(Generic[KV]): """ This caches a deferred response. Until the deferred completes it will be @@ -63,10 +95,7 @@ class ResponseCache(Generic[KV]): """ def __init__(self, clock: Clock, name: str, timeout_ms: float = 0): - # This is poorly-named: it includes both complete and incomplete results. - # We keep complete results rather than switching to absolute values because - # that makes it easier to cache Failure results. - self.pending_result_cache: Dict[KV, ObservableDeferred] = {} + self._result_cache: Dict[KV, ResponseCacheEntry] = {} self.clock = clock self.timeout_sec = timeout_ms / 1000.0 @@ -75,56 +104,63 @@ def __init__(self, clock: Clock, name: str, timeout_ms: float = 0): self._metrics = register_cache("response_cache", name, self, resizable=False) def size(self) -> int: - return len(self.pending_result_cache) + return len(self._result_cache) def __len__(self) -> int: return self.size() - def get(self, key: KV) -> Optional[defer.Deferred]: - """Look up the given key. + def keys(self) -> Iterable[KV]: + """Get the keys currently in the result cache - Returns a new Deferred (which also doesn't follow the synapse - logcontext rules). You will probably want to make_deferred_yieldable the result. + Returns both incomplete entries, and (if the timeout on this cache is non-zero), + complete entries which are still in the cache. - If there is no entry for the key, returns None. + Note that the returned iterator is not safe in the face of concurrent execution: + behaviour is undefined if `wrap` is called during iteration. + """ + return self._result_cache.keys() + + def _get(self, key: KV) -> Optional[ResponseCacheEntry]: + """Look up the given key. Args: - key: key to get/set in the cache + key: key to get in the cache Returns: - None if there is no entry for this key; otherwise a deferred which - resolves to the result. + The entry for this key, if any; else None. """ - result = self.pending_result_cache.get(key) - if result is not None: + entry = self._result_cache.get(key) + if entry is not None: self._metrics.inc_hits() - return result.observe() + return entry else: self._metrics.inc_misses() return None def _set( - self, context: ResponseCacheContext[KV], deferred: "defer.Deferred[RV]" - ) -> "defer.Deferred[RV]": + self, + context: ResponseCacheContext[KV], + deferred: "defer.Deferred[RV]", + opentracing_span_context: "Optional[opentracing.SpanContext]", + ) -> ResponseCacheEntry: """Set the entry for the given key to the given deferred. *deferred* should run its callbacks in the sentinel logcontext (ie, you should wrap normal synapse deferreds with synapse.logging.context.run_in_background). - Returns a new Deferred (which also doesn't follow the synapse logcontext rules). - You will probably want to make_deferred_yieldable the result. - Args: context: Information about the cache miss deferred: The deferred which resolves to the result. + opentracing_span_context: An opentracing span wrapping the calculation Returns: - A new deferred which resolves to the actual result. + The cache entry object. 
""" result = ObservableDeferred(deferred, consumeErrors=True) key = context.cache_key - self.pending_result_cache[key] = result + entry = ResponseCacheEntry(result, opentracing_span_context) + self._result_cache[key] = entry def on_complete(r: RV) -> RV: # if this cache has a non-zero timeout, and the callback has not cleared @@ -132,18 +168,18 @@ def on_complete(r: RV) -> RV: # its removal later. if self.timeout_sec and context.should_cache: self.clock.call_later( - self.timeout_sec, self.pending_result_cache.pop, key, None + self.timeout_sec, self._result_cache.pop, key, None ) else: # otherwise, remove the result immediately. - self.pending_result_cache.pop(key, None) + self._result_cache.pop(key, None) return r - # make sure we do this *after* adding the entry to pending_result_cache, + # make sure we do this *after* adding the entry to result_cache, # in case the result is already complete (in which case flipping the order would # leave us with a stuck entry in the cache). result.addBoth(on_complete) - return result.observe() + return entry async def wrap( self, @@ -189,20 +225,41 @@ async def handle_request(request): Returns: The result of the callback (from the cache, or otherwise) """ - result = self.get(key) - if not result: + entry = self._get(key) + if not entry: logger.debug( "[%s]: no cached result for [%s], calculating new one", self._name, key ) context = ResponseCacheContext(cache_key=key) if cache_context: kwargs["cache_context"] = context - d = run_in_background(callback, *args, **kwargs) - result = self._set(context, d) - elif not isinstance(result, defer.Deferred) or result.called: + + span_context: Optional[opentracing.SpanContext] = None + + async def cb() -> RV: + # NB it is important that we do not `await` before setting span_context! + nonlocal span_context + with start_active_span(f"ResponseCache[{self._name}].calculate"): + span = active_span() + if span: + span_context = span.context + return await callback(*args, **kwargs) + + d = run_in_background(cb) + entry = self._set(context, d, span_context) + return await make_deferred_yieldable(entry.result.observe()) + + result = entry.result.observe() + if result.called: logger.info("[%s]: using completed cached result for [%s]", self._name, key) else: logger.info( "[%s]: using incomplete cached result for [%s]", self._name, key ) - return await make_deferred_yieldable(result) + + span_context = entry.opentracing_span_context + with start_active_span_follows_from( + f"ResponseCache[{self._name}].wait", + contexts=(span_context,) if span_context else (), + ): + return await make_deferred_yieldable(result) diff --git a/tests/util/caches/test_response_cache.py b/tests/util/caches/test_response_cache.py index 1e83ef2f33d5..025b73e32f90 100644 --- a/tests/util/caches/test_response_cache.py +++ b/tests/util/caches/test_response_cache.py @@ -11,6 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ +from unittest.mock import Mock + from parameterized import parameterized from twisted.internet import defer @@ -60,10 +63,15 @@ def test_cache_hit(self): self.successResultOf(wrap_d), "initial wrap result should be the same", ) + + # a second call should return the result without a call to the wrapped function + unexpected = Mock(spec=()) + wrap2_d = defer.ensureDeferred(cache.wrap(0, unexpected)) + unexpected.assert_not_called() self.assertEqual( expected_result, - self.successResultOf(cache.get(0)), - "cache should have the result", + self.successResultOf(wrap2_d), + "cache should still have the result", ) def test_cache_miss(self): @@ -80,7 +88,7 @@ def test_cache_miss(self): self.successResultOf(wrap_d), "initial wrap result should be the same", ) - self.assertIsNone(cache.get(0), "cache should not have the result now") + self.assertCountEqual([], cache.keys(), "cache should not have the result now") def test_cache_expire(self): cache = self.with_cache("short_cache", ms=1000) @@ -92,16 +100,20 @@ def test_cache_expire(self): ) self.assertEqual(expected_result, self.successResultOf(wrap_d)) + + # a second call should return the result without a call to the wrapped function + unexpected = Mock(spec=()) + wrap2_d = defer.ensureDeferred(cache.wrap(0, unexpected)) + unexpected.assert_not_called() self.assertEqual( expected_result, - self.successResultOf(cache.get(0)), + self.successResultOf(wrap2_d), "cache should still have the result", ) # cache eviction timer is handled self.reactor.pump((2,)) - - self.assertIsNone(cache.get(0), "cache should not have the result now") + self.assertCountEqual([], cache.keys(), "cache should not have the result now") def test_cache_wait_hit(self): cache = self.with_cache("neutral_cache") @@ -133,16 +145,21 @@ def test_cache_wait_expire(self): self.reactor.pump((1, 1)) self.assertEqual(expected_result, self.successResultOf(wrap_d)) + + # a second call should immediately return the result without a call to the + # wrapped function + unexpected = Mock(spec=()) + wrap2_d = defer.ensureDeferred(cache.wrap(0, unexpected)) + unexpected.assert_not_called() self.assertEqual( expected_result, - self.successResultOf(cache.get(0)), + self.successResultOf(wrap2_d), "cache should still have the result", ) # (1 + 1 + 2) > 3.0, cache eviction timer is handled self.reactor.pump((2,)) - - self.assertIsNone(cache.get(0), "cache should not have the result now") + self.assertCountEqual([], cache.keys(), "cache should not have the result now") @parameterized.expand([(True,), (False,)]) def test_cache_context_nocache(self, should_cache: bool): @@ -183,10 +200,16 @@ async def non_caching(o: str, cache_context: ResponseCacheContext[int]): self.assertEqual(expected_result, self.successResultOf(wrap2_d)) if should_cache: + unexpected = Mock(spec=()) + wrap3_d = defer.ensureDeferred(cache.wrap(0, unexpected)) + unexpected.assert_not_called() self.assertEqual( expected_result, - self.successResultOf(cache.get(0)), + self.successResultOf(wrap3_d), "cache should still have the result", ) + else: - self.assertIsNone(cache.get(0), "cache should not have the result") + self.assertCountEqual( + [], cache.keys(), "cache should not have the result now" + ) From dd4778875213d9cb8be7ee71d32751fbd6cdaba2 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 20 Dec 2021 14:14:38 -0500 Subject: [PATCH 014/474] Do not bundle aggregations for APIs which shouldn't include them. 
(#11592) And make bundling aggregations opt-in, instead of opt-out to avoid having APIs to include extraneous data (and being much heavier than necessary). --- changelog.d/11592.bugfix | 1 + synapse/events/utils.py | 2 +- synapse/handlers/events.py | 2 -- synapse/handlers/initial_sync.py | 18 ++++-------------- synapse/handlers/message.py | 4 +++- synapse/handlers/pagination.py | 5 ++++- synapse/rest/admin/rooms.py | 12 +++++++++--- synapse/rest/client/relations.py | 4 +++- synapse/rest/client/room.py | 10 ++++++---- 9 files changed, 31 insertions(+), 27 deletions(-) create mode 100644 changelog.d/11592.bugfix diff --git a/changelog.d/11592.bugfix b/changelog.d/11592.bugfix new file mode 100644 index 000000000000..4116e5dd7c7b --- /dev/null +++ b/changelog.d/11592.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where responses included bundled aggregations when they should not, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 3da432c1df58..2038e72924a9 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -395,7 +395,7 @@ async def serialize_event( event: Union[JsonDict, EventBase], time_now: int, *, - bundle_aggregations: bool = True, + bundle_aggregations: bool = False, **kwargs: Any, ) -> JsonDict: """Serializes a single event. diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index afed80ba14c0..1b996c420d20 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -123,8 +123,6 @@ async def get_stream( events, time_now, as_client_event=as_client_event, - # Don't bundle aggregations as this is a deprecated API. - bundle_aggregations=False, ) chunk = { diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 9ab723ff975e..601bab67f9cc 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -173,8 +173,6 @@ async def handle_room(event: RoomsForUser) -> None: d["invite"] = await self._event_serializer.serialize_event( invite_event, time_now, - # Don't bundle aggregations as this is a deprecated API. - bundle_aggregations=False, as_client_event=as_client_event, ) @@ -227,8 +225,6 @@ async def handle_room(event: RoomsForUser) -> None: await self._event_serializer.serialize_events( messages, time_now=time_now, - # Don't bundle aggregations as this is a deprecated API. - bundle_aggregations=False, as_client_event=as_client_event, ) ), @@ -239,8 +235,6 @@ async def handle_room(event: RoomsForUser) -> None: d["state"] = await self._event_serializer.serialize_events( current_state.values(), time_now=time_now, - # Don't bundle aggregations as this is a deprecated API. - bundle_aggregations=False, as_client_event=as_client_event, ) @@ -382,9 +376,7 @@ async def _room_initial_sync_parted( "messages": { "chunk": ( # Don't bundle aggregations as this is a deprecated API. - await self._event_serializer.serialize_events( - messages, time_now, bundle_aggregations=False - ) + await self._event_serializer.serialize_events(messages, time_now) ), "start": await start_token.to_string(self.store), "end": await end_token.to_string(self.store), @@ -392,7 +384,7 @@ async def _room_initial_sync_parted( "state": ( # Don't bundle aggregations as this is a deprecated API. 
await self._event_serializer.serialize_events( - room_state.values(), time_now, bundle_aggregations=False + room_state.values(), time_now ) ), "presence": [], @@ -413,7 +405,7 @@ async def _room_initial_sync_joined( time_now = self.clock.time_msec() # Don't bundle aggregations as this is a deprecated API. state = await self._event_serializer.serialize_events( - current_state.values(), time_now, bundle_aggregations=False + current_state.values(), time_now ) now_token = self.hs.get_event_sources().get_current_token() @@ -488,9 +480,7 @@ async def get_receipts() -> List[JsonDict]: "messages": { "chunk": ( # Don't bundle aggregations as this is a deprecated API. - await self._event_serializer.serialize_events( - messages, time_now, bundle_aggregations=False - ) + await self._event_serializer.serialize_events(messages, time_now) ), "start": await start_token.to_string(self.store), "end": await end_token.to_string(self.store), diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 5e3d3886eb1d..1a7190085a7d 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -246,7 +246,9 @@ async def get_state_events( room_state = room_state_events[membership_event_id] now = self.clock.time_msec() - events = await self._event_serializer.serialize_events(room_state.values(), now) + events = await self._event_serializer.serialize_events( + room_state.values(), now, bundle_aggregations=True + ) return events async def get_joined_members(self, requester: Requester, room_id: str) -> dict: diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 4f424380533b..7469cc55a251 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -542,7 +542,10 @@ async def get_messages( chunk = { "chunk": ( await self._event_serializer.serialize_events( - events, time_now, as_client_event=as_client_event + events, + time_now, + bundle_aggregations=True, + as_client_event=as_client_event, ) ), "start": await from_token.to_string(self.store), diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index 17c6df1cc8c7..6030373ebcbb 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -745,13 +745,19 @@ async def on_GET( time_now = self.clock.time_msec() results["events_before"] = await self._event_serializer.serialize_events( - results["events_before"], time_now + results["events_before"], + time_now, + bundle_aggregations=True, ) results["event"] = await self._event_serializer.serialize_event( - results["event"], time_now + results["event"], + time_now, + bundle_aggregations=True, ) results["events_after"] = await self._event_serializer.serialize_events( - results["events_after"], time_now + results["events_after"], + time_now, + bundle_aggregations=True, ) results["state"] = await self._event_serializer.serialize_events( results["state"], time_now diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py index ffa37ef06c89..5815650ee6cb 100644 --- a/synapse/rest/client/relations.py +++ b/synapse/rest/client/relations.py @@ -232,7 +232,9 @@ async def on_GET( ) # The relations returned for the requested event do include their # bundled aggregations. 
- serialized_events = await self._event_serializer.serialize_events(events, now) + serialized_events = await self._event_serializer.serialize_events( + events, now, bundle_aggregations=True + ) return_value = pagination_chunk.to_dict() return_value["chunk"] = serialized_events diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 60719331b640..40330749e546 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -662,7 +662,9 @@ async def on_GET( time_now = self.clock.time_msec() if event: - event_dict = await self._event_serializer.serialize_event(event, time_now) + event_dict = await self._event_serializer.serialize_event( + event, time_now, bundle_aggregations=True + ) return 200, event_dict raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND) @@ -707,13 +709,13 @@ async def on_GET( time_now = self.clock.time_msec() results["events_before"] = await self._event_serializer.serialize_events( - results["events_before"], time_now + results["events_before"], time_now, bundle_aggregations=True ) results["event"] = await self._event_serializer.serialize_event( - results["event"], time_now + results["event"], time_now, bundle_aggregations=True ) results["events_after"] = await self._event_serializer.serialize_events( - results["events_after"], time_now + results["events_after"], time_now, bundle_aggregations=True ) results["state"] = await self._event_serializer.serialize_events( results["state"], time_now From 8c36d332d598f55f471196fb9415bf27db3e6fcd Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 21 Dec 2021 11:07:41 +0000 Subject: [PATCH 015/474] 1.49.1 --- CHANGES.md | 9 +++++++++ changelog.d/11583.bugfix | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/11583.bugfix diff --git a/CHANGES.md b/CHANGES.md index 58217e655eff..bdfa40686f8e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.49.1 (2021-12-21) +=========================== + +Bugfixes +-------- + +- Fix a performance regression in `/sync` handling, introduced in 1.49.0. ([\#11583](https://github.com/matrix-org/synapse/issues/11583)) + + Synapse 1.49.0 (2021-12-14) =========================== diff --git a/changelog.d/11583.bugfix b/changelog.d/11583.bugfix deleted file mode 100644 index d2ed113e2128..000000000000 --- a/changelog.d/11583.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a performance regression in `/sync` handling, introduced in 1.49.0. diff --git a/debian/changelog b/debian/changelog index 794d8efa406a..61a1d003b29e 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.49.1) stable; urgency=medium + + * New synapse release 1.49.1. + + -- Synapse Packaging team Tue, 21 Dec 2021 11:07:30 +0000 + matrix-synapse-py3 (1.49.0) stable; urgency=medium * New synapse release 1.49.0. 
diff --git a/synapse/__init__.py b/synapse/__init__.py index d44a03a2cb35..d45e0f18833c 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.49.0" +__version__ = "1.49.1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 221595414751f7b8fd0c79772c5ac4ffefeca10a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 21 Dec 2021 11:10:36 +0000 Subject: [PATCH 016/474] Various opentracing enhancements (#11619) * Wrap `auth.get_user_by_req` in an opentracing span give `get_user_by_req` its own opentracing span, since it can result in a non-trivial number of sub-spans which it is useful to group together. This requires a bit of reorganisation because it also sets some tags (and may force tracing) on the servlet span. * Emit opentracing span for encoding json responses This can be a significant time sink. * Rename all sync spans with a prefix * Write an opentracing span for encoding sync response * opentracing span to group generate_room_entries * opentracing spans within sync.encode_response * changelog * Use the `trace` decorator instead of context managers --- changelog.d/11619.misc | 1 + synapse/api/auth.py | 53 ++++++++++++++++++++++++++----------- synapse/handlers/sync.py | 7 ++--- synapse/http/server.py | 19 +++++++++++-- synapse/rest/client/sync.py | 6 +++++ 5 files changed, 65 insertions(+), 21 deletions(-) create mode 100644 changelog.d/11619.misc diff --git a/changelog.d/11619.misc b/changelog.d/11619.misc new file mode 100644 index 000000000000..2125cbddd2fd --- /dev/null +++ b/changelog.d/11619.misc @@ -0,0 +1 @@ +A number of improvements to opentracing support. diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 0bf58dff08a5..4a32d430bdfc 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -32,7 +32,7 @@ from synapse.events import EventBase from synapse.http import get_request_user_agent from synapse.http.site import SynapseRequest -from synapse.logging import opentracing as opentracing +from synapse.logging.opentracing import active_span, force_tracing, start_active_span from synapse.storage.databases.main.registration import TokenLookupResult from synapse.types import Requester, StateMap, UserID, create_requester from synapse.util.caches.lrucache import LruCache @@ -149,6 +149,42 @@ async def get_user_by_req( is invalid. AuthError if access is denied for the user in the access token """ + parent_span = active_span() + with start_active_span("get_user_by_req"): + requester = await self._wrapped_get_user_by_req( + request, allow_guest, rights, allow_expired + ) + + if parent_span: + if requester.authenticated_entity in self._force_tracing_for_users: + # request tracing is enabled for this user, so we need to force it + # tracing on for the parent span (which will be the servlet span). + # + # It's too late for the get_user_by_req span to inherit the setting, + # so we also force it on for that. 
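+                    # (The bare call below applies to the get_user_by_req span
+                    # we just opened; the second call applies to the servlet
+                    # span that is its parent.)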
+ force_tracing() + force_tracing(parent_span) + parent_span.set_tag( + "authenticated_entity", requester.authenticated_entity + ) + parent_span.set_tag("user_id", requester.user.to_string()) + if requester.device_id is not None: + parent_span.set_tag("device_id", requester.device_id) + if requester.app_service is not None: + parent_span.set_tag("appservice_id", requester.app_service.id) + return requester + + async def _wrapped_get_user_by_req( + self, + request: SynapseRequest, + allow_guest: bool, + rights: str, + allow_expired: bool, + ) -> Requester: + """Helper for get_user_by_req + + Once get_user_by_req has set up the opentracing span, this does the actual work. + """ try: ip_addr = request.getClientIP() user_agent = get_request_user_agent(request) @@ -177,14 +213,6 @@ async def get_user_by_req( ) request.requester = user_id - if user_id in self._force_tracing_for_users: - opentracing.force_tracing() - opentracing.set_tag("authenticated_entity", user_id) - opentracing.set_tag("user_id", user_id) - if device_id is not None: - opentracing.set_tag("device_id", device_id) - opentracing.set_tag("appservice_id", app_service.id) - return requester user_info = await self.get_user_by_access_token( @@ -242,13 +270,6 @@ async def get_user_by_req( ) request.requester = requester - if user_info.token_owner in self._force_tracing_for_users: - opentracing.force_tracing() - opentracing.set_tag("authenticated_entity", user_info.token_owner) - opentracing.set_tag("user_id", user_info.user_id) - if device_id: - opentracing.set_tag("device_id", device_id) - return requester except KeyError: raise MissingClientTokenError() diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index bcd10cbb3051..d24124d6ac7b 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -421,7 +421,7 @@ async def current_sync_for_user( span to track the sync. See `generate_sync_result` for the next part of your indoctrination. 
""" - with start_active_span("current_sync_for_user"): + with start_active_span("sync.current_sync_for_user"): log_kv({"since_token": since_token}) sync_result = await self.generate_sync_result( sync_config, since_token, full_state @@ -1585,7 +1585,8 @@ async def handle_room_entries(room_entry: "RoomSyncResultBuilder") -> None: ) logger.debug("Generated room entry for %s", room_entry.room_id) - await concurrently_execute(handle_room_entries, room_entries, 10) + with start_active_span("sync.generate_room_entries"): + await concurrently_execute(handle_room_entries, room_entries, 10) sync_result_builder.invited.extend(invited) sync_result_builder.knocked.extend(knocked) @@ -2045,7 +2046,7 @@ async def _generate_room_entry( since_token = room_builder.since_token upto_token = room_builder.upto_token - with start_active_span("generate_room_entry"): + with start_active_span("sync.generate_room_entry"): set_tag("room_id", room_id) log_kv({"events": len(events or ())}) diff --git a/synapse/http/server.py b/synapse/http/server.py index 7bbbe7648b8f..e302946591bf 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -58,12 +58,14 @@ ) from synapse.http.site import SynapseRequest from synapse.logging.context import defer_to_thread, preserve_fn, run_in_background -from synapse.logging.opentracing import trace_servlet +from synapse.logging.opentracing import active_span, start_active_span, trace_servlet from synapse.util import json_encoder from synapse.util.caches import intern_dict from synapse.util.iterutils import chunk_seq if TYPE_CHECKING: + import opentracing + from synapse.server import HomeServer logger = logging.getLogger(__name__) @@ -759,7 +761,20 @@ async def _async_write_json_to_request_in_thread( expensive. """ - json_str = await defer_to_thread(request.reactor, json_encoder, json_object) + def encode(opentracing_span: "Optional[opentracing.Span]") -> bytes: + # it might take a while for the threadpool to schedule us, so we write + # opentracing logs once we actually get scheduled, so that we can see how + # much that contributed. 
+ if opentracing_span: + opentracing_span.log_kv({"event": "scheduled"}) + res = json_encoder(json_object) + if opentracing_span: + opentracing_span.log_kv({"event": "encoded"}) + return res + + with start_active_span("encode_json_response"): + span = active_span() + json_str = await defer_to_thread(request.reactor, encode, span) _write_bytes_to_request(request, json_str) diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 8c4b0f6e5def..e99a943d0d4b 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -48,6 +48,7 @@ from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_boolean, parse_integer, parse_string from synapse.http.site import SynapseRequest +from synapse.logging.opentracing import trace from synapse.types import JsonDict, StreamToken from synapse.util import json_decoder @@ -222,6 +223,7 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: logger.debug("Event formatting complete") return 200, response_content + @trace(opname="sync.encode_response") async def encode_response( self, time_now: int, @@ -332,6 +334,7 @@ def encode_presence(events: List[UserPresenceState], time_now: int) -> JsonDict: ] } + @trace(opname="sync.encode_joined") async def encode_joined( self, rooms: List[JoinedSyncResult], @@ -368,6 +371,7 @@ async def encode_joined( return joined + @trace(opname="sync.encode_invited") async def encode_invited( self, rooms: List[InvitedSyncResult], @@ -406,6 +410,7 @@ async def encode_invited( return invited + @trace(opname="sync.encode_knocked") async def encode_knocked( self, rooms: List[KnockedSyncResult], @@ -460,6 +465,7 @@ async def encode_knocked( return knocked + @trace(opname="sync.encode_archived") async def encode_archived( self, rooms: List[ArchivedSyncResult], From b9f2f6d3c414a5b0d2bbce15ef12d669dbcea82a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 21 Dec 2021 11:23:35 +0000 Subject: [PATCH 017/474] more words --- CHANGES.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index bdfa40686f8e..0fcfb3c782d1 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,8 @@ Synapse 1.49.1 (2021-12-21) =========================== +This release fixes a regression introduced in Synapse 1.49.0 which could cause `/sync` requests to take significantly longer. This would particularly affect "initial" syncs for users participating in a large number of rooms, and in extreme cases, could make it imposssible for such users to log in on a new client. + Bugfixes -------- From aa58e8a28a05c5a71003570f8bdcd1256276f243 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 21 Dec 2021 11:24:24 +0000 Subject: [PATCH 018/474] typopo --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 0fcfb3c782d1..69320458485e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,7 +1,7 @@ Synapse 1.49.1 (2021-12-21) =========================== -This release fixes a regression introduced in Synapse 1.49.0 which could cause `/sync` requests to take significantly longer. This would particularly affect "initial" syncs for users participating in a large number of rooms, and in extreme cases, could make it imposssible for such users to log in on a new client. +This release fixes a regression introduced in Synapse 1.49.0 which could cause `/sync` requests to take significantly longer. 
This would particularly affect "initial" syncs for users participating in a large number of rooms, and in extreme cases, could make it imposssible for such users to log in on a new client.
+This release fixes a regression introduced in Synapse 1.49.0 which could cause `/sync` requests to take significantly longer.
This would particularly affect "initial" syncs for users participating in a large number of rooms, and in extreme cases, could make it impossible for such users to log in on a new client. Bugfixes -------- From 57ca8ab10f74e9f36b3a93ff31438cabb146fb5f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 21 Dec 2021 12:06:31 +0000 Subject: [PATCH 019/474] Add notes about dropping support for Python 3.6 and Postgres 9.6. --- CHANGES.md | 4 ++++ docs/deprecation_policy.md | 4 ++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 69320458485e..dc6fa177946e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -3,6 +3,10 @@ Synapse 1.49.1 (2021-12-21) This release fixes a regression introduced in Synapse 1.49.0 which could cause `/sync` requests to take significantly longer. This would particularly affect "initial" syncs for users participating in a large number of rooms, and in extreme cases, could make it impossible for such users to log in on a new client. +**Note:** in line with our [deprecation policy](https://matrix-org.github.io/synapse/latest/deprecation_policy.html) for platform dependencies, this will be the last release to support Python 3.6 and PostgreSQL 9.6, both of which have now reached upstream end-of-life. Synapse will require Python 3.7+ and PostgreSQL 10. + +**Note:** We will also stop producing packages for Ubuntu 18.04 (Bionic Beaver) after this release, as it uses Python 3.6. + Bugfixes -------- diff --git a/docs/deprecation_policy.md b/docs/deprecation_policy.md index 06ea34055951..359dac07c3dc 100644 --- a/docs/deprecation_policy.md +++ b/docs/deprecation_policy.md @@ -14,8 +14,8 @@ i.e. when a version reaches End of Life Synapse will withdraw support for that version in future releases. Details on the upstream support life cycles for Python and PostgreSQL are -documented at https://endoflife.date/python and -https://endoflife.date/postgresql. +documented at [https://endoflife.date/python](https://endoflife.date/python) and +[https://endoflife.date/postgresql](https://endoflife.date/postgresql). Context From b6102230a7391d1acaa50cc6c389813f7e0fab84 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 21 Dec 2021 08:25:34 -0500 Subject: [PATCH 020/474] Add type hints to event_push_actions. (#11594) --- changelog.d/11594.misc | 1 + mypy.ini | 4 +- synapse/handlers/sync.py | 12 +- synapse/push/emailpusher.py | 18 +- synapse/push/httppusher.py | 12 +- synapse/push/mailer.py | 40 ++- synapse/push/push_tools.py | 4 +- synapse/rest/client/notifications.py | 20 +- .../databases/main/event_push_actions.py | 249 +++++++++++------- .../replication/slave/storage/test_events.py | 7 +- tests/storage/test_event_push_actions.py | 12 +- 11 files changed, 225 insertions(+), 154 deletions(-) create mode 100644 changelog.d/11594.misc diff --git a/changelog.d/11594.misc b/changelog.d/11594.misc new file mode 100644 index 000000000000..d451940bf216 --- /dev/null +++ b/changelog.d/11594.misc @@ -0,0 +1 @@ +Add missing type hints to storage classes. 
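A minimal, self-contained sketch of the pattern this patch adopts: frozen attrs classes with `auto_attribs`, replacing dict- and TypedDict-based structures. The class name and fields mirror `HttpPushAction` from the diff below, in simplified form; the example itself is not part of the patch.

    import attr

    @attr.s(slots=True, frozen=True, auto_attribs=True)
    class HttpPushAction:
        event_id: str
        room_id: str
        stream_ordering: int

    pa = HttpPushAction(event_id="$ev", room_id="!room:example.com", stream_ordering=7)
    print(pa.event_id)  # typed attribute access replaces pa["event_id"]
    # Assigning, e.g. `pa.event_id = "x"`, raises attr.exceptions.FrozenInstanceError.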
diff --git a/mypy.ini b/mypy.ini index 3279c9bb2167..57e1a5df43f4 100644 --- a/mypy.ini +++ b/mypy.ini @@ -28,7 +28,6 @@ exclude = (?x) |synapse/storage/databases/main/cache.py |synapse/storage/databases/main/devices.py |synapse/storage/databases/main/event_federation.py - |synapse/storage/databases/main/event_push_actions.py |synapse/storage/databases/main/events_bg_updates.py |synapse/storage/databases/main/group_server.py |synapse/storage/databases/main/metrics.py @@ -200,6 +199,9 @@ disallow_untyped_defs = True [mypy-synapse.storage.databases.main.end_to_end_keys] disallow_untyped_defs = True +[mypy-synapse.storage.databases.main.event_push_actions] +disallow_untyped_defs = True + [mypy-synapse.storage.databases.main.events_worker] disallow_untyped_defs = True diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index d24124d6ac7b..7baf3f199c51 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -36,6 +36,7 @@ from synapse.logging.context import current_context from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, start_active_span from synapse.push.clientformat import format_push_rules_for_user +from synapse.storage.databases.main.event_push_actions import NotifCounts from synapse.storage.roommember import MemberSummary from synapse.storage.state import StateFilter from synapse.types import ( @@ -1041,7 +1042,7 @@ async def compute_state_delta( async def unread_notifs_for_room_id( self, room_id: str, sync_config: SyncConfig - ) -> Dict[str, int]: + ) -> NotifCounts: with Measure(self.clock, "unread_notifs_for_room_id"): last_unread_event_id = await self.store.get_last_receipt_event_id_for_user( user_id=sync_config.user.to_string(), @@ -1049,10 +1050,9 @@ async def unread_notifs_for_room_id( receipt_type=ReceiptTypes.READ, ) - notifs = await self.store.get_unread_event_push_actions_by_room_for_user( + return await self.store.get_unread_event_push_actions_by_room_for_user( room_id, sync_config.user.to_string(), last_unread_event_id ) - return notifs async def generate_sync_result( self, @@ -2174,10 +2174,10 @@ async def _generate_room_entry( if room_sync or always_include: notifs = await self.unread_notifs_for_room_id(room_id, sync_config) - unread_notifications["notification_count"] = notifs["notify_count"] - unread_notifications["highlight_count"] = notifs["highlight_count"] + unread_notifications["notification_count"] = notifs.notify_count + unread_notifications["highlight_count"] = notifs.highlight_count - room_sync.unread_count = notifs["unread_count"] + room_sync.unread_count = notifs.unread_count sync_result_builder.joined.append(room_sync) diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index 4f13c0418ab9..39bb2acae4b5 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -177,12 +177,12 @@ async def _unsafe_process(self) -> None: return for push_action in unprocessed: - received_at = push_action["received_ts"] + received_at = push_action.received_ts if received_at is None: received_at = 0 notif_ready_at = received_at + DELAY_BEFORE_MAIL_MS - room_ready_at = self.room_ready_to_notify_at(push_action["room_id"]) + room_ready_at = self.room_ready_to_notify_at(push_action.room_id) should_notify_at = max(notif_ready_at, room_ready_at) @@ -193,23 +193,23 @@ async def _unsafe_process(self) -> None: # to be delivered. 
reason: EmailReason = { - "room_id": push_action["room_id"], + "room_id": push_action.room_id, "now": self.clock.time_msec(), "received_at": received_at, "delay_before_mail_ms": DELAY_BEFORE_MAIL_MS, - "last_sent_ts": self.get_room_last_sent_ts(push_action["room_id"]), - "throttle_ms": self.get_room_throttle_ms(push_action["room_id"]), + "last_sent_ts": self.get_room_last_sent_ts(push_action.room_id), + "throttle_ms": self.get_room_throttle_ms(push_action.room_id), } await self.send_notification(unprocessed, reason) await self.save_last_stream_ordering_and_success( - max(ea["stream_ordering"] for ea in unprocessed) + max(ea.stream_ordering for ea in unprocessed) ) # we update the throttle on all the possible unprocessed push actions for ea in unprocessed: - await self.sent_notif_update_throttle(ea["room_id"], ea) + await self.sent_notif_update_throttle(ea.room_id, ea) break else: if soonest_due_at is None or should_notify_at < soonest_due_at: @@ -284,10 +284,10 @@ async def sent_notif_update_throttle( # THROTTLE_RESET_AFTER_MS after the previous one that triggered a # notif, we release the throttle. Otherwise, the throttle is increased. time_of_previous_notifs = await self.store.get_time_of_last_push_action_before( - notified_push_action["stream_ordering"] + notified_push_action.stream_ordering ) - time_of_this_notifs = notified_push_action["received_ts"] + time_of_this_notifs = notified_push_action.received_ts if time_of_previous_notifs is not None and time_of_this_notifs is not None: gap = time_of_this_notifs - time_of_previous_notifs diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 3fa603ccb7f7..96559081d0bf 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -199,7 +199,7 @@ async def _unsafe_process(self) -> None: "http-push", tags={ "authenticated_entity": self.user_id, - "event_id": push_action["event_id"], + "event_id": push_action.event_id, "app_id": self.app_id, "app_display_name": self.app_display_name, }, @@ -209,7 +209,7 @@ async def _unsafe_process(self) -> None: if processed: http_push_processed_counter.inc() self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC - self.last_stream_ordering = push_action["stream_ordering"] + self.last_stream_ordering = push_action.stream_ordering pusher_still_exists = ( await self.store.update_pusher_last_stream_ordering_and_success( self.app_id, @@ -252,7 +252,7 @@ async def _unsafe_process(self) -> None: self.pushkey, ) self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC - self.last_stream_ordering = push_action["stream_ordering"] + self.last_stream_ordering = push_action.stream_ordering await self.store.update_pusher_last_stream_ordering( self.app_id, self.pushkey, @@ -275,17 +275,17 @@ async def _unsafe_process(self) -> None: break async def _process_one(self, push_action: HttpPushAction) -> bool: - if "notify" not in push_action["actions"]: + if "notify" not in push_action.actions: return True - tweaks = push_rule_evaluator.tweaks_for_actions(push_action["actions"]) + tweaks = push_rule_evaluator.tweaks_for_actions(push_action.actions) badge = await push_tools.get_badge_count( self.hs.get_datastore(), self.user_id, group_by_room=self._group_unread_count_by_room, ) - event = await self.store.get_event(push_action["event_id"], allow_none=True) + event = await self.store.get_event(push_action.event_id, allow_none=True) if event is None: return True # It's been redacted rejected = await self.dispatch_push(event, tweaks, badge) diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 
ba4f866487ec..ff904c2b4a2f 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -232,15 +232,13 @@ async def send_notification_mail( reason: The notification that was ready and is the cause of an email being sent. """ - rooms_in_order = deduped_ordered_list([pa["room_id"] for pa in push_actions]) + rooms_in_order = deduped_ordered_list([pa.room_id for pa in push_actions]) - notif_events = await self.store.get_events( - [pa["event_id"] for pa in push_actions] - ) + notif_events = await self.store.get_events([pa.event_id for pa in push_actions]) notifs_by_room: Dict[str, List[EmailPushAction]] = {} for pa in push_actions: - notifs_by_room.setdefault(pa["room_id"], []).append(pa) + notifs_by_room.setdefault(pa.room_id, []).append(pa) # collect the current state for all the rooms in which we have # notifications @@ -264,7 +262,7 @@ async def _fetch_room_state(room_id: str) -> None: await concurrently_execute(_fetch_room_state, rooms_in_order, 3) # actually sort our so-called rooms_in_order list, most recent room first - rooms_in_order.sort(key=lambda r: -(notifs_by_room[r][-1]["received_ts"] or 0)) + rooms_in_order.sort(key=lambda r: -(notifs_by_room[r][-1].received_ts or 0)) rooms: List[RoomVars] = [] @@ -356,7 +354,7 @@ async def _get_room_vars( # Check if one of the notifs is an invite event for the user. is_invite = False for n in notifs: - ev = notif_events[n["event_id"]] + ev = notif_events[n.event_id] if ev.type == EventTypes.Member and ev.state_key == user_id: if ev.content.get("membership") == Membership.INVITE: is_invite = True @@ -376,7 +374,7 @@ async def _get_room_vars( if not is_invite: for n in notifs: notifvars = await self._get_notif_vars( - n, user_id, notif_events[n["event_id"]], room_state_ids + n, user_id, notif_events[n.event_id], room_state_ids ) # merge overlapping notifs together. @@ -444,15 +442,15 @@ async def _get_notif_vars( """ results = await self.store.get_events_around( - notif["room_id"], - notif["event_id"], + notif.room_id, + notif.event_id, before_limit=CONTEXT_BEFORE, after_limit=CONTEXT_AFTER, ) ret: NotifVars = { "link": self._make_notif_link(notif), - "ts": notif["received_ts"], + "ts": notif.received_ts, "messages": [], } @@ -516,7 +514,7 @@ async def _get_message_vars( ret: MessageVars = { "event_type": event.type, - "is_historical": event.event_id != notif["event_id"], + "is_historical": event.event_id != notif.event_id, "id": event.event_id, "ts": event.origin_server_ts, "sender_name": sender_name, @@ -610,7 +608,7 @@ async def _make_summary_text_single_room( # See if one of the notifs is an invite event for the user invite_event = None for n in notifs: - ev = notif_events[n["event_id"]] + ev = notif_events[n.event_id] if ev.type == EventTypes.Member and ev.state_key == user_id: if ev.content.get("membership") == Membership.INVITE: invite_event = ev @@ -659,7 +657,7 @@ async def _make_summary_text_single_room( if len(notifs) == 1: # There is just the one notification, so give some detail sender_name = None - event = notif_events[notifs[0]["event_id"]] + event = notif_events[notifs[0].event_id] if ("m.room.member", event.sender) in room_state_ids: state_event_id = room_state_ids[("m.room.member", event.sender)] state_event = await self.store.get_event(state_event_id) @@ -753,9 +751,9 @@ async def _make_summary_text_from_member_events( # are already in descending received_ts. 
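+        # (Keeping only the first event seen per sender therefore keeps that
+        # sender's most recent notification.)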
sender_ids = {} for n in notifs: - sender = notif_events[n["event_id"]].sender + sender = notif_events[n.event_id].sender if sender not in sender_ids: - sender_ids[sender] = n["event_id"] + sender_ids[sender] = n.event_id # Get the actual member events (in order to calculate a pretty name for # the room). @@ -830,17 +828,17 @@ def _make_notif_link(self, notif: EmailPushAction) -> str: if self.hs.config.email.email_riot_base_url: return "%s/#/room/%s/%s" % ( self.hs.config.email.email_riot_base_url, - notif["room_id"], - notif["event_id"], + notif.room_id, + notif.event_id, ) elif self.app_name == "Vector": # need /beta for Universal Links to work on iOS return "https://vector.im/beta/#/room/%s/%s" % ( - notif["room_id"], - notif["event_id"], + notif.room_id, + notif.event_id, ) else: - return "https://matrix.to/#/%s/%s" % (notif["room_id"], notif["event_id"]) + return "https://matrix.to/#/%s/%s" % (notif.room_id, notif.event_id) def _make_unsubscribe_link( self, user_id: str, app_id: str, email_address: str diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py index da641aca477c..957c9b780b94 100644 --- a/synapse/push/push_tools.py +++ b/synapse/push/push_tools.py @@ -37,7 +37,7 @@ async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) - room_id, user_id, last_unread_event_id ) ) - if notifs["notify_count"] == 0: + if notifs.notify_count == 0: continue if group_by_room: @@ -45,7 +45,7 @@ async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) - badge += 1 else: # increment the badge count by the number of unread messages in the room - badge += notifs["notify_count"] + badge += notifs.notify_count return badge diff --git a/synapse/rest/client/notifications.py b/synapse/rest/client/notifications.py index b12a332776e4..acd0c9e135d1 100644 --- a/synapse/rest/client/notifications.py +++ b/synapse/rest/client/notifications.py @@ -58,7 +58,7 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: user_id, ReceiptTypes.READ ) - notif_event_ids = [pa["event_id"] for pa in push_actions] + notif_event_ids = [pa.event_id for pa in push_actions] notif_events = await self.store.get_events(notif_event_ids) returned_push_actions = [] @@ -67,30 +67,30 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: for pa in push_actions: returned_pa = { - "room_id": pa["room_id"], - "profile_tag": pa["profile_tag"], - "actions": pa["actions"], - "ts": pa["received_ts"], + "room_id": pa.room_id, + "profile_tag": pa.profile_tag, + "actions": pa.actions, + "ts": pa.received_ts, "event": ( await self._event_serializer.serialize_event( - notif_events[pa["event_id"]], + notif_events[pa.event_id], self.clock.time_msec(), event_format=format_event_for_client_v2_without_room_id, ) ), } - if pa["room_id"] not in receipts_by_room: + if pa.room_id not in receipts_by_room: returned_pa["read"] = False else: - receipt = receipts_by_room[pa["room_id"]] + receipt = receipts_by_room[pa.room_id] returned_pa["read"] = ( receipt["topological_ordering"], receipt["stream_ordering"], - ) >= (pa["topological_ordering"], pa["stream_ordering"]) + ) >= (pa.topological_ordering, pa.stream_ordering) returned_push_actions.append(returned_pa) - next_token = str(pa["stream_ordering"]) + next_token = str(pa.stream_ordering) return 200, {"notifications": returned_push_actions, "next_token": next_token} diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 
eacff3e432d8..98ea0e884cb3 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -16,7 +16,6 @@ from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union import attr -from typing_extensions import TypedDict from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore, db_to_json @@ -34,29 +33,64 @@ logger = logging.getLogger(__name__) -DEFAULT_NOTIF_ACTION = ["notify", {"set_tweak": "highlight", "value": False}] -DEFAULT_HIGHLIGHT_ACTION = [ +DEFAULT_NOTIF_ACTION: List[Union[dict, str]] = [ + "notify", + {"set_tweak": "highlight", "value": False}, +] +DEFAULT_HIGHLIGHT_ACTION: List[Union[dict, str]] = [ "notify", {"set_tweak": "sound", "value": "default"}, {"set_tweak": "highlight"}, ] -class BasePushAction(TypedDict): - event_id: str - actions: List[Union[dict, str]] - +@attr.s(slots=True, frozen=True, auto_attribs=True) +class HttpPushAction: + """ + HttpPushAction instances include the information used to generate HTTP + requests to a push gateway. + """ -class HttpPushAction(BasePushAction): + event_id: str room_id: str stream_ordering: int + actions: List[Union[dict, str]] +@attr.s(slots=True, frozen=True, auto_attribs=True) class EmailPushAction(HttpPushAction): + """ + EmailPushAction instances include the information used to render an email + push notification. + """ + received_ts: Optional[int] -def _serialize_action(actions, is_highlight): +@attr.s(slots=True, frozen=True, auto_attribs=True) +class UserPushAction(EmailPushAction): + """ + UserPushAction instances include the necessary information to respond to + /notifications requests. + """ + + topological_ordering: int + highlight: bool + profile_tag: str + + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class NotifCounts: + """ + The per-user, per-room count of notifications. Used by sync and push. + """ + + notify_count: int + unread_count: int + highlight_count: int + + +def _serialize_action(actions: List[Union[dict, str]], is_highlight: bool) -> str: """Custom serializer for actions. This allows us to "compress" common actions. We use the fact that most users have the same actions for notifs (and for @@ -74,7 +108,7 @@ def _serialize_action(actions, is_highlight): return json_encoder.encode(actions) -def _deserialize_action(actions, is_highlight): +def _deserialize_action(actions: str, is_highlight: bool) -> List[Union[dict, str]]: """Custom deserializer for actions. This allows us to "compress" common actions""" if actions: return db_to_json(actions) @@ -95,8 +129,8 @@ def __init__( super().__init__(database, db_conn, hs) # These get correctly set by _find_stream_orderings_for_times_txn - self.stream_ordering_month_ago = None - self.stream_ordering_day_ago = None + self.stream_ordering_month_ago: Optional[int] = None + self.stream_ordering_day_ago: Optional[int] = None cur = db_conn.cursor(txn_name="_find_stream_orderings_for_times_txn") self._find_stream_orderings_for_times_txn(cur) @@ -120,7 +154,7 @@ async def get_unread_event_push_actions_by_room_for_user( room_id: str, user_id: str, last_read_event_id: Optional[str], - ) -> Dict[str, int]: + ) -> NotifCounts: """Get the notification count, the highlight count and the unread message count for a given user in a given room after the given read receipt. 
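A quick illustration of the action "compression" described in the `_serialize_action` and `_deserialize_action` docstrings above, assuming, as the `if actions:` falsiness check suggests, that the compact stored form of a default action list is the empty string:

    # hypothetical round-trip; not part of this patch
    _serialize_action(DEFAULT_NOTIF_ACTION, False)  # -> ""  (common case, compressed)
    _serialize_action(["notify"], False)            # -> '["notify"]'  (JSON fallback)
    _deserialize_action("", False)                  # -> DEFAULT_NOTIF_ACTION
    _deserialize_action("", True)                   # -> DEFAULT_HIGHLIGHT_ACTION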
@@ -149,15 +183,15 @@ async def get_unread_event_push_actions_by_room_for_user( def _get_unread_counts_by_receipt_txn( self, - txn, - room_id, - user_id, - last_read_event_id, - ): + txn: LoggingTransaction, + room_id: str, + user_id: str, + last_read_event_id: Optional[str], + ) -> NotifCounts: stream_ordering = None if last_read_event_id is not None: - stream_ordering = self.get_stream_id_for_event_txn( + stream_ordering = self.get_stream_id_for_event_txn( # type: ignore[attr-defined] txn, last_read_event_id, allow_none=True, @@ -175,13 +209,15 @@ def _get_unread_counts_by_receipt_txn( retcol="event_id", ) - stream_ordering = self.get_stream_id_for_event_txn(txn, event_id) + stream_ordering = self.get_stream_id_for_event_txn(txn, event_id) # type: ignore[attr-defined] return self._get_unread_counts_by_pos_txn( txn, room_id, user_id, stream_ordering ) - def _get_unread_counts_by_pos_txn(self, txn, room_id, user_id, stream_ordering): + def _get_unread_counts_by_pos_txn( + self, txn: LoggingTransaction, room_id: str, user_id: str, stream_ordering: int + ) -> NotifCounts: sql = ( "SELECT" " COUNT(CASE WHEN notif = 1 THEN 1 END)," @@ -219,16 +255,16 @@ def _get_unread_counts_by_pos_txn(self, txn, room_id, user_id, stream_ordering): # for this row. unread_count += row[1] - return { - "notify_count": notif_count, - "unread_count": unread_count, - "highlight_count": highlight_count, - } + return NotifCounts( + notify_count=notif_count, + unread_count=unread_count, + highlight_count=highlight_count, + ) async def get_push_action_users_in_range( - self, min_stream_ordering, max_stream_ordering - ): - def f(txn): + self, min_stream_ordering: int, max_stream_ordering: int + ) -> List[str]: + def f(txn: LoggingTransaction) -> List[str]: sql = ( "SELECT DISTINCT(user_id) FROM event_push_actions WHERE" " stream_ordering >= ? AND stream_ordering <= ? AND notif = 1" @@ -236,8 +272,7 @@ def f(txn): txn.execute(sql, (min_stream_ordering, max_stream_ordering)) return [r[0] for r in txn] - ret = await self.db_pool.runInteraction("get_push_action_users_in_range", f) - return ret + return await self.db_pool.runInteraction("get_push_action_users_in_range", f) async def get_unread_push_actions_for_user_in_range_for_http( self, @@ -263,7 +298,9 @@ async def get_unread_push_actions_for_user_in_range_for_http( """ # find rooms that have a read receipt in them and return the next # push actions - def get_after_receipt(txn): + def get_after_receipt( + txn: LoggingTransaction, + ) -> List[Tuple[str, str, int, str, bool]]: # find rooms that have a read receipt in them and return the next # push actions sql = ( @@ -289,7 +326,7 @@ def get_after_receipt(txn): ) args = [user_id, user_id, min_stream_ordering, max_stream_ordering, limit] txn.execute(sql, args) - return txn.fetchall() + return txn.fetchall() # type: ignore[return-value] after_read_receipt = await self.db_pool.runInteraction( "get_unread_push_actions_for_user_in_range_http_arr", get_after_receipt @@ -298,7 +335,9 @@ def get_after_receipt(txn): # There are rooms with push actions in them but you don't have a read receipt in # them e.g. rooms you've been invited to, so get push actions for rooms which do # not have read receipts in them too. 
- def get_no_receipt(txn): + def get_no_receipt( + txn: LoggingTransaction, + ) -> List[Tuple[str, str, int, str, bool]]: sql = ( "SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions," " ep.highlight " @@ -318,19 +357,19 @@ def get_no_receipt(txn): ) args = [user_id, user_id, min_stream_ordering, max_stream_ordering, limit] txn.execute(sql, args) - return txn.fetchall() + return txn.fetchall() # type: ignore[return-value] no_read_receipt = await self.db_pool.runInteraction( "get_unread_push_actions_for_user_in_range_http_nrr", get_no_receipt ) notifs = [ - { - "event_id": row[0], - "room_id": row[1], - "stream_ordering": row[2], - "actions": _deserialize_action(row[3], row[4]), - } + HttpPushAction( + event_id=row[0], + room_id=row[1], + stream_ordering=row[2], + actions=_deserialize_action(row[3], row[4]), + ) for row in after_read_receipt + no_read_receipt ] @@ -338,7 +377,7 @@ def get_no_receipt(txn): # contain results from the first query, correctly ordered, followed # by results from the second query, but we want them all ordered # by stream_ordering, oldest first. - notifs.sort(key=lambda r: r["stream_ordering"]) + notifs.sort(key=lambda r: r.stream_ordering) # Take only up to the limit. We have to stop at the limit because # one of the subqueries may have hit the limit. @@ -368,7 +407,9 @@ async def get_unread_push_actions_for_user_in_range_for_email( """ # find rooms that have a read receipt in them and return the most recent # push actions - def get_after_receipt(txn): + def get_after_receipt( + txn: LoggingTransaction, + ) -> List[Tuple[str, str, int, str, bool, int]]: sql = ( "SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions," " ep.highlight, e.received_ts" @@ -393,7 +434,7 @@ def get_after_receipt(txn): ) args = [user_id, user_id, min_stream_ordering, max_stream_ordering, limit] txn.execute(sql, args) - return txn.fetchall() + return txn.fetchall() # type: ignore[return-value] after_read_receipt = await self.db_pool.runInteraction( "get_unread_push_actions_for_user_in_range_email_arr", get_after_receipt @@ -402,7 +443,9 @@ def get_after_receipt(txn): # There are rooms with push actions in them but you don't have a read receipt in # them e.g. rooms you've been invited to, so get push actions for rooms which do # not have read receipts in them too. - def get_no_receipt(txn): + def get_no_receipt( + txn: LoggingTransaction, + ) -> List[Tuple[str, str, int, str, bool, int]]: sql = ( "SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions," " ep.highlight, e.received_ts" @@ -422,7 +465,7 @@ def get_no_receipt(txn): ) args = [user_id, user_id, min_stream_ordering, max_stream_ordering, limit] txn.execute(sql, args) - return txn.fetchall() + return txn.fetchall() # type: ignore[return-value] no_read_receipt = await self.db_pool.runInteraction( "get_unread_push_actions_for_user_in_range_email_nrr", get_no_receipt @@ -430,13 +473,13 @@ def get_no_receipt(txn): # Make a list of dicts from the two sets of results. 
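+        # (They are now `EmailPushAction` instances rather than plain dicts.)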
notifs = [ - { - "event_id": row[0], - "room_id": row[1], - "stream_ordering": row[2], - "actions": _deserialize_action(row[3], row[4]), - "received_ts": row[5], - } + EmailPushAction( + event_id=row[0], + room_id=row[1], + stream_ordering=row[2], + actions=_deserialize_action(row[3], row[4]), + received_ts=row[5], + ) for row in after_read_receipt + no_read_receipt ] @@ -444,7 +487,7 @@ def get_no_receipt(txn): # contain results from the first query, correctly ordered, followed # by results from the second query, but we want them all ordered # by received_ts (most recent first) - notifs.sort(key=lambda r: -(r["received_ts"] or 0)) + notifs.sort(key=lambda r: -(r.received_ts or 0)) # Now return the first `limit` return notifs[:limit] @@ -465,7 +508,7 @@ async def get_if_maybe_push_in_range_for_user( True if there may be push to process, False if there definitely isn't. """ - def _get_if_maybe_push_in_range_for_user_txn(txn): + def _get_if_maybe_push_in_range_for_user_txn(txn: LoggingTransaction) -> bool: sql = """ SELECT 1 FROM event_push_actions WHERE user_id = ? AND stream_ordering > ? AND notif = 1 @@ -499,19 +542,21 @@ async def add_push_actions_to_staging( # This is a helper function for generating the necessary tuple that # can be used to insert into the `event_push_actions_staging` table. - def _gen_entry(user_id, actions): + def _gen_entry( + user_id: str, actions: List[Union[dict, str]] + ) -> Tuple[str, str, str, int, int, int]: is_highlight = 1 if _action_has_highlight(actions) else 0 notif = 1 if "notify" in actions else 0 return ( event_id, # event_id column user_id, # user_id column - _serialize_action(actions, is_highlight), # actions column + _serialize_action(actions, bool(is_highlight)), # actions column notif, # notif column is_highlight, # highlight column int(count_as_unread), # unread column ) - def _add_push_actions_to_staging_txn(txn): + def _add_push_actions_to_staging_txn(txn: LoggingTransaction) -> None: # We don't use simple_insert_many here to avoid the overhead # of generating lists of dicts. @@ -539,12 +584,11 @@ async def remove_push_actions_from_staging(self, event_id: str) -> None: """ try: - res = await self.db_pool.simple_delete( + await self.db_pool.simple_delete( table="event_push_actions_staging", keyvalues={"event_id": event_id}, desc="remove_push_actions_from_staging", ) - return res except Exception: # this method is called from an exception handler, so propagating # another exception here really isn't helpful - there's nothing @@ -597,7 +641,9 @@ async def find_first_stream_ordering_after_ts(self, ts: int) -> int: ) @staticmethod - def _find_first_stream_ordering_after_ts_txn(txn, ts): + def _find_first_stream_ordering_after_ts_txn( + txn: LoggingTransaction, ts: int + ) -> int: """ Find the stream_ordering of the first event that was received on or after a given timestamp. 
This is relatively slow as there is no index @@ -609,14 +655,14 @@ def _find_first_stream_ordering_after_ts_txn(txn, ts): stream_ordering Args: - txn (twisted.enterprise.adbapi.Transaction): - ts (int): timestamp to search for + txn: + ts: timestamp to search for Returns: - int: stream ordering + The stream ordering """ txn.execute("SELECT MAX(stream_ordering) FROM events") - max_stream_ordering = txn.fetchone()[0] + max_stream_ordering = txn.fetchone()[0] # type: ignore[index] if max_stream_ordering is None: return 0 @@ -672,8 +718,10 @@ def _find_first_stream_ordering_after_ts_txn(txn, ts): return range_end - async def get_time_of_last_push_action_before(self, stream_ordering): - def f(txn): + async def get_time_of_last_push_action_before( + self, stream_ordering: int + ) -> Optional[int]: + def f(txn: LoggingTransaction) -> Optional[Tuple[int]]: sql = ( "SELECT e.received_ts" " FROM event_push_actions AS ep" @@ -683,7 +731,7 @@ def f(txn): " LIMIT 1" ) txn.execute(sql, (stream_ordering,)) - return txn.fetchone() + return txn.fetchone() # type: ignore[return-value] result = await self.db_pool.runInteraction( "get_time_of_last_push_action_before", f @@ -691,7 +739,7 @@ def f(txn): return result[0] if result else None @wrap_as_background_process("rotate_notifs") - async def _rotate_notifs(self): + async def _rotate_notifs(self) -> None: if self._doing_notif_rotation or self.stream_ordering_day_ago is None: return self._doing_notif_rotation = True @@ -709,7 +757,7 @@ async def _rotate_notifs(self): finally: self._doing_notif_rotation = False - def _rotate_notifs_txn(self, txn): + def _rotate_notifs_txn(self, txn: LoggingTransaction) -> bool: """Archives older notifications into event_push_summary. Returns whether the archiving process has caught up or not. """ @@ -734,6 +782,7 @@ def _rotate_notifs_txn(self, txn): stream_row = txn.fetchone() if stream_row: (offset_stream_ordering,) = stream_row + assert self.stream_ordering_day_ago is not None rotate_to_stream_ordering = min( self.stream_ordering_day_ago, offset_stream_ordering ) @@ -749,7 +798,9 @@ def _rotate_notifs_txn(self, txn): # We have caught up iff we were limited by `stream_ordering_day_ago` return caught_up - def _rotate_notifs_before_txn(self, txn, rotate_to_stream_ordering): + def _rotate_notifs_before_txn( + self, txn: LoggingTransaction, rotate_to_stream_ordering: int + ) -> None: old_rotate_stream_ordering = self.db_pool.simple_select_one_onecol_txn( txn, table="event_push_summary_stream_ordering", @@ -870,8 +921,8 @@ def _rotate_notifs_before_txn(self, txn, rotate_to_stream_ordering): ) def _remove_old_push_actions_before_txn( - self, txn, room_id, user_id, stream_ordering - ): + self, txn: LoggingTransaction, room_id: str, user_id: str, stream_ordering: int + ) -> None: """ Purges old push actions for a user and room before a given stream_ordering. @@ -943,9 +994,15 @@ def __init__( ) async def get_push_actions_for_user( - self, user_id, before=None, limit=50, only_highlight=False - ): - def f(txn): + self, + user_id: str, + before: Optional[str] = None, + limit: int = 50, + only_highlight: bool = False, + ) -> List[UserPushAction]: + def f( + txn: LoggingTransaction, + ) -> List[Tuple[str, str, int, int, str, bool, str, int]]: before_clause = "" if before: before_clause = "AND epa.stream_ordering < ?" @@ -972,32 +1029,42 @@ def f(txn): " LIMIT ?" 
% (before_clause,) ) txn.execute(sql, args) - return self.db_pool.cursor_to_dict(txn) + return txn.fetchall() # type: ignore[return-value] push_actions = await self.db_pool.runInteraction("get_push_actions_for_user", f) - for pa in push_actions: - pa["actions"] = _deserialize_action(pa["actions"], pa["highlight"]) - return push_actions + return [ + UserPushAction( + event_id=row[0], + room_id=row[1], + stream_ordering=row[2], + actions=_deserialize_action(row[4], row[5]), + received_ts=row[7], + topological_ordering=row[3], + highlight=row[5], + profile_tag=row[6], + ) + for row in push_actions + ] -def _action_has_highlight(actions): +def _action_has_highlight(actions: List[Union[dict, str]]) -> bool: for action in actions: - try: - if action.get("set_tweak", None) == "highlight": - return action.get("value", True) - except AttributeError: - pass + if not isinstance(action, dict): + continue + + if action.get("set_tweak", None) == "highlight": + return action.get("value", True) return False -@attr.s(slots=True) +@attr.s(slots=True, auto_attribs=True) class _EventPushSummary: """Summary of pending event push actions for a given user in a given room. Used in _rotate_notifs_before_txn to manipulate results from event_push_actions. """ - unread_count = attr.ib(type=int) - stream_ordering = attr.ib(type=int) - old_user_id = attr.ib(type=str) - notif_count = attr.ib(type=int) + unread_count: int + stream_ordering: int + old_user_id: str + notif_count: int diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py index b25a06b4271a..eca6a443af78 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/slave/storage/test_events.py @@ -20,6 +20,7 @@ from synapse.events import FrozenEvent, _EventInternalMetadata, make_event_from_dict from synapse.handlers.room import RoomEventSource from synapse.replication.slave.storage.events import SlavedEventStore +from synapse.storage.databases.main.event_push_actions import NotifCounts from synapse.storage.roommember import GetRoomsForUserWithStreamOrdering, RoomsForUser from synapse.types import PersistedEventPosition @@ -166,7 +167,7 @@ def test_push_actions_for_user(self): self.check( "get_unread_event_push_actions_by_room_for_user", [ROOM_ID, USER_ID_2, event1.event_id], - {"highlight_count": 0, "unread_count": 0, "notify_count": 0}, + NotifCounts(highlight_count=0, unread_count=0, notify_count=0), ) self.persist( @@ -179,7 +180,7 @@ def test_push_actions_for_user(self): self.check( "get_unread_event_push_actions_by_room_for_user", [ROOM_ID, USER_ID_2, event1.event_id], - {"highlight_count": 0, "unread_count": 0, "notify_count": 1}, + NotifCounts(highlight_count=0, unread_count=0, notify_count=1), ) self.persist( @@ -194,7 +195,7 @@ def test_push_actions_for_user(self): self.check( "get_unread_event_push_actions_by_room_for_user", [ROOM_ID, USER_ID_2, event1.event_id], - {"highlight_count": 1, "unread_count": 0, "notify_count": 2}, + NotifCounts(highlight_count=1, unread_count=0, notify_count=2), ) def test_get_rooms_for_user_with_stream_ordering(self): diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py index bb5939ba4a51..738f3ad1dcc7 100644 --- a/tests/storage/test_event_push_actions.py +++ b/tests/storage/test_event_push_actions.py @@ -14,6 +14,8 @@ from unittest.mock import Mock +from synapse.storage.databases.main.event_push_actions import NotifCounts + from tests.unittest import HomeserverTestCase USER_ID = "@user:example.com" 
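+# (attrs-generated classes compare by value, which is what allows the test
+# below to assert equality against a freshly constructed NotifCounts.)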
@@ -57,11 +59,11 @@ def _assert_counts(noitf_count, highlight_count): ) self.assertEquals( counts, - { - "notify_count": noitf_count, - "unread_count": 0, # Unread counts are tested in the sync tests. - "highlight_count": highlight_count, - }, + NotifCounts( + notify_count=noitf_count, + unread_count=0, # Unread counts are tested in the sync tests. + highlight_count=highlight_count, + ), ) def _inject_actions(stream, action): From e6897e7383178db0bd6243e68346a420bf90b982 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Tue, 21 Dec 2021 16:12:05 +0000 Subject: [PATCH 021/474] Refactor `tests.util.setup_test_homeserver` and `tests.server.setup_test_homeserver`. (#11503) --- changelog.d/11503.misc | 1 + tests/server.py | 199 +++++++++++++++++++++++++++++-- tests/storage/test_base.py | 3 +- tests/storage/test_roommember.py | 2 +- tests/utils.py | 175 +-------------------------- 5 files changed, 195 insertions(+), 185 deletions(-) create mode 100644 changelog.d/11503.misc diff --git a/changelog.d/11503.misc b/changelog.d/11503.misc new file mode 100644 index 000000000000..03a24a922495 --- /dev/null +++ b/changelog.d/11503.misc @@ -0,0 +1 @@ +Refactor `tests.util.setup_test_homeserver` and `tests.server.setup_test_homeserver`. \ No newline at end of file diff --git a/tests/server.py b/tests/server.py index 40cf5b12c3a0..ca2b7a5b9771 100644 --- a/tests/server.py +++ b/tests/server.py @@ -11,9 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import hashlib import json import logging +import time +import uuid +import warnings from collections import deque from io import SEEK_END, BytesIO from typing import ( @@ -27,6 +30,7 @@ Type, Union, ) +from unittest.mock import Mock import attr from typing_extensions import Deque @@ -53,11 +57,24 @@ from twisted.web.resource import IResource from twisted.web.server import Request, Site +from synapse.config.database import DatabaseConnectionConfig from synapse.http.site import SynapseRequest +from synapse.server import HomeServer +from synapse.storage import DataStore +from synapse.storage.engines import PostgresEngine, create_engine from synapse.types import JsonDict from synapse.util import Clock -from tests.utils import setup_test_homeserver as _sth +from tests.utils import ( + LEAVE_DB, + POSTGRES_BASE_DB, + POSTGRES_HOST, + POSTGRES_PASSWORD, + POSTGRES_USER, + USE_POSTGRES_FOR_TESTS, + MockClock, + default_config, +) logger = logging.getLogger(__name__) @@ -450,14 +467,11 @@ def _(res): return d -def setup_test_homeserver(cleanup_func, *args, **kwargs): +def _make_test_homeserver_synchronous(server: HomeServer) -> None: """ - Set up a synchronous test server, driven by the reactor used by - the homeserver. + Make the given test homeserver's database interactions synchronous. """ - server = _sth(cleanup_func, *args, **kwargs) - # Make the thread pool synchronous. clock = server.get_clock() for database in server.get_datastores().databases: @@ -485,6 +499,7 @@ def runInteraction(interaction, *args, **kwargs): pool.runWithConnection = runWithConnection pool.runInteraction = runInteraction + # Replace the thread pool with a threadless 'thread' pool pool.threadpool = ThreadPool(clock._reactor) pool.running = True @@ -492,8 +507,6 @@ def runInteraction(interaction, *args, **kwargs): # thread, so we need to disable the dedicated thread behaviour. 
server.get_datastores().main.USE_DEDICATED_DB_THREADS_FOR_EVENT_FETCHING = False - return server - def get_clock() -> Tuple[ThreadedMemoryReactorClock, Clock]: clock = ThreadedMemoryReactorClock() @@ -673,3 +686,171 @@ def connect_client( client.makeConnection(FakeTransport(server, reactor)) return client, server + + +class TestHomeServer(HomeServer): + DATASTORE_CLASS = DataStore + + +def setup_test_homeserver( + cleanup_func, + name="test", + config=None, + reactor=None, + homeserver_to_use: Type[HomeServer] = TestHomeServer, + **kwargs, +): + """ + Setup a homeserver suitable for running tests against. Keyword arguments + are passed to the Homeserver constructor. + + If no datastore is supplied, one is created and given to the homeserver. + + Args: + cleanup_func : The function used to register a cleanup routine for + after the test. + + Calling this method directly is deprecated: you should instead derive from + HomeserverTestCase. + """ + if reactor is None: + from twisted.internet import reactor + + if config is None: + config = default_config(name, parse=True) + + config.ldap_enabled = False + + if "clock" not in kwargs: + kwargs["clock"] = MockClock() + + if USE_POSTGRES_FOR_TESTS: + test_db = "synapse_test_%s" % uuid.uuid4().hex + + database_config = { + "name": "psycopg2", + "args": { + "database": test_db, + "host": POSTGRES_HOST, + "password": POSTGRES_PASSWORD, + "user": POSTGRES_USER, + "cp_min": 1, + "cp_max": 5, + }, + } + else: + database_config = { + "name": "sqlite3", + "args": {"database": ":memory:", "cp_min": 1, "cp_max": 1}, + } + + if "db_txn_limit" in kwargs: + database_config["txn_limit"] = kwargs["db_txn_limit"] + + database = DatabaseConnectionConfig("master", database_config) + config.database.databases = [database] + + db_engine = create_engine(database.config) + + # Create the database before we actually try and connect to it, based off + # the template database we generate in setupdb() + if isinstance(db_engine, PostgresEngine): + db_conn = db_engine.module.connect( + database=POSTGRES_BASE_DB, + user=POSTGRES_USER, + host=POSTGRES_HOST, + password=POSTGRES_PASSWORD, + ) + db_conn.autocommit = True + cur = db_conn.cursor() + cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,)) + cur.execute( + "CREATE DATABASE %s WITH TEMPLATE %s;" % (test_db, POSTGRES_BASE_DB) + ) + cur.close() + db_conn.close() + + hs = homeserver_to_use( + name, + config=config, + version_string="Synapse/tests", + reactor=reactor, + ) + + # Install @cache_in_self attributes + for key, val in kwargs.items(): + setattr(hs, "_" + key, val) + + # Mock TLS + hs.tls_server_context_factory = Mock() + hs.tls_client_options_factory = Mock() + + hs.setup() + if homeserver_to_use == TestHomeServer: + hs.setup_background_tasks() + + if isinstance(db_engine, PostgresEngine): + database = hs.get_datastores().databases[0] + + # We need to do cleanup on PostgreSQL + def cleanup(): + import psycopg2 + + # Close all the db pools + database._db_pool.close() + + dropped = False + + # Drop the test database + db_conn = db_engine.module.connect( + database=POSTGRES_BASE_DB, + user=POSTGRES_USER, + host=POSTGRES_HOST, + password=POSTGRES_PASSWORD, + ) + db_conn.autocommit = True + cur = db_conn.cursor() + + # Try a few times to drop the DB. Some things may hold on to the + # database for a few more seconds due to flakiness, preventing + # us from dropping it when the test is over. If we can't drop + # it, warn and move on. 
+ for _ in range(5): + try: + cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,)) + db_conn.commit() + dropped = True + except psycopg2.OperationalError as e: + warnings.warn( + "Couldn't drop old db: " + str(e), category=UserWarning + ) + time.sleep(0.5) + + cur.close() + db_conn.close() + + if not dropped: + warnings.warn("Failed to drop old DB.", category=UserWarning) + + if not LEAVE_DB: + # Register the cleanup hook + cleanup_func(cleanup) + + # bcrypt is far too slow to be doing in unit tests + # Need to let the HS build an auth handler and then mess with it + # because AuthHandler's constructor requires the HS, so we can't make one + # beforehand and pass it in to the HS's constructor (chicken / egg) + async def hash(p): + return hashlib.md5(p.encode("utf8")).hexdigest() + + hs.get_auth_handler().hash = hash + + async def validate_hash(p, h): + return hashlib.md5(p.encode("utf8")).hexdigest() == h + + hs.get_auth_handler().validate_hash = validate_hash + + # Make the threadpool and database transactions synchronous for testing. + _make_test_homeserver_synchronous(hs) + + return hs diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py index ddad44bd6cbb..3e4f0579c9cb 100644 --- a/tests/storage/test_base.py +++ b/tests/storage/test_base.py @@ -23,7 +23,8 @@ from synapse.storage.engines import create_engine from tests import unittest -from tests.utils import TestHomeServer, default_config +from tests.server import TestHomeServer +from tests.utils import default_config class SQLBaseStoreTestCase(unittest.TestCase): diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index fccab733c029..5cfdfe9b852e 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -19,8 +19,8 @@ from synapse.types import UserID, create_requester from tests import unittest +from tests.server import TestHomeServer from tests.test_utils import event_injection -from tests.utils import TestHomeServer class RoomMemberStoreTestCase(unittest.HomeserverTestCase): diff --git a/tests/utils.py b/tests/utils.py index 983859120f55..6d013e851845 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -14,12 +14,7 @@ # limitations under the License. import atexit -import hashlib import os -import time -import uuid -import warnings -from typing import Type from unittest.mock import Mock, patch from urllib import parse as urlparse @@ -28,14 +23,11 @@ from synapse.api.constants import EventTypes from synapse.api.errors import CodeMessageException, cs_error from synapse.api.room_versions import RoomVersions -from synapse.config.database import DatabaseConnectionConfig from synapse.config.homeserver import HomeServerConfig from synapse.config.server import DEFAULT_ROOM_VERSION from synapse.logging.context import current_context, set_current_context -from synapse.server import HomeServer -from synapse.storage import DataStore from synapse.storage.database import LoggingDatabaseConnection -from synapse.storage.engines import PostgresEngine, create_engine +from synapse.storage.engines import create_engine from synapse.storage.prepare_database import prepare_database # set this to True to run the tests against postgres instead of sqlite. 
@@ -182,171 +174,6 @@ def default_config(name, parse=False): return config_dict -class TestHomeServer(HomeServer): - DATASTORE_CLASS = DataStore - - -def setup_test_homeserver( - cleanup_func, - name="test", - config=None, - reactor=None, - homeserver_to_use: Type[HomeServer] = TestHomeServer, - **kwargs, -): - """ - Setup a homeserver suitable for running tests against. Keyword arguments - are passed to the Homeserver constructor. - - If no datastore is supplied, one is created and given to the homeserver. - - Args: - cleanup_func : The function used to register a cleanup routine for - after the test. - - Calling this method directly is deprecated: you should instead derive from - HomeserverTestCase. - """ - if reactor is None: - from twisted.internet import reactor - - if config is None: - config = default_config(name, parse=True) - - config.ldap_enabled = False - - if "clock" not in kwargs: - kwargs["clock"] = MockClock() - - if USE_POSTGRES_FOR_TESTS: - test_db = "synapse_test_%s" % uuid.uuid4().hex - - database_config = { - "name": "psycopg2", - "args": { - "database": test_db, - "host": POSTGRES_HOST, - "password": POSTGRES_PASSWORD, - "user": POSTGRES_USER, - "cp_min": 1, - "cp_max": 5, - }, - } - else: - database_config = { - "name": "sqlite3", - "args": {"database": ":memory:", "cp_min": 1, "cp_max": 1}, - } - - if "db_txn_limit" in kwargs: - database_config["txn_limit"] = kwargs["db_txn_limit"] - - database = DatabaseConnectionConfig("master", database_config) - config.database.databases = [database] - - db_engine = create_engine(database.config) - - # Create the database before we actually try and connect to it, based off - # the template database we generate in setupdb() - if isinstance(db_engine, PostgresEngine): - db_conn = db_engine.module.connect( - database=POSTGRES_BASE_DB, - user=POSTGRES_USER, - host=POSTGRES_HOST, - password=POSTGRES_PASSWORD, - ) - db_conn.autocommit = True - cur = db_conn.cursor() - cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,)) - cur.execute( - "CREATE DATABASE %s WITH TEMPLATE %s;" % (test_db, POSTGRES_BASE_DB) - ) - cur.close() - db_conn.close() - - hs = homeserver_to_use( - name, - config=config, - version_string="Synapse/tests", - reactor=reactor, - ) - - # Install @cache_in_self attributes - for key, val in kwargs.items(): - setattr(hs, "_" + key, val) - - # Mock TLS - hs.tls_server_context_factory = Mock() - hs.tls_client_options_factory = Mock() - - hs.setup() - if homeserver_to_use == TestHomeServer: - hs.setup_background_tasks() - - if isinstance(db_engine, PostgresEngine): - database = hs.get_datastores().databases[0] - - # We need to do cleanup on PostgreSQL - def cleanup(): - import psycopg2 - - # Close all the db pools - database._db_pool.close() - - dropped = False - - # Drop the test database - db_conn = db_engine.module.connect( - database=POSTGRES_BASE_DB, - user=POSTGRES_USER, - host=POSTGRES_HOST, - password=POSTGRES_PASSWORD, - ) - db_conn.autocommit = True - cur = db_conn.cursor() - - # Try a few times to drop the DB. Some things may hold on to the - # database for a few more seconds due to flakiness, preventing - # us from dropping it when the test is over. If we can't drop - # it, warn and move on. 
- for _ in range(5): - try: - cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,)) - db_conn.commit() - dropped = True - except psycopg2.OperationalError as e: - warnings.warn( - "Couldn't drop old db: " + str(e), category=UserWarning - ) - time.sleep(0.5) - - cur.close() - db_conn.close() - - if not dropped: - warnings.warn("Failed to drop old DB.", category=UserWarning) - - if not LEAVE_DB: - # Register the cleanup hook - cleanup_func(cleanup) - - # bcrypt is far too slow to be doing in unit tests - # Need to let the HS build an auth handler and then mess with it - # because AuthHandler's constructor requires the HS, so we can't make one - # beforehand and pass it in to the HS's constructor (chicken / egg) - async def hash(p): - return hashlib.md5(p.encode("utf8")).hexdigest() - - hs.get_auth_handler().hash = hash - - async def validate_hash(p, h): - return hashlib.md5(p.encode("utf8")).hexdigest() == h - - hs.get_auth_handler().validate_hash = validate_hash - - return hs - - def mock_getRawHeaders(headers=None): headers = headers if headers is not None else {} From 2bf31f7807c7a0c229170803c97090d612dc16f9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 21 Dec 2021 17:26:04 +0000 Subject: [PATCH 022/474] Pin to frozendict<2.1.2 (#11625) ... to work around breakage on buster (https://github.com/Marco-Sulla/python-frozendict/issues/41) --- changelog.d/11625.misc | 1 + synapse/python_dependencies.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11625.misc diff --git a/changelog.d/11625.misc b/changelog.d/11625.misc new file mode 100644 index 000000000000..82ee78e72489 --- /dev/null +++ b/changelog.d/11625.misc @@ -0,0 +1 @@ +Work around a build problem on Debian Buster. diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 7d26954244ea..13fb69460ea9 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -50,7 +50,8 @@ REQUIREMENTS = [ # we use the TYPE_CHECKER.redefine method added in jsonschema 3.0.0 "jsonschema>=3.0.0", - "frozendict>=1", + # frozendict 2.1.2 is broken on Debian 10: https://github.com/Marco-Sulla/python-frozendict/issues/41 + "frozendict>=1,<2.1.2", "unpaddedbase64>=1.1.0", "canonicaljson>=1.4.0", # we use the type definitions added in signedjson 1.1. From aa874a13902fcf7f3e4024cd95a8491f308367bb Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 21 Dec 2021 17:32:16 +0000 Subject: [PATCH 023/474] 1.49.2 --- CHANGES.md | 13 ++++++++++++- changelog.d/11625.misc | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 4 files changed, 19 insertions(+), 3 deletions(-) delete mode 100644 changelog.d/11625.misc diff --git a/CHANGES.md b/CHANGES.md index dc6fa177946e..9301ec8804b0 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,4 +1,4 @@ -Synapse 1.49.1 (2021-12-21) +Synapse 1.49.2 (2021-12-21) =========================== This release fixes a regression introduced in Synapse 1.49.0 which could cause `/sync` requests to take significantly longer. This would particularly affect "initial" syncs for users participating in a large number of rooms, and in extreme cases, could make it impossible for such users to log in on a new client. @@ -12,6 +12,17 @@ Bugfixes - Fix a performance regression in `/sync` handling, introduced in 1.49.0. ([\#11583](https://github.com/matrix-org/synapse/issues/11583)) +Internal Changes +---------------- + +- Work around a build problem on Debian Buster. 
([\#11625](https://github.com/matrix-org/synapse/issues/11625)) + + +Synapse 1.49.1 (2021-12-21) +=========================== + +Not released due to problems building the debian packages. + Synapse 1.49.0 (2021-12-14) =========================== diff --git a/changelog.d/11625.misc b/changelog.d/11625.misc deleted file mode 100644 index 82ee78e72489..000000000000 --- a/changelog.d/11625.misc +++ /dev/null @@ -1 +0,0 @@ -Work around a build problem on Debian Buster. diff --git a/debian/changelog b/debian/changelog index 61a1d003b29e..ebe3e0cbf958 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.49.2) stable; urgency=medium + + * New synapse release 1.49.2. + + -- Synapse Packaging team Tue, 21 Dec 2021 17:31:03 +0000 + matrix-synapse-py3 (1.49.1) stable; urgency=medium * New synapse release 1.49.1. diff --git a/synapse/__init__.py b/synapse/__init__.py index d45e0f18833c..95a49c20befc 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.49.1" +__version__ = "1.49.2" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 6b6dcdc338a3fcfa1da6c3e2a38f62c268df1cbc Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 21 Dec 2021 17:41:34 +0000 Subject: [PATCH 024/474] update changelog postgres 10 _+_ --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 9301ec8804b0..9f6e29631df6 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -3,7 +3,7 @@ Synapse 1.49.2 (2021-12-21) This release fixes a regression introduced in Synapse 1.49.0 which could cause `/sync` requests to take significantly longer. This would particularly affect "initial" syncs for users participating in a large number of rooms, and in extreme cases, could make it impossible for such users to log in on a new client. -**Note:** in line with our [deprecation policy](https://matrix-org.github.io/synapse/latest/deprecation_policy.html) for platform dependencies, this will be the last release to support Python 3.6 and PostgreSQL 9.6, both of which have now reached upstream end-of-life. Synapse will require Python 3.7+ and PostgreSQL 10. +**Note:** in line with our [deprecation policy](https://matrix-org.github.io/synapse/latest/deprecation_policy.html) for platform dependencies, this will be the last release to support Python 3.6 and PostgreSQL 9.6, both of which have now reached upstream end-of-life. Synapse will require Python 3.7+ and PostgreSQL 10+. **Note:** We will also stop producing packages for Ubuntu 18.04 (Bionic Beaver) after this release, as it uses Python 3.6. 
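
As an aside on the `frozendict>=1,<2.1.2` pin introduced in #11625 above: the specifier uses standard PEP 440 semantics, so every release from 1.0 up to (but excluding) the broken 2.1.2 remains installable. Below is a minimal sketch of how those semantics play out, using the third-party `packaging` library; it is illustrative only and not part of any patch here.

```python
# Illustrative check of the frozendict pin from #11625; assumes the
# third-party `packaging` library is installed (pip install packaging).
from packaging.specifiers import SpecifierSet
from packaging.version import Version

pin = SpecifierSet(">=1,<2.1.2")

for candidate in ("1.3", "2.0.7", "2.1.1", "2.1.2", "2.1.3"):
    verdict = "allowed" if Version(candidate) in pin else "excluded"
    print(f"frozendict=={candidate}: {verdict}")

# 2.1.2 and later are excluded, sidestepping the Debian Buster build
# breakage, while earlier releases continue to satisfy REQUIREMENTS.
```
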
From 87da37374a68b935fe8a9378a7d369448461c128 Mon Sep 17 00:00:00 2001 From: Shay Date: Tue, 21 Dec 2021 11:37:04 -0800 Subject: [PATCH 025/474] Drop EOL python 3.6 from CI (#11595) * remove python 3.6 and postgres 9.6 from github workflow * remove python 3.6 env from tox * newsfragment * correct postgres version * add py310 to tox env list --- .github/workflows/tests.yml | 12 ++++++------ changelog.d/11595.misc | 1 + tox.ini | 2 +- 3 files changed, 8 insertions(+), 7 deletions(-) create mode 100644 changelog.d/11595.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 21c9ee7823c7..cb72e1a233ec 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -76,7 +76,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: ["3.6", "3.7", "3.8", "3.9", "3.10"] + python-version: ["3.7", "3.8", "3.9", "3.10"] database: ["sqlite"] toxenv: ["py"] include: @@ -85,9 +85,9 @@ jobs: toxenv: "py-noextras" # Oldest Python with PostgreSQL - - python-version: "3.6" + - python-version: "3.7" database: "postgres" - postgres-version: "9.6" + postgres-version: "10" toxenv: "py" # Newest Python with newest PostgreSQL @@ -167,7 +167,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: ["pypy-3.6"] + python-version: ["pypy-3.7"] steps: - uses: actions/checkout@v2 @@ -291,8 +291,8 @@ jobs: strategy: matrix: include: - - python-version: "3.6" - postgres-version: "9.6" + - python-version: "3.7" + postgres-version: "10" - python-version: "3.10" postgres-version: "14" diff --git a/changelog.d/11595.misc b/changelog.d/11595.misc new file mode 100644 index 000000000000..57e54db5a9df --- /dev/null +++ b/changelog.d/11595.misc @@ -0,0 +1 @@ +Drop EOL python 3.6 and postgres 9.6 from CI. \ No newline at end of file diff --git a/tox.ini b/tox.ini index cfe6a0694269..2ffca14b2278 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = packaging, py36, py37, py38, py39, check_codestyle, check_isort +envlist = packaging, py37, py38, py39, py310, check_codestyle, check_isort # we require tox>=2.3.2 for the fix to https://github.com/tox-dev/tox/issues/208 minversion = 2.3.2 From c500bf37d660b08efb48501b7690dc4448b39eca Mon Sep 17 00:00:00 2001 From: AndrewFerr Date: Thu, 23 Dec 2021 05:42:50 -0500 Subject: [PATCH 026/474] Add details for how to set up TURN behind NAT (#11553) Signed-off-by: Andrew Ferrazzutti --- changelog.d/11553.doc | 1 + docs/turn-howto.md | 67 +++++++++++++++++++++++++++++++++---------- 2 files changed, 53 insertions(+), 15 deletions(-) create mode 100644 changelog.d/11553.doc diff --git a/changelog.d/11553.doc b/changelog.d/11553.doc new file mode 100644 index 000000000000..810ba1692861 --- /dev/null +++ b/changelog.d/11553.doc @@ -0,0 +1 @@ +Add details for how to configure a TURN server when behind a NAT. Contibuted by @AndrewFerr. diff --git a/docs/turn-howto.md b/docs/turn-howto.md index e6812de69e6b..e32aaa1850d6 100644 --- a/docs/turn-howto.md +++ b/docs/turn-howto.md @@ -15,8 +15,8 @@ The following sections describe how to install [coturn](TURN->TURN->client flows work + # this should be one of the turn server's listening IPs allowed-peer-ip=10.0.0.1 # consider whether you want to limit the quota of relayed streams per user (or total) to avoid risk of DoS. @@ -123,7 +139,7 @@ This will install and start a systemd service called `coturn`. 
pkey=/path/to/privkey.pem ``` - In this case, replace the `turn:` schemes in the `turn_uri` settings below + In this case, replace the `turn:` schemes in the `turn_uris` settings below with `turns:`. We recommend that you only try to set up TLS/DTLS once you have set up a @@ -134,21 +150,33 @@ This will install and start a systemd service called `coturn`. traffic (remember to allow both TCP and UDP traffic), and ports 49152-65535 for the UDP relay.) -1. We do not recommend running a TURN server behind NAT, and are not aware of - anyone doing so successfully. +1. If your TURN server is behind NAT, the NAT gateway must have an external, + publicly-reachable IP address. You must configure coturn to advertise that + address to connecting clients: + + ``` + external-ip=EXTERNAL_NAT_IPv4_ADDRESS + ``` - If you want to try it anyway, you will at least need to tell coturn its - external IP address: + You may optionally limit the TURN server to listen only on the local + address that is mapped by NAT to the external address: ``` - external-ip=192.88.99.1 + listening-ip=INTERNAL_TURNSERVER_IPv4_ADDRESS ``` - ... and your NAT gateway must forward all of the relayed ports directly - (eg, port 56789 on the external IP must be always be forwarded to port - 56789 on the internal IP). + If your NAT gateway is reachable over both IPv4 and IPv6, you may + configure coturn to advertise each available address: - If you get this working, let us know! + ``` + external-ip=EXTERNAL_NAT_IPv4_ADDRESS + external-ip=EXTERNAL_NAT_IPv6_ADDRESS + ``` + + When advertising an external IPv6 address, ensure that the firewall and + network settings of the system running your TURN server are configured to + accept IPv6 traffic, and that the TURN server is listening on the local + IPv6 address that is mapped by NAT to the external IPv6 address. 1. (Re)start the turn server: @@ -216,9 +244,6 @@ connecting". Unfortunately, troubleshooting this can be tricky. Here are a few things to try: - * Check that your TURN server is not behind NAT. As above, we're not aware of - anyone who has successfully set this up. - * Check that you have opened your firewall to allow TCP and UDP traffic to the TURN ports (normally 3478 and 5349). @@ -234,6 +259,18 @@ Here are a few things to try: Try removing any AAAA records for your TURN server, so that it is only reachable over IPv4. + * If your TURN server is behind NAT: + + * double-check that your NAT gateway is correctly forwarding all TURN + ports (normally 3478 & 5349 for TCP & UDP TURN traffic, and 49152-65535 for the UDP + relay) to the NAT-internal address of your TURN server. If advertising + both IPv4 and IPv6 external addresses via the `external-ip` option, ensure + that the NAT is forwarding both IPv4 and IPv6 traffic to the IPv4 and IPv6 + internal addresses of your TURN server. When in doubt, remove AAAA records + for your TURN server and specify only an IPv4 address as your `external-ip`. + + * ensure that your TURN server uses the NAT gateway as its default route. + * Enable more verbose logging in coturn via the `verbose` setting: ``` From 66d7aa783a8ad1b3eb6d17ba086e2dda8a912ab6 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 23 Dec 2021 06:47:24 -0500 Subject: [PATCH 027/474] Fix mypy error with opentracing.tags. 
(#11622) --- changelog.d/11622.misc | 1 + synapse/logging/opentracing.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/11622.misc diff --git a/changelog.d/11622.misc b/changelog.d/11622.misc new file mode 100644 index 000000000000..def24afb8d23 --- /dev/null +++ b/changelog.d/11622.misc @@ -0,0 +1 @@ +Add opentracing type stubs and fix associated mypy errors. \ No newline at end of file diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 63642906151e..622445e9f477 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -220,6 +220,7 @@ class _DummyTagNames: try: import opentracing + import opentracing.tags tags = opentracing.tags except ImportError: From fcfe67578f70bb4f150b83cfad708f5bc0474c1e Mon Sep 17 00:00:00 2001 From: Shay Date: Thu, 23 Dec 2021 20:22:15 -0800 Subject: [PATCH 028/474] Update to the current version of Black and run it on Synapse codebase (#11596) * update black version * run updated version of black on code * newsfragment * enumerate python versions --- changelog.d/11596.misc | 1 + pyproject.toml | 2 +- setup.py | 2 +- synapse/logging/context.py | 1 - synapse/util/caches/lrucache.py | 1 - 5 files changed, 3 insertions(+), 4 deletions(-) create mode 100644 changelog.d/11596.misc diff --git a/changelog.d/11596.misc b/changelog.d/11596.misc new file mode 100644 index 000000000000..3064bc632d57 --- /dev/null +++ b/changelog.d/11596.misc @@ -0,0 +1 @@ +Update black version and run it on all the files. \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 8bca1fa4efd9..963f149c6af6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,7 +35,7 @@ showcontent = true [tool.black] -target-version = ['py36'] +target-version = ['py37', 'py38', 'py39', 'py310'] exclude = ''' ( diff --git a/setup.py b/setup.py index e113da6782d6..fbb0133016a7 100755 --- a/setup.py +++ b/setup.py @@ -96,7 +96,7 @@ def exec_file(path_segments): # We pin black so that our tests don't start failing on new releases. CONDITIONAL_REQUIREMENTS["lint"] = [ "isort==5.7.0", - "black==21.6b0", + "black==21.12b0", "flake8-comprehensions", "flake8-bugbear==21.3.2", "flake8", diff --git a/synapse/logging/context.py b/synapse/logging/context.py index 25e78cc82fcd..d4ee89337642 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -68,7 +68,6 @@ def get_thread_resource_usage() -> "Optional[resource.struct_rusage]": return resource.getrusage(RUSAGE_THREAD) - except Exception: # If the system doesn't support resource.getrusage(RUSAGE_THREAD) then we # won't track resource usage. 
diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index eb96f7e665e6..3f11a2f9dd5c 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -69,7 +69,6 @@ def _get_size_of(val: Any, *, recurse: bool = True) -> int: sizer.exclude_refs((), None, "") return sizer.asizeof(val, limit=100 if recurse else 0) - except ImportError: def _get_size_of(val: Any, *, recurse: bool = True) -> int: From 15bb1c8511c13197a75df93f6a8021ec5f9586e6 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Wed, 29 Dec 2021 14:01:13 +0100 Subject: [PATCH 029/474] Add type hints to `synapse/storage/databases/main/stats.py` (#11653) --- changelog.d/11653.misc | 1 + mypy.ini | 4 +- synapse/storage/databases/main/stats.py | 94 ++++++++++++++----------- 3 files changed, 57 insertions(+), 42 deletions(-) create mode 100644 changelog.d/11653.misc diff --git a/changelog.d/11653.misc b/changelog.d/11653.misc new file mode 100644 index 000000000000..8e405b922674 --- /dev/null +++ b/changelog.d/11653.misc @@ -0,0 +1 @@ +Add missing type hints to storage classes. \ No newline at end of file diff --git a/mypy.ini b/mypy.ini index 57e1a5df43f4..724c7e2ae47e 100644 --- a/mypy.ini +++ b/mypy.ini @@ -39,7 +39,6 @@ exclude = (?x) |synapse/storage/databases/main/roommember.py |synapse/storage/databases/main/search.py |synapse/storage/databases/main/state.py - |synapse/storage/databases/main/stats.py |synapse/storage/databases/main/user_directory.py |synapse/storage/schema/ @@ -214,6 +213,9 @@ disallow_untyped_defs = True [mypy-synapse.storage.databases.main.profile] disallow_untyped_defs = True +[mypy-synapse.storage.databases.main.stats] +disallow_untyped_defs = True + [mypy-synapse.storage.databases.main.state_deltas] disallow_untyped_defs = True diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index a0472e37f5b0..427ae1f649b1 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -16,7 +16,7 @@ import logging from enum import Enum from itertools import chain -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, cast from typing_extensions import Counter @@ -24,7 +24,11 @@ from synapse.api.constants import EventContentFields, EventTypes, Membership from synapse.api.errors import StoreError -from synapse.storage.database import DatabasePool, LoggingDatabaseConnection +from synapse.storage.database import ( + DatabasePool, + LoggingDatabaseConnection, + LoggingTransaction, +) from synapse.storage.databases.main.state_deltas import StateDeltasStore from synapse.types import JsonDict from synapse.util.caches.descriptors import cached @@ -122,7 +126,9 @@ def __init__( self.db_pool.updates.register_noop_background_update("populate_stats_cleanup") self.db_pool.updates.register_noop_background_update("populate_stats_prepare") - async def _populate_stats_process_users(self, progress, batch_size): + async def _populate_stats_process_users( + self, progress: JsonDict, batch_size: int + ) -> int: """ This is a background update which regenerates statistics for users. """ @@ -134,7 +140,7 @@ async def _populate_stats_process_users(self, progress, batch_size): last_user_id = progress.get("last_user_id", "") - def _get_next_batch(txn): + def _get_next_batch(txn: LoggingTransaction) -> List[str]: sql = """ SELECT DISTINCT name FROM users WHERE name > ? 
@@ -168,7 +174,9 @@ def _get_next_batch(txn): return len(users_to_work_on) - async def _populate_stats_process_rooms(self, progress, batch_size): + async def _populate_stats_process_rooms( + self, progress: JsonDict, batch_size: int + ) -> int: """This is a background update which regenerates statistics for rooms.""" if not self.stats_enabled: await self.db_pool.updates._end_background_update( @@ -178,7 +186,7 @@ async def _populate_stats_process_rooms(self, progress, batch_size): last_room_id = progress.get("last_room_id", "") - def _get_next_batch(txn): + def _get_next_batch(txn: LoggingTransaction) -> List[str]: sql = """ SELECT DISTINCT room_id FROM current_state_events WHERE room_id > ? @@ -307,7 +315,7 @@ async def bulk_update_stats_delta( stream_id: Current position. """ - def _bulk_update_stats_delta_txn(txn): + def _bulk_update_stats_delta_txn(txn: LoggingTransaction) -> None: for stats_type, stats_updates in updates.items(): for stats_id, fields in stats_updates.items(): logger.debug( @@ -339,7 +347,7 @@ async def update_stats_delta( stats_type: str, stats_id: str, fields: Dict[str, int], - complete_with_stream_id: Optional[int], + complete_with_stream_id: int, absolute_field_overrides: Optional[Dict[str, int]] = None, ) -> None: """ @@ -372,14 +380,14 @@ async def update_stats_delta( def _update_stats_delta_txn( self, - txn, - ts, - stats_type, - stats_id, - fields, - complete_with_stream_id, - absolute_field_overrides=None, - ): + txn: LoggingTransaction, + ts: int, + stats_type: str, + stats_id: str, + fields: Dict[str, int], + complete_with_stream_id: int, + absolute_field_overrides: Optional[Dict[str, int]] = None, + ) -> None: if absolute_field_overrides is None: absolute_field_overrides = {} @@ -422,20 +430,23 @@ def _update_stats_delta_txn( ) def _upsert_with_additive_relatives_txn( - self, txn, table, keyvalues, absolutes, additive_relatives - ): + self, + txn: LoggingTransaction, + table: str, + keyvalues: Dict[str, Any], + absolutes: Dict[str, Any], + additive_relatives: Dict[str, int], + ) -> None: """Used to update values in the stats tables. This is basically a slightly convoluted upsert that *adds* to any existing rows. Args: - txn - table (str): Table name - keyvalues (dict[str, any]): Row-identifying key values - absolutes (dict[str, any]): Absolute (set) fields - additive_relatives (dict[str, int]): Fields that will be added onto - if existing row present. + table: Table name + keyvalues: Row-identifying key values + absolutes: Absolute (set) fields + additive_relatives: Fields that will be added onto if existing row present. """ if self.database_engine.can_native_upsert: absolute_updates = [ @@ -491,20 +502,17 @@ def _upsert_with_additive_relatives_txn( current_row.update(absolutes) self.db_pool.simple_update_one_txn(txn, table, keyvalues, current_row) - async def _calculate_and_set_initial_state_for_room( - self, room_id: str - ) -> Tuple[dict, dict, int]: + async def _calculate_and_set_initial_state_for_room(self, room_id: str) -> None: """Calculate and insert an entry into room_stats_current. Args: room_id: The room ID under calculation. - - Returns: - A tuple of room state, membership counts and stream position. 
""" - def _fetch_current_state_stats(txn): - pos = self.get_room_max_stream_ordering() + def _fetch_current_state_stats( + txn: LoggingTransaction, + ) -> Tuple[List[str], Dict[str, int], int, List[str], int]: + pos = self.get_room_max_stream_ordering() # type: ignore[attr-defined] rows = self.db_pool.simple_select_many_txn( txn, @@ -524,7 +532,7 @@ def _fetch_current_state_stats(txn): retcols=["event_id"], ) - event_ids = [row["event_id"] for row in rows] + event_ids = cast(List[str], [row["event_id"] for row in rows]) txn.execute( """ @@ -544,9 +552,9 @@ def _fetch_current_state_stats(txn): (room_id,), ) - (current_state_events_count,) = txn.fetchone() + current_state_events_count = cast(Tuple[int], txn.fetchone())[0] - users_in_room = self.get_users_in_room_txn(txn, room_id) + users_in_room = self.get_users_in_room_txn(txn, room_id) # type: ignore[attr-defined] return ( event_ids, @@ -566,7 +574,7 @@ def _fetch_current_state_stats(txn): "get_initial_state_for_room", _fetch_current_state_stats ) - state_event_map = await self.get_events(event_ids, get_prev_content=False) + state_event_map = await self.get_events(event_ids, get_prev_content=False) # type: ignore[attr-defined] room_state = { "join_rules": None, @@ -622,8 +630,10 @@ def _fetch_current_state_stats(txn): }, ) - async def _calculate_and_set_initial_state_for_user(self, user_id): - def _calculate_and_set_initial_state_for_user_txn(txn): + async def _calculate_and_set_initial_state_for_user(self, user_id: str) -> None: + def _calculate_and_set_initial_state_for_user_txn( + txn: LoggingTransaction, + ) -> Tuple[int, int]: pos = self._get_max_stream_id_in_current_state_deltas_txn(txn) txn.execute( @@ -634,7 +644,7 @@ def _calculate_and_set_initial_state_for_user_txn(txn): """, (user_id,), ) - (count,) = txn.fetchone() + count = cast(Tuple[int], txn.fetchone())[0] return count, pos joined_rooms, pos = await self.db_pool.runInteraction( @@ -678,7 +688,9 @@ async def get_users_media_usage_paginate( users that exist given this query """ - def get_users_media_usage_paginate_txn(txn): + def get_users_media_usage_paginate_txn( + txn: LoggingTransaction, + ) -> Tuple[List[JsonDict], int]: filters = [] args = [self.hs.config.server.server_name] @@ -733,7 +745,7 @@ def get_users_media_usage_paginate_txn(txn): sql_base=sql_base, ) txn.execute(sql, args) - count = txn.fetchone()[0] + count = cast(Tuple[int], txn.fetchone())[0] sql = """ SELECT From f58b300d271ddaad7ce81687547f3c6ed7e7187a Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 29 Dec 2021 08:02:03 -0500 Subject: [PATCH 030/474] Do not attempt to bundled aggregations for /members and /state. (#11623) Both of those APIs return state events, which will not have bundled aggregations added anyway. --- changelog.d/11623.bugfix | 1 + synapse/handlers/message.py | 4 +--- 2 files changed, 2 insertions(+), 3 deletions(-) create mode 100644 changelog.d/11623.bugfix diff --git a/changelog.d/11623.bugfix b/changelog.d/11623.bugfix new file mode 100644 index 000000000000..4116e5dd7c7b --- /dev/null +++ b/changelog.d/11623.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where responses included bundled aggregations when they should not, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). 
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 1a7190085a7d..5e3d3886eb1d 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -246,9 +246,7 @@ async def get_state_events( room_state = room_state_events[membership_event_id] now = self.clock.time_msec() - events = await self._event_serializer.serialize_events( - room_state.values(), now, bundle_aggregations=True - ) + events = await self._event_serializer.serialize_events(room_state.values(), now) return events async def get_joined_members(self, requester: Requester, room_id: str) -> dict: From f82d38ed2e8e07c81a0d60f31401e3edecd5e57f Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Wed, 29 Dec 2021 14:04:28 +0100 Subject: [PATCH 031/474] Improve type hints in storage classes. (#11652) By using cast and making ignores more specific. --- changelog.d/11652.misc | 1 + synapse/storage/databases/main/deviceinbox.py | 4 ++-- .../storage/databases/main/event_federation.py | 6 +++--- .../databases/main/event_push_actions.py | 18 ++++++++++-------- synapse/storage/databases/main/filtering.py | 4 ++-- .../storage/databases/main/media_repository.py | 3 ++- synapse/storage/databases/main/pusher.py | 6 +++--- synapse/storage/databases/main/registration.py | 17 ++++++++++------- synapse/storage/databases/main/relations.py | 4 ++-- synapse/storage/databases/main/ui_auth.py | 15 +++++++++------ 10 files changed, 44 insertions(+), 34 deletions(-) create mode 100644 changelog.d/11652.misc diff --git a/changelog.d/11652.misc b/changelog.d/11652.misc new file mode 100644 index 000000000000..8e405b922674 --- /dev/null +++ b/changelog.d/11652.misc @@ -0,0 +1 @@ +Add missing type hints to storage classes. \ No newline at end of file diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index b410eefdc71f..3682cb6a8139 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -14,7 +14,7 @@ # limitations under the License. import logging -from typing import TYPE_CHECKING, List, Optional, Tuple +from typing import TYPE_CHECKING, List, Optional, Tuple, cast from synapse.logging import issue9533_logger from synapse.logging.opentracing import log_kv, set_tag, trace @@ -673,7 +673,7 @@ def _remove_dead_devices_from_device_inbox_txn( # There's a type mismatch here between how we want to type the row and # what fetchone says it returns, but we silence it because we know that # res can't be None. - res: Tuple[Optional[int]] = txn.fetchone() # type: ignore[assignment] + res = cast(Tuple[Optional[int]], txn.fetchone()) if res[0] is None: # this can only happen if the `device_inbox` table is empty, in which # case we have no work to do. diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index bc5ff25d0880..270b30800bf2 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -288,7 +288,7 @@ def _get_auth_chain_ids_txn( new_front = set() for chunk in batch_iter(front, 100): # Pull the auth events either from the cache or DB. - to_fetch = [] # Event IDs to fetch from DB # type: List[str] + to_fetch: List[str] = [] # Event IDs to fetch from DB for event_id in chunk: res = self._event_auth_cache.get(event_id) if res is None: @@ -615,8 +615,8 @@ def _get_auth_chain_difference_txn( # currently walking, either from cache or DB. 
search, chunk = search[:-100], search[-100:] - found = [] # Results found # type: List[Tuple[str, str, int]] - to_fetch = [] # Event IDs to fetch from DB # type: List[str] + found: List[Tuple[str, str, int]] = [] # Results found + to_fetch: List[str] = [] # Event IDs to fetch from DB for _, event_id in chunk: res = self._event_auth_cache.get(event_id) if res is None: diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 98ea0e884cb3..a98e6b259378 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union +from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union, cast import attr @@ -326,7 +326,7 @@ def get_after_receipt( ) args = [user_id, user_id, min_stream_ordering, max_stream_ordering, limit] txn.execute(sql, args) - return txn.fetchall() # type: ignore[return-value] + return cast(List[Tuple[str, str, int, str, bool]], txn.fetchall()) after_read_receipt = await self.db_pool.runInteraction( "get_unread_push_actions_for_user_in_range_http_arr", get_after_receipt @@ -357,7 +357,7 @@ def get_no_receipt( ) args = [user_id, user_id, min_stream_ordering, max_stream_ordering, limit] txn.execute(sql, args) - return txn.fetchall() # type: ignore[return-value] + return cast(List[Tuple[str, str, int, str, bool]], txn.fetchall()) no_read_receipt = await self.db_pool.runInteraction( "get_unread_push_actions_for_user_in_range_http_nrr", get_no_receipt @@ -434,7 +434,7 @@ def get_after_receipt( ) args = [user_id, user_id, min_stream_ordering, max_stream_ordering, limit] txn.execute(sql, args) - return txn.fetchall() # type: ignore[return-value] + return cast(List[Tuple[str, str, int, str, bool, int]], txn.fetchall()) after_read_receipt = await self.db_pool.runInteraction( "get_unread_push_actions_for_user_in_range_email_arr", get_after_receipt @@ -465,7 +465,7 @@ def get_no_receipt( ) args = [user_id, user_id, min_stream_ordering, max_stream_ordering, limit] txn.execute(sql, args) - return txn.fetchall() # type: ignore[return-value] + return cast(List[Tuple[str, str, int, str, bool, int]], txn.fetchall()) no_read_receipt = await self.db_pool.runInteraction( "get_unread_push_actions_for_user_in_range_email_nrr", get_no_receipt @@ -662,7 +662,7 @@ def _find_first_stream_ordering_after_ts_txn( The stream ordering """ txn.execute("SELECT MAX(stream_ordering) FROM events") - max_stream_ordering = txn.fetchone()[0] # type: ignore[index] + max_stream_ordering = cast(Tuple[Optional[int]], txn.fetchone())[0] if max_stream_ordering is None: return 0 @@ -731,7 +731,7 @@ def f(txn: LoggingTransaction) -> Optional[Tuple[int]]: " LIMIT 1" ) txn.execute(sql, (stream_ordering,)) - return txn.fetchone() # type: ignore[return-value] + return cast(Optional[Tuple[int]], txn.fetchone()) result = await self.db_pool.runInteraction( "get_time_of_last_push_action_before", f @@ -1029,7 +1029,9 @@ def f( " LIMIT ?" 
% (before_clause,) ) txn.execute(sql, args) - return txn.fetchall() # type: ignore[return-value] + return cast( + List[Tuple[str, str, int, int, str, bool, str, int]], txn.fetchall() + ) push_actions = await self.db_pool.runInteraction("get_push_actions_for_user", f) return [ diff --git a/synapse/storage/databases/main/filtering.py b/synapse/storage/databases/main/filtering.py index cf842803bcd6..cb9ee08fa8e8 100644 --- a/synapse/storage/databases/main/filtering.py +++ b/synapse/storage/databases/main/filtering.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Union +from typing import Optional, Tuple, Union, cast from canonicaljson import encode_canonical_json @@ -63,7 +63,7 @@ def _do_txn(txn: LoggingTransaction) -> int: sql = "SELECT MAX(filter_id) FROM user_filters WHERE user_id = ?" txn.execute(sql, (user_localpart,)) - max_id = txn.fetchone()[0] # type: ignore[index] + max_id = cast(Tuple[Optional[int]], txn.fetchone())[0] if max_id is None: filter_id = 0 else: diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index 1b076683f762..cbba356b4a98 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -23,6 +23,7 @@ Optional, Tuple, Union, + cast, ) from synapse.storage._base import SQLBaseStore @@ -220,7 +221,7 @@ def get_local_media_by_user_paginate_txn( WHERE user_id = ? """ txn.execute(sql, args) - count = txn.fetchone()[0] # type: ignore[index] + count = cast(Tuple[int], txn.fetchone())[0] sql = """ SELECT diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py index 7ab681ed6f5f..747b4f31df67 100644 --- a/synapse/storage/databases/main/pusher.py +++ b/synapse/storage/databases/main/pusher.py @@ -494,7 +494,7 @@ async def add_pusher( # invalidate, since we the user might not have had a pusher before await self.db_pool.runInteraction( "add_pusher", - self._invalidate_cache_and_stream, # type: ignore + self._invalidate_cache_and_stream, # type: ignore[attr-defined] self.get_if_user_has_pusher, (user_id,), ) @@ -503,7 +503,7 @@ async def delete_pusher_by_app_id_pushkey_user_id( self, app_id: str, pushkey: str, user_id: str ) -> None: def delete_pusher_txn(txn, stream_id): - self._invalidate_cache_and_stream( # type: ignore + self._invalidate_cache_and_stream( # type: ignore[attr-defined] txn, self.get_if_user_has_pusher, (user_id,) ) @@ -548,7 +548,7 @@ async def delete_all_pushers_for_user(self, user_id: str) -> None: pushers = list(await self.get_pushers_by_user_id(user_id)) def delete_pushers_txn(txn, stream_ids): - self._invalidate_cache_and_stream( # type: ignore + self._invalidate_cache_and_stream( # type: ignore[attr-defined] txn, self.get_if_user_has_pusher, (user_id,) ) diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 29d9d4de9627..4175c82a253c 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -16,7 +16,7 @@ import logging import random import re -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union, cast import attr @@ -1357,12 +1357,15 @@ def _use_registration_token_txn(txn): # Override type because the return type is only optional if # allow_none is True, and we don't want mypy 
throwing errors # about None not being indexable. - res: Dict[str, Any] = self.db_pool.simple_select_one_txn( - txn, - "registration_tokens", - keyvalues={"token": token}, - retcols=["pending", "completed"], - ) # type: ignore + res = cast( + Dict[str, Any], + self.db_pool.simple_select_one_txn( + txn, + "registration_tokens", + keyvalues={"token": token}, + retcols=["pending", "completed"], + ), + ) # Decrement pending and increment completed self.db_pool.simple_update_one_txn( diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 729ff17e2e19..4ff6aed253a3 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -13,7 +13,7 @@ # limitations under the License. import logging -from typing import List, Optional, Tuple, Union +from typing import List, Optional, Tuple, Union, cast import attr @@ -399,7 +399,7 @@ def _get_thread_summary_txn( AND relation_type = ? """ txn.execute(sql, (event_id, room_id, RelationTypes.THREAD)) - count = txn.fetchone()[0] # type: ignore[index] + count = cast(Tuple[int], txn.fetchone())[0] return count, latest_event_id diff --git a/synapse/storage/databases/main/ui_auth.py b/synapse/storage/databases/main/ui_auth.py index 340ca9e47d47..a1a1a6a14a85 100644 --- a/synapse/storage/databases/main/ui_auth.py +++ b/synapse/storage/databases/main/ui_auth.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Dict, List, Optional, Tuple, Union, cast import attr @@ -225,11 +225,14 @@ def _set_ui_auth_session_data_txn( self, txn: LoggingTransaction, session_id: str, key: str, value: Any ): # Get the current value. - result: Dict[str, Any] = self.db_pool.simple_select_one_txn( # type: ignore - txn, - table="ui_auth_sessions", - keyvalues={"session_id": session_id}, - retcols=("serverdict",), + result = cast( + Dict[str, Any], + self.db_pool.simple_select_one_txn( + txn, + table="ui_auth_sessions", + keyvalues={"session_id": session_id}, + retcols=("serverdict",), + ), ) # Update it and add it back to the database. From 2c7f5e74e5ae5eb7e5d0b99001610a829b0b35da Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 29 Dec 2021 15:12:30 +0000 Subject: [PATCH 032/474] Fix a type annotation in `test_account_data.py` and remove it from the Mypy exclusion list. (#11657) Co-authored-by: Patrick Cloke --- changelog.d/11657.misc | 1 + mypy.ini | 1 - tests/storage/test_account_data.py | 4 ++-- 3 files changed, 3 insertions(+), 3 deletions(-) create mode 100644 changelog.d/11657.misc diff --git a/changelog.d/11657.misc b/changelog.d/11657.misc new file mode 100644 index 000000000000..8e405b922674 --- /dev/null +++ b/changelog.d/11657.misc @@ -0,0 +1 @@ +Add missing type hints to storage classes. 
\ No newline at end of file diff --git a/mypy.ini b/mypy.ini index 724c7e2ae47e..e0204a3c0408 100644 --- a/mypy.ini +++ b/mypy.ini @@ -100,7 +100,6 @@ exclude = (?x) |tests/server.py |tests/server_notices/test_resource_limits_server_notices.py |tests/state/test_v2.py - |tests/storage/test_account_data.py |tests/storage/test_background_update.py |tests/storage/test_base.py |tests/storage/test_client_ips.py diff --git a/tests/storage/test_account_data.py b/tests/storage/test_account_data.py index 01af49a16b0f..d697d2bc1e79 100644 --- a/tests/storage/test_account_data.py +++ b/tests/storage/test_account_data.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Iterable, Set +from typing import Iterable, Optional, Set from synapse.api.constants import AccountDataTypes @@ -25,7 +25,7 @@ def prepare(self, hs, reactor, clock): self.user = "@user:test" def _update_ignore_list( - self, *ignored_user_ids: Iterable[str], ignorer_user_id: str = None + self, *ignored_user_ids: Iterable[str], ignorer_user_id: Optional[str] = None ) -> None: """Update the account data to block the given users.""" if ignorer_user_id is None: From 07a3b5dabad98678bae52275b327c302adbf67af Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Thu, 30 Dec 2021 13:22:31 +0100 Subject: [PATCH 033/474] Add type hints to `synapse/storage/databases/main/events_bg_updates.py` (#11654) --- changelog.d/11654.misc | 1 + mypy.ini | 4 +- .../databases/main/events_bg_updates.py | 69 +++++++++++-------- 3 files changed, 44 insertions(+), 30 deletions(-) create mode 100644 changelog.d/11654.misc diff --git a/changelog.d/11654.misc b/changelog.d/11654.misc new file mode 100644 index 000000000000..8e405b922674 --- /dev/null +++ b/changelog.d/11654.misc @@ -0,0 +1 @@ +Add missing type hints to storage classes. \ No newline at end of file diff --git a/mypy.ini b/mypy.ini index e0204a3c0408..85fa22d28f2b 100644 --- a/mypy.ini +++ b/mypy.ini @@ -28,7 +28,6 @@ exclude = (?x) |synapse/storage/databases/main/cache.py |synapse/storage/databases/main/devices.py |synapse/storage/databases/main/event_federation.py - |synapse/storage/databases/main/events_bg_updates.py |synapse/storage/databases/main/group_server.py |synapse/storage/databases/main/metrics.py |synapse/storage/databases/main/monthly_active_users.py @@ -200,6 +199,9 @@ disallow_untyped_defs = True [mypy-synapse.storage.databases.main.event_push_actions] disallow_untyped_defs = True +[mypy-synapse.storage.databases.main.events_bg_updates] +disallow_untyped_defs = True + [mypy-synapse.storage.databases.main.events_worker] disallow_untyped_defs = True diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index 9b36941fecb8..a68f14ba4824 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -13,7 +13,7 @@ # limitations under the License. 
import logging -from typing import TYPE_CHECKING, Dict, List, Optional, Tuple +from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Set, Tuple, cast import attr @@ -240,12 +240,14 @@ def __init__( ################################################################################ - async def _background_reindex_fields_sender(self, progress, batch_size): + async def _background_reindex_fields_sender( + self, progress: JsonDict, batch_size: int + ) -> int: target_min_stream_id = progress["target_min_stream_id_inclusive"] max_stream_id = progress["max_stream_id_exclusive"] rows_inserted = progress.get("rows_inserted", 0) - def reindex_txn(txn): + def reindex_txn(txn: LoggingTransaction) -> int: sql = ( "SELECT stream_ordering, event_id, json FROM events" " INNER JOIN event_json USING (event_id)" @@ -307,12 +309,14 @@ def reindex_txn(txn): return result - async def _background_reindex_origin_server_ts(self, progress, batch_size): + async def _background_reindex_origin_server_ts( + self, progress: JsonDict, batch_size: int + ) -> int: target_min_stream_id = progress["target_min_stream_id_inclusive"] max_stream_id = progress["max_stream_id_exclusive"] rows_inserted = progress.get("rows_inserted", 0) - def reindex_search_txn(txn): + def reindex_search_txn(txn: LoggingTransaction) -> int: sql = ( "SELECT stream_ordering, event_id FROM events" " WHERE ? <= stream_ordering AND stream_ordering < ?" @@ -381,7 +385,9 @@ def reindex_search_txn(txn): return result - async def _cleanup_extremities_bg_update(self, progress, batch_size): + async def _cleanup_extremities_bg_update( + self, progress: JsonDict, batch_size: int + ) -> int: """Background update to clean out extremities that should have been deleted previously. @@ -402,12 +408,12 @@ async def _cleanup_extremities_bg_update(self, progress, batch_size): # have any descendants, but if they do then we should delete those # extremities. - def _cleanup_extremities_bg_update_txn(txn): + def _cleanup_extremities_bg_update_txn(txn: LoggingTransaction) -> int: # The set of extremity event IDs that we're checking this round original_set = set() - # A dict[str, set[str]] of event ID to their prev events. - graph = {} + # A dict[str, Set[str]] of event ID to their prev events. + graph: Dict[str, Set[str]] = {} # The set of descendants of the original set that are not rejected # nor soft-failed. 
Ancestors of these events should be removed @@ -536,7 +542,7 @@ def _cleanup_extremities_bg_update_txn(txn): room_ids = {row["room_id"] for row in rows} for room_id in room_ids: txn.call_after( - self.get_latest_event_ids_in_room.invalidate, (room_id,) + self.get_latest_event_ids_in_room.invalidate, (room_id,) # type: ignore[attr-defined] ) self.db_pool.simple_delete_many_txn( @@ -558,7 +564,7 @@ def _cleanup_extremities_bg_update_txn(txn): _BackgroundUpdates.DELETE_SOFT_FAILED_EXTREMITIES ) - def _drop_table_txn(txn): + def _drop_table_txn(txn: LoggingTransaction) -> None: txn.execute("DROP TABLE _extremities_to_check") await self.db_pool.runInteraction( @@ -567,11 +573,11 @@ def _drop_table_txn(txn): return num_handled - async def _redactions_received_ts(self, progress, batch_size): + async def _redactions_received_ts(self, progress: JsonDict, batch_size: int) -> int: """Handles filling out the `received_ts` column in redactions.""" last_event_id = progress.get("last_event_id", "") - def _redactions_received_ts_txn(txn): + def _redactions_received_ts_txn(txn: LoggingTransaction) -> int: # Fetch the set of event IDs that we want to update sql = """ SELECT event_id FROM redactions @@ -622,10 +628,12 @@ def _redactions_received_ts_txn(txn): return count - async def _event_fix_redactions_bytes(self, progress, batch_size): + async def _event_fix_redactions_bytes( + self, progress: JsonDict, batch_size: int + ) -> int: """Undoes hex encoded censored redacted event JSON.""" - def _event_fix_redactions_bytes_txn(txn): + def _event_fix_redactions_bytes_txn(txn: LoggingTransaction) -> None: # This update is quite fast due to new index. txn.execute( """ @@ -650,11 +658,11 @@ def _event_fix_redactions_bytes_txn(txn): return 1 - async def _event_store_labels(self, progress, batch_size): + async def _event_store_labels(self, progress: JsonDict, batch_size: int) -> int: """Background update handler which will store labels for existing events.""" last_event_id = progress.get("last_event_id", "") - def _event_store_labels_txn(txn): + def _event_store_labels_txn(txn: LoggingTransaction) -> int: txn.execute( """ SELECT event_id, json FROM event_json @@ -754,7 +762,10 @@ def get_rejected_events( ), ) - return [(row[0], row[1], db_to_json(row[2]), row[3], row[4]) for row in txn] # type: ignore + return cast( + List[Tuple[str, str, JsonDict, bool, bool]], + [(row[0], row[1], db_to_json(row[2]), row[3], row[4]) for row in txn], + ) results = await self.db_pool.runInteraction( desc="_rejected_events_metadata_get", func=get_rejected_events @@ -912,7 +923,7 @@ async def _chain_cover_index(self, progress: dict, batch_size: int) -> int: def _calculate_chain_cover_txn( self, - txn: Cursor, + txn: LoggingTransaction, last_room_id: str, last_depth: int, last_stream: int, @@ -1023,10 +1034,10 @@ def _calculate_chain_cover_txn( PersistEventsStore._add_chain_cover_index( txn, self.db_pool, - self.event_chain_id_gen, + self.event_chain_id_gen, # type: ignore[attr-defined] event_to_room_id, event_to_types, - event_to_auth_chain, + cast(Dict[str, Sequence[str]], event_to_auth_chain), ) return _CalculateChainCover( @@ -1046,7 +1057,7 @@ async def _purged_chain_cover_index(self, progress: dict, batch_size: int) -> in """ current_event_id = progress.get("current_event_id", "") - def purged_chain_cover_txn(txn) -> int: + def purged_chain_cover_txn(txn: LoggingTransaction) -> int: # The event ID from events will be null if the chain ID / sequence # number points to a purged event. 
sql = """ @@ -1181,14 +1192,14 @@ def _event_arbitrary_relations_txn(txn: LoggingTransaction) -> int: # Iterate the parent IDs and invalidate caches. for parent_id in {r[1] for r in relations_to_insert}: cache_tuple = (parent_id,) - self._invalidate_cache_and_stream( - txn, self.get_relations_for_event, cache_tuple + self._invalidate_cache_and_stream( # type: ignore[attr-defined] + txn, self.get_relations_for_event, cache_tuple # type: ignore[attr-defined] ) - self._invalidate_cache_and_stream( - txn, self.get_aggregation_groups_for_event, cache_tuple + self._invalidate_cache_and_stream( # type: ignore[attr-defined] + txn, self.get_aggregation_groups_for_event, cache_tuple # type: ignore[attr-defined] ) - self._invalidate_cache_and_stream( - txn, self.get_thread_summary, cache_tuple + self._invalidate_cache_and_stream( # type: ignore[attr-defined] + txn, self.get_thread_summary, cache_tuple # type: ignore[attr-defined] ) if results: @@ -1220,7 +1231,7 @@ async def _background_populate_stream_ordering2( """ batch_size = max(batch_size, 1) - def process(txn: Cursor) -> int: + def process(txn: LoggingTransaction) -> int: last_stream = progress.get("last_stream", -(1 << 31)) txn.execute( """ From cbd82d0b2db069400b5d43373838817d8a0209e7 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 30 Dec 2021 13:47:12 -0500 Subject: [PATCH 034/474] Convert all namedtuples to attrs. (#11665) To improve type hints throughout the code. --- changelog.d/11665.misc | 1 + synapse/api/filtering.py | 3 +- synapse/config/repository.py | 34 ++--- synapse/federation/federation_base.py | 5 - synapse/federation/send_queue.py | 47 ++++--- synapse/handlers/appservice.py | 4 +- synapse/handlers/directory.py | 10 +- synapse/handlers/room_list.py | 22 ++- synapse/handlers/typing.py | 14 +- synapse/http/server.py | 10 +- synapse/replication/tcp/streams/_base.py | 129 +++++++++--------- synapse/replication/tcp/streams/federation.py | 15 +- synapse/rest/media/v1/media_repository.py | 19 ++- synapse/state/__init__.py | 5 +- synapse/storage/databases/main/directory.py | 10 +- synapse/storage/databases/main/events.py | 13 +- synapse/storage/databases/main/room.py | 26 +++- synapse/storage/databases/main/search.py | 16 ++- synapse/storage/databases/main/state.py | 14 -- synapse/storage/databases/main/stream.py | 12 +- synapse/types.py | 22 ++- tests/replication/test_federation_ack.py | 6 +- 22 files changed, 231 insertions(+), 206 deletions(-) create mode 100644 changelog.d/11665.misc diff --git a/changelog.d/11665.misc b/changelog.d/11665.misc new file mode 100644 index 000000000000..e7cc8ff23f07 --- /dev/null +++ b/changelog.d/11665.misc @@ -0,0 +1 @@ +Convert `namedtuples` to `attrs`. diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index 13dd6ce248e1..d087c816db57 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -351,8 +351,7 @@ def _check(self, event: FilterEvent) -> bool: True if the event matches the filter. """ # We usually get the full "events" as dictionaries coming through, - # except for presence which actually gets passed around as its own - # namedtuple type. + # except for presence which actually gets passed around as its own type. 
if isinstance(event, UserPresenceState): user_id = event.user_id field_matchers = { diff --git a/synapse/config/repository.py b/synapse/config/repository.py index b129b9dd681c..1980351e7711 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -14,10 +14,11 @@ import logging import os -from collections import namedtuple from typing import Dict, List, Tuple from urllib.request import getproxies_environment # type: ignore +import attr + from synapse.config.server import DEFAULT_IP_RANGE_BLACKLIST, generate_ip_set from synapse.python_dependencies import DependencyException, check_requirements from synapse.types import JsonDict @@ -44,18 +45,20 @@ HTTP_PROXY_SET_WARNING = """\ The Synapse config url_preview_ip_range_blacklist will be ignored as an HTTP(s) proxy is configured.""" -ThumbnailRequirement = namedtuple( - "ThumbnailRequirement", ["width", "height", "method", "media_type"] -) -MediaStorageProviderConfig = namedtuple( - "MediaStorageProviderConfig", - ( - "store_local", # Whether to store newly uploaded local files - "store_remote", # Whether to store newly downloaded remote files - "store_synchronous", # Whether to wait for successful storage for local uploads - ), -) +@attr.s(frozen=True, slots=True, auto_attribs=True) +class ThumbnailRequirement: + width: int + height: int + method: str + media_type: str + + +@attr.s(frozen=True, slots=True, auto_attribs=True) +class MediaStorageProviderConfig: + store_local: bool # Whether to store newly uploaded local files + store_remote: bool # Whether to store newly downloaded remote files + store_synchronous: bool # Whether to wait for successful storage for local uploads def parse_thumbnail_requirements( @@ -66,11 +69,10 @@ def parse_thumbnail_requirements( method, and thumbnail media type to precalculate Args: - thumbnail_sizes(list): List of dicts with "width", "height", and - "method" keys + thumbnail_sizes: List of dicts with "width", "height", and "method" keys + Returns: - Dictionary mapping from media type string to list of - ThumbnailRequirement tuples. + Dictionary mapping from media type string to list of ThumbnailRequirement. """ requirements: Dict[str, List[ThumbnailRequirement]] = {} for size in thumbnail_sizes: diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index f56344a3b94f..4df90e02d7e6 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging -from collections import namedtuple from typing import TYPE_CHECKING from synapse.api.constants import MAX_DEPTH, EventContentFields, EventTypes, Membership @@ -104,10 +103,6 @@ async def _check_sigs_and_hash( return pdu -class PduToCheckSig(namedtuple("PduToCheckSig", ["pdu", "sender_domain", "deferreds"])): - pass - - async def _check_sigs_on_pdu( keyring: Keyring, room_version: RoomVersion, pdu: EventBase ) -> None: diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index 63289a5a334f..0d7c4f506758 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -30,7 +30,6 @@ """ import logging -from collections import namedtuple from typing import ( TYPE_CHECKING, Dict, @@ -43,6 +42,7 @@ Type, ) +import attr from sortedcontainers import SortedDict from synapse.api.presence import UserPresenceState @@ -382,13 +382,11 @@ def add_to_buffer(self, buff: "ParsedFederationStreamData") -> None: raise NotImplementedError() -class PresenceDestinationsRow( - BaseFederationRow, - namedtuple( - "PresenceDestinationsRow", - ("state", "destinations"), # UserPresenceState # list[str] - ), -): +@attr.s(slots=True, frozen=True, auto_attribs=True) +class PresenceDestinationsRow(BaseFederationRow): + state: UserPresenceState + destinations: List[str] + TypeId = "pd" @staticmethod @@ -404,17 +402,15 @@ def add_to_buffer(self, buff: "ParsedFederationStreamData") -> None: buff.presence_destinations.append((self.state, self.destinations)) -class KeyedEduRow( - BaseFederationRow, - namedtuple( - "KeyedEduRow", - ("key", "edu"), # tuple(str) - the edu key passed to send_edu # Edu - ), -): +@attr.s(slots=True, frozen=True, auto_attribs=True) +class KeyedEduRow(BaseFederationRow): """Streams EDUs that have an associated key that is ued to clobber. For example, typing EDUs clobber based on room_id. """ + key: Tuple[str, ...] # the edu key passed to send_edu + edu: Edu + TypeId = "k" @staticmethod @@ -428,9 +424,12 @@ def add_to_buffer(self, buff: "ParsedFederationStreamData") -> None: buff.keyed_edus.setdefault(self.edu.destination, {})[self.key] = self.edu -class EduRow(BaseFederationRow, namedtuple("EduRow", ("edu",))): # Edu +@attr.s(slots=True, frozen=True, auto_attribs=True) +class EduRow(BaseFederationRow): """Streams EDUs that don't have keys. See KeyedEduRow""" + edu: Edu + TypeId = "e" @staticmethod @@ -453,14 +452,14 @@ def add_to_buffer(self, buff: "ParsedFederationStreamData") -> None: TypeToRow = {Row.TypeId: Row for Row in _rowtypes} -ParsedFederationStreamData = namedtuple( - "ParsedFederationStreamData", - ( - "presence_destinations", # list of tuples of UserPresenceState and destinations - "keyed_edus", # dict of destination -> { key -> Edu } - "edus", # dict of destination -> [Edu] - ), -) +@attr.s(slots=True, frozen=True, auto_attribs=True) +class ParsedFederationStreamData: + # list of tuples of UserPresenceState and destinations + presence_destinations: List[Tuple[UserPresenceState, List[str]]] + # dict of destination -> { key -> Edu } + keyed_edus: Dict[str, Dict[Tuple[str, ...], Edu]] + # dict of destination -> [Edu] + edus: Dict[str, List[Edu]] def process_rows_for_federation( diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 9abdad262b78..7833e77e2b6b 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -462,9 +462,9 @@ async def query_room_alias_exists( Args: room_alias: The room alias to query. 
+ Returns: - namedtuple: with keys "room_id" and "servers" or None if no - association can be found. + RoomAliasMapping or None if no association can be found. """ room_alias_str = room_alias.to_string() services = self.store.get_app_services() diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 7ee5c47fd96b..082f521791cd 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -278,13 +278,15 @@ async def get_association(self, room_alias: RoomAlias) -> JsonDict: users = await self.store.get_users_in_room(room_id) extra_servers = {get_domain_from_id(u) for u in users} - servers = set(extra_servers) | set(servers) + servers_set = set(extra_servers) | set(servers) # If this server is in the list of servers, return it first. - if self.server_name in servers: - servers = [self.server_name] + [s for s in servers if s != self.server_name] + if self.server_name in servers_set: + servers = [self.server_name] + [ + s for s in servers_set if s != self.server_name + ] else: - servers = list(servers) + servers = list(servers_set) return {"room_id": room_id, "servers": servers} diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index ba7a14d651c5..1a33211a1fe1 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -13,9 +13,9 @@ # limitations under the License. import logging -from collections import namedtuple from typing import TYPE_CHECKING, Any, Optional, Tuple +import attr import msgpack from unpaddedbase64 import decode_base64, encode_base64 @@ -474,16 +474,12 @@ async def _get_remote_list_cached( ) -class RoomListNextBatch( - namedtuple( - "RoomListNextBatch", - ( - "last_joined_members", # The count to get rooms after/before - "last_room_id", # The room_id to get rooms after/before - "direction_is_forward", # Bool if this is a next_batch, false if prev_batch - ), - ) -): +@attr.s(slots=True, frozen=True, auto_attribs=True) +class RoomListNextBatch: + last_joined_members: int # The count to get rooms after/before + last_room_id: str # The room_id to get rooms after/before + direction_is_forward: bool # True if this is a next_batch, false if prev_batch + KEY_DICT = { "last_joined_members": "m", "last_room_id": "r", @@ -502,12 +498,12 @@ def from_token(cls, token: str) -> "RoomListNextBatch": def to_token(self) -> str: return encode_base64( msgpack.dumps( - {self.KEY_DICT[key]: val for key, val in self._asdict().items()} + {self.KEY_DICT[key]: val for key, val in attr.asdict(self).items()} ) ) def copy_and_replace(self, **kwds: Any) -> "RoomListNextBatch": - return self._replace(**kwds) + return attr.evolve(self, **kwds) def _matches_room_entry(room_entry: JsonDict, search_filter: dict) -> bool: diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 1676ebd057c1..e43c22832da6 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -13,9 +13,10 @@ # limitations under the License. 
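On the `RoomListNextBatch` hunks above: the namedtuple helpers map one-for-one onto attrs functions, which is all that `to_token` and `copy_and_replace` needed. A pared-down sketch (token encoding and `KEY_DICT` omitted):

```python
import attr


@attr.s(slots=True, frozen=True, auto_attribs=True)
class RoomListNextBatch:
    last_joined_members: int
    last_room_id: str
    direction_is_forward: bool


batch = RoomListNextBatch(10, "!r:example.org", True)
assert attr.asdict(batch)["last_room_id"] == "!r:example.org"  # was self._asdict()
flipped = attr.evolve(batch, direction_is_forward=False)       # was self._replace()
assert flipped.direction_is_forward is False
assert batch.direction_is_forward is True  # frozen: evolve returns a new instance
```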
import logging import random -from collections import namedtuple from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple +import attr + from synapse.api.errors import AuthError, ShadowBanError, SynapseError from synapse.appservice import ApplicationService from synapse.metrics.background_process_metrics import ( @@ -37,7 +38,10 @@ # A tiny object useful for storing a user's membership in a room, as a mapping # key -RoomMember = namedtuple("RoomMember", ("room_id", "user_id")) +@attr.s(slots=True, frozen=True, auto_attribs=True) +class RoomMember: + room_id: str + user_id: str # How often we expect remote servers to resend us presence. @@ -119,7 +123,7 @@ def _handle_timeout_for_member(self, now: int, member: RoomMember) -> None: self.wheel_timer.insert(now=now, obj=member, then=now + 60 * 1000) def is_typing(self, member: RoomMember) -> bool: - return member.user_id in self._room_typing.get(member.room_id, []) + return member.user_id in self._room_typing.get(member.room_id, set()) async def _push_remote(self, member: RoomMember, typing: bool) -> None: if not self.federation: @@ -166,9 +170,9 @@ def process_replication_rows( for row in rows: self._room_serials[row.room_id] = token - prev_typing = set(self._room_typing.get(row.room_id, [])) + prev_typing = self._room_typing.get(row.room_id, set()) now_typing = set(row.user_ids) - self._room_typing[row.room_id] = row.user_ids + self._room_typing[row.room_id] = now_typing if self.federation: run_as_background_process( diff --git a/synapse/http/server.py b/synapse/http/server.py index e302946591bf..09b412548968 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -14,7 +14,6 @@ # limitations under the License. import abc -import collections import html import logging import types @@ -37,6 +36,7 @@ Union, ) +import attr import jinja2 from canonicaljson import encode_canonical_json from typing_extensions import Protocol @@ -354,9 +354,11 @@ def _send_error_response( return_json_error(f, request) -_PathEntry = collections.namedtuple( - "_PathEntry", ["pattern", "callback", "servlet_classname"] -) +@attr.s(slots=True, frozen=True, auto_attribs=True) +class _PathEntry: + pattern: Pattern + callback: ServletCallback + servlet_classname: str class JsonResource(DirectServeJsonResource): diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index 743a01da08f0..5a2d90c5309f 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -15,7 +15,6 @@ import heapq import logging -from collections import namedtuple from typing import ( TYPE_CHECKING, Any, @@ -30,6 +29,7 @@ import attr from synapse.replication.http.streams import ReplicationGetStreamUpdates +from synapse.types import JsonDict if TYPE_CHECKING: from synapse.server import HomeServer @@ -226,17 +226,14 @@ class BackfillStream(Stream): or it went from being an outlier to not. 
""" - BackfillStreamRow = namedtuple( - "BackfillStreamRow", - ( - "event_id", # str - "room_id", # str - "type", # str - "state_key", # str, optional - "redacts", # str, optional - "relates_to", # str, optional - ), - ) + @attr.s(slots=True, frozen=True, auto_attribs=True) + class BackfillStreamRow: + event_id: str + room_id: str + type: str + state_key: Optional[str] + redacts: Optional[str] + relates_to: Optional[str] NAME = "backfill" ROW_TYPE = BackfillStreamRow @@ -256,18 +253,15 @@ def _current_token(self, instance_name: str) -> int: class PresenceStream(Stream): - PresenceStreamRow = namedtuple( - "PresenceStreamRow", - ( - "user_id", # str - "state", # str - "last_active_ts", # int - "last_federation_update_ts", # int - "last_user_sync_ts", # int - "status_msg", # str - "currently_active", # bool - ), - ) + @attr.s(slots=True, frozen=True, auto_attribs=True) + class PresenceStreamRow: + user_id: str + state: str + last_active_ts: int + last_federation_update_ts: int + last_user_sync_ts: int + status_msg: str + currently_active: bool NAME = "presence" ROW_TYPE = PresenceStreamRow @@ -302,7 +296,7 @@ class PresenceFederationStream(Stream): send. """ - @attr.s(slots=True, auto_attribs=True) + @attr.s(slots=True, frozen=True, auto_attribs=True) class PresenceFederationStreamRow: destination: str user_id: str @@ -320,9 +314,10 @@ def __init__(self, hs: "HomeServer"): class TypingStream(Stream): - TypingStreamRow = namedtuple( - "TypingStreamRow", ("room_id", "user_ids") # str # list(str) - ) + @attr.s(slots=True, frozen=True, auto_attribs=True) + class TypingStreamRow: + room_id: str + user_ids: List[str] NAME = "typing" ROW_TYPE = TypingStreamRow @@ -348,16 +343,13 @@ def __init__(self, hs: "HomeServer"): class ReceiptsStream(Stream): - ReceiptsStreamRow = namedtuple( - "ReceiptsStreamRow", - ( - "room_id", # str - "receipt_type", # str - "user_id", # str - "event_id", # str - "data", # dict - ), - ) + @attr.s(slots=True, frozen=True, auto_attribs=True) + class ReceiptsStreamRow: + room_id: str + receipt_type: str + user_id: str + event_id: str + data: dict NAME = "receipts" ROW_TYPE = ReceiptsStreamRow @@ -374,7 +366,9 @@ def __init__(self, hs: "HomeServer"): class PushRulesStream(Stream): """A user has changed their push rules""" - PushRulesStreamRow = namedtuple("PushRulesStreamRow", ("user_id",)) # str + @attr.s(slots=True, frozen=True, auto_attribs=True) + class PushRulesStreamRow: + user_id: str NAME = "push_rules" ROW_TYPE = PushRulesStreamRow @@ -396,10 +390,12 @@ def _current_token(self, instance_name: str) -> int: class PushersStream(Stream): """A user has added/changed/removed a pusher""" - PushersStreamRow = namedtuple( - "PushersStreamRow", - ("user_id", "app_id", "pushkey", "deleted"), # str # str # str # bool - ) + @attr.s(slots=True, frozen=True, auto_attribs=True) + class PushersStreamRow: + user_id: str + app_id: str + pushkey: str + deleted: bool NAME = "pushers" ROW_TYPE = PushersStreamRow @@ -419,7 +415,7 @@ class CachesStream(Stream): the cache on the workers """ - @attr.s(slots=True) + @attr.s(slots=True, frozen=True, auto_attribs=True) class CachesStreamRow: """Stream to inform workers they should invalidate their cache. @@ -430,9 +426,9 @@ class CachesStreamRow: invalidation_ts: Timestamp of when the invalidation took place. 
""" - cache_func = attr.ib(type=str) - keys = attr.ib(type=Optional[List[Any]]) - invalidation_ts = attr.ib(type=int) + cache_func: str + keys: Optional[List[Any]] + invalidation_ts: int NAME = "caches" ROW_TYPE = CachesStreamRow @@ -451,9 +447,9 @@ class DeviceListsStream(Stream): told about a device update. """ - @attr.s(slots=True) + @attr.s(slots=True, frozen=True, auto_attribs=True) class DeviceListsStreamRow: - entity = attr.ib(type=str) + entity: str NAME = "device_lists" ROW_TYPE = DeviceListsStreamRow @@ -470,7 +466,9 @@ def __init__(self, hs: "HomeServer"): class ToDeviceStream(Stream): """New to_device messages for a client""" - ToDeviceStreamRow = namedtuple("ToDeviceStreamRow", ("entity",)) # str + @attr.s(slots=True, frozen=True, auto_attribs=True) + class ToDeviceStreamRow: + entity: str NAME = "to_device" ROW_TYPE = ToDeviceStreamRow @@ -487,9 +485,11 @@ def __init__(self, hs: "HomeServer"): class TagAccountDataStream(Stream): """Someone added/removed a tag for a room""" - TagAccountDataStreamRow = namedtuple( - "TagAccountDataStreamRow", ("user_id", "room_id", "data") # str # str # dict - ) + @attr.s(slots=True, frozen=True, auto_attribs=True) + class TagAccountDataStreamRow: + user_id: str + room_id: str + data: JsonDict NAME = "tag_account_data" ROW_TYPE = TagAccountDataStreamRow @@ -506,10 +506,11 @@ def __init__(self, hs: "HomeServer"): class AccountDataStream(Stream): """Global or per room account data was changed""" - AccountDataStreamRow = namedtuple( - "AccountDataStreamRow", - ("user_id", "room_id", "data_type"), # str # Optional[str] # str - ) + @attr.s(slots=True, frozen=True, auto_attribs=True) + class AccountDataStreamRow: + user_id: str + room_id: Optional[str] + data_type: str NAME = "account_data" ROW_TYPE = AccountDataStreamRow @@ -573,10 +574,12 @@ async def _update_function( class GroupServerStream(Stream): - GroupsStreamRow = namedtuple( - "GroupsStreamRow", - ("group_id", "user_id", "type", "content"), # str # str # str # dict - ) + @attr.s(slots=True, frozen=True, auto_attribs=True) + class GroupsStreamRow: + group_id: str + user_id: str + type: str + content: JsonDict NAME = "groups" ROW_TYPE = GroupsStreamRow @@ -593,7 +596,9 @@ def __init__(self, hs: "HomeServer"): class UserSignatureStream(Stream): """A user has signed their own device with their user-signing key""" - UserSignatureStreamRow = namedtuple("UserSignatureStreamRow", ("user_id")) # str + @attr.s(slots=True, frozen=True, auto_attribs=True) + class UserSignatureStreamRow: + user_id: str NAME = "user_signature" ROW_TYPE = UserSignatureStreamRow diff --git a/synapse/replication/tcp/streams/federation.py b/synapse/replication/tcp/streams/federation.py index 0600cdbf363d..4046bdec6931 100644 --- a/synapse/replication/tcp/streams/federation.py +++ b/synapse/replication/tcp/streams/federation.py @@ -12,14 +12,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from collections import namedtuple from typing import TYPE_CHECKING, Any, Awaitable, Callable, List, Tuple +import attr + from synapse.replication.tcp.streams._base import ( Stream, current_token_without_instance, make_http_update_function, ) +from synapse.types import JsonDict if TYPE_CHECKING: from synapse.server import HomeServer @@ -30,13 +32,10 @@ class FederationStream(Stream): sending disabled. 
""" - FederationStreamRow = namedtuple( - "FederationStreamRow", - ( - "type", # str, the type of data as defined in the BaseFederationRows - "data", # dict, serialization of a federation.send_queue.BaseFederationRow - ), - ) + @attr.s(slots=True, frozen=True, auto_attribs=True) + class FederationStreamRow: + type: str # the type of data as defined in the BaseFederationRows + data: JsonDict # serialization of a federation.send_queue.BaseFederationRow NAME = "federation" ROW_TYPE = FederationStreamRow diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 244ba261bbc4..71b9a34b145d 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -739,14 +739,21 @@ async def _generate_thumbnails( # We deduplicate the thumbnail sizes by ignoring the cropped versions if # they have the same dimensions of a scaled one. thumbnails: Dict[Tuple[int, int, str], str] = {} - for r_width, r_height, r_method, r_type in requirements: - if r_method == "crop": - thumbnails.setdefault((r_width, r_height, r_type), r_method) - elif r_method == "scale": - t_width, t_height = thumbnailer.aspect(r_width, r_height) + for requirement in requirements: + if requirement.method == "crop": + thumbnails.setdefault( + (requirement.width, requirement.height, requirement.media_type), + requirement.method, + ) + elif requirement.method == "scale": + t_width, t_height = thumbnailer.aspect( + requirement.width, requirement.height + ) t_width = min(m_width, t_width) t_height = min(m_height, t_height) - thumbnails[(t_width, t_height, r_type)] = r_method + thumbnails[ + (t_width, t_height, requirement.media_type) + ] = requirement.method # Now we generate the thumbnails for each dimension, store it for (t_width, t_height, t_type), t_method in thumbnails.items(): diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 446204dbe52f..69ac8c3423ac 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -14,7 +14,7 @@ # limitations under the License. import heapq import logging -from collections import defaultdict, namedtuple +from collections import defaultdict from typing import ( TYPE_CHECKING, Any, @@ -69,9 +69,6 @@ ) -KeyStateTuple = namedtuple("KeyStateTuple", ("context", "type", "state_key")) - - EVICTION_TIMEOUT_SECONDS = 60 * 60 diff --git a/synapse/storage/databases/main/directory.py b/synapse/storage/databases/main/directory.py index a3442814d77a..f76c6121e8a9 100644 --- a/synapse/storage/databases/main/directory.py +++ b/synapse/storage/databases/main/directory.py @@ -12,16 +12,22 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from collections import namedtuple from typing import Iterable, List, Optional, Tuple +import attr + from synapse.api.errors import SynapseError from synapse.storage.database import LoggingTransaction from synapse.storage.databases.main import CacheInvalidationWorkerStore from synapse.types import RoomAlias from synapse.util.caches.descriptors import cached -RoomAliasMapping = namedtuple("RoomAliasMapping", ("room_id", "room_alias", "servers")) + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class RoomAliasMapping: + room_id: str + room_alias: str + servers: List[str] class DirectoryWorkerStore(CacheInvalidationWorkerStore): diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 81e67ece5535..dd255aefb9dd 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1976,14 +1976,17 @@ def _store_retention_policy_for_room_txn(self, txn, event): txn, self.store.get_retention_policy_for_room, (event.room_id,) ) - def store_event_search_txn(self, txn, event, key, value): + def store_event_search_txn( + self, txn: LoggingTransaction, event: EventBase, key: str, value: str + ) -> None: """Add event to the search table Args: - txn (cursor): - event (EventBase): - key (str): - value (str): + txn: The database transaction. + event: The event being added to the search table. + key: A key describing the search value (one of "content.name", + "content.topic", or "content.body") + value: The value from the event's content. """ self.store.store_search_entries_txn( txn, diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 4472335af901..c0e837854a32 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -13,11 +13,22 @@ # See the License for the specific language governing permissions and # limitations under the License. -import collections import logging from abc import abstractmethod from enum import Enum -from typing import TYPE_CHECKING, Any, Awaitable, Dict, List, Optional, Tuple, cast +from typing import ( + TYPE_CHECKING, + Any, + Awaitable, + Dict, + List, + Optional, + Tuple, + Union, + cast, +) + +import attr from synapse.api.constants import EventContentFields, EventTypes, JoinRules from synapse.api.errors import StoreError @@ -43,9 +54,10 @@ logger = logging.getLogger(__name__) -RatelimitOverride = collections.namedtuple( - "RatelimitOverride", ("messages_per_second", "burst_count") -) +@attr.s(slots=True, frozen=True, auto_attribs=True) +class RatelimitOverride: + messages_per_second: int + burst_count: int class RoomSortOrder(Enum): @@ -207,6 +219,7 @@ def _count_public_rooms_txn(txn: LoggingTransaction) -> int: WHERE appservice_id = ? AND network_id = ? """ query_args.append(network_tuple.appservice_id) + assert network_tuple.network_id is not None query_args.append(network_tuple.network_id) else: published_sql = """ @@ -284,7 +297,7 @@ async def get_largest_public_rooms( """ where_clauses = [] - query_args = [] + query_args: List[Union[str, int]] = [] if network_tuple: if network_tuple.appservice_id: @@ -293,6 +306,7 @@ async def get_largest_public_rooms( WHERE appservice_id = ? AND network_id = ? 
""" query_args.append(network_tuple.appservice_id) + assert network_tuple.network_id is not None query_args.append(network_tuple.network_id) else: published_sql = """ diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index f87acfb86604..2d085a5764a7 100644 --- a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -14,9 +14,10 @@ import logging import re -from collections import namedtuple from typing import TYPE_CHECKING, Collection, Iterable, List, Optional, Set +import attr + from synapse.api.errors import SynapseError from synapse.events import EventBase from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause @@ -33,10 +34,15 @@ logger = logging.getLogger(__name__) -SearchEntry = namedtuple( - "SearchEntry", - ["key", "value", "event_id", "room_id", "stream_ordering", "origin_server_ts"], -) + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class SearchEntry: + key: str + value: str + event_id: str + room_id: str + stream_ordering: Optional[int] + origin_server_ts: int def _clean_value_for_search(value: str) -> str: diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index 4bc044fb1606..7e5a6aae184d 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -14,7 +14,6 @@ # limitations under the License. import collections.abc import logging -from collections import namedtuple from typing import TYPE_CHECKING, Iterable, Optional, Set from synapse.api.constants import EventTypes, Membership @@ -43,19 +42,6 @@ MAX_STATE_DELTA_HOPS = 100 -class _GetStateGroupDelta( - namedtuple("_GetStateGroupDelta", ("prev_group", "delta_ids")) -): - """Return type of get_state_group_delta that implements __len__, which lets - us use the itrable flag when caching - """ - - __slots__ = [] - - def __len__(self): - return len(self.delta_ids) if self.delta_ids else 0 - - # this inherits from EventsWorkerStore because it calls self.get_events class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): """The parts of StateGroupStore that can be called from workers.""" diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 9488fd509463..b0642ca69faf 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -36,9 +36,9 @@ """ import abc import logging -from collections import namedtuple from typing import TYPE_CHECKING, Collection, Dict, List, Optional, Set, Tuple +import attr from frozendict import frozendict from twisted.internet import defer @@ -74,9 +74,11 @@ # Used as return values for pagination APIs -_EventDictReturn = namedtuple( - "_EventDictReturn", ("event_id", "topological_ordering", "stream_ordering") -) +@attr.s(slots=True, frozen=True, auto_attribs=True) +class _EventDictReturn: + event_id: str + topological_ordering: Optional[int] + stream_ordering: int def generate_pagination_where_clause( @@ -825,7 +827,7 @@ def _set_before_and_after( for event, row in zip(events, rows): stream = row.stream_ordering if topo_order and row.topological_ordering: - topo = row.topological_ordering + topo: Optional[int] = row.topological_ordering else: topo = None internal = event.internal_metadata diff --git a/synapse/types.py b/synapse/types.py index b06979e8e896..42aeaf6270e1 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -15,7 +15,6 @@ import abc import re import string -from collections import namedtuple 
from typing import ( TYPE_CHECKING, Any, @@ -227,8 +226,7 @@ class DomainSpecificString(metaclass=abc.ABCMeta): localpart = attr.ib(type=str) domain = attr.ib(type=str) - # Because this class is a namedtuple of strings and booleans, it is deeply - # immutable. + # Because this is a frozen class, it is deeply immutable. def __copy__(self): return self @@ -708,16 +706,18 @@ def to_room_stream_token(self) -> RoomStreamToken: return RoomStreamToken(None, self.stream) -class ThirdPartyInstanceID( - namedtuple("ThirdPartyInstanceID", ("appservice_id", "network_id")) -): +@attr.s(slots=True, frozen=True, auto_attribs=True) +class ThirdPartyInstanceID: + appservice_id: Optional[str] + network_id: Optional[str] + # Deny iteration because it will bite you if you try to create a singleton # set by: # users = set(user) def __iter__(self): raise ValueError("Attempted to iterate a %s" % (type(self).__name__,)) - # Because this class is a namedtuple of strings, it is deeply immutable. + # Because this class is a frozen class, it is deeply immutable. def __copy__(self): return self @@ -725,22 +725,18 @@ def __deepcopy__(self, memo): return self @classmethod - def from_string(cls, s): + def from_string(cls, s: str) -> "ThirdPartyInstanceID": bits = s.split("|", 2) if len(bits) != 2: raise SynapseError(400, "Invalid ID %r" % (s,)) return cls(appservice_id=bits[0], network_id=bits[1]) - def to_string(self): + def to_string(self) -> str: return "%s|%s" % (self.appservice_id, self.network_id) __str__ = to_string - @classmethod - def create(cls, appservice_id, network_id): - return cls(appservice_id=appservice_id, network_id=network_id) - @attr.s(slots=True) class ReadReceipt: diff --git a/tests/replication/test_federation_ack.py b/tests/replication/test_federation_ack.py index 04a869e29549..1b6a4bf4b0b1 100644 --- a/tests/replication/test_federation_ack.py +++ b/tests/replication/test_federation_ack.py @@ -62,7 +62,11 @@ def test_federation_ack_sent(self): "federation", "master", token=10, - rows=[FederationStream.FederationStreamRow(type="x", data=[1, 2, 3])], + rows=[ + FederationStream.FederationStreamRow( + type="x", data={"test": [1, 2, 3]} + ) + ], ) ) From 13c974ed358c28940588f447b363063c76063fc2 Mon Sep 17 00:00:00 2001 From: Shay Date: Mon, 3 Jan 2022 11:17:16 -0800 Subject: [PATCH 035/474] Drop Bionic from Debian builds (#11633) * update Trove classifiers to remove py36 * stop building bionic * update dh-virtualenv * newsfragment * fix newsfragment * update version refs * another try at correct tag * Update changelog --- changelog.d/11633.misc | 1 + docker/Dockerfile-dhvirtualenv | 9 ++++----- scripts-dev/build_debian_packages | 1 - setup.py | 1 - 4 files changed, 5 insertions(+), 7 deletions(-) create mode 100644 changelog.d/11633.misc diff --git a/changelog.d/11633.misc b/changelog.d/11633.misc new file mode 100644 index 000000000000..73e814e58ef2 --- /dev/null +++ b/changelog.d/11633.misc @@ -0,0 +1 @@ +Drop support for Python 3.6 and Ubuntu 18.04. \ No newline at end of file diff --git a/docker/Dockerfile-dhvirtualenv b/docker/Dockerfile-dhvirtualenv index 1dd88140c7a4..fbc1d2346fb8 100644 --- a/docker/Dockerfile-dhvirtualenv +++ b/docker/Dockerfile-dhvirtualenv @@ -16,7 +16,7 @@ ARG distro="" ### Stage 0: build a dh-virtualenv ### -# This is only really needed on bionic and focal, since other distributions we +# This is only really needed on focal, since other distributions we # care about have a recent version of dh-virtualenv by default. 
Unfortunately,
 # it looks like focal is going to be with us for a while.
 #
@@ -36,9 +36,8 @@ RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
         wget

 # fetch and unpack the package
-# TODO: Upgrade to 1.2.2 once bionic is dropped (1.2.2 requires debhelper 12; bionic has only 11)
 RUN mkdir /dh-virtualenv
-RUN wget -q -O /dh-virtualenv.tar.gz https://github.com/spotify/dh-virtualenv/archive/ac6e1b1.tar.gz
+RUN wget -q -O /dh-virtualenv.tar.gz https://github.com/spotify/dh-virtualenv/archive/refs/tags/1.2.2.tar.gz
 RUN tar -xv --strip-components=1 -C /dh-virtualenv -f /dh-virtualenv.tar.gz

 # install its build deps. We do another apt-cache-update here, because we might
@@ -86,12 +85,12 @@ RUN apt-get update -qq -o Acquire::Languages=none \
     libpq-dev \
     xmlsec1

-COPY --from=builder /dh-virtualenv_1.2~dev-1_all.deb /
+COPY --from=builder /dh-virtualenv_1.2.2-1_all.deb /

 # install dhvirtualenv. Update the apt cache again first, in case we got a
 # cached cache from docker the first time.
 RUN apt-get update -qq -o Acquire::Languages=none \
-    && apt-get install -yq /dh-virtualenv_1.2~dev-1_all.deb
+    && apt-get install -yq /dh-virtualenv_1.2.2-1_all.deb

 WORKDIR /synapse/source
 ENTRYPOINT ["bash","/synapse/source/docker/build_debian.sh"]
diff --git a/scripts-dev/build_debian_packages b/scripts-dev/build_debian_packages
index 3a9a2d257c6f..4d34e9070363 100755
--- a/scripts-dev/build_debian_packages
+++ b/scripts-dev/build_debian_packages
@@ -24,7 +24,6 @@ DISTS = (
     "debian:bullseye",
     "debian:bookworm",
     "debian:sid",
-    "ubuntu:bionic",  # 18.04 LTS (our EOL forced by Py36 on 2021-12-23)
     "ubuntu:focal",  # 20.04 LTS (our EOL forced by Py38 on 2024-10-14)
     "ubuntu:hirsute",  # 21.04 (EOL 2022-01-05)
     "ubuntu:impish",  # 21.10 (EOL 2022-07)
diff --git a/setup.py b/setup.py
index fbb0133016a7..e618ff898b46 100755
--- a/setup.py
+++ b/setup.py
@@ -162,7 +162,6 @@ def exec_file(path_segments):
         "Topic :: Communications :: Chat",
         "License :: OSI Approved :: Apache Software License",
         "Programming Language :: Python :: 3 :: Only",
-        "Programming Language :: Python :: 3.6",
         "Programming Language :: Python :: 3.7",
         "Programming Language :: Python :: 3.8",
         "Programming Language :: Python :: 3.9",

From 8422a7f7f6154bcbadc52f0d0d27b8e6b989cb4c Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Tue, 4 Jan 2022 11:08:08 -0500
Subject: [PATCH 036/474] Include the topic event in the prejoin state, per
 MSC3173. (#11666)

Invites and knocks will now include the topic in the stripped state sent
to clients before joining the room.
---
 changelog.d/11666.feature                   | 1 +
 docs/sample_config.yaml                     | 1 +
 synapse/config/api.py                       | 2 ++
 tests/federation/transport/test_knocking.py | 9 +++++++++
 4 files changed, 13 insertions(+)
 create mode 100644 changelog.d/11666.feature

diff --git a/changelog.d/11666.feature b/changelog.d/11666.feature
new file mode 100644
index 000000000000..6f6b127e2278
--- /dev/null
+++ b/changelog.d/11666.feature
@@ -0,0 +1 @@
+Include the room topic in the stripped state included with invites and knocking.
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 6696ed5d1ef9..00dfd2c01312 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -1488,6 +1488,7 @@ room_prejoin_state:
    #   - m.room.encryption
    #   - m.room.name
    #   - m.room.create
+   #   - m.room.topic
    #
    # Uncomment the following to disable these defaults (so that only the event
    # types listed in 'additional_event_types' are shared). Defaults to 'false'.
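To make the `room_prejoin_state` addition concrete, a sketch of the stripped state an invited or knocking client might now receive. Senders and contents are invented (the topic text matches the knocking test below), and only the stripped-state fields are shown:

```python
invite_room_state = [
    {
        "type": "m.room.create",
        "state_key": "",
        "sender": "@alice:example.org",
        "content": {"creator": "@alice:example.org"},
    },
    {
        "type": "m.room.name",
        "state_key": "",
        "sender": "@alice:example.org",
        "content": {"name": "Example room"},
    },
    # newly included by this change, per MSC3173:
    {
        "type": "m.room.topic",
        "state_key": "",
        "sender": "@alice:example.org",
        "content": {"topic": "A really cool room"},
    },
]
```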
diff --git a/synapse/config/api.py b/synapse/config/api.py index b18044f9822a..25538b82d555 100644 --- a/synapse/config/api.py +++ b/synapse/config/api.py @@ -107,6 +107,8 @@ def _get_prejoin_state_types(self, config: JsonDict) -> Iterable[str]: EventTypes.Name, # Per MSC1772. EventTypes.Create, + # Per MSC3173. + EventTypes.Topic, ] diff --git a/tests/federation/transport/test_knocking.py b/tests/federation/transport/test_knocking.py index 663960ff534a..bfa156eebbe5 100644 --- a/tests/federation/transport/test_knocking.py +++ b/tests/federation/transport/test_knocking.py @@ -108,6 +108,15 @@ def send_example_state_events_to_room( "state_key": "", }, ), + ( + EventTypes.Topic, + { + "content": { + "topic": "A really cool room", + }, + "state_key": "", + }, + ), ] ) From bd9821f7f17916deec9266483e26b69cd5ce2b3b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 4 Jan 2022 16:10:05 +0000 Subject: [PATCH 037/474] Better error messages from `get_create_event_for_room` (#11638) "Unknown room" can mean a multitude of things here. To help with debugging, add some more words to the exception text. --- changelog.d/11638.misc | 1 + synapse/storage/databases/main/state.py | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11638.misc diff --git a/changelog.d/11638.misc b/changelog.d/11638.misc new file mode 100644 index 000000000000..76dfb56bdac0 --- /dev/null +++ b/changelog.d/11638.misc @@ -0,0 +1 @@ +Improve the error messages from `get_create_event_for_room`. diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index 7e5a6aae184d..2fb3e65192f5 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -177,11 +177,15 @@ async def get_create_event_for_room(self, room_id: str) -> EventBase: NotFoundError if the room is unknown """ state_ids = await self.get_current_state_ids(room_id) + + if not state_ids: + raise NotFoundError(f"Current state for room {room_id} is empty") + create_id = state_ids.get((EventTypes.Create, "")) # If we can't find the create event, assume we've hit a dead end if not create_id: - raise NotFoundError("Unknown room %s" % (room_id,)) + raise NotFoundError(f"No create event in current state for room {room_id}") # Retrieve the room's create event and return create_event = await self.get_event(create_id) From 2359ee3864a065229c80e3ff58faa981edd24558 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 4 Jan 2022 16:10:27 +0000 Subject: [PATCH 038/474] Remove redundant `get_current_events_token` (#11643) * Push `get_room_{min,max_stream_ordering}` into StreamStore Both implementations of this are identical, so we may as well push it down and get rid of the abstract base class nonsense. * Remove redundant `StreamStore` class This is empty now * Remove redundant `get_current_events_token` This was an exact duplicate of `get_room_max_stream_ordering`, so let's get rid of it. 
* newsfile --- changelog.d/11643.misc | 1 + synapse/handlers/federation_event.py | 2 +- synapse/handlers/presence.py | 2 +- synapse/replication/slave/storage/events.py | 9 ----- synapse/storage/databases/main/__init__.py | 4 +-- .../storage/databases/main/events_worker.py | 4 --- synapse/storage/databases/main/stream.py | 34 ++++++++----------- 7 files changed, 20 insertions(+), 36 deletions(-) create mode 100644 changelog.d/11643.misc diff --git a/changelog.d/11643.misc b/changelog.d/11643.misc new file mode 100644 index 000000000000..1c3b3071f66a --- /dev/null +++ b/changelog.d/11643.misc @@ -0,0 +1 @@ +Remove redundant `get_current_events_token` method. diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 9917613298c6..d08e48da58b4 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -1838,7 +1838,7 @@ async def persist_events_and_notify( The stream ID after which all events have been persisted. """ if not event_and_contexts: - return self._store.get_current_events_token() + return self._store.get_room_max_stream_ordering() instance = self._config.worker.events_shard_config.get_instance(room_id) if instance != self._instance_name: diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 454d06c9733d..c781fefb1bc7 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -729,7 +729,7 @@ def run_persister() -> Awaitable[None]: # Presence is best effort and quickly heals itself, so lets just always # stream from the current state when we restart. - self._event_pos = self.store.get_current_events_token() + self._event_pos = self.store.get_room_max_stream_ordering() self._event_processing = False async def _on_shutdown(self) -> None: diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py index 50e7379e8301..0f0837269486 100644 --- a/synapse/replication/slave/storage/events.py +++ b/synapse/replication/slave/storage/events.py @@ -80,12 +80,3 @@ def __init__( min_curr_state_delta_id, prefilled_cache=curr_state_delta_prefill, ) - - # Cached functions can't be accessed through a class instance so we need - # to reach inside the __dict__ to extract them. 
- - def get_room_max_stream_ordering(self): - return self._stream_id_gen.get_current_token() - - def get_room_min_stream_ordering(self): - return self._backfill_id_gen.get_current_token() diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index a594223fc6ca..f024761ba7b8 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -68,7 +68,7 @@ from .signatures import SignatureStore from .state import StateStore from .stats import StatsStore -from .stream import StreamStore +from .stream import StreamWorkerStore from .tags import TagsStore from .transactions import TransactionWorkerStore from .ui_auth import UIAuthStore @@ -87,7 +87,7 @@ class DataStore( RoomStore, RoomBatchStore, RegistrationStore, - StreamStore, + StreamWorkerStore, ProfileStore, PresenceStore, TransactionWorkerStore, diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index c7b660ac5a6f..8d4287045a8b 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -1383,10 +1383,6 @@ async def get_room_complexity(self, room_id: str) -> Dict[str, float]: return {"v1": complexity_v1} - def get_current_events_token(self) -> int: - """The current maximum token that events have reached""" - return self._stream_id_gen.get_current_token() - async def get_all_new_forward_event_rows( self, instance_name: str, last_id: int, current_id: int, limit: int ) -> List[Tuple[int, str, str, str, str, str, str, str, str]]: diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index b0642ca69faf..319464b1fa83 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -34,7 +34,7 @@ - topological tokems: "t%d-%d", where the integers map to the topological and stream ordering columns respectively. """ -import abc + import logging from typing import TYPE_CHECKING, Collection, Dict, List, Optional, Set, Tuple @@ -336,12 +336,7 @@ def filter_to_clause(event_filter: Optional[Filter]) -> Tuple[str, List[str]]: return " AND ".join(clauses), args -class StreamWorkerStore(EventsWorkerStore, SQLBaseStore, metaclass=abc.ABCMeta): - """This is an abstract base class where subclasses must implement - `get_room_max_stream_ordering` and `get_room_min_stream_ordering` - which can be called in the initializer. - """ - +class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): def __init__( self, database: DatabasePool, @@ -379,13 +374,22 @@ def __init__( self._stream_order_on_start = self.get_room_max_stream_ordering() - @abc.abstractmethod def get_room_max_stream_ordering(self) -> int: - raise NotImplementedError() + """Get the stream_ordering of regular events that we have committed up to + + Returns the maximum stream id such that all stream ids less than or + equal to it have been successfully persisted. + """ + return self._stream_id_gen.get_current_token() - @abc.abstractmethod def get_room_min_stream_ordering(self) -> int: - raise NotImplementedError() + """Get the stream_ordering of backfilled events that we have committed up to + + Backfilled events use *negative* stream orderings, so this returns the + minimum negative stream id such that all stream ids greater than or + equal to it have been successfully persisted. 
+ """ + return self._backfill_id_gen.get_current_token() def get_room_max_token(self) -> RoomStreamToken: """Get a `RoomStreamToken` that marks the current maximum persisted @@ -1351,11 +1355,3 @@ async def get_name_from_instance_id(self, instance_id: int) -> str: retcol="instance_name", desc="get_name_from_instance_id", ) - - -class StreamStore(StreamWorkerStore): - def get_room_max_stream_ordering(self) -> int: - return self._stream_id_gen.get_current_token() - - def get_room_min_stream_ordering(self) -> int: - return self._backfill_id_gen.get_current_token() From 878aa5529399ece8fbb68674a6de86d6958bacae Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 4 Jan 2022 16:31:32 +0000 Subject: [PATCH 039/474] `FederationClient.backfill`: stop flagging events as outliers (#11632) Events returned by `backfill` should not be flagged as outliers. Fixes: ``` AssertionError: null File "synapse/handlers/federation.py", line 313, in try_backfill dom, room_id, limit=100, extremities=extremities File "synapse/handlers/federation_event.py", line 517, in backfill await self._process_pulled_events(dest, events, backfilled=True) File "synapse/handlers/federation_event.py", line 642, in _process_pulled_events await self._process_pulled_event(origin, ev, backfilled=backfilled) File "synapse/handlers/federation_event.py", line 669, in _process_pulled_event assert not event.internal_metadata.is_outlier() ``` See https://sentry.matrix.org/sentry/synapse-matrixorg/issues/231992 Fixes #8894. --- changelog.d/11632.bugfix | 1 + synapse/federation/federation_client.py | 2 +- synapse/handlers/federation_event.py | 4 +++- 3 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelog.d/11632.bugfix diff --git a/changelog.d/11632.bugfix b/changelog.d/11632.bugfix new file mode 100644 index 000000000000..c73d41652a87 --- /dev/null +++ b/changelog.d/11632.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.19.3 which could sometimes cause `AssertionError`s when backfilling rooms over federation. diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index fee1477ab684..7353c2b6b147 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -272,7 +272,7 @@ async def backfill( # Check signatures and hash of pdus, removing any from the list that fail checks pdus[:] = await self._check_sigs_and_hash_and_fetch( - dest, pdus, outlier=True, room_version=room_version + dest, pdus, room_version=room_version ) return pdus diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index d08e48da58b4..7c81e3651f95 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -666,7 +666,9 @@ async def _process_pulled_event( logger.info("Processing pulled event %s", event) # these should not be outliers. 
- assert not event.internal_metadata.is_outlier() + assert ( + not event.internal_metadata.is_outlier() + ), "pulled event unexpectedly flagged as outlier" event_id = event.event_id From b38bdae3a2e5b7cfe862580368b996b8d7dfa50f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 4 Jan 2022 16:36:33 +0000 Subject: [PATCH 040/474] Fix AssertionErrors after purging events (#11642) * Fix AssertionErrors after purging events If you purged a bunch of events from your database, and then restarted synapse without receiving more events, then you would get a bunch of AssertionErrors on restart. This fixes the situation by rewinding the stream processors. * `check-newsfragment`: ignore deleted newsfiles --- changelog.d/11536.bugfix | 1 + changelog.d/11536.misc | 1 - changelog.d/11642.bugfix | 1 + scripts-dev/check-newsfragment | 4 ++-- synapse/handlers/stats.py | 11 +++++++++++ synapse/handlers/user_directory.py | 18 +++++++++++++++--- 6 files changed, 30 insertions(+), 6 deletions(-) create mode 100644 changelog.d/11536.bugfix delete mode 100644 changelog.d/11536.misc create mode 100644 changelog.d/11642.bugfix diff --git a/changelog.d/11536.bugfix b/changelog.d/11536.bugfix new file mode 100644 index 000000000000..4a1b00725479 --- /dev/null +++ b/changelog.d/11536.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug which could cause `AssertionError`s to be written to the log when Synapse was restarted after purging events from the database. diff --git a/changelog.d/11536.misc b/changelog.d/11536.misc deleted file mode 100644 index b9191c111b18..000000000000 --- a/changelog.d/11536.misc +++ /dev/null @@ -1 +0,0 @@ -Improvements to log messages around handling stream ids. diff --git a/changelog.d/11642.bugfix b/changelog.d/11642.bugfix new file mode 100644 index 000000000000..4a1b00725479 --- /dev/null +++ b/changelog.d/11642.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug which could cause `AssertionError`s to be written to the log when Synapse was restarted after purging events from the database. diff --git a/scripts-dev/check-newsfragment b/scripts-dev/check-newsfragment index af4de345df57..c764011d6adb 100755 --- a/scripts-dev/check-newsfragment +++ b/scripts-dev/check-newsfragment @@ -42,8 +42,8 @@ echo "--------------------------" echo matched=0 -for f in $(git diff --name-only FETCH_HEAD... -- changelog.d); do - # check that any modified newsfiles on this branch end with a full stop. +for f in $(git diff --diff-filter=d --name-only FETCH_HEAD... -- changelog.d); do + # check that any added newsfiles on this branch end with a full stop. lastchar=$(tr -d '\n' < "$f" | tail -c 1) if [ "$lastchar" != '.' ] && [ "$lastchar" != '!' ]; then echo -e "\e[31mERROR: newsfragment $f does not end with a '.' or '!'\e[39m" >&2 diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py index bd3e6f2ec77b..29e41a4c796c 100644 --- a/synapse/handlers/stats.py +++ b/synapse/handlers/stats.py @@ -80,6 +80,17 @@ async def _unsafe_process(self) -> None: # If self.pos is None then means we haven't fetched it from DB if self.pos is None: self.pos = await self.store.get_stats_positions() + room_max_stream_ordering = self.store.get_room_max_stream_ordering() + if self.pos > room_max_stream_ordering: + # apparently, we've processed more events than exist in the database! + # this can happen if events are removed with history purge or similar. 
+            logger.warning(
+                "Event stream ordering appears to have gone backwards (%i -> %i): "
+                "rewinding stats processor",
+                self.pos,
+                room_max_stream_ordering,
+            )
+            self.pos = room_max_stream_ordering

         # Loop round handling deltas until we're up to date
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index a0eb45446f56..1565e034cb25 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -148,9 +148,21 @@ async def _unsafe_process(self) -> None:
         if self.pos is None:
             self.pos = await self.store.get_user_directory_stream_pos()

-        # If still None then the initial background update hasn't happened yet.
-        if self.pos is None:
-            return None
+            # If still None then the initial background update hasn't happened yet.
+            if self.pos is None:
+                return None
+
+            room_max_stream_ordering = self.store.get_room_max_stream_ordering()
+            if self.pos > room_max_stream_ordering:
+                # apparently, we've processed more events than exist in the database!
+                # this can happen if events are removed with history purge or similar.
+                logger.warning(
+                    "Event stream ordering appears to have gone backwards (%i -> %i): "
+                    "rewinding user directory processor",
+                    self.pos,
+                    room_max_stream_ordering,
+                )
+                self.pos = room_max_stream_ordering

         # Loop round handling deltas until we're up to date
         while True:
From 79f6d3550a14cacadacbb47d1d1ccf9d06f73158 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 4 Jan 2022 22:31:45 +0000
Subject: [PATCH 041/474] update nginx reverse-proxy example (#11680)

this should not be a case-insensitive match.
---
 changelog.d/11680.doc | 1 +
 docs/reverse_proxy.md | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/11680.doc

diff --git a/changelog.d/11680.doc b/changelog.d/11680.doc
new file mode 100644
index 000000000000..09399ad9d0e6
--- /dev/null
+++ b/changelog.d/11680.doc
@@ -0,0 +1 @@
+Correct the documentation for `nginx` to use a case-sensitive url pattern. Fixes an error introduced in v1.21.0.
diff --git a/docs/reverse_proxy.md b/docs/reverse_proxy.md
index f3b3aea732c7..1a89da50fd97 100644
--- a/docs/reverse_proxy.md
+++ b/docs/reverse_proxy.md
@@ -63,7 +63,7 @@ server {

     server_name matrix.example.com;

-    location ~* ^(\/_matrix|\/_synapse\/client) {
+    location ~ ^(/_matrix|/_synapse/client) {
         # note: do not add a path (even a single /) after the port in `proxy_pass`,
         # otherwise nginx will canonicalise the URI and cause signature verification
         # errors.
From 0715e77b06ce4b73c3061562081b979b68440209 Mon Sep 17 00:00:00 2001
From: Donny Johnson
Date: Wed, 5 Jan 2022 04:38:51 -0600
Subject: [PATCH 042/474] Correct Synapse install command for FreeBSD. (#11267)

Co-authored-by: reivilibre
---
 changelog.d/11267.doc      | 1 +
 docs/setup/installation.md | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/11267.doc

diff --git a/changelog.d/11267.doc b/changelog.d/11267.doc
new file mode 100644
index 000000000000..3a720158ded4
--- /dev/null
+++ b/changelog.d/11267.doc
@@ -0,0 +1 @@
+Update Synapse install command for FreeBSD as the package is now prefixed with `py38`. Contributed by @itchychips.
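On the `location` fix in the reverse-proxy patch above: nginx's `~*` is a case-insensitive regex match, while `~` is case-sensitive. Matrix paths are case-sensitive, so the old pattern also routed requests Synapse does not actually serve. A rough Python analogue of the before/after behaviour (example path invented):

```python
import re

old = re.compile(r"^(/_matrix|/_synapse/client)", re.IGNORECASE)  # nginx `~*`
new = re.compile(r"^(/_matrix|/_synapse/client)")                 # nginx `~`

assert old.match("/_Matrix/client/versions")      # wrongly proxied to Synapse
assert not new.match("/_Matrix/client/versions")  # now falls through
assert new.match("/_matrix/client/versions")      # real endpoints still match
```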
diff --git a/docs/setup/installation.md b/docs/setup/installation.md index 16562be95388..210c80daced8 100644 --- a/docs/setup/installation.md +++ b/docs/setup/installation.md @@ -164,7 +164,7 @@ xbps-install -S synapse Synapse can be installed via FreeBSD Ports or Packages contributed by Brendan Molloy from: - Ports: `cd /usr/ports/net-im/py-matrix-synapse && make install clean` -- Packages: `pkg install py37-matrix-synapse` +- Packages: `pkg install py38-matrix-synapse` #### OpenBSD From 7013e06e2f60e1401349fd054372808376facc87 Mon Sep 17 00:00:00 2001 From: Callum Macdonald Date: Wed, 5 Jan 2022 11:50:28 +0100 Subject: [PATCH 043/474] Improve Docker docs for use with Postgres (#11640) --- changelog.d/11640.doc | 1 + contrib/docker/docker-compose.yml | 1 + docker/README.md | 4 ++++ 3 files changed, 6 insertions(+) create mode 100644 changelog.d/11640.doc diff --git a/changelog.d/11640.doc b/changelog.d/11640.doc new file mode 100644 index 000000000000..c4773e4f3ac5 --- /dev/null +++ b/changelog.d/11640.doc @@ -0,0 +1 @@ +Add references for using Postgres to the Docker documentation. diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml index 26d640c44887..5ac41139e31d 100644 --- a/contrib/docker/docker-compose.yml +++ b/contrib/docker/docker-compose.yml @@ -14,6 +14,7 @@ services: # failure restart: unless-stopped # See the readme for a full documentation of the environment settings + # NOTE: You must edit homeserver.yaml to use postgres, it defaults to sqlite environment: - SYNAPSE_CONFIG_PATH=/data/homeserver.yaml volumes: diff --git a/docker/README.md b/docker/README.md index 4349e71f87bb..67c3bc65f095 100644 --- a/docker/README.md +++ b/docker/README.md @@ -68,6 +68,10 @@ The following environment variables are supported in `generate` mode: directories. If unset, and no user is set via `docker run --user`, defaults to `991`, `991`. +## Postgres + +By default the config will use SQLite. See the [docs on using Postgres](https://github.com/matrix-org/synapse/blob/develop/docs/postgres.md) for more info on how to use Postgres. Until this section is improved [this issue](https://github.com/matrix-org/synapse/issues/8304) may provide useful information. + ## Running synapse Once you have a valid configuration file, you can start synapse as follows: From 84d790a32ec4641365adf494a21b28bd680faf38 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 5 Jan 2022 11:25:32 +0000 Subject: [PATCH 044/474] Clarify SSO mapping provider documentation by writing `def` or `async def` before the names of methods, as appropriate. (#11681) --- changelog.d/11681.doc | 1 + docs/sso_mapping_providers.md | 24 ++++++++++++------------ 2 files changed, 13 insertions(+), 12 deletions(-) create mode 100644 changelog.d/11681.doc diff --git a/changelog.d/11681.doc b/changelog.d/11681.doc new file mode 100644 index 000000000000..fef70211cd7f --- /dev/null +++ b/changelog.d/11681.doc @@ -0,0 +1 @@ +Clarify SSO mapping provider documentation by writing `def` or `async def` before the names of methods, as appropriate. \ No newline at end of file diff --git a/docs/sso_mapping_providers.md b/docs/sso_mapping_providers.md index 7a407012e0b1..7b4ddc5b7423 100644 --- a/docs/sso_mapping_providers.md +++ b/docs/sso_mapping_providers.md @@ -49,12 +49,12 @@ comment these options out and use those specified by the module instead. 
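Before the method-by-method reference that follows, a minimal, hypothetical OIDC provider may help orient readers. The claims read from `userinfo` (`sub`, `preferred_username`, `name`) are common OIDC claims chosen for illustration, not requirements:

```python
from typing import Any, Dict, Optional


class ExampleOidcMappingProvider:
    def __init__(self, parsed_config: Dict[str, Any]):
        self._config = parsed_config

    @staticmethod
    def parse_config(config: Dict[str, Any]) -> Dict[str, Any]:
        return config  # a real module would validate and default options here

    def get_remote_user_id(self, userinfo: Any) -> str:
        return userinfo["sub"]  # the unique, immutable identifier

    async def map_user_attributes(
        self, userinfo: Any, token: Any, failures: int
    ) -> Dict[str, Optional[str]]:
        localpart = userinfo["preferred_username"]
        if failures:
            localpart += str(failures)  # de-duplicate a taken localpart
        return {"localpart": localpart, "displayname": userinfo.get("name")}

    async def get_extra_attributes(self, userinfo: Any, token: Any) -> Dict[str, Any]:
        return {}  # nothing extra to return to the client
```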
A custom mapping provider must specify the following methods: -* `__init__(self, parsed_config)` +* `def __init__(self, parsed_config)` - Arguments: - `parsed_config` - A configuration object that is the return value of the `parse_config` method. You should set any configuration options needed by the module here. -* `parse_config(config)` +* `def parse_config(config)` - This method should have the `@staticmethod` decoration. - Arguments: - `config` - A `dict` representing the parsed content of the @@ -63,13 +63,13 @@ A custom mapping provider must specify the following methods: any option values they need here. - Whatever is returned will be passed back to the user mapping provider module's `__init__` method during construction. -* `get_remote_user_id(self, userinfo)` +* `def get_remote_user_id(self, userinfo)` - Arguments: - `userinfo` - A `authlib.oidc.core.claims.UserInfo` object to extract user information from. - This method must return a string, which is the unique, immutable identifier for the user. Commonly the `sub` claim of the response. -* `map_user_attributes(self, userinfo, token, failures)` +* `async def map_user_attributes(self, userinfo, token, failures)` - This method must be async. - Arguments: - `userinfo` - A `authlib.oidc.core.claims.UserInfo` object to extract user @@ -91,7 +91,7 @@ A custom mapping provider must specify the following methods: during a user's first login. Once a localpart has been associated with a remote user ID (see `get_remote_user_id`) it cannot be updated. - `displayname`: An optional string, the display name for the user. -* `get_extra_attributes(self, userinfo, token)` +* `async def get_extra_attributes(self, userinfo, token)` - This method must be async. - Arguments: - `userinfo` - A `authlib.oidc.core.claims.UserInfo` object to extract user @@ -125,15 +125,15 @@ comment these options out and use those specified by the module instead. A custom mapping provider must specify the following methods: -* `__init__(self, parsed_config, module_api)` +* `def __init__(self, parsed_config, module_api)` - Arguments: - `parsed_config` - A configuration object that is the return value of the `parse_config` method. You should set any configuration options needed by the module here. - `module_api` - a `synapse.module_api.ModuleApi` object which provides the stable API available for extension modules. -* `parse_config(config)` - - This method should have the `@staticmethod` decoration. +* `def parse_config(config)` + - **This method should have the `@staticmethod` decoration.** - Arguments: - `config` - A `dict` representing the parsed content of the `saml_config.user_mapping_provider.config` homeserver config option. @@ -141,15 +141,15 @@ A custom mapping provider must specify the following methods: any option values they need here. - Whatever is returned will be passed back to the user mapping provider module's `__init__` method during construction. -* `get_saml_attributes(config)` - - This method should have the `@staticmethod` decoration. +* `def get_saml_attributes(config)` + - **This method should have the `@staticmethod` decoration.** - Arguments: - `config` - A object resulting from a call to `parse_config`. - Returns a tuple of two sets. The first set equates to the SAML auth response attributes that are required for the module to function, whereas the second set consists of those attributes which can be used if available, but are not necessary. 
-* `get_remote_user_id(self, saml_response, client_redirect_url)` +* `def get_remote_user_id(self, saml_response, client_redirect_url)` - Arguments: - `saml_response` - A `saml2.response.AuthnResponse` object to extract user information from. @@ -157,7 +157,7 @@ A custom mapping provider must specify the following methods: redirected to. - This method must return a string, which is the unique, immutable identifier for the user. Commonly the `uid` claim of the response. -* `saml_response_to_user_attributes(self, saml_response, failures, client_redirect_url)` +* `def saml_response_to_user_attributes(self, saml_response, failures, client_redirect_url)` - Arguments: - `saml_response` - A `saml2.response.AuthnResponse` object to extract user information from. From 84bfe47b01402cf037417f0026ed4077223ed259 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 5 Jan 2022 11:41:49 +0000 Subject: [PATCH 045/474] Re-apply: Move glob_to_regex and re_word_boundary to matrix-python-common #11505 (#11687) Co-authored-by: Sean Quah --- changelog.d/11505.misc | 1 + changelog.d/11687.misc | 1 + synapse/config/room_directory.py | 3 +- synapse/config/tls.py | 3 +- synapse/federation/federation_server.py | 3 +- synapse/push/push_rule_evaluator.py | 7 +-- synapse/python_dependencies.py | 1 + synapse/util/__init__.py | 59 +------------------------ tests/util/test_glob_to_regex.py | 59 ------------------------- 9 files changed, 14 insertions(+), 123 deletions(-) create mode 100644 changelog.d/11505.misc create mode 100644 changelog.d/11687.misc delete mode 100644 tests/util/test_glob_to_regex.py diff --git a/changelog.d/11505.misc b/changelog.d/11505.misc new file mode 100644 index 000000000000..926b562fade9 --- /dev/null +++ b/changelog.d/11505.misc @@ -0,0 +1 @@ +Move `glob_to_regex` and `re_word_boundary` to `matrix-python-common`. diff --git a/changelog.d/11687.misc b/changelog.d/11687.misc new file mode 100644 index 000000000000..926b562fade9 --- /dev/null +++ b/changelog.d/11687.misc @@ -0,0 +1 @@ +Move `glob_to_regex` and `re_word_boundary` to `matrix-python-common`. 
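The mechanical call-site change this patch repeats across the files below, sketched under the assumption that matrix-common 1.0.0's API matches the helpers removed from `synapse/util/__init__.py` further down:

```python
import re

from matrix_common.regex import glob_to_regex, to_word_pattern

# was: synapse.util.glob_to_regex(glob, word_boundary)
pattern = glob_to_regex("*.example.org", word_boundary=False)
assert pattern.match("matrix.example.org")

# was: re.compile(synapse.util.re_word_boundary(re.escape(word)), re.IGNORECASE)
word_re = re.compile(to_word_pattern(re.escape("alice")), re.IGNORECASE)
assert word_re.search("hi alice!") and not word_re.search("alices")
```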
diff --git a/synapse/config/room_directory.py b/synapse/config/room_directory.py index 57316c59b6a0..3c5e0f7ce73c 100644 --- a/synapse/config/room_directory.py +++ b/synapse/config/room_directory.py @@ -15,8 +15,9 @@ from typing import List +from matrix_common.regex import glob_to_regex + from synapse.types import JsonDict -from synapse.util import glob_to_regex from ._base import Config, ConfigError diff --git a/synapse/config/tls.py b/synapse/config/tls.py index ffb316e4c011..6e673d65a711 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -16,11 +16,12 @@ import os from typing import List, Optional, Pattern +from matrix_common.regex import glob_to_regex + from OpenSSL import SSL, crypto from twisted.internet._sslverify import Certificate, trustRootFromCertificates from synapse.config._base import Config, ConfigError -from synapse.util import glob_to_regex logger = logging.getLogger(__name__) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index cf067b56c6b4..ee71f289c81c 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -28,6 +28,7 @@ Union, ) +from matrix_common.regex import glob_to_regex from prometheus_client import Counter, Gauge, Histogram from twisted.internet.abstract import isIPAddress @@ -65,7 +66,7 @@ ) from synapse.storage.databases.main.lock import Lock from synapse.types import JsonDict, get_domain_from_id -from synapse.util import glob_to_regex, json_decoder, unwrapFirstError +from synapse.util import json_decoder, unwrapFirstError from synapse.util.async_helpers import Linearizer, concurrently_execute, gather_results from synapse.util.caches.response_cache import ResponseCache from synapse.util.stringutils import parse_server_name diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py index 7f68092ec5e5..659a53805df1 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -17,9 +17,10 @@ import re from typing import Any, Dict, List, Optional, Pattern, Tuple, Union +from matrix_common.regex import glob_to_regex, to_word_pattern + from synapse.events import EventBase from synapse.types import JsonDict, UserID -from synapse.util import glob_to_regex, re_word_boundary from synapse.util.caches.lrucache import LruCache logger = logging.getLogger(__name__) @@ -184,7 +185,7 @@ def _contains_display_name(self, display_name: Optional[str]) -> bool: r = regex_cache.get((display_name, False, True), None) if not r: r1 = re.escape(display_name) - r1 = re_word_boundary(r1) + r1 = to_word_pattern(r1) r = re.compile(r1, flags=re.IGNORECASE) regex_cache[(display_name, False, True)] = r @@ -213,7 +214,7 @@ def _glob_matches(glob: str, value: str, word_boundary: bool = False) -> bool: try: r = regex_cache.get((glob, True, word_boundary), None) if not r: - r = glob_to_regex(glob, word_boundary) + r = glob_to_regex(glob, word_boundary=word_boundary) regex_cache[(glob, True, word_boundary)] = r return bool(r.search(value)) except re.error: diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 13fb69460ea9..d844fbb3b3d6 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -88,6 +88,7 @@ # with the latest security patches. 
"cryptography>=3.4.7", "ijson>=3.1", + "matrix-common==1.0.0", ] CONDITIONAL_REQUIREMENTS = { diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 95f23e27b6b1..f157132210dd 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -14,9 +14,8 @@ import json import logging -import re import typing -from typing import Any, Callable, Dict, Generator, Optional, Pattern +from typing import Any, Callable, Dict, Generator, Optional import attr from frozendict import frozendict @@ -35,9 +34,6 @@ logger = logging.getLogger(__name__) -_WILDCARD_RUN = re.compile(r"([\?\*]+)") - - def _reject_invalid_json(val: Any) -> None: """Do not allow Infinity, -Infinity, or NaN values in JSON.""" raise ValueError("Invalid JSON value: '%s'" % val) @@ -185,56 +181,3 @@ def log_failure( if not consumeErrors: return failure return None - - -def glob_to_regex(glob: str, word_boundary: bool = False) -> Pattern: - """Converts a glob to a compiled regex object. - - Args: - glob: pattern to match - word_boundary: If True, the pattern will be allowed to match at word boundaries - anywhere in the string. Otherwise, the pattern is anchored at the start and - end of the string. - - Returns: - compiled regex pattern - """ - - # Patterns with wildcards must be simplified to avoid performance cliffs - # - The glob `?**?**?` is equivalent to the glob `???*` - # - The glob `???*` is equivalent to the regex `.{3,}` - chunks = [] - for chunk in _WILDCARD_RUN.split(glob): - # No wildcards? re.escape() - if not _WILDCARD_RUN.match(chunk): - chunks.append(re.escape(chunk)) - continue - - # Wildcards? Simplify. - qmarks = chunk.count("?") - if "*" in chunk: - chunks.append(".{%d,}" % qmarks) - else: - chunks.append(".{%d}" % qmarks) - - res = "".join(chunks) - - if word_boundary: - res = re_word_boundary(res) - else: - # \A anchors at start of string, \Z at end of string - res = r"\A" + res + r"\Z" - - return re.compile(res, re.IGNORECASE) - - -def re_word_boundary(r: str) -> str: - """ - Adds word boundary characters to the start and end of an - expression to require that the match occur as a whole word, - but do so respecting the fact that strings starting or ending - with non-word characters will change word boundaries. - """ - # we can't use \b as it chokes on unicode. however \W seems to be okay - # as shorthand for [^0-9A-Za-z_]. - return r"(^|\W)%s(\W|$)" % (r,) diff --git a/tests/util/test_glob_to_regex.py b/tests/util/test_glob_to_regex.py deleted file mode 100644 index 220accb92b65..000000000000 --- a/tests/util/test_glob_to_regex.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2021 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from synapse.util import glob_to_regex - -from tests.unittest import TestCase - - -class GlobToRegexTestCase(TestCase): - def test_literal_match(self): - """patterns without wildcards should match""" - pat = glob_to_regex("foobaz") - self.assertTrue( - pat.match("FoobaZ"), "patterns should match and be case-insensitive" - ) - self.assertFalse( - pat.match("x foobaz"), "pattern should not match at word boundaries" - ) - - def test_wildcard_match(self): - pat = glob_to_regex("f?o*baz") - - self.assertTrue( - pat.match("FoobarbaZ"), - "* should match string and pattern should be case-insensitive", - ) - self.assertTrue(pat.match("foobaz"), "* should match 0 characters") - self.assertFalse(pat.match("fooxaz"), "the character after * must match") - self.assertFalse(pat.match("fobbaz"), "? should not match 0 characters") - self.assertFalse(pat.match("fiiobaz"), "? should not match 2 characters") - - def test_multi_wildcard(self): - """patterns with multiple wildcards in a row should match""" - pat = glob_to_regex("**baz") - self.assertTrue(pat.match("agsgsbaz"), "** should match any string") - self.assertTrue(pat.match("baz"), "** should match the empty string") - self.assertEqual(pat.pattern, r"\A.{0,}baz\Z") - - pat = glob_to_regex("*?baz") - self.assertTrue(pat.match("agsgsbaz"), "*? should match any string") - self.assertTrue(pat.match("abaz"), "*? should match a single char") - self.assertFalse(pat.match("baz"), "*? should not match the empty string") - self.assertEqual(pat.pattern, r"\A.{1,}baz\Z") - - pat = glob_to_regex("a?*?*?baz") - self.assertTrue(pat.match("a g baz"), "?*?*? should match 3 chars") - self.assertFalse(pat.match("a..baz"), "?*?*? should not match 2 chars") - self.assertTrue(pat.match("a.gg.baz"), "?*?*? should match 4 chars") - self.assertEqual(pat.pattern, r"\Aa.{3,}baz\Z") From 7a1cefc6e37aa583647f2804c9d9c9765712c59a Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Wed, 5 Jan 2022 12:49:06 +0100 Subject: [PATCH 046/474] Add admin API to get users' account data (#11664) Co-authored-by: reivilibre --- changelog.d/11664.feature | 1 + docs/admin_api/user_admin_api.md | 75 ++++++++++++++++++++++++++ synapse/rest/admin/__init__.py | 2 + synapse/rest/admin/users.py | 30 +++++++++++ tests/rest/admin/test_user.py | 90 ++++++++++++++++++++++++++++++++ 5 files changed, 198 insertions(+) create mode 100644 changelog.d/11664.feature diff --git a/changelog.d/11664.feature b/changelog.d/11664.feature new file mode 100644 index 000000000000..df81783c66a0 --- /dev/null +++ b/changelog.d/11664.feature @@ -0,0 +1 @@ +Add admin API to get users' account data. \ No newline at end of file diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md index ba574d795fcc..74933d2fcf0e 100644 --- a/docs/admin_api/user_admin_api.md +++ b/docs/admin_api/user_admin_api.md @@ -480,6 +480,81 @@ The following fields are returned in the JSON response body: - `joined_rooms` - An array of `room_id`. - `total` - Number of rooms. +## Account Data +Gets information about account data for a specific `user_id`. 
+ +The API is: + +``` +GET /_synapse/admin/v1/users/<user_id>/accountdata +``` + +A response body like the following is returned: + +```json +{ + "account_data": { + "global": { + "m.secret_storage.key.LmIGHTg5W": { + "algorithm": "m.secret_storage.v1.aes-hmac-sha2", + "iv": "fwjNZatxg==", + "mac": "eWh9kNnLWZUNOgnc=" + }, + "im.vector.hide_profile": { + "hide_profile": true + }, + "org.matrix.preview_urls": { + "disable": false + }, + "im.vector.riot.breadcrumb_rooms": { + "rooms": [ + "!LxcBDAsDUVAfJDEo:matrix.org", + "!MAhRxqasbItjOqxu:matrix.org" + ] + }, + "m.accepted_terms": { + "accepted": [ + "https://example.org/somewhere/privacy-1.2-en.html", + "https://example.org/somewhere/terms-2.0-en.html" + ] + }, + "im.vector.setting.breadcrumbs": { + "recent_rooms": [ + "!MAhRxqasbItqxuEt:matrix.org", + "!ZtSaPCawyWtxiImy:matrix.org" + ] + } + }, + "rooms": { + "!GUdfZSHUJibpiVqHYd:matrix.org": { + "m.fully_read": { + "event_id": "$156334540fYIhZ:matrix.org" + } + }, + "!tOZwOOiqwCYQkLhV:matrix.org": { + "m.fully_read": { + "event_id": "$xjsIyp4_NaVl2yPvIZs_k1Jl8tsC_Sp23wjqXPno" + } + } + } + } +} +``` + +**Parameters** + +The following parameters should be set in the URL: + +- `user_id` - fully qualified: for example, `@user:server.com`. + +**Response** + +The following fields are returned in the JSON response body: + +- `account_data` - A map containing the account data for the user + - `global` - A map containing the global account data for the user + - `rooms` - A map containing the account data per room for the user + ## User media ### List media uploaded by a user diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 701c609c1208..465e06772b26 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -69,6 +69,7 @@ from synapse.rest.admin.statistics import UserMediaStatisticsRestServlet from synapse.rest.admin.username_available import UsernameAvailableRestServlet from synapse.rest.admin.users import ( + AccountDataRestServlet, AccountValidityRenewServlet, DeactivateAccountRestServlet, PushersRestServlet, @@ -255,6 +256,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: UserMediaStatisticsRestServlet(hs).register(http_server) EventReportDetailRestServlet(hs).register(http_server) EventReportsRestServlet(hs).register(http_server) + AccountDataRestServlet(hs).register(http_server) PushersRestServlet(hs).register(http_server) MakeRoomAdminRestServlet(hs).register(http_server) ShadowBanRestServlet(hs).register(http_server) diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index db678da4cf14..78e795c34764 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -1121,3 +1121,33 @@ async def on_DELETE( await self.store.delete_ratelimit_for_user(user_id) return HTTPStatus.OK, {} + + +class AccountDataRestServlet(RestServlet): + """Retrieve the given user's account data""" + + PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/accountdata") + + def __init__(self, hs: "HomeServer"): + self._auth = hs.get_auth() + self._store = hs.get_datastore() + self._is_mine_id = hs.is_mine_id + + async def on_GET( + self, request: SynapseRequest, user_id: str + ) -> Tuple[int, JsonDict]: + await assert_requester_is_admin(self._auth, request) + + if not self._is_mine_id(user_id): + raise SynapseError(HTTPStatus.BAD_REQUEST, "Can only look up local users") + + if not await self._store.get_user_by_id(user_id): + raise NotFoundError("User not found") + + global_data, by_room_data = await
self._store.get_account_data_for_user(user_id) + return HTTPStatus.OK, { + "account_data": { + "global": global_data, + "rooms": by_room_data, + }, + } diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index eea675991cbc..e0b9fe8e91b3 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -3883,3 +3883,93 @@ def test_success(self): self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertNotIn("messages_per_second", channel.json_body) self.assertNotIn("burst_count", channel.json_body) + + +class AccountDataTestCase(unittest.HomeserverTestCase): + + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + ] + + def prepare(self, reactor, clock, hs) -> None: + self.store = hs.get_datastore() + + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + self.other_user = self.register_user("user", "pass") + self.url = f"/_synapse/admin/v1/users/{self.other_user}/accountdata" + + def test_no_auth(self) -> None: + """Try to get information of a user without authentication.""" + channel = self.make_request("GET", self.url, {}) + + self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body) + self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) + + def test_requester_is_no_admin(self) -> None: + """If the user is not a server admin, an error is returned.""" + other_user_token = self.login("user", "pass") + + channel = self.make_request( + "GET", + self.url, + access_token=other_user_token, + ) + + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + def test_user_does_not_exist(self) -> None: + """Tests that a lookup for a user that does not exist returns a 404""" + url = "/_synapse/admin/v1/users/@unknown_person:test/override_ratelimit" + + channel = self.make_request( + "GET", + url, + access_token=self.admin_user_tok, + ) + + self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) + self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) + + def test_user_is_not_local(self) -> None: + """Tests that a lookup for a user that is not a local returns a 400""" + url = "/_synapse/admin/v1/users/@unknown_person:unknown_domain/accountdata" + + channel = self.make_request( + "GET", + url, + access_token=self.admin_user_tok, + ) + + self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual("Can only look up local users", channel.json_body["error"]) + + def test_success(self) -> None: + """Request account data should succeed for an admin.""" + + # add account data + self.get_success( + self.store.add_account_data_for_user(self.other_user, "m.global", {"a": 1}) + ) + self.get_success( + self.store.add_account_data_to_room( + self.other_user, "test_room", "m.per_room", {"b": 2} + ) + ) + + channel = self.make_request( + "GET", + self.url, + access_token=self.admin_user_tok, + ) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual( + {"a": 1}, channel.json_body["account_data"]["global"]["m.global"] + ) + self.assertEqual( + {"b": 2}, + channel.json_body["account_data"]["rooms"]["test_room"]["m.per_room"], + ) From 0201c6371cdfa0e8245c59686c131e40384bbac2 Mon Sep 17 00:00:00 2001 From: Fr3shTea <31766876+Fr3shTea@users.noreply.github.com> Date: Wed, 5 Jan 2022 11:59:29 +0000 Subject: [PATCH 047/474] Fix SimpleHttpClient 
not sending Accept header in `get_json` (#11677) Co-authored-by: reivilibre --- changelog.d/11677.bugfix | 1 + synapse/http/client.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11677.bugfix diff --git a/changelog.d/11677.bugfix b/changelog.d/11677.bugfix new file mode 100644 index 000000000000..5691064a303c --- /dev/null +++ b/changelog.d/11677.bugfix @@ -0,0 +1 @@ +Fix wrong variable reference in `SimpleHttpClient.get_json` that results in the absence of the `Accept` header in the request. diff --git a/synapse/http/client.py b/synapse/http/client.py index fbbeceabeb6a..ca33b45cb21f 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -588,7 +588,7 @@ async def get_json( if headers: actual_headers.update(headers) # type: ignore - body = await self.get_raw(uri, args, headers=headers) + body = await self.get_raw(uri, args, headers=actual_headers) return json_decoder.decode(body.decode("utf-8")) async def put_json( From eedb4527f1524fc3b83cd4838774b04d6f1e3911 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philipp=20Matthias=20Sch=C3=A4fer?= Date: Wed, 5 Jan 2022 13:16:52 +0100 Subject: [PATCH 048/474] Fix link from generated configuration file to documentation (#11678) Co-authored-by: reivilibre Co-authored-by: reivilibre --- changelog.d/11678.doc | 1 + docs/sample_config.yaml | 2 +- synapse/config/modules.py | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/11678.doc diff --git a/changelog.d/11678.doc b/changelog.d/11678.doc new file mode 100644 index 000000000000..dff663e78238 --- /dev/null +++ b/changelog.d/11678.doc @@ -0,0 +1 @@ +Fix the documentation link in newly-generated configuration files. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 00dfd2c01312..810a14b07706 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -37,7 +37,7 @@ # Server admins can expand Synapse's functionality with external modules. # -# See https://matrix-org.github.io/synapse/latest/modules.html for more +# See https://matrix-org.github.io/synapse/latest/modules/index.html for more # documentation on how to configure or create custom modules for Synapse. # modules: diff --git a/synapse/config/modules.py b/synapse/config/modules.py index ae0821e5a504..85fb05890d7c 100644 --- a/synapse/config/modules.py +++ b/synapse/config/modules.py @@ -37,7 +37,7 @@ def generate_config_section(self, **kwargs): # Server admins can expand Synapse's functionality with external modules. # - # See https://matrix-org.github.io/synapse/latest/modules.html for more + # See https://matrix-org.github.io/synapse/latest/modules/index.html for more # documentation on how to configure or create custom modules for Synapse. # modules: From 0fb3dd0830e476c0e0b89c3bf6c7855a4129ff11 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 5 Jan 2022 12:26:11 +0000 Subject: [PATCH 049/474] Refactor the way we set `outlier` (#11634) * `_auth_and_persist_outliers`: mark persisted events as outliers Mark any events that get persisted via `_auth_and_persist_outliers` as, well, outliers. Currently this will be a no-op as everything will already be flagged as an outlier, but I'm going to change that. * `process_remote_join`: stop flagging as outlier The events are now flagged as outliers later on, by `_auth_and_persist_outliers`. * `send_join`: remove `outlier=True` The events created here are returned in the result of `send_join` to `FederationHandler.do_invite_join`. 
From there they are passed into `FederationEventHandler.process_remote_join`, which passes them to `_auth_and_persist_outliers`... which sets the `outlier` flag. * `get_event_auth`: remove `outlier=True` stop flagging the events returned by `get_event_auth` as outliers. This method is only called by `_get_remote_auth_chain_for_event`, which passes the results into `_auth_and_persist_outliers`, which will flag them as outliers. * `_get_remote_auth_chain_for_event`: remove `outlier=True` we pass all the events into `_auth_and_persist_outliers`, which will now flag the events as outliers. * `_check_sigs_and_hash_and_fetch`: remove unused `outlier` parameter This param is now never set to True, so we can remove it. * `_check_sigs_and_hash_and_fetch_one`: remove unused `outlier` param This is no longer set anywhere, so we can remove it. * `get_pdu`: remove unused `outlier` parameter ... and chase it down into `get_pdu_from_destination_raw`. * `event_from_pdu_json`: remove redundant `outlier` param This is never set to `True`, so can be removed. * changelog * update docstring --- changelog.d/11634.misc | 1 + synapse/federation/federation_base.py | 7 +---- synapse/federation/federation_client.py | 36 ++++--------------------- synapse/handlers/federation_event.py | 15 ++++++----- tests/handlers/test_federation.py | 4 +-- 5 files changed, 16 insertions(+), 47 deletions(-) create mode 100644 changelog.d/11634.misc diff --git a/changelog.d/11634.misc b/changelog.d/11634.misc new file mode 100644 index 000000000000..4069cbc2f483 --- /dev/null +++ b/changelog.d/11634.misc @@ -0,0 +1 @@ +Refactor the way that the `outlier` flag is set on events received over federation. diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 4df90e02d7e6..addc0bf00045 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -215,15 +215,12 @@ def _is_invite_via_3pid(event: EventBase) -> bool: ) -def event_from_pdu_json( - pdu_json: JsonDict, room_version: RoomVersion, outlier: bool = False -) -> EventBase: +def event_from_pdu_json(pdu_json: JsonDict, room_version: RoomVersion) -> EventBase: """Construct an EventBase from an event json received over federation Args: pdu_json: pdu as received over federation room_version: The version of the room this event belongs to - outlier: True to mark this event as an outlier Raises: SynapseError: if the pdu is missing required fields or is otherwise @@ -247,6 +244,4 @@ def event_from_pdu_json( validate_canonicaljson(pdu_json) event = make_event_from_dict(pdu_json, room_version) - event.internal_metadata.outlier = outlier - return event diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 7353c2b6b147..6ea4edfc71f8 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -265,10 +265,7 @@ async def backfill( room_version = await self.store.get_room_version(room_id) - pdus = [ - event_from_pdu_json(p, room_version, outlier=False) - for p in transaction_data_pdus - ] + pdus = [event_from_pdu_json(p, room_version) for p in transaction_data_pdus] # Check signatures and hash of pdus, removing any from the list that fail checks pdus[:] = await self._check_sigs_and_hash_and_fetch( @@ -282,7 +279,6 @@ async def get_pdu_from_destination_raw( destination: str, event_id: str, room_version: RoomVersion, - outlier: bool = False, timeout: Optional[int] = None, ) -> Optional[EventBase]: """Requests the PDU with given origin 
and ID from the remote home @@ -292,9 +288,6 @@ async def get_pdu_from_destination_raw( destination: Which homeserver to query event_id: event to fetch room_version: version of the room - outlier: Indicates whether the PDU is an `outlier`, i.e. if - it's from an arbitrary point in the context as opposed to part - of the current block of PDUs. Defaults to `False` timeout: How long to try (in ms) each destination for before moving to the next destination. None indicates no timeout. @@ -316,8 +309,7 @@ async def get_pdu_from_destination_raw( ) pdu_list: List[EventBase] = [ - event_from_pdu_json(p, room_version, outlier=outlier) - for p in transaction_data["pdus"] + event_from_pdu_json(p, room_version) for p in transaction_data["pdus"] ] if pdu_list and pdu_list[0]: @@ -334,7 +326,6 @@ async def get_pdu( destinations: Iterable[str], event_id: str, room_version: RoomVersion, - outlier: bool = False, timeout: Optional[int] = None, ) -> Optional[EventBase]: """Requests the PDU with given origin and ID from the remote home @@ -347,9 +338,6 @@ async def get_pdu( destinations: Which homeservers to query event_id: event to fetch room_version: version of the room - outlier: Indicates whether the PDU is an `outlier`, i.e. if - it's from an arbitrary point in the context as opposed to part - of the current block of PDUs. Defaults to `False` timeout: How long to try (in ms) each destination for before moving to the next destination. None indicates no timeout. @@ -377,7 +365,6 @@ async def get_pdu( destination=destination, event_id=event_id, room_version=room_version, - outlier=outlier, timeout=timeout, ) @@ -435,7 +422,6 @@ async def _check_sigs_and_hash_and_fetch( origin: str, pdus: Collection[EventBase], room_version: RoomVersion, - outlier: bool = False, ) -> List[EventBase]: """Takes a list of PDUs and checks the signatures and hashes of each one. If a PDU fails its signature check then we check if we have it in @@ -451,7 +437,6 @@ async def _check_sigs_and_hash_and_fetch( origin pdu room_version - outlier: Whether the events are outliers or not Returns: A list of PDUs that have valid signatures and hashes. @@ -466,7 +451,6 @@ async def _execute(pdu: EventBase) -> None: valid_pdu = await self._check_sigs_and_hash_and_fetch_one( pdu=pdu, origin=origin, - outlier=outlier, room_version=room_version, ) @@ -482,7 +466,6 @@ async def _check_sigs_and_hash_and_fetch_one( pdu: EventBase, origin: str, room_version: RoomVersion, - outlier: bool = False, ) -> Optional[EventBase]: """Takes a PDU and checks its signatures and hashes. If the PDU fails its signature check then we check if we have it in the database and if @@ -494,9 +477,6 @@ async def _check_sigs_and_hash_and_fetch_one( origin pdu room_version - outlier: Whether the events are outliers or not - include_none: Whether to include None in the returned list - for events that have failed their checks Returns: The PDU (possibly redacted) if it has valid signatures and hashes. 
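Net effect of the removals above: outlier-ness is no longer decided at fetch time. A rough sketch of the resulting flow, using names from the surrounding diffs inside a hypothetical wrapper (simplified, not the actual call sites):

```python
# Sketch only: the division of labour this refactor creates for events
# received over federation.
from typing import Iterable

from synapse.events import EventBase


async def fetch_and_persist_auth_chain(
    federation_client, event_handler, destination: str, room_id: str, event_id: str
) -> None:
    # 1. Fetch helpers such as get_pdu and get_event_auth now return the
    #    events without touching internal_metadata.outlier.
    events: Iterable[EventBase] = await federation_client.get_event_auth(
        destination, room_id, event_id
    )
    # 2. FederationEventHandler._auth_and_persist_outliers is the single
    #    place that flags them, immediately before auth and persistence.
    await event_handler._auth_and_persist_outliers(room_id, events)
```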
@@ -521,7 +501,6 @@ async def _check_sigs_and_hash_and_fetch_one( destinations=[pdu_origin], event_id=pdu.event_id, room_version=room_version, - outlier=outlier, timeout=10000, ) except SynapseError: @@ -541,13 +520,10 @@ async def get_event_auth( room_version = await self.store.get_room_version(room_id) - auth_chain = [ - event_from_pdu_json(p, room_version, outlier=True) - for p in res["auth_chain"] - ] + auth_chain = [event_from_pdu_json(p, room_version) for p in res["auth_chain"]] signed_auth = await self._check_sigs_and_hash_and_fetch( - destination, auth_chain, outlier=True, room_version=room_version + destination, auth_chain, room_version=room_version ) return signed_auth @@ -816,7 +792,6 @@ async def send_request(destination: str) -> SendJoinResult: valid_pdu = await self._check_sigs_and_hash_and_fetch_one( pdu=event, origin=destination, - outlier=True, room_version=room_version, ) @@ -864,7 +839,6 @@ async def _execute(pdu: EventBase) -> None: valid_pdu = await self._check_sigs_and_hash_and_fetch_one( pdu=pdu, origin=destination, - outlier=True, room_version=room_version, ) @@ -1235,7 +1209,7 @@ async def get_missing_events( ] signed_events = await self._check_sigs_and_hash_and_fetch( - destination, events, outlier=False, room_version=room_version + destination, events, room_version=room_version ) except HttpResponseException as e: if not e.code == 400: diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 7c81e3651f95..11771f3c9c2b 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -421,9 +421,6 @@ async def process_remote_join( Raises: SynapseError if the response is in some way invalid. """ - for e in itertools.chain(auth_events, state): - e.internal_metadata.outlier = True - event_map = {e.event_id: e for e in itertools.chain(auth_events, state)} create_event = None @@ -1194,7 +1191,6 @@ async def get_event(event_id: str) -> None: [destination], event_id, room_version, - outlier=True, ) if event is None: logger.warning( @@ -1223,9 +1219,10 @@ async def _auth_and_persist_outliers( """Persist a batch of outlier events fetched from remote servers. We first sort the events to make sure that we process each event's auth_events - before the event itself, and then auth and persist them. + before the event itself. - Notifies about the events where appropriate. + We then mark the events as outliers, persist them to the database, and, where + appropriate (eg, an invite), awake the notifier. Params: room_id: the room that the events are meant to be in (though this has @@ -1276,7 +1273,8 @@ async def _auth_and_persist_outliers_inner( Persists a batch of events where we have (theoretically) already persisted all of their auth events. - Notifies about the events where appropriate. + Marks the events as outliers, auths them, persists them to the database, and, + where appropriate (eg, an invite), awakes the notifier. Params: origin: where the events came from @@ -1314,6 +1312,9 @@ def prep(event: EventBase) -> Optional[Tuple[EventBase, EventContext]]: return None auth.append(ae) + # we're not bothering about room state, so flag the event as an outlier. 
+ event.internal_metadata.outlier = True + context = EventContext.for_outlier() try: validate_event_for_room_version(room_version_obj, event) diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index e1557566e4bc..496b58172643 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -373,9 +373,7 @@ async def get_event_auth( destination: str, room_id: str, event_id: str ) -> List[EventBase]: return [ - event_from_pdu_json( - ae.get_pdu_json(), room_version=room_version, outlier=True - ) + event_from_pdu_json(ae.get_pdu_json(), room_version=room_version) for ae in auth_events ] From 9be5aacc2d901f553ec972d0d62738bd42c87037 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Wed, 5 Jan 2022 12:39:48 +0000 Subject: [PATCH 050/474] 1.50.0rc1 --- CHANGES.md | 85 +++++++++++++++++++++++++++++++++++++++ changelog.d/10520.misc | 1 - changelog.d/11243.misc | 1 - changelog.d/11267.doc | 1 - changelog.d/11331.misc | 1 - changelog.d/11360.misc | 1 - changelog.d/11378.feature | 1 - changelog.d/11427.doc | 1 - changelog.d/11480.misc | 1 - changelog.d/11487.misc | 1 - changelog.d/11503.misc | 1 - changelog.d/11505.misc | 1 - changelog.d/11516.bugfix | 1 - changelog.d/11520.misc | 1 - changelog.d/11531.misc | 1 - changelog.d/11535.misc | 1 - changelog.d/11536.bugfix | 1 - changelog.d/11538.feature | 1 - changelog.d/11541.misc | 1 - changelog.d/11542.misc | 1 - changelog.d/11543.misc | 1 - changelog.d/11546.misc | 1 - changelog.d/11547.bugfix | 1 - changelog.d/11549.misc | 1 - changelog.d/11550.misc | 1 - changelog.d/11551.misc | 1 - changelog.d/11553.doc | 1 - changelog.d/11555.misc | 1 - changelog.d/11556.misc | 1 - changelog.d/11558.misc | 1 - changelog.d/11560.misc | 1 - changelog.d/11564.misc | 1 - changelog.d/11565.misc | 1 - changelog.d/11566.misc | 1 - changelog.d/11570.misc | 1 - changelog.d/11571.misc | 1 - changelog.d/11574.misc | 1 - changelog.d/11575.misc | 1 - changelog.d/11580.misc | 1 - changelog.d/11582.misc | 1 - changelog.d/11588.removal | 1 - changelog.d/11589.misc | 1 - changelog.d/11590.misc | 1 - changelog.d/11592.bugfix | 1 - changelog.d/11594.misc | 1 - changelog.d/11595.misc | 1 - changelog.d/11596.misc | 1 - changelog.d/11602.bugfix | 1 - changelog.d/11603.misc | 1 - changelog.d/11607.misc | 1 - changelog.d/11618.misc | 1 - changelog.d/11619.misc | 1 - changelog.d/11622.misc | 1 - changelog.d/11623.bugfix | 1 - changelog.d/11632.bugfix | 1 - changelog.d/11633.misc | 1 - changelog.d/11634.misc | 1 - changelog.d/11638.misc | 1 - changelog.d/11640.doc | 1 - changelog.d/11642.bugfix | 1 - changelog.d/11643.misc | 1 - changelog.d/11652.misc | 1 - changelog.d/11653.misc | 1 - changelog.d/11654.misc | 1 - changelog.d/11657.misc | 1 - changelog.d/11664.feature | 1 - changelog.d/11665.misc | 1 - changelog.d/11666.feature | 1 - changelog.d/11677.bugfix | 1 - changelog.d/11678.doc | 1 - changelog.d/11680.doc | 1 - changelog.d/11681.doc | 1 - changelog.d/11687.misc | 1 - debian/changelog | 6 +++ synapse/__init__.py | 2 +- 75 files changed, 92 insertions(+), 73 deletions(-) delete mode 100644 changelog.d/10520.misc delete mode 100644 changelog.d/11243.misc delete mode 100644 changelog.d/11267.doc delete mode 100644 changelog.d/11331.misc delete mode 100644 changelog.d/11360.misc delete mode 100644 changelog.d/11378.feature delete mode 100644 changelog.d/11427.doc delete mode 100644 changelog.d/11480.misc delete mode 100644 changelog.d/11487.misc delete mode 100644 changelog.d/11503.misc delete mode 100644 
changelog.d/11505.misc delete mode 100644 changelog.d/11516.bugfix delete mode 100644 changelog.d/11520.misc delete mode 100644 changelog.d/11531.misc delete mode 100644 changelog.d/11535.misc delete mode 100644 changelog.d/11536.bugfix delete mode 100644 changelog.d/11538.feature delete mode 100644 changelog.d/11541.misc delete mode 100644 changelog.d/11542.misc delete mode 100644 changelog.d/11543.misc delete mode 100644 changelog.d/11546.misc delete mode 100644 changelog.d/11547.bugfix delete mode 100644 changelog.d/11549.misc delete mode 100644 changelog.d/11550.misc delete mode 100644 changelog.d/11551.misc delete mode 100644 changelog.d/11553.doc delete mode 100644 changelog.d/11555.misc delete mode 100644 changelog.d/11556.misc delete mode 100644 changelog.d/11558.misc delete mode 100644 changelog.d/11560.misc delete mode 100644 changelog.d/11564.misc delete mode 100644 changelog.d/11565.misc delete mode 100644 changelog.d/11566.misc delete mode 100644 changelog.d/11570.misc delete mode 100644 changelog.d/11571.misc delete mode 100644 changelog.d/11574.misc delete mode 100644 changelog.d/11575.misc delete mode 100644 changelog.d/11580.misc delete mode 100644 changelog.d/11582.misc delete mode 100644 changelog.d/11588.removal delete mode 100644 changelog.d/11589.misc delete mode 100644 changelog.d/11590.misc delete mode 100644 changelog.d/11592.bugfix delete mode 100644 changelog.d/11594.misc delete mode 100644 changelog.d/11595.misc delete mode 100644 changelog.d/11596.misc delete mode 100644 changelog.d/11602.bugfix delete mode 100644 changelog.d/11603.misc delete mode 100644 changelog.d/11607.misc delete mode 100644 changelog.d/11618.misc delete mode 100644 changelog.d/11619.misc delete mode 100644 changelog.d/11622.misc delete mode 100644 changelog.d/11623.bugfix delete mode 100644 changelog.d/11632.bugfix delete mode 100644 changelog.d/11633.misc delete mode 100644 changelog.d/11634.misc delete mode 100644 changelog.d/11638.misc delete mode 100644 changelog.d/11640.doc delete mode 100644 changelog.d/11642.bugfix delete mode 100644 changelog.d/11643.misc delete mode 100644 changelog.d/11652.misc delete mode 100644 changelog.d/11653.misc delete mode 100644 changelog.d/11654.misc delete mode 100644 changelog.d/11657.misc delete mode 100644 changelog.d/11664.feature delete mode 100644 changelog.d/11665.misc delete mode 100644 changelog.d/11666.feature delete mode 100644 changelog.d/11677.bugfix delete mode 100644 changelog.d/11678.doc delete mode 100644 changelog.d/11680.doc delete mode 100644 changelog.d/11681.doc delete mode 100644 changelog.d/11687.misc diff --git a/CHANGES.md b/CHANGES.md index 9f6e29631df6..142cf5381e61 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,88 @@ +Synapse 1.50.0rc1 (2022-01-05) +============================== + +Features +-------- + +- Allow guests to send state events per [MSC3419](https://github.com/matrix-org/matrix-doc/pull/3419). ([\#11378](https://github.com/matrix-org/synapse/issues/11378)) +- Add experimental support for part of MSC3202: allowing application services to masquerade as specific devices. ([\#11538](https://github.com/matrix-org/synapse/issues/11538)) +- Add admin API to get users' account data. ([\#11664](https://github.com/matrix-org/synapse/issues/11664)) +- Include the room topic in the stripped state included with invites and knocking. 
([\#11666](https://github.com/matrix-org/synapse/issues/11666)) + + +Bugfixes +-------- + +- Fix a long-standing bug where relations from other rooms could be included in the bundled aggregations of an event. ([\#11516](https://github.com/matrix-org/synapse/issues/11516)) +- Fix a long-standing bug which could cause `AssertionError`s to be written to the log when Synapse was restarted after purging events from the database. ([\#11536](https://github.com/matrix-org/synapse/issues/11536), [\#11642](https://github.com/matrix-org/synapse/issues/11642)) +- Fix a bug introduced in Synapse 1.17.0 where a pusher created for an email with capital letters would fail to be created. ([\#11547](https://github.com/matrix-org/synapse/issues/11547)) +- Fix a long-standing bug where responses included bundled aggregations when they should not, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#11592](https://github.com/matrix-org/synapse/issues/11592), [\#11623](https://github.com/matrix-org/synapse/issues/11623)) +- Fix a long-standing bug that some unknown endpoints would return HTML error pages instead of JSON `M_UNRECOGNIZED` errors. ([\#11602](https://github.com/matrix-org/synapse/issues/11602)) +- Fix a bug introduced in Synapse 1.19.3 which could sometimes cause `AssertionError`s when backfilling rooms over federation. ([\#11632](https://github.com/matrix-org/synapse/issues/11632)) +- Fix a bug in `SimpleHttpClient.get_json` that results in the `Accept` request header being absent. ([\#11677](https://github.com/matrix-org/synapse/issues/11677)) + + +Improved Documentation +---------------------- + +- Update Synapse install command for FreeBSD as the package is now prefixed with `py38`. Contributed by @itchychips. ([\#11267](https://github.com/matrix-org/synapse/issues/11267)) +- Document the usage of refresh tokens. ([\#11427](https://github.com/matrix-org/synapse/issues/11427)) +- Add details for how to configure a TURN server when behind a NAT. Contibuted by @AndrewFerr. ([\#11553](https://github.com/matrix-org/synapse/issues/11553)) +- Add references for using Postgres to the Docker documentation. ([\#11640](https://github.com/matrix-org/synapse/issues/11640)) +- Fix the documentation link in newly-generated configuration files. ([\#11678](https://github.com/matrix-org/synapse/issues/11678)) +- Correct the documentation for `nginx` to use a case-sensitive url pattern. Fixes an error introduced in v1.21.0. ([\#11680](https://github.com/matrix-org/synapse/issues/11680)) +- Clarify SSO mapping provider documentation by writing `def` or `async def` before the names of methods, as appropriate. ([\#11681](https://github.com/matrix-org/synapse/issues/11681)) + + +Deprecations and Removals +------------------------- + +- Replace `mock` package by its standard library version. ([\#11588](https://github.com/matrix-org/synapse/issues/11588)) + + +Internal Changes +---------------- + +- Send and handle cross-signing messages using the stable prefix. ([\#10520](https://github.com/matrix-org/synapse/issues/10520)) +- Allow specific, experimental events to be created without `prev_events`. Used by [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716). ([\#11243](https://github.com/matrix-org/synapse/issues/11243)) +- A test helper (`wait_for_background_updates`) no longer depends on classes defining a `store` property. ([\#11331](https://github.com/matrix-org/synapse/issues/11331)) +- Add type hints to `synapse.appservice`. 
([\#11360](https://github.com/matrix-org/synapse/issues/11360)) +- Add missing type hints to `synapse.config` module. ([\#11480](https://github.com/matrix-org/synapse/issues/11480)) +- Add test to ensure we share the same `state_group` across the whole historical batch when using the [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` endpoint. ([\#11487](https://github.com/matrix-org/synapse/issues/11487)) +- Refactor `tests.util.setup_test_homeserver` and `tests.server.setup_test_homeserver`. ([\#11503](https://github.com/matrix-org/synapse/issues/11503)) +- Move `glob_to_regex` and `re_word_boundary` to `matrix-python-common`. ([\#11505](https://github.com/matrix-org/synapse/issues/11505), [\#11687](https://github.com/matrix-org/synapse/issues/11687)) +- Use HTTPStatus constants in place of literals in `tests.rest.client.test_auth`. ([\#11520](https://github.com/matrix-org/synapse/issues/11520)) +- Add a receipt types constant for `m.read`. ([\#11531](https://github.com/matrix-org/synapse/issues/11531)) +- Clean up `synapse.rest.admin`. ([\#11535](https://github.com/matrix-org/synapse/issues/11535)) +- Support unprefixed versions of fallback key property names. ([\#11541](https://github.com/matrix-org/synapse/issues/11541)) +- Add missing `errcode` to `parse_string` and `parse_boolean`. ([\#11542](https://github.com/matrix-org/synapse/issues/11542)) +- Use HTTPStatus constants in place of literals in `synapse.http`. ([\#11543](https://github.com/matrix-org/synapse/issues/11543)) +- Add missing type hints to storage classes. ([\#11546](https://github.com/matrix-org/synapse/issues/11546), [\#11549](https://github.com/matrix-org/synapse/issues/11549), [\#11551](https://github.com/matrix-org/synapse/issues/11551), [\#11555](https://github.com/matrix-org/synapse/issues/11555), [\#11575](https://github.com/matrix-org/synapse/issues/11575), [\#11589](https://github.com/matrix-org/synapse/issues/11589), [\#11594](https://github.com/matrix-org/synapse/issues/11594), [\#11652](https://github.com/matrix-org/synapse/issues/11652), [\#11653](https://github.com/matrix-org/synapse/issues/11653), [\#11654](https://github.com/matrix-org/synapse/issues/11654), [\#11657](https://github.com/matrix-org/synapse/issues/11657)) +- Fix an inaccurate and misleading comment in the `/sync` code. ([\#11550](https://github.com/matrix-org/synapse/issues/11550)) +- Add missing type hints to `synapse.logging.context`. ([\#11556](https://github.com/matrix-org/synapse/issues/11556)) +- Stop populating unused database column `state_events.prev_state`. ([\#11558](https://github.com/matrix-org/synapse/issues/11558)) +- Minor efficiency improvements in event persistence. ([\#11560](https://github.com/matrix-org/synapse/issues/11560)) +- Add some safety checks that storage functions are used correctly. ([\#11564](https://github.com/matrix-org/synapse/issues/11564), [\#11580](https://github.com/matrix-org/synapse/issues/11580)) +- Make `get_device` return `None` if the device doesn't exist rather than raising an exception. ([\#11565](https://github.com/matrix-org/synapse/issues/11565)) +- Split the HTML parsing code from the URL preview resource code. ([\#11566](https://github.com/matrix-org/synapse/issues/11566)) +- Remove redundant `COALESCE()`s around `COUNT()`s in database queries. ([\#11570](https://github.com/matrix-org/synapse/issues/11570)) +- Add missing type hints to `synapse.http`. 
([\#11571](https://github.com/matrix-org/synapse/issues/11571)) +- Convert `EventStreamResult` from a `namedtuple` to `attrs` to improve type hints. ([\#11574](https://github.com/matrix-org/synapse/issues/11574)) +- Add [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) and [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) to `/versions` -> `unstable_features` to detect server support. ([\#11582](https://github.com/matrix-org/synapse/issues/11582)) +- Add type hints to `synapse/tests/rest/admin`. ([\#11590](https://github.com/matrix-org/synapse/issues/11590)) +- Drop end-of-life Python 3.6 and Postgres 9.6 from CI. ([\#11595](https://github.com/matrix-org/synapse/issues/11595)) +- Update black version and run it on all the files. ([\#11596](https://github.com/matrix-org/synapse/issues/11596)) +- Add opentracing type stubs and fix associated mypy errors. ([\#11603](https://github.com/matrix-org/synapse/issues/11603), [\#11622](https://github.com/matrix-org/synapse/issues/11622)) +- Improve opentracing support for requests which use a `ResponseCache`. ([\#11607](https://github.com/matrix-org/synapse/issues/11607)) +- Improve opentracing support for incoming HTTP requests. ([\#11618](https://github.com/matrix-org/synapse/issues/11618)) +- A number of improvements to opentracing support. ([\#11619](https://github.com/matrix-org/synapse/issues/11619)) +- Drop support for Python 3.6 and Ubuntu 18.04. ([\#11633](https://github.com/matrix-org/synapse/issues/11633)) +- Refactor the way that the `outlier` flag is set on events received over federation. ([\#11634](https://github.com/matrix-org/synapse/issues/11634)) +- Improve the error messages from `get_create_event_for_room`. ([\#11638](https://github.com/matrix-org/synapse/issues/11638)) +- Remove redundant `get_current_events_token` method. ([\#11643](https://github.com/matrix-org/synapse/issues/11643)) +- Convert `namedtuples` to `attrs`. ([\#11665](https://github.com/matrix-org/synapse/issues/11665)) + + Synapse 1.49.2 (2021-12-21) =========================== diff --git a/changelog.d/10520.misc b/changelog.d/10520.misc deleted file mode 100644 index a911e165da80..000000000000 --- a/changelog.d/10520.misc +++ /dev/null @@ -1 +0,0 @@ -Send and handle cross-signing messages using the stable prefix. diff --git a/changelog.d/11243.misc b/changelog.d/11243.misc deleted file mode 100644 index 5ef7fe16d4c9..000000000000 --- a/changelog.d/11243.misc +++ /dev/null @@ -1 +0,0 @@ -Allow specific, experimental events to be created without `prev_events`. Used by [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716). diff --git a/changelog.d/11267.doc b/changelog.d/11267.doc deleted file mode 100644 index 3a720158ded4..000000000000 --- a/changelog.d/11267.doc +++ /dev/null @@ -1 +0,0 @@ -Update Synapse install command for FreeBSD as the package is now prefixed with `py38`. Contributed by @itchychips. diff --git a/changelog.d/11331.misc b/changelog.d/11331.misc deleted file mode 100644 index 1ab3a6a97591..000000000000 --- a/changelog.d/11331.misc +++ /dev/null @@ -1 +0,0 @@ -A test helper (`wait_for_background_updates`) no longer depends on classes defining a `store` property. diff --git a/changelog.d/11360.misc b/changelog.d/11360.misc deleted file mode 100644 index 43e25720c5e2..000000000000 --- a/changelog.d/11360.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to `synapse.appservice`. 
diff --git a/changelog.d/11378.feature b/changelog.d/11378.feature deleted file mode 100644 index 524bf84f323e..000000000000 --- a/changelog.d/11378.feature +++ /dev/null @@ -1 +0,0 @@ -Allow guests to send state events per [MSC3419](https://github.com/matrix-org/matrix-doc/pull/3419). \ No newline at end of file diff --git a/changelog.d/11427.doc b/changelog.d/11427.doc deleted file mode 100644 index 01cdfcf2b7e4..000000000000 --- a/changelog.d/11427.doc +++ /dev/null @@ -1 +0,0 @@ -Document the usage of refresh tokens. \ No newline at end of file diff --git a/changelog.d/11480.misc b/changelog.d/11480.misc deleted file mode 100644 index aadc938b2be3..000000000000 --- a/changelog.d/11480.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints to `synapse.config` module. diff --git a/changelog.d/11487.misc b/changelog.d/11487.misc deleted file mode 100644 index 376b9078be87..000000000000 --- a/changelog.d/11487.misc +++ /dev/null @@ -1 +0,0 @@ -Add test to ensure we share the same `state_group` across the whole historical batch when using the [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` endpoint. diff --git a/changelog.d/11503.misc b/changelog.d/11503.misc deleted file mode 100644 index 03a24a922495..000000000000 --- a/changelog.d/11503.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor `tests.util.setup_test_homeserver` and `tests.server.setup_test_homeserver`. \ No newline at end of file diff --git a/changelog.d/11505.misc b/changelog.d/11505.misc deleted file mode 100644 index 926b562fade9..000000000000 --- a/changelog.d/11505.misc +++ /dev/null @@ -1 +0,0 @@ -Move `glob_to_regex` and `re_word_boundary` to `matrix-python-common`. diff --git a/changelog.d/11516.bugfix b/changelog.d/11516.bugfix deleted file mode 100644 index 22bba93671d7..000000000000 --- a/changelog.d/11516.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where relations from other rooms could be included in the bundled aggregations of an event. diff --git a/changelog.d/11520.misc b/changelog.d/11520.misc deleted file mode 100644 index 2d84120e19e8..000000000000 --- a/changelog.d/11520.misc +++ /dev/null @@ -1 +0,0 @@ -Use HTTPStatus constants in place of literals in `tests.rest.client.test_auth`. \ No newline at end of file diff --git a/changelog.d/11531.misc b/changelog.d/11531.misc deleted file mode 100644 index ed6ef3bb3e56..000000000000 --- a/changelog.d/11531.misc +++ /dev/null @@ -1 +0,0 @@ -Add a receipt types constant for `m.read`. diff --git a/changelog.d/11535.misc b/changelog.d/11535.misc deleted file mode 100644 index 580ac354ab7e..000000000000 --- a/changelog.d/11535.misc +++ /dev/null @@ -1 +0,0 @@ -Clean up `synapse.rest.admin`. \ No newline at end of file diff --git a/changelog.d/11536.bugfix b/changelog.d/11536.bugfix deleted file mode 100644 index 4a1b00725479..000000000000 --- a/changelog.d/11536.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug which could cause `AssertionError`s to be written to the log when Synapse was restarted after purging events from the database. diff --git a/changelog.d/11538.feature b/changelog.d/11538.feature deleted file mode 100644 index b6229e2b4522..000000000000 --- a/changelog.d/11538.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental support for MSC3202: allowing application services to masquerade as specific devices. 
\ No newline at end of file diff --git a/changelog.d/11541.misc b/changelog.d/11541.misc deleted file mode 100644 index 31c72c2a20d0..000000000000 --- a/changelog.d/11541.misc +++ /dev/null @@ -1 +0,0 @@ -Support unprefixed versions of fallback key property names. diff --git a/changelog.d/11542.misc b/changelog.d/11542.misc deleted file mode 100644 index f61416503766..000000000000 --- a/changelog.d/11542.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing `errcode` to `parse_string` and `parse_boolean`. \ No newline at end of file diff --git a/changelog.d/11543.misc b/changelog.d/11543.misc deleted file mode 100644 index 99817d71a433..000000000000 --- a/changelog.d/11543.misc +++ /dev/null @@ -1 +0,0 @@ -Use HTTPStatus constants in place of literals in `synapse.http`. \ No newline at end of file diff --git a/changelog.d/11546.misc b/changelog.d/11546.misc deleted file mode 100644 index d451940bf216..000000000000 --- a/changelog.d/11546.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints to storage classes. diff --git a/changelog.d/11547.bugfix b/changelog.d/11547.bugfix deleted file mode 100644 index 3950c4c8d30c..000000000000 --- a/changelog.d/11547.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.17.0 where a pusher created for an email with capital letters would fail to be created. diff --git a/changelog.d/11549.misc b/changelog.d/11549.misc deleted file mode 100644 index d451940bf216..000000000000 --- a/changelog.d/11549.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints to storage classes. diff --git a/changelog.d/11550.misc b/changelog.d/11550.misc deleted file mode 100644 index d5577e0b6300..000000000000 --- a/changelog.d/11550.misc +++ /dev/null @@ -1 +0,0 @@ -Fix an inaccurate and misleading comment in the `/sync` code. \ No newline at end of file diff --git a/changelog.d/11551.misc b/changelog.d/11551.misc deleted file mode 100644 index d451940bf216..000000000000 --- a/changelog.d/11551.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints to storage classes. diff --git a/changelog.d/11553.doc b/changelog.d/11553.doc deleted file mode 100644 index 810ba1692861..000000000000 --- a/changelog.d/11553.doc +++ /dev/null @@ -1 +0,0 @@ -Add details for how to configure a TURN server when behind a NAT. Contibuted by @AndrewFerr. diff --git a/changelog.d/11555.misc b/changelog.d/11555.misc deleted file mode 100644 index d451940bf216..000000000000 --- a/changelog.d/11555.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints to storage classes. diff --git a/changelog.d/11556.misc b/changelog.d/11556.misc deleted file mode 100644 index 53b26aa676b4..000000000000 --- a/changelog.d/11556.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints to `synapse.logging.context`. diff --git a/changelog.d/11558.misc b/changelog.d/11558.misc deleted file mode 100644 index 7c334f17e007..000000000000 --- a/changelog.d/11558.misc +++ /dev/null @@ -1 +0,0 @@ -Stop populating unused database column `state_events.prev_state`. diff --git a/changelog.d/11560.misc b/changelog.d/11560.misc deleted file mode 100644 index eb968167f58a..000000000000 --- a/changelog.d/11560.misc +++ /dev/null @@ -1 +0,0 @@ -Minor efficiency improvements in event persistence. diff --git a/changelog.d/11564.misc b/changelog.d/11564.misc deleted file mode 100644 index 2c48e22de0dd..000000000000 --- a/changelog.d/11564.misc +++ /dev/null @@ -1 +0,0 @@ -Add some safety checks that storage functions are used correctly. 
diff --git a/changelog.d/11565.misc b/changelog.d/11565.misc deleted file mode 100644 index ddcafd32cbac..000000000000 --- a/changelog.d/11565.misc +++ /dev/null @@ -1 +0,0 @@ -Make `get_device` return `None` if the device doesn't exist rather than raising an exception. diff --git a/changelog.d/11566.misc b/changelog.d/11566.misc deleted file mode 100644 index c48e73cd486c..000000000000 --- a/changelog.d/11566.misc +++ /dev/null @@ -1 +0,0 @@ -Split the HTML parsing code from the URL preview resource code. diff --git a/changelog.d/11570.misc b/changelog.d/11570.misc deleted file mode 100644 index d9af8bdb0537..000000000000 --- a/changelog.d/11570.misc +++ /dev/null @@ -1 +0,0 @@ -Remove redundant `COALESCE()`s around `COUNT()`s in database queries. diff --git a/changelog.d/11571.misc b/changelog.d/11571.misc deleted file mode 100644 index 4e396b271e8f..000000000000 --- a/changelog.d/11571.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints to `synapse.http`. diff --git a/changelog.d/11574.misc b/changelog.d/11574.misc deleted file mode 100644 index 2b090a3780d0..000000000000 --- a/changelog.d/11574.misc +++ /dev/null @@ -1 +0,0 @@ -Convert `EventStreamResult` from a `namedtuple` to `attrs` to improve type hints. diff --git a/changelog.d/11575.misc b/changelog.d/11575.misc deleted file mode 100644 index d451940bf216..000000000000 --- a/changelog.d/11575.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints to storage classes. diff --git a/changelog.d/11580.misc b/changelog.d/11580.misc deleted file mode 100644 index 2c48e22de0dd..000000000000 --- a/changelog.d/11580.misc +++ /dev/null @@ -1 +0,0 @@ -Add some safety checks that storage functions are used correctly. diff --git a/changelog.d/11582.misc b/changelog.d/11582.misc deleted file mode 100644 index a0291f64e271..000000000000 --- a/changelog.d/11582.misc +++ /dev/null @@ -1 +0,0 @@ -Add [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) and [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) to `/versions` -> `unstable_features` to detect server support. diff --git a/changelog.d/11588.removal b/changelog.d/11588.removal deleted file mode 100644 index f781021e11c7..000000000000 --- a/changelog.d/11588.removal +++ /dev/null @@ -1 +0,0 @@ -Replace `mock` package by its standard library version. diff --git a/changelog.d/11589.misc b/changelog.d/11589.misc deleted file mode 100644 index 8e405b922674..000000000000 --- a/changelog.d/11589.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints to storage classes. \ No newline at end of file diff --git a/changelog.d/11590.misc b/changelog.d/11590.misc deleted file mode 100644 index 40e01194df19..000000000000 --- a/changelog.d/11590.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to `synapse/tests/rest/admin`. \ No newline at end of file diff --git a/changelog.d/11592.bugfix b/changelog.d/11592.bugfix deleted file mode 100644 index 4116e5dd7c7b..000000000000 --- a/changelog.d/11592.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where responses included bundled aggregations when they should not, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). diff --git a/changelog.d/11594.misc b/changelog.d/11594.misc deleted file mode 100644 index d451940bf216..000000000000 --- a/changelog.d/11594.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints to storage classes. 
diff --git a/changelog.d/11595.misc b/changelog.d/11595.misc deleted file mode 100644 index 57e54db5a9df..000000000000 --- a/changelog.d/11595.misc +++ /dev/null @@ -1 +0,0 @@ -Drop EOL python 3.6 and postgres 9.6 from CI. \ No newline at end of file diff --git a/changelog.d/11596.misc b/changelog.d/11596.misc deleted file mode 100644 index 3064bc632d57..000000000000 --- a/changelog.d/11596.misc +++ /dev/null @@ -1 +0,0 @@ -Update black version and run it on all the files. \ No newline at end of file diff --git a/changelog.d/11602.bugfix b/changelog.d/11602.bugfix deleted file mode 100644 index e0dfbf1a1520..000000000000 --- a/changelog.d/11602.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug that some unknown endpoints would return HTML error pages instead of JSON `M_UNRECOGNIZED` errors. diff --git a/changelog.d/11603.misc b/changelog.d/11603.misc deleted file mode 100644 index def24afb8d23..000000000000 --- a/changelog.d/11603.misc +++ /dev/null @@ -1 +0,0 @@ -Add opentracing type stubs and fix associated mypy errors. \ No newline at end of file diff --git a/changelog.d/11607.misc b/changelog.d/11607.misc deleted file mode 100644 index e82f46776364..000000000000 --- a/changelog.d/11607.misc +++ /dev/null @@ -1 +0,0 @@ -Improve opentracing support for requests which use a `ResponseCache`. diff --git a/changelog.d/11618.misc b/changelog.d/11618.misc deleted file mode 100644 index 4076b30bf7c1..000000000000 --- a/changelog.d/11618.misc +++ /dev/null @@ -1 +0,0 @@ -Improve opentracing support for incoming HTTP requests. diff --git a/changelog.d/11619.misc b/changelog.d/11619.misc deleted file mode 100644 index 2125cbddd2fd..000000000000 --- a/changelog.d/11619.misc +++ /dev/null @@ -1 +0,0 @@ -A number of improvements to opentracing support. diff --git a/changelog.d/11622.misc b/changelog.d/11622.misc deleted file mode 100644 index def24afb8d23..000000000000 --- a/changelog.d/11622.misc +++ /dev/null @@ -1 +0,0 @@ -Add opentracing type stubs and fix associated mypy errors. \ No newline at end of file diff --git a/changelog.d/11623.bugfix b/changelog.d/11623.bugfix deleted file mode 100644 index 4116e5dd7c7b..000000000000 --- a/changelog.d/11623.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where responses included bundled aggregations when they should not, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). diff --git a/changelog.d/11632.bugfix b/changelog.d/11632.bugfix deleted file mode 100644 index c73d41652a87..000000000000 --- a/changelog.d/11632.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.19.3 which could sometimes cause `AssertionError`s when backfilling rooms over federation. diff --git a/changelog.d/11633.misc b/changelog.d/11633.misc deleted file mode 100644 index 73e814e58ef2..000000000000 --- a/changelog.d/11633.misc +++ /dev/null @@ -1 +0,0 @@ -Drop support for Python 3.6 and Ubuntu 18.04. \ No newline at end of file diff --git a/changelog.d/11634.misc b/changelog.d/11634.misc deleted file mode 100644 index 4069cbc2f483..000000000000 --- a/changelog.d/11634.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor the way that the `outlier` flag is set on events received over federation. diff --git a/changelog.d/11638.misc b/changelog.d/11638.misc deleted file mode 100644 index 76dfb56bdac0..000000000000 --- a/changelog.d/11638.misc +++ /dev/null @@ -1 +0,0 @@ -Improve the error messages from `get_create_event_for_room`. 
diff --git a/changelog.d/11640.doc b/changelog.d/11640.doc deleted file mode 100644 index c4773e4f3ac5..000000000000 --- a/changelog.d/11640.doc +++ /dev/null @@ -1 +0,0 @@ -Add references for using Postgres to the Docker documentation. diff --git a/changelog.d/11642.bugfix b/changelog.d/11642.bugfix deleted file mode 100644 index 4a1b00725479..000000000000 --- a/changelog.d/11642.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug which could cause `AssertionError`s to be written to the log when Synapse was restarted after purging events from the database. diff --git a/changelog.d/11643.misc b/changelog.d/11643.misc deleted file mode 100644 index 1c3b3071f66a..000000000000 --- a/changelog.d/11643.misc +++ /dev/null @@ -1 +0,0 @@ -Remove redundant `get_current_events_token` method. diff --git a/changelog.d/11652.misc b/changelog.d/11652.misc deleted file mode 100644 index 8e405b922674..000000000000 --- a/changelog.d/11652.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints to storage classes. \ No newline at end of file diff --git a/changelog.d/11653.misc b/changelog.d/11653.misc deleted file mode 100644 index 8e405b922674..000000000000 --- a/changelog.d/11653.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints to storage classes. \ No newline at end of file diff --git a/changelog.d/11654.misc b/changelog.d/11654.misc deleted file mode 100644 index 8e405b922674..000000000000 --- a/changelog.d/11654.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints to storage classes. \ No newline at end of file diff --git a/changelog.d/11657.misc b/changelog.d/11657.misc deleted file mode 100644 index 8e405b922674..000000000000 --- a/changelog.d/11657.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints to storage classes. \ No newline at end of file diff --git a/changelog.d/11664.feature b/changelog.d/11664.feature deleted file mode 100644 index df81783c66a0..000000000000 --- a/changelog.d/11664.feature +++ /dev/null @@ -1 +0,0 @@ -Add admin API to get users' account data. \ No newline at end of file diff --git a/changelog.d/11665.misc b/changelog.d/11665.misc deleted file mode 100644 index e7cc8ff23f07..000000000000 --- a/changelog.d/11665.misc +++ /dev/null @@ -1 +0,0 @@ -Convert `namedtuples` to `attrs`. diff --git a/changelog.d/11666.feature b/changelog.d/11666.feature deleted file mode 100644 index 6f6b127e2278..000000000000 --- a/changelog.d/11666.feature +++ /dev/null @@ -1 +0,0 @@ -Include the room topic in the stripped state included with invites and knocking. diff --git a/changelog.d/11677.bugfix b/changelog.d/11677.bugfix deleted file mode 100644 index 5691064a303c..000000000000 --- a/changelog.d/11677.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix wrong variable reference in `SimpleHttpClient.get_json` that results in the absence of the `Accept` header in the request. diff --git a/changelog.d/11678.doc b/changelog.d/11678.doc deleted file mode 100644 index dff663e78238..000000000000 --- a/changelog.d/11678.doc +++ /dev/null @@ -1 +0,0 @@ -Fix the documentation link in newly-generated configuration files. diff --git a/changelog.d/11680.doc b/changelog.d/11680.doc deleted file mode 100644 index 09399ad9d0e6..000000000000 --- a/changelog.d/11680.doc +++ /dev/null @@ -1 +0,0 @@ -Correct the documentation for `nginx` to use a case-sensitive url pattern. Fixes an error introduced in v1.21.0. 
diff --git a/changelog.d/11681.doc b/changelog.d/11681.doc deleted file mode 100644 index fef70211cd7f..000000000000 --- a/changelog.d/11681.doc +++ /dev/null @@ -1 +0,0 @@ -Clarify SSO mapping provider documentation by writing `def` or `async def` before the names of methods, as appropriate. \ No newline at end of file diff --git a/changelog.d/11687.misc b/changelog.d/11687.misc deleted file mode 100644 index 926b562fade9..000000000000 --- a/changelog.d/11687.misc +++ /dev/null @@ -1 +0,0 @@ -Move `glob_to_regex` and `re_word_boundary` to `matrix-python-common`. diff --git a/debian/changelog b/debian/changelog index ebe3e0cbf958..b54c0ff34878 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.50.0~rc1) stable; urgency=medium + + * New synapse release 1.50.0~rc1. + + -- Synapse Packaging team Wed, 05 Jan 2022 12:36:17 +0000 + matrix-synapse-py3 (1.49.2) stable; urgency=medium * New synapse release 1.49.2. diff --git a/synapse/__init__.py b/synapse/__init__.py index 95a49c20befc..92aec334e6e7 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.49.2" +__version__ = "1.50.0rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 88a78c6577086527e4569541b09e437a1ca0d1a9 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 5 Jan 2022 13:33:28 +0000 Subject: [PATCH 051/474] Cache empty responses from `/user/devices` (#11587) If we've never made a request to a remote homeserver, we should cache the response---even if the response is "this user has no devices". --- changelog.d/11587.bugfix | 1 + synapse/handlers/device.py | 10 ++- synapse/storage/databases/main/devices.py | 8 +- tests/handlers/test_e2e_keys.py | 96 +++++++++++++++++++++++ tests/test_utils/__init__.py | 4 +- 5 files changed, 114 insertions(+), 5 deletions(-) create mode 100644 changelog.d/11587.bugfix diff --git a/changelog.d/11587.bugfix b/changelog.d/11587.bugfix new file mode 100644 index 000000000000..ad2b83edf7c7 --- /dev/null +++ b/changelog.d/11587.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where Synapse wouldn't cache a response indicating that a remote user has no devices. \ No newline at end of file diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 766542523218..b184a48cb16c 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -948,8 +948,16 @@ async def user_device_resync( devices = [] ignore_devices = True else: + prev_stream_id = await self.store.get_device_list_last_stream_id_for_remote( + user_id + ) cached_devices = await self.store.get_cached_devices_for_user(user_id) - if cached_devices == {d["device_id"]: d for d in devices}: + + # To ensure that a user with no devices is cached, we skip the resync only + # if we have a stream_id from previously writing a cache entry. 
+ if prev_stream_id is not None and cached_devices == { + d["device_id"]: d for d in devices + }: logging.info( "Skipping device list resync for %s, as our cache matches already", user_id, diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 273adb61fdae..52fbf50db64f 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -713,7 +713,7 @@ def _get_all_device_list_changes_for_remotes(txn): @cached(max_entries=10000) async def get_device_list_last_stream_id_for_remote( self, user_id: str - ) -> Optional[Any]: + ) -> Optional[str]: """Get the last stream_id we got for a user. May be None if we haven't got any information for them. """ @@ -729,7 +729,9 @@ async def get_device_list_last_stream_id_for_remote( cached_method_name="get_device_list_last_stream_id_for_remote", list_name="user_ids", ) - async def get_device_list_last_stream_id_for_remotes(self, user_ids: Iterable[str]): + async def get_device_list_last_stream_id_for_remotes( + self, user_ids: Iterable[str] + ) -> Dict[str, Optional[str]]: rows = await self.db_pool.simple_select_many_batch( table="device_lists_remote_extremeties", column="user_id", @@ -1316,6 +1318,7 @@ def _update_remote_device_list_cache_entry_txn( content: JsonDict, stream_id: str, ) -> None: + """Delete, update or insert a cache entry for this (user, device) pair.""" if content.get("deleted"): self.db_pool.simple_delete_txn( txn, @@ -1375,6 +1378,7 @@ async def update_remote_device_list_cache( def _update_remote_device_list_cache_txn( self, txn: LoggingTransaction, user_id: str, devices: List[dict], stream_id: int ) -> None: + """Replace the list of cached devices for this user with the given list.""" self.db_pool.simple_delete_txn( txn, table="device_lists_remote_cache", keyvalues={"user_id": user_id} ) diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index ddcf3ee34886..734ed84d78d0 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -13,8 +13,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from typing import Iterable from unittest import mock +from parameterized import parameterized from signedjson import key as key, sign as sign from twisted.internet import defer @@ -23,6 +25,7 @@ from synapse.api.errors import Codes, SynapseError from tests import unittest +from tests.test_utils import make_awaitable class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): @@ -765,6 +768,8 @@ def test_query_devices_remote_sync(self): remote_user_id = "@test:other" local_user_id = "@test:test" + # Pretend we're sharing a room with the user we're querying. If not, + # `_query_devices_for_destination` will return early. self.store.get_rooms_for_user = mock.Mock( return_value=defer.succeed({"some_room_id"}) ) @@ -831,3 +836,94 @@ def test_query_devices_remote_sync(self): } }, ) + + @parameterized.expand( + [ + # The remote homeserver's response indicates that this user has 0/1/2 devices. + ([],), + (["device_1"],), + (["device_1", "device_2"],), + ] + ) + def test_query_all_devices_caches_result(self, device_ids: Iterable[str]): + """Test that requests for all of a remote user's devices are cached. + + We do this by asserting that only one call over federation was made, and that + the two queries to the local homeserver produce the same response. 
+ """ + local_user_id = "@test:test" + remote_user_id = "@test:other" + request_body = {"device_keys": {remote_user_id: []}} + + response_devices = [ + { + "device_id": device_id, + "keys": { + "algorithms": ["dummy"], + "device_id": device_id, + "keys": {f"dummy:{device_id}": "dummy"}, + "signatures": {device_id: {f"dummy:{device_id}": "dummy"}}, + "unsigned": {}, + "user_id": "@test:other", + }, + } + for device_id in device_ids + ] + + response_body = { + "devices": response_devices, + "user_id": remote_user_id, + "stream_id": 12345, # an integer, according to the spec + } + + e2e_handler = self.hs.get_e2e_keys_handler() + + # Pretend we're sharing a room with the user we're querying. If not, + # `_query_devices_for_destination` will return early. + mock_get_rooms = mock.patch.object( + self.store, + "get_rooms_for_user", + new_callable=mock.MagicMock, + return_value=make_awaitable(["some_room_id"]), + ) + mock_request = mock.patch.object( + self.hs.get_federation_client(), + "query_user_devices", + new_callable=mock.MagicMock, + return_value=make_awaitable(response_body), + ) + + with mock_get_rooms, mock_request as mocked_federation_request: + # Make the first query and sanity check it succeeds. + response_1 = self.get_success( + e2e_handler.query_devices( + request_body, + timeout=10, + from_user_id=local_user_id, + from_device_id="some_device_id", + ) + ) + self.assertEqual(response_1["failures"], {}) + + # We should have made a federation request to do so. + mocked_federation_request.assert_called_once() + + # Reset the mock so we can prove we don't make a second federation request. + mocked_federation_request.reset_mock() + + # Repeat the query. + response_2 = self.get_success( + e2e_handler.query_devices( + request_body, + timeout=10, + from_user_id=local_user_id, + from_device_id="some_device_id", + ) + ) + self.assertEqual(response_2["failures"], {}) + + # We should not have made a second federation request. + mocked_federation_request.assert_not_called() + + # The two requests to the local homeserver should be identical. + self.assertEqual(response_1, response_2) diff --git a/tests/test_utils/__init__.py b/tests/test_utils/__init__.py index 15ac2bfeba1e..f05a373aa083 100644 --- a/tests/test_utils/__init__.py +++ b/tests/test_utils/__init__.py @@ -19,7 +19,7 @@ import warnings from asyncio import Future from binascii import unhexlify -from typing import Any, Awaitable, Callable, TypeVar +from typing import Awaitable, Callable, TypeVar from unittest.mock import Mock import attr @@ -46,7 +46,7 @@ def get_awaitable_result(awaitable: Awaitable[TV]) -> TV: raise Exception("awaitable has not yet completed") -def make_awaitable(result: Any) -> Awaitable[Any]: +def make_awaitable(result: TV) -> Awaitable[TV]: """ Makes an awaitable, suitable for mocking an `async` function. This uses Futures as they can be awaited multiple times so can be returned From 5f98d8e6fd437d172cc0e784e1f33503ee7ead8b Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Wed, 5 Jan 2022 14:05:15 +0000 Subject: [PATCH 052/474] Tweak changelog --- CHANGES.md | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 142cf5381e61..856164df4f6e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,9 +5,11 @@ Features -------- - Allow guests to send state events per [MSC3419](https://github.com/matrix-org/matrix-doc/pull/3419). 
([\#11378](https://github.com/matrix-org/synapse/issues/11378)) -- Add experimental support for part of MSC3202: allowing application services to masquerade as specific devices. ([\#11538](https://github.com/matrix-org/synapse/issues/11538)) +- Add experimental support for part of [MSC3202](https://github.com/matrix-org/matrix-doc/pull/3202): allowing application services to masquerade as specific devices. ([\#11538](https://github.com/matrix-org/synapse/issues/11538)) - Add admin API to get users' account data. ([\#11664](https://github.com/matrix-org/synapse/issues/11664)) - Include the room topic in the stripped state included with invites and knocking. ([\#11666](https://github.com/matrix-org/synapse/issues/11666)) +- Send and handle cross-signing messages using the stable prefix. ([\#10520](https://github.com/matrix-org/synapse/issues/10520)) +- Support unprefixed versions of fallback key property names. ([\#11541](https://github.com/matrix-org/synapse/issues/11541)) Bugfixes @@ -43,7 +45,6 @@ Deprecations and Removals Internal Changes ---------------- -- Send and handle cross-signing messages using the stable prefix. ([\#10520](https://github.com/matrix-org/synapse/issues/10520)) - Allow specific, experimental events to be created without `prev_events`. Used by [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716). ([\#11243](https://github.com/matrix-org/synapse/issues/11243)) - A test helper (`wait_for_background_updates`) no longer depends on classes defining a `store` property. ([\#11331](https://github.com/matrix-org/synapse/issues/11331)) - Add type hints to `synapse.appservice`. ([\#11360](https://github.com/matrix-org/synapse/issues/11360)) @@ -51,12 +52,11 @@ Internal Changes - Add test to ensure we share the same `state_group` across the whole historical batch when using the [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) `/batch_send` endpoint. ([\#11487](https://github.com/matrix-org/synapse/issues/11487)) - Refactor `tests.util.setup_test_homeserver` and `tests.server.setup_test_homeserver`. ([\#11503](https://github.com/matrix-org/synapse/issues/11503)) - Move `glob_to_regex` and `re_word_boundary` to `matrix-python-common`. ([\#11505](https://github.com/matrix-org/synapse/issues/11505), [\#11687](https://github.com/matrix-org/synapse/issues/11687)) -- Use HTTPStatus constants in place of literals in `tests.rest.client.test_auth`. ([\#11520](https://github.com/matrix-org/synapse/issues/11520)) +- Use `HTTPStatus` constants in place of literals in `tests.rest.client.test_auth`. ([\#11520](https://github.com/matrix-org/synapse/issues/11520)) - Add a receipt types constant for `m.read`. ([\#11531](https://github.com/matrix-org/synapse/issues/11531)) - Clean up `synapse.rest.admin`. ([\#11535](https://github.com/matrix-org/synapse/issues/11535)) -- Support unprefixed versions of fallback key property names. ([\#11541](https://github.com/matrix-org/synapse/issues/11541)) - Add missing `errcode` to `parse_string` and `parse_boolean`. ([\#11542](https://github.com/matrix-org/synapse/issues/11542)) -- Use HTTPStatus constants in place of literals in `synapse.http`. ([\#11543](https://github.com/matrix-org/synapse/issues/11543)) +- Use `HTTPStatus` constants in place of literals in `synapse.http`. ([\#11543](https://github.com/matrix-org/synapse/issues/11543)) - Add missing type hints to storage classes. 
([\#11546](https://github.com/matrix-org/synapse/issues/11546), [\#11549](https://github.com/matrix-org/synapse/issues/11549), [\#11551](https://github.com/matrix-org/synapse/issues/11551), [\#11555](https://github.com/matrix-org/synapse/issues/11555), [\#11575](https://github.com/matrix-org/synapse/issues/11575), [\#11589](https://github.com/matrix-org/synapse/issues/11589), [\#11594](https://github.com/matrix-org/synapse/issues/11594), [\#11652](https://github.com/matrix-org/synapse/issues/11652), [\#11653](https://github.com/matrix-org/synapse/issues/11653), [\#11654](https://github.com/matrix-org/synapse/issues/11654), [\#11657](https://github.com/matrix-org/synapse/issues/11657)) - Fix an inaccurate and misleading comment in the `/sync` code. ([\#11550](https://github.com/matrix-org/synapse/issues/11550)) - Add missing type hints to `synapse.logging.context`. ([\#11556](https://github.com/matrix-org/synapse/issues/11556)) @@ -67,20 +67,19 @@ Internal Changes - Split the HTML parsing code from the URL preview resource code. ([\#11566](https://github.com/matrix-org/synapse/issues/11566)) - Remove redundant `COALESCE()`s around `COUNT()`s in database queries. ([\#11570](https://github.com/matrix-org/synapse/issues/11570)) - Add missing type hints to `synapse.http`. ([\#11571](https://github.com/matrix-org/synapse/issues/11571)) -- Convert `EventStreamResult` from a `namedtuple` to `attrs` to improve type hints. ([\#11574](https://github.com/matrix-org/synapse/issues/11574)) - Add [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) and [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) to `/versions` -> `unstable_features` to detect server support. ([\#11582](https://github.com/matrix-org/synapse/issues/11582)) - Add type hints to `synapse/tests/rest/admin`. ([\#11590](https://github.com/matrix-org/synapse/issues/11590)) - Drop end-of-life Python 3.6 and Postgres 9.6 from CI. ([\#11595](https://github.com/matrix-org/synapse/issues/11595)) - Update black version and run it on all the files. ([\#11596](https://github.com/matrix-org/synapse/issues/11596)) - Add opentracing type stubs and fix associated mypy errors. ([\#11603](https://github.com/matrix-org/synapse/issues/11603), [\#11622](https://github.com/matrix-org/synapse/issues/11622)) -- Improve opentracing support for requests which use a `ResponseCache`. ([\#11607](https://github.com/matrix-org/synapse/issues/11607)) -- Improve opentracing support for incoming HTTP requests. ([\#11618](https://github.com/matrix-org/synapse/issues/11618)) +- Improve OpenTracing support for requests which use a `ResponseCache`. ([\#11607](https://github.com/matrix-org/synapse/issues/11607)) +- Improve OpenTracing support for incoming HTTP requests. ([\#11618](https://github.com/matrix-org/synapse/issues/11618)) - A number of improvements to opentracing support. ([\#11619](https://github.com/matrix-org/synapse/issues/11619)) - Drop support for Python 3.6 and Ubuntu 18.04. ([\#11633](https://github.com/matrix-org/synapse/issues/11633)) - Refactor the way that the `outlier` flag is set on events received over federation. ([\#11634](https://github.com/matrix-org/synapse/issues/11634)) - Improve the error messages from `get_create_event_for_room`. ([\#11638](https://github.com/matrix-org/synapse/issues/11638)) - Remove redundant `get_current_events_token` method. ([\#11643](https://github.com/matrix-org/synapse/issues/11643)) -- Convert `namedtuples` to `attrs`. 
([\#11665](https://github.com/matrix-org/synapse/issues/11665)) +- Convert `namedtuples` to `attrs`. ([\#11665](https://github.com/matrix-org/synapse/issues/11665), [\#11574](https://github.com/matrix-org/synapse/issues/11574)) Synapse 1.49.2 (2021-12-21) From ffd71029ab8dfef76184ffedd54af6de1c083b81 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Wed, 5 Jan 2022 14:10:00 +0000 Subject: [PATCH 053/474] Add support removal notice to the upgrade notes --- docs/upgrade.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/docs/upgrade.md b/docs/upgrade.md index 136c806c417a..30bb0dcd9cfd 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -85,6 +85,17 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.50.0 + +## Dropping support for old Python and Postgres versions + +In line with our [deprecation policy](deprecation_policy.md), +we've dropped support for Python 3.6 and PostgreSQL 9.6, as they are no +longer supported upstream. + +This release of Synapse requires Python 3.7+ and PostgreSQL 10+. + + # Upgrading to v1.47.0 ## Removal of old Room Admin API From 102f4d3598a387f90ff6a9c5c86a3f4de360931e Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Wed, 5 Jan 2022 14:14:57 +0000 Subject: [PATCH 054/474] Mention drop of support in changelog --- CHANGES.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 856164df4f6e..e8cd60e9e5b7 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,9 @@ Synapse 1.50.0rc1 (2022-01-05) ============================== +Please note that we now only support Python 3.7+ and PostgreSQL 10+ (if applicable), because Python 3.6 and PostgreSQL 9.6 have reached end-of-life. + + Features -------- From 877b45e8120c5f80ed0965b7c072fd0b8d7ce36a Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 5 Jan 2022 12:08:03 -0500 Subject: [PATCH 055/474] Include `io.element.thread` capability for MSC3440. (#11690) --- changelog.d/11690.misc | 1 + synapse/rest/client/capabilities.py | 3 +++ 2 files changed, 4 insertions(+) create mode 100644 changelog.d/11690.misc diff --git a/changelog.d/11690.misc b/changelog.d/11690.misc new file mode 100644 index 000000000000..76cd28686202 --- /dev/null +++ b/changelog.d/11690.misc @@ -0,0 +1 @@ +Update the `/capabilities` response to include whether support for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440) is available. diff --git a/synapse/rest/client/capabilities.py b/synapse/rest/client/capabilities.py index 2a3e24ae7e55..5c0e3a568007 100644 --- a/synapse/rest/client/capabilities.py +++ b/synapse/rest/client/capabilities.py @@ -73,6 +73,9 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: "enabled": self.config.registration.enable_3pid_changes } + if self.config.experimental.msc3440_enabled: + response["capabilities"]["io.element.thread"] = {"enabled": True} + return 200, response From 83acdb23fe318f8a41d2a6b800d994d46754a903 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 5 Jan 2022 12:09:15 -0500 Subject: [PATCH 056/474] Re-run towncrier. --- CHANGES.md | 1 + changelog.d/11690.misc | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) delete mode 100644 changelog.d/11690.misc diff --git a/CHANGES.md b/CHANGES.md index e8cd60e9e5b7..486cfae86f00 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -83,6 +83,7 @@ Internal Changes - Improve the error messages from `get_create_event_for_room`. 
([\#11638](https://github.com/matrix-org/synapse/issues/11638)) - Remove redundant `get_current_events_token` method. ([\#11643](https://github.com/matrix-org/synapse/issues/11643)) - Convert `namedtuples` to `attrs`. ([\#11665](https://github.com/matrix-org/synapse/issues/11665), [\#11574](https://github.com/matrix-org/synapse/issues/11574)) +- Update the `/capabilities` response to include whether support for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440) is available. ([\#11690](https://github.com/matrix-org/synapse/issues/11690)) Synapse 1.49.2 (2021-12-21) diff --git a/changelog.d/11690.misc b/changelog.d/11690.misc deleted file mode 100644 index 76cd28686202..000000000000 --- a/changelog.d/11690.misc +++ /dev/null @@ -1 +0,0 @@ -Update the `/capabilities` response to include whether support for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440) is available. From d8f94eeec23eba6274896e1aa2f92aa97ff10bee Mon Sep 17 00:00:00 2001 From: Shay Date: Wed, 5 Jan 2022 09:53:05 -0800 Subject: [PATCH 057/474] Run `pyupgrade --py37-plus --keep-percent-format` on Synapse (#11685) * newsfragment * fix newsfragment number * update changelog * remove extra space --- changelog.d/11685.misc | 1 + synapse/rest/admin/media.py | 2 +- synapse/storage/databases/main/session.py | 1 - 3 files changed, 2 insertions(+), 2 deletions(-) create mode 100644 changelog.d/11685.misc diff --git a/changelog.d/11685.misc b/changelog.d/11685.misc new file mode 100644 index 000000000000..c4566b2012e3 --- /dev/null +++ b/changelog.d/11685.misc @@ -0,0 +1 @@ +Run `pyupgrade --py37-plus --keep-percent-format` on Synapse. diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py index 7236e4027fa7..299f5c9eb0f2 100644 --- a/synapse/rest/admin/media.py +++ b/synapse/rest/admin/media.py @@ -466,7 +466,7 @@ async def on_DELETE( ) deleted_media, total = await self.media_repository.delete_local_media_ids( - ([row["media_id"] for row in media]) + [row["media_id"] for row in media] ) return HTTPStatus.OK, {"deleted_media": deleted_media, "total": total} diff --git a/synapse/storage/databases/main/session.py b/synapse/storage/databases/main/session.py index 5a97120437d0..e8c776b97a9b 100644 --- a/synapse/storage/databases/main/session.py +++ b/synapse/storage/databases/main/session.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); From daea7bcc34a0b5c4834fd06d204c620a23eab52c Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Wed, 5 Jan 2022 18:16:10 +0000 Subject: [PATCH 058/474] Tweak changelog for #11677 --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 486cfae86f00..77a56dd48102 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -24,7 +24,6 @@ Bugfixes - Fix a long-standing bug where responses included bundled aggregations when they should not, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#11592](https://github.com/matrix-org/synapse/issues/11592), [\#11623](https://github.com/matrix-org/synapse/issues/11623)) - Fix a long-standing bug that some unknown endpoints would return HTML error pages instead of JSON `M_UNRECOGNIZED` errors. ([\#11602](https://github.com/matrix-org/synapse/issues/11602)) - Fix a bug introduced in Synapse 1.19.3 which could sometimes cause `AssertionError`s when backfilling rooms over federation. 
([\#11632](https://github.com/matrix-org/synapse/issues/11632)) -- Fix wrong variable reference in `SimpleHttpClient.get_json` that results in the absence of the `Accept` header in the request. ([\#11677](https://github.com/matrix-org/synapse/issues/11677)) Improved Documentation @@ -84,6 +83,7 @@ Internal Changes - Remove redundant `get_current_events_token` method. ([\#11643](https://github.com/matrix-org/synapse/issues/11643)) - Convert `namedtuples` to `attrs`. ([\#11665](https://github.com/matrix-org/synapse/issues/11665), [\#11574](https://github.com/matrix-org/synapse/issues/11574)) - Update the `/capabilities` response to include whether support for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440) is available. ([\#11690](https://github.com/matrix-org/synapse/issues/11690)) +- Send the `Accept` header in HTTP requests made using `SimpleHttpClient.get_json`. ([\#11677](https://github.com/matrix-org/synapse/issues/11677)) Synapse 1.49.2 (2021-12-21) From 3b51c763ba5601e155e3e27a46cddf0370da83eb Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Wed, 5 Jan 2022 20:46:50 +0100 Subject: [PATCH 059/474] Fix getting the federation status of a destination if no error has occurred (#11593) --- changelog.d/11593.bugfix | 1 + synapse/rest/admin/federation.py | 26 +++++-- .../storage/databases/main/transactions.py | 11 +++ tests/rest/admin/test_federation.py | 75 ++++++++++++++----- 4 files changed, 88 insertions(+), 25 deletions(-) create mode 100644 changelog.d/11593.bugfix diff --git a/changelog.d/11593.bugfix b/changelog.d/11593.bugfix new file mode 100644 index 000000000000..963fd0e58ecf --- /dev/null +++ b/changelog.d/11593.bugfix @@ -0,0 +1 @@ +Fix an error when getting the federation status of a destination server for which no error has occurred. This admin API was newly introduced in Synapse 1.49.0.
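The fix below distinguishes a destination that is unknown to Synapse (still a 404) from one that is known but has never failed (now a normal response with default values). As a hedged sketch, the response from GET /_synapse/admin/v1/federation/destinations/<destination> in the second case might look like the following Python dict; the destination name and values are illustrative, not taken from a real server:

    # A known destination with no recorded failures; values are examples only.
    expected_response = {
        "destination": "matrix.org",
        "last_successful_stream_ordering": None,  # never successfully sent a PDU
        "failure_ts": None,   # no failure has ever been recorded
        "retry_last_ts": 0,   # defaults instead of a 404
        "retry_interval": 0,
    }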
diff --git a/synapse/rest/admin/federation.py b/synapse/rest/admin/federation.py index 50d88c91091b..8cd3fa189e8d 100644 --- a/synapse/rest/admin/federation.py +++ b/synapse/rest/admin/federation.py @@ -111,25 +111,37 @@ async def on_GET( ) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) + if not await self._store.is_destination_known(destination): + raise NotFoundError("Unknown destination") + destination_retry_timings = await self._store.get_destination_retry_timings( destination ) - if not destination_retry_timings: - raise NotFoundError("Unknown destination") - last_successful_stream_ordering = ( await self._store.get_destination_last_successful_stream_ordering( destination ) ) - response = { + response: JsonDict = { "destination": destination, - "failure_ts": destination_retry_timings.failure_ts, - "retry_last_ts": destination_retry_timings.retry_last_ts, - "retry_interval": destination_retry_timings.retry_interval, "last_successful_stream_ordering": last_successful_stream_ordering, } + if destination_retry_timings: + response = { + **response, + "failure_ts": destination_retry_timings.failure_ts, + "retry_last_ts": destination_retry_timings.retry_last_ts, + "retry_interval": destination_retry_timings.retry_interval, + } + else: + response = { + **response, + "failure_ts": None, + "retry_last_ts": 0, + "retry_interval": 0, + } + return HTTPStatus.OK, response diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index 6c299cafa511..4b78b4d098a9 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -560,3 +560,14 @@ def get_destinations_paginate_txn( return await self.db_pool.runInteraction( "get_destinations_paginate_txn", get_destinations_paginate_txn ) + + async def is_destination_known(self, destination: str) -> bool: + """Check if a destination is known to the server.""" + result = await self.db_pool.simple_select_one_onecol( + table="destinations", + keyvalues={"destination": destination}, + retcol="1", + allow_none=True, + desc="is_destination_known", + ) + return bool(result) diff --git a/tests/rest/admin/test_federation.py b/tests/rest/admin/test_federation.py index 742f19425729..b70350b6f1d7 100644 --- a/tests/rest/admin/test_federation.py +++ b/tests/rest/admin/test_federation.py @@ -314,15 +314,12 @@ def _order_test( retry_interval, last_successful_stream_ordering, ) in dest: - self.get_success( - self.store.set_destination_retry_timings( - destination, failure_ts, retry_last_ts, retry_interval - ) - ) - self.get_success( - self.store.set_destination_last_successful_stream_ordering( - destination, last_successful_stream_ordering - ) + self._create_destination( + destination, + failure_ts, + retry_last_ts, + retry_interval, + last_successful_stream_ordering, ) # order by default (destination) @@ -413,11 +410,9 @@ def _search_test( _search_test(None, "foo") _search_test(None, "bar") - def test_get_single_destination(self) -> None: - """ - Get one specific destinations. 
- """ - self._create_destinations(5) + def test_get_single_destination_with_retry_timings(self) -> None: + """Get one specific destination which has retry timings.""" + self._create_destinations(1) channel = self.make_request( "GET", @@ -432,6 +427,53 @@ def test_get_single_destination(self) -> None: # convert channel.json_body into a List self._check_fields([channel.json_body]) + def test_get_single_destination_no_retry_timings(self) -> None: + """Get one specific destination which has no retry timings.""" + self._create_destination("sub0.example.com") + + channel = self.make_request( + "GET", + self.url + "/sub0.example.com", + access_token=self.admin_user_tok, + ) + + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual("sub0.example.com", channel.json_body["destination"]) + self.assertEqual(0, channel.json_body["retry_last_ts"]) + self.assertEqual(0, channel.json_body["retry_interval"]) + self.assertIsNone(channel.json_body["failure_ts"]) + self.assertIsNone(channel.json_body["last_successful_stream_ordering"]) + + def _create_destination( + self, + destination: str, + failure_ts: Optional[int] = None, + retry_last_ts: int = 0, + retry_interval: int = 0, + last_successful_stream_ordering: Optional[int] = None, + ) -> None: + """Create one specific destination + + Args: + destination: the destination we have successfully sent to + failure_ts: when the server started failing (ms since epoch) + retry_last_ts: time of last retry attempt in unix epoch ms + retry_interval: how long until next retry in ms + last_successful_stream_ordering: the stream_ordering of the most + recent successfully-sent PDU + """ + self.get_success( + self.store.set_destination_retry_timings( + destination, failure_ts, retry_last_ts, retry_interval + ) + ) + if last_successful_stream_ordering is not None: + self.get_success( + self.store.set_destination_last_successful_stream_ordering( + destination, last_successful_stream_ordering + ) + ) + def _create_destinations(self, number_destinations: int) -> None: """Create a number of destinations @@ -440,10 +482,7 @@ def _create_destinations(self, number_destinations: int) -> None: """ for i in range(0, number_destinations): dest = f"sub{i}.example.com" - self.get_success(self.store.set_destination_retry_timings(dest, 50, 50, 50)) - self.get_success( - self.store.set_destination_last_successful_stream_ordering(dest, 100) - ) + self._create_destination(dest, 50, 50, 50, 100) def _check_fields(self, content: List[JsonDict]) -> None: """Checks that the expected destination attributes are present in content From feb3e006d7b75d523018980b03a8a827a8440a86 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Wed, 5 Jan 2022 13:33:43 -0700 Subject: [PATCH 060/474] Fix space hierarchy endpoint to match MSC2946 (#11667) Fixes minor discrepancies between the /hierarchy endpoint described in MSC2946 and the implementation. Note that the changes impact the stable and unstable /hierarchy and unstable /spaces endpoints for both client and federation APIs. --- changelog.d/11667.bugfix | 1 + synapse/handlers/room_summary.py | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelog.d/11667.bugfix diff --git a/changelog.d/11667.bugfix b/changelog.d/11667.bugfix new file mode 100644 index 000000000000..bf65fd4c8b27 --- /dev/null +++ b/changelog.d/11667.bugfix @@ -0,0 +1 @@ +Fix `/_matrix/client/v1/room/{roomId}/hierarchy` endpoint returning incorrect fields which have been present since Synapse 1.49.0. 
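The change below keeps the historical plural `join_rules` key alongside the singular `join_rule` key from MSC2946, and stops returning `creation_ts`. A hedged sketch of a single room entry in the corrected response, with illustrative values and only a subset of the fields the server can return:

    room_entry = {
        "room_id": "!space:example.org",  # illustrative
        "num_joined_members": 42,
        "join_rules": "public",  # historical key, matches /publicRooms
        "join_rule": "public",   # singular key per MSC2946
        "world_readable": False,
        "guest_can_join": True,
        "room_type": "m.space",
        # "creation_ts" is no longer included
    }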
diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py index b2cfe537dfb1..9ef88feb8ab7 100644 --- a/synapse/handlers/room_summary.py +++ b/synapse/handlers/room_summary.py @@ -209,7 +209,7 @@ async def get_space_summary( # Before returning to the client, remove the allowed_room_ids # and allowed_spaces keys. room.pop("allowed_room_ids", None) - room.pop("allowed_spaces", None) + room.pop("allowed_spaces", None) # historical rooms_result.append(room) events.extend(room_entry.children_state_events) @@ -988,12 +988,14 @@ async def _build_room_entry(self, room_id: str, for_federation: bool) -> JsonDic "canonical_alias": stats["canonical_alias"], "num_joined_members": stats["joined_members"], "avatar_url": stats["avatar"], + # plural join_rules is a documentation error but kept for historical + # purposes. Should match /publicRooms. "join_rules": stats["join_rules"], + "join_rule": stats["join_rules"], "world_readable": ( stats["history_visibility"] == HistoryVisibility.WORLD_READABLE ), "guest_can_join": stats["guest_access"] == "can_join", - "creation_ts": create_event.origin_server_ts, "room_type": create_event.content.get(EventContentFields.ROOM_TYPE), } From c9eb678b73838c2c90db1232c18b5cf658839609 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 6 Jan 2022 12:44:36 +0000 Subject: [PATCH 061/474] Remove debug logging for #4422 (#11693) as per https://github.com/matrix-org/synapse/pull/11532#discussion_r769123269 --- changelog.d/11693.misc | 1 + synapse/handlers/sync.py | 38 +------------------------------------- 2 files changed, 2 insertions(+), 37 deletions(-) create mode 100644 changelog.d/11693.misc diff --git a/changelog.d/11693.misc b/changelog.d/11693.misc new file mode 100644 index 000000000000..521a1796b8eb --- /dev/null +++ b/changelog.d/11693.misc @@ -0,0 +1 @@ +Remove debug logging for #4422, which has been closed since Synapse 0.99. \ No newline at end of file diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 7baf3f199c51..4b3f1ea0595c 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -60,10 +60,6 @@ logger = logging.getLogger(__name__) -# Debug logger for https://github.com/matrix-org/synapse/issues/4422 -issue4422_logger = logging.getLogger("synapse.handler.sync.4422_debug") - - # Counts the number of times we returned a non-empty sync. 
`type` is one of # "initial_sync", "full_state_sync" or "incremental_sync", `lazy_loaded` is # "true" or "false" depending on if the request asked for lazy loaded members or @@ -1161,13 +1157,8 @@ async def generate_sync_result( num_events = 0 - # debug for https://github.com/matrix-org/synapse/issues/4422 + # debug for https://github.com/matrix-org/synapse/issues/9424 for joined_room in sync_result_builder.joined: - room_id = joined_room.room_id - if room_id in newly_joined_rooms: - issue4422_logger.debug( - "Sync result for newly joined room %s: %r", room_id, joined_room - ) num_events += len(joined_room.timeline.events) log_kv( @@ -1740,18 +1731,6 @@ async def _get_rooms_changed( old_mem_ev_id, allow_none=True ) - # debug for #4422 - if has_join: - prev_membership = None - if old_mem_ev: - prev_membership = old_mem_ev.membership - issue4422_logger.debug( - "Previous membership for room %s with join: %s (event %s)", - room_id, - prev_membership, - old_mem_ev_id, - ) - if not old_mem_ev or old_mem_ev.membership != Membership.JOIN: newly_joined_rooms.append(room_id) @@ -1893,13 +1872,6 @@ async def _get_rooms_changed( upto_token=since_token, ) - if newly_joined: - # debugging for https://github.com/matrix-org/synapse/issues/4422 - issue4422_logger.debug( - "RoomSyncResultBuilder events for newly joined room %s: %r", - room_id, - entry.events, - ) room_entries.append(entry) return _RoomChanges( @@ -2077,14 +2049,6 @@ async def _generate_room_entry( # `_load_filtered_recents` can't find any events the user should see # (e.g. due to having ignored the sender of the last 50 events). - if newly_joined: - # debug for https://github.com/matrix-org/synapse/issues/4422 - issue4422_logger.debug( - "Timeline events after filtering in newly-joined room %s: %r", - room_id, - batch, - ) - # When we join the room (or the client requests full_state), we should # send down any existing tags. Usually the user won't have tags in a # newly joined room, unless either a) they've joined before or b) the From 2ef1fea8d2021e7a42259d21b57b32e9a7f04f1a Mon Sep 17 00:00:00 2001 From: lukasdenk <63459921+lukasdenk@users.noreply.github.com> Date: Thu, 6 Jan 2022 14:16:42 +0100 Subject: [PATCH 062/474] Make room creations denied by `user_may_create_room` cause an `M_FORBIDDEN` error to be returned, not `M_UNKNOWN` (#11672) Co-authored-by: reivilibre --- changelog.d/11672.feature | 1 + synapse/handlers/room.py | 8 ++++++-- 2 files changed, 7 insertions(+), 2 deletions(-) create mode 100644 changelog.d/11672.feature diff --git a/changelog.d/11672.feature b/changelog.d/11672.feature new file mode 100644 index 000000000000..ce8b3e9547f1 --- /dev/null +++ b/changelog.d/11672.feature @@ -0,0 +1 @@ +Return an `M_FORBIDDEN` error code instead of `M_UNKNOWN` when a spam checker module prevents a user from creating a room. 
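For context, the code below runs when a spam checker module vetoes room creation. A minimal sketch of such a module, assuming the `register_spam_checker_callbacks` module API of this Synapse series; the class name and callback body are illustrative:

    class DenyAllRoomCreation:
        def __init__(self, config, api):
            # Register the room-creation veto with the module API.
            api.register_spam_checker_callbacks(
                user_may_create_room=self.user_may_create_room
            )

        async def user_may_create_room(self, user_id: str) -> bool:
            # With this change, returning False reaches the client as
            # 403 M_FORBIDDEN rather than M_UNKNOWN.
            return False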
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index b9c1cbffa5c5..3d3a0f6ac30b 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -393,7 +393,9 @@ async def clone_existing_room( user_id = requester.user.to_string() if not await self.spam_checker.user_may_create_room(user_id): - raise SynapseError(403, "You are not permitted to create rooms") + raise SynapseError( + 403, "You are not permitted to create rooms", Codes.FORBIDDEN + ) creation_content: JsonDict = { "room_version": new_room_version.identifier, @@ -685,7 +687,9 @@ async def create_room( invite_3pid_list, ) ): - raise SynapseError(403, "You are not permitted to create rooms") + raise SynapseError( + 403, "You are not permitted to create rooms", Codes.FORBIDDEN + ) if ratelimit: await self.request_ratelimiter.ratelimit(requester) From eec34b1f2a53af45807cb718a26861be3f2fd43c Mon Sep 17 00:00:00 2001 From: reivilibre Date: Thu, 6 Jan 2022 16:36:26 +0000 Subject: [PATCH 063/474] Work around Mjolnir compatibility issue by adding an import for `glob_to_regex` in `synapse.util`, where it moved from. (#11696) --- changelog.d/11696.misc | 1 + synapse/util/__init__.py | 7 +++++++ 2 files changed, 8 insertions(+) create mode 100644 changelog.d/11696.misc diff --git a/changelog.d/11696.misc b/changelog.d/11696.misc new file mode 100644 index 000000000000..e8f39dde18f8 --- /dev/null +++ b/changelog.d/11696.misc @@ -0,0 +1 @@ +Work around Mjolnir compatibility issue by adding an import for `glob_to_regex` in `synapse.util`, where it moved from. \ No newline at end of file diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index f157132210dd..511f52534b3c 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -31,6 +31,13 @@ if typing.TYPE_CHECKING: pass +# FIXME Mjolnir imports glob_to_regex from this file, but it was moved to +# matrix_common. +# As a temporary workaround, we import glob_to_regex here for +# compatibility with current versions of Mjolnir. +# See https://github.com/matrix-org/mjolnir/pull/174 +from matrix_common.regex import glob_to_regex # noqa + logger = logging.getLogger(__name__) From 70ce9aea7183890cca435e2c4b462e24221d1d2e Mon Sep 17 00:00:00 2001 From: Shay Date: Thu, 6 Jan 2022 09:09:30 -0800 Subject: [PATCH 064/474] Strip unauthorized fields from `unsigned` object in events received over federation (#11530) * add some tests to verify we are stripping unauthorized fields out of unsigned * add function to strip unauthorized fields from the unsigned object of event * newsfragment * update newsfragment number * add check to on_send_membership_event * refactor tests * fix lint error * slightly refactor tests and add some comments * slight refactor * refactor tests * fix import error * slight refactor * remove unsigned filtration code from synapse/handlers/federation_event.py * lint * move unsigned filtering code to event base * refactor tests * update newsfragment * requested changes * remove unused retun values --- changelog.d/11530.bugfix | 2 + synapse/federation/federation_base.py | 25 ++++++++++ tests/test_federation.py | 72 +++++++++++++++++++++++++++ 3 files changed, 99 insertions(+) create mode 100644 changelog.d/11530.bugfix diff --git a/changelog.d/11530.bugfix b/changelog.d/11530.bugfix new file mode 100644 index 000000000000..7ea9ba4e4988 --- /dev/null +++ b/changelog.d/11530.bugfix @@ -0,0 +1,2 @@ +Fix a long-standing issue which could cause Synapse to incorrectly accept data in the unsigned field of events +received over federation. 
\ No newline at end of file diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index addc0bf00045..896168c05c0a 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -230,6 +230,10 @@ def event_from_pdu_json(pdu_json: JsonDict, room_version: RoomVersion) -> EventB # origin, etc etc) assert_params_in_dict(pdu_json, ("type", "depth")) + # Strip any unauthorized values from "unsigned" if they exist + if "unsigned" in pdu_json: + _strip_unsigned_values(pdu_json) + depth = pdu_json["depth"] if not isinstance(depth, int): raise SynapseError(400, "Depth %r not an intger" % (depth,), Codes.BAD_JSON) @@ -245,3 +249,24 @@ def event_from_pdu_json(pdu_json: JsonDict, room_version: RoomVersion) -> EventB event = make_event_from_dict(pdu_json, room_version) return event + + +def _strip_unsigned_values(pdu_dict: JsonDict) -> None: + """ + Strip any unsigned values unless specifically allowed, as defined by the whitelist. + + pdu: the json dict to strip values from. Note that the dict is mutated by this + function + """ + unsigned = pdu_dict["unsigned"] + + if not isinstance(unsigned, dict): + pdu_dict["unsigned"] = {} + + if pdu_dict["type"] == "m.room.member": + whitelist = ["knock_room_state", "invite_room_state", "age"] + else: + whitelist = ["age"] + + filtered_unsigned = {k: v for k, v in unsigned.items() if k in whitelist} + pdu_dict["unsigned"] = filtered_unsigned diff --git a/tests/test_federation.py b/tests/test_federation.py index 3eef1c4c05c8..2b9804aba005 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -17,7 +17,9 @@ from twisted.internet.defer import succeed from synapse.api.errors import FederationError +from synapse.api.room_versions import RoomVersions from synapse.events import make_event_from_dict +from synapse.federation.federation_base import event_from_pdu_json from synapse.logging.context import LoggingContext from synapse.types import UserID, create_requester from synapse.util import Clock @@ -276,3 +278,73 @@ def test_cross_signing_keys_retry(self): "ed25519:" + remote_self_signing_key in self_signing_key["keys"].keys(), ) self.assertTrue(remote_self_signing_key in self_signing_key["keys"].values()) + + +class StripUnsignedFromEventsTestCase(unittest.TestCase): + def test_strip_unauthorized_unsigned_values(self): + event1 = { + "sender": "@baduser:test.serv", + "state_key": "@baduser:test.serv", + "event_id": "$event1:test.serv", + "depth": 1000, + "origin_server_ts": 1, + "type": "m.room.member", + "origin": "test.servx", + "content": {"membership": "join"}, + "auth_events": [], + "unsigned": {"malicious garbage": "hackz", "more warez": "more hackz"}, + } + filtered_event = event_from_pdu_json(event1, RoomVersions.V1) + # Make sure unauthorized fields are stripped from unsigned + self.assertNotIn("more warez", filtered_event.unsigned) + + def test_strip_event_maintains_allowed_fields(self): + event2 = { + "sender": "@baduser:test.serv", + "state_key": "@baduser:test.serv", + "event_id": "$event2:test.serv", + "depth": 1000, + "origin_server_ts": 1, + "type": "m.room.member", + "origin": "test.servx", + "auth_events": [], + "content": {"membership": "join"}, + "unsigned": { + "malicious garbage": "hackz", + "more warez": "more hackz", + "age": 14, + "invite_room_state": [], + }, + } + + filtered_event2 = event_from_pdu_json(event2, RoomVersions.V1) + self.assertIn("age", filtered_event2.unsigned) + self.assertEqual(14, filtered_event2.unsigned["age"]) + 
self.assertNotIn("more warez", filtered_event2.unsigned) + # Invite_room_state is allowed in events of type m.room.member + self.assertIn("invite_room_state", filtered_event2.unsigned) + self.assertEqual([], filtered_event2.unsigned["invite_room_state"]) + + def test_strip_event_removes_fields_based_on_event_type(self): + event3 = { + "sender": "@baduser:test.serv", + "state_key": "@baduser:test.serv", + "event_id": "$event3:test.serv", + "depth": 1000, + "origin_server_ts": 1, + "type": "m.room.power_levels", + "origin": "test.servx", + "content": {}, + "auth_events": [], + "unsigned": { + "malicious garbage": "hackz", + "more warez": "more hackz", + "age": 14, + "invite_room_state": [], + }, + } + filtered_event3 = event_from_pdu_json(event3, RoomVersions.V1) + self.assertIn("age", filtered_event3.unsigned) + # Invite_room_state field is only permitted in event type m.room.member + self.assertNotIn("invite_room_state", filtered_event3.unsigned) + self.assertNotIn("more warez", filtered_event3.unsigned) From e87540abb1647b513f6385edccc5164b20f2a6ab Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Thu, 6 Jan 2022 17:54:21 +0000 Subject: [PATCH 065/474] Re-run Towncrier to add in the changelog entry for the Mjolnir workaround --- CHANGES.md | 1 + changelog.d/11696.misc | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) delete mode 100644 changelog.d/11696.misc diff --git a/CHANGES.md b/CHANGES.md index 77a56dd48102..0ba0cb4eca65 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -84,6 +84,7 @@ Internal Changes - Convert `namedtuples` to `attrs`. ([\#11665](https://github.com/matrix-org/synapse/issues/11665), [\#11574](https://github.com/matrix-org/synapse/issues/11574)) - Update the `/capabilities` response to include whether support for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440) is available. ([\#11690](https://github.com/matrix-org/synapse/issues/11690)) - Send the `Accept` header in HTTP requests made using `SimpleHttpClient.get_json`. ([\#11677](https://github.com/matrix-org/synapse/issues/11677)) +- Work around Mjolnir compatibility issue by adding an import for `glob_to_regex` in `synapse.util`, where it moved from. ([\#11696](https://github.com/matrix-org/synapse/issues/11696)) Synapse 1.49.2 (2021-12-21) diff --git a/changelog.d/11696.misc b/changelog.d/11696.misc deleted file mode 100644 index e8f39dde18f8..000000000000 --- a/changelog.d/11696.misc +++ /dev/null @@ -1 +0,0 @@ -Work around Mjolnir compatibility issue by adding an import for `glob_to_regex` in `synapse.util`, where it moved from. \ No newline at end of file From 201c48c8de35547c5e13b28a2616a8b7f880bad6 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 6 Jan 2022 13:08:48 -0500 Subject: [PATCH 066/474] Remove a Python 2-ism and improve type hints. (#11699) On Python 2, indexing a byte-string gives back a byte-string, while on Python 3 it gives back the ASCII equivalent as an int. --- changelog.d/11699.misc | 1 + synapse/types.py | 19 +++++++------------ 2 files changed, 8 insertions(+), 12 deletions(-) create mode 100644 changelog.d/11699.misc diff --git a/changelog.d/11699.misc b/changelog.d/11699.misc new file mode 100644 index 000000000000..ffae5f296061 --- /dev/null +++ b/changelog.d/11699.misc @@ -0,0 +1 @@ +Remove fallback code for Python 2. 
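The cleanup below leans on a Python 3 behaviour that the removed fallback existed to paper over: indexing a bytes object yields an int, so no ord() call is needed before %-formatting. A standalone illustration:

    # On Python 3, b"ab"[0] is the int 97 (on Python 2 it was the bytes b"a").
    assert b"ab"[0] == 97
    # An int can be %-formatted into a hex byte directly.
    assert b"=%02x" % (b"ab"[0],) == b"=61"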
diff --git a/synapse/types.py b/synapse/types.py index 42aeaf6270e1..74a2c5185705 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -21,6 +21,7 @@ ClassVar, Dict, Mapping, + Match, MutableMapping, Optional, Tuple, @@ -380,7 +381,7 @@ def map_username_to_mxid_localpart( onto different mxids Returns: - unicode: string suitable for a mxid localpart + string suitable for a mxid localpart """ if not isinstance(username, bytes): username = username.encode("utf-8") @@ -388,29 +389,23 @@ def map_username_to_mxid_localpart( # first we sort out upper-case characters if case_sensitive: - def f1(m): + def f1(m: Match[bytes]) -> bytes: return b"_" + m.group().lower() username = UPPER_CASE_PATTERN.sub(f1, username) else: username = username.lower() - # then we sort out non-ascii characters - def f2(m): - g = m.group()[0] - if isinstance(g, str): - # on python 2, we need to do a ord(). On python 3, the - # byte itself will do. - g = ord(g) - return b"=%02x" % (g,) + # then we sort out non-ascii characters by converting to the hex equivalent. + def f2(m: Match[bytes]) -> bytes: + return b"=%02x" % (m.group()[0],) username = NON_MXID_CHARACTER_PATTERN.sub(f2, username) # we also do the =-escaping to mxids starting with an underscore. username = re.sub(b"^_", b"=5f", username) - # we should now only have ascii bytes left, so can decode back to a - # unicode. + # we should now only have ascii bytes left, so can decode back to a string. return username.decode("ascii") From 6c68e874b1eafe75db51e46064c0d3af702b4358 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 6 Jan 2022 14:00:34 -0500 Subject: [PATCH 067/474] Remove the /send_relation endpoint. (#11682) This was removed from MSC2674 before that was approved and is not used by any known clients. --- changelog.d/11682.removal | 1 + synapse/rest/client/relations.py | 125 ++-------------------------- tests/rest/client/test_relations.py | 26 +++--- 3 files changed, 19 insertions(+), 133 deletions(-) create mode 100644 changelog.d/11682.removal diff --git a/changelog.d/11682.removal b/changelog.d/11682.removal new file mode 100644 index 000000000000..50bdf35b2003 --- /dev/null +++ b/changelog.d/11682.removal @@ -0,0 +1 @@ +Remove the unstable `/send_relation` endpoint. 
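Clients that used `/send_relation` can produce the same events through the standard send endpoint by embedding an `m.relates_to` block in the event content, as the rewritten tests below do. A hedged sketch of the equivalent request, with illustrative parent event ID and reaction key:

    # POST /_matrix/client/v3/rooms/{room_id}/send/m.reaction
    reaction_content = {
        "m.relates_to": {
            "rel_type": "m.annotation",
            "event_id": "$parent_event_id",  # illustrative
            "key": "👍",
        }
    }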
diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py index 5815650ee6cb..3823498012d7 100644 --- a/synapse/rest/client/relations.py +++ b/synapse/rest/client/relations.py @@ -19,28 +19,20 @@ """ import logging -from typing import TYPE_CHECKING, Awaitable, Optional, Tuple +from typing import TYPE_CHECKING, Optional, Tuple -from synapse.api.constants import EventTypes, RelationTypes -from synapse.api.errors import ShadowBanError, SynapseError +from synapse.api.constants import RelationTypes +from synapse.api.errors import SynapseError from synapse.http.server import HttpServer -from synapse.http.servlet import ( - RestServlet, - parse_integer, - parse_json_object_from_request, - parse_string, -) +from synapse.http.servlet import RestServlet, parse_integer, parse_string from synapse.http.site import SynapseRequest -from synapse.rest.client.transactions import HttpTransactionCache +from synapse.rest.client._base import client_patterns from synapse.storage.relations import ( AggregationPaginationToken, PaginationChunk, RelationPaginationToken, ) from synapse.types import JsonDict -from synapse.util.stringutils import random_string - -from ._base import client_patterns if TYPE_CHECKING: from synapse.server import HomeServer @@ -48,112 +40,6 @@ logger = logging.getLogger(__name__) -class RelationSendServlet(RestServlet): - """Helper API for sending events that have relation data. - - Example API shape to send a 👍 reaction to a room: - - POST /rooms/!foo/send_relation/$bar/m.annotation/m.reaction?key=%F0%9F%91%8D - {} - - { - "event_id": "$foobar" - } - """ - - PATTERN = ( - "/rooms/(?P[^/]*)/send_relation" - "/(?P[^/]*)/(?P[^/]*)/(?P[^/]*)" - ) - - def __init__(self, hs: "HomeServer"): - super().__init__() - self.auth = hs.get_auth() - self.event_creation_handler = hs.get_event_creation_handler() - self.txns = HttpTransactionCache(hs) - - def register(self, http_server: HttpServer) -> None: - http_server.register_paths( - "POST", - client_patterns(self.PATTERN + "$", releases=()), - self.on_PUT_or_POST, - self.__class__.__name__, - ) - http_server.register_paths( - "PUT", - client_patterns(self.PATTERN + "/(?P[^/]*)$", releases=()), - self.on_PUT, - self.__class__.__name__, - ) - - def on_PUT( - self, - request: SynapseRequest, - room_id: str, - parent_id: str, - relation_type: str, - event_type: str, - txn_id: Optional[str] = None, - ) -> Awaitable[Tuple[int, JsonDict]]: - return self.txns.fetch_or_execute_request( - request, - self.on_PUT_or_POST, - request, - room_id, - parent_id, - relation_type, - event_type, - txn_id, - ) - - async def on_PUT_or_POST( - self, - request: SynapseRequest, - room_id: str, - parent_id: str, - relation_type: str, - event_type: str, - txn_id: Optional[str] = None, - ) -> Tuple[int, JsonDict]: - requester = await self.auth.get_user_by_req(request, allow_guest=True) - - if event_type == EventTypes.Member: - # Add relations to a membership is meaningless, so we just deny it - # at the CS API rather than trying to handle it correctly. 
- raise SynapseError(400, "Cannot send member events with relations") - - content = parse_json_object_from_request(request) - - aggregation_key = parse_string(request, "key", encoding="utf-8") - - content["m.relates_to"] = { - "event_id": parent_id, - "rel_type": relation_type, - } - if aggregation_key is not None: - content["m.relates_to"]["key"] = aggregation_key - - event_dict = { - "type": event_type, - "content": content, - "room_id": room_id, - "sender": requester.user.to_string(), - } - - try: - ( - event, - _, - ) = await self.event_creation_handler.create_and_send_nonmember_event( - requester, event_dict=event_dict, txn_id=txn_id - ) - event_id = event.event_id - except ShadowBanError: - event_id = "$" + random_string(43) - - return 200, {"event_id": event_id} - - class RelationPaginationServlet(RestServlet): """API to paginate relations on an event by topological ordering, optionally filtered by relation type and event type. @@ -431,7 +317,6 @@ async def on_GET( def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: - RelationSendServlet(hs).register(http_server) RelationPaginationServlet(hs).register(http_server) RelationAggregationPaginationServlet(hs).register(http_server) RelationAggregationGroupPaginationServlet(hs).register(http_server) diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index c026d526ef7b..ff4e81d0695f 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -93,11 +93,6 @@ def test_send_relation(self): channel.json_body, ) - def test_deny_membership(self): - """Test that we deny relations on membership events""" - channel = self._send_relation(RelationTypes.ANNOTATION, EventTypes.Member) - self.assertEquals(400, channel.code, channel.json_body) - def test_deny_invalid_event(self): """Test that we deny relations on non-existant events""" channel = self._send_relation( @@ -1119,7 +1114,8 @@ def _send_relation( relation_type: One of `RelationTypes` event_type: The type of the event to create key: The aggregation key used for m.annotation relation type. - content: The content of the created event. + content: The content of the created event. Will be modified to configure + the m.relates_to key based on the other provided parameters. access_token: The access token used to send the relation, defaults to `self.user_token` parent_id: The event_id this relation relates to. 
            If None, then self.parent_id
@@ -1130,17 +1126,21 @@ def _send_relation(
         if not access_token:
             access_token = self.user_token
 
-        query = ""
-        if key:
-            query = "?key=" + urllib.parse.quote_plus(key.encode("utf-8"))
-
         original_id = parent_id if parent_id else self.parent_id
 
+        if content is None:
+            content = {}
+        content["m.relates_to"] = {
+            "event_id": original_id,
+            "rel_type": relation_type,
+        }
+        if key is not None:
+            content["m.relates_to"]["key"] = key
+
         channel = self.make_request(
             "POST",
-            "/_matrix/client/unstable/rooms/%s/send_relation/%s/%s/%s%s"
-            % (self.room, original_id, relation_type, event_type, query),
-            content or {},
+            f"/_matrix/client/v3/rooms/{self.room}/send/{event_type}",
+            content,
             access_token=access_token,
         )
         return channel

From 7fe7c454380a39a295aa77e75ab460975e9eb6a9 Mon Sep 17 00:00:00 2001
From: "Olivier Wilkinson (reivilibre)"
Date: Fri, 7 Jan 2022 12:51:20 +0000
Subject: [PATCH 068/474] Move changelog entry for drop of support to
 'Deprecations and Removals'

---
 CHANGES.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGES.md b/CHANGES.md
index 0ba0cb4eca65..f91109f88570 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -42,6 +42,7 @@ Deprecations and Removals
 -------------------------
 
 - Replace `mock` package by its standard library version. ([\#11588](https://github.com/matrix-org/synapse/issues/11588))
+- Drop support for Python 3.6 and Ubuntu 18.04. ([\#11633](https://github.com/matrix-org/synapse/issues/11633))
 
 
 Internal Changes
@@ -77,7 +78,6 @@ Internal Changes
 - Improve OpenTracing support for requests which use a `ResponseCache`. ([\#11607](https://github.com/matrix-org/synapse/issues/11607))
 - Improve OpenTracing support for incoming HTTP requests. ([\#11618](https://github.com/matrix-org/synapse/issues/11618))
 - A number of improvements to opentracing support. ([\#11619](https://github.com/matrix-org/synapse/issues/11619))
-- Drop support for Python 3.6 and Ubuntu 18.04. ([\#11633](https://github.com/matrix-org/synapse/issues/11633))
 - Refactor the way that the `outlier` flag is set on events received over federation. ([\#11634](https://github.com/matrix-org/synapse/issues/11634))
 - Improve the error messages from `get_create_event_for_room`. ([\#11638](https://github.com/matrix-org/synapse/issues/11638))
 - Remove redundant `get_current_events_token` method. ([\#11643](https://github.com/matrix-org/synapse/issues/11643))

From 6bf81a7a61d8d5248be5def955104c44fcb78dae Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Fri, 7 Jan 2022 09:10:46 -0500
Subject: [PATCH 069/474] Bundle aggregations outside of the serialization
 method. (#11612)

This makes the serialization of events synchronous (and it no longer
accesses the database), but we must manually calculate and provide the
bundled aggregations.

Overall this should cause no change in behavior, but is prep work for other
improvements.
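In outline, the calling pattern this patch introduces is: fetch the bundled aggregations asynchronously up front, then serialize events synchronously from that precomputed map. A minimal sketch of the new flow (the `get_bundled_aggregations` and `serialize_events` names match the diff below; the surrounding handler scaffolding is illustrative only):

```python
from typing import Any, Dict, List

from synapse.events import EventBase
from synapse.types import JsonDict


async def get_serialized_events(
    handler: Any, events: List[EventBase], time_now: int
) -> List[JsonDict]:
    # Step 1 (async): look up the bundled aggregations for all events in one
    # go; after this patch, this is the only step that touches the database.
    aggregations: Dict[str, JsonDict] = await handler.store.get_bundled_aggregations(
        events
    )

    # Step 2 (sync): serialization no longer awaits anything; it just folds
    # the precomputed aggregations into each event's unsigned "m.relations".
    return handler._event_serializer.serialize_events(
        events, time_now, bundle_aggregations=aggregations
    )
```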
---
 changelog.d/11612.misc                      |   1 +
 synapse/events/utils.py                     | 126 ++++++-------------
 synapse/handlers/events.py                  |   2 +-
 synapse/handlers/initial_sync.py            |  16 ++-
 synapse/handlers/message.py                 |   2 +-
 synapse/handlers/pagination.py              |   8 +-
 synapse/handlers/room.py                    |  10 ++
 synapse/handlers/search.py                  |  10 +-
 synapse/rest/admin/rooms.py                 |  16 +--
 synapse/rest/client/events.py               |   2 +-
 synapse/rest/client/notifications.py        |   2 +-
 synapse/rest/client/relations.py            |  11 +-
 synapse/rest/client/room.py                 |  28 +++--
 synapse/rest/client/sync.py                 |  39 +++---
 synapse/server.py                           |   2 +-
 synapse/storage/databases/main/relations.py | 128 +++++++++++++++++++-
 tests/rest/client/test_retention.py         |   2 +-
 17 files changed, 249 insertions(+), 156 deletions(-)
 create mode 100644 changelog.d/11612.misc

diff --git a/changelog.d/11612.misc b/changelog.d/11612.misc
new file mode 100644
index 000000000000..2d886169c547
--- /dev/null
+++ b/changelog.d/11612.misc
@@ -0,0 +1 @@
+Avoid database access in the JSON serialization process.
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index 2038e72924a9..de0e0c17312a 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -14,17 +14,7 @@
 # limitations under the License.
 import collections.abc
 import re
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Callable,
-    Dict,
-    Iterable,
-    List,
-    Mapping,
-    Optional,
-    Union,
-)
+from typing import Any, Callable, Dict, Iterable, List, Mapping, Optional, Union
 
 from frozendict import frozendict
 
@@ -32,14 +22,10 @@
 from synapse.api.errors import Codes, SynapseError
 from synapse.api.room_versions import RoomVersion
 from synapse.types import JsonDict
-from synapse.util.async_helpers import yieldable_gather_results
 from synapse.util.frozenutils import unfreeze
 
 from . import EventBase
 
-if TYPE_CHECKING:
-    from synapse.server import HomeServer
-
 # Split strings on "." but not "\." This uses a negative lookbehind assertion for '\'
 # (?<!stuff)
 SPLIT_FIELD_REGEX = re.compile(r"(?<!\\)\.")
@@ -385,17 +371,12 @@ class EventClientSerializer:
     clients.
     """
 
-    def __init__(self, hs: "HomeServer"):
-        self.store = hs.get_datastore()
-        self._msc1849_enabled = hs.config.experimental.msc1849_enabled
-        self._msc3440_enabled = hs.config.experimental.msc3440_enabled
-
-    async def serialize_event(
+    def serialize_event(
         self,
         event: Union[JsonDict, EventBase],
         time_now: int,
         *,
-        bundle_aggregations: bool = True,
+        bundle_aggregations: Optional[Dict[str, JsonDict]] = None,
         **kwargs: Any,
     ) -> JsonDict:
         """Serializes a single event.
@@ -418,66 +399,41 @@ async def serialize_event(
         serialized_event = serialize_event(event, time_now, **kwargs)
 
         # Check if there are any bundled aggregations to include with the event.
-        #
-        # Do not bundle aggregations if any of the following at true:
-        #
-        # * Support is disabled via the configuration or the caller.
-        # * The event is a state event.
-        # * The event has been redacted.
-        if (
-            self._msc1849_enabled
-            and bundle_aggregations
-            and not event.is_state()
-            and not event.internal_metadata.is_redacted()
-        ):
-            await self._injected_bundled_aggregations(event, time_now, serialized_event)
+        if bundle_aggregations:
+            event_aggregations = bundle_aggregations.get(event.event_id)
+            if event_aggregations:
+                self._injected_bundled_aggregations(
+                    event,
+                    time_now,
+                    bundle_aggregations[event.event_id],
+                    serialized_event,
+                )
 
         return serialized_event
 
-    async def _injected_bundled_aggregations(
-        self, event: EventBase, time_now: int, serialized_event: JsonDict
+    def _injected_bundled_aggregations(
+        self,
+        event: EventBase,
+        time_now: int,
+        aggregations: JsonDict,
+        serialized_event: JsonDict,
     ) -> None:
         """Potentially injects bundled aggregations into the unsigned portion of the
         serialized event.
 
         Args:
             event: The event being serialized.
             time_now: The current time in milliseconds
+            aggregations: The bundled aggregation to serialize.
             serialized_event: The serialized event which may be modified.
         """
-        # Do not bundle aggregations for an event which represents an edit or an
-        # annotation. It does not make sense for them to have related events.
- relates_to = event.content.get("m.relates_to") - if isinstance(relates_to, (dict, frozendict)): - relation_type = relates_to.get("rel_type") - if relation_type in (RelationTypes.ANNOTATION, RelationTypes.REPLACE): - return - - event_id = event.event_id - room_id = event.room_id - - # The bundled aggregations to include. - aggregations = {} - - annotations = await self.store.get_aggregation_groups_for_event( - event_id, room_id - ) - if annotations.chunk: - aggregations[RelationTypes.ANNOTATION] = annotations.to_dict() + # Make a copy in-case the object is cached. + aggregations = aggregations.copy() - references = await self.store.get_relations_for_event( - event_id, room_id, RelationTypes.REFERENCE, direction="f" - ) - if references.chunk: - aggregations[RelationTypes.REFERENCE] = references.to_dict() - - edit = None - if event.type == EventTypes.Message: - edit = await self.store.get_applicable_edit(event_id, room_id) - - if edit: + if RelationTypes.REPLACE in aggregations: # If there is an edit replace the content, preserving existing # relations. + edit = aggregations[RelationTypes.REPLACE] # Ensure we take copies of the edit content, otherwise we risk modifying # the original event. @@ -502,27 +458,19 @@ async def _injected_bundled_aggregations( } # If this event is the start of a thread, include a summary of the replies. - if self._msc3440_enabled: - ( - thread_count, - latest_thread_event, - ) = await self.store.get_thread_summary(event_id, room_id) - if latest_thread_event: - aggregations[RelationTypes.THREAD] = { - # Don't bundle aggregations as this could recurse forever. - "latest_event": await self.serialize_event( - latest_thread_event, time_now, bundle_aggregations=False - ), - "count": thread_count, - } - - # If any bundled aggregations were found, include them. - if aggregations: - serialized_event["unsigned"].setdefault("m.relations", {}).update( - aggregations + if RelationTypes.THREAD in aggregations: + # Serialize the latest thread event. + latest_thread_event = aggregations[RelationTypes.THREAD]["latest_event"] + + # Don't bundle aggregations as this could recurse forever. + aggregations[RelationTypes.THREAD]["latest_event"] = self.serialize_event( + latest_thread_event, time_now, bundle_aggregations=None ) - async def serialize_events( + # Include the bundled aggregations in the event. + serialized_event["unsigned"].setdefault("m.relations", {}).update(aggregations) + + def serialize_events( self, events: Iterable[Union[JsonDict, EventBase]], time_now: int, **kwargs: Any ) -> List[JsonDict]: """Serializes multiple events. 
@@ -535,9 +483,9 @@ async def serialize_events( Returns: The list of serialized events """ - return await yieldable_gather_results( - self.serialize_event, events, time_now=time_now, **kwargs - ) + return [ + self.serialize_event(event, time_now=time_now, **kwargs) for event in events + ] def copy_power_levels_contents( diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 1b996c420d20..a3add8a58679 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -119,7 +119,7 @@ async def get_stream( events.extend(to_add) - chunks = await self._event_serializer.serialize_events( + chunks = self._event_serializer.serialize_events( events, time_now, as_client_event=as_client_event, diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 601bab67f9cc..346a06ff49b7 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -170,7 +170,7 @@ async def handle_room(event: RoomsForUser) -> None: d["inviter"] = event.sender invite_event = await self.store.get_event(event.event_id) - d["invite"] = await self._event_serializer.serialize_event( + d["invite"] = self._event_serializer.serialize_event( invite_event, time_now, as_client_event=as_client_event, @@ -222,7 +222,7 @@ async def handle_room(event: RoomsForUser) -> None: d["messages"] = { "chunk": ( - await self._event_serializer.serialize_events( + self._event_serializer.serialize_events( messages, time_now=time_now, as_client_event=as_client_event, @@ -232,7 +232,7 @@ async def handle_room(event: RoomsForUser) -> None: "end": await end_token.to_string(self.store), } - d["state"] = await self._event_serializer.serialize_events( + d["state"] = self._event_serializer.serialize_events( current_state.values(), time_now=time_now, as_client_event=as_client_event, @@ -376,16 +376,14 @@ async def _room_initial_sync_parted( "messages": { "chunk": ( # Don't bundle aggregations as this is a deprecated API. - await self._event_serializer.serialize_events(messages, time_now) + self._event_serializer.serialize_events(messages, time_now) ), "start": await start_token.to_string(self.store), "end": await end_token.to_string(self.store), }, "state": ( # Don't bundle aggregations as this is a deprecated API. - await self._event_serializer.serialize_events( - room_state.values(), time_now - ) + self._event_serializer.serialize_events(room_state.values(), time_now) ), "presence": [], "receipts": [], @@ -404,7 +402,7 @@ async def _room_initial_sync_joined( # TODO: These concurrently time_now = self.clock.time_msec() # Don't bundle aggregations as this is a deprecated API. - state = await self._event_serializer.serialize_events( + state = self._event_serializer.serialize_events( current_state.values(), time_now ) @@ -480,7 +478,7 @@ async def get_receipts() -> List[JsonDict]: "messages": { "chunk": ( # Don't bundle aggregations as this is a deprecated API. 
- await self._event_serializer.serialize_events(messages, time_now) + self._event_serializer.serialize_events(messages, time_now) ), "start": await start_token.to_string(self.store), "end": await end_token.to_string(self.store), diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 5e3d3886eb1d..b37250aa3895 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -246,7 +246,7 @@ async def get_state_events( room_state = room_state_events[membership_event_id] now = self.clock.time_msec() - events = await self._event_serializer.serialize_events(room_state.values(), now) + events = self._event_serializer.serialize_events(room_state.values(), now) return events async def get_joined_members(self, requester: Requester, room_id: str) -> dict: diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 7469cc55a251..472688f04506 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -537,14 +537,16 @@ async def get_messages( state_dict = await self.store.get_events(list(state_ids.values())) state = state_dict.values() + aggregations = await self.store.get_bundled_aggregations(events) + time_now = self.clock.time_msec() chunk = { "chunk": ( - await self._event_serializer.serialize_events( + self._event_serializer.serialize_events( events, time_now, - bundle_aggregations=True, + bundle_aggregations=aggregations, as_client_event=as_client_event, ) ), @@ -553,7 +555,7 @@ async def get_messages( } if state: - chunk["state"] = await self._event_serializer.serialize_events( + chunk["state"] = self._event_serializer.serialize_events( state, time_now, as_client_event=as_client_event ) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 3d3a0f6ac30b..3d47163f25cf 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1181,6 +1181,16 @@ async def filter_evts(events: List[EventBase]) -> List[EventBase]: # `filtered` rather than the event we retrieved from the datastore. results["event"] = filtered[0] + # Fetch the aggregations. 
+ aggregations = await self.store.get_bundled_aggregations([results["event"]]) + aggregations.update( + await self.store.get_bundled_aggregations(results["events_before"]) + ) + aggregations.update( + await self.store.get_bundled_aggregations(results["events_after"]) + ) + results["aggregations"] = aggregations + if results["events_after"]: last_event_id = results["events_after"][-1].event_id else: diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index ab7eaab2fb56..0b153a682261 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -420,10 +420,10 @@ async def search( time_now = self.clock.time_msec() for context in contexts.values(): - context["events_before"] = await self._event_serializer.serialize_events( + context["events_before"] = self._event_serializer.serialize_events( context["events_before"], time_now ) - context["events_after"] = await self._event_serializer.serialize_events( + context["events_after"] = self._event_serializer.serialize_events( context["events_after"], time_now ) @@ -441,9 +441,7 @@ async def search( results.append( { "rank": rank_map[e.event_id], - "result": ( - await self._event_serializer.serialize_event(e, time_now) - ), + "result": self._event_serializer.serialize_event(e, time_now), "context": contexts.get(e.event_id, {}), } ) @@ -457,7 +455,7 @@ async def search( if state_results: s = {} for room_id, state_events in state_results.items(): - s[room_id] = await self._event_serializer.serialize_events( + s[room_id] = self._event_serializer.serialize_events( state_events, time_now ) diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index 6030373ebcbb..2e714ac87bfe 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -424,7 +424,7 @@ async def on_GET( event_ids = await self.store.get_current_state_ids(room_id) events = await self.store.get_events(event_ids.values()) now = self.clock.time_msec() - room_state = await self._event_serializer.serialize_events(events.values(), now) + room_state = self._event_serializer.serialize_events(events.values(), now) ret = {"state": room_state} return HTTPStatus.OK, ret @@ -744,22 +744,22 @@ async def on_GET( ) time_now = self.clock.time_msec() - results["events_before"] = await self._event_serializer.serialize_events( + results["events_before"] = self._event_serializer.serialize_events( results["events_before"], time_now, - bundle_aggregations=True, + bundle_aggregations=results["aggregations"], ) - results["event"] = await self._event_serializer.serialize_event( + results["event"] = self._event_serializer.serialize_event( results["event"], time_now, - bundle_aggregations=True, + bundle_aggregations=results["aggregations"], ) - results["events_after"] = await self._event_serializer.serialize_events( + results["events_after"] = self._event_serializer.serialize_events( results["events_after"], time_now, - bundle_aggregations=True, + bundle_aggregations=results["aggregations"], ) - results["state"] = await self._event_serializer.serialize_events( + results["state"] = self._event_serializer.serialize_events( results["state"], time_now ) diff --git a/synapse/rest/client/events.py b/synapse/rest/client/events.py index 13b72a045a4a..672c821061ff 100644 --- a/synapse/rest/client/events.py +++ b/synapse/rest/client/events.py @@ -91,7 +91,7 @@ async def on_GET( time_now = self.clock.time_msec() if event: - result = await self._event_serializer.serialize_event(event, time_now) + result = self._event_serializer.serialize_event(event, time_now) return 
200, result else: return 404, "Event not found." diff --git a/synapse/rest/client/notifications.py b/synapse/rest/client/notifications.py index acd0c9e135d1..8e427a96a320 100644 --- a/synapse/rest/client/notifications.py +++ b/synapse/rest/client/notifications.py @@ -72,7 +72,7 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: "actions": pa.actions, "ts": pa.received_ts, "event": ( - await self._event_serializer.serialize_event( + self._event_serializer.serialize_event( notif_events[pa.event_id], self.clock.time_msec(), event_format=format_event_for_client_v2_without_room_id, diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py index 3823498012d7..37d949a71e74 100644 --- a/synapse/rest/client/relations.py +++ b/synapse/rest/client/relations.py @@ -113,13 +113,14 @@ async def on_GET( now = self.clock.time_msec() # Do not bundle aggregations when retrieving the original event because # we want the content before relations are applied to it. - original_event = await self._event_serializer.serialize_event( - event, now, bundle_aggregations=False + original_event = self._event_serializer.serialize_event( + event, now, bundle_aggregations=None ) # The relations returned for the requested event do include their # bundled aggregations. - serialized_events = await self._event_serializer.serialize_events( - events, now, bundle_aggregations=True + aggregations = await self.store.get_bundled_aggregations(events) + serialized_events = self._event_serializer.serialize_events( + events, now, bundle_aggregations=aggregations ) return_value = pagination_chunk.to_dict() @@ -308,7 +309,7 @@ async def on_GET( ) now = self.clock.time_msec() - serialized_events = await self._event_serializer.serialize_events(events, now) + serialized_events = self._event_serializer.serialize_events(events, now) return_value = result.to_dict() return_value["chunk"] = serialized_events diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 40330749e546..da6014900a9c 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -642,6 +642,7 @@ class RoomEventServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() self.clock = hs.get_clock() + self._store = hs.get_datastore() self.event_handler = hs.get_event_handler() self._event_serializer = hs.get_event_client_serializer() self.auth = hs.get_auth() @@ -660,10 +661,13 @@ async def on_GET( # https://matrix.org/docs/spec/client_server/r0.5.0#get-matrix-client-r0-rooms-roomid-event-eventid raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND) - time_now = self.clock.time_msec() if event: - event_dict = await self._event_serializer.serialize_event( - event, time_now, bundle_aggregations=True + # Ensure there are bundled aggregations available. 
+ aggregations = await self._store.get_bundled_aggregations([event]) + + time_now = self.clock.time_msec() + event_dict = self._event_serializer.serialize_event( + event, time_now, bundle_aggregations=aggregations ) return 200, event_dict @@ -708,16 +712,20 @@ async def on_GET( raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND) time_now = self.clock.time_msec() - results["events_before"] = await self._event_serializer.serialize_events( - results["events_before"], time_now, bundle_aggregations=True + results["events_before"] = self._event_serializer.serialize_events( + results["events_before"], + time_now, + bundle_aggregations=results["aggregations"], ) - results["event"] = await self._event_serializer.serialize_event( - results["event"], time_now, bundle_aggregations=True + results["event"] = self._event_serializer.serialize_event( + results["event"], time_now, bundle_aggregations=results["aggregations"] ) - results["events_after"] = await self._event_serializer.serialize_events( - results["events_after"], time_now, bundle_aggregations=True + results["events_after"] = self._event_serializer.serialize_events( + results["events_after"], + time_now, + bundle_aggregations=results["aggregations"], ) - results["state"] = await self._event_serializer.serialize_events( + results["state"] = self._event_serializer.serialize_events( results["state"], time_now ) diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index e99a943d0d4b..a3e57e4b20ab 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -17,7 +17,6 @@ from typing import ( TYPE_CHECKING, Any, - Awaitable, Callable, Dict, Iterable, @@ -395,7 +394,7 @@ async def encode_invited( """ invited = {} for room in rooms: - invite = await self._event_serializer.serialize_event( + invite = self._event_serializer.serialize_event( room.invite, time_now, token_id=token_id, @@ -432,7 +431,7 @@ async def encode_knocked( """ knocked = {} for room in rooms: - knock = await self._event_serializer.serialize_event( + knock = self._event_serializer.serialize_event( room.knock, time_now, token_id=token_id, @@ -525,21 +524,14 @@ async def encode_room( The room, encoded in our response format """ - def serialize(events: Iterable[EventBase]) -> Awaitable[List[JsonDict]]: + def serialize( + events: Iterable[EventBase], + aggregations: Optional[Dict[str, Dict[str, Any]]] = None, + ) -> List[JsonDict]: return self._event_serializer.serialize_events( events, time_now=time_now, - # Don't bother to bundle aggregations if the timeline is unlimited, - # as clients will have all the necessary information. - # bundle_aggregations=room.timeline.limited, - # - # richvdh 2021-12-15: disable this temporarily as it has too high an - # overhead for initialsyncs. We need to figure out a way that the - # bundling can be done *before* the events are stored in the - # SyncResponseCache so that this part can be synchronous. - # - # Ensure to re-enable the test at tests/rest/client/test_relations.py::RelationsTestCase.test_bundled_aggregations. 
- bundle_aggregations=False, + bundle_aggregations=aggregations, token_id=token_id, event_format=event_formatter, only_event_fields=only_fields, @@ -561,8 +553,21 @@ def serialize(events: Iterable[EventBase]) -> Awaitable[List[JsonDict]]: event.room_id, ) - serialized_state = await serialize(state_events) - serialized_timeline = await serialize(timeline_events) + serialized_state = serialize(state_events) + # Don't bother to bundle aggregations if the timeline is unlimited, + # as clients will have all the necessary information. + # bundle_aggregations=room.timeline.limited, + # + # richvdh 2021-12-15: disable this temporarily as it has too high an + # overhead for initialsyncs. We need to figure out a way that the + # bundling can be done *before* the events are stored in the + # SyncResponseCache so that this part can be synchronous. + # + # Ensure to re-enable the test at tests/rest/client/test_relations.py::RelationsTestCase.test_bundled_aggregations. + # if room.timeline.limited: + # aggregations = await self.store.get_bundled_aggregations(timeline_events) + aggregations = None + serialized_timeline = serialize(timeline_events, aggregations) account_data = room.account_data diff --git a/synapse/server.py b/synapse/server.py index 185e40e4da0f..3032f0b738a8 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -759,7 +759,7 @@ def get_oidc_handler(self) -> "OidcHandler": @cache_in_self def get_event_client_serializer(self) -> EventClientSerializer: - return EventClientSerializer(self) + return EventClientSerializer() @cache_in_self def get_password_policy_handler(self) -> PasswordPolicyHandler: diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 4ff6aed253a3..c6c4bd18da3e 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -13,14 +13,30 @@ # limitations under the License. 
import logging -from typing import List, Optional, Tuple, Union, cast +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Iterable, + List, + Optional, + Tuple, + Union, + cast, +) import attr +from frozendict import frozendict -from synapse.api.constants import RelationTypes +from synapse.api.constants import EventTypes, RelationTypes from synapse.events import EventBase from synapse.storage._base import SQLBaseStore -from synapse.storage.database import LoggingTransaction, make_in_list_sql_clause +from synapse.storage.database import ( + DatabasePool, + LoggingDatabaseConnection, + LoggingTransaction, + make_in_list_sql_clause, +) from synapse.storage.databases.main.stream import generate_pagination_where_clause from synapse.storage.relations import ( AggregationPaginationToken, @@ -29,10 +45,24 @@ ) from synapse.util.caches.descriptors import cached +if TYPE_CHECKING: + from synapse.server import HomeServer + logger = logging.getLogger(__name__) class RelationsWorkerStore(SQLBaseStore): + def __init__( + self, + database: DatabasePool, + db_conn: LoggingDatabaseConnection, + hs: "HomeServer", + ): + super().__init__(database, db_conn, hs) + + self._msc1849_enabled = hs.config.experimental.msc1849_enabled + self._msc3440_enabled = hs.config.experimental.msc3440_enabled + @cached(tree=True) async def get_relations_for_event( self, @@ -515,6 +545,98 @@ def _get_if_user_has_annotated_event(txn: LoggingTransaction) -> bool: "get_if_user_has_annotated_event", _get_if_user_has_annotated_event ) + async def _get_bundled_aggregation_for_event( + self, event: EventBase + ) -> Optional[Dict[str, Any]]: + """Generate bundled aggregations for an event. + + Note that this does not use a cache, but depends on cached methods. + + Args: + event: The event to calculate bundled aggregations for. + + Returns: + The bundled aggregations for an event, if bundled aggregations are + enabled and the event can have bundled aggregations. + """ + # State events and redacted events do not get bundled aggregations. + if event.is_state() or event.internal_metadata.is_redacted(): + return None + + # Do not bundle aggregations for an event which represents an edit or an + # annotation. It does not make sense for them to have related events. + relates_to = event.content.get("m.relates_to") + if isinstance(relates_to, (dict, frozendict)): + relation_type = relates_to.get("rel_type") + if relation_type in (RelationTypes.ANNOTATION, RelationTypes.REPLACE): + return None + + event_id = event.event_id + room_id = event.room_id + + # The bundled aggregations to include, a mapping of relation type to a + # type-specific value. Some types include the direct return type here + # while others need more processing during serialization. + aggregations: Dict[str, Any] = {} + + annotations = await self.get_aggregation_groups_for_event(event_id, room_id) + if annotations.chunk: + aggregations[RelationTypes.ANNOTATION] = annotations.to_dict() + + references = await self.get_relations_for_event( + event_id, room_id, RelationTypes.REFERENCE, direction="f" + ) + if references.chunk: + aggregations[RelationTypes.REFERENCE] = references.to_dict() + + edit = None + if event.type == EventTypes.Message: + edit = await self.get_applicable_edit(event_id, room_id) + + if edit: + aggregations[RelationTypes.REPLACE] = edit + + # If this event is the start of a thread, include a summary of the replies. 
+ if self._msc3440_enabled: + ( + thread_count, + latest_thread_event, + ) = await self.get_thread_summary(event_id, room_id) + if latest_thread_event: + aggregations[RelationTypes.THREAD] = { + # Don't bundle aggregations as this could recurse forever. + "latest_event": latest_thread_event, + "count": thread_count, + } + + # Store the bundled aggregations in the event metadata for later use. + return aggregations + + async def get_bundled_aggregations( + self, events: Iterable[EventBase] + ) -> Dict[str, Dict[str, Any]]: + """Generate bundled aggregations for events. + + Args: + events: The iterable of events to calculate bundled aggregations for. + + Returns: + A map of event ID to the bundled aggregation for the event. Not all + events may have bundled aggregations in the results. + """ + # If bundled aggregations are disabled, nothing to do. + if not self._msc1849_enabled: + return {} + + # TODO Parallelize. + results = {} + for event in events: + event_result = await self._get_bundled_aggregation_for_event(event) + if event_result is not None: + results[event.event_id] = event_result + + return results + class RelationsStore(RelationsWorkerStore): pass diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py index b58452195a82..fe5b536d9705 100644 --- a/tests/rest/client/test_retention.py +++ b/tests/rest/client/test_retention.py @@ -228,7 +228,7 @@ def get_event(self, event_id, expect_none=False): self.assertIsNotNone(event) time_now = self.clock.time_msec() - serialized = self.get_success(self.serializer.serialize_event(event, time_now)) + serialized = self.serializer.serialize_event(event, time_now) return serialized From 2bb4bd126946df46aacf6849f0acb01e78f7d807 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 7 Jan 2022 16:43:21 +0000 Subject: [PATCH 070/474] Test that bans win a join against a race when computing `/sync` response (#11701) --- changelog.d/11701.misc | 1 + tests/handlers/test_sync.py | 97 +++++++++++++++++++++++++++++++++++-- tests/rest/client/utils.py | 10 ++++ 3 files changed, 105 insertions(+), 3 deletions(-) create mode 100644 changelog.d/11701.misc diff --git a/changelog.d/11701.misc b/changelog.d/11701.misc new file mode 100644 index 000000000000..68905e041240 --- /dev/null +++ b/changelog.d/11701.misc @@ -0,0 +1 @@ +Add a test for [an edge case](https://github.com/matrix-org/synapse/pull/11532#discussion_r769104461) in the `/sync` logic. \ No newline at end of file diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py index 638186f173f0..07a760e91aed 100644 --- a/tests/handlers/test_sync.py +++ b/tests/handlers/test_sync.py @@ -11,15 +11,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- from typing import Optional -from unittest.mock import Mock +from unittest.mock import MagicMock, Mock, patch from synapse.api.constants import EventTypes, JoinRules from synapse.api.errors import Codes, ResourceLimitError from synapse.api.filtering import Filtering from synapse.api.room_versions import RoomVersions -from synapse.handlers.sync import SyncConfig +from synapse.handlers.sync import SyncConfig, SyncResult from synapse.rest import admin from synapse.rest.client import knock, login, room from synapse.server import HomeServer @@ -27,6 +26,7 @@ import tests.unittest import tests.utils +from tests.test_utils import make_awaitable class SyncTestCase(tests.unittest.HomeserverTestCase): @@ -186,6 +186,97 @@ def test_unknown_room_version(self): self.assertNotIn(invite_room, [r.room_id for r in result.invited]) self.assertNotIn(knock_room, [r.room_id for r in result.knocked]) + def test_ban_wins_race_with_join(self): + """Rooms shouldn't appear under "joined" if a join loses a race to a ban. + + A complicated edge case. Imagine the following scenario: + + * you attempt to join a room + * racing with that is a ban which comes in over federation, which ends up with + an earlier stream_ordering than the join. + * you get a sync response with a sync token which is _after_ the ban, but before + the join + * now your join lands; it is a valid event because its `prev_event`s predate the + ban, but will not make it into current_state_events (because bans win over + joins in state res, essentially). + * When we do a sync from the incremental sync, the only event in the timeline + is your join ... and yet you aren't joined. + + The ban coming in over federation isn't crucial for this behaviour; the key + requirements are: + 1. the homeserver generates a join event with prev_events that precede the ban + (so that it passes the "are you banned" test) + 2. the join event has a stream_ordering after that of the ban. + + We use monkeypatching to artificially trigger condition (1). + """ + # A local user Alice creates a room. + owner = self.register_user("alice", "password") + owner_tok = self.login(owner, "password") + room_id = self.helper.create_room_as(owner, is_public=True, tok=owner_tok) + + # Do a sync as Alice to get the latest event in the room. + alice_sync_result: SyncResult = self.get_success( + self.sync_handler.wait_for_sync_for_user( + create_requester(owner), generate_sync_config(owner) + ) + ) + self.assertEqual(len(alice_sync_result.joined), 1) + self.assertEqual(alice_sync_result.joined[0].room_id, room_id) + last_room_creation_event_id = ( + alice_sync_result.joined[0].timeline.events[-1].event_id + ) + + # Eve, a ne'er-do-well, registers. + eve = self.register_user("eve", "password") + eve_token = self.login(eve, "password") + + # Alice preemptively bans Eve. + self.helper.ban(room_id, owner, eve, tok=owner_tok) + + # Eve syncs. + eve_requester = create_requester(eve) + eve_sync_config = generate_sync_config(eve) + eve_sync_after_ban: SyncResult = self.get_success( + self.sync_handler.wait_for_sync_for_user(eve_requester, eve_sync_config) + ) + + # Sanity check this sync result. We shouldn't be joined to the room. + self.assertEqual(eve_sync_after_ban.joined, []) + + # Eve tries to join the room. We monkey patch the internal logic which selects + # the prev_events used when creating the join event, such that the ban does not + # precede the join. 
+ mocked_get_prev_events = patch.object( + self.hs.get_datastore(), + "get_prev_events_for_room", + new_callable=MagicMock, + return_value=make_awaitable([last_room_creation_event_id]), + ) + with mocked_get_prev_events: + self.helper.join(room_id, eve, tok=eve_token) + + # Eve makes a second, incremental sync. + eve_incremental_sync_after_join: SyncResult = self.get_success( + self.sync_handler.wait_for_sync_for_user( + eve_requester, + eve_sync_config, + since_token=eve_sync_after_ban.next_batch, + ) + ) + # Eve should not see herself as joined to the room. + self.assertEqual(eve_incremental_sync_after_join.joined, []) + + # If we did a third initial sync, we should _still_ see eve is not joined to the room. + eve_initial_sync_after_join: SyncResult = self.get_success( + self.sync_handler.wait_for_sync_for_user( + eve_requester, + eve_sync_config, + since_token=None, + ) + ) + self.assertEqual(eve_initial_sync_after_join.joined, []) + _request_key = 0 diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py index 1af5e5cee504..842438358021 100644 --- a/tests/rest/client/utils.py +++ b/tests/rest/client/utils.py @@ -196,6 +196,16 @@ def leave(self, room=None, user=None, expect_code=200, tok=None): expect_code=expect_code, ) + def ban(self, room: str, src: str, targ: str, **kwargs: object): + """A convenience helper: `change_membership` with `membership` preset to "ban".""" + self.change_membership( + room=room, + src=src, + targ=targ, + membership=Membership.BAN, + **kwargs, + ) + def change_membership( self, room: str, From d3cf0730f8bf3de58d37060eff0dca3f69b4a0e1 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 7 Jan 2022 19:13:41 +0000 Subject: [PATCH 071/474] Optionally use an on-disk sqlite db in tests (#11702) * Optionally use an on-disk sqlite db in tests When debugging a test it is sometimes useful to inspect the state of the DB. This is not easy when the db is in-memory: one cannot attach the sqlite CLI to another process's DB. With this change, if SYNAPSE_TEST_PERSIST_SQLITE_DB is set, we use `_trial_temp/test.db` as our sqlite database. One can then use `sqlite3 _trial_temp/test.db` and query to your heart's content. The DB is destroyed and recreated between different test cases. Co-authored-by: Patrick Cloke --- changelog.d/11702.misc | 1 + tests/server.py | 19 ++++++++++++++++++- tests/utils.py | 4 ++++ 3 files changed, 23 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11702.misc diff --git a/changelog.d/11702.misc b/changelog.d/11702.misc new file mode 100644 index 000000000000..fc1069cae034 --- /dev/null +++ b/changelog.d/11702.misc @@ -0,0 +1 @@ +Add the option to write sqlite test dbs to disk when running tests. \ No newline at end of file diff --git a/tests/server.py b/tests/server.py index ca2b7a5b9771..a0cd14ea45b4 100644 --- a/tests/server.py +++ b/tests/server.py @@ -14,6 +14,8 @@ import hashlib import json import logging +import os +import os.path import time import uuid import warnings @@ -71,6 +73,7 @@ POSTGRES_HOST, POSTGRES_PASSWORD, POSTGRES_USER, + SQLITE_PERSIST_DB, USE_POSTGRES_FOR_TESTS, MockClock, default_config, @@ -739,9 +742,23 @@ def setup_test_homeserver( }, } else: + if SQLITE_PERSIST_DB: + # The current working directory is in _trial_temp, so this gets created within that directory. + test_db_location = os.path.abspath("test.db") + logger.debug("Will persist db to %s", test_db_location) + # Ensure each test gets a clean database. 
+ try: + os.remove(test_db_location) + except FileNotFoundError: + pass + else: + logger.debug("Removed existing DB at %s", test_db_location) + else: + test_db_location = ":memory:" + database_config = { "name": "sqlite3", - "args": {"database": ":memory:", "cp_min": 1, "cp_max": 1}, + "args": {"database": test_db_location, "cp_min": 1, "cp_max": 1}, } if "db_txn_limit" in kwargs: diff --git a/tests/utils.py b/tests/utils.py index 6d013e851845..c06fc320f3df 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -42,6 +42,10 @@ POSTGRES_PASSWORD = os.environ.get("SYNAPSE_POSTGRES_PASSWORD", None) POSTGRES_BASE_DB = "_synapse_unit_tests_base_%s" % (os.getpid(),) +# When debugging a specific test, it's occasionally useful to write the +# DB to disk and query it with the sqlite CLI. +SQLITE_PERSIST_DB = os.environ.get("SYNAPSE_TEST_PERSIST_SQLITE_DB") is not None + # the dbname we will connect to in order to create the base database. POSTGRES_DBNAME_FOR_INITIAL_CREATE = "postgres" From 8e57584a5859a9002759963eb546d523d2498a01 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 7 Jan 2022 19:27:58 -0500 Subject: [PATCH 072/474] Support spaces with > 50 rooms in the /hierarchy endpoint. (#11695) By returning all of the m.space.child state of the space, not just the first 50. The number of rooms returned is still capped at 50. For the federation API this implies that the requesting server will need to individually query for any other rooms it is not joined to. --- changelog.d/11695.bugfix | 1 + synapse/handlers/room_summary.py | 30 +++++++++++++++++++-------- tests/handlers/test_room_summary.py | 32 +++++++++++++++++++++++++++++ 3 files changed, 55 insertions(+), 8 deletions(-) create mode 100644 changelog.d/11695.bugfix diff --git a/changelog.d/11695.bugfix b/changelog.d/11695.bugfix new file mode 100644 index 000000000000..7799aefb8258 --- /dev/null +++ b/changelog.d/11695.bugfix @@ -0,0 +1 @@ +Fix a bug where the only the first 50 rooms from a space were returned from the `/hierarchy` API. This has existed since the introduction of the API in Synapse v1.41.0. diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py index 9ef88feb8ab7..7c60cb0bdd27 100644 --- a/synapse/handlers/room_summary.py +++ b/synapse/handlers/room_summary.py @@ -153,6 +153,9 @@ async def get_space_summary( rooms_result: List[JsonDict] = [] events_result: List[JsonDict] = [] + if max_rooms_per_space is None or max_rooms_per_space > MAX_ROOMS_PER_SPACE: + max_rooms_per_space = MAX_ROOMS_PER_SPACE + while room_queue and len(rooms_result) < MAX_ROOMS: queue_entry = room_queue.popleft() room_id = queue_entry.room_id @@ -167,7 +170,7 @@ async def get_space_summary( # The client-specified max_rooms_per_space limit doesn't apply to the # room_id specified in the request, so we ignore it if this is the # first room we are processing. - max_children = max_rooms_per_space if processed_rooms else None + max_children = max_rooms_per_space if processed_rooms else MAX_ROOMS if is_in_room: room_entry = await self._summarize_local_room( @@ -395,7 +398,7 @@ async def _get_room_hierarchy( None, room_id, suggested_only, - # TODO Handle max children. + # Do not limit the maximum children. max_children=None, ) @@ -525,6 +528,10 @@ async def federation_space_summary( rooms_result: List[JsonDict] = [] events_result: List[JsonDict] = [] + # Set a limit on the number of rooms to return. 
+ if max_rooms_per_space is None or max_rooms_per_space > MAX_ROOMS_PER_SPACE: + max_rooms_per_space = MAX_ROOMS_PER_SPACE + while room_queue and len(rooms_result) < MAX_ROOMS: room_id = room_queue.popleft() if room_id in processed_rooms: @@ -583,7 +590,9 @@ async def get_federation_hierarchy( # Iterate through each child and potentially add it, but not its children, # to the response. - for child_room in root_room_entry.children_state_events: + for child_room in itertools.islice( + root_room_entry.children_state_events, MAX_ROOMS_PER_SPACE + ): room_id = child_room.get("state_key") assert isinstance(room_id, str) # If the room is unknown, skip it. @@ -633,8 +642,8 @@ async def _summarize_local_room( suggested_only: True if only suggested children should be returned. Otherwise, all children are returned. max_children: - The maximum number of children rooms to include. This is capped - to a server-set limit. + The maximum number of children rooms to include. A value of None + means no limit. Returns: A room entry if the room should be returned. None, otherwise. @@ -656,8 +665,13 @@ async def _summarize_local_room( # we only care about suggested children child_events = filter(_is_suggested_child_event, child_events) - if max_children is None or max_children > MAX_ROOMS_PER_SPACE: - max_children = MAX_ROOMS_PER_SPACE + # TODO max_children is legacy code for the /spaces endpoint. + if max_children is not None: + child_iter: Iterable[EventBase] = itertools.islice( + child_events, max_children + ) + else: + child_iter = child_events stripped_events: List[JsonDict] = [ { @@ -668,7 +682,7 @@ async def _summarize_local_room( "sender": e.sender, "origin_server_ts": e.origin_server_ts, } - for e in itertools.islice(child_events, max_children) + for e in child_iter ] return _RoomEntry(room_id, room_entry, stripped_events) diff --git a/tests/handlers/test_room_summary.py b/tests/handlers/test_room_summary.py index e5a6a6c747bf..ce3ebcf2f2fa 100644 --- a/tests/handlers/test_room_summary.py +++ b/tests/handlers/test_room_summary.py @@ -253,6 +253,38 @@ def test_simple_space(self): ) self._assert_hierarchy(result, expected) + def test_large_space(self): + """Test a space with a large number of rooms.""" + rooms = [self.room] + # Make at least 51 rooms that are part of the space. + for _ in range(55): + room = self.helper.create_room_as(self.user, tok=self.token) + self._add_child(self.space, room, self.token) + rooms.append(room) + + result = self.get_success(self.handler.get_space_summary(self.user, self.space)) + # The spaces result should have the space and the first 50 rooms in it, + # along with the links from space -> room for those 50 rooms. + expected = [(self.space, rooms[:50])] + [(room, []) for room in rooms[:49]] + self._assert_rooms(result, expected) + + # The result should have the space and the rooms in it, along with the links + # from space -> room. + expected = [(self.space, rooms)] + [(room, []) for room in rooms] + + # Make two requests to fully paginate the results. + result = self.get_success( + self.handler.get_room_hierarchy(create_requester(self.user), self.space) + ) + result2 = self.get_success( + self.handler.get_room_hierarchy( + create_requester(self.user), self.space, from_token=result["next_batch"] + ) + ) + # Combine the results. 
+ result["rooms"] += result2["rooms"] + self._assert_hierarchy(result, expected) + def test_visibility(self): """A user not in a space cannot inspect it.""" user2 = self.register_user("user2", "pass") From 3be63654e40a137c469318e67048211800b9f985 Mon Sep 17 00:00:00 2001 From: kegsay Date: Mon, 10 Jan 2022 11:46:40 +0000 Subject: [PATCH 073/474] Prettier complement logs (#11707) * Prettier complement logs * Changelog --- .github/workflows/tests.yml | 3 ++- changelog.d/11707.misc | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11707.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index cb72e1a233ec..6fa61ab55926 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -374,7 +374,8 @@ jobs: working-directory: complement/dockerfiles # Run Complement - - run: go test -v -tags synapse_blacklist,msc2403 ./tests/... + - run: set -o pipefail && go test -v -json -tags synapse_blacklist,msc2403 ./tests/... 2>&1 | gotestfmt + shell: bash env: COMPLEMENT_BASE_IMAGE: complement-synapse:latest working-directory: complement diff --git a/changelog.d/11707.misc b/changelog.d/11707.misc new file mode 100644 index 000000000000..ef1e01cac82f --- /dev/null +++ b/changelog.d/11707.misc @@ -0,0 +1 @@ +Improve Complement test output for Gitub Actions. From c43dd4d01b2344014004dff3834a86cd59e85adc Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 10 Jan 2022 13:40:46 +0000 Subject: [PATCH 074/474] Deal with mypy errors w/ type-hinted pynacl 1.5.0 (#11714) * Deal with mypy errors w/ type-hinted pynacl 1.5.0 Fixes #11644. I really don't like that we're monkey patching pynacl SignedKey instances with alg and version objects. But I'm too scared to make the changes necessary right now. (Ideally I would replace `signedjson.types.SingingKey` with a runtime class which wraps or inherits from `nacl.signing.SigningKey`.) C.f. https://github.com/matrix-org/python-signedjson/issues/16 --- changelog.d/11714.misc | 1 + tests/crypto/test_event_signing.py | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11714.misc diff --git a/changelog.d/11714.misc b/changelog.d/11714.misc new file mode 100644 index 000000000000..7f39bf0e3dca --- /dev/null +++ b/changelog.d/11714.misc @@ -0,0 +1 @@ +Fix a typechecker problem related to our (ab)use of `nacl.signing.SigningKey`s. \ No newline at end of file diff --git a/tests/crypto/test_event_signing.py b/tests/crypto/test_event_signing.py index 1c920157f506..a72a0103d3b8 100644 --- a/tests/crypto/test_event_signing.py +++ b/tests/crypto/test_event_signing.py @@ -14,6 +14,7 @@ import nacl.signing +import signedjson.types from unpaddedbase64 import decode_base64 from synapse.api.room_versions import RoomVersions @@ -35,7 +36,12 @@ class EventSigningTestCase(unittest.TestCase): def setUp(self): - self.signing_key = nacl.signing.SigningKey(SIGNING_KEY_SEED) + # NB: `signedjson` expects `nacl.signing.SigningKey` instances which have been + # monkeypatched to include new `alg` and `version` attributes. This is captured + # by the `signedjson.types.SigningKey` protocol. + self.signing_key: signedjson.types.SigningKey = nacl.signing.SigningKey( + SIGNING_KEY_SEED + ) self.signing_key.alg = KEY_ALG self.signing_key.version = KEY_VER From ffd227c3822b6e20b9dc203b3eae253adc0cf663 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Mon, 10 Jan 2022 15:38:22 +0000 Subject: [PATCH 075/474] Fix docstring on `add_account_data_for_user`. 
(#11716) --- changelog.d/11716.misc | 1 + synapse/handlers/account_data.py | 2 +- synapse/storage/databases/main/account_data.py | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/11716.misc diff --git a/changelog.d/11716.misc b/changelog.d/11716.misc new file mode 100644 index 000000000000..08f731049806 --- /dev/null +++ b/changelog.d/11716.misc @@ -0,0 +1 @@ +Fix docstring on `add_account_data_for_user`. \ No newline at end of file diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py index 96273e2f81c3..bad48713bcb1 100644 --- a/synapse/handlers/account_data.py +++ b/synapse/handlers/account_data.py @@ -77,7 +77,7 @@ async def add_account_data_to_room( async def add_account_data_for_user( self, user_id: str, account_data_type: str, content: JsonDict ) -> int: - """Add some account_data to a room for a user. + """Add some global account_data for a user. Args: user_id: The user to add a tag for. diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 32a553fdd7bd..93db71d1b489 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -450,7 +450,7 @@ async def add_account_data_to_room( async def add_account_data_for_user( self, user_id: str, account_data_type: str, content: JsonDict ) -> int: - """Add some account_data to a room for a user. + """Add some global account_data for a user. Args: user_id: The user to add a tag for. From 7c3408d1a88a24c2db917ab48cb15d13ac683427 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 10 Jan 2022 17:06:42 +0000 Subject: [PATCH 076/474] Document the `SYNAPSE_TEST_PERSIST_SQLITE_DB` unit test env var (#11715) --- changelog.d/11715.doc | 1 + docs/development/contributing_guide.md | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+) create mode 100644 changelog.d/11715.doc diff --git a/changelog.d/11715.doc b/changelog.d/11715.doc new file mode 100644 index 000000000000..32b7c10b0bf9 --- /dev/null +++ b/changelog.d/11715.doc @@ -0,0 +1 @@ +Document the new `SYNAPSE_TEST_PERSIST_SQLITE_DB` environment variable in the contributing guide. diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index abdb8084382b..121f7c0687f5 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -169,6 +169,27 @@ To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`: SYNAPSE_TEST_LOG_LEVEL=DEBUG trial tests ``` +By default, tests will use an in-memory SQLite database for test data. For additional +help with debugging, one can use an on-disk SQLite database file instead, in order to +review database state during and after running tests. This can be done by setting +the `SYNAPSE_TEST_PERSIST_SQLITE_DB` environment variable. Doing so will cause the +database state to be stored in a file named `test.db` under the trial process' +working directory. Typically, this ends up being `_trial_temp/test.db`. For example: + +```sh +SYNAPSE_TEST_PERSIST_SQLITE_DB=1 trial tests +``` + +The database file can then be inspected with: + +```sh +sqlite3 _trial_temp/test.db +``` + +Note that the database file is cleared at the beginning of each test run. Thus it +will always only contain the data generated by the *last run test*. Though generally +when debugging, one is only running a single test anyway. 
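For reference, the test-harness logic behind this flag (condensed from the `tests/utils.py` and `tests/server.py` changes earlier in this series, with the logging omitted) is roughly:

```python
import os

# tests/utils.py: the flag is presence-based; any value enables it.
SQLITE_PERSIST_DB = os.environ.get("SYNAPSE_TEST_PERSIST_SQLITE_DB") is not None

# tests/server.py (condensed): pick the database location accordingly, and
# delete any leftover file so each test run starts from a clean database.
if SQLITE_PERSIST_DB:
    test_db_location = os.path.abspath("test.db")
    try:
        os.remove(test_db_location)
    except FileNotFoundError:
        pass
else:
    test_db_location = ":memory:"
```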
+ ### Running tests under PostgreSQL Invoking `trial` as above will use an in-memory SQLite database. This is great for From 338e70c6170fe6f964f2080b2585eb2add9f5b77 Mon Sep 17 00:00:00 2001 From: Michael Kaye <1917473+michaelkaye@users.noreply.github.com> Date: Mon, 10 Jan 2022 23:18:56 +0000 Subject: [PATCH 077/474] Complement environment variable name change and update .gitignore. (#11718) --- .gitignore | 4 ++++ changelog.d/11718.misc | 1 + scripts-dev/complement.sh | 2 +- 3 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11718.misc diff --git a/.gitignore b/.gitignore index fe137f337019..8eb4eda73d71 100644 --- a/.gitignore +++ b/.gitignore @@ -50,3 +50,7 @@ __pycache__/ # docs book/ + +# complement +/complement-master +/master.tar.gz diff --git a/changelog.d/11718.misc b/changelog.d/11718.misc new file mode 100644 index 000000000000..91dc5b5874f9 --- /dev/null +++ b/changelog.d/11718.misc @@ -0,0 +1 @@ +Complement environment variable name change and update `.gitignore`. diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index 53295b58fca9..820427be9e48 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -47,7 +47,7 @@ if [[ -n "$WORKERS" ]]; then COMPLEMENT_DOCKERFILE=SynapseWorkers.Dockerfile # And provide some more configuration to complement. export COMPLEMENT_CA=true - export COMPLEMENT_VERSION_CHECK_ITERATIONS=500 + export COMPLEMENT_SPAWN_HS_TIMEOUT_SECS=25 else export COMPLEMENT_BASE_IMAGE=complement-synapse COMPLEMENT_DOCKERFILE=Synapse.Dockerfile From d41c4654db03dc76812e98222feb62668a1bf94c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 12 Jan 2022 10:37:57 +0000 Subject: [PATCH 078/474] Use buildkit's cache feature to speed up docker builds (#11691) Having spent much of the last week attempting to run complement tests from somewhere with damp string instead of internet... something had to be done. --- .github/workflows/tests.yml | 2 ++ changelog.d/11691.misc | 1 + docker/Dockerfile | 26 +++++++++++++++++++++----- scripts-dev/complement.sh | 4 ++++ 4 files changed, 28 insertions(+), 5 deletions(-) create mode 100644 changelog.d/11691.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 6fa61ab55926..4f58069702dc 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -366,6 +366,8 @@ jobs: # Build initial Synapse image - run: docker build -t matrixdotorg/synapse:latest -f docker/Dockerfile . working-directory: synapse + env: + DOCKER_BUILDKIT: 1 # Build a ready-to-run Synapse image based on the initial image above. # This new image includes a config file, keys for signing and TLS, and diff --git a/changelog.d/11691.misc b/changelog.d/11691.misc new file mode 100644 index 000000000000..383d0b306488 --- /dev/null +++ b/changelog.d/11691.misc @@ -0,0 +1 @@ +Use buildkit's cache feature to speed up docker builds. diff --git a/docker/Dockerfile b/docker/Dockerfile index 2bdc607e66e6..306f75ae56cc 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,14 +1,17 @@ # Dockerfile to build the matrixdotorg/synapse docker images. # +# Note that it uses features which are only available in BuildKit - see +# https://docs.docker.com/go/buildkit/ for more information. +# # To build the image, run `docker build` command from the root of the # synapse repository: # -# docker build -f docker/Dockerfile . +# DOCKER_BUILDKIT=1 docker build -f docker/Dockerfile . 
# # There is an optional PYTHON_VERSION build argument which sets the # version of python to build against: for example: # -# docker build -f docker/Dockerfile --build-arg PYTHON_VERSION=3.6 . +# DOCKER_BUILDKIT=1 docker build -f docker/Dockerfile --build-arg PYTHON_VERSION=3.9 . # ARG PYTHON_VERSION=3.8 @@ -19,7 +22,16 @@ ARG PYTHON_VERSION=3.8 FROM docker.io/python:${PYTHON_VERSION}-slim as builder # install the OS build deps -RUN apt-get update && apt-get install -y \ +# +# RUN --mount is specific to buildkit and is documented at +# https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount. +# Here we use it to set up a cache for apt, to improve rebuild speeds on +# slow connections. +# +RUN \ + --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ + apt-get update && apt-get install -y \ build-essential \ libffi-dev \ libjpeg-dev \ @@ -44,7 +56,8 @@ COPY synapse/python_dependencies.py /synapse/synapse/python_dependencies.py # used while you develop on the source # # This is aiming at installing the `install_requires` and `extras_require` from `setup.py` -RUN pip install --prefix="/install" --no-warn-script-location \ +RUN --mount=type=cache,target=/root/.cache/pip \ + pip install --prefix="/install" --no-warn-script-location \ /synapse[all] # Copy over the rest of the project @@ -66,7 +79,10 @@ LABEL org.opencontainers.image.documentation='https://github.com/matrix-org/syna LABEL org.opencontainers.image.source='https://github.com/matrix-org/synapse.git' LABEL org.opencontainers.image.licenses='Apache-2.0' -RUN apt-get update && apt-get install -y \ +RUN \ + --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ + apt-get update && apt-get install -y \ curl \ gosu \ libjpeg62-turbo \ diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index 820427be9e48..67a22d3ed3e7 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -23,6 +23,9 @@ # Exit if a line returns a non-zero exit code set -e +# enable buildkit for the docker builds +export DOCKER_BUILDKIT=1 + # Change to the repository root cd "$(dirname $0)/.." @@ -65,4 +68,5 @@ if [[ -n "$1" ]]; then fi # Run the tests! +echo "Images built; running complement" go test -v -tags synapse_blacklist,msc2403 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests/... From 99ba5ae7b73b74e27c17f68f5998fee985a969b2 Mon Sep 17 00:00:00 2001 From: haslersn Date: Wed, 12 Jan 2022 15:43:48 +0100 Subject: [PATCH 079/474] Fix documentation of supported PostgreSQL version (#11725) Signed-off-by: Sebastian Hasler --- changelog.d/11725.doc | 1 + docs/postgres.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11725.doc diff --git a/changelog.d/11725.doc b/changelog.d/11725.doc new file mode 100644 index 000000000000..46eb9b814fc3 --- /dev/null +++ b/changelog.d/11725.doc @@ -0,0 +1 @@ +Document that now the minimum supported PostgreSQL version is 10. diff --git a/docs/postgres.md b/docs/postgres.md index e4861c1f127f..0562021da526 100644 --- a/docs/postgres.md +++ b/docs/postgres.md @@ -1,6 +1,6 @@ # Using Postgres -Synapse supports PostgreSQL versions 9.6 or later. +Synapse supports PostgreSQL versions 10 or later. 
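Synapse verifies this at startup and refuses to run against an older server. As a rough illustration of such a guard (hypothetical code, not Synapse's actual implementation), psycopg2 exposes the backend version as an integer, e.g. `100004` for PostgreSQL 10.4:

```python
import psycopg2.extensions

MIN_PG_VERSION = 100000  # PostgreSQL 10.0 in psycopg2's numeric encoding


def check_postgres_version(conn: psycopg2.extensions.connection) -> None:
    # conn.server_version is an int such as 100004 (10.4) or 130003 (13.3).
    if conn.server_version < MIN_PG_VERSION:
        raise RuntimeError("Synapse requires PostgreSQL 10 or above.")
```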
## Install postgres client libraries

From 1b1aed38e3df1daca709b9bb57b1443b9e6da2bd Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Mon, 10 Jan 2022 13:40:46 +0000
Subject: [PATCH 081/474] Deal with mypy errors w/ type-hinted pynacl 1.5.0
 (#11714)

* Deal with mypy errors w/ type-hinted pynacl 1.5.0

Fixes #11644.

I really don't like that we're monkey patching pynacl SignedKey instances
with alg and version objects. But I'm too scared to make the changes
necessary right now.

(Ideally I would replace `signedjson.types.SingingKey` with a runtime
class which wraps or inherits from `nacl.signing.SigningKey`.)

C.f. https://github.com/matrix-org/python-signedjson/issues/16
---
 changelog.d/11714.misc | 1 +
 tests/crypto/test_event_signing.py | 8 +++++++-
 2 files changed, 8 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/11714.misc

diff --git a/changelog.d/11714.misc b/changelog.d/11714.misc
new file mode 100644
index 000000000000..7f39bf0e3dca
--- /dev/null
+++ b/changelog.d/11714.misc
@@ -0,0 +1 @@
+Fix a typechecker problem related to our (ab)use of `nacl.signing.SigningKey`s.
\ No newline at end of file
diff --git a/tests/crypto/test_event_signing.py b/tests/crypto/test_event_signing.py
index 1c920157f506..a72a0103d3b8 100644
--- a/tests/crypto/test_event_signing.py
+++ b/tests/crypto/test_event_signing.py
@@ -14,6 +14,7 @@


 import nacl.signing
+import signedjson.types
 from unpaddedbase64 import decode_base64

 from synapse.api.room_versions import RoomVersions
@@ -35,7 +36,12 @@ class EventSigningTestCase(unittest.TestCase):

 def setUp(self):
- self.signing_key = nacl.signing.SigningKey(SIGNING_KEY_SEED)
+ # NB: `signedjson` expects `nacl.signing.SigningKey` instances which have been
+ # monkeypatched to include new `alg` and `version` attributes. This is captured
+ # by the `signedjson.types.SigningKey` protocol.
+ self.signing_key: signedjson.types.SigningKey = nacl.signing.SigningKey(
+ SIGNING_KEY_SEED
+ )
 self.signing_key.alg = KEY_ALG
 self.signing_key.version = KEY_VER

From 22abfca8d9e8c486d9cf4624e8422a84cc361c83 Mon Sep 17 00:00:00 2001
From: reivilibre
Date: Wed, 12 Jan 2022 15:21:13 +0000
Subject: [PATCH 082/474] Fix a bug introduced in Synapse v1.0.0 whereby device
 list updates would not be sent to remote homeservers if there were too many
 to send at once.
(#11729) Co-authored-by: Brendan Abolivier --- changelog.d/11729.bugfix | 1 + synapse/storage/databases/main/devices.py | 8 +++- tests/storage/test_devices.py | 50 ++++++++++++++++++++++- 3 files changed, 57 insertions(+), 2 deletions(-) create mode 100644 changelog.d/11729.bugfix diff --git a/changelog.d/11729.bugfix b/changelog.d/11729.bugfix new file mode 100644 index 000000000000..8438ce568630 --- /dev/null +++ b/changelog.d/11729.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse v1.0.0 whereby some device list updates would not be sent to remote homeservers if there were too many to send at once. \ No newline at end of file diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 273adb61fdae..324bd5f879d3 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -270,6 +270,10 @@ async def get_device_updates_by_remote( # The most recent request's opentracing_context is used as the # context which created the Edu. + # This is the stream ID that we will return for the consumer to resume + # following this stream later. + last_processed_stream_id = from_stream_id + query_map = {} cross_signing_keys_by_user = {} for user_id, device_id, update_stream_id, update_context in updates: @@ -295,6 +299,8 @@ async def get_device_updates_by_remote( if update_stream_id > previous_update_stream_id: query_map[key] = (update_stream_id, update_context) + last_processed_stream_id = update_stream_id + results = await self._get_device_update_edus_by_remote( destination, from_stream_id, query_map ) @@ -307,7 +313,7 @@ async def get_device_updates_by_remote( # FIXME: remove this when enough servers have upgraded results.append(("org.matrix.signing_key_update", result)) - return now_stream_id, results + return last_processed_stream_id, results def _get_device_updates_by_remote_txn( self, diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py index 6790aa524291..5cd7f6bb7ab2 100644 --- a/tests/storage/test_devices.py +++ b/tests/storage/test_devices.py @@ -94,7 +94,7 @@ def test_count_devices_by_users(self): def test_get_device_updates_by_remote(self): device_ids = ["device_id1", "device_id2"] - # Add two device updates with a single stream_id + # Add two device updates with sequential `stream_id`s self.get_success( self.store.add_device_change_to_streams("user_id", device_ids, ["somehost"]) ) @@ -107,6 +107,54 @@ def test_get_device_updates_by_remote(self): # Check original device_ids are contained within these updates self._check_devices_in_updates(device_ids, device_updates) + def test_get_device_updates_by_remote_can_limit_properly(self): + """ + Tests that `get_device_updates_by_remote` returns an appropriate + stream_id to resume fetching from (without skipping any results). 
+ """
+
+ # Add some device updates with sequential `stream_id`s
+ device_ids = [
+ "device_id1",
+ "device_id2",
+ "device_id3",
+ "device_id4",
+ "device_id5",
+ ]
+ self.get_success(
+ self.store.add_device_change_to_streams("user_id", device_ids, ["somehost"])
+ )
+
+ # Get all device updates ever meant for this remote
+ next_stream_id, device_updates = self.get_success(
+ self.store.get_device_updates_by_remote("somehost", -1, limit=3)
+ )
+
+ # Check the first three original device_ids are contained within these updates
+ self._check_devices_in_updates(device_ids[:3], device_updates)
+
+ # Get the next batch of device updates
+ next_stream_id, device_updates = self.get_success(
+ self.store.get_device_updates_by_remote("somehost", next_stream_id, limit=3)
+ )
+
+ # Check the last two original device_ids are contained within these updates
+ self._check_devices_in_updates(device_ids[3:], device_updates)
+
+ # Add some more device updates to ensure it still resumes properly
+ device_ids = ["device_id6", "device_id7"]
+ self.get_success(
+ self.store.add_device_change_to_streams("user_id", device_ids, ["somehost"])
+ )
+
+ # Get the next batch of device updates
+ next_stream_id, device_updates = self.get_success(
+ self.store.get_device_updates_by_remote("somehost", next_stream_id, limit=3)
+ )
+
+ # Check the newly-added device_ids are contained within these updates
+ self._check_devices_in_updates(device_ids, device_updates)
+
 def _check_devices_in_updates(self, expected_device_ids, device_updates):
 """Check that an specific device ids exist in a list of device update EDUs"""
 self.assertEqual(len(device_updates), len(expected_device_ids))

From 2560b1b6b2f74b5724253396c0e3665fa1f7968c Mon Sep 17 00:00:00 2001
From: Jason Robinson
Date: Wed, 12 Jan 2022 18:09:36 +0200
Subject: [PATCH 083/474] Allow tracking puppeted users for MAU (#11561)

Currently when puppeting another user, the user doing the puppeting is
tracked for client IPs and MAU (if configured). When tracking MAU is
important, it becomes necessary to also be able to track the client IPs
and MAU of puppeted users. An example is a client that manages user
creation and token creation via the Synapse admin API, then passes those
tokens on for a client to use.

This PR adds optional configuration to enable tracking of puppeted users
into monthly active users. The default behaviour stays the same.

Signed-off-by: Jason Robinson
---
 changelog.d/11561.feature | 1 +
 docs/sample_config.yaml | 6 ++++++
 synapse/api/auth.py | 13 +++++++++++++
 synapse/config/api.py | 10 ++++++++++
 tests/api/test_auth.py | 33 +++++++++++++++++++++++++++++++++
 5 files changed, 63 insertions(+)
 create mode 100644 changelog.d/11561.feature

diff --git a/changelog.d/11561.feature b/changelog.d/11561.feature
new file mode 100644
index 000000000000..19dada883bb2
--- /dev/null
+++ b/changelog.d/11561.feature
@@ -0,0 +1 @@
+Add `track_puppeted_user_ips` config flag to track puppeted user IP addresses. This also includes them in monthly active user counts.
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 810a14b07706..26894fae349a 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -1503,6 +1503,12 @@ room_prejoin_state:
 #additional_event_types:
 # - org.example.custom.event.type

+# If enabled, puppeted user IP's can also be tracked. By default when
+# puppeting another user, the user who has created the access token
+# for puppeting is tracked. If this is enabled, both requests are tracked.
+# Implicitly enables MAU tracking for puppeted users. +#track_puppeted_user_ips: false + # A list of application service config files to use # diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 4a32d430bdfc..683241201ca6 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -71,6 +71,7 @@ def __init__(self, hs: "HomeServer"): self._auth_blocking = AuthBlocking(self.hs) self._track_appservice_user_ips = hs.config.appservice.track_appservice_user_ips + self._track_puppeted_user_ips = hs.config.api.track_puppeted_user_ips self._macaroon_secret_key = hs.config.key.macaroon_secret_key self._force_tracing_for_users = hs.config.tracing.force_tracing_for_users @@ -246,6 +247,18 @@ async def _wrapped_get_user_by_req( user_agent=user_agent, device_id=device_id, ) + # Track also the puppeted user client IP if enabled and the user is puppeting + if ( + user_info.user_id != user_info.token_owner + and self._track_puppeted_user_ips + ): + await self.store.insert_client_ip( + user_id=user_info.user_id, + access_token=access_token, + ip=ip_addr, + user_agent=user_agent, + device_id=device_id, + ) if is_guest and not allow_guest: raise AuthError( diff --git a/synapse/config/api.py b/synapse/config/api.py index 25538b82d555..bdbe9f0fa280 100644 --- a/synapse/config/api.py +++ b/synapse/config/api.py @@ -29,6 +29,7 @@ class ApiConfig(Config): def read_config(self, config: JsonDict, **kwargs): validate_config(_MAIN_SCHEMA, config, ()) self.room_prejoin_state = list(self._get_prejoin_state_types(config)) + self.track_puppeted_user_ips = config.get("track_puppeted_user_ips", False) def generate_config_section(cls, **kwargs) -> str: formatted_default_state_types = "\n".join( @@ -59,6 +60,12 @@ def generate_config_section(cls, **kwargs) -> str: # #additional_event_types: # - org.example.custom.event.type + + # If enabled, puppeted user IP's can also be tracked. By default when + # puppeting another user, the user who has created the access token + # for puppeting is tracked. If this is enabled, both requests are tracked. + # Implicitly enables MAU tracking for puppeted users. 
+ #track_puppeted_user_ips: false """ % { "formatted_default_state_types": formatted_default_state_types } @@ -138,5 +145,8 @@ def _get_prejoin_state_types(self, config: JsonDict) -> Iterable[str]: "properties": { "room_prejoin_state": _ROOM_PREJOIN_STATE_CONFIG_SCHEMA, "room_invite_state_types": _ROOM_INVITE_STATE_TYPES_SCHEMA, + "track_puppeted_user_ips": { + "type": "boolean", + }, }, } diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index a2dfa1ed0507..4b53b6d40b31 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -274,6 +274,39 @@ def test_get_user_by_req_appservice_valid_token_invalid_device_id(self): self.assertEquals(failure.value.code, 400) self.assertEquals(failure.value.errcode, Codes.EXCLUSIVE) + def test_get_user_by_req__puppeted_token__not_tracking_puppeted_mau(self): + self.store.get_user_by_access_token = simple_async_mock( + TokenLookupResult( + user_id="@baldrick:matrix.org", + device_id="device", + token_owner="@admin:matrix.org", + ) + ) + self.store.insert_client_ip = simple_async_mock(None) + request = Mock(args={}) + request.getClientIP.return_value = "127.0.0.1" + request.args[b"access_token"] = [self.test_token] + request.requestHeaders.getRawHeaders = mock_getRawHeaders() + self.get_success(self.auth.get_user_by_req(request)) + self.store.insert_client_ip.assert_called_once() + + def test_get_user_by_req__puppeted_token__tracking_puppeted_mau(self): + self.auth._track_puppeted_user_ips = True + self.store.get_user_by_access_token = simple_async_mock( + TokenLookupResult( + user_id="@baldrick:matrix.org", + device_id="device", + token_owner="@admin:matrix.org", + ) + ) + self.store.insert_client_ip = simple_async_mock(None) + request = Mock(args={}) + request.getClientIP.return_value = "127.0.0.1" + request.args[b"access_token"] = [self.test_token] + request.requestHeaders.getRawHeaders = mock_getRawHeaders() + self.get_success(self.auth.get_user_by_req(request)) + self.assertEquals(self.store.insert_client_ip.call_count, 2) + def test_get_user_from_macaroon(self): self.store.get_user_by_access_token = simple_async_mock( TokenLookupResult(user_id="@baldrick:matrix.org", device_id="device") From b92a2ff7971c65f764b19fe4165e5f79f68cb1f1 Mon Sep 17 00:00:00 2001 From: Andy Balaam Date: Thu, 13 Jan 2022 13:10:42 +0000 Subject: [PATCH 084/474] Fix typo in demo docs: differnt (#11735) --- changelog.d/11735.doc | 1 + demo/README | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11735.doc diff --git a/changelog.d/11735.doc b/changelog.d/11735.doc new file mode 100644 index 000000000000..d8822f6b52aa --- /dev/null +++ b/changelog.d/11735.doc @@ -0,0 +1 @@ +Fix typo in demo docs: differnt. diff --git a/demo/README b/demo/README index 0bec820ad657..a5a95bd19666 100644 --- a/demo/README +++ b/demo/README @@ -22,5 +22,5 @@ Logs and sqlitedb will be stored in demo/808{0,1,2}.{log,db} -Also note that when joining a public room on a differnt HS via "#foo:bar.net", then you are (in the current impl) joining a room with room_id "foo". This means that it won't work if your HS already has a room with that name. +Also note that when joining a public room on a different HS via "#foo:bar.net", then you are (in the current impl) joining a room with room_id "foo". This means that it won't work if your HS already has a room with that name. 
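To make the effect of the `track_puppeted_user_ips` change (#11561) above concrete: when the user a token acts for differs from the token's owner, the new flag causes the client IP to be recorded for both, which in turn feeds monthly-active-user accounting. The following Python sketch uses invented stand-ins (`TokenInfo`, `record_client_ip`) rather than Synapse's real types; the real logic is the `insert_client_ip` branch in `synapse/api/auth.py` shown in the diff above.

    # Sketch only: TokenInfo and record_client_ip are hypothetical stand-ins.
    from dataclasses import dataclass

    @dataclass
    class TokenInfo:
        user_id: str      # the user being acted for (possibly puppeted)
        token_owner: str  # the user the access token belongs to

    def record_client_ip(user_id: str, ip: str) -> None:
        print(f"tracked {user_id} from {ip}")

    def on_request(info: TokenInfo, ip: str, track_puppeted_user_ips: bool) -> None:
        # The token owner is always tracked (the pre-existing behaviour).
        record_client_ip(info.token_owner, ip)
        # With the new flag, the puppeted user is tracked as well, so it
        # also counts towards monthly active users.
        if info.user_id != info.token_owner and track_puppeted_user_ips:
            record_client_ip(info.user_id, ip)

    on_request(TokenInfo("@baldrick:matrix.org", "@admin:matrix.org"), "127.0.0.1", True)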
From 10a88ba91cb16ccf757984f0a7d41ddf8b4dc07f Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 13 Jan 2022 08:49:28 -0500 Subject: [PATCH 085/474] Use auto_attribs/native type hints for attrs classes. (#11692) --- changelog.d/11692.misc | 1 + synapse/api/room_versions.py | 30 ++++---- synapse/config/emailconfig.py | 24 +++--- synapse/config/server.py | 4 +- synapse/config/workers.py | 24 +++--- synapse/crypto/keyring.py | 18 ++--- synapse/events/snapshot.py | 18 ++--- .../sender/per_destination_queue.py | 12 +-- synapse/handlers/auth.py | 14 ++-- synapse/handlers/e2e_keys.py | 10 +-- synapse/handlers/sso.py | 32 ++++---- synapse/http/connectproxyclient.py | 4 +- synapse/http/matrixfederationclient.py | 18 ++--- synapse/http/site.py | 4 +- synapse/logging/_remote.py | 10 +-- synapse/logging/context.py | 20 ++--- synapse/logging/opentracing.py | 2 +- synapse/metrics/__init__.py | 20 ++--- synapse/notifier.py | 14 ++-- synapse/push/__init__.py | 38 +++++----- synapse/push/bulk_push_rule_evaluator.py | 18 ++--- synapse/replication/tcp/streams/events.py | 34 ++++----- synapse/rest/media/v1/media_storage.py | 6 +- synapse/state/__init__.py | 8 +- synapse/storage/database.py | 8 +- .../storage/databases/main/end_to_end_keys.py | 6 +- synapse/storage/databases/main/events.py | 14 ++-- .../databases/main/events_bg_updates.py | 12 +-- .../storage/databases/main/registration.py | 18 ++--- synapse/storage/databases/main/roommember.py | 6 +- synapse/storage/databases/main/ui_auth.py | 12 +-- synapse/storage/keys.py | 6 +- synapse/storage/prepare_database.py | 6 +- synapse/storage/relations.py | 20 ++--- synapse/storage/state.py | 6 +- synapse/storage/util/id_generators.py | 8 +- synapse/streams/config.py | 10 +-- synapse/types.py | 75 +++++++++---------- synapse/util/async_helpers.py | 6 +- synapse/util/caches/dictionary_cache.py | 11 ++- 40 files changed, 300 insertions(+), 307 deletions(-) create mode 100644 changelog.d/11692.misc diff --git a/changelog.d/11692.misc b/changelog.d/11692.misc new file mode 100644 index 000000000000..0cdfca54e7d6 --- /dev/null +++ b/changelog.d/11692.misc @@ -0,0 +1 @@ +Use `auto_attribs` and native type hints for attrs classes. 
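The bulk of the patch below is a mechanical conversion, so a small self-contained before/after sketch may help readers who have not used attrs' `auto_attribs` mode. The `Point` classes here are made up for illustration, not taken from Synapse:

    import attr

    # Old style: the type is passed to attr.ib() via its `type` argument.
    @attr.s(slots=True, frozen=True)
    class PointOld:
        x = attr.ib(type=int)
        y = attr.ib(type=int, default=0)

    # New style: with auto_attribs=True, plain variable annotations define
    # the fields, so the class reads like ordinary type-hinted Python.
    @attr.s(slots=True, frozen=True, auto_attribs=True)
    class PointNew:
        x: int
        y: int = 0

    assert PointOld(1).y == 0 and PointNew(1).y == 0

Both forms generate the same `__init__`, `__eq__` and so on; only the way the attributes are declared changes.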
diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index 0a895bba480a..a747a4081497 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -46,41 +46,41 @@ class RoomDisposition: UNSTABLE = "unstable" -@attr.s(slots=True, frozen=True) +@attr.s(slots=True, frozen=True, auto_attribs=True) class RoomVersion: """An object which describes the unique attributes of a room version.""" - identifier = attr.ib(type=str) # the identifier for this version - disposition = attr.ib(type=str) # one of the RoomDispositions - event_format = attr.ib(type=int) # one of the EventFormatVersions - state_res = attr.ib(type=int) # one of the StateResolutionVersions - enforce_key_validity = attr.ib(type=bool) + identifier: str # the identifier for this version + disposition: str # one of the RoomDispositions + event_format: int # one of the EventFormatVersions + state_res: int # one of the StateResolutionVersions + enforce_key_validity: bool # Before MSC2432, m.room.aliases had special auth rules and redaction rules - special_case_aliases_auth = attr.ib(type=bool) + special_case_aliases_auth: bool # Strictly enforce canonicaljson, do not allow: # * Integers outside the range of [-2 ^ 53 + 1, 2 ^ 53 - 1] # * Floats # * NaN, Infinity, -Infinity - strict_canonicaljson = attr.ib(type=bool) + strict_canonicaljson: bool # MSC2209: Check 'notifications' key while verifying # m.room.power_levels auth rules. - limit_notifications_power_levels = attr.ib(type=bool) + limit_notifications_power_levels: bool # MSC2174/MSC2176: Apply updated redaction rules algorithm. - msc2176_redaction_rules = attr.ib(type=bool) + msc2176_redaction_rules: bool # MSC3083: Support the 'restricted' join_rule. - msc3083_join_rules = attr.ib(type=bool) + msc3083_join_rules: bool # MSC3375: Support for the proper redaction rules for MSC3083. This mustn't # be enabled if MSC3083 is not. - msc3375_redaction_rules = attr.ib(type=bool) + msc3375_redaction_rules: bool # MSC2403: Allows join_rules to be set to 'knock', changes auth rules to allow sending # m.room.membership event with membership 'knock'. 
- msc2403_knocking = attr.ib(type=bool) + msc2403_knocking: bool # MSC2716: Adds m.room.power_levels -> content.historical field to control # whether "insertion", "chunk", "marker" events can be sent - msc2716_historical = attr.ib(type=bool) + msc2716_historical: bool # MSC2716: Adds support for redacting "insertion", "chunk", and "marker" events - msc2716_redactions = attr.ib(type=bool) + msc2716_redactions: bool class RoomVersions: diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 510b647c6343..949d7dd5ac98 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -55,19 +55,19 @@ ---------------------------------------------------------------------------------------""" -@attr.s(slots=True, frozen=True) +@attr.s(slots=True, frozen=True, auto_attribs=True) class EmailSubjectConfig: - message_from_person_in_room = attr.ib(type=str) - message_from_person = attr.ib(type=str) - messages_from_person = attr.ib(type=str) - messages_in_room = attr.ib(type=str) - messages_in_room_and_others = attr.ib(type=str) - messages_from_person_and_others = attr.ib(type=str) - invite_from_person = attr.ib(type=str) - invite_from_person_to_room = attr.ib(type=str) - invite_from_person_to_space = attr.ib(type=str) - password_reset = attr.ib(type=str) - email_validation = attr.ib(type=str) + message_from_person_in_room: str + message_from_person: str + messages_from_person: str + messages_in_room: str + messages_in_room_and_others: str + messages_from_person_and_others: str + invite_from_person: str + invite_from_person_to_room: str + invite_from_person_to_space: str + password_reset: str + email_validation: str class EmailConfig(Config): diff --git a/synapse/config/server.py b/synapse/config/server.py index 1de2dea9b024..2c2b461cac2e 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -200,8 +200,8 @@ class HttpListenerConfig: """Object describing the http-specific parts of the config of a listener""" x_forwarded: bool = False - resources: List[HttpResourceConfig] = attr.ib(factory=list) - additional_resources: Dict[str, dict] = attr.ib(factory=dict) + resources: List[HttpResourceConfig] = attr.Factory(list) + additional_resources: Dict[str, dict] = attr.Factory(dict) tag: Optional[str] = None diff --git a/synapse/config/workers.py b/synapse/config/workers.py index 576f519188bb..bdaba6db3787 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -51,12 +51,12 @@ def _instance_to_list_converter(obj: Union[str, List[str]]) -> List[str]: return obj -@attr.s +@attr.s(auto_attribs=True) class InstanceLocationConfig: """The host and port to talk to an instance via HTTP replication.""" - host = attr.ib(type=str) - port = attr.ib(type=int) + host: str + port: int @attr.s @@ -77,34 +77,28 @@ class WriterLocations: can only be a single instance. 
""" - events = attr.ib( + events: List[str] = attr.ib( default=["master"], - type=List[str], converter=_instance_to_list_converter, ) - typing = attr.ib( + typing: List[str] = attr.ib( default=["master"], - type=List[str], converter=_instance_to_list_converter, ) - to_device = attr.ib( + to_device: List[str] = attr.ib( default=["master"], - type=List[str], converter=_instance_to_list_converter, ) - account_data = attr.ib( + account_data: List[str] = attr.ib( default=["master"], - type=List[str], converter=_instance_to_list_converter, ) - receipts = attr.ib( + receipts: List[str] = attr.ib( default=["master"], - type=List[str], converter=_instance_to_list_converter, ) - presence = attr.ib( + presence: List[str] = attr.ib( default=["master"], - type=List[str], converter=_instance_to_list_converter, ) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 993b04099e28..72d4a69aac35 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -58,7 +58,7 @@ logger = logging.getLogger(__name__) -@attr.s(slots=True, cmp=False) +@attr.s(slots=True, frozen=True, cmp=False, auto_attribs=True) class VerifyJsonRequest: """ A request to verify a JSON object. @@ -78,10 +78,10 @@ class VerifyJsonRequest: key_ids: The set of key_ids to that could be used to verify the JSON object """ - server_name = attr.ib(type=str) - get_json_object = attr.ib(type=Callable[[], JsonDict]) - minimum_valid_until_ts = attr.ib(type=int) - key_ids = attr.ib(type=List[str]) + server_name: str + get_json_object: Callable[[], JsonDict] + minimum_valid_until_ts: int + key_ids: List[str] @staticmethod def from_json_object( @@ -124,7 +124,7 @@ class KeyLookupError(ValueError): pass -@attr.s(slots=True) +@attr.s(slots=True, frozen=True, auto_attribs=True) class _FetchKeyRequest: """A request for keys for a given server. @@ -138,9 +138,9 @@ class _FetchKeyRequest: key_ids: The IDs of the keys to attempt to fetch """ - server_name = attr.ib(type=str) - minimum_valid_until_ts = attr.ib(type=int) - key_ids = attr.ib(type=List[str]) + server_name: str + minimum_valid_until_ts: int + key_ids: List[str] class Keyring: diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index f251402ed8f2..0eab1aefd637 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -28,7 +28,7 @@ from synapse.storage.databases.main import DataStore -@attr.s(slots=True) +@attr.s(slots=True, auto_attribs=True) class EventContext: """ Holds information relevant to persisting an event @@ -103,15 +103,15 @@ class EventContext: accessed via get_prev_state_ids. 
""" - rejected = attr.ib(default=False, type=Union[bool, str]) - _state_group = attr.ib(default=None, type=Optional[int]) - state_group_before_event = attr.ib(default=None, type=Optional[int]) - prev_group = attr.ib(default=None, type=Optional[int]) - delta_ids = attr.ib(default=None, type=Optional[StateMap[str]]) - app_service = attr.ib(default=None, type=Optional[ApplicationService]) + rejected: Union[bool, str] = False + _state_group: Optional[int] = None + state_group_before_event: Optional[int] = None + prev_group: Optional[int] = None + delta_ids: Optional[StateMap[str]] = None + app_service: Optional[ApplicationService] = None - _current_state_ids = attr.ib(default=None, type=Optional[StateMap[str]]) - _prev_state_ids = attr.ib(default=None, type=Optional[StateMap[str]]) + _current_state_ids: Optional[StateMap[str]] = None + _prev_state_ids: Optional[StateMap[str]] = None @staticmethod def with_state( diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index 391b30fbb559..8152e80b88d2 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -607,18 +607,18 @@ def _start_catching_up(self) -> None: self._pending_pdus = [] -@attr.s(slots=True) +@attr.s(slots=True, auto_attribs=True) class _TransactionQueueManager: """A helper async context manager for pulling stuff off the queues and tracking what was last successfully sent, etc. """ - queue = attr.ib(type=PerDestinationQueue) + queue: PerDestinationQueue - _device_stream_id = attr.ib(type=Optional[int], default=None) - _device_list_id = attr.ib(type=Optional[int], default=None) - _last_stream_ordering = attr.ib(type=Optional[int], default=None) - _pdus = attr.ib(type=List[EventBase], factory=list) + _device_stream_id: Optional[int] = None + _device_list_id: Optional[int] = None + _last_stream_ordering: Optional[int] = None + _pdus: List[EventBase] = attr.Factory(list) async def __aenter__(self) -> Tuple[List[EventBase], List[Edu]]: # First we calculate the EDUs we want to send, if any. 
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 84724b207c9d..2389c9ac5214 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -168,25 +168,25 @@ def login_id_phone_to_thirdparty(identifier: JsonDict) -> Dict[str, str]: } -@attr.s(slots=True) +@attr.s(slots=True, auto_attribs=True) class SsoLoginExtraAttributes: """Data we track about SAML2 sessions""" # time the session was created, in milliseconds - creation_time = attr.ib(type=int) - extra_attributes = attr.ib(type=JsonDict) + creation_time: int + extra_attributes: JsonDict -@attr.s(slots=True, frozen=True) +@attr.s(slots=True, frozen=True, auto_attribs=True) class LoginTokenAttributes: """Data we store in a short-term login token""" - user_id = attr.ib(type=str) + user_id: str - auth_provider_id = attr.ib(type=str) + auth_provider_id: str """The SSO Identity Provider that the user authenticated with, to get this token.""" - auth_provider_session_id = attr.ib(type=Optional[str]) + auth_provider_session_id: Optional[str] """The session ID advertised by the SSO Identity Provider.""" diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 14360b4e4045..d4dfddf63fb4 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -1321,14 +1321,14 @@ def _one_time_keys_match(old_key_json: str, new_key: JsonDict) -> bool: return old_key == new_key_copy -@attr.s(slots=True) +@attr.s(slots=True, auto_attribs=True) class SignatureListItem: """An item in the signature list as used by upload_signatures_for_device_keys.""" - signing_key_id = attr.ib(type=str) - target_user_id = attr.ib(type=str) - target_device_id = attr.ib(type=str) - signature = attr.ib(type=JsonDict) + signing_key_id: str + target_user_id: str + target_device_id: str + signature: JsonDict class SigningKeyEduUpdater: diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py index 65c27bc64a5e..0bb8b0929e7e 100644 --- a/synapse/handlers/sso.py +++ b/synapse/handlers/sso.py @@ -126,45 +126,45 @@ async def handle_redirect_request( raise NotImplementedError() -@attr.s +@attr.s(auto_attribs=True) class UserAttributes: # the localpart of the mxid that the mapper has assigned to the user. # if `None`, the mapper has not picked a userid, and the user should be prompted to # enter one. - localpart = attr.ib(type=Optional[str]) - display_name = attr.ib(type=Optional[str], default=None) - emails = attr.ib(type=Collection[str], default=attr.Factory(list)) + localpart: Optional[str] + display_name: Optional[str] = None + emails: Collection[str] = attr.Factory(list) -@attr.s(slots=True) +@attr.s(slots=True, auto_attribs=True) class UsernameMappingSession: """Data we track about SSO sessions""" # A unique identifier for this SSO provider, e.g. "oidc" or "saml". - auth_provider_id = attr.ib(type=str) + auth_provider_id: str # user ID on the IdP server - remote_user_id = attr.ib(type=str) + remote_user_id: str # attributes returned by the ID mapper - display_name = attr.ib(type=Optional[str]) - emails = attr.ib(type=Collection[str]) + display_name: Optional[str] + emails: Collection[str] # An optional dictionary of extra attributes to be provided to the client in the # login response. 
- extra_login_attributes = attr.ib(type=Optional[JsonDict]) + extra_login_attributes: Optional[JsonDict] # where to redirect the client back to - client_redirect_url = attr.ib(type=str) + client_redirect_url: str # expiry time for the session, in milliseconds - expiry_time_ms = attr.ib(type=int) + expiry_time_ms: int # choices made by the user - chosen_localpart = attr.ib(type=Optional[str], default=None) - use_display_name = attr.ib(type=bool, default=True) - emails_to_use = attr.ib(type=Collection[str], default=()) - terms_accepted_version = attr.ib(type=Optional[str], default=None) + chosen_localpart: Optional[str] = None + use_display_name: bool = True + emails_to_use: Collection[str] = () + terms_accepted_version: Optional[str] = None # the HTTP cookie used to track the mapping session id diff --git a/synapse/http/connectproxyclient.py b/synapse/http/connectproxyclient.py index fbafffd69bd6..203e995bb77d 100644 --- a/synapse/http/connectproxyclient.py +++ b/synapse/http/connectproxyclient.py @@ -32,9 +32,9 @@ class ProxyConnectError(ConnectError): pass -@attr.s +@attr.s(auto_attribs=True) class ProxyCredentials: - username_password = attr.ib(type=bytes) + username_password: bytes def as_proxy_authorization_value(self) -> bytes: """ diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index deedde0b5b37..2e668363b2f5 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -123,37 +123,37 @@ def finish(self) -> T: pass -@attr.s(slots=True, frozen=True) +@attr.s(slots=True, frozen=True, auto_attribs=True) class MatrixFederationRequest: - method = attr.ib(type=str) + method: str """HTTP method """ - path = attr.ib(type=str) + path: str """HTTP path """ - destination = attr.ib(type=str) + destination: str """The remote server to send the HTTP request to. """ - json = attr.ib(default=None, type=Optional[JsonDict]) + json: Optional[JsonDict] = None """JSON to send in the body. """ - json_callback = attr.ib(default=None, type=Optional[Callable[[], JsonDict]]) + json_callback: Optional[Callable[[], JsonDict]] = None """A callback to generate the JSON. """ - query = attr.ib(default=None, type=Optional[dict]) + query: Optional[dict] = None """Query arguments. """ - txn_id = attr.ib(default=None, type=Optional[str]) + txn_id: Optional[str] = None """Unique ID for this request (for logging) """ - uri = attr.ib(init=False, type=bytes) + uri: bytes = attr.ib(init=False) """The URI of this request """ diff --git a/synapse/http/site.py b/synapse/http/site.py index 80f7a2ff58e5..c180a1d3231b 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -534,9 +534,9 @@ def getClientAddress(self) -> IAddress: @implementer(IAddress) -@attr.s(frozen=True, slots=True) +@attr.s(frozen=True, slots=True, auto_attribs=True) class _XForwardedForAddress: - host = attr.ib(type=str) + host: str class SynapseSite(Site): diff --git a/synapse/logging/_remote.py b/synapse/logging/_remote.py index 8202d0494d72..475756f1db64 100644 --- a/synapse/logging/_remote.py +++ b/synapse/logging/_remote.py @@ -39,7 +39,7 @@ logger = logging.getLogger(__name__) -@attr.s +@attr.s(slots=True, auto_attribs=True) @implementer(IPushProducer) class LogProducer: """ @@ -54,10 +54,10 @@ class LogProducer: # This is essentially ITCPTransport, but that is missing certain fields # (connected and registerProducer) which are part of the implementation. 
- transport = attr.ib(type=Connection) - _format = attr.ib(type=Callable[[logging.LogRecord], str]) - _buffer = attr.ib(type=deque) - _paused = attr.ib(default=False, type=bool, init=False) + transport: Connection + _format: Callable[[logging.LogRecord], str] + _buffer: Deque[logging.LogRecord] + _paused: bool = attr.ib(default=False, init=False) def pauseProducing(self): self._paused = True diff --git a/synapse/logging/context.py b/synapse/logging/context.py index d4ee89337642..c31c2960ad95 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -193,7 +193,7 @@ def __sub__(self, other: "ContextResourceUsage") -> "ContextResourceUsage": return res -@attr.s(slots=True) +@attr.s(slots=True, auto_attribs=True) class ContextRequest: """ A bundle of attributes from the SynapseRequest object. @@ -205,15 +205,15 @@ class ContextRequest: their children. """ - request_id = attr.ib(type=str) - ip_address = attr.ib(type=str) - site_tag = attr.ib(type=str) - requester = attr.ib(type=Optional[str]) - authenticated_entity = attr.ib(type=Optional[str]) - method = attr.ib(type=str) - url = attr.ib(type=str) - protocol = attr.ib(type=str) - user_agent = attr.ib(type=str) + request_id: str + ip_address: str + site_tag: str + requester: Optional[str] + authenticated_entity: Optional[str] + method: str + url: str + protocol: str + user_agent: str LoggingContextOrSentinel = Union["LoggingContext", "_Sentinel"] diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 622445e9f477..5672d60de348 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -251,7 +251,7 @@ class BaseReporter: # type: ignore[no-redef] class _WrappedRustReporter(BaseReporter): """Wrap the reporter to ensure `report_span` never throws.""" - _reporter = attr.ib(type=Reporter, default=attr.Factory(Reporter)) + _reporter: Reporter = attr.Factory(Reporter) def set_process(self, *args, **kwargs): return self._reporter.set_process(*args, **kwargs) diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index ceef57ad883f..269c2b989e7a 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -76,19 +76,17 @@ def collect() -> Iterable[Metric]: yield metric -@attr.s(slots=True, hash=True) +@attr.s(slots=True, hash=True, auto_attribs=True) class LaterGauge: - name = attr.ib(type=str) - desc = attr.ib(type=str) - labels = attr.ib(hash=False, type=Optional[Iterable[str]]) + name: str + desc: str + labels: Optional[Iterable[str]] = attr.ib(hash=False) # callback: should either return a value (if there are no labels for this metric), # or dict mapping from a label tuple to a value - caller = attr.ib( - type=Callable[ - [], Union[Mapping[Tuple[str, ...], Union[int, float]], Union[int, float]] - ] - ) + caller: Callable[ + [], Union[Mapping[Tuple[str, ...], Union[int, float]], Union[int, float]] + ] def collect(self) -> Iterable[Metric]: @@ -157,7 +155,9 @@ def __init__( # Create a class which have the sub_metrics values as attributes, which # default to 0 on initialization. Used to pass to registered callbacks. 
self._metrics_class: Type[MetricsEntry] = attr.make_class( - "_MetricsEntry", attrs={x: attr.ib(0) for x in sub_metrics}, slots=True + "_MetricsEntry", + attrs={x: attr.ib(default=0) for x in sub_metrics}, + slots=True, ) # Counts number of in flight blocks for a given set of label values diff --git a/synapse/notifier.py b/synapse/notifier.py index bbabdb05875c..41fd94d7724b 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -193,15 +193,15 @@ def __bool__(self): return bool(self.events) -@attr.s(slots=True, frozen=True) +@attr.s(slots=True, frozen=True, auto_attribs=True) class _PendingRoomEventEntry: - event_pos = attr.ib(type=PersistedEventPosition) - extra_users = attr.ib(type=Collection[UserID]) + event_pos: PersistedEventPosition + extra_users: Collection[UserID] - room_id = attr.ib(type=str) - type = attr.ib(type=str) - state_key = attr.ib(type=Optional[str]) - membership = attr.ib(type=Optional[str]) + room_id: str + type: str + state_key: Optional[str] + membership: Optional[str] class Notifier: diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index 820f6f3f7ec0..5176a1c1861d 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -23,25 +23,25 @@ from synapse.server import HomeServer -@attr.s(slots=True) +@attr.s(slots=True, auto_attribs=True) class PusherConfig: """Parameters necessary to configure a pusher.""" - id = attr.ib(type=Optional[str]) - user_name = attr.ib(type=str) - access_token = attr.ib(type=Optional[int]) - profile_tag = attr.ib(type=str) - kind = attr.ib(type=str) - app_id = attr.ib(type=str) - app_display_name = attr.ib(type=str) - device_display_name = attr.ib(type=str) - pushkey = attr.ib(type=str) - ts = attr.ib(type=int) - lang = attr.ib(type=Optional[str]) - data = attr.ib(type=Optional[JsonDict]) - last_stream_ordering = attr.ib(type=int) - last_success = attr.ib(type=Optional[int]) - failing_since = attr.ib(type=Optional[int]) + id: Optional[str] + user_name: str + access_token: Optional[int] + profile_tag: str + kind: str + app_id: str + app_display_name: str + device_display_name: str + pushkey: str + ts: int + lang: Optional[str] + data: Optional[JsonDict] + last_stream_ordering: int + last_success: Optional[int] + failing_since: Optional[int] def as_dict(self) -> Dict[str, Any]: """Information that can be retrieved about a pusher after creation.""" @@ -57,12 +57,12 @@ def as_dict(self) -> Dict[str, Any]: } -@attr.s(slots=True) +@attr.s(slots=True, auto_attribs=True) class ThrottleParams: """Parameters for controlling the rate of sending pushes via email.""" - last_sent_ts = attr.ib(type=int) - throttle_ms = attr.ib(type=int) + last_sent_ts: int + throttle_ms: int class Pusher(metaclass=abc.ABCMeta): diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 009d8e77b05b..bee660893be2 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -298,7 +298,7 @@ def _condition_checker( StateGroup = Union[object, int] -@attr.s(slots=True) +@attr.s(slots=True, auto_attribs=True) class RulesForRoomData: """The data stored in the cache by `RulesForRoom`. @@ -307,29 +307,29 @@ class RulesForRoomData: """ # event_id -> (user_id, state) - member_map = attr.ib(type=MemberMap, factory=dict) + member_map: MemberMap = attr.Factory(dict) # user_id -> rules - rules_by_user = attr.ib(type=RulesByUser, factory=dict) + rules_by_user: RulesByUser = attr.Factory(dict) # The last state group we updated the caches for. 
If the state_group of # a new event comes along, we know that we can just return the cached # result. # On invalidation of the rules themselves (if the user changes them), # we invalidate everything and set state_group to `object()` - state_group = attr.ib(type=StateGroup, factory=object) + state_group: StateGroup = attr.Factory(object) # A sequence number to keep track of when we're allowed to update the # cache. We bump the sequence number when we invalidate the cache. If # the sequence number changes while we're calculating stuff we should # not update the cache with it. - sequence = attr.ib(type=int, default=0) + sequence: int = 0 # A cache of user_ids that we *know* aren't interesting, e.g. user_ids # owned by AS's, or remote users, etc. (I.e. users we will never need to # calculate push for) # These never need to be invalidated as we will never set up push for # them. - uninteresting_user_set = attr.ib(type=Set[str], factory=set) + uninteresting_user_set: Set[str] = attr.Factory(set) class RulesForRoom: @@ -553,7 +553,7 @@ def update_cache( self.data.state_group = state_group -@attr.attrs(slots=True, frozen=True) +@attr.attrs(slots=True, frozen=True, auto_attribs=True) class _Invalidation: # _Invalidation is passed as an `on_invalidate` callback to bulk_get_push_rules, # which means that it it is stored on the bulk_get_push_rules cache entry. In order @@ -564,8 +564,8 @@ class _Invalidation: # attrs provides suitable __hash__ and __eq__ methods, provided we remember to # set `frozen=True`. - cache = attr.ib(type=LruCache) - room_id = attr.ib(type=str) + cache: LruCache + room_id: str def __call__(self) -> None: rules_data = self.cache.get(self.room_id, None, update_metrics=False) diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py index a390cfcb74d5..4f4f1ad45378 100644 --- a/synapse/replication/tcp/streams/events.py +++ b/synapse/replication/tcp/streams/events.py @@ -50,12 +50,12 @@ """ -@attr.s(slots=True, frozen=True) +@attr.s(slots=True, frozen=True, auto_attribs=True) class EventsStreamRow: """A parsed row from the events replication stream""" - type = attr.ib() # str: the TypeId of one of the *EventsStreamRows - data = attr.ib() # BaseEventsStreamRow + type: str # the TypeId of one of the *EventsStreamRows + data: "BaseEventsStreamRow" class BaseEventsStreamRow: @@ -79,28 +79,28 @@ def from_data(cls, data): return cls(*data) -@attr.s(slots=True, frozen=True) +@attr.s(slots=True, frozen=True, auto_attribs=True) class EventsStreamEventRow(BaseEventsStreamRow): TypeId = "ev" - event_id = attr.ib(type=str) - room_id = attr.ib(type=str) - type = attr.ib(type=str) - state_key = attr.ib(type=Optional[str]) - redacts = attr.ib(type=Optional[str]) - relates_to = attr.ib(type=Optional[str]) - membership = attr.ib(type=Optional[str]) - rejected = attr.ib(type=bool) + event_id: str + room_id: str + type: str + state_key: Optional[str] + redacts: Optional[str] + relates_to: Optional[str] + membership: Optional[str] + rejected: bool -@attr.s(slots=True, frozen=True) +@attr.s(slots=True, frozen=True, auto_attribs=True) class EventsStreamCurrentStateRow(BaseEventsStreamRow): TypeId = "state" - room_id = attr.ib() # str - type = attr.ib() # str - state_key = attr.ib() # str - event_id = attr.ib() # str, optional + room_id: str + type: str + state_key: str + event_id: Optional[str] _EventRows: Tuple[Type[BaseEventsStreamRow], ...] 
= ( diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py index fca239d8c7ec..9f6c251caf21 100644 --- a/synapse/rest/media/v1/media_storage.py +++ b/synapse/rest/media/v1/media_storage.py @@ -343,7 +343,7 @@ class SpamMediaException(NotFoundError): """ -@attr.s(slots=True) +@attr.s(slots=True, auto_attribs=True) class ReadableFileWrapper: """Wrapper that allows reading a file in chunks, yielding to the reactor, and writing to a callback. @@ -354,8 +354,8 @@ class ReadableFileWrapper: CHUNK_SIZE = 2 ** 14 - clock = attr.ib(type=Clock) - path = attr.ib(type=str) + clock: Clock + path: str async def write_chunks_to(self, callback: Callable[[bytes], None]) -> None: """Reads the file in chunks and calls the callback with each chunk.""" diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 69ac8c3423ac..923e31587e4b 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -450,19 +450,19 @@ async def resolve_events( return {key: state_map[ev_id] for key, ev_id in new_state.items()} -@attr.s(slots=True) +@attr.s(slots=True, auto_attribs=True) class _StateResMetrics: """Keeps track of some usage metrics about state res.""" # System and User CPU time, in seconds - cpu_time = attr.ib(type=float, default=0.0) + cpu_time: float = 0.0 # time spent on database transactions (excluding scheduling time). This roughly # corresponds to the amount of work done on the db server, excluding event fetches. - db_time = attr.ib(type=float, default=0.0) + db_time: float = 0.0 # number of events fetched from the db. - db_events = attr.ib(type=int, default=0) + db_events: int = 0 _biggest_room_by_cpu_counter = Counter( diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 2cacc7dd6c5d..a27cc3605c73 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -143,7 +143,7 @@ def make_conn( return db_conn -@attr.s(slots=True) +@attr.s(slots=True, auto_attribs=True) class LoggingDatabaseConnection: """A wrapper around a database connection that returns `LoggingTransaction` as its cursor class. @@ -151,9 +151,9 @@ class LoggingDatabaseConnection: This is mainly used on startup to ensure that queries get logged correctly """ - conn = attr.ib(type=Connection) - engine = attr.ib(type=BaseDatabaseEngine) - default_txn_name = attr.ib(type=str) + conn: Connection + engine: BaseDatabaseEngine + default_txn_name: str def cursor( self, *, txn_name=None, after_callbacks=None, exception_callbacks=None diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 57b5ffbad32b..86cab975639c 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -50,16 +50,16 @@ from synapse.server import HomeServer -@attr.s(slots=True) +@attr.s(slots=True, auto_attribs=True) class DeviceKeyLookupResult: """The type returned by get_e2e_device_keys_and_signatures""" - display_name = attr.ib(type=Optional[str]) + display_name: Optional[str] # the key data from e2e_device_keys_json. Typically includes fields like # "algorithm", "keys" (including the curve25519 identity key and the ed25519 signing # key) and "signatures" (a map from (user id) to (key id/device_id) to signature.) 
- keys = attr.ib(type=Optional[JsonDict]) + keys: Optional[JsonDict] class EndToEndKeyBackgroundStore(SQLBaseStore): diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index dd255aefb9dd..cce230559773 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -69,7 +69,7 @@ ) -@attr.s(slots=True) +@attr.s(slots=True, auto_attribs=True) class DeltaState: """Deltas to use to update the `current_state_events` table. @@ -80,9 +80,9 @@ class DeltaState: should e.g. be removed from `current_state_events` table. """ - to_delete = attr.ib(type=List[Tuple[str, str]]) - to_insert = attr.ib(type=StateMap[str]) - no_longer_in_room = attr.ib(type=bool, default=False) + to_delete: List[Tuple[str, str]] + to_insert: StateMap[str] + no_longer_in_room: bool = False class PersistEventsStore: @@ -2226,17 +2226,17 @@ def _update_backward_extremeties(self, txn, events): ) -@attr.s(slots=True) +@attr.s(slots=True, auto_attribs=True) class _LinkMap: """A helper type for tracking links between chains.""" # Stores the set of links as nested maps: source chain ID -> target chain ID # -> source sequence number -> target sequence number. - maps = attr.ib(type=Dict[int, Dict[int, Dict[int, int]]], factory=dict) + maps: Dict[int, Dict[int, Dict[int, int]]] = attr.Factory(dict) # Stores the links that have been added (with new set to true), as tuples of # `(source chain ID, source sequence no, target chain ID, target sequence no.)` - additions = attr.ib(type=Set[Tuple[int, int, int, int]], factory=set) + additions: Set[Tuple[int, int, int, int]] = attr.Factory(set) def add_link( self, diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index a68f14ba4824..0a96664caf1b 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -65,22 +65,22 @@ class _BackgroundUpdates: REPLACE_STREAM_ORDERING_COLUMN = "replace_stream_ordering_column" -@attr.s(slots=True, frozen=True) +@attr.s(slots=True, frozen=True, auto_attribs=True) class _CalculateChainCover: """Return value for _calculate_chain_cover_txn.""" # The last room_id/depth/stream processed. - room_id = attr.ib(type=str) - depth = attr.ib(type=int) - stream = attr.ib(type=int) + room_id: str + depth: int + stream: int # Number of rows processed - processed_count = attr.ib(type=int) + processed_count: int # Map from room_id to last depth/stream processed for each room that we have # processed all events for (i.e. the rooms we can flip the # `has_auth_chain_index` for) - finished_room_map = attr.ib(type=Dict[str, Tuple[int, int]]) + finished_room_map: Dict[str, Tuple[int, int]] class EventsBackgroundUpdatesStore(SQLBaseStore): diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 4175c82a253c..aac94fa46444 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -51,7 +51,7 @@ class ExternalIDReuseException(Exception): pass -@attr.s(frozen=True, slots=True) +@attr.s(frozen=True, slots=True, auto_attribs=True) class TokenLookupResult: """Result of looking up an access token. @@ -69,14 +69,14 @@ class TokenLookupResult: cached. 
""" - user_id = attr.ib(type=str) - is_guest = attr.ib(type=bool, default=False) - shadow_banned = attr.ib(type=bool, default=False) - token_id = attr.ib(type=Optional[int], default=None) - device_id = attr.ib(type=Optional[str], default=None) - valid_until_ms = attr.ib(type=Optional[int], default=None) - token_owner = attr.ib(type=str) - token_used = attr.ib(type=bool, default=False) + user_id: str + is_guest: bool = False + shadow_banned: bool = False + token_id: Optional[int] = None + device_id: Optional[str] = None + valid_until_ms: Optional[int] = None + token_owner: str = attr.ib() + token_used: bool = False # Make the token owner default to the user ID, which is the common case. @token_owner.default diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index cda80d651160..4489732fdac4 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -1177,18 +1177,18 @@ def f(txn): await self.db_pool.runInteraction("forget_membership", f) -@attr.s(slots=True) +@attr.s(slots=True, auto_attribs=True) class _JoinedHostsCache: """The cached data used by the `_get_joined_hosts_cache`.""" # Dict of host to the set of their users in the room at the state group. - hosts_to_joined_users = attr.ib(type=Dict[str, Set[str]], factory=dict) + hosts_to_joined_users: Dict[str, Set[str]] = attr.Factory(dict) # The state group `hosts_to_joined_users` is derived from. Will be an object # if the instance is newly created or if the state is not based on a state # group. (An object is used as a sentinel value to ensure that it never is # equal to anything else). - state_group = attr.ib(type=Union[object, int], factory=object) + state_group: Union[object, int] = attr.Factory(object) def __len__(self): return sum(len(v) for v in self.hosts_to_joined_users.values()) diff --git a/synapse/storage/databases/main/ui_auth.py b/synapse/storage/databases/main/ui_auth.py index a1a1a6a14a85..2d339b60083b 100644 --- a/synapse/storage/databases/main/ui_auth.py +++ b/synapse/storage/databases/main/ui_auth.py @@ -23,19 +23,19 @@ from synapse.util import json_encoder, stringutils -@attr.s(slots=True) +@attr.s(slots=True, auto_attribs=True) class UIAuthSessionData: - session_id = attr.ib(type=str) + session_id: str # The dictionary from the client root level, not the 'auth' key. - clientdict = attr.ib(type=JsonDict) + clientdict: JsonDict # The URI and method the session was intiatied with. These are checked at # each stage of the authentication to ensure that the asked for operation # has not changed. - uri = attr.ib(type=str) - method = attr.ib(type=str) + uri: str + method: str # A string description of the operation that the current authentication is # authorising. 
- description = attr.ib(type=str) + description: str class UIAuthWorkerStore(SQLBaseStore): diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py index 540adb878174..71584f3f744b 100644 --- a/synapse/storage/keys.py +++ b/synapse/storage/keys.py @@ -21,7 +21,7 @@ logger = logging.getLogger(__name__) -@attr.s(slots=True, frozen=True) +@attr.s(slots=True, frozen=True, auto_attribs=True) class FetchKeyResult: - verify_key = attr.ib(type=VerifyKey) # the key itself - valid_until_ts = attr.ib(type=int) # how long we can use this key for + verify_key: VerifyKey # the key itself + valid_until_ts: int # how long we can use this key for diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index e45adfcb5569..1823e1872049 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -696,7 +696,7 @@ def _get_or_create_schema_state( ) -@attr.s(slots=True) +@attr.s(slots=True, auto_attribs=True) class _DirectoryListing: """Helper class to store schema file name and the absolute path to it. @@ -705,5 +705,5 @@ class _DirectoryListing: `file_name` attr is kept first. """ - file_name = attr.ib(type=str) - absolute_path = attr.ib(type=str) + file_name: str + absolute_path: str diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py index 10a46b5e82ed..b1536c1ca491 100644 --- a/synapse/storage/relations.py +++ b/synapse/storage/relations.py @@ -23,7 +23,7 @@ logger = logging.getLogger(__name__) -@attr.s(slots=True) +@attr.s(slots=True, auto_attribs=True) class PaginationChunk: """Returned by relation pagination APIs. @@ -35,9 +35,9 @@ class PaginationChunk: None then there are no previous results. """ - chunk = attr.ib(type=List[JsonDict]) - next_batch = attr.ib(type=Optional[Any], default=None) - prev_batch = attr.ib(type=Optional[Any], default=None) + chunk: List[JsonDict] + next_batch: Optional[Any] = None + prev_batch: Optional[Any] = None def to_dict(self) -> Dict[str, Any]: d = {"chunk": self.chunk} @@ -51,7 +51,7 @@ def to_dict(self) -> Dict[str, Any]: return d -@attr.s(frozen=True, slots=True) +@attr.s(frozen=True, slots=True, auto_attribs=True) class RelationPaginationToken: """Pagination token for relation pagination API. @@ -64,8 +64,8 @@ class RelationPaginationToken: stream: The stream ordering of the boundary event. """ - topological = attr.ib(type=int) - stream = attr.ib(type=int) + topological: int + stream: int @staticmethod def from_string(string: str) -> "RelationPaginationToken": @@ -82,7 +82,7 @@ def as_tuple(self) -> Tuple[Any, ...]: return attr.astuple(self) -@attr.s(frozen=True, slots=True) +@attr.s(frozen=True, slots=True, auto_attribs=True) class AggregationPaginationToken: """Pagination token for relation aggregation pagination API. @@ -94,8 +94,8 @@ class AggregationPaginationToken: stream: The MAX stream ordering in the boundary group. """ - count = attr.ib(type=int) - stream = attr.ib(type=int) + count: int + stream: int @staticmethod def from_string(string: str) -> "AggregationPaginationToken": diff --git a/synapse/storage/state.py b/synapse/storage/state.py index b5ba1560d139..df8b2f108877 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -45,7 +45,7 @@ T = TypeVar("T") -@attr.s(slots=True, frozen=True) +@attr.s(slots=True, frozen=True, auto_attribs=True) class StateFilter: """A filter used when querying for state. @@ -58,8 +58,8 @@ class StateFilter: appear in `types`. 
""" - types = attr.ib(type="frozendict[str, Optional[FrozenSet[str]]]") - include_others = attr.ib(default=False, type=bool) + types: "frozendict[str, Optional[FrozenSet[str]]]" + include_others: bool = False def __attrs_post_init__(self): # If `include_others` is set we canonicalise the filter by removing diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index b8112e1c0551..3c13859faabd 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -762,13 +762,13 @@ async def __aexit__( return self.inner.__exit__(exc_type, exc, tb) -@attr.s(slots=True) +@attr.s(slots=True, auto_attribs=True) class _MultiWriterCtxManager: """Async context manager returned by MultiWriterIdGenerator""" - id_gen = attr.ib(type=MultiWriterIdGenerator) - multiple_ids = attr.ib(type=Optional[int], default=None) - stream_ids = attr.ib(type=List[int], factory=list) + id_gen: MultiWriterIdGenerator + multiple_ids: Optional[int] = None + stream_ids: List[int] = attr.Factory(list) async def __aenter__(self) -> Union[int, List[int]]: # It's safe to run this in autocommit mode as fetching values from a diff --git a/synapse/streams/config.py b/synapse/streams/config.py index c08d591f29d3..b52723e2b89c 100644 --- a/synapse/streams/config.py +++ b/synapse/streams/config.py @@ -28,14 +28,14 @@ MAX_LIMIT = 1000 -@attr.s(slots=True) +@attr.s(slots=True, auto_attribs=True) class PaginationConfig: """A configuration object which stores pagination parameters.""" - from_token = attr.ib(type=Optional[StreamToken]) - to_token = attr.ib(type=Optional[StreamToken]) - direction = attr.ib(type=str) - limit = attr.ib(type=Optional[int]) + from_token: Optional[StreamToken] + to_token: Optional[StreamToken] + direction: str + limit: Optional[int] @classmethod async def from_request( diff --git a/synapse/types.py b/synapse/types.py index 74a2c5185705..f89fb216a656 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -20,6 +20,7 @@ Any, ClassVar, Dict, + List, Mapping, Match, MutableMapping, @@ -80,7 +81,7 @@ class ISynapseReactor( """The interfaces necessary for Synapse to function.""" -@attr.s(frozen=True, slots=True) +@attr.s(frozen=True, slots=True, auto_attribs=True) class Requester: """ Represents the user making a request @@ -98,13 +99,13 @@ class Requester: "puppeting" the user. """ - user = attr.ib(type="UserID") - access_token_id = attr.ib(type=Optional[int]) - is_guest = attr.ib(type=bool) - shadow_banned = attr.ib(type=bool) - device_id = attr.ib(type=Optional[str]) - app_service = attr.ib(type=Optional["ApplicationService"]) - authenticated_entity = attr.ib(type=str) + user: "UserID" + access_token_id: Optional[int] + is_guest: bool + shadow_banned: bool + device_id: Optional[str] + app_service: Optional["ApplicationService"] + authenticated_entity: str def serialize(self): """Converts self to a type that can be serialized as JSON, and then @@ -211,7 +212,7 @@ def get_localpart_from_id(string: str) -> str: DS = TypeVar("DS", bound="DomainSpecificString") -@attr.s(slots=True, frozen=True, repr=False) +@attr.s(slots=True, frozen=True, repr=False, auto_attribs=True) class DomainSpecificString(metaclass=abc.ABCMeta): """Common base class among ID/name strings that have a local part and a domain name, prefixed with a sigil. 
@@ -224,8 +225,8 @@ class DomainSpecificString(metaclass=abc.ABCMeta): SIGIL: ClassVar[str] = abc.abstractproperty() # type: ignore - localpart = attr.ib(type=str) - domain = attr.ib(type=str) + localpart: str + domain: str # Because this is a frozen class, it is deeply immutable. def __copy__(self): @@ -461,14 +462,12 @@ class RoomStreamToken: attributes, must be hashable. """ - topological = attr.ib( - type=Optional[int], + topological: Optional[int] = attr.ib( validator=attr.validators.optional(attr.validators.instance_of(int)), ) - stream = attr.ib(type=int, validator=attr.validators.instance_of(int)) + stream: int = attr.ib(validator=attr.validators.instance_of(int)) - instance_map = attr.ib( - type="frozendict[str, int]", + instance_map: "frozendict[str, int]" = attr.ib( factory=frozendict, validator=attr.validators.deep_mapping( key_validator=attr.validators.instance_of(str), @@ -477,7 +476,7 @@ class RoomStreamToken: ), ) - def __attrs_post_init__(self): + def __attrs_post_init__(self) -> None: """Validates that both `topological` and `instance_map` aren't set.""" if self.instance_map and self.topological: @@ -593,7 +592,7 @@ async def to_string(self, store: "DataStore") -> str: return "s%d" % (self.stream,) -@attr.s(slots=True, frozen=True) +@attr.s(slots=True, frozen=True, auto_attribs=True) class StreamToken: """A collection of positions within multiple streams. @@ -601,20 +600,20 @@ class StreamToken: must be hashable. """ - room_key = attr.ib( - type=RoomStreamToken, validator=attr.validators.instance_of(RoomStreamToken) + room_key: RoomStreamToken = attr.ib( + validator=attr.validators.instance_of(RoomStreamToken) ) - presence_key = attr.ib(type=int) - typing_key = attr.ib(type=int) - receipt_key = attr.ib(type=int) - account_data_key = attr.ib(type=int) - push_rules_key = attr.ib(type=int) - to_device_key = attr.ib(type=int) - device_list_key = attr.ib(type=int) - groups_key = attr.ib(type=int) + presence_key: int + typing_key: int + receipt_key: int + account_data_key: int + push_rules_key: int + to_device_key: int + device_list_key: int + groups_key: int _SEPARATOR = "_" - START: "StreamToken" + START: ClassVar["StreamToken"] @classmethod async def from_string(cls, store: "DataStore", string: str) -> "StreamToken": @@ -674,7 +673,7 @@ def copy_and_replace(self, key, new_value) -> "StreamToken": StreamToken.START = StreamToken(RoomStreamToken(None, 0), 0, 0, 0, 0, 0, 0, 0, 0) -@attr.s(slots=True, frozen=True) +@attr.s(slots=True, frozen=True, auto_attribs=True) class PersistedEventPosition: """Position of a newly persisted event with instance that persisted it. @@ -682,8 +681,8 @@ class PersistedEventPosition: RoomStreamToken. 
""" - instance_name = attr.ib(type=str) - stream = attr.ib(type=int) + instance_name: str + stream: int def persisted_after(self, token: RoomStreamToken) -> bool: return token.get_stream_pos_for_instance(self.instance_name) < self.stream @@ -733,15 +732,15 @@ def to_string(self) -> str: __str__ = to_string -@attr.s(slots=True) +@attr.s(slots=True, frozen=True, auto_attribs=True) class ReadReceipt: """Information about a read-receipt""" - room_id = attr.ib() - receipt_type = attr.ib() - user_id = attr.ib() - event_ids = attr.ib() - data = attr.ib() + room_id: str + receipt_type: str + user_id: str + event_ids: List[str] + data: JsonDict def get_verify_key_from_cross_signing_key(key_info): diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 150a04b53e17..3f7299aff7eb 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -309,12 +309,12 @@ def gather_results( # type: ignore[misc] return deferred.addCallback(tuple) -@attr.s(slots=True) +@attr.s(slots=True, auto_attribs=True) class _LinearizerEntry: # The number of things executing. - count = attr.ib(type=int) + count: int # Deferreds for the things blocked from executing. - deferreds = attr.ib(type=collections.OrderedDict) + deferreds: collections.OrderedDict class Linearizer: diff --git a/synapse/util/caches/dictionary_cache.py b/synapse/util/caches/dictionary_cache.py index 485ddb189373..d267703df0f2 100644 --- a/synapse/util/caches/dictionary_cache.py +++ b/synapse/util/caches/dictionary_cache.py @@ -33,7 +33,7 @@ # This class can't be generic because it uses slots with attrs. # See: https://github.com/python-attrs/attrs/issues/313 -@attr.s(slots=True) +@attr.s(slots=True, auto_attribs=True) class DictionaryEntry: # should be: Generic[DKT, DV]. """Returned when getting an entry from the cache @@ -41,14 +41,13 @@ class DictionaryEntry: # should be: Generic[DKT, DV]. full: Whether the cache has the full or dict or just some keys. If not full then not all requested keys will necessarily be present in `value` - known_absent: Keys that were looked up in the dict and were not - there. + known_absent: Keys that were looked up in the dict and were not there. value: The full or partial dict value """ - full = attr.ib(type=bool) - known_absent = attr.ib(type=Set[Any]) # should be: Set[DKT] - value = attr.ib(type=Dict[Any, Any]) # should be: Dict[DKT, DV] + full: bool + known_absent: Set[Any] # should be: Set[DKT] + value: Dict[Any, Any] # should be: Dict[DKT, DV] def __len__(self) -> int: return len(self.value) From 20c6d85c6e8f721b8688b7f1361c7a7bab2449fd Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 13 Jan 2022 14:35:52 +0000 Subject: [PATCH 086/474] Simplify GC prometheus metrics (#11723) Rather than hooking into the reactor loop, just add a timed task that runs every 100 ms to do the garbage collection. Part 1 of a quest to simplify the reactor monkey-patching. --- changelog.d/11723.misc | 1 + synapse/app/_base.py | 3 +- synapse/metrics/__init__.py | 162 +--------------------------- synapse/metrics/_gc.py | 203 ++++++++++++++++++++++++++++++++++++ 4 files changed, 209 insertions(+), 160 deletions(-) create mode 100644 changelog.d/11723.misc create mode 100644 synapse/metrics/_gc.py diff --git a/changelog.d/11723.misc b/changelog.d/11723.misc new file mode 100644 index 000000000000..f99e02070a93 --- /dev/null +++ b/changelog.d/11723.misc @@ -0,0 +1 @@ +Simplify calculation of prometheus metrics for garbage collection. 
diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 5fc59c1be11d..579adbbca02d 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -60,7 +60,7 @@ from synapse.events.third_party_rules import load_legacy_third_party_event_rules from synapse.handlers.auth import load_legacy_password_auth_providers from synapse.logging.context import PreserveLoggingContext -from synapse.metrics import register_threadpool +from synapse.metrics import install_gc_manager, register_threadpool from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.metrics.jemalloc import setup_jemalloc_stats from synapse.types import ISynapseReactor @@ -159,6 +159,7 @@ def run() -> None: change_resource_limit(soft_file_limit) if gc_thresholds: gc.set_threshold(*gc_thresholds) + install_gc_manager() run_command() # make sure that we run the reactor with the sentinel log context, diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 269c2b989e7a..ba7ca0f2d4ed 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -13,7 +13,6 @@ # limitations under the License. import functools -import gc import itertools import logging import os @@ -41,7 +40,6 @@ from prometheus_client import CollectorRegistry, Counter, Gauge, Histogram, Metric from prometheus_client.core import ( REGISTRY, - CounterMetricFamily, GaugeHistogramMetricFamily, GaugeMetricFamily, ) @@ -56,13 +54,13 @@ generate_latest, start_http_server, ) +from synapse.metrics._gc import MIN_TIME_BETWEEN_GCS, install_gc_manager from synapse.util.versionstring import get_version_string logger = logging.getLogger(__name__) METRICS_PREFIX = "/_synapse/metrics" -running_on_pypy = platform.python_implementation() == "PyPy" all_gauges: "Dict[str, Union[LaterGauge, InFlightGauge]]" = {} HAVE_PROC_SELF_STAT = os.path.exists("/proc/self/stat") @@ -369,121 +367,6 @@ def collect(self) -> Iterable[Metric]: REGISTRY.register(CPUMetrics()) -# -# Python GC metrics -# - -gc_unreachable = Gauge("python_gc_unreachable_total", "Unreachable GC objects", ["gen"]) -gc_time = Histogram( - "python_gc_time", - "Time taken to GC (sec)", - ["gen"], - buckets=[ - 0.0025, - 0.005, - 0.01, - 0.025, - 0.05, - 0.10, - 0.25, - 0.50, - 1.00, - 2.50, - 5.00, - 7.50, - 15.00, - 30.00, - 45.00, - 60.00, - ], -) - - -class GCCounts: - def collect(self) -> Iterable[Metric]: - cm = GaugeMetricFamily("python_gc_counts", "GC object counts", labels=["gen"]) - for n, m in enumerate(gc.get_count()): - cm.add_metric([str(n)], m) - - yield cm - - -if not running_on_pypy: - REGISTRY.register(GCCounts()) - - -# -# PyPy GC / memory metrics -# - - -class PyPyGCStats: - def collect(self) -> Iterable[Metric]: - - # @stats is a pretty-printer object with __str__() returning a nice table, - # plus some fields that contain data from that table. - # unfortunately, fields are pretty-printed themselves (i. e. '4.5MB'). - stats = gc.get_stats(memory_pressure=False) # type: ignore - # @s contains same fields as @stats, but as actual integers. - s = stats._s # type: ignore - - # also note that field naming is completely braindead - # and only vaguely correlates with the pretty-printed table. 
- # >>>> gc.get_stats(False) - # Total memory consumed: - # GC used: 8.7MB (peak: 39.0MB) # s.total_gc_memory, s.peak_memory - # in arenas: 3.0MB # s.total_arena_memory - # rawmalloced: 1.7MB # s.total_rawmalloced_memory - # nursery: 4.0MB # s.nursery_size - # raw assembler used: 31.0kB # s.jit_backend_used - # ----------------------------- - # Total: 8.8MB # stats.memory_used_sum - # - # Total memory allocated: - # GC allocated: 38.7MB (peak: 41.1MB) # s.total_allocated_memory, s.peak_allocated_memory - # in arenas: 30.9MB # s.peak_arena_memory - # rawmalloced: 4.1MB # s.peak_rawmalloced_memory - # nursery: 4.0MB # s.nursery_size - # raw assembler allocated: 1.0MB # s.jit_backend_allocated - # ----------------------------- - # Total: 39.7MB # stats.memory_allocated_sum - # - # Total time spent in GC: 0.073 # s.total_gc_time - - pypy_gc_time = CounterMetricFamily( - "pypy_gc_time_seconds_total", - "Total time spent in PyPy GC", - labels=[], - ) - pypy_gc_time.add_metric([], s.total_gc_time / 1000) - yield pypy_gc_time - - pypy_mem = GaugeMetricFamily( - "pypy_memory_bytes", - "Memory tracked by PyPy allocator", - labels=["state", "class", "kind"], - ) - # memory used by JIT assembler - pypy_mem.add_metric(["used", "", "jit"], s.jit_backend_used) - pypy_mem.add_metric(["allocated", "", "jit"], s.jit_backend_allocated) - # memory used by GCed objects - pypy_mem.add_metric(["used", "", "arenas"], s.total_arena_memory) - pypy_mem.add_metric(["allocated", "", "arenas"], s.peak_arena_memory) - pypy_mem.add_metric(["used", "", "rawmalloced"], s.total_rawmalloced_memory) - pypy_mem.add_metric(["allocated", "", "rawmalloced"], s.peak_rawmalloced_memory) - pypy_mem.add_metric(["used", "", "nursery"], s.nursery_size) - pypy_mem.add_metric(["allocated", "", "nursery"], s.nursery_size) - # totals - pypy_mem.add_metric(["used", "totals", "gc"], s.total_gc_memory) - pypy_mem.add_metric(["allocated", "totals", "gc"], s.total_allocated_memory) - pypy_mem.add_metric(["used", "totals", "gc_peak"], s.peak_memory) - pypy_mem.add_metric(["allocated", "totals", "gc_peak"], s.peak_allocated_memory) - yield pypy_mem - - -if running_on_pypy: - REGISTRY.register(PyPyGCStats()) - # # Twisted reactor metrics @@ -612,14 +495,6 @@ def collect(self) -> Iterable[Metric]: REGISTRY.register(ReactorLastSeenMetric()) -# The minimum time in seconds between GCs for each generation, regardless of the current GC -# thresholds and counts. -MIN_TIME_BETWEEN_GCS = (1.0, 10.0, 30.0) - -# The time (in seconds since the epoch) of the last time we did a GC for each generation. -_last_gc = [0.0, 0.0, 0.0] - - F = TypeVar("F", bound=Callable[..., Any]) @@ -658,34 +533,6 @@ def f(*args: Any, **kwargs: Any) -> Any: global last_ticked last_ticked = end - if running_on_pypy: - return ret - - # Check if we need to do a manual GC (since its been disabled), and do - # one if necessary. Note we go in reverse order as e.g. a gen 1 GC may - # promote an object into gen 2, and we don't want to handle the same - # object multiple times. - threshold = gc.get_threshold() - counts = gc.get_count() - for i in (2, 1, 0): - # We check if we need to do one based on a straightforward - # comparison between the threshold and count. We also do an extra - # check to make sure that we don't a GC too often. 
-            if threshold[i] < counts[i] and MIN_TIME_BETWEEN_GCS[i] < end - _last_gc[i]:
-                if i == 0:
-                    logger.debug("Collecting gc %d", i)
-                else:
-                    logger.info("Collecting gc %d", i)
-
-                start = time.time()
-                unreachable = gc.collect(i)
-                end = time.time()
-
-                _last_gc[i] = end
-
-                gc_time.labels(i).observe(end - start)
-                gc_unreachable.labels(i).set(unreachable)
-
         return ret
 
     return cast(F, f)
@@ -701,11 +548,6 @@ def f(*args: Any, **kwargs: Any) -> Any:
     # runUntilCurrent is called when we have pending calls. It is called once
     # per iteratation after fd polling.
     reactor.runUntilCurrent = runUntilCurrentTimer(reactor, reactor.runUntilCurrent)  # type: ignore
-
-    # We manually run the GC each reactor tick so that we can get some metrics
-    # about time spent doing GC,
-    if not running_on_pypy:
-        gc.disable()
 except AttributeError:
     pass
@@ -717,4 +559,6 @@ def f(*args: Any, **kwargs: Any) -> Any:
     "LaterGauge",
     "InFlightGauge",
     "GaugeBucketCollector",
+    "MIN_TIME_BETWEEN_GCS",
+    "install_gc_manager",
 ]
diff --git a/synapse/metrics/_gc.py b/synapse/metrics/_gc.py
new file mode 100644
index 000000000000..2bc909efa0d3
--- /dev/null
+++ b/synapse/metrics/_gc.py
@@ -0,0 +1,203 @@
+# Copyright 2015-2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import gc
+import logging
+import platform
+import time
+from typing import Iterable
+
+from prometheus_client.core import (
+    REGISTRY,
+    CounterMetricFamily,
+    Gauge,
+    GaugeMetricFamily,
+    Histogram,
+    Metric,
+)
+
+from twisted.internet import task
+
+"""Prometheus metrics for garbage collection"""
+
+
+logger = logging.getLogger(__name__)
+
+# The minimum time in seconds between GCs for each generation, regardless of the current GC
+# thresholds and counts.
+MIN_TIME_BETWEEN_GCS = (1.0, 10.0, 30.0)
+
+running_on_pypy = platform.python_implementation() == "PyPy"
+
+#
+# Python GC metrics
+#
+
+gc_unreachable = Gauge("python_gc_unreachable_total", "Unreachable GC objects", ["gen"])
+gc_time = Histogram(
+    "python_gc_time",
+    "Time taken to GC (sec)",
+    ["gen"],
+    buckets=[
+        0.0025,
+        0.005,
+        0.01,
+        0.025,
+        0.05,
+        0.10,
+        0.25,
+        0.50,
+        1.00,
+        2.50,
+        5.00,
+        7.50,
+        15.00,
+        30.00,
+        45.00,
+        60.00,
+    ],
+)
+
+
+class GCCounts:
+    def collect(self) -> Iterable[Metric]:
+        cm = GaugeMetricFamily("python_gc_counts", "GC object counts", labels=["gen"])
+        for n, m in enumerate(gc.get_count()):
+            cm.add_metric([str(n)], m)
+
+        yield cm
+
+
+def install_gc_manager() -> None:
+    """Disable automatic GC, and replace it with a task that runs every 100ms
+
+    This means that (a) we can limit how often GC runs; (b) we can get some metrics
+    about GC activity.
+
+    It does nothing on PyPy.
+    """
+
+    if running_on_pypy:
+        return
+
+    REGISTRY.register(GCCounts())
+
+    gc.disable()
+
+    # The time (in seconds since the epoch) of the last time we did a GC for each generation.
+    _last_gc = [0.0, 0.0, 0.0]
+
+    def _maybe_gc() -> None:
+        # Check if we need to do a manual GC (since it's been disabled), and do
+        # one if necessary. Note we go in reverse order as e.g. a gen 1 GC may
+        # promote an object into gen 2, and we don't want to handle the same
+        # object multiple times.
+        threshold = gc.get_threshold()
+        counts = gc.get_count()
+        end = time.time()
+        for i in (2, 1, 0):
+            # We check if we need to do one based on a straightforward
+            # comparison between the threshold and count. We also do an extra
+            # check to make sure that we don't do a GC too often.
+            if threshold[i] < counts[i] and MIN_TIME_BETWEEN_GCS[i] < end - _last_gc[i]:
+                if i == 0:
+                    logger.debug("Collecting gc %d", i)
+                else:
+                    logger.info("Collecting gc %d", i)
+
+                start = time.time()
+                unreachable = gc.collect(i)
+                end = time.time()
+
+                _last_gc[i] = end
+
+                gc_time.labels(i).observe(end - start)
+                gc_unreachable.labels(i).set(unreachable)
+
+    gc_task = task.LoopingCall(_maybe_gc)
+    gc_task.start(0.1)
+
+
+#
+# PyPy GC / memory metrics
+#
+
+
+class PyPyGCStats:
+    def collect(self) -> Iterable[Metric]:
+
+        # @stats is a pretty-printer object with __str__() returning a nice table,
+        # plus some fields that contain data from that table.
+        # unfortunately, fields are pretty-printed themselves (i.e. '4.5MB').
+        stats = gc.get_stats(memory_pressure=False)  # type: ignore
+        # @s contains same fields as @stats, but as actual integers.
+        s = stats._s  # type: ignore
+
+        # also note that field naming is completely braindead
+        # and only vaguely correlates with the pretty-printed table.
+        # >>>> gc.get_stats(False)
+        # Total memory consumed:
+        #     GC used:            8.7MB (peak: 39.0MB)        # s.total_gc_memory, s.peak_memory
+        #        in arenas:            3.0MB                  # s.total_arena_memory
+        #        rawmalloced:          1.7MB                  # s.total_rawmalloced_memory
+        #        nursery:              4.0MB                  # s.nursery_size
+        #     raw assembler used: 31.0kB                      # s.jit_backend_used
+        #     -----------------------------
+        #     Total:              8.8MB                       # stats.memory_used_sum
+        #
+        # Total memory allocated:
+        #     GC allocated:            38.7MB (peak: 41.1MB)  # s.total_allocated_memory, s.peak_allocated_memory
+        #        in arenas:            30.9MB                 # s.peak_arena_memory
+        #        rawmalloced:          4.1MB                  # s.peak_rawmalloced_memory
+        #        nursery:              4.0MB                  # s.nursery_size
+        #     raw assembler allocated: 1.0MB                  # s.jit_backend_allocated
+        #     -----------------------------
+        #     Total:                   39.7MB                 # stats.memory_allocated_sum
+        #
+        # Total time spent in GC:  0.073                      # s.total_gc_time
+
+        pypy_gc_time = CounterMetricFamily(
+            "pypy_gc_time_seconds_total",
+            "Total time spent in PyPy GC",
+            labels=[],
+        )
+        pypy_gc_time.add_metric([], s.total_gc_time / 1000)
+        yield pypy_gc_time
+
+        pypy_mem = GaugeMetricFamily(
+            "pypy_memory_bytes",
+            "Memory tracked by PyPy allocator",
+            labels=["state", "class", "kind"],
+        )
+        # memory used by JIT assembler
+        pypy_mem.add_metric(["used", "", "jit"], s.jit_backend_used)
+        pypy_mem.add_metric(["allocated", "", "jit"], s.jit_backend_allocated)
+        # memory used by GCed objects
+        pypy_mem.add_metric(["used", "", "arenas"], s.total_arena_memory)
+        pypy_mem.add_metric(["allocated", "", "arenas"], s.peak_arena_memory)
+        pypy_mem.add_metric(["used", "", "rawmalloced"], s.total_rawmalloced_memory)
+        pypy_mem.add_metric(["allocated", "", "rawmalloced"], s.peak_rawmalloced_memory)
+        pypy_mem.add_metric(["used", "", "nursery"], s.nursery_size)
+        pypy_mem.add_metric(["allocated", "", "nursery"], s.nursery_size)
+        # totals
+        pypy_mem.add_metric(["used", "totals", "gc"], s.total_gc_memory)
+        pypy_mem.add_metric(["allocated", "totals", "gc"], s.total_allocated_memory)
+        pypy_mem.add_metric(["used", "totals", "gc_peak"], s.peak_memory)
+        pypy_mem.add_metric(["allocated", "totals", "gc_peak"], s.peak_allocated_memory)
+
yield pypy_mem + + +if running_on_pypy: + REGISTRY.register(PyPyGCStats()) From 0c40c619aa8c8240ab75970e195fe73695df978b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 13 Jan 2022 10:45:28 -0500 Subject: [PATCH 087/474] Include bundled aggregations in the sync response cache. (#11659) --- changelog.d/11659.bugfix | 1 + synapse/handlers/sync.py | 10 ++++++++++ synapse/rest/client/sync.py | 17 +++-------------- tests/rest/client/test_relations.py | 10 +++++----- 4 files changed, 19 insertions(+), 19 deletions(-) create mode 100644 changelog.d/11659.bugfix diff --git a/changelog.d/11659.bugfix b/changelog.d/11659.bugfix new file mode 100644 index 000000000000..842f6892fda3 --- /dev/null +++ b/changelog.d/11659.bugfix @@ -0,0 +1 @@ +Include the bundled aggregations in the `/sync` response, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 4b3f1ea0595c..e1df9b310635 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -98,6 +98,9 @@ class TimelineBatch: prev_batch: StreamToken events: List[EventBase] limited: bool + # A mapping of event ID to the bundled aggregations for the above events. + # This is only calculated if limited is true. + bundled_aggregations: Optional[Dict[str, Dict[str, Any]]] = None def __bool__(self) -> bool: """Make the result appear empty if there are no updates. This is used @@ -630,10 +633,17 @@ async def _load_filtered_recents( prev_batch_token = now_token.copy_and_replace("room_key", room_key) + # Don't bother to bundle aggregations if the timeline is unlimited, + # as clients will have all the necessary information. + bundled_aggregations = None + if limited or newly_joined_room: + bundled_aggregations = await self.store.get_bundled_aggregations(recents) + return TimelineBatch( events=recents, prev_batch=prev_batch_token, limited=limited or newly_joined_room, + bundled_aggregations=bundled_aggregations, ) async def get_state_after_event( diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index a3e57e4b20ab..d20ae1421e19 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -554,20 +554,9 @@ def serialize( ) serialized_state = serialize(state_events) - # Don't bother to bundle aggregations if the timeline is unlimited, - # as clients will have all the necessary information. - # bundle_aggregations=room.timeline.limited, - # - # richvdh 2021-12-15: disable this temporarily as it has too high an - # overhead for initialsyncs. We need to figure out a way that the - # bundling can be done *before* the events are stored in the - # SyncResponseCache so that this part can be synchronous. - # - # Ensure to re-enable the test at tests/rest/client/test_relations.py::RelationsTestCase.test_bundled_aggregations. - # if room.timeline.limited: - # aggregations = await self.store.get_bundled_aggregations(timeline_events) - aggregations = None - serialized_timeline = serialize(timeline_events, aggregations) + serialized_timeline = serialize( + timeline_events, room.timeline.bundled_aggregations + ) account_data = room.account_data diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index ff4e81d0695f..ee26751430f0 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -572,11 +572,11 @@ def _find_and_assert_event(events): assert_bundle(channel.json_body["event"]["unsigned"].get("m.relations")) # Request sync. 
- # channel = self.make_request("GET", "/sync", access_token=self.user_token) - # self.assertEquals(200, channel.code, channel.json_body) - # room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] - # self.assertTrue(room_timeline["limited"]) - # _find_and_assert_event(room_timeline["events"]) + channel = self.make_request("GET", "/sync", access_token=self.user_token) + self.assertEquals(200, channel.code, channel.json_body) + room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] + self.assertTrue(room_timeline["limited"]) + _find_and_assert_event(room_timeline["events"]) # Note that /relations is tested separately in test_aggregation_get_event_for_thread # since it needs different data configured. From 5ff5f17377432645f65bc614228783cccb69d20c Mon Sep 17 00:00:00 2001 From: Andy Balaam Date: Thu, 13 Jan 2022 16:33:37 +0000 Subject: [PATCH 088/474] Mention python3-venv and libpq-dev dependencies in contribution guide (#11740) --- changelog.d/11740.doc | 1 + docs/development/contributing_guide.md | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11740.doc diff --git a/changelog.d/11740.doc b/changelog.d/11740.doc new file mode 100644 index 000000000000..dce080a5e9a1 --- /dev/null +++ b/changelog.d/11740.doc @@ -0,0 +1 @@ +Mention python3-venv and libpq-dev dependencies in contribution guide. diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index 121f7c0687f5..c14298169351 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -20,7 +20,9 @@ recommended for development. More information about WSL can be found at . Running Synapse natively on Windows is not officially supported. -The code of Synapse is written in Python 3. To do pretty much anything, you'll need [a recent version of Python 3](https://wiki.python.org/moin/BeginnersGuide/Download). +The code of Synapse is written in Python 3. To do pretty much anything, you'll need [a recent version of Python 3](https://www.python.org/downloads/). Your Python also needs support for [virtual environments](https://docs.python.org/3/library/venv.html). This is usually built-in, but some Linux distributions like Debian and Ubuntu split it out into its own package. Running `sudo apt install python3-venv` should be enough. + +Synapse can connect to PostgreSQL via the [psycopg2](https://pypi.org/project/psycopg2/) Python library. Building this library from source requires access to PostgreSQL's C header files. On Debian or Ubuntu Linux, these can be installed with `sudo apt install libpq-dev`. The source code of Synapse is hosted on GitHub. You will also need [a recent version of git](https://github.com/git-guides/install-git). From b9632046fb79a3910cda453d0d173c8ee5d39f7b Mon Sep 17 00:00:00 2001 From: qwertyforce Date: Thu, 13 Jan 2022 20:09:15 +0300 Subject: [PATCH 089/474] update room spec url in config files (#11739) * change spec url in config files * Create 11739.txt * .txt -> .doc --- changelog.d/11739.doc | 1 + docs/sample_config.yaml | 2 +- synapse/config/server.py | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/11739.doc diff --git a/changelog.d/11739.doc b/changelog.d/11739.doc new file mode 100644 index 000000000000..3d64f473f535 --- /dev/null +++ b/changelog.d/11739.doc @@ -0,0 +1 @@ +Update room spec url in config files. 
\ No newline at end of file diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 26894fae349a..907e067e5163 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -164,7 +164,7 @@ presence: # The default room version for newly created rooms. # # Known room versions are listed here: -# https://matrix.org/docs/spec/#complete-list-of-room-versions +# https://spec.matrix.org/latest/rooms/#complete-list-of-room-versions # # For example, for room version 1, default_room_version should be set # to "1". diff --git a/synapse/config/server.py b/synapse/config/server.py index 2c2b461cac2e..5010266b6983 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -883,7 +883,7 @@ def generate_config_section( # The default room version for newly created rooms. # # Known room versions are listed here: - # https://matrix.org/docs/spec/#complete-list-of-room-versions + # https://spec.matrix.org/latest/rooms/#complete-list-of-room-versions # # For example, for room version 1, default_room_version should be set # to "1". From b602ba194bf9d8066be5fabd403f6d60f5b9883a Mon Sep 17 00:00:00 2001 From: reivilibre Date: Thu, 13 Jan 2022 18:12:18 +0000 Subject: [PATCH 090/474] Fix a bug introduced in Synapse v1.50.0rc1 whereby outbound federation could fail because too many EDUs were produced for device updates. (#11730) Co-authored-by: David Robertson --- changelog.d/11730.bugfix | 1 + synapse/storage/databases/main/devices.py | 94 ++++++++++++++---- tests/storage/test_devices.py | 112 +++++++++++++++++++++- 3 files changed, 190 insertions(+), 17 deletions(-) create mode 100644 changelog.d/11730.bugfix diff --git a/changelog.d/11730.bugfix b/changelog.d/11730.bugfix new file mode 100644 index 000000000000..a0bd7dd1a3ba --- /dev/null +++ b/changelog.d/11730.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse v1.50.0rc1 whereby outbound federation could fail because too many EDUs were produced for device updates. \ No newline at end of file diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 324bd5f879d3..bc7e8760470f 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -191,7 +191,7 @@ async def get_devices_by_auth_provider_session_id( @trace async def get_device_updates_by_remote( self, destination: str, from_stream_id: int, limit: int - ) -> Tuple[int, List[Tuple[str, dict]]]: + ) -> Tuple[int, List[Tuple[str, JsonDict]]]: """Get a stream of device updates to send to the given remote server. Args: @@ -200,9 +200,10 @@ async def get_device_updates_by_remote( limit: Maximum number of device updates to return Returns: - A mapping from the current stream id (ie, the stream id of the last - update included in the response), and the list of updates, where - each update is a pair of EDU type and EDU contents. + - The current stream id (i.e. the stream id of the last update included + in the response); and + - The list of updates, where each update is a pair of EDU type and + EDU contents. """ now_stream_id = self.get_device_stream_token() @@ -221,6 +222,9 @@ async def get_device_updates_by_remote( limit, ) + # We need to ensure `updates` doesn't grow too big. + # Currently: `len(updates) <= limit`. 
+ # Return an empty list if there are no updates if not updates: return now_stream_id, [] @@ -277,16 +281,43 @@ async def get_device_updates_by_remote( query_map = {} cross_signing_keys_by_user = {} for user_id, device_id, update_stream_id, update_context in updates: - if ( + # Calculate the remaining length budget. + # Note that, for now, each entry in `cross_signing_keys_by_user` + # gives rise to two device updates in the result, so those cost twice + # as much (and are the whole reason we need to separately calculate + # the budget; we know len(updates) <= limit otherwise!) + # N.B. len() on dicts is cheap since they store their size. + remaining_length_budget = limit - ( + len(query_map) + 2 * len(cross_signing_keys_by_user) + ) + assert remaining_length_budget >= 0 + + is_master_key_update = ( user_id in master_key_by_user and device_id == master_key_by_user[user_id]["device_id"] - ): - result = cross_signing_keys_by_user.setdefault(user_id, {}) - result["master_key"] = master_key_by_user[user_id]["key_info"] - elif ( + ) + is_self_signing_key_update = ( user_id in self_signing_key_by_user and device_id == self_signing_key_by_user[user_id]["device_id"] + ) + + is_cross_signing_key_update = ( + is_master_key_update or is_self_signing_key_update + ) + + if ( + is_cross_signing_key_update + and user_id not in cross_signing_keys_by_user ): + # This will give rise to 2 device updates. + # If we don't have the budget, stop here! + if remaining_length_budget < 2: + break + + if is_master_key_update: + result = cross_signing_keys_by_user.setdefault(user_id, {}) + result["master_key"] = master_key_by_user[user_id]["key_info"] + elif is_self_signing_key_update: result = cross_signing_keys_by_user.setdefault(user_id, {}) result["self_signing_key"] = self_signing_key_by_user[user_id][ "key_info" @@ -294,23 +325,44 @@ async def get_device_updates_by_remote( else: key = (user_id, device_id) + if key not in query_map and remaining_length_budget < 1: + # We don't have space for a new entry + break + previous_update_stream_id, _ = query_map.get(key, (0, None)) if update_stream_id > previous_update_stream_id: + # FIXME If this overwrites an older update, this discards the + # previous OpenTracing context. + # It might make it harder to track down issues using OpenTracing. + # If there's a good reason why it doesn't matter, a comment here + # about that would not hurt. query_map[key] = (update_stream_id, update_context) + # As this update has been added to the response, advance the stream + # position. last_processed_stream_id = update_stream_id + # In the worst case scenario, each update is for a distinct user and is + # added either to the query_map or to cross_signing_keys_by_user, + # but not both: + # len(query_map) + len(cross_signing_keys_by_user) <= len(updates) here, + # so len(query_map) + len(cross_signing_keys_by_user) <= limit. + results = await self._get_device_update_edus_by_remote( destination, from_stream_id, query_map ) - # add the updated cross-signing keys to the results list + # len(results) <= len(query_map) here, + # so len(results) + len(cross_signing_keys_by_user) <= limit. + + # Add the updated cross-signing keys to the results list for user_id, result in cross_signing_keys_by_user.items(): result["user_id"] = user_id results.append(("m.signing_key_update", result)) # also send the unstable version # FIXME: remove this when enough servers have upgraded + # and remove the length budgeting above. 
results.append(("org.matrix.signing_key_update", result)) return last_processed_stream_id, results @@ -322,7 +374,7 @@ def _get_device_updates_by_remote_txn( from_stream_id: int, now_stream_id: int, limit: int, - ): + ) -> List[Tuple[str, str, int, Optional[str]]]: """Return device update information for a given remote destination Args: @@ -333,7 +385,11 @@ def _get_device_updates_by_remote_txn( limit: Maximum number of device updates to return Returns: - List: List of device updates + List: List of device update tuples: + - user_id + - device_id + - stream_id + - opentracing_context """ # get the list of device updates that need to be sent sql = """ @@ -357,15 +413,21 @@ async def _get_device_update_edus_by_remote( Args: destination: The host the device updates are intended for from_stream_id: The minimum stream_id to filter updates by, exclusive - query_map (Dict[(str, str): (int, str|None)]): Dictionary mapping - user_id/device_id to update stream_id and the relevant json-encoded - opentracing context + query_map: Dictionary mapping (user_id, device_id) to + (update stream_id, the relevant json-encoded opentracing context) Returns: - List of objects representing an device update EDU + List of objects representing a device update EDU. + + Postconditions: + The returned list has a length not exceeding that of the query_map: + len(result) <= len(query_map) """ devices = ( await self.get_e2e_device_keys_and_signatures( + # Because these are (user_id, device_id) tuples with all + # device_ids not being None, the returned list's length will not + # exceed that of query_map. query_map.keys(), include_all_devices=True, include_deleted_devices=True, diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py index 5cd7f6bb7ab2..b547bf8d9978 100644 --- a/tests/storage/test_devices.py +++ b/tests/storage/test_devices.py @@ -125,7 +125,7 @@ def test_get_device_updates_by_remote_can_limit_properly(self): self.store.add_device_change_to_streams("user_id", device_ids, ["somehost"]) ) - # Get all device updates ever meant for this remote + # Get device updates meant for this remote next_stream_id, device_updates = self.get_success( self.store.get_device_updates_by_remote("somehost", -1, limit=3) ) @@ -155,6 +155,116 @@ def test_get_device_updates_by_remote_can_limit_properly(self): # Check the newly-added device_ids are contained within these updates self._check_devices_in_updates(device_ids, device_updates) + # Check there are no more device updates left. + _, device_updates = self.get_success( + self.store.get_device_updates_by_remote("somehost", next_stream_id, limit=3) + ) + self.assertEqual(device_updates, []) + + def test_get_device_updates_by_remote_cross_signing_key_updates( + self, + ) -> None: + """ + Tests that `get_device_updates_by_remote` limits the length of the return value + properly when cross-signing key updates are present. + Current behaviour is that the cross-signing key updates will always come in pairs, + even if that means leaving an earlier batch one EDU short of the limit. 
+ """ + + assert self.hs.is_mine_id( + "@user_id:test" + ), "Test not valid: this MXID should be considered local" + + self.get_success( + self.store.set_e2e_cross_signing_key( + "@user_id:test", + "master", + { + "keys": { + "ed25519:fakeMaster": "aaafakefakefake1AAAAAAAAAAAAAAAAAAAAAAAAAAA=" + }, + "signatures": { + "@user_id:test": { + "ed25519:fake2": "aaafakefakefake2AAAAAAAAAAAAAAAAAAAAAAAAAAA=" + } + }, + }, + ) + ) + self.get_success( + self.store.set_e2e_cross_signing_key( + "@user_id:test", + "self_signing", + { + "keys": { + "ed25519:fakeSelfSigning": "aaafakefakefake3AAAAAAAAAAAAAAAAAAAAAAAAAAA=" + }, + "signatures": { + "@user_id:test": { + "ed25519:fake4": "aaafakefakefake4AAAAAAAAAAAAAAAAAAAAAAAAAAA=" + } + }, + }, + ) + ) + + # Add some device updates with sequential `stream_id`s + # Note that the public cross-signing keys occupy the same space as device IDs, + # so also notify that those have updated. + device_ids = [ + "device_id1", + "device_id2", + "fakeMaster", + "fakeSelfSigning", + ] + + self.get_success( + self.store.add_device_change_to_streams( + "@user_id:test", device_ids, ["somehost"] + ) + ) + + # Get device updates meant for this remote + next_stream_id, device_updates = self.get_success( + self.store.get_device_updates_by_remote("somehost", -1, limit=3) + ) + + # Here we expect the device updates for `device_id1` and `device_id2`. + # That means we only receive 2 updates this time around. + # If we had a higher limit, we would expect to see the pair of + # (unstable-prefixed & unprefixed) signing key updates for the device + # represented by `fakeMaster` and `fakeSelfSigning`. + # Our implementation only sends these two variants together, so we get + # a short batch. + self.assertEqual(len(device_updates), 2, device_updates) + + # Check the first two devices (device_id1, device_id2) came out. + self._check_devices_in_updates(device_ids[:2], device_updates) + + # Get more device updates meant for this remote + next_stream_id, device_updates = self.get_success( + self.store.get_device_updates_by_remote("somehost", next_stream_id, limit=3) + ) + + # The next 2 updates should be a cross-signing key update + # (the master key update and the self-signing key update are combined into + # one 'signing key update', but the cross-signing key update is emitted + # twice, once with an unprefixed type and once again with an unstable-prefixed type) + # (This is a temporary arrangement for backwards compatibility!) + self.assertEqual(len(device_updates), 2, device_updates) + self.assertEqual( + device_updates[0][0], "m.signing_key_update", device_updates[0] + ) + self.assertEqual( + device_updates[1][0], "org.matrix.signing_key_update", device_updates[1] + ) + + # Check there are no more device updates left. 
+ _, device_updates = self.get_success( + self.store.get_device_updates_by_remote("somehost", next_stream_id, limit=3) + ) + self.assertEqual(device_updates, []) + def _check_devices_in_updates(self, expected_device_ids, device_updates): """Check that an specific device ids exist in a list of device update EDUs""" self.assertEqual(len(device_updates), len(expected_device_ids)) From 4ca8fcdd5ac0af0afc14c25e04035ad811a46096 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 13 Jan 2022 18:12:59 +0000 Subject: [PATCH 091/474] Invite PR submitters to credit themselves (#11744) Co-authored-by: Brendan Abolivier --- .github/PULL_REQUEST_TEMPLATE.md | 1 + changelog.d/11744.misc | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/11744.misc diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 6c3a99849925..0dfab4e087cf 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -8,6 +8,7 @@ - Use markdown where necessary, mostly for `code blocks`. - End with either a period (.) or an exclamation mark (!). - Start with a capital letter. + - Feel free to credit yourself, by adding a sentence "Contributed by @github_username." or "Contributed by [Your Name]." to the end of the entry. * [ ] Pull request includes a [sign off](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#sign-off) * [ ] [Code style](https://matrix-org.github.io/synapse/latest/code_style.html) is correct (run the [linters](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#run-the-linters)) diff --git a/changelog.d/11744.misc b/changelog.d/11744.misc new file mode 100644 index 000000000000..b7df14657ac1 --- /dev/null +++ b/changelog.d/11744.misc @@ -0,0 +1 @@ +Invite PR authors to give themselves credit in the changelog. From d70169bf9b4b4a2e8369ae7bd5af798cb8945fa3 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 13 Jan 2022 20:19:10 +0000 Subject: [PATCH 092/474] Fix missing app variable in mail subject (#11745) documentation claims that you can use the %(app)s variable in password_reset and email_validation subjects, but if you do you end up with an error 500 Co-authored-by: br4nnigan <10244835+br4nnigan@users.noreply.github.com> --- changelog.d/11710.bugfix | 1 + changelog.d/11745.bugfix | 1 + synapse/push/mailer.py | 4 ++-- 3 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 changelog.d/11710.bugfix create mode 100644 changelog.d/11745.bugfix diff --git a/changelog.d/11710.bugfix b/changelog.d/11710.bugfix new file mode 100644 index 000000000000..6521a37f6eb9 --- /dev/null +++ b/changelog.d/11710.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse v1.18.0 where password reset and address validation emails would not be sent if their subject was configured to use the 'app' template variable. Contributed by @br4nnigan. diff --git a/changelog.d/11745.bugfix b/changelog.d/11745.bugfix new file mode 100644 index 000000000000..6521a37f6eb9 --- /dev/null +++ b/changelog.d/11745.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse v1.18.0 where password reset and address validation emails would not be sent if their subject was configured to use the 'app' template variable. Contributed by @br4nnigan. 
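The bug fixed in the diff below comes down to %-formatting with an incomplete mapping: `subject % {...}` raises KeyError for any template placeholder missing from the dict, and that exception surfaced to the client as an error 500. A minimal reproduction (the template string and values are illustrative, not Synapse's defaults):

    subject = "[%(app)s] Validate your email for %(server_name)s"

    try:
        subject % {"server_name": "example.org"}  # "app" is missing
    except KeyError as e:
        print("formatting failed:", e)  # KeyError: 'app'

    # The fix: supply every key the documented templates may reference.
    print(subject % {"server_name": "example.org", "app": "Matrix"})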
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index ff904c2b4a2f..dadfc574134c 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -178,7 +178,7 @@ async def send_registration_mail( await self.send_email( email_address, self.email_subjects.email_validation - % {"server_name": self.hs.config.server.server_name}, + % {"server_name": self.hs.config.server.server_name, "app": self.app_name}, template_vars, ) @@ -209,7 +209,7 @@ async def send_add_threepid_mail( await self.send_email( email_address, self.email_subjects.email_validation - % {"server_name": self.hs.config.server.server_name}, + % {"server_name": self.hs.config.server.server_name, "app": self.app_name}, template_vars, ) From 3e0536cd2afb5a640619bd872fc27b068ec3eb9b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 13 Jan 2022 19:44:18 -0500 Subject: [PATCH 093/474] Replace uses of simple_insert_many with simple_insert_many_values. (#11742) This should be (slightly) more efficient and it is simpler to have a single method for inserting multiple values. --- changelog.d/11742.misc | 1 + synapse/rest/admin/background_updates.py | 44 +++---- synapse/storage/database.py | 54 +------- .../storage/databases/main/account_data.py | 4 +- synapse/storage/databases/main/deviceinbox.py | 30 +++-- synapse/storage/databases/main/devices.py | 37 +++--- synapse/storage/databases/main/directory.py | 6 +- .../storage/databases/main/e2e_room_keys.py | 34 +++-- .../storage/databases/main/end_to_end_keys.py | 42 +++--- .../databases/main/event_push_actions.py | 21 ++- synapse/storage/databases/main/events.py | 122 +++++++++--------- .../databases/main/events_bg_updates.py | 30 ++--- synapse/storage/databases/main/presence.py | 33 +++-- synapse/storage/databases/main/pusher.py | 8 +- .../storage/databases/main/user_directory.py | 12 +- synapse/storage/databases/state/bg_updates.py | 15 ++- synapse/storage/databases/state/store.py | 27 +--- tests/rest/admin/test_registration_tokens.py | 15 +-- tests/storage/test_event_federation.py | 26 ++-- 19 files changed, 263 insertions(+), 298 deletions(-) create mode 100644 changelog.d/11742.misc diff --git a/changelog.d/11742.misc b/changelog.d/11742.misc new file mode 100644 index 000000000000..f65ccdf30a5f --- /dev/null +++ b/changelog.d/11742.misc @@ -0,0 +1 @@ +Minor efficiency improvements when inserting many values into the database. 
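The converted call sites that follow all move from one-dict-per-row to a single tuple of column names plus one value tuple per row, which is the shape the DB-API's `executemany` wants. A rough standalone illustration of why this form is cheaper (sqlite3 stands in here for Synapse's database layer, whose helper builds engine-specific SQL):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE event_labels (event_id TEXT, label TEXT)")

    # One keys tuple for the whole batch; rows are plain tuples in key order.
    keys = ("event_id", "label")
    rows = [("$event1", "fun"), ("$event2", "work")]

    # The SQL is built once; there are no per-row dicts to sort or cross-check.
    sql = "INSERT INTO event_labels (%s) VALUES (%s)" % (
        ", ".join(keys),
        ", ".join("?" * len(keys)),
    )
    conn.executemany(sql, rows)

    print(conn.execute("SELECT * FROM event_labels").fetchall())

The zip-and-sort dance deleted from `simple_insert_many` below existed only to recover this keys/values split from a list of dicts, re-deriving and cross-checking the key set on every call.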
diff --git a/synapse/rest/admin/background_updates.py b/synapse/rest/admin/background_updates.py index 6ec00ce0b9a8..e9bce22a347b 100644 --- a/synapse/rest/admin/background_updates.py +++ b/synapse/rest/admin/background_updates.py @@ -123,34 +123,25 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: job_name = body["job_name"] if job_name == "populate_stats_process_rooms": - jobs = [ - { - "update_name": "populate_stats_process_rooms", - "progress_json": "{}", - }, - ] + jobs = [("populate_stats_process_rooms", "{}", "")] elif job_name == "regenerate_directory": jobs = [ - { - "update_name": "populate_user_directory_createtables", - "progress_json": "{}", - "depends_on": "", - }, - { - "update_name": "populate_user_directory_process_rooms", - "progress_json": "{}", - "depends_on": "populate_user_directory_createtables", - }, - { - "update_name": "populate_user_directory_process_users", - "progress_json": "{}", - "depends_on": "populate_user_directory_process_rooms", - }, - { - "update_name": "populate_user_directory_cleanup", - "progress_json": "{}", - "depends_on": "populate_user_directory_process_users", - }, + ("populate_user_directory_createtables", "{}", ""), + ( + "populate_user_directory_process_rooms", + "{}", + "populate_user_directory_createtables", + ), + ( + "populate_user_directory_process_users", + "{}", + "populate_user_directory_process_rooms", + ), + ( + "populate_user_directory_cleanup", + "{}", + "populate_user_directory_process_users", + ), ] else: raise SynapseError(HTTPStatus.BAD_REQUEST, "Invalid job_name") @@ -158,6 +149,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: try: await self._store.db_pool.simple_insert_many( table="background_updates", + keys=("update_name", "progress_json", "depends_on"), values=jobs, desc=f"admin_api_run_{job_name}", ) diff --git a/synapse/storage/database.py b/synapse/storage/database.py index a27cc3605c73..57cc1d76e02f 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -934,56 +934,6 @@ def simple_insert_txn( txn.execute(sql, vals) async def simple_insert_many( - self, table: str, values: List[Dict[str, Any]], desc: str - ) -> None: - """Executes an INSERT query on the named table. - - The input is given as a list of dicts, with one dict per row. - Generally simple_insert_many_values should be preferred for new code. - - Args: - table: string giving the table name - values: dict of new column names and values for them - desc: description of the transaction, for logging and metrics - """ - await self.runInteraction(desc, self.simple_insert_many_txn, table, values) - - @staticmethod - def simple_insert_many_txn( - txn: LoggingTransaction, table: str, values: List[Dict[str, Any]] - ) -> None: - """Executes an INSERT query on the named table. - - The input is given as a list of dicts, with one dict per row. - Generally simple_insert_many_values_txn should be preferred for new code. - - Args: - txn: The transaction to use. - table: string giving the table name - values: dict of new column names and values for them - """ - if not values: - return - - # This is a *slight* abomination to get a list of tuples of key names - # and a list of tuples of value names. - # - # i.e. [{"a": 1, "b": 2}, {"c": 3, "d": 4}] - # => [("a", "b",), ("c", "d",)] and [(1, 2,), (3, 4,)] - # - # The sort is to ensure that we don't rely on dictionary iteration - # order. 
- keys, vals = zip( - *(zip(*(sorted(i.items(), key=lambda kv: kv[0]))) for i in values if i) - ) - - for k in keys: - if k != keys[0]: - raise RuntimeError("All items must have the same keys") - - return DatabasePool.simple_insert_many_values_txn(txn, table, keys[0], vals) - - async def simple_insert_many_values( self, table: str, keys: Collection[str], @@ -1002,11 +952,11 @@ async def simple_insert_many_values( desc: description of the transaction, for logging and metrics """ await self.runInteraction( - desc, self.simple_insert_many_values_txn, table, keys, values + desc, self.simple_insert_many_txn, table, keys, values ) @staticmethod - def simple_insert_many_values_txn( + def simple_insert_many_txn( txn: LoggingTransaction, table: str, keys: Collection[str], diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 93db71d1b489..ef475e18c788 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -536,9 +536,9 @@ def _add_account_data_for_user( self.db_pool.simple_insert_many_txn( txn, table="ignored_users", + keys=("ignorer_user_id", "ignored_user_id"), values=[ - {"ignorer_user_id": user_id, "ignored_user_id": u} - for u in currently_ignored_users - previously_ignored_users + (user_id, u) for u in currently_ignored_users - previously_ignored_users ], ) diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 3682cb6a8139..4eca97189bef 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -432,14 +432,21 @@ def add_messages_txn(txn, now_ms, stream_id): self.db_pool.simple_insert_many_txn( txn, table="device_federation_outbox", + keys=( + "destination", + "stream_id", + "queued_ts", + "messages_json", + "instance_name", + ), values=[ - { - "destination": destination, - "stream_id": stream_id, - "queued_ts": now_ms, - "messages_json": json_encoder.encode(edu), - "instance_name": self._instance_name, - } + ( + destination, + stream_id, + now_ms, + json_encoder.encode(edu), + self._instance_name, + ) for destination, edu in remote_messages_by_destination.items() ], ) @@ -571,14 +578,9 @@ def _add_messages_to_local_device_inbox_txn( self.db_pool.simple_insert_many_txn( txn, table="device_inbox", + keys=("user_id", "device_id", "stream_id", "message_json", "instance_name"), values=[ - { - "user_id": user_id, - "device_id": device_id, - "stream_id": stream_id, - "message_json": message_json, - "instance_name": self._instance_name, - } + (user_id, device_id, stream_id, message_json, self._instance_name) for user_id, messages_by_device in local_by_user_then_device.items() for device_id, message_json in messages_by_device.items() ], diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 52fbf50db64f..8748654b5598 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -1386,12 +1386,9 @@ def _update_remote_device_list_cache_txn( self.db_pool.simple_insert_many_txn( txn, table="device_lists_remote_cache", + keys=("user_id", "device_id", "content"), values=[ - { - "user_id": user_id, - "device_id": content["device_id"], - "content": json_encoder.encode(content), - } + (user_id, content["device_id"], json_encoder.encode(content)) for content in devices ], ) @@ -1479,8 +1476,9 @@ def _add_device_change_to_stream_txn( self.db_pool.simple_insert_many_txn( txn, 
table="device_lists_stream", + keys=("stream_id", "user_id", "device_id"), values=[ - {"stream_id": stream_id, "user_id": user_id, "device_id": device_id} + (stream_id, user_id, device_id) for stream_id, device_id in zip(stream_ids, device_ids) ], ) @@ -1507,18 +1505,27 @@ def _add_device_outbound_poke_to_stream_txn( self.db_pool.simple_insert_many_txn( txn, table="device_lists_outbound_pokes", + keys=( + "destination", + "stream_id", + "user_id", + "device_id", + "sent", + "ts", + "opentracing_context", + ), values=[ - { - "destination": destination, - "stream_id": next(next_stream_id), - "user_id": user_id, - "device_id": device_id, - "sent": False, - "ts": now, - "opentracing_context": json_encoder.encode(context) + ( + destination, + next(next_stream_id), + user_id, + device_id, + False, + now, + json_encoder.encode(context) if whitelisted_homeserver(destination) else "{}", - } + ) for destination in hosts for device_id in device_ids ], diff --git a/synapse/storage/databases/main/directory.py b/synapse/storage/databases/main/directory.py index f76c6121e8a9..5903fdaf007a 100644 --- a/synapse/storage/databases/main/directory.py +++ b/synapse/storage/databases/main/directory.py @@ -112,10 +112,8 @@ def alias_txn(txn: LoggingTransaction) -> None: self.db_pool.simple_insert_many_txn( txn, table="room_alias_servers", - values=[ - {"room_alias": room_alias.to_string(), "server": server} - for server in servers - ], + keys=("room_alias", "server"), + values=[(room_alias.to_string(), server) for server in servers], ) self._invalidate_cache_and_stream( diff --git a/synapse/storage/databases/main/e2e_room_keys.py b/synapse/storage/databases/main/e2e_room_keys.py index 0cb48b9dd750..b789a588a54b 100644 --- a/synapse/storage/databases/main/e2e_room_keys.py +++ b/synapse/storage/databases/main/e2e_room_keys.py @@ -110,16 +110,16 @@ async def add_e2e_room_keys( values = [] for (room_id, session_id, room_key) in room_keys: values.append( - { - "user_id": user_id, - "version": version_int, - "room_id": room_id, - "session_id": session_id, - "first_message_index": room_key["first_message_index"], - "forwarded_count": room_key["forwarded_count"], - "is_verified": room_key["is_verified"], - "session_data": json_encoder.encode(room_key["session_data"]), - } + ( + user_id, + version_int, + room_id, + session_id, + room_key["first_message_index"], + room_key["forwarded_count"], + room_key["is_verified"], + json_encoder.encode(room_key["session_data"]), + ) ) log_kv( { @@ -131,7 +131,19 @@ async def add_e2e_room_keys( ) await self.db_pool.simple_insert_many( - table="e2e_room_keys", values=values, desc="add_e2e_room_keys" + table="e2e_room_keys", + keys=( + "user_id", + "version", + "room_id", + "session_id", + "first_message_index", + "forwarded_count", + "is_verified", + "session_data", + ), + values=values, + desc="add_e2e_room_keys", ) @trace diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 86cab975639c..1f8447b5076f 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -387,15 +387,16 @@ def _add_e2e_one_time_keys(txn: LoggingTransaction) -> None: self.db_pool.simple_insert_many_txn( txn, table="e2e_one_time_keys_json", + keys=( + "user_id", + "device_id", + "algorithm", + "key_id", + "ts_added_ms", + "key_json", + ), values=[ - { - "user_id": user_id, - "device_id": device_id, - "algorithm": algorithm, - "key_id": key_id, - "ts_added_ms": time_now, - "key_json": 
json_bytes, - } + (user_id, device_id, algorithm, key_id, time_now, json_bytes) for algorithm, key_id, json_bytes in new_keys ], ) @@ -1186,15 +1187,22 @@ async def store_e2e_cross_signing_signatures( """ await self.db_pool.simple_insert_many( "e2e_cross_signing_signatures", - [ - { - "user_id": user_id, - "key_id": item.signing_key_id, - "target_user_id": item.target_user_id, - "target_device_id": item.target_device_id, - "signature": item.signature, - } + keys=( + "user_id", + "key_id", + "target_user_id", + "target_device_id", + "signature", + ), + values=[ + ( + user_id, + item.signing_key_id, + item.target_user_id, + item.target_device_id, + item.signature, + ) for item in signatures ], - "add_e2e_signing_key", + desc="add_e2e_signing_key", ) diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index a98e6b259378..b7c4c62222bd 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -875,14 +875,21 @@ def _rotate_notifs_before_txn( self.db_pool.simple_insert_many_txn( txn, table="event_push_summary", + keys=( + "user_id", + "room_id", + "notif_count", + "unread_count", + "stream_ordering", + ), values=[ - { - "user_id": user_id, - "room_id": room_id, - "notif_count": summary.notif_count, - "unread_count": summary.unread_count, - "stream_ordering": summary.stream_ordering, - } + ( + user_id, + room_id, + summary.notif_count, + summary.unread_count, + summary.stream_ordering, + ) for ((user_id, room_id), summary) in summaries.items() if summary.old_user_id is None ], diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index cce230559773..de3b48524b13 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -442,12 +442,9 @@ def _persist_event_auth_chain_txn( self.db_pool.simple_insert_many_txn( txn, table="event_auth", + keys=("event_id", "room_id", "auth_id"), values=[ - { - "event_id": event.event_id, - "room_id": event.room_id, - "auth_id": auth_id, - } + (event.event_id, event.room_id, auth_id) for event in events for auth_id in event.auth_event_ids() if event.is_state() @@ -675,8 +672,9 @@ def _add_chain_cover_index( db_pool.simple_insert_many_txn( txn, table="event_auth_chains", + keys=("event_id", "chain_id", "sequence_number"), values=[ - {"event_id": event_id, "chain_id": c_id, "sequence_number": seq} + (event_id, c_id, seq) for event_id, (c_id, seq) in new_chain_tuples.items() ], ) @@ -782,13 +780,14 @@ def _add_chain_cover_index( db_pool.simple_insert_many_txn( txn, table="event_auth_chain_links", + keys=( + "origin_chain_id", + "origin_sequence_number", + "target_chain_id", + "target_sequence_number", + ), values=[ - { - "origin_chain_id": source_id, - "origin_sequence_number": source_seq, - "target_chain_id": target_id, - "target_sequence_number": target_seq, - } + (source_id, source_seq, target_id, target_seq) for ( source_id, source_seq, @@ -943,20 +942,28 @@ def _persist_transaction_ids_txn( txn_id = getattr(event.internal_metadata, "txn_id", None) if token_id and txn_id: to_insert.append( - { - "event_id": event.event_id, - "room_id": event.room_id, - "user_id": event.sender, - "token_id": token_id, - "txn_id": txn_id, - "inserted_ts": self._clock.time_msec(), - } + ( + event.event_id, + event.room_id, + event.sender, + token_id, + txn_id, + self._clock.time_msec(), + ) ) if to_insert: self.db_pool.simple_insert_many_txn( txn, 
table="event_txn_id", + keys=( + "event_id", + "room_id", + "user_id", + "token_id", + "txn_id", + "inserted_ts", + ), values=to_insert, ) @@ -1161,8 +1168,9 @@ def _update_forward_extremities_txn( self.db_pool.simple_insert_many_txn( txn, table="event_forward_extremities", + keys=("event_id", "room_id"), values=[ - {"event_id": ev_id, "room_id": room_id} + (ev_id, room_id) for room_id, new_extrem in new_forward_extremities.items() for ev_id in new_extrem ], @@ -1174,12 +1182,9 @@ def _update_forward_extremities_txn( self.db_pool.simple_insert_many_txn( txn, table="stream_ordering_to_exterm", + keys=("room_id", "event_id", "stream_ordering"), values=[ - { - "room_id": room_id, - "event_id": event_id, - "stream_ordering": max_stream_order, - } + (room_id, event_id, max_stream_order) for room_id, new_extrem in new_forward_extremities.items() for event_id in new_extrem ], @@ -1342,7 +1347,7 @@ def event_dict(event): d.pop("redacted_because", None) return d - self.db_pool.simple_insert_many_values_txn( + self.db_pool.simple_insert_many_txn( txn, table="event_json", keys=("event_id", "room_id", "internal_metadata", "json", "format_version"), @@ -1358,7 +1363,7 @@ def event_dict(event): ), ) - self.db_pool.simple_insert_many_values_txn( + self.db_pool.simple_insert_many_txn( txn, table="events", keys=( @@ -1412,7 +1417,7 @@ def event_dict(event): ) txn.execute(sql + clause, [False] + args) - self.db_pool.simple_insert_many_values_txn( + self.db_pool.simple_insert_many_txn( txn, table="state_events", keys=("event_id", "room_id", "type", "state_key"), @@ -1622,14 +1627,9 @@ def insert_labels_for_event_txn( return self.db_pool.simple_insert_many_txn( txn=txn, table="event_labels", + keys=("event_id", "label", "room_id", "topological_ordering"), values=[ - { - "event_id": event_id, - "label": label, - "room_id": room_id, - "topological_ordering": topological_ordering, - } - for label in labels + (event_id, label, room_id, topological_ordering) for label in labels ], ) @@ -1657,16 +1657,13 @@ def _store_event_reference_hashes_txn(self, txn, events): vals = [] for event in events: ref_alg, ref_hash_bytes = compute_event_reference_hash(event) - vals.append( - { - "event_id": event.event_id, - "algorithm": ref_alg, - "hash": memoryview(ref_hash_bytes), - } - ) + vals.append((event.event_id, ref_alg, memoryview(ref_hash_bytes))) self.db_pool.simple_insert_many_txn( - txn, table="event_reference_hashes", values=vals + txn, + table="event_reference_hashes", + keys=("event_id", "algorithm", "hash"), + values=vals, ) def _store_room_members_txn( @@ -1689,18 +1686,25 @@ def non_null_str_or_none(val: Any) -> Optional[str]: self.db_pool.simple_insert_many_txn( txn, table="room_memberships", + keys=( + "event_id", + "user_id", + "sender", + "room_id", + "membership", + "display_name", + "avatar_url", + ), values=[ - { - "event_id": event.event_id, - "user_id": event.state_key, - "sender": event.user_id, - "room_id": event.room_id, - "membership": event.membership, - "display_name": non_null_str_or_none( - event.content.get("displayname") - ), - "avatar_url": non_null_str_or_none(event.content.get("avatar_url")), - } + ( + event.event_id, + event.state_key, + event.user_id, + event.room_id, + event.membership, + non_null_str_or_none(event.content.get("displayname")), + non_null_str_or_none(event.content.get("avatar_url")), + ) for event in events ], ) @@ -2163,13 +2167,9 @@ def _handle_mult_prev_events(self, txn, events): self.db_pool.simple_insert_many_txn( txn, table="event_edges", + keys=("event_id", 
"prev_event_id", "room_id", "is_state"), values=[ - { - "event_id": ev.event_id, - "prev_event_id": e_id, - "room_id": ev.room_id, - "is_state": False, - } + (ev.event_id, e_id, ev.room_id, False) for ev in events for e_id in ev.prev_event_ids() ], diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index 0a96664caf1b..d5f005966597 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -684,13 +684,14 @@ def _event_store_labels_txn(txn: LoggingTransaction) -> int: self.db_pool.simple_insert_many_txn( txn=txn, table="event_labels", + keys=("event_id", "label", "room_id", "topological_ordering"), values=[ - { - "event_id": event_id, - "label": label, - "room_id": event_json["room_id"], - "topological_ordering": event_json["depth"], - } + ( + event_id, + label, + event_json["room_id"], + event_json["depth"], + ) for label in event_json["content"].get( EventContentFields.LABELS, [] ) @@ -803,29 +804,19 @@ def get_rejected_events( if not has_state: state_events.append( - { - "event_id": event.event_id, - "room_id": event.room_id, - "type": event.type, - "state_key": event.state_key, - } + (event.event_id, event.room_id, event.type, event.state_key) ) if not has_event_auth: # Old, dodgy, events may have duplicate auth events, which we # need to deduplicate as we have a unique constraint. for auth_id in set(event.auth_event_ids()): - auth_events.append( - { - "room_id": event.room_id, - "event_id": event.event_id, - "auth_id": auth_id, - } - ) + auth_events.append((event.event_id, event.room_id, auth_id)) if state_events: await self.db_pool.simple_insert_many( table="state_events", + keys=("event_id", "room_id", "type", "state_key"), values=state_events, desc="_rejected_events_metadata_state_events", ) @@ -833,6 +824,7 @@ def get_rejected_events( if auth_events: await self.db_pool.simple_insert_many( table="event_auth", + keys=("event_id", "room_id", "auth_id"), values=auth_events, desc="_rejected_events_metadata_event_auth", ) diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py index cbf9ec38f75d..4f05811a77eb 100644 --- a/synapse/storage/databases/main/presence.py +++ b/synapse/storage/databases/main/presence.py @@ -129,18 +129,29 @@ def _update_presence_txn(self, txn, stream_orderings, presence_states): self.db_pool.simple_insert_many_txn( txn, table="presence_stream", + keys=( + "stream_id", + "user_id", + "state", + "last_active_ts", + "last_federation_update_ts", + "last_user_sync_ts", + "status_msg", + "currently_active", + "instance_name", + ), values=[ - { - "stream_id": stream_id, - "user_id": state.user_id, - "state": state.state, - "last_active_ts": state.last_active_ts, - "last_federation_update_ts": state.last_federation_update_ts, - "last_user_sync_ts": state.last_user_sync_ts, - "status_msg": state.status_msg, - "currently_active": state.currently_active, - "instance_name": self._instance_name, - } + ( + stream_id, + state.user_id, + state.state, + state.last_active_ts, + state.last_federation_update_ts, + state.last_user_sync_ts, + state.status_msg, + state.currently_active, + self._instance_name, + ) for stream_id, state in zip(stream_orderings, presence_states) ], ) diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py index 747b4f31df67..cf64cd63a46f 100644 --- a/synapse/storage/databases/main/pusher.py +++ b/synapse/storage/databases/main/pusher.py 
@@ -561,13 +561,9 @@ def delete_pushers_txn(txn, stream_ids): self.db_pool.simple_insert_many_txn( txn, table="deleted_pushers", + keys=("stream_id", "app_id", "pushkey", "user_id"), values=[ - { - "stream_id": stream_id, - "app_id": pusher.app_id, - "pushkey": pusher.pushkey, - "user_id": user_id, - } + (stream_id, pusher.app_id, pusher.pushkey, user_id) for stream_id, pusher in zip(stream_ids, pushers) ], ) diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index 0f9b8575d3a5..f7c778bdf22b 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -105,8 +105,10 @@ def _make_staging_area(txn: LoggingTransaction) -> None: GROUP BY room_id """ txn.execute(sql) - rooms = [{"room_id": x[0], "events": x[1]} for x in txn.fetchall()] - self.db_pool.simple_insert_many_txn(txn, TEMP_TABLE + "_rooms", rooms) + rooms = list(txn.fetchall()) + self.db_pool.simple_insert_many_txn( + txn, TEMP_TABLE + "_rooms", keys=("room_id", "events"), values=rooms + ) del rooms sql = ( @@ -117,9 +119,11 @@ def _make_staging_area(txn: LoggingTransaction) -> None: txn.execute(sql) txn.execute("SELECT name FROM users") - users = [{"user_id": x[0]} for x in txn.fetchall()] + users = list(txn.fetchall()) - self.db_pool.simple_insert_many_txn(txn, TEMP_TABLE + "_users", users) + self.db_pool.simple_insert_many_txn( + txn, TEMP_TABLE + "_users", keys=("user_id",), values=users + ) new_pos = await self.get_max_stream_id_in_current_state_deltas() await self.db_pool.runInteraction( diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index eb1118d2cb20..5de70f31d294 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -327,14 +327,15 @@ def reindex_txn(txn: LoggingTransaction) -> Tuple[bool, int]: self.db_pool.simple_insert_many_txn( txn, table="state_groups_state", + keys=( + "state_group", + "room_id", + "type", + "state_key", + "event_id", + ), values=[ - { - "state_group": state_group, - "room_id": room_id, - "type": key[0], - "state_key": key[1], - "event_id": state_id, - } + (state_group, room_id, key[0], key[1], state_id) for key, state_id in delta_state.items() ], ) diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index c4c8c0021bca..7614d76ac646 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -460,14 +460,9 @@ def _store_state_group_txn(txn: LoggingTransaction) -> int: self.db_pool.simple_insert_many_txn( txn, table="state_groups_state", + keys=("state_group", "room_id", "type", "state_key", "event_id"), values=[ - { - "state_group": state_group, - "room_id": room_id, - "type": key[0], - "state_key": key[1], - "event_id": state_id, - } + (state_group, room_id, key[0], key[1], state_id) for key, state_id in delta_ids.items() ], ) @@ -475,14 +470,9 @@ def _store_state_group_txn(txn: LoggingTransaction) -> int: self.db_pool.simple_insert_many_txn( txn, table="state_groups_state", + keys=("state_group", "room_id", "type", "state_key", "event_id"), values=[ - { - "state_group": state_group, - "room_id": room_id, - "type": key[0], - "state_key": key[1], - "event_id": state_id, - } + (state_group, room_id, key[0], key[1], state_id) for key, state_id in current_state_ids.items() ], ) @@ -589,14 +579,9 @@ def _purge_unreferenced_state_groups( self.db_pool.simple_insert_many_txn( txn, 
table="state_groups_state", + keys=("state_group", "room_id", "type", "state_key", "event_id"), values=[ - { - "state_group": sg, - "room_id": room_id, - "type": key[0], - "state_key": key[1], - "event_id": state_id, - } + (sg, room_id, key[0], key[1], state_id) for key, state_id in curr_state.items() ], ) diff --git a/tests/rest/admin/test_registration_tokens.py b/tests/rest/admin/test_registration_tokens.py index 81f3ac7f0448..8513b1d2df53 100644 --- a/tests/rest/admin/test_registration_tokens.py +++ b/tests/rest/admin/test_registration_tokens.py @@ -223,20 +223,13 @@ def test_create_unable_to_generate_token(self) -> None: # Create all possible single character tokens tokens = [] for c in string.ascii_letters + string.digits + "._~-": - tokens.append( - { - "token": c, - "uses_allowed": None, - "pending": 0, - "completed": 0, - "expiry_time": None, - } - ) + tokens.append((c, None, 0, 0, None)) self.get_success( self.store.db_pool.simple_insert_many( "registration_tokens", - tokens, - "create_all_registration_tokens", + keys=("token", "uses_allowed", "pending", "completed", "expiry_time"), + values=tokens, + desc="create_all_registration_tokens", ) ) diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py index ecfda7677e0a..632bbc9de73e 100644 --- a/tests/storage/test_event_federation.py +++ b/tests/storage/test_event_federation.py @@ -515,17 +515,23 @@ def test_prune_inbound_federation_queue(self): self.get_success( self.store.db_pool.simple_insert_many( table="federation_inbound_events_staging", + keys=( + "origin", + "room_id", + "received_ts", + "event_id", + "event_json", + "internal_metadata", + ), values=[ - { - "origin": "some_origin", - "room_id": room_id, - "received_ts": 0, - "event_id": f"$fake_event_id_{i + 1}", - "event_json": json_encoder.encode( - {"prev_events": [f"$fake_event_id_{i}"]} - ), - "internal_metadata": "{}", - } + ( + "some_origin", + room_id, + 0, + f"$fake_event_id_{i + 1}", + json_encoder.encode({"prev_events": [f"$fake_event_id_{i}"]}), + "{}", + ) for i in range(500) ], desc="test_prune_inbound_federation_queue", From 867443472c12edc11e791872a9dc1fcd7e4da736 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Fri, 14 Jan 2022 11:34:57 +0000 Subject: [PATCH 094/474] 1.50.0rc2 --- CHANGES.md | 26 ++++++++++++++++++++++++++ changelog.d/11714.misc | 1 - changelog.d/11725.doc | 1 - changelog.d/11729.bugfix | 1 - changelog.d/11730.bugfix | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 7 files changed, 33 insertions(+), 5 deletions(-) delete mode 100644 changelog.d/11714.misc delete mode 100644 changelog.d/11725.doc delete mode 100644 changelog.d/11729.bugfix delete mode 100644 changelog.d/11730.bugfix diff --git a/CHANGES.md b/CHANGES.md index f91109f88570..43f5a7826954 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,29 @@ +Synapse 1.50.0rc2 (2022-01-14) +============================== + +This release candidate fixes a federation-breaking regression introduced in the previous release candidate. The bug broke sending federation traffic to destination servers that had enough outbound device updates to be sent (including at least one cross-signing key update). +It would particularly affect sending to servers that have had downtime, as this would make it more likely that a big enough queue of outbound device updates had built up. 
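The storage hunks above all make the same mechanical change: `simple_insert_many` and `simple_insert_many_txn` call sites stop passing a list of per-row dicts and instead pass one shared `keys` tuple plus a list of plain value tuples, so column names are stated once per batch rather than once per row. A minimal standalone sketch of the new calling convention follows; the helper below is a simplified stand-in for Synapse's real `DatabasePool` method, and the table and rows are illustrative only:

```python
import sqlite3
from typing import Any, Collection, Iterable, Tuple


def simple_insert_many(
    conn: sqlite3.Connection,
    table: str,
    keys: Collection[str],
    values: Iterable[Tuple[Any, ...]],
) -> None:
    # One SQL string is built for the whole batch; each row is a bare tuple,
    # so there is no per-row dict allocation and rows cannot disagree on keys.
    sql = "INSERT INTO %s (%s) VALUES (%s)" % (
        table,
        ", ".join(keys),
        ", ".join("?" for _ in keys),
    )
    conn.executemany(sql, values)


conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE room_alias_servers (room_alias TEXT, server TEXT)")
simple_insert_many(
    conn,
    table="room_alias_servers",
    keys=("room_alias", "server"),
    values=[("#alias:example.com", s) for s in ("one.example.com", "two.example.com")],
)
print(conn.execute("SELECT count(*) FROM room_alias_servers").fetchone())  # (2,)
```

The shared-keys shape also lets the SQL text be prepared once per batch, which is why the refactor touches every call site in the same way.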
+ + +Bugfixes +-------- + +- Fix a bug introduced in Synapse v1.0.0 whereby some device list updates would not be sent to remote homeservers if there were too many to send at once. ([\#11729](https://github.com/matrix-org/synapse/issues/11729)) +- Fix a bug introduced in Synapse v1.50.0rc1 whereby outbound federation could fail because too many EDUs were produced for device updates. ([\#11730](https://github.com/matrix-org/synapse/issues/11730)) + + +Improved Documentation +---------------------- + +- Document that now the minimum supported PostgreSQL version is 10. ([\#11725](https://github.com/matrix-org/synapse/issues/11725)) + + +Internal Changes +---------------- + +- Fix a typechecker problem related to our (ab)use of `nacl.signing.SigningKey`s. ([\#11714](https://github.com/matrix-org/synapse/issues/11714)) + + Synapse 1.50.0rc1 (2022-01-05) ============================== diff --git a/changelog.d/11714.misc b/changelog.d/11714.misc deleted file mode 100644 index 7f39bf0e3dca..000000000000 --- a/changelog.d/11714.misc +++ /dev/null @@ -1 +0,0 @@ -Fix a typechecker problem related to our (ab)use of `nacl.signing.SigningKey`s. \ No newline at end of file diff --git a/changelog.d/11725.doc b/changelog.d/11725.doc deleted file mode 100644 index 46eb9b814fc3..000000000000 --- a/changelog.d/11725.doc +++ /dev/null @@ -1 +0,0 @@ -Document that now the minimum supported PostgreSQL version is 10. diff --git a/changelog.d/11729.bugfix b/changelog.d/11729.bugfix deleted file mode 100644 index 8438ce568630..000000000000 --- a/changelog.d/11729.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse v1.0.0 whereby some device list updates would not be sent to remote homeservers if there were too many to send at once. \ No newline at end of file diff --git a/changelog.d/11730.bugfix b/changelog.d/11730.bugfix deleted file mode 100644 index a0bd7dd1a3ba..000000000000 --- a/changelog.d/11730.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse v1.50.0rc1 whereby outbound federation could fail because too many EDUs were produced for device updates. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index b54c0ff34878..381980f4683a 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.50.0~rc2) stable; urgency=medium + + * New synapse release 1.50.0~rc2. + + -- Synapse Packaging team Fri, 14 Jan 2022 11:18:06 +0000 + matrix-synapse-py3 (1.50.0~rc1) stable; urgency=medium * New synapse release 1.50.0~rc1. diff --git a/synapse/__init__.py b/synapse/__init__.py index 92aec334e6e7..f2dac4e7de6c 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.50.0rc1" +__version__ = "1.50.0rc2" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 422e33fabff5bb77269b991b253e7975a04227fa Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Fri, 14 Jan 2022 12:08:14 +0000 Subject: [PATCH 095/474] Tweak the changelog summary section --- CHANGES.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 43f5a7826954..8f53ce6c5188 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,8 +1,9 @@ Synapse 1.50.0rc2 (2022-01-14) ============================== -This release candidate fixes a federation-breaking regression introduced in the previous release candidate. 
The bug broke sending federation traffic to destination servers that had enough outbound device updates to be sent (including at least one cross-signing key update). -It would particularly affect sending to servers that have had downtime, as this would make it more likely that a big enough queue of outbound device updates had built up. +This release candidate fixes a federation-breaking regression introduced in Synapse 1.50.0rc1. + +Please note that we now only support Python 3.7+ and PostgreSQL 10+ (if applicable), because Python 3.6 and PostgreSQL 9.6 have reached end-of-life. Bugfixes @@ -27,8 +28,6 @@ Internal Changes Synapse 1.50.0rc1 (2022-01-05) ============================== -Please note that we now only support Python 3.7+ and PostgreSQL 10+ (if applicable), because Python 3.6 and PostgreSQL 9.6 have reached end-of-life. - Features -------- From 904bb044097a34ff37d06a92da4a1a39f2a9282b Mon Sep 17 00:00:00 2001 From: Jason Robinson Date: Fri, 14 Jan 2022 16:11:55 +0200 Subject: [PATCH 096/474] Fix sample_config.yaml in regards track_puppeted_user_ips (#11749) * Fix sample_config.yaml in regards track_puppeted_user_ips Closes #11741 Signed-off-by: Jason Robinson --- changelog.d/11749.feature | 1 + docs/sample_config.yaml | 11 ++++++----- synapse/config/api.py | 11 ++++++----- 3 files changed, 13 insertions(+), 10 deletions(-) create mode 100644 changelog.d/11749.feature diff --git a/changelog.d/11749.feature b/changelog.d/11749.feature new file mode 100644 index 000000000000..19dada883bb2 --- /dev/null +++ b/changelog.d/11749.feature @@ -0,0 +1 @@ +Add `track_puppeted_user_ips` config flag to track puppeted user IP addresses. This also includes them in monthly active user counts. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 907e067e5163..9a501167ee3b 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1503,11 +1503,12 @@ room_prejoin_state: #additional_event_types: # - org.example.custom.event.type -# If enabled, puppeted user IP's can also be tracked. By default when -# puppeting another user, the user who has created the access token -# for puppeting is tracked. If this is enabled, both requests are tracked. -# Implicitly enables MAU tracking for puppeted users. -#track_puppeted_user_ips: false +# By default when puppeting another user, the user who has created the +# access token for puppeting is tracked. If this is enabled, both +# requests are tracked. Implicitly enables MAU tracking for puppeted users. +# Uncomment to also track puppeted user IP's. +# +#track_puppeted_user_ips: true # A list of application service config files to use diff --git a/synapse/config/api.py b/synapse/config/api.py index bdbe9f0fa280..f8e52150a25f 100644 --- a/synapse/config/api.py +++ b/synapse/config/api.py @@ -61,11 +61,12 @@ def generate_config_section(cls, **kwargs) -> str: #additional_event_types: # - org.example.custom.event.type - # If enabled, puppeted user IP's can also be tracked. By default when - # puppeting another user, the user who has created the access token - # for puppeting is tracked. If this is enabled, both requests are tracked. - # Implicitly enables MAU tracking for puppeted users. - #track_puppeted_user_ips: false + # By default when puppeting another user, the user who has created the + # access token for puppeting is tracked. If this is enabled, both + # requests are tracked. Implicitly enables MAU tracking for puppeted users. + # Uncomment to also track puppeted user IP's. 
+ # + #track_puppeted_user_ips: true """ % { "formatted_default_state_types": formatted_default_state_types } From 18862f20b5495bdc556c54e92fd4b1efdc718ba7 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 14 Jan 2022 14:53:33 +0000 Subject: [PATCH 097/474] Remove the 'password_hash' from the Users Admin API endpoint response dictionary (#11576) --- changelog.d/11576.feature | 1 + docs/admin_api/user_admin_api.md | 9 ++--- synapse/handlers/admin.py | 56 +++++++++++++++++++++++--------- synapse/rest/admin/users.py | 13 ++++---- tests/rest/admin/test_user.py | 50 ++++++++++++++++++---------- 5 files changed, 86 insertions(+), 43 deletions(-) create mode 100644 changelog.d/11576.feature diff --git a/changelog.d/11576.feature b/changelog.d/11576.feature new file mode 100644 index 000000000000..5be836ae022d --- /dev/null +++ b/changelog.d/11576.feature @@ -0,0 +1 @@ +Remove the `"password_hash"` field from the response dictionaries of the [Users Admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html). \ No newline at end of file diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md index 74933d2fcf0e..c514cadb9dae 100644 --- a/docs/admin_api/user_admin_api.md +++ b/docs/admin_api/user_admin_api.md @@ -15,9 +15,10 @@ server admin: [Admin API](../usage/administration/admin_api) It returns a JSON body like the following: -```json +```jsonc { - "displayname": "User", + "name": "@user:example.com", + "displayname": "User", // can be null if not set "threepids": [ { "medium": "email", @@ -32,11 +33,11 @@ It returns a JSON body like the following: "validated_at": 1586458409743 } ], - "avatar_url": "", + "avatar_url": "", // can be null if not set + "is_guest": 0, "admin": 0, "deactivated": 0, "shadow_banned": 0, - "password_hash": "$2b$12$p9B4GkqYdRTPGD", "creation_ts": 1560432506, "appservice_id": null, "consent_server_notice_sent": null, diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 85157a138b71..00ab5e79bf2e 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -55,21 +55,47 @@ async def get_whois(self, user: UserID) -> JsonDict: async def get_user(self, user: UserID) -> Optional[JsonDict]: """Function to get user details""" - ret = await self.store.get_user_by_id(user.to_string()) - if ret: - profile = await self.store.get_profileinfo(user.localpart) - threepids = await self.store.user_get_threepids(user.to_string()) - external_ids = [ - ({"auth_provider": auth_provider, "external_id": external_id}) - for auth_provider, external_id in await self.store.get_external_ids_by_user( - user.to_string() - ) - ] - ret["displayname"] = profile.display_name - ret["avatar_url"] = profile.avatar_url - ret["threepids"] = threepids - ret["external_ids"] = external_ids - return ret + user_info_dict = await self.store.get_user_by_id(user.to_string()) + if user_info_dict is None: + return None + + # Restrict returned information to a known set of fields. This prevents additional + # fields added to get_user_by_id from modifying Synapse's external API surface. + user_info_to_return = { + "name", + "admin", + "deactivated", + "shadow_banned", + "creation_ts", + "appservice_id", + "consent_server_notice_sent", + "consent_version", + "user_type", + "is_guest", + } + + # Restrict returned keys to a known set. 
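That allow-list comprehension (continued just below) is the mechanism that keeps `password_hash`, and any column later added to `get_user_by_id`, out of the admin API response. A toy sketch of the same filtering pattern; the field set here is an abbreviated illustration, not the real list:

```python
from typing import Any, Dict

# Abbreviated, illustrative subset of the allow-list in synapse/handlers/admin.py.
USER_INFO_FIELDS = {"name", "admin", "deactivated", "creation_ts"}


def restrict_user_info(row: Dict[str, Any]) -> Dict[str, Any]:
    # Dropping unknown keys means a new database column never leaks into the
    # API by accident; exposing it requires a deliberate allow-list change.
    return {key: value for key, value in row.items() if key in USER_INFO_FIELDS}


row = {"name": "@user:test", "admin": 0, "password_hash": "$2b$12$...", "creation_ts": 0}
print(restrict_user_info(row))  # no "password_hash" key in the output
```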
+ user_info_dict = { + key: value + for key, value in user_info_dict.items() + if key in user_info_to_return + } + + # Add additional user metadata + profile = await self.store.get_profileinfo(user.localpart) + threepids = await self.store.user_get_threepids(user.to_string()) + external_ids = [ + ({"auth_provider": auth_provider, "external_id": external_id}) + for auth_provider, external_id in await self.store.get_external_ids_by_user( + user.to_string() + ) + ] + user_info_dict["displayname"] = profile.display_name + user_info_dict["avatar_url"] = profile.avatar_url + user_info_dict["threepids"] = threepids + user_info_dict["external_ids"] = external_ids + + return user_info_dict async def export_user_data(self, user_id: str, writer: "ExfiltrationWriter") -> Any: """Write all data we have on the user to the given writer. diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 78e795c34764..c2617ee30c48 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -173,12 +173,11 @@ async def on_GET( if not self.hs.is_mine(target_user): raise SynapseError(HTTPStatus.BAD_REQUEST, "Can only look up local users") - ret = await self.admin_handler.get_user(target_user) - - if not ret: + user_info_dict = await self.admin_handler.get_user(target_user) + if not user_info_dict: raise NotFoundError("User not found") - return HTTPStatus.OK, ret + return HTTPStatus.OK, user_info_dict async def on_PUT( self, request: SynapseRequest, user_id: str @@ -399,10 +398,10 @@ async def on_PUT( target_user, requester, body["avatar_url"], True ) - user = await self.admin_handler.get_user(target_user) - assert user is not None + user_info_dict = await self.admin_handler.get_user(target_user) + assert user_info_dict is not None - return 201, user + return HTTPStatus.CREATED, user_info_dict class UserRegisterServlet(RestServlet): diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index e0b9fe8e91b3..9711405735db 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -1181,6 +1181,7 @@ def prepare(self, reactor, clock, hs): self.other_user, device_id=None, valid_until_ms=None ) ) + self.url_prefix = "/_synapse/admin/v2/users/%s" self.url_other_user = self.url_prefix % self.other_user @@ -1188,7 +1189,7 @@ def test_requester_is_no_admin(self): """ If the user is not a server admin, an error is returned. """ - url = "/_synapse/admin/v2/users/@bob:test" + url = self.url_prefix % "@bob:test" channel = self.make_request( "GET", @@ -1216,7 +1217,7 @@ def test_user_does_not_exist(self): channel = self.make_request( "GET", - "/_synapse/admin/v2/users/@unknown_person:test", + self.url_prefix % "@unknown_person:test", access_token=self.admin_user_tok, ) @@ -1337,7 +1338,7 @@ def test_create_server_admin(self): """ Check that a new admin user is created successfully. """ - url = "/_synapse/admin/v2/users/@bob:test" + url = self.url_prefix % "@bob:test" # Create user (server admin) body = { @@ -1386,7 +1387,7 @@ def test_create_user(self): """ Check that a new regular user is created successfully. 
""" - url = "/_synapse/admin/v2/users/@bob:test" + url = self.url_prefix % "@bob:test" # Create user body = { @@ -1478,7 +1479,7 @@ def test_create_user_mau_limit_reached_active_admin(self): ) # Register new user with admin API - url = "/_synapse/admin/v2/users/@bob:test" + url = self.url_prefix % "@bob:test" # Create user channel = self.make_request( @@ -1515,7 +1516,7 @@ def test_create_user_mau_limit_reached_passive_admin(self): ) # Register new user with admin API - url = "/_synapse/admin/v2/users/@bob:test" + url = self.url_prefix % "@bob:test" # Create user channel = self.make_request( @@ -1545,7 +1546,7 @@ def test_create_user_email_notif_for_new_users(self): Check that a new regular user is created successfully and got an email pusher. """ - url = "/_synapse/admin/v2/users/@bob:test" + url = self.url_prefix % "@bob:test" # Create user body = { @@ -1588,7 +1589,7 @@ def test_create_user_email_no_notif_for_new_users(self): Check that a new regular user is created successfully and got not an email pusher. """ - url = "/_synapse/admin/v2/users/@bob:test" + url = self.url_prefix % "@bob:test" # Create user body = { @@ -2085,10 +2086,13 @@ def test_deactivate_user(self): self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertEqual("@user:test", channel.json_body["name"]) self.assertTrue(channel.json_body["deactivated"]) - self.assertIsNone(channel.json_body["password_hash"]) self.assertEqual(0, len(channel.json_body["threepids"])) self.assertEqual("mxc://servername/mediaid", channel.json_body["avatar_url"]) self.assertEqual("User", channel.json_body["displayname"]) + + # This key was removed intentionally. Ensure it is not accidentally re-included. + self.assertNotIn("password_hash", channel.json_body) + # the user is deactivated, the threepid will be deleted # Get user @@ -2101,11 +2105,13 @@ def test_deactivate_user(self): self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertEqual("@user:test", channel.json_body["name"]) self.assertTrue(channel.json_body["deactivated"]) - self.assertIsNone(channel.json_body["password_hash"]) self.assertEqual(0, len(channel.json_body["threepids"])) self.assertEqual("mxc://servername/mediaid", channel.json_body["avatar_url"]) self.assertEqual("User", channel.json_body["displayname"]) + # This key was removed intentionally. Ensure it is not accidentally re-included. + self.assertNotIn("password_hash", channel.json_body) + @override_config({"user_directory": {"enabled": True, "search_all_users": True}}) def test_change_name_deactivate_user_user_directory(self): """ @@ -2177,9 +2183,11 @@ def test_reactivate_user(self): self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertEqual("@user:test", channel.json_body["name"]) self.assertFalse(channel.json_body["deactivated"]) - self.assertIsNotNone(channel.json_body["password_hash"]) self._is_erased("@user:test", False) + # This key was removed intentionally. Ensure it is not accidentally re-included. 
+ self.assertNotIn("password_hash", channel.json_body) + @override_config({"password_config": {"localdb_enabled": False}}) def test_reactivate_user_localdb_disabled(self): """ @@ -2209,9 +2217,11 @@ def test_reactivate_user_localdb_disabled(self): self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertEqual("@user:test", channel.json_body["name"]) self.assertFalse(channel.json_body["deactivated"]) - self.assertIsNone(channel.json_body["password_hash"]) self._is_erased("@user:test", False) + # This key was removed intentionally. Ensure it is not accidentally re-included. + self.assertNotIn("password_hash", channel.json_body) + @override_config({"password_config": {"enabled": False}}) def test_reactivate_user_password_disabled(self): """ @@ -2241,9 +2251,11 @@ def test_reactivate_user_password_disabled(self): self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertEqual("@user:test", channel.json_body["name"]) self.assertFalse(channel.json_body["deactivated"]) - self.assertIsNone(channel.json_body["password_hash"]) self._is_erased("@user:test", False) + # This key was removed intentionally. Ensure it is not accidentally re-included. + self.assertNotIn("password_hash", channel.json_body) + def test_set_user_as_admin(self): """ Test setting the admin flag on a user. @@ -2328,7 +2340,7 @@ def test_accidental_deactivation_prevention(self): Ensure an account can't accidentally be deactivated by using a str value for the deactivated body parameter """ - url = "/_synapse/admin/v2/users/@bob:test" + url = self.url_prefix % "@bob:test" # Create user channel = self.make_request( @@ -2392,18 +2404,20 @@ def _deactivate_user(self, user_id: str) -> None: # Deactivate the user. channel = self.make_request( "PUT", - "/_synapse/admin/v2/users/%s" % urllib.parse.quote(user_id), + self.url_prefix % urllib.parse.quote(user_id), access_token=self.admin_user_tok, content={"deactivated": True}, ) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertTrue(channel.json_body["deactivated"]) - self.assertIsNone(channel.json_body["password_hash"]) self._is_erased(user_id, False) d = self.store.mark_user_erased(user_id) self.assertIsNone(self.get_success(d)) self._is_erased(user_id, True) + # This key was removed intentionally. Ensure it is not accidentally re-included. + self.assertNotIn("password_hash", channel.json_body) + def _check_fields(self, content: JsonDict): """Checks that the expected user attributes are present in content @@ -2416,13 +2430,15 @@ def _check_fields(self, content: JsonDict): self.assertIn("admin", content) self.assertIn("deactivated", content) self.assertIn("shadow_banned", content) - self.assertIn("password_hash", content) self.assertIn("creation_ts", content) self.assertIn("appservice_id", content) self.assertIn("consent_server_notice_sent", content) self.assertIn("consent_version", content) self.assertIn("external_ids", content) + # This key was removed intentionally. Ensure it is not accidentally re-included. + self.assertNotIn("password_hash", content) + class UserMembershipRestTestCase(unittest.HomeserverTestCase): From 6b241f5286f05d4d8dc0569e90388713a956d038 Mon Sep 17 00:00:00 2001 From: Daniel Sonck Date: Mon, 17 Jan 2022 12:42:51 +0100 Subject: [PATCH 098/474] Make pagination of rooms in admin api stable (#11737) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Always add state.room_id after the configurable ORDER BY. 
Otherwise, for any sort, certain pages can contain results from other pages. (Especially when sorting by creator, since there may be many rooms by the same creator) * Document different order direction of numerical fields "joined_members", "joined_local_members", "version" and "state_events" are ordered in descending direction by default (dir=f). Added a note in tests to explain the differences in ordering. Signed-off-by: Daniël Sonck --- changelog.d/11737.bugfix | 1 + synapse/storage/databases/main/room.py | 18 +++++----- tests/rest/admin/test_room.py | 47 +++++++++++++++----------- 3 files changed, 38 insertions(+), 28 deletions(-) create mode 100644 changelog.d/11737.bugfix diff --git a/changelog.d/11737.bugfix b/changelog.d/11737.bugfix new file mode 100644 index 000000000000..a293d1cfec0c --- /dev/null +++ b/changelog.d/11737.bugfix @@ -0,0 +1 @@ +Make the list rooms admin api sort stable. Contributed by Daniël Sonck. \ No newline at end of file diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index c0e837854a32..95167116c953 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -551,24 +551,24 @@ async def get_rooms_paginate( FROM room_stats_state state INNER JOIN room_stats_current curr USING (room_id) INNER JOIN rooms USING (room_id) - %s - ORDER BY %s %s + {where} + ORDER BY {order_by} {direction}, state.room_id {direction} LIMIT ? OFFSET ? - """ % ( - where_statement, - order_by_column, - "ASC" if order_by_asc else "DESC", + """.format( + where=where_statement, + order_by=order_by_column, + direction="ASC" if order_by_asc else "DESC", ) # Use a nested SELECT statement as SQL can't count(*) with an OFFSET count_sql = """ SELECT count(*) FROM ( SELECT room_id FROM room_stats_state state - %s + {where} ) AS get_room_ids - """ % ( - where_statement, + """.format( + where=where_statement, ) def _get_rooms_paginate_txn( diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index d2c8781cd4b9..3495a0366ad3 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -1089,6 +1089,8 @@ def test_list_rooms(self) -> None: ) room_ids.append(room_id) + room_ids.sort() + # Request the list of rooms url = "/_synapse/admin/v1/rooms" channel = self.make_request( @@ -1360,6 +1362,12 @@ def _order_test( room_id_2 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok) room_id_3 = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok) + # Also create a list sorted by IDs for properties that are equal (and thus sorted by room_id) + sorted_by_room_id_asc = [room_id_1, room_id_2, room_id_3] + sorted_by_room_id_asc.sort() + sorted_by_room_id_desc = sorted_by_room_id_asc.copy() + sorted_by_room_id_desc.reverse() + # Set room names in alphabetical order. 
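The query change above appends `state.room_id` to whichever `ORDER BY` column the caller chose, so rooms that tie on the sort key (for example, many rooms with the same creator) always come back in one total order and adjacent pages cannot overlap. A self-contained illustration of the tie-breaker, using a hypothetical table and data:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE rooms (room_id TEXT PRIMARY KEY, creator TEXT)")
conn.executemany(
    "INSERT INTO rooms VALUES (?, ?)",
    [(f"!room{i}:test", "@alice:test") for i in range(6)],  # all tie on creator
)

# Without the room_id tie-breaker the relative order of equal rows is
# unspecified, so LIMIT/OFFSET pages may repeat or skip rows; with it, the
# ordering is total and every page is disjoint.
sql = "SELECT room_id FROM rooms ORDER BY creator ASC, room_id ASC LIMIT ? OFFSET ?"
page1 = {room_id for room_id, in conn.execute(sql, (3, 0))}
page2 = {room_id for room_id, in conn.execute(sql, (3, 3))}
assert not page1 & page2  # stable sort: no result appears on two pages
```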
room 1 -> A, 2 -> B, 3 -> C self.helper.send_state( room_id_1, @@ -1405,41 +1413,42 @@ def _order_test( _order_test("canonical_alias", [room_id_1, room_id_2, room_id_3]) _order_test("canonical_alias", [room_id_3, room_id_2, room_id_1], reverse=True) + # Note: joined_member counts are sorted in descending order when dir=f _order_test("joined_members", [room_id_3, room_id_2, room_id_1]) _order_test("joined_members", [room_id_1, room_id_2, room_id_3], reverse=True) + # Note: joined_local_member counts are sorted in descending order when dir=f _order_test("joined_local_members", [room_id_3, room_id_2, room_id_1]) _order_test( "joined_local_members", [room_id_1, room_id_2, room_id_3], reverse=True ) - _order_test("version", [room_id_1, room_id_2, room_id_3]) - _order_test("version", [room_id_1, room_id_2, room_id_3], reverse=True) + # Note: versions are sorted in descending order when dir=f + _order_test("version", sorted_by_room_id_asc, reverse=True) + _order_test("version", sorted_by_room_id_desc) - _order_test("creator", [room_id_1, room_id_2, room_id_3]) - _order_test("creator", [room_id_1, room_id_2, room_id_3], reverse=True) + _order_test("creator", sorted_by_room_id_asc) + _order_test("creator", sorted_by_room_id_desc, reverse=True) - _order_test("encryption", [room_id_1, room_id_2, room_id_3]) - _order_test("encryption", [room_id_1, room_id_2, room_id_3], reverse=True) + _order_test("encryption", sorted_by_room_id_asc) + _order_test("encryption", sorted_by_room_id_desc, reverse=True) - _order_test("federatable", [room_id_1, room_id_2, room_id_3]) - _order_test("federatable", [room_id_1, room_id_2, room_id_3], reverse=True) + _order_test("federatable", sorted_by_room_id_asc) + _order_test("federatable", sorted_by_room_id_desc, reverse=True) - _order_test("public", [room_id_1, room_id_2, room_id_3]) - # Different sort order of SQlite and PostreSQL - # _order_test("public", [room_id_3, room_id_2, room_id_1], reverse=True) + _order_test("public", sorted_by_room_id_asc) + _order_test("public", sorted_by_room_id_desc, reverse=True) - _order_test("join_rules", [room_id_1, room_id_2, room_id_3]) - _order_test("join_rules", [room_id_1, room_id_2, room_id_3], reverse=True) + _order_test("join_rules", sorted_by_room_id_asc) + _order_test("join_rules", sorted_by_room_id_desc, reverse=True) - _order_test("guest_access", [room_id_1, room_id_2, room_id_3]) - _order_test("guest_access", [room_id_1, room_id_2, room_id_3], reverse=True) + _order_test("guest_access", sorted_by_room_id_asc) + _order_test("guest_access", sorted_by_room_id_desc, reverse=True) - _order_test("history_visibility", [room_id_1, room_id_2, room_id_3]) - _order_test( - "history_visibility", [room_id_1, room_id_2, room_id_3], reverse=True - ) + _order_test("history_visibility", sorted_by_room_id_asc) + _order_test("history_visibility", sorted_by_room_id_desc, reverse=True) + # Note: state_event counts are sorted in descending order when dir=f _order_test("state_events", [room_id_3, room_id_2, room_id_1]) _order_test("state_events", [room_id_1, room_id_2, room_id_3], reverse=True) From 6a78ede56932b486c0ca3cc7c6edd84a2f373366 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 17 Jan 2022 12:14:40 +0000 Subject: [PATCH 099/474] Improve `reactor_tick_time` metric (#11724) The existing implementation of the `python_twisted_reactor_tick_time` metric is pretty useless, because it *only* measures the time taken to execute timed calls and callbacks from threads. 
That neglects everything that happens off the back of I/O, which is obviously quite a lot for us. To improve this, I've hooked into a different place in the reactor - in particular, where it calls `epoll`. That call is the only place it should wait for something to happen - the rest of the loop *should* be quick. I've also removed `python_twisted_reactor_pending_calls`, because I don't believe anyone ever looks at it, and it's a nuisance to populate. --- changelog.d/11724.misc | 1 + changelog.d/11724.removal | 1 + contrib/prometheus/consoles/synapse.html | 16 ---- synapse/metrics/__init__.py | 94 +----------------------- synapse/metrics/_reactor_metrics.py | 83 +++++++++++++++++++++ 5 files changed, 86 insertions(+), 109 deletions(-) create mode 100644 changelog.d/11724.misc create mode 100644 changelog.d/11724.removal create mode 100644 synapse/metrics/_reactor_metrics.py diff --git a/changelog.d/11724.misc b/changelog.d/11724.misc new file mode 100644 index 000000000000..e9d5dae857b2 --- /dev/null +++ b/changelog.d/11724.misc @@ -0,0 +1 @@ +Improve accuracy of `python_twisted_reactor_tick_time` prometheus metric. diff --git a/changelog.d/11724.removal b/changelog.d/11724.removal new file mode 100644 index 000000000000..088c3ff31ff8 --- /dev/null +++ b/changelog.d/11724.removal @@ -0,0 +1 @@ +Remove `python_twisted_reactor_pending_calls` prometheus metric. diff --git a/contrib/prometheus/consoles/synapse.html b/contrib/prometheus/consoles/synapse.html index cd9ad15231fc..d17c8a08d9e3 100644 --- a/contrib/prometheus/consoles/synapse.html +++ b/contrib/prometheus/consoles/synapse.html @@ -92,22 +92,6 @@
[HTML graph-console hunk garbled in extraction; only text fragments survive. The kept context is the "Average reactor tick time" panel, whose PromConsole.Graph script closes with "})"; the deleted lines are the entire "Pending calls per tick" panel, removed along with the `python_twisted_reactor_pending_calls` metric; the unchanged "Storage" and "Queries" headings follow.]
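The commit message above describes the new approach: instead of timing `runUntilCurrent`, the patch wraps the reactor's `epoll` object and observes the wall-clock gap between successive `poll()` calls, feeding that gap into the existing `python_twisted_reactor_tick_time` histogram and deriving `python_twisted_reactor_last_seen` from the last-polled timestamp. A simplified, standalone sketch of the wrap-and-time pattern (Linux-only, since `select.epoll` is what EPollReactor uses; a plain list stands in for the Prometheus histogram):

```python
import select
import time
from typing import Any, List, Tuple


class TimedPoller:
    """Wraps an epoll-like poller and records the gap between polls."""

    def __init__(self, poller: "select.epoll") -> None:
        self._poller = poller
        self.last_polled = time.time()
        self.gaps: List[float] = []  # stand-in for a Prometheus histogram

    def poll(self, *args: Any, **kwargs: Any) -> List[Tuple[int, int]]:
        # Everything that ran since the previous poll() returned happened in
        # this gap, so it approximates how long queued work had to wait.
        self.gaps.append(time.time() - self.last_polled)
        ret = self._poller.poll(*args, **kwargs)
        self.last_polled = time.time()
        return ret

    def __getattr__(self, item: str) -> Any:
        # Delegate register(), unregister(), etc. to the wrapped poller.
        return getattr(self._poller, item)


poller = TimedPoller(select.epoll())
poller.poll(0)  # returns immediately: no file descriptors registered
time.sleep(0.05)  # pretend the reactor spent ~50ms running callbacks
poller.poll(0)
print([round(gap, 2) for gap in poller.gaps])  # second gap is roughly 0.05
```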
diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index ba7ca0f2d4ed..9e6c1b2f3b54 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -12,15 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -import functools import itertools import logging import os import platform import threading -import time from typing import ( - Any, Callable, Dict, Generic, @@ -33,7 +30,6 @@ Type, TypeVar, Union, - cast, ) import attr @@ -44,11 +40,9 @@ GaugeMetricFamily, ) -from twisted.internet import reactor -from twisted.internet.base import ReactorBase from twisted.python.threadpool import ThreadPool -import synapse +import synapse.metrics._reactor_metrics from synapse.metrics._exposition import ( MetricsResource, generate_latest, @@ -368,21 +362,6 @@ def collect(self) -> Iterable[Metric]: REGISTRY.register(CPUMetrics()) -# -# Twisted reactor metrics -# - -tick_time = Histogram( - "python_twisted_reactor_tick_time", - "Tick time of the Twisted reactor (sec)", - buckets=[0.001, 0.002, 0.005, 0.01, 0.025, 0.05, 0.1, 0.2, 0.5, 1, 2, 5], -) -pending_calls_metric = Histogram( - "python_twisted_reactor_pending_calls", - "Pending calls", - buckets=[1, 2, 5, 10, 25, 50, 100, 250, 500, 1000], -) - # # Federation Metrics # @@ -434,8 +413,6 @@ def collect(self) -> Iterable[Metric]: " ".join([platform.system(), platform.release()]), ).set(1) -last_ticked = time.time() - # 3PID send info threepid_send_requests = Histogram( "synapse_threepid_send_requests_with_tries", @@ -483,75 +460,6 @@ def register_threadpool(name: str, threadpool: ThreadPool) -> None: ) -class ReactorLastSeenMetric: - def collect(self) -> Iterable[Metric]: - cm = GaugeMetricFamily( - "python_twisted_reactor_last_seen", - "Seconds since the Twisted reactor was last seen", - ) - cm.add_metric([], time.time() - last_ticked) - yield cm - - -REGISTRY.register(ReactorLastSeenMetric()) - -F = TypeVar("F", bound=Callable[..., Any]) - - -def runUntilCurrentTimer(reactor: ReactorBase, func: F) -> F: - @functools.wraps(func) - def f(*args: Any, **kwargs: Any) -> Any: - now = reactor.seconds() - num_pending = 0 - - # _newTimedCalls is one long list of *all* pending calls. Below loop - # is based off of impl of reactor.runUntilCurrent - for delayed_call in reactor._newTimedCalls: - if delayed_call.time > now: - break - - if delayed_call.delayed_time > 0: - continue - - num_pending += 1 - - num_pending += len(reactor.threadCallQueue) - start = time.time() - ret = func(*args, **kwargs) - end = time.time() - - # record the amount of wallclock time spent running pending calls. - # This is a proxy for the actual amount of time between reactor polls, - # since about 25% of time is actually spent running things triggered by - # I/O events, but that is harder to capture without rewriting half the - # reactor. - tick_time.observe(end - start) - pending_calls_metric.observe(num_pending) - - # Update the time we last ticked, for the metric to test whether - # Synapse's reactor has frozen - global last_ticked - last_ticked = end - - return ret - - return cast(F, f) - - -try: - # Ensure the reactor has all the attributes we expect - reactor.seconds # type: ignore - reactor.runUntilCurrent # type: ignore - reactor._newTimedCalls # type: ignore - reactor.threadCallQueue # type: ignore - - # runUntilCurrent is called when we have pending calls. It is called once - # per iteratation after fd polling. 
- reactor.runUntilCurrent = runUntilCurrentTimer(reactor, reactor.runUntilCurrent) # type: ignore -except AttributeError: - pass - - __all__ = [ "MetricsResource", "generate_latest", diff --git a/synapse/metrics/_reactor_metrics.py b/synapse/metrics/_reactor_metrics.py new file mode 100644 index 000000000000..ce0688621c37 --- /dev/null +++ b/synapse/metrics/_reactor_metrics.py @@ -0,0 +1,83 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import select +import time +from typing import Any, Iterable, List, Tuple + +from prometheus_client import Histogram, Metric +from prometheus_client.core import REGISTRY, GaugeMetricFamily + +from twisted.internet import reactor + +# +# Twisted reactor metrics +# + +tick_time = Histogram( + "python_twisted_reactor_tick_time", + "Tick time of the Twisted reactor (sec)", + buckets=[0.001, 0.002, 0.005, 0.01, 0.025, 0.05, 0.1, 0.2, 0.5, 1, 2, 5], +) + + +class EpollWrapper: + """a wrapper for an epoll object which records the time between polls""" + + def __init__(self, poller: "select.epoll"): + self.last_polled = time.time() + self._poller = poller + + def poll(self, *args, **kwargs) -> List[Tuple[int, int]]: # type: ignore[no-untyped-def] + # record the time since poll() was last called. This gives a good proxy for + # how long it takes to run everything in the reactor - ie, how long anything + # waiting for the next tick will have to wait. + tick_time.observe(time.time() - self.last_polled) + + ret = self._poller.poll(*args, **kwargs) + + self.last_polled = time.time() + return ret + + def __getattr__(self, item: str) -> Any: + return getattr(self._poller, item) + + +class ReactorLastSeenMetric: + def __init__(self, epoll_wrapper: EpollWrapper): + self._epoll_wrapper = epoll_wrapper + + def collect(self) -> Iterable[Metric]: + cm = GaugeMetricFamily( + "python_twisted_reactor_last_seen", + "Seconds since the Twisted reactor was last seen", + ) + cm.add_metric([], time.time() - self._epoll_wrapper.last_polled) + yield cm + + +try: + # if the reactor has a `_poller` attribute, which is an `epoll` object + # (ie, it's an EPollReactor), we wrap the `epoll` with a thing that will + # measure the time between ticks + from select import epoll + + poller = reactor._poller # type: ignore[attr-defined] +except (AttributeError, ImportError): + pass +else: + if isinstance(poller, epoll): + poller = EpollWrapper(poller) + reactor._poller = poller # type: ignore[attr-defined] + REGISTRY.register(ReactorLastSeenMetric(poller)) From b0352f9c08a16bf6c2abf4eeb014774f59e69458 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 17 Jan 2022 12:35:00 +0000 Subject: [PATCH 100/474] Update documentation for configuring facebook login (#11755) ... and a minor thinko fix in the sample config. 
--- changelog.d/11755.doc | 1 + docs/openid.md | 25 +++++++++++++------------ docs/sample_config.yaml | 9 ++++++--- synapse/config/oidc.py | 9 ++++++--- 4 files changed, 26 insertions(+), 18 deletions(-) create mode 100644 changelog.d/11755.doc diff --git a/changelog.d/11755.doc b/changelog.d/11755.doc new file mode 100644 index 000000000000..5dd8feea63e7 --- /dev/null +++ b/changelog.d/11755.doc @@ -0,0 +1 @@ +Update documentation for configuring login with facebook. diff --git a/docs/openid.md b/docs/openid.md index ff9de9d5b8bf..171ea3b7128b 100644 --- a/docs/openid.md +++ b/docs/openid.md @@ -390,9 +390,6 @@ oidc_providers: ### Facebook -Like Github, Facebook provide a custom OAuth2 API rather than an OIDC-compliant -one so requires a little more configuration. - 0. You will need a Facebook developer account. You can register for one [here](https://developers.facebook.com/async/registration/). 1. On the [apps](https://developers.facebook.com/apps/) page of the developer @@ -412,24 +409,28 @@ Synapse config: idp_name: Facebook idp_brand: "facebook" # optional: styling hint for clients discover: false - issuer: "https://facebook.com" + issuer: "https://www.facebook.com" client_id: "your-client-id" # TO BE FILLED client_secret: "your-client-secret" # TO BE FILLED scopes: ["openid", "email"] - authorization_endpoint: https://facebook.com/dialog/oauth - token_endpoint: https://graph.facebook.com/v9.0/oauth/access_token - user_profile_method: "userinfo_endpoint" - userinfo_endpoint: "https://graph.facebook.com/v9.0/me?fields=id,name,email,picture" + authorization_endpoint: "https://facebook.com/dialog/oauth" + token_endpoint: "https://graph.facebook.com/v9.0/oauth/access_token" + jwks_uri: "https://www.facebook.com/.well-known/oauth/openid/jwks/" user_mapping_provider: config: - subject_claim: "id" display_name_template: "{{ user.name }}" + email_template: "{{ '{{ user.email }}' }}" ``` Relevant documents: - * https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow - * Using Facebook's Graph API: https://developers.facebook.com/docs/graph-api/using-graph-api/ - * Reference to the User endpoint: https://developers.facebook.com/docs/graph-api/reference/user + * [Manually Build a Login Flow](https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow) + * [Using Facebook's Graph API](https://developers.facebook.com/docs/graph-api/using-graph-api/) + * [Reference to the User endpoint](https://developers.facebook.com/docs/graph-api/reference/user) + +Facebook do have an [OIDC discovery endpoint](https://www.facebook.com/.well-known/openid-configuration), +but it has a `response_types_supported` which excludes "code" (which we rely on, and +is even mentioned in their [documentation](https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow#login)), +so we have to disable discovery and configure the URIs manually. ### Gitea diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 9a501167ee3b..4d4f6a694888 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1877,10 +1877,13 @@ saml2_config: # Defaults to false. Avoid this in production. # # user_profile_method: Whether to fetch the user profile from the userinfo -# endpoint. Valid values are: 'auto' or 'userinfo_endpoint'. +# endpoint, or to rely on the data returned in the id_token from the +# token_endpoint. # -# Defaults to 'auto', which fetches the userinfo endpoint if 'openid' is -# included in 'scopes'. 
Set to 'userinfo_endpoint' to always fetch the +# Valid values are: 'auto' or 'userinfo_endpoint'. +# +# Defaults to 'auto', which uses the userinfo endpoint if 'openid' is +# not included in 'scopes'. Set to 'userinfo_endpoint' to always use the # userinfo endpoint. # # allow_existing_users: set to 'true' to allow a user logging in via OIDC to diff --git a/synapse/config/oidc.py b/synapse/config/oidc.py index 79c400fe30b8..e783b1131501 100644 --- a/synapse/config/oidc.py +++ b/synapse/config/oidc.py @@ -148,10 +148,13 @@ def generate_config_section(self, config_dir_path, server_name, **kwargs) -> str # Defaults to false. Avoid this in production. # # user_profile_method: Whether to fetch the user profile from the userinfo - # endpoint. Valid values are: 'auto' or 'userinfo_endpoint'. + # endpoint, or to rely on the data returned in the id_token from the + # token_endpoint. # - # Defaults to 'auto', which fetches the userinfo endpoint if 'openid' is - # included in 'scopes'. Set to 'userinfo_endpoint' to always fetch the + # Valid values are: 'auto' or 'userinfo_endpoint'. + # + # Defaults to 'auto', which uses the userinfo endpoint if 'openid' is + # not included in 'scopes'. Set to 'userinfo_endpoint' to always use the # userinfo endpoint. # # allow_existing_users: set to 'true' to allow a user logging in via OIDC to From 86615aa965a04dc4c16ff28ee5b82cbe65a83530 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 17 Jan 2022 14:55:30 +0000 Subject: [PATCH 101/474] Fix up docs for `track_puppeted_user_ips` (again) (#11757) Fixes #11741 --- changelog.d/11561.feature | 2 +- changelog.d/11749.feature | 2 +- changelog.d/11757.feature | 1 + docs/sample_config.yaml | 16 ++++++++++++---- synapse/config/api.py | 16 ++++++++++++---- 5 files changed, 27 insertions(+), 10 deletions(-) create mode 100644 changelog.d/11757.feature diff --git a/changelog.d/11561.feature b/changelog.d/11561.feature index 19dada883bb2..3d4f2159c0b3 100644 --- a/changelog.d/11561.feature +++ b/changelog.d/11561.feature @@ -1 +1 @@ -Add `track_puppeted_user_ips` config flag to track puppeted user IP addresses. This also includes them in monthly active user counts. +Add `track_puppeted_user_ips` config flag to record client IP addresses against puppeted users, and include the puppeted users in monthly active user counts. diff --git a/changelog.d/11749.feature b/changelog.d/11749.feature index 19dada883bb2..3d4f2159c0b3 100644 --- a/changelog.d/11749.feature +++ b/changelog.d/11749.feature @@ -1 +1 @@ -Add `track_puppeted_user_ips` config flag to track puppeted user IP addresses. This also includes them in monthly active user counts. +Add `track_puppeted_user_ips` config flag to record client IP addresses against puppeted users, and include the puppeted users in monthly active user counts. diff --git a/changelog.d/11757.feature b/changelog.d/11757.feature new file mode 100644 index 000000000000..3d4f2159c0b3 --- /dev/null +++ b/changelog.d/11757.feature @@ -0,0 +1 @@ +Add `track_puppeted_user_ips` config flag to record client IP addresses against puppeted users, and include the puppeted users in monthly active user counts. 
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 4d4f6a694888..5908f262e513 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1503,10 +1503,18 @@ room_prejoin_state: #additional_event_types: # - org.example.custom.event.type -# By default when puppeting another user, the user who has created the -# access token for puppeting is tracked. If this is enabled, both -# requests are tracked. Implicitly enables MAU tracking for puppeted users. -# Uncomment to also track puppeted user IP's. +# We record the IP address of clients used to access the API for various +# reasons, including displaying it to the user in the "Where you're signed in" +# dialog. +# +# By default, when puppeting another user via the admin API, the client IP +# address is recorded against the user who created the access token (ie, the +# admin user), and *not* the puppeted user. +# +# Uncomment the following to also record the IP address against the puppeted +# user. (This also means that the puppeted user will count as an "active" user +# for the purpose of monthly active user tracking - see 'limit_usage_by_mau' etc +# above.) # #track_puppeted_user_ips: true diff --git a/synapse/config/api.py b/synapse/config/api.py index f8e52150a25f..8133b6b62402 100644 --- a/synapse/config/api.py +++ b/synapse/config/api.py @@ -61,10 +61,18 @@ def generate_config_section(cls, **kwargs) -> str: #additional_event_types: # - org.example.custom.event.type - # By default when puppeting another user, the user who has created the - # access token for puppeting is tracked. If this is enabled, both - # requests are tracked. Implicitly enables MAU tracking for puppeted users. - # Uncomment to also track puppeted user IP's. + # We record the IP address of clients used to access the API for various + # reasons, including displaying it to the user in the "Where you're signed in" + # dialog. + # + # By default, when puppeting another user via the admin API, the client IP + # address is recorded against the user who created the access token (ie, the + # admin user), and *not* the puppeted user. + # + # Uncomment the following to also record the IP address against the puppeted + # user. (This also means that the puppeted user will count as an "active" user + # for the purpose of monthly active user tracking - see 'limit_usage_by_mau' etc + # above.) # #track_puppeted_user_ips: true """ % { From cefd4b87a32a28fddc36a640a14ba3bdb50cb0c6 Mon Sep 17 00:00:00 2001 From: AndrewFerr Date: Mon, 17 Jan 2022 10:13:09 -0500 Subject: [PATCH 102/474] Warn against using Let's Encrypt certs for encrypted TURN (#11686) * Warn against using Let's Encrypt certs for encrypted TURN This helps to avoid client-side issues: * https://github.com/vector-im/element-android/issues/1533 * https://github.com/vector-im/element-ios/issues/2712 Signed-off-by: Andrew Ferrazzutti --- changelog.d/11686.doc | 1 + docs/turn-howto.md | 16 ++++++++++++++++ 2 files changed, 17 insertions(+) create mode 100644 changelog.d/11686.doc diff --git a/changelog.d/11686.doc b/changelog.d/11686.doc new file mode 100644 index 000000000000..41bc7799d46a --- /dev/null +++ b/changelog.d/11686.doc @@ -0,0 +1 @@ +Warn against using a Let's Encrypt certificate for TLS/DTLS TURN server client connections, and suggest using ZeroSSL certificate instead. This bypasses client-side connectivity errors caused by WebRTC libraries that reject Let's Encrypt certificates. Contibuted by @AndrewFerr. 
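The TURN guidance added below warns that Chromium's WebRTC stack rejects Let's Encrypt certificates on TLS/DTLS TURN listeners. To see which issuer your own coturn presents on its TLS port, a short stdlib check can help; the hostname is a placeholder, and this inspects the TLS listener only, not DTLS:

```python
import socket
import ssl


def tls_issuer(host: str, port: int = 5349) -> str:
    # Connects to coturn's tls-listening-port (5349 by default) and returns
    # the presented certificate's issuer; raises if the chain fails to verify.
    context = ssl.create_default_context()
    with socket.create_connection((host, port), timeout=5) as sock:
        with context.wrap_socket(sock, server_hostname=host) as tls:
            cert = tls.getpeercert()
    return ", ".join("=".join(pair) for rdn in cert["issuer"] for pair in rdn)


# "turn.example.com" is a placeholder; point this at your own TURN server.
print(tls_issuer("turn.example.com"))  # e.g. "... O=Let's Encrypt, CN=R3"
```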
diff --git a/docs/turn-howto.md b/docs/turn-howto.md index e32aaa1850d6..eba7ca6124a5 100644 --- a/docs/turn-howto.md +++ b/docs/turn-howto.md @@ -137,6 +137,10 @@ This will install and start a systemd service called `coturn`. # TLS private key file pkey=/path/to/privkey.pem + + # Ensure the configuration lines that disable TLS/DTLS are commented-out or removed + #no-tls + #no-dtls ``` In this case, replace the `turn:` schemes in the `turn_uris` settings below @@ -145,6 +149,14 @@ This will install and start a systemd service called `coturn`. We recommend that you only try to set up TLS/DTLS once you have set up a basic installation and got it working. + NB: If your TLS certificate was provided by Let's Encrypt, TLS/DTLS will + not work with any Matrix client that uses Chromium's WebRTC library. This + currently includes Element Android & iOS; for more details, see their + [respective](https://github.com/vector-im/element-android/issues/1533) + [issues](https://github.com/vector-im/element-ios/issues/2712) as well as the underlying + [WebRTC issue](https://bugs.chromium.org/p/webrtc/issues/detail?id=11710). + Consider using a ZeroSSL certificate for your TURN server as a working alternative. + 1. Ensure your firewall allows traffic into the TURN server on the ports you've configured it to listen on (By default: 3478 and 5349 for TURN traffic (remember to allow both TCP and UDP traffic), and ports 49152-65535 @@ -250,6 +262,10 @@ Here are a few things to try: * Check that you have opened your firewall to allow UDP traffic to the UDP relay ports (49152-65535 by default). + * Try disabling `coturn`'s TLS/DTLS listeners and enable only its (unencrypted) + TCP/UDP listeners. (This will only leave signaling traffic unencrypted; + voice & video WebRTC traffic is always encrypted.) + * Some WebRTC implementations (notably, that of Google Chrome) appear to get confused by TURN servers which are reachable over IPv6 (this appears to be an unexpected side-effect of its handling of multiple IP addresses as From d8be9924efd5905e584cbb320ed4ec1aaa164c62 Mon Sep 17 00:00:00 2001 From: lukasdenk <63459921+lukasdenk@users.noreply.github.com> Date: Mon, 17 Jan 2022 17:43:25 +0100 Subject: [PATCH 103/474] Add a flag to the `synapse_review_recent_signups` script to ignore and filter appservice users. (#11675) --- changelog.d/11675.feature | 1 + synapse/_scripts/review_recent_signups.py | 18 +++++++++++++++--- synapse/handlers/register.py | 12 +++++++----- 3 files changed, 23 insertions(+), 8 deletions(-) create mode 100644 changelog.d/11675.feature diff --git a/changelog.d/11675.feature b/changelog.d/11675.feature new file mode 100644 index 000000000000..9a276f9542cf --- /dev/null +++ b/changelog.d/11675.feature @@ -0,0 +1 @@ +Add a flag to the `synapse_review_recent_signups` script to ignore and filter appservice users. 
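The diff below wires the new flag through the script's argument parser and SQL. Distilled to its essence (the base query here is abbreviated from the script), the filter is a single extra predicate, since user rows registered by an application service carry a non-NULL `appservice_id`:

```python
def build_recent_users_sql(exclude_app_service: bool) -> str:
    # Abbreviated sketch of the query the script builds; see the full
    # version in the diff below.
    sql = """
        SELECT name, creation_ts FROM users
        WHERE creation_ts > ? AND deactivated = 0
    """
    if exclude_app_service:
        # Appservice-registered users have a non-NULL appservice_id.
        sql += " AND appservice_id IS NULL"
    return sql
```

With the patch applied, running the script with `-a` (or `--exclude-app-service`) skips bridge and bot accounts when reviewing signups.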
diff --git a/synapse/_scripts/review_recent_signups.py b/synapse/_scripts/review_recent_signups.py index 093af4327ae0..e207f154f3aa 100644 --- a/synapse/_scripts/review_recent_signups.py +++ b/synapse/_scripts/review_recent_signups.py @@ -46,7 +46,9 @@ class UserInfo: ips: List[str] = attr.Factory(list) -def get_recent_users(txn: LoggingTransaction, since_ms: int) -> List[UserInfo]: +def get_recent_users( + txn: LoggingTransaction, since_ms: int, exclude_app_service: bool +) -> List[UserInfo]: """Fetches recently registered users and some info on them.""" sql = """ @@ -56,6 +58,9 @@ def get_recent_users(txn: LoggingTransaction, since_ms: int) -> List[UserInfo]: AND deactivated = 0 """ + if exclude_app_service: + sql += " AND appservice_id IS NULL" + txn.execute(sql, (since_ms / 1000,)) user_infos = [UserInfo(user_id, creation_ts) for user_id, creation_ts in txn] @@ -113,7 +118,7 @@ def main() -> None: "-e", "--exclude-emails", action="store_true", - help="Exclude users that have validated email addresses", + help="Exclude users that have validated email addresses.", ) parser.add_argument( "-u", @@ -121,6 +126,12 @@ def main() -> None: action="store_true", help="Only print user IDs that match.", ) + parser.add_argument( + "-a", + "--exclude-app-service", + help="Exclude appservice users.", + action="store_true", + ) config = ReviewConfig() @@ -133,6 +144,7 @@ def main() -> None: since_ms = time.time() * 1000 - Config.parse_duration(config_args.since) exclude_users_with_email = config_args.exclude_emails + exclude_users_with_appservice = config_args.exclude_app_service include_context = not config_args.only_users for database_config in config.database.databases: @@ -143,7 +155,7 @@ def main() -> None: with make_conn(database_config, engine, "review_recent_signups") as db_conn: # This generates a type of Cursor, not LoggingTransaction. - user_infos = get_recent_users(db_conn.cursor(), since_ms) # type: ignore[arg-type] + user_infos = get_recent_users(db_conn.cursor(), since_ms, exclude_users_with_appservice) # type: ignore[arg-type] for user_info in user_infos: if exclude_users_with_email and user_info.emails: diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index f08a516a7588..68dbae591618 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -979,16 +979,18 @@ async def _register_email_threepid( if ( self.hs.config.email.email_enable_notifs and self.hs.config.email.email_notif_for_new_users - and token ): # Pull the ID of the access token back out of the db # It would really make more sense for this to be passed # up when the access token is saved, but that's quite an # invasive change I'd rather do separately. - user_tuple = await self.store.get_user_by_access_token(token) - # The token better still exist. - assert user_tuple - token_id = user_tuple.token_id + if token: + user_tuple = await self.store.get_user_by_access_token(token) + # The token better still exist. 
+ assert user_tuple + token_id = user_tuple.token_id + else: + token_id = None await self.pusher_pool.add_pusher( user_id=user_id, From 3ba9389699529f8173f787029a281ea851bf3f06 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 18 Jan 2022 10:41:36 +0000 Subject: [PATCH 104/474] 1.50.0 --- CHANGES.md | 11 ++++++++--- debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 8f53ce6c5188..fb4739dd04a2 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,11 +1,16 @@ -Synapse 1.50.0rc2 (2022-01-14) -============================== +Synapse 1.50.0 (2022-01-18) +=========================== -This release candidate fixes a federation-breaking regression introduced in Synapse 1.50.0rc1. +No significant changes since 1.50.0rc2. Please note that we now only support Python 3.7+ and PostgreSQL 10+ (if applicable), because Python 3.6 and PostgreSQL 9.6 have reached end-of-life. +Synapse 1.50.0rc2 (2022-01-14) +============================== + +This release candidate fixes a federation-breaking regression introduced in Synapse 1.50.0rc1. + Bugfixes -------- diff --git a/debian/changelog b/debian/changelog index 381980f4683a..f1245cd3afa2 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.50.0) stable; urgency=medium + + * New synapse release 1.50.0. + + -- Synapse Packaging team Tue, 18 Jan 2022 10:40:38 +0000 + matrix-synapse-py3 (1.50.0~rc2) stable; urgency=medium * New synapse release 1.50.0~rc2. diff --git a/synapse/__init__.py b/synapse/__init__.py index f2dac4e7de6c..201925e91d48 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.50.0rc2" +__version__ = "1.50.0" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 4ec0a309cf8608df1d29746af76f91b8d35e5de3 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 18 Jan 2022 10:47:23 +0000 Subject: [PATCH 105/474] Move python/postgres deprecation notice to the top of 1.50 changelog --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index fb4739dd04a2..8029a9d21c31 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,10 +1,10 @@ Synapse 1.50.0 (2022-01-18) =========================== -No significant changes since 1.50.0rc2. - Please note that we now only support Python 3.7+ and PostgreSQL 10+ (if applicable), because Python 3.6 and PostgreSQL 9.6 have reached end-of-life. +No significant changes since 1.50.0rc2. + Synapse 1.50.0rc2 (2022-01-14) ============================== From 251b5567ecc8fb3d6debaa3f77f6ec2620877d36 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 18 Jan 2022 13:06:04 +0000 Subject: [PATCH 106/474] Remove `log_function` and its uses (#11761) I've never found this terribly useful. I think it was added in the early days of Synapse, without much thought as to what would actually be useful to log, and has just been cargo-culted ever since. Rather, it tends to clutter up debug logs with useless information. 
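The replacement for blanket `@log_function` decoration is a targeted debug line at the points that actually matter, with lazily formatted arguments. A minimal sketch (the function and values are illustrative, not code from this series):

```python
import logging

logger = logging.getLogger(__name__)

def handle_transaction(txn_id: str, pdu_count: int) -> None:
    # Log only the values that are useful for debugging; %-style arguments
    # keep formatting lazy, so nothing is stringified unless DEBUG is on.
    logger.debug("handling transaction %s (%d PDUs)", txn_id, pdu_count)
```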
--- changelog.d/11761.misc | 1 + synapse/federation/federation_client.py | 5 -- synapse/federation/federation_server.py | 3 - synapse/federation/persistence.py | 3 - synapse/federation/transport/client.py | 48 --------------- synapse/handlers/events.py | 2 - synapse/handlers/federation.py | 6 -- synapse/handlers/federation_event.py | 3 - synapse/handlers/presence.py | 2 - synapse/logging/utils.py | 76 ------------------------ synapse/notifier.py | 3 - synapse/state/__init__.py | 2 - synapse/storage/databases/main/events.py | 2 - 13 files changed, 1 insertion(+), 155 deletions(-) create mode 100644 changelog.d/11761.misc delete mode 100644 synapse/logging/utils.py diff --git a/changelog.d/11761.misc b/changelog.d/11761.misc new file mode 100644 index 000000000000..d4d997a7b9bc --- /dev/null +++ b/changelog.d/11761.misc @@ -0,0 +1 @@ +Remove `log_function` utility function and its uses. diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 6ea4edfc71f8..57cf35bd927c 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -56,7 +56,6 @@ from synapse.events import EventBase, builder from synapse.federation.federation_base import FederationBase, event_from_pdu_json from synapse.federation.transport.client import SendJoinResponse -from synapse.logging.utils import log_function from synapse.types import JsonDict, get_domain_from_id from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.expiringcache import ExpiringCache @@ -144,7 +143,6 @@ def _clear_tried_cache(self) -> None: if destination_dict: self.pdu_destination_tried[event_id] = destination_dict - @log_function async def make_query( self, destination: str, @@ -178,7 +176,6 @@ async def make_query( ignore_backoff=ignore_backoff, ) - @log_function async def query_client_keys( self, destination: str, content: JsonDict, timeout: int ) -> JsonDict: @@ -196,7 +193,6 @@ async def query_client_keys( destination, content, timeout ) - @log_function async def query_user_devices( self, destination: str, user_id: str, timeout: int = 30000 ) -> JsonDict: @@ -208,7 +204,6 @@ async def query_user_devices( destination, user_id, timeout ) - @log_function async def claim_client_keys( self, destination: str, content: JsonDict, timeout: int ) -> JsonDict: diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index ee71f289c81c..af9cb98f67b1 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -58,7 +58,6 @@ run_in_background, ) from synapse.logging.opentracing import log_kv, start_active_span_from_edu, trace -from synapse.logging.utils import log_function from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.replication.http.federation import ( ReplicationFederationSendEduRestServlet, @@ -859,7 +858,6 @@ async def on_event_auth( res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]} return 200, res - @log_function async def on_query_client_keys( self, origin: str, content: Dict[str, str] ) -> Tuple[int, Dict[str, Any]]: @@ -940,7 +938,6 @@ async def on_get_missing_events( return {"events": [ev.get_pdu_json(time_now) for ev in missing_events]} - @log_function async def on_openid_userinfo(self, token: str) -> Optional[str]: ts_now_ms = self._clock.time_msec() return await self.store.get_user_id_for_open_id_token(token, ts_now_ms) diff --git a/synapse/federation/persistence.py 
b/synapse/federation/persistence.py index 523ab1c51ed1..60e2e6cf019f 100644 --- a/synapse/federation/persistence.py +++ b/synapse/federation/persistence.py @@ -23,7 +23,6 @@ from typing import Optional, Tuple from synapse.federation.units import Transaction -from synapse.logging.utils import log_function from synapse.storage.databases.main import DataStore from synapse.types import JsonDict @@ -36,7 +35,6 @@ class TransactionActions: def __init__(self, datastore: DataStore): self.store = datastore - @log_function async def have_responded( self, origin: str, transaction: Transaction ) -> Optional[Tuple[int, JsonDict]]: @@ -53,7 +51,6 @@ async def have_responded( return await self.store.get_received_txn_response(transaction_id, origin) - @log_function async def set_response( self, origin: str, transaction: Transaction, code: int, response: JsonDict ) -> None: diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 9fc4c31c93f6..8782586cd6b4 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -44,7 +44,6 @@ from synapse.events import EventBase, make_event_from_dict from synapse.federation.units import Transaction from synapse.http.matrixfederationclient import ByteParser -from synapse.logging.utils import log_function from synapse.types import JsonDict logger = logging.getLogger(__name__) @@ -62,7 +61,6 @@ def __init__(self, hs): self.server_name = hs.hostname self.client = hs.get_federation_http_client() - @log_function async def get_room_state_ids( self, destination: str, room_id: str, event_id: str ) -> JsonDict: @@ -88,7 +86,6 @@ async def get_room_state_ids( try_trailing_slash_on_400=True, ) - @log_function async def get_event( self, destination: str, event_id: str, timeout: Optional[int] = None ) -> JsonDict: @@ -111,7 +108,6 @@ async def get_event( destination, path=path, timeout=timeout, try_trailing_slash_on_400=True ) - @log_function async def backfill( self, destination: str, room_id: str, event_tuples: Collection[str], limit: int ) -> Optional[JsonDict]: @@ -149,7 +145,6 @@ async def backfill( destination, path=path, args=args, try_trailing_slash_on_400=True ) - @log_function async def timestamp_to_event( self, destination: str, room_id: str, timestamp: int, direction: str ) -> Union[JsonDict, List]: @@ -185,7 +180,6 @@ async def timestamp_to_event( return remote_response - @log_function async def send_transaction( self, transaction: Transaction, @@ -234,7 +228,6 @@ async def send_transaction( try_trailing_slash_on_400=True, ) - @log_function async def make_query( self, destination: str, @@ -254,7 +247,6 @@ async def make_query( ignore_backoff=ignore_backoff, ) - @log_function async def make_membership_event( self, destination: str, @@ -317,7 +309,6 @@ async def make_membership_event( ignore_backoff=ignore_backoff, ) - @log_function async def send_join_v1( self, room_version: RoomVersion, @@ -336,7 +327,6 @@ async def send_join_v1( max_response_size=MAX_RESPONSE_SIZE_SEND_JOIN, ) - @log_function async def send_join_v2( self, room_version: RoomVersion, @@ -355,7 +345,6 @@ async def send_join_v2( max_response_size=MAX_RESPONSE_SIZE_SEND_JOIN, ) - @log_function async def send_leave_v1( self, destination: str, room_id: str, event_id: str, content: JsonDict ) -> Tuple[int, JsonDict]: @@ -372,7 +361,6 @@ async def send_leave_v1( ignore_backoff=True, ) - @log_function async def send_leave_v2( self, destination: str, room_id: str, event_id: str, content: JsonDict ) -> JsonDict: @@ -389,7 +377,6 @@ 
async def send_leave_v2( ignore_backoff=True, ) - @log_function async def send_knock_v1( self, destination: str, @@ -423,7 +410,6 @@ async def send_knock_v1( destination=destination, path=path, data=content ) - @log_function async def send_invite_v1( self, destination: str, room_id: str, event_id: str, content: JsonDict ) -> Tuple[int, JsonDict]: @@ -433,7 +419,6 @@ async def send_invite_v1( destination=destination, path=path, data=content, ignore_backoff=True ) - @log_function async def send_invite_v2( self, destination: str, room_id: str, event_id: str, content: JsonDict ) -> JsonDict: @@ -443,7 +428,6 @@ async def send_invite_v2( destination=destination, path=path, data=content, ignore_backoff=True ) - @log_function async def get_public_rooms( self, remote_server: str, @@ -516,7 +500,6 @@ async def get_public_rooms( return response - @log_function async def exchange_third_party_invite( self, destination: str, room_id: str, event_dict: JsonDict ) -> JsonDict: @@ -526,7 +509,6 @@ async def exchange_third_party_invite( destination=destination, path=path, data=event_dict ) - @log_function async def get_event_auth( self, destination: str, room_id: str, event_id: str ) -> JsonDict: @@ -534,7 +516,6 @@ async def get_event_auth( return await self.client.get_json(destination=destination, path=path) - @log_function async def query_client_keys( self, destination: str, query_content: JsonDict, timeout: int ) -> JsonDict: @@ -576,7 +557,6 @@ async def query_client_keys( destination=destination, path=path, data=query_content, timeout=timeout ) - @log_function async def query_user_devices( self, destination: str, user_id: str, timeout: int ) -> JsonDict: @@ -616,7 +596,6 @@ async def query_user_devices( destination=destination, path=path, timeout=timeout ) - @log_function async def claim_client_keys( self, destination: str, query_content: JsonDict, timeout: int ) -> JsonDict: @@ -655,7 +634,6 @@ async def claim_client_keys( destination=destination, path=path, data=query_content, timeout=timeout ) - @log_function async def get_missing_events( self, destination: str, @@ -680,7 +658,6 @@ async def get_missing_events( timeout=timeout, ) - @log_function async def get_group_profile( self, destination: str, group_id: str, requester_user_id: str ) -> JsonDict: @@ -694,7 +671,6 @@ async def get_group_profile( ignore_backoff=True, ) - @log_function async def update_group_profile( self, destination: str, group_id: str, requester_user_id: str, content: JsonDict ) -> JsonDict: @@ -716,7 +692,6 @@ async def update_group_profile( ignore_backoff=True, ) - @log_function async def get_group_summary( self, destination: str, group_id: str, requester_user_id: str ) -> JsonDict: @@ -730,7 +705,6 @@ async def get_group_summary( ignore_backoff=True, ) - @log_function async def get_rooms_in_group( self, destination: str, group_id: str, requester_user_id: str ) -> JsonDict: @@ -798,7 +772,6 @@ async def remove_room_from_group( ignore_backoff=True, ) - @log_function async def get_users_in_group( self, destination: str, group_id: str, requester_user_id: str ) -> JsonDict: @@ -812,7 +785,6 @@ async def get_users_in_group( ignore_backoff=True, ) - @log_function async def get_invited_users_in_group( self, destination: str, group_id: str, requester_user_id: str ) -> JsonDict: @@ -826,7 +798,6 @@ async def get_invited_users_in_group( ignore_backoff=True, ) - @log_function async def accept_group_invite( self, destination: str, group_id: str, user_id: str, content: JsonDict ) -> JsonDict: @@ -837,7 +808,6 @@ async def 
accept_group_invite( destination=destination, path=path, data=content, ignore_backoff=True ) - @log_function def join_group( self, destination: str, group_id: str, user_id: str, content: JsonDict ) -> Awaitable[JsonDict]: @@ -848,7 +818,6 @@ def join_group( destination=destination, path=path, data=content, ignore_backoff=True ) - @log_function async def invite_to_group( self, destination: str, @@ -868,7 +837,6 @@ async def invite_to_group( ignore_backoff=True, ) - @log_function async def invite_to_group_notification( self, destination: str, group_id: str, user_id: str, content: JsonDict ) -> JsonDict: @@ -882,7 +850,6 @@ async def invite_to_group_notification( destination=destination, path=path, data=content, ignore_backoff=True ) - @log_function async def remove_user_from_group( self, destination: str, @@ -902,7 +869,6 @@ async def remove_user_from_group( ignore_backoff=True, ) - @log_function async def remove_user_from_group_notification( self, destination: str, group_id: str, user_id: str, content: JsonDict ) -> JsonDict: @@ -916,7 +882,6 @@ async def remove_user_from_group_notification( destination=destination, path=path, data=content, ignore_backoff=True ) - @log_function async def renew_group_attestation( self, destination: str, group_id: str, user_id: str, content: JsonDict ) -> JsonDict: @@ -930,7 +895,6 @@ async def renew_group_attestation( destination=destination, path=path, data=content, ignore_backoff=True ) - @log_function async def update_group_summary_room( self, destination: str, @@ -959,7 +923,6 @@ async def update_group_summary_room( ignore_backoff=True, ) - @log_function async def delete_group_summary_room( self, destination: str, @@ -986,7 +949,6 @@ async def delete_group_summary_room( ignore_backoff=True, ) - @log_function async def get_group_categories( self, destination: str, group_id: str, requester_user_id: str ) -> JsonDict: @@ -1000,7 +962,6 @@ async def get_group_categories( ignore_backoff=True, ) - @log_function async def get_group_category( self, destination: str, group_id: str, requester_user_id: str, category_id: str ) -> JsonDict: @@ -1014,7 +975,6 @@ async def get_group_category( ignore_backoff=True, ) - @log_function async def update_group_category( self, destination: str, @@ -1034,7 +994,6 @@ async def update_group_category( ignore_backoff=True, ) - @log_function async def delete_group_category( self, destination: str, group_id: str, requester_user_id: str, category_id: str ) -> JsonDict: @@ -1048,7 +1007,6 @@ async def delete_group_category( ignore_backoff=True, ) - @log_function async def get_group_roles( self, destination: str, group_id: str, requester_user_id: str ) -> JsonDict: @@ -1062,7 +1020,6 @@ async def get_group_roles( ignore_backoff=True, ) - @log_function async def get_group_role( self, destination: str, group_id: str, requester_user_id: str, role_id: str ) -> JsonDict: @@ -1076,7 +1033,6 @@ async def get_group_role( ignore_backoff=True, ) - @log_function async def update_group_role( self, destination: str, @@ -1096,7 +1052,6 @@ async def update_group_role( ignore_backoff=True, ) - @log_function async def delete_group_role( self, destination: str, group_id: str, requester_user_id: str, role_id: str ) -> JsonDict: @@ -1110,7 +1065,6 @@ async def delete_group_role( ignore_backoff=True, ) - @log_function async def update_group_summary_user( self, destination: str, @@ -1136,7 +1090,6 @@ async def update_group_summary_user( ignore_backoff=True, ) - @log_function async def set_group_join_policy( self, destination: str, group_id: str, 
requester_user_id: str, content: JsonDict ) -> JsonDict: @@ -1151,7 +1104,6 @@ async def set_group_join_policy( ignore_backoff=True, ) - @log_function async def delete_group_summary_user( self, destination: str, diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index a3add8a58679..bac5de052609 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -20,7 +20,6 @@ from synapse.api.errors import AuthError, SynapseError from synapse.events import EventBase from synapse.handlers.presence import format_user_presence_state -from synapse.logging.utils import log_function from synapse.streams.config import PaginationConfig from synapse.types import JsonDict, UserID from synapse.visibility import filter_events_for_client @@ -43,7 +42,6 @@ def __init__(self, hs: "HomeServer"): self._server_notices_sender = hs.get_server_notices_sender() self._event_serializer = hs.get_event_client_serializer() - @log_function async def get_stream( self, auth_user_id: str, diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 26b8e3f43c40..a37ae0ca094f 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -51,7 +51,6 @@ preserve_fn, run_in_background, ) -from synapse.logging.utils import log_function from synapse.replication.http.federation import ( ReplicationCleanRoomRestServlet, ReplicationStoreRoomOnOutlierMembershipRestServlet, @@ -556,7 +555,6 @@ async def do_invite_join( run_in_background(self._handle_queued_pdus, room_queue) - @log_function async def do_knock( self, target_hosts: List[str], @@ -928,7 +926,6 @@ async def on_make_leave_request( return event - @log_function async def on_make_knock_request( self, origin: str, room_id: str, user_id: str ) -> EventBase: @@ -1039,7 +1036,6 @@ async def get_state_ids_for_pdu(self, room_id: str, event_id: str) -> List[str]: else: return [] - @log_function async def on_backfill_request( self, origin: str, room_id: str, pdu_list: List[str], limit: int ) -> List[EventBase]: @@ -1056,7 +1052,6 @@ async def on_backfill_request( return events - @log_function async def get_persisted_pdu( self, origin: str, event_id: str ) -> Optional[EventBase]: @@ -1118,7 +1113,6 @@ async def on_get_missing_events( return missing_events - @log_function async def exchange_third_party_invite( self, sender_user_id: str, target_user_id: str, room_id: str, signed: JsonDict ) -> None: diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 11771f3c9c2b..3905f60b3a78 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -56,7 +56,6 @@ from synapse.events.snapshot import EventContext from synapse.federation.federation_client import InvalidResponseError from synapse.logging.context import nested_logging_context, run_in_background -from synapse.logging.utils import log_function from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet from synapse.replication.http.federation import ( @@ -275,7 +274,6 @@ async def on_receive_pdu(self, origin: str, pdu: EventBase) -> None: await self._process_received_pdu(origin, pdu, state=None) - @log_function async def on_send_membership_event( self, origin: str, event: EventBase ) -> Tuple[EventBase, EventContext]: @@ -472,7 +470,6 @@ async def process_remote_join( return await self.persist_events_and_notify(room_id, [(event, context)]) - @log_function async def backfill( self, dest: 
str, room_id: str, limit: int, extremities: Collection[str] ) -> None: diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index c781fefb1bc7..067c43ae4714 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -55,7 +55,6 @@ from synapse.appservice import ApplicationService from synapse.events.presence_router import PresenceRouter from synapse.logging.context import run_in_background -from synapse.logging.utils import log_function from synapse.metrics import LaterGauge from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http.presence import ( @@ -1542,7 +1541,6 @@ def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() self.store = hs.get_datastore() - @log_function async def get_new_events( self, user: UserID, diff --git a/synapse/logging/utils.py b/synapse/logging/utils.py deleted file mode 100644 index 4a01b902c255..000000000000 --- a/synapse/logging/utils.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright 2014-2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import logging -from functools import wraps -from inspect import getcallargs -from typing import Callable, TypeVar, cast - -_TIME_FUNC_ID = 0 - - -def _log_debug_as_f(f, msg, msg_args): - name = f.__module__ - logger = logging.getLogger(name) - - if logger.isEnabledFor(logging.DEBUG): - lineno = f.__code__.co_firstlineno - pathname = f.__code__.co_filename - - record = logger.makeRecord( - name=name, - level=logging.DEBUG, - fn=pathname, - lno=lineno, - msg=msg, - args=msg_args, - exc_info=None, - ) - - logger.handle(record) - - -F = TypeVar("F", bound=Callable) - - -def log_function(f: F) -> F: - """Function decorator that logs every call to that function.""" - func_name = f.__name__ - - @wraps(f) - def wrapped(*args, **kwargs): - name = f.__module__ - logger = logging.getLogger(name) - level = logging.DEBUG - - if logger.isEnabledFor(level): - bound_args = getcallargs(f, *args, **kwargs) - - def format(value): - r = str(value) - if len(r) > 50: - r = r[:50] + "..." 
- return r - - func_args = ["%s=%s" % (k, format(v)) for k, v in bound_args.items()] - - msg_args = {"func_name": func_name, "args": ", ".join(func_args)} - - _log_debug_as_f(f, "Invoked '%(func_name)s' with args: %(args)s", msg_args) - - return f(*args, **kwargs) - - wrapped.__name__ = func_name - return cast(F, wrapped) diff --git a/synapse/notifier.py b/synapse/notifier.py index 41fd94d7724b..632b2245ef55 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -40,7 +40,6 @@ from synapse.logging import issue9533_logger from synapse.logging.context import PreserveLoggingContext from synapse.logging.opentracing import log_kv, start_active_span -from synapse.logging.utils import log_function from synapse.metrics import LaterGauge from synapse.streams.config import PaginationConfig from synapse.types import ( @@ -686,7 +685,6 @@ async def _is_world_readable(self, room_id: str) -> bool: else: return False - @log_function def remove_expired_streams(self) -> None: time_now_ms = self.clock.time_msec() expired_streams = [] @@ -700,7 +698,6 @@ def remove_expired_streams(self) -> None: for expired_stream in expired_streams: expired_stream.remove(self) - @log_function def _register_with_keys(self, user_stream: _NotifierUserStream): self.user_to_user_stream[user_stream.user_id] = user_stream diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 923e31587e4b..67e8bc6ec288 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -45,7 +45,6 @@ from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.logging.context import ContextResourceUsage -from synapse.logging.utils import log_function from synapse.state import v1, v2 from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.storage.roommember import ProfileInfo @@ -512,7 +511,6 @@ def __init__(self, hs: "HomeServer"): self.clock.looping_call(self._report_metrics, 120 * 1000) - @log_function async def resolve_state_groups( self, room_id: str, diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index de3b48524b13..2be36a741aad 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -39,7 +39,6 @@ from synapse.crypto.event_signing import compute_event_reference_hash from synapse.events import EventBase # noqa: F401 from synapse.events.snapshot import EventContext # noqa: F401 -from synapse.logging.utils import log_function from synapse.storage._base import db_to_json, make_in_list_sql_clause from synapse.storage.database import ( DatabasePool, @@ -328,7 +327,6 @@ def _get_prevs_before_rejected_txn(txn, batch): return existing_prevs - @log_function def _persist_events_txn( self, txn: LoggingTransaction, From d93ec0a0ba5f6d2fbf2bc321086d4ad4c03136e0 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 18 Jan 2022 16:03:56 +0000 Subject: [PATCH 107/474] Partially revert #11602 to prevent webclient overriding client resource (#11764) --- changelog.d/11764.bugfix | 1 + synapse/app/homeserver.py | 9 +++++++-- 2 files changed, 8 insertions(+), 2 deletions(-) create mode 100644 changelog.d/11764.bugfix diff --git a/changelog.d/11764.bugfix b/changelog.d/11764.bugfix new file mode 100644 index 000000000000..1de5b9c6094a --- /dev/null +++ b/changelog.d/11764.bugfix @@ -0,0 +1 @@ +Fixes a bug introduced in Synapse 1.50.0rc1 that could cause Matrix clients to be unable to connect to Synapse instances 
with the 'webclient' resource enabled. \ No newline at end of file diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 177ce040e857..dd76e0732108 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -27,7 +27,6 @@ import synapse.config.logger from synapse import events from synapse.api.urls import ( - CLIENT_API_PREFIX, FEDERATION_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_R0_PREFIX, @@ -193,7 +192,13 @@ def _configure_named_resource( resources.update( { - CLIENT_API_PREFIX: client_resource, + "/_matrix/client/api/v1": client_resource, + "/_matrix/client/r0": client_resource, + "/_matrix/client/v1": client_resource, + "/_matrix/client/v3": client_resource, + "/_matrix/client/unstable": client_resource, + "/_matrix/client/v2_alpha": client_resource, + "/_matrix/client/versions": client_resource, "/.well-known": well_known_resource(self), "/_synapse/admin": AdminRestResource(self), **build_synapse_client_resource_tree(self), From ab12c909a20d626cd39f615d1df218d574f826a7 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 18 Jan 2022 16:09:04 +0000 Subject: [PATCH 108/474] 1.50.1 --- CHANGES.md | 13 +++++++++++++ changelog.d/11764.bugfix | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 4 files changed, 20 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/11764.bugfix diff --git a/CHANGES.md b/CHANGES.md index 8029a9d21c31..ea8b15dcee16 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,19 @@ +Synapse 1.50.1 (2022-01-18) +=========================== + +This release fixes a bug in Synapse 1.50.0 that could prevent clients from being able to connect to Synapse if the `webclient` resource was enabled. Further details are available in [this issue](https://github.com/matrix-org/synapse/issues/11763). + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.50.0rc1 that could cause Matrix clients to be unable to connect to Synapse instances with the 'webclient' resource enabled. ([\#11764](https://github.com/matrix-org/synapse/issues/11764)) + + Synapse 1.50.0 (2022-01-18) =========================== +**This release has a critical bug that may prevent clients from being able to connect. As such, we do not recommend upgrading to 1.50.0, and instead upgrading straight to 1.50.1. Further details are in [this issue](https://github.com/matrix-org/synapse/issues/11763).** + Please note that we now only support Python 3.7+ and PostgreSQL 10+ (if applicable), because Python 3.6 and PostgreSQL 9.6 have reached end-of-life. No significant changes since 1.50.0rc2. diff --git a/changelog.d/11764.bugfix b/changelog.d/11764.bugfix deleted file mode 100644 index 1de5b9c6094a..000000000000 --- a/changelog.d/11764.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fixes a bug introduced in Synapse 1.50.0rc1 that could cause Matrix clients to be unable to connect to Synapse instances with the 'webclient' resource enabled. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index f1245cd3afa2..18983f5da672 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.50.1) stable; urgency=medium + + * New synapse release 1.50.1. + + -- Synapse Packaging team Tue, 18 Jan 2022 16:06:26 +0000 + matrix-synapse-py3 (1.50.0) stable; urgency=medium * New synapse release 1.50.0. 
diff --git a/synapse/__init__.py b/synapse/__init__.py index 201925e91d48..5ec9f941741a 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.50.0" +__version__ = "1.50.1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 9d0098595e49755e420eb6f654d5e2e2327868fe Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 18 Jan 2022 16:11:38 +0000 Subject: [PATCH 109/474] Reword 1.50.0 warning a bit in the changelog --- CHANGES.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index ea8b15dcee16..84fddb47ca38 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -12,7 +12,9 @@ Bugfixes Synapse 1.50.0 (2022-01-18) =========================== -**This release has a critical bug that may prevent clients from being able to connect. As such, we do not recommend upgrading to 1.50.0, and instead upgrading straight to 1.50.1. Further details are in [this issue](https://github.com/matrix-org/synapse/issues/11763).** +**This release has a critical bug that may prevent clients from being able to connect. +As such, it is not recommended to upgrade to 1.50.0. Instead, please upgrade straight +to 1.50.1. Further details are in [this issue](https://github.com/matrix-org/synapse/issues/11763).** Please note that we now only support Python 3.7+ and PostgreSQL 10+ (if applicable), because Python 3.6 and PostgreSQL 9.6 have reached end-of-life. From fd05a3ed0330d5d4993620cd0160a3d000262f2c Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 18 Jan 2022 16:13:54 +0000 Subject: [PATCH 110/474] Wording fixes to 1.50.0/1 changelog entries --- CHANGES.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 84fddb47ca38..ced1dcc0db80 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -6,15 +6,15 @@ This release fixes a bug in Synapse 1.50.0 that could prevent clients from being Bugfixes -------- -- Fix a bug introduced in Synapse 1.50.0rc1 that could cause Matrix clients to be unable to connect to Synapse instances with the 'webclient' resource enabled. ([\#11764](https://github.com/matrix-org/synapse/issues/11764)) +- Fix a bug introduced in Synapse 1.50.0rc1 that could cause Matrix clients to be unable to connect to Synapse instances with the `webclient` resource enabled. ([\#11764](https://github.com/matrix-org/synapse/issues/11764)) Synapse 1.50.0 (2022-01-18) =========================== -**This release has a critical bug that may prevent clients from being able to connect. +**This release contains a critical bug that may prevent clients from being able to connect. As such, it is not recommended to upgrade to 1.50.0. Instead, please upgrade straight -to 1.50.1. Further details are in [this issue](https://github.com/matrix-org/synapse/issues/11763).** +to 1.50.1. Further details are available in [this issue](https://github.com/matrix-org/synapse/issues/11763).** Please note that we now only support Python 3.7+ and PostgreSQL 10+ (if applicable), because Python 3.6 and PostgreSQL 9.6 have reached end-of-life. From 68acb0a29dcb03a0ecbcebdb95e09c5999598f42 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 18 Jan 2022 11:38:57 -0500 Subject: [PATCH 111/474] Include whether the requesting user has participated in a thread. (#11577) Per updates to MSC3440. This is implemented as a separate method since it needs to be cached on a per-user basis, instead of a per-thread basis.
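The caching distinction is easiest to see with a toy analogy. Synapse's `@cached()` decorator, like `functools.lru_cache` here, keys entries on the method's arguments, so a result can only be shared as widely as its narrowest key; the data and names below are made up:

```python
from functools import lru_cache

# Toy reply store: (thread event, room) -> senders of the replies.
REPLIES = {("$thread", "!room"): ["@alice:hs", "@bob:hs", "@alice:hs"]}

@lru_cache(maxsize=None)
def thread_summary(event_id: str, room_id: str) -> int:
    # Keyed per thread: one cached entry serves every requesting user.
    return len(REPLIES.get((event_id, room_id), ()))

@lru_cache(maxsize=None)
def thread_participated(event_id: str, room_id: str, user_id: str) -> bool:
    # user_id is part of the cache key, so each user gets their own entry.
    return user_id in REPLIES.get((event_id, room_id), ())

print(thread_summary("$thread", "!room"))                    # 3
print(thread_participated("$thread", "!room", "@carol:hs"))  # False
```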
--- changelog.d/11577.feature | 1 + synapse/handlers/pagination.py | 2 +- synapse/handlers/room.py | 12 +++- synapse/handlers/sync.py | 4 +- synapse/rest/client/relations.py | 4 +- synapse/rest/client/room.py | 4 +- synapse/storage/databases/main/events.py | 7 +++ synapse/storage/databases/main/relations.py | 66 +++++++++++++++++---- tests/rest/client/test_relations.py | 3 + 9 files changed, 85 insertions(+), 18 deletions(-) create mode 100644 changelog.d/11577.feature diff --git a/changelog.d/11577.feature b/changelog.d/11577.feature new file mode 100644 index 000000000000..f9c8a0d5f40a --- /dev/null +++ b/changelog.d/11577.feature @@ -0,0 +1 @@ +Include whether the requesting user has participated in a thread when generating a summary for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440). diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 472688f04506..973f262964c1 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -537,7 +537,7 @@ async def get_messages( state_dict = await self.store.get_events(list(state_ids.values())) state = state_dict.values() - aggregations = await self.store.get_bundled_aggregations(events) + aggregations = await self.store.get_bundled_aggregations(events, user_id) time_now = self.clock.time_msec() diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 3d47163f25cf..f963078e596c 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1182,12 +1182,18 @@ async def filter_evts(events: List[EventBase]) -> List[EventBase]: results["event"] = filtered[0] # Fetch the aggregations. - aggregations = await self.store.get_bundled_aggregations([results["event"]]) + aggregations = await self.store.get_bundled_aggregations( + [results["event"]], user.to_string() + ) aggregations.update( - await self.store.get_bundled_aggregations(results["events_before"]) + await self.store.get_bundled_aggregations( + results["events_before"], user.to_string() + ) ) aggregations.update( - await self.store.get_bundled_aggregations(results["events_after"]) + await self.store.get_bundled_aggregations( + results["events_after"], user.to_string() + ) ) results["aggregations"] = aggregations diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index e1df9b310635..ffc6b748e84e 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -637,7 +637,9 @@ async def _load_filtered_recents( # as clients will have all the necessary information. bundled_aggregations = None if limited or newly_joined_room: - bundled_aggregations = await self.store.get_bundled_aggregations(recents) + bundled_aggregations = await self.store.get_bundled_aggregations( + recents, sync_config.user.to_string() + ) return TimelineBatch( events=recents, diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py index 37d949a71e74..8cf5ebaa07b7 100644 --- a/synapse/rest/client/relations.py +++ b/synapse/rest/client/relations.py @@ -118,7 +118,9 @@ async def on_GET( ) # The relations returned for the requested event do include their # bundled aggregations. 
- aggregations = await self.store.get_bundled_aggregations(events) + aggregations = await self.store.get_bundled_aggregations( + events, requester.user.to_string() + ) serialized_events = self._event_serializer.serialize_events( events, now, bundle_aggregations=aggregations ) diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index da6014900a9c..31fd329a3862 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -663,7 +663,9 @@ async def on_GET( if event: # Ensure there are bundled aggregations available. - aggregations = await self._store.get_bundled_aggregations([event]) + aggregations = await self._store.get_bundled_aggregations( + [event], requester.user.to_string() + ) time_now = self.clock.time_msec() event_dict = self._event_serializer.serialize_event( diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 2be36a741aad..727800232207 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1793,6 +1793,13 @@ def _handle_event_relations( txn.call_after( self.store.get_thread_summary.invalidate, (parent_id, event.room_id) ) + # It should be safe to only invalidate the cache if the user has not + # previously participated in the thread, but that's difficult (and + # potentially error-prone) so it is always invalidated. + txn.call_after( + self.store.get_thread_participated.invalidate, + (parent_id, event.room_id, event.sender), + ) def _handle_insertion_event(self, txn: LoggingTransaction, event: EventBase): """Handles keeping track of insertion events and edges/connections. diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index c6c4bd18da3e..2cb5d06c1352 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -384,8 +384,7 @@ def _get_applicable_edit_txn(txn: LoggingTransaction) -> Optional[str]: async def get_thread_summary( self, event_id: str, room_id: str ) -> Tuple[int, Optional[EventBase]]: - """Get the number of threaded replies, the senders of those replies, and - the latest reply (if any) for the given event. + """Get the number of threaded replies and the latest reply (if any) for the given event. Args: event_id: Summarize the thread related to this event ID. @@ -398,7 +397,7 @@ async def get_thread_summary( def _get_thread_summary_txn( txn: LoggingTransaction, ) -> Tuple[int, Optional[str]]: - # Fetch the count of threaded events and the latest event ID. + # Fetch the latest event ID in the thread. # TODO Should this only allow m.room.message events. sql = """ SELECT event_id @@ -419,6 +418,7 @@ def _get_thread_summary_txn( latest_event_id = row[0] + # Fetch the number of threaded replies. sql = """ SELECT COUNT(event_id) FROM event_relations @@ -443,6 +443,44 @@ def _get_thread_summary_txn( return count, latest_event + @cached() + async def get_thread_participated( + self, event_id: str, room_id: str, user_id: str + ) -> bool: + """Get whether the requesting user participated in a thread. + + This is separate from get_thread_summary since that can be cached across + all users while this value is specific to the requester. + + Args: + event_id: The thread related to this event ID. + room_id: The room the event belongs to. + user_id: The user requesting the summary. + + Returns: + True if the requesting user participated in the thread, otherwise false.
+ """ + + def _get_thread_summary_txn(txn: LoggingTransaction) -> bool: + # Fetch whether the requester has participated or not. + sql = """ + SELECT 1 + FROM event_relations + INNER JOIN events USING (event_id) + WHERE + relates_to_id = ? + AND room_id = ? + AND relation_type = ? + AND sender = ? + """ + + txn.execute(sql, (event_id, room_id, RelationTypes.THREAD, user_id)) + return bool(txn.fetchone()) + + return await self.db_pool.runInteraction( + "get_thread_summary", _get_thread_summary_txn + ) + async def events_have_relations( self, parent_ids: List[str], @@ -546,7 +584,7 @@ def _get_if_user_has_annotated_event(txn: LoggingTransaction) -> bool: ) async def _get_bundled_aggregation_for_event( - self, event: EventBase + self, event: EventBase, user_id: str ) -> Optional[Dict[str, Any]]: """Generate bundled aggregations for an event. @@ -554,6 +592,7 @@ async def _get_bundled_aggregation_for_event( Args: event: The event to calculate bundled aggregations for. + user_id: The user requesting the bundled aggregations. Returns: The bundled aggregations for an event, if bundled aggregations are @@ -598,27 +637,32 @@ async def _get_bundled_aggregation_for_event( # If this event is the start of a thread, include a summary of the replies. if self._msc3440_enabled: - ( - thread_count, - latest_thread_event, - ) = await self.get_thread_summary(event_id, room_id) + thread_count, latest_thread_event = await self.get_thread_summary( + event_id, room_id + ) + participated = await self.get_thread_participated( + event_id, room_id, user_id + ) if latest_thread_event: aggregations[RelationTypes.THREAD] = { - # Don't bundle aggregations as this could recurse forever. "latest_event": latest_thread_event, "count": thread_count, + "current_user_participated": participated, } # Store the bundled aggregations in the event metadata for later use. return aggregations async def get_bundled_aggregations( - self, events: Iterable[EventBase] + self, + events: Iterable[EventBase], + user_id: str, ) -> Dict[str, Dict[str, Any]]: """Generate bundled aggregations for events. Args: events: The iterable of events to calculate bundled aggregations for. + user_id: The user requesting the bundled aggregations. Returns: A map of event ID to the bundled aggregation for the event. Not all @@ -631,7 +675,7 @@ async def get_bundled_aggregations( # TODO Parallelize. results = {} for event in events: - event_result = await self._get_bundled_aggregation_for_event(event) + event_result = await self._get_bundled_aggregation_for_event(event, user_id) if event_result is not None: results[event.event_id] = event_result diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index ee26751430f0..4b20ab0e3e57 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -515,6 +515,9 @@ def assert_bundle(actual): 2, actual[RelationTypes.THREAD].get("count"), ) + self.assertTrue( + actual[RelationTypes.THREAD].get("current_user_participated") + ) # The latest thread event has some fields that don't matter. self.assert_dict( { From 15ffc4143c36593bc3d899fad7fb5db00f4d95ea Mon Sep 17 00:00:00 2001 From: Philippe Daouadi Date: Tue, 18 Jan 2022 19:20:24 +0100 Subject: [PATCH 112/474] Fix preview of imgur and Tenor URLs. (#11669) By scraping Open Graph information from the HTML even when an autodiscovery endpoint is found. The results are then combined to capture as much information as possible from the page. 
--- changelog.d/11669.bugfix | 1 + docs/development/url_previews.md | 7 +++- synapse/rest/media/v1/oembed.py | 10 ++++-- synapse/rest/media/v1/preview_url_resource.py | 35 +++++++++++++------ 4 files changed, 39 insertions(+), 14 deletions(-) create mode 100644 changelog.d/11669.bugfix diff --git a/changelog.d/11669.bugfix b/changelog.d/11669.bugfix new file mode 100644 index 000000000000..10d913aaceed --- /dev/null +++ b/changelog.d/11669.bugfix @@ -0,0 +1 @@ +Fix preview of some gif URLs (like tenor.com). Contributed by Philippe Daouadi. diff --git a/docs/development/url_previews.md b/docs/development/url_previews.md index aff38136091d..154b9a5e12f4 100644 --- a/docs/development/url_previews.md +++ b/docs/development/url_previews.md @@ -35,7 +35,12 @@ When Synapse is asked to preview a URL it does the following: 5. If the media is HTML: 1. Decodes the HTML via the stored file. 2. Generates an Open Graph response from the HTML. - 3. If an image exists in the Open Graph response: + 3. If a JSON oEmbed URL was found in the HTML via autodiscovery: + 1. Downloads the URL and stores it into a file via the media storage provider + and saves the local media metadata. + 2. Converts the oEmbed response to an Open Graph response. + 3. Overrides any Open Graph data from the HTML with data from oEmbed. + 4. If an image exists in the Open Graph response: 1. Downloads the URL and stores it into a file via the media storage provider and saves the local media metadata. 2. Generates thumbnails. diff --git a/synapse/rest/media/v1/oembed.py b/synapse/rest/media/v1/oembed.py index cce1527ed9fb..2177b46c9eba 100644 --- a/synapse/rest/media/v1/oembed.py +++ b/synapse/rest/media/v1/oembed.py @@ -33,6 +33,8 @@ class OEmbedResult: # The Open Graph result (converted from the oEmbed result). open_graph_result: JsonDict + # The author_name of the oEmbed result + author_name: Optional[str] # Number of milliseconds to cache the content, according to the oEmbed response. # # This will be None if no cache-age is provided in the oEmbed response (or @@ -154,11 +156,12 @@ def parse_oembed_response(self, url: str, raw_body: bytes) -> OEmbedResult: "og:url": url, } - # Use either title or author's name as the title. - title = oembed.get("title") or oembed.get("author_name") + title = oembed.get("title") if title: open_graph_response["og:title"] = title + author_name = oembed.get("author_name") + # Use the provider name as the site name. provider_name = oembed.get("provider_name") if provider_name: @@ -193,9 +196,10 @@ def parse_oembed_response(self, url: str, raw_body: bytes) -> OEmbedResult: # Trap any exception and let the code follow as usual. logger.warning("Error parsing oEmbed metadata from %s: %r", url, e) open_graph_response = {} + author_name = None cache_age = None - return OEmbedResult(open_graph_response, cache_age) + return OEmbedResult(open_graph_response, author_name, cache_age) def _fetch_urls(tree: "etree.Element", tag_name: str) -> List[str]: diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index a3829d943b7f..e8881bc8709e 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -262,6 +262,7 @@ async def _do_preview(self, url: str, user: UserID, ts: int) -> bytes: # The number of milliseconds that the response should be considered valid.
expiration_ms = media_info.expires + author_name: Optional[str] = None if _is_media(media_info.media_type): file_id = media_info.filesystem_id @@ -294,17 +295,25 @@ async def _do_preview(self, url: str, user: UserID, ts: int) -> bytes: # Check if this HTML document points to oEmbed information and # defer to that. oembed_url = self._oembed.autodiscover_from_html(tree) - og = {} + og_from_oembed: JsonDict = {} if oembed_url: oembed_info = await self._download_url(oembed_url, user) - og, expiration_ms = await self._handle_oembed_response( + ( + og_from_oembed, + author_name, + expiration_ms, + ) = await self._handle_oembed_response( url, oembed_info, expiration_ms ) - # If there was no oEmbed URL (or oEmbed parsing failed), attempt - # to generate the Open Graph information from the HTML. - if not oembed_url or not og: - og = parse_html_to_open_graph(tree, media_info.uri) + # Parse Open Graph information from the HTML in case the oEmbed + # response failed or is incomplete. + og_from_html = parse_html_to_open_graph(tree, media_info.uri) + + # Compile the Open Graph response by using the scraped + # information from the HTML and overlaying any information + # from the oEmbed response. + og = {**og_from_html, **og_from_oembed} await self._precache_image_url(user, media_info, og) else: @@ -312,7 +321,7 @@ async def _do_preview(self, url: str, user: UserID, ts: int) -> bytes: elif oembed_url: # Handle the oEmbed information. - og, expiration_ms = await self._handle_oembed_response( + og, author_name, expiration_ms = await self._handle_oembed_response( url, media_info, expiration_ms ) await self._precache_image_url(user, media_info, og) @@ -321,6 +330,11 @@ async def _do_preview(self, url: str, user: UserID, ts: int) -> bytes: logger.warning("Failed to find any OG data in %s", url) og = {} + # If we don't have a title but we have author_name, copy it as + # title + if not og.get("og:title") and author_name: + og["og:title"] = author_name + # filter out any stupidly long values keys_to_remove = [] for k, v in og.items(): @@ -484,7 +498,7 @@ async def _precache_image_url( async def _handle_oembed_response( self, url: str, media_info: MediaInfo, expiration_ms: int - ) -> Tuple[JsonDict, int]: + ) -> Tuple[JsonDict, Optional[str], int]: """ Parse the downloaded oEmbed info. @@ -497,11 +511,12 @@ async def _handle_oembed_response( Returns: A tuple of: The Open Graph dictionary, if the oEmbed info can be parsed. + The author name if it could be retrieved from oEmbed. The (possibly updated) length of time, in milliseconds, the media is valid for. """ # If JSON was not returned, there's nothing to do. 
if not _is_json(media_info.media_type): - return {}, expiration_ms + return {}, None, expiration_ms with open(media_info.filename, "rb") as file: body = file.read() @@ -513,7 +528,7 @@ async def _handle_oembed_response( if open_graph_result and oembed_response.cache_age is not None: expiration_ms = oembed_response.cache_age - return open_graph_result, expiration_ms + return open_graph_result, oembed_response.author_name, expiration_ms def _start_expire_url_cache_data(self) -> Deferred: return run_as_background_process( From 7ad7a47e5ae01b57299b17a266cc3b0269c0b3cd Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 19 Jan 2022 12:39:11 +0000 Subject: [PATCH 113/474] Add missing `auto_attribs=True` to the `_WrappedRustReporter` class (#11768) --- changelog.d/11768.misc | 1 + synapse/logging/opentracing.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11768.misc diff --git a/changelog.d/11768.misc b/changelog.d/11768.misc new file mode 100644 index 000000000000..1cac1f7446f8 --- /dev/null +++ b/changelog.d/11768.misc @@ -0,0 +1 @@ +Use `auto_attribs` and native type hints for attrs classes. \ No newline at end of file diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 5672d60de348..b240d2d21da2 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -247,7 +247,7 @@ class _DummyTagNames: class BaseReporter: # type: ignore[no-redef] pass - @attr.s(slots=True, frozen=True) + @attr.s(slots=True, frozen=True, auto_attribs=True) class _WrappedRustReporter(BaseReporter): """Wrap the reporter to ensure `report_span` never throws.""" From c072c0b82969879cd08d0d6e669ded509f1e833b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 19 Jan 2022 11:50:09 -0500 Subject: [PATCH 114/474] Fix mypy for platforms without epoll support. (#11771) --- changelog.d/11771.misc | 1 + synapse/metrics/_reactor_metrics.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/11771.misc diff --git a/changelog.d/11771.misc b/changelog.d/11771.misc new file mode 100644 index 000000000000..e9d5dae857b2 --- /dev/null +++ b/changelog.d/11771.misc @@ -0,0 +1 @@ +Improve accuracy of `python_twisted_reactor_tick_time` prometheus metric. 
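The metric named in that changelog comes from wrapping the reactor's poller, as the `EpollWrapper` touched below does. Reduced to a general sketch (this stand-in is not Twisted's actual interface, and the callback is hypothetical):

```python
import time

class TimedPoller:
    """Wrap a poll-style object to observe how long the event loop spends
    between successive polls, i.e. the duration of each reactor tick."""

    def __init__(self, poller, on_tick):
        self._poller = poller
        self._on_tick = on_tick  # callback fed each measured tick, in seconds
        self._last_polled = time.time()

    def poll(self, timeout=None):
        # Time elapsed since the previous poll returned was spent running
        # callbacks, which is exactly the tick duration being measured.
        self._on_tick(time.time() - self._last_polled)
        events = self._poller.poll(timeout)
        self._last_polled = time.time()
        return events

    def __getattr__(self, name):
        # Delegate everything else so the wrapper is a drop-in replacement.
        return getattr(self._poller, name)
```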
diff --git a/synapse/metrics/_reactor_metrics.py b/synapse/metrics/_reactor_metrics.py index ce0688621c37..f38f7983131f 100644 --- a/synapse/metrics/_reactor_metrics.py +++ b/synapse/metrics/_reactor_metrics.py @@ -35,7 +35,7 @@ class EpollWrapper: """a wrapper for an epoll object which records the time between polls""" - def __init__(self, poller: "select.epoll"): + def __init__(self, poller: "select.epoll"): # type: ignore[name-defined] self.last_polled = time.time() self._poller = poller @@ -71,7 +71,7 @@ def collect(self) -> Iterable[Metric]: # if the reactor has a `_poller` attribute, which is an `epoll` object # (ie, it's an EPollReactor), we wrap the `epoll` with a thing that will # measure the time between ticks - from select import epoll + from select import epoll # type: ignore[attr-defined] poller = reactor._poller # type: ignore[attr-defined] except (AttributeError, ImportError): From 5572e6cc4b0f7f3a907773f29d82671202812a6d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 19 Jan 2022 19:45:36 +0000 Subject: [PATCH 115/474] Comments and typing for `_update_outliers_txn` (#11776) A couple of surprises for me here, so thought I'd document them --- changelog.d/11776.misc | 1 + synapse/storage/databases/main/events.py | 35 ++++++++++++++++-------- 2 files changed, 24 insertions(+), 12 deletions(-) create mode 100644 changelog.d/11776.misc diff --git a/changelog.d/11776.misc b/changelog.d/11776.misc new file mode 100644 index 000000000000..572ccda84741 --- /dev/null +++ b/changelog.d/11776.misc @@ -0,0 +1 @@ +Add some comments and type annotations for `_update_outliers_txn`. diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 727800232207..1ae1ebe10879 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1254,20 +1254,22 @@ def _update_room_depths_txn( for room_id, depth in depth_updates.items(): self._update_min_depth_for_room_txn(txn, room_id, depth) - def _update_outliers_txn(self, txn, events_and_contexts): + def _update_outliers_txn( + self, + txn: LoggingTransaction, + events_and_contexts: List[Tuple[EventBase, EventContext]], + ) -> List[Tuple[EventBase, EventContext]]: """Update any outliers with new event info. - This turns outliers into ex-outliers (unless the new event was - rejected). + This turns outliers into ex-outliers (unless the new event was rejected), and + also removes any other events we have already seen from the list. Args: - txn (twisted.enterprise.adbapi.Connection): db connection - events_and_contexts (list[(EventBase, EventContext)]): events - we are persisting + txn: db connection + events_and_contexts: events we are persisting Returns: - list[(EventBase, EventContext)] new list, without events which - are already in the events table. + new list, without events which are already in the events table. 
""" txn.execute( "SELECT event_id, outlier FROM events WHERE event_id in (%s)" @@ -1275,7 +1277,9 @@ def _update_outliers_txn(self, txn, events_and_contexts): [event.event_id for event, _ in events_and_contexts], ) - have_persisted = {event_id: outlier for event_id, outlier in txn} + have_persisted: Dict[str, bool] = { + event_id: outlier for event_id, outlier in txn + } to_remove = set() for event, context in events_and_contexts: @@ -1285,15 +1289,22 @@ def _update_outliers_txn(self, txn, events_and_contexts): to_remove.add(event) if context.rejected: - # If the event is rejected then we don't care if the event - # was an outlier or not. + # If the incoming event is rejected then we don't care if the event + # was an outlier or not - what we have is at least as good. continue outlier_persisted = have_persisted[event.event_id] if not event.internal_metadata.is_outlier() and outlier_persisted: # We received a copy of an event that we had already stored as - # an outlier in the database. We now have some state at that + # an outlier in the database. We now have some state at that event # so we need to update the state_groups table with that state. + # + # Note that we do not update the stream_ordering of the event in this + # scenario. XXX: does this cause bugs? It will mean we won't send such + # events down /sync. In general they will be historical events, so that + # doesn't matter too much, but that is not always the case. + + logger.info("Updating state for ex-outlier event %s", event.event_id) # insert into event_to_state_groups. try: From af13a3be29dd2d84d9255f8e613ca70c16819436 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Thu, 20 Jan 2022 11:03:42 +0000 Subject: [PATCH 116/474] Fix a bug that corrupted the cache of federated space hierarchies (#11775) `FederationClient.get_room_hierarchy()` caches its return values, so refactor the code to avoid modifying the returned room summary. --- changelog.d/11775.bugfix | 1 + synapse/federation/federation_client.py | 18 ++--- synapse/handlers/room_summary.py | 3 +- tests/handlers/test_room_summary.py | 92 ++++++++++++++++++++++++- 4 files changed, 102 insertions(+), 12 deletions(-) create mode 100644 changelog.d/11775.bugfix diff --git a/changelog.d/11775.bugfix b/changelog.d/11775.bugfix new file mode 100644 index 000000000000..2c548dbf3096 --- /dev/null +++ b/changelog.d/11775.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where space hierarchy over federation would only work correctly some of the time. diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 57cf35bd927c..74f17aa4daa3 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -118,7 +118,8 @@ def __init__(self, hs: "HomeServer"): # It is a map of (room ID, suggested-only) -> the response of # get_room_hierarchy. self._get_room_hierarchy_cache: ExpiringCache[ - Tuple[str, bool], Tuple[JsonDict, Sequence[JsonDict], Sequence[str]] + Tuple[str, bool], + Tuple[JsonDict, Sequence[JsonDict], Sequence[JsonDict], Sequence[str]], ] = ExpiringCache( cache_name="get_room_hierarchy_cache", clock=self._clock, @@ -1333,7 +1334,7 @@ async def get_room_hierarchy( destinations: Iterable[str], room_id: str, suggested_only: bool, - ) -> Tuple[JsonDict, Sequence[JsonDict], Sequence[str]]: + ) -> Tuple[JsonDict, Sequence[JsonDict], Sequence[JsonDict], Sequence[str]]: """ Call other servers to get a hierarchy of the given room. 
@@ -1348,7 +1349,8 @@ async def get_room_hierarchy( Returns: A tuple of: - The room as a JSON dictionary. + The room as a JSON dictionary, without a "children_state" key. + A list of `m.space.child` state events. A list of children rooms, as JSON dictionaries. A list of inaccessible children room IDs. @@ -1363,7 +1365,7 @@ async def get_room_hierarchy( async def send_request( destination: str, - ) -> Tuple[JsonDict, Sequence[JsonDict], Sequence[str]]: + ) -> Tuple[JsonDict, Sequence[JsonDict], Sequence[JsonDict], Sequence[str]]: try: res = await self.transport_layer.get_room_hierarchy( destination=destination, @@ -1392,7 +1394,7 @@ async def send_request( raise InvalidResponseError("'room' must be a dict") # Validate children_state of the room. - children_state = room.get("children_state", []) + children_state = room.pop("children_state", []) if not isinstance(children_state, Sequence): raise InvalidResponseError("'room.children_state' must be a list") if any(not isinstance(e, dict) for e in children_state): @@ -1421,7 +1423,7 @@ async def send_request( "Invalid room ID in 'inaccessible_children' list" ) - return room, children, inaccessible_children + return room, children_state, children, inaccessible_children try: result = await self._try_destination_list( @@ -1469,8 +1471,6 @@ async def send_request( if event.room_id == room_id: children_events.append(event.data) children_room_ids.add(event.state_key) - # And add them under the requested room. - requested_room["children_state"] = children_events # Find the children rooms. children = [] @@ -1480,7 +1480,7 @@ async def send_request( # It isn't clear from the response whether some of the rooms are # not accessible. - result = (requested_room, children, ()) + result = (requested_room, children_events, children, ()) # Cache the result to avoid fetching data over federation every time. 
self._get_room_hierarchy_cache[(room_id, suggested_only)] = result diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py index 7c60cb0bdd27..4844b69a0345 100644 --- a/synapse/handlers/room_summary.py +++ b/synapse/handlers/room_summary.py @@ -780,6 +780,7 @@ async def _summarize_remote_room_hierarchy( try: ( room_response, + children_state_events, children, inaccessible_children, ) = await self._federation_client.get_room_hierarchy( @@ -804,7 +805,7 @@ async def _summarize_remote_room_hierarchy( } return ( - _RoomEntry(room_id, room_response, room_response.pop("children_state", ())), + _RoomEntry(room_id, room_response, children_state_events), children_by_room_id, set(inaccessible_children), ) diff --git a/tests/handlers/test_room_summary.py b/tests/handlers/test_room_summary.py index ce3ebcf2f2fa..51b22d299812 100644 --- a/tests/handlers/test_room_summary.py +++ b/tests/handlers/test_room_summary.py @@ -28,6 +28,7 @@ from synapse.api.errors import AuthError, NotFoundError, SynapseError from synapse.api.room_versions import RoomVersions from synapse.events import make_event_from_dict +from synapse.federation.transport.client import TransportLayerClient from synapse.handlers.room_summary import _child_events_comparison_key, _RoomEntry from synapse.rest import admin from synapse.rest.client import login, room @@ -134,10 +135,18 @@ def prepare(self, reactor, clock, hs: HomeServer): self._add_child(self.space, self.room, self.token) def _add_child( - self, space_id: str, room_id: str, token: str, order: Optional[str] = None + self, + space_id: str, + room_id: str, + token: str, + order: Optional[str] = None, + via: Optional[List[str]] = None, ) -> None: """Add a child room to a space.""" - content: JsonDict = {"via": [self.hs.hostname]} + if via is None: + via = [self.hs.hostname] + + content: JsonDict = {"via": via} if order is not None: content["order"] = order self.helper.send_state( @@ -1036,6 +1045,85 @@ async def summarize_remote_room_hierarchy(_self, room, suggested_only): ) self._assert_hierarchy(result, expected) + def test_fed_caching(self): + """ + Federation `/hierarchy` responses should be cached. + """ + fed_hostname = self.hs.hostname + "2" + fed_subspace = "#space:" + fed_hostname + fed_room = "#room:" + fed_hostname + + # Add a room to the space which is on another server. 
+ self._add_child(self.space, fed_subspace, self.token, via=[fed_hostname]) + + federation_requests = 0 + + async def get_room_hierarchy( + _self: TransportLayerClient, + destination: str, + room_id: str, + suggested_only: bool, + ) -> JsonDict: + nonlocal federation_requests + federation_requests += 1 + + return { + "room": { + "room_id": fed_subspace, + "world_readable": True, + "room_type": RoomTypes.SPACE, + "children_state": [ + { + "type": EventTypes.SpaceChild, + "room_id": fed_subspace, + "state_key": fed_room, + "content": {"via": [fed_hostname]}, + }, + ], + }, + "children": [ + { + "room_id": fed_room, + "world_readable": True, + }, + ], + "inaccessible_children": [], + } + + expected = [ + (self.space, [self.room, fed_subspace]), + (self.room, ()), + (fed_subspace, [fed_room]), + (fed_room, ()), + ] + + with mock.patch( + "synapse.federation.transport.client.TransportLayerClient.get_room_hierarchy", + new=get_room_hierarchy, + ): + result = self.get_success( + self.handler.get_room_hierarchy(create_requester(self.user), self.space) + ) + self.assertEqual(federation_requests, 1) + self._assert_hierarchy(result, expected) + + # The previous federation response should be reused. + result = self.get_success( + self.handler.get_room_hierarchy(create_requester(self.user), self.space) + ) + self.assertEqual(federation_requests, 1) + self._assert_hierarchy(result, expected) + + # Expire the response cache + self.reactor.advance(5 * 60 + 1) + + # A new federation request should be made. + result = self.get_success( + self.handler.get_room_hierarchy(create_requester(self.user), self.space) + ) + self.assertEqual(federation_requests, 2) + self._assert_hierarchy(result, expected) + class RoomSummaryTestCase(unittest.HomeserverTestCase): servlets = [ From fa583c2198c75583ed5e16e9ecd157a9d10f6417 Mon Sep 17 00:00:00 2001 From: Nicolas Werner <89468146+nico-famedly@users.noreply.github.com> Date: Thu, 20 Jan 2022 13:04:58 +0000 Subject: [PATCH 117/474] Allow overriding the complement ref. (#11766) Updates complement.sh to read the ref from an environment variable (defaulting to master) when downloading a complement bundle for testing. --- .gitignore | 2 +- changelog.d/11766.misc | 1 + scripts-dev/complement.sh | 14 ++++++++------ 3 files changed, 10 insertions(+), 7 deletions(-) create mode 100644 changelog.d/11766.misc diff --git a/.gitignore b/.gitignore index 8eb4eda73d71..3bd6b1a08c57 100644 --- a/.gitignore +++ b/.gitignore @@ -52,5 +52,5 @@ __pycache__/ book/ # complement -/complement-master +/complement-* /master.tar.gz diff --git a/changelog.d/11766.misc b/changelog.d/11766.misc new file mode 100644 index 000000000000..3c9e5f95ffcc --- /dev/null +++ b/changelog.d/11766.misc @@ -0,0 +1 @@ +Allow overriding complement commit using `COMPLEMENT_REF`. diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index 67a22d3ed3e7..e08ffedaf33a 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -8,7 +8,8 @@ # By default the script will fetch the latest Complement master branch and # run tests with that. This can be overridden to use a custom Complement # checkout by setting the COMPLEMENT_DIR environment variable to the -# filepath of a local Complement checkout. +# filepath of a local Complement checkout or by setting the COMPLEMENT_REF +# environment variable to pull a different branch or commit. # # By default Synapse is run in monolith mode. This can be overridden by # setting the WORKERS environment variable. @@ -31,11 +32,12 @@ cd "$(dirname $0)/.." 
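In usage terms, the change that follows means `COMPLEMENT_REF=<branch-or-commit> ./scripts-dev/complement.sh` fetches and tests against that ref instead of `master` (the placeholder is illustrative), while `COMPLEMENT_DIR` continues to take precedence if it points at a local checkout.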
# Check for a user-specified Complement checkout if [[ -z "$COMPLEMENT_DIR" ]]; then - echo "COMPLEMENT_DIR not set. Fetching the latest Complement checkout..." - wget -Nq https://github.com/matrix-org/complement/archive/master.tar.gz - tar -xzf master.tar.gz - COMPLEMENT_DIR=complement-master - echo "Checkout available at 'complement-master'" + COMPLEMENT_REF=${COMPLEMENT_REF:-master} + echo "COMPLEMENT_DIR not set. Fetching Complement checkout from ${COMPLEMENT_REF}..." + wget -Nq https://github.com/matrix-org/complement/archive/${COMPLEMENT_REF}.tar.gz + tar -xzf ${COMPLEMENT_REF}.tar.gz + COMPLEMENT_DIR=complement-${COMPLEMENT_REF} + echo "Checkout available at 'complement-${COMPLEMENT_REF}'" fi # Build the base Synapse image from the local checkout From f160fe18e3cc5604375f491300d12dd5c7e9b9b2 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 20 Jan 2022 13:38:44 +0000 Subject: [PATCH 118/474] Debug for device lists updates (#11760) Debug for #8631. I'm having a hard time tracking down what's going wrong in that issue. In the reported example, I could see server A sending federation traffic to server B and all was well. Yet B reports out-of-sync device updates from A. I couldn't see what was _in_ the events being sent from A to B. So I have added some crude logging to track - when we have updates to send to a remote HS - the edus we actually accumulate to send - when a federation transaction includes a device list update edu - when such an EDU is received This is a bit of a sledgehammer. --- changelog.d/11760.misc | 1 + .../federation/sender/transaction_manager.py | 12 ++++++++++++ .../federation/transport/server/federation.py | 15 +++++++++++++++ synapse/storage/databases/main/devices.py | 18 ++++++++++++++++++ 4 files changed, 46 insertions(+) create mode 100644 changelog.d/11760.misc diff --git a/changelog.d/11760.misc b/changelog.d/11760.misc new file mode 100644 index 000000000000..6cb1b5dd49ac --- /dev/null +++ b/changelog.d/11760.misc @@ -0,0 +1 @@ +Add optional debugging to investigate [issue 8631](https://github.com/matrix-org/synapse/issues/8631). 
\ No newline at end of file diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py index ab935e5a7eda..742ee572558d 100644 --- a/synapse/federation/sender/transaction_manager.py +++ b/synapse/federation/sender/transaction_manager.py @@ -35,6 +35,7 @@ import synapse.server logger = logging.getLogger(__name__) +issue_8631_logger = logging.getLogger("synapse.8631_debug") last_pdu_ts_metric = Gauge( "synapse_federation_last_sent_pdu_time", @@ -124,6 +125,17 @@ async def send_new_transaction( len(pdus), len(edus), ) + if issue_8631_logger.isEnabledFor(logging.DEBUG): + DEVICE_UPDATE_EDUS = {"m.device_list_update", "m.signing_key_update"} + device_list_updates = [ + edu.content for edu in edus if edu.edu_type in DEVICE_UPDATE_EDUS + ] + if device_list_updates: + issue_8631_logger.debug( + "about to send txn [%s] including device list updates: %s", + transaction.transaction_id, + device_list_updates, + ) # Actually send the transaction diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py index 77bfd88ad052..beadfa422ba3 100644 --- a/synapse/federation/transport/server/federation.py +++ b/synapse/federation/transport/server/federation.py @@ -36,6 +36,7 @@ from synapse.util.versionstring import get_version_string logger = logging.getLogger(__name__) +issue_8631_logger = logging.getLogger("synapse.8631_debug") class BaseFederationServerServlet(BaseFederationServlet): @@ -95,6 +96,20 @@ async def on_PUT( len(transaction_data.get("edus", [])), ) + if issue_8631_logger.isEnabledFor(logging.DEBUG): + DEVICE_UPDATE_EDUS = {"m.device_list_update", "m.signing_key_update"} + device_list_updates = [ + edu.content + for edu in transaction_data.get("edus", []) + if edu.edu_type in DEVICE_UPDATE_EDUS + ] + if device_list_updates: + issue_8631_logger.debug( + "received transaction [%s] including device list updates: %s", + transaction_id, + device_list_updates, + ) + except Exception as e: logger.exception(e) return 400, {"error": "Invalid transaction"} diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 8f0cd0695fa5..b2a5cd9a6508 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -53,6 +53,7 @@ from synapse.server import HomeServer logger = logging.getLogger(__name__) +issue_8631_logger = logging.getLogger("synapse.8631_debug") DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES = ( "drop_device_list_streams_non_unique_indexes" @@ -229,6 +230,12 @@ async def get_device_updates_by_remote( if not updates: return now_stream_id, [] + if issue_8631_logger.isEnabledFor(logging.DEBUG): + data = {(user, device): stream_id for user, device, stream_id, _ in updates} + issue_8631_logger.debug( + "device updates need to be sent to %s: %s", destination, data + ) + # get the cross-signing keys of the users in the list, so that we can # determine which of the device changes were cross-signing keys users = {r[0] for r in updates} @@ -365,6 +372,17 @@ async def get_device_updates_by_remote( # and remove the length budgeting above. 
results.append(("org.matrix.signing_key_update", result)) + if issue_8631_logger.isEnabledFor(logging.DEBUG): + for (user_id, edu) in results: + issue_8631_logger.debug( + "device update to %s for %s from %s to %s: %s", + destination, + user_id, + from_stream_id, + last_processed_stream_id, + edu, + ) + return last_processed_stream_id, results def _get_device_updates_by_remote_txn( From 91221b696156e9f1f9deecd425ae58af03ebb5d3 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 20 Jan 2022 09:21:06 -0500 Subject: [PATCH 119/474] Add deprecation warnings for `webclient` listener and non-HTTP(S) `web_client_location`. (#11774) This changes the behaviour of the root endpoint to redirect directly to the configuration of `web_client_location` if it is given an HTTP(S) URL. --- changelog.d/11774.misc | 1 + docs/sample_config.yaml | 10 +--------- docs/upgrade.md | 11 +++++++++++ synapse/app/homeserver.py | 12 +++++++----- synapse/config/server.py | 34 ++++++++++++++++++++-------------- 5 files changed, 40 insertions(+), 28 deletions(-) create mode 100644 changelog.d/11774.misc diff --git a/changelog.d/11774.misc b/changelog.d/11774.misc new file mode 100644 index 000000000000..136ba57f9410 --- /dev/null +++ b/changelog.d/11774.misc @@ -0,0 +1 @@ +Deprecate support for `webclient` listeners and non-HTTP(S) `web_client_location` configuration. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 5908f262e513..1b86d0295d73 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -74,13 +74,7 @@ server_name: "SERVERNAME" # pid_file: DATADIR/homeserver.pid -# The absolute URL to the web client which /_matrix/client will redirect -# to if 'webclient' is configured under the 'listeners' configuration. -# -# This option can be also set to the filesystem path to the web client -# which will be served at /_matrix/client/ if 'webclient' is configured -# under the 'listeners' configuration, however this is a security risk: -# https://github.com/matrix-org/synapse#security-note +# The absolute URL to the web client which / will redirect to. # #web_client_location: https://riot.example.com/ @@ -310,8 +304,6 @@ presence: # static: static resources under synapse/static (/_matrix/static). (Mostly # useful for 'fallback authentication'.) # -# webclient: A web client. Requires web_client_location to be set. -# listeners: # TLS-enabled listener: for when matrix traffic is sent directly to synapse. # diff --git a/docs/upgrade.md b/docs/upgrade.md index 30bb0dcd9cfd..f455d257babf 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -85,6 +85,17 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.51.0 + +## Deprecation of `webclient` listeners and non-HTTP(S) `web_client_location` + +Listeners of type `webclient` are deprecated and scheduled to be removed in +Synapse v1.53.0. + +Similarly, a non-HTTP(S) `web_client_location` configuration is deprecated and +will become a configuration error in Synapse v1.53.0. 
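Concretely, the classification that drives both the deprecation warning and the root redirect boils down to a scheme check; a simplified sketch of the logic this patch adds to `synapse/config/server.py` (names shortened):

```python
from typing import Optional


def is_redirect(web_client_location: Optional[str]) -> bool:
    """True when the configured location is an HTTP(S) URL, i.e. something
    Synapse can redirect to; a filesystem path or None is the deprecated case."""
    return web_client_location is not None and web_client_location.startswith(
        ("http://", "https://")
    )


assert is_redirect("https://riot.example.com/")
assert not is_redirect("/var/www/webclient")  # deprecated: logs a warning
assert not is_redirect(None)
```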
+ + # Upgrading to v1.50.0 ## Dropping support for old Python and Postgres versions diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index dd76e0732108..7ef0fdf272ed 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -132,8 +132,10 @@ def _listener_http( self._module_web_resources_consumed = True # try to find something useful to redirect '/' to - if WEB_CLIENT_PREFIX in resources: - root_resource: Resource = RootOptionsRedirectResource(WEB_CLIENT_PREFIX) + if self.config.server.web_client_location_is_redirect: + root_resource: Resource = RootOptionsRedirectResource( + self.config.server.web_client_location + ) elif STATIC_PREFIX in resources: root_resource = RootOptionsRedirectResource(STATIC_PREFIX) else: @@ -262,15 +264,15 @@ def _configure_named_resource( resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self) if name == "webclient": + # webclient listeners are deprecated as of Synapse v1.51.0, remove it + # in > v1.53.0. webclient_loc = self.config.server.web_client_location if webclient_loc is None: logger.warning( "Not enabling webclient resource, as web_client_location is unset." ) - elif webclient_loc.startswith("http://") or webclient_loc.startswith( - "https://" - ): + elif self.config.server.web_client_location_is_redirect: resources[WEB_CLIENT_PREFIX] = RootRedirect(webclient_loc) else: logger.warning( diff --git a/synapse/config/server.py b/synapse/config/server.py index 5010266b6983..f200d0c1f1cf 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -259,7 +259,6 @@ def read_config(self, config, **kwargs): raise ConfigError(str(e)) self.pid_file = self.abspath(config.get("pid_file")) - self.web_client_location = config.get("web_client_location", None) self.soft_file_limit = config.get("soft_file_limit", 0) self.daemonize = config.get("daemonize") self.print_pidfile = config.get("print_pidfile") @@ -506,8 +505,17 @@ def read_config(self, config, **kwargs): l2.append(listener) self.listeners = l2 - if not self.web_client_location: - _warn_if_webclient_configured(self.listeners) + self.web_client_location = config.get("web_client_location", None) + self.web_client_location_is_redirect = self.web_client_location and ( + self.web_client_location.startswith("http://") + or self.web_client_location.startswith("https://") + ) + # A non-HTTP(S) web client location is deprecated. + if self.web_client_location and not self.web_client_location_is_redirect: + logger.warning(NO_MORE_NONE_HTTP_WEB_CLIENT_LOCATION_WARNING) + + # Warn if webclient is configured for a worker. + _warn_if_webclient_configured(self.listeners) self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None)) self.gc_seconds = self.read_gc_intervals(config.get("gc_min_interval", None)) @@ -793,13 +801,7 @@ def generate_config_section( # pid_file: %(pid_file)s - # The absolute URL to the web client which /_matrix/client will redirect - # to if 'webclient' is configured under the 'listeners' configuration. - # - # This option can be also set to the filesystem path to the web client - # which will be served at /_matrix/client/ if 'webclient' is configured - # under the 'listeners' configuration, however this is a security risk: - # https://github.com/matrix-org/synapse#security-note + # The absolute URL to the web client which / will redirect to. # #web_client_location: https://riot.example.com/ @@ -1011,8 +1013,6 @@ def generate_config_section( # static: static resources under synapse/static (/_matrix/static). 
(Mostly # useful for 'fallback authentication'.) # - # webclient: A web client. Requires web_client_location to be set. - # listeners: # TLS-enabled listener: for when matrix traffic is sent directly to synapse. # @@ -1349,9 +1349,15 @@ def parse_listener_def(listener: Any) -> ListenerConfig: return ListenerConfig(port, bind_addresses, listener_type, tls, http_config) +NO_MORE_NONE_HTTP_WEB_CLIENT_LOCATION_WARNING = """ +Synapse no longer supports serving a web client. To remove this warning, +configure 'web_client_location' with an HTTP(S) URL. +""" + + NO_MORE_WEB_CLIENT_WARNING = """ -Synapse no longer includes a web client. To enable a web client, configure -web_client_location. To remove this warning, remove 'webclient' from the 'listeners' +Synapse no longer includes a web client. To redirect the root resource to a web client, configure +'web_client_location'. To remove this warning, remove 'webclient' from the 'listeners' configuration. """ From 56834ab779f82858c9229809e8095771ae48ac8d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 20 Jan 2022 14:37:11 +0000 Subject: [PATCH 120/474] installation.md: drop python 3.6 support (#11781) #11595 dropped support for python 3.6, but forgot to update this doc. --- changelog.d/11781.doc | 1 + docs/setup/installation.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11781.doc diff --git a/changelog.d/11781.doc b/changelog.d/11781.doc new file mode 100644 index 000000000000..b68e861d6772 --- /dev/null +++ b/changelog.d/11781.doc @@ -0,0 +1 @@ +Update installation instructions to note that Python 3.6 is no longer supported. diff --git a/docs/setup/installation.md b/docs/setup/installation.md index 210c80daced8..fe657a15dfa6 100644 --- a/docs/setup/installation.md +++ b/docs/setup/installation.md @@ -194,7 +194,7 @@ When following this route please make sure that the [Platform-specific prerequis System requirements: - POSIX-compliant system (tested on Linux & OS X) -- Python 3.6 or later, up to Python 3.9. +- Python 3.7 or later, up to Python 3.9. - At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org To install the Synapse homeserver run: From 7bf2d6c268bb9fb6a1b22cff4287ce5f8224ac7b Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 20 Jan 2022 14:37:34 +0000 Subject: [PATCH 121/474] Partially revert #11675; prevent attempting to create pushers on workers (#11770) --- changelog.d/11770.feature | 1 + synapse/handlers/register.py | 12 +++++------- 2 files changed, 6 insertions(+), 7 deletions(-) create mode 100644 changelog.d/11770.feature diff --git a/changelog.d/11770.feature b/changelog.d/11770.feature new file mode 100644 index 000000000000..72777075cb33 --- /dev/null +++ b/changelog.d/11770.feature @@ -0,0 +1 @@ +Add a flag to the `synapse_review_recent_signups` script to ignore and filter appservice users. 
\ No newline at end of file diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 68dbae591618..f08a516a7588 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -979,18 +979,16 @@ async def _register_email_threepid( if ( self.hs.config.email.email_enable_notifs and self.hs.config.email.email_notif_for_new_users + and token ): # Pull the ID of the access token back out of the db # It would really make more sense for this to be passed # up when the access token is saved, but that's quite an # invasive change I'd rather do separately. - if token: - user_tuple = await self.store.get_user_by_access_token(token) - # The token better still exist. - assert user_tuple - token_id = user_tuple.token_id - else: - token_id = None + user_tuple = await self.store.get_user_by_access_token(token) + # The token better still exist. + assert user_tuple + token_id = user_tuple.token_id await self.pusher_pool.add_pusher( user_id=user_id, From 121b9e2475f4d7b3bca50d81732f07db80b2264f Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 20 Jan 2022 14:47:29 +0000 Subject: [PATCH 122/474] Add a regression test for using both webclient and client resources simultaneously (#11765) --- changelog.d/11765.misc | 1 + tests/http/test_webclient.py | 108 +++++++++++++++++++++++++++++++++++ 2 files changed, 109 insertions(+) create mode 100644 changelog.d/11765.misc create mode 100644 tests/http/test_webclient.py diff --git a/changelog.d/11765.misc b/changelog.d/11765.misc new file mode 100644 index 000000000000..a6c946e45237 --- /dev/null +++ b/changelog.d/11765.misc @@ -0,0 +1 @@ +Add a unit test that checks both `client` and `webclient` resources will function when simultaneously enabled. \ No newline at end of file diff --git a/tests/http/test_webclient.py b/tests/http/test_webclient.py new file mode 100644 index 000000000000..ee5cf299f64c --- /dev/null +++ b/tests/http/test_webclient.py @@ -0,0 +1,108 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from http import HTTPStatus +from typing import Dict + +from twisted.web.resource import Resource + +from synapse.app.homeserver import SynapseHomeServer +from synapse.config.server import HttpListenerConfig, HttpResourceConfig, ListenerConfig +from synapse.http.site import SynapseSite + +from tests.server import make_request +from tests.unittest import HomeserverTestCase, create_resource_tree, override_config + + +class WebClientTests(HomeserverTestCase): + @override_config( + { + "web_client_location": "https://example.org", + } + ) + def test_webclient_resolves_with_client_resource(self): + """ + Tests that both client and webclient resources can be accessed simultaneously. + + This is a regression test created in response to https://github.com/matrix-org/synapse/issues/11763. 
+ """ + for resource_name_order_list in [ + ["webclient", "client"], + ["client", "webclient"], + ]: + # Create a dictionary from path regex -> resource + resource_dict: Dict[str, Resource] = {} + + for resource_name in resource_name_order_list: + resource_dict.update( + SynapseHomeServer._configure_named_resource(self.hs, resource_name) + ) + + # Create a root resource which ties the above resources together into one + root_resource = Resource() + create_resource_tree(resource_dict, root_resource) + + # Create a site configured with this resource to make HTTP requests against + listener_config = ListenerConfig( + port=8008, + bind_addresses=["127.0.0.1"], + type="http", + http_options=HttpListenerConfig( + resources=[HttpResourceConfig(names=resource_name_order_list)] + ), + ) + test_site = SynapseSite( + logger_name="synapse.access.http.fake", + site_tag=self.hs.config.server.server_name, + config=listener_config, + resource=root_resource, + server_version_string="1", + max_request_body_size=1234, + reactor=self.reactor, + ) + + # Attempt to make requests to endpoints on both the webclient and client resources + # on test_site. + self._request_client_and_webclient_resources(test_site) + + def _request_client_and_webclient_resources(self, test_site: SynapseSite) -> None: + """Make a request to an endpoint on both the webclient and client-server resources + of the given SynapseSite. + + Args: + test_site: The SynapseSite object to make requests against. + """ + + # Ensure that the *webclient* resource is behaving as expected (we get redirected to + # the configured web_client_location) + channel = make_request( + self.reactor, + site=test_site, + method="GET", + path="/_matrix/client", + ) + # Check that we are being redirected to the webclient location URI. + self.assertEqual(channel.code, HTTPStatus.FOUND) + self.assertEqual( + channel.headers.getRawHeaders("Location"), ["https://example.org"] + ) + + # Ensure that a request to the *client* resource works. + channel = make_request( + self.reactor, + site=test_site, + method="GET", + path="/_matrix/client/v3/login", + ) + self.assertEqual(channel.code, HTTPStatus.OK) + self.assertIn("flows", channel.json_body) From d09099642e3a1fe9b26149fa57c58db3c26afc10 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 20 Jan 2022 10:34:45 -0500 Subject: [PATCH 123/474] Fix redirecting to the webclient for non-HTTP(S) web_client_location. (#11783) To not change the behaviour during the deprecation period. Follow-up to #11774. --- changelog.d/11783.misc | 1 + synapse/app/homeserver.py | 9 ++++++++- 2 files changed, 9 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11783.misc diff --git a/changelog.d/11783.misc b/changelog.d/11783.misc new file mode 100644 index 000000000000..136ba57f9410 --- /dev/null +++ b/changelog.d/11783.misc @@ -0,0 +1 @@ +Deprecate support for `webclient` listeners and non-HTTP(S) `web_client_location` configuration. diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 7ef0fdf272ed..efedcc88894b 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -131,11 +131,18 @@ def _listener_http( resources.update(self._module_web_resources) self._module_web_resources_consumed = True - # try to find something useful to redirect '/' to + # Try to find something useful to serve at '/': + # + # 1. Redirect to the web client if it is an HTTP(S) URL. + # 2. Redirect to the web client served via Synapse. + # 3. Redirect to the static "Synapse is running" page. + # 4. 
Do not redirect and use a blank resource. if self.config.server.web_client_location_is_redirect: root_resource: Resource = RootOptionsRedirectResource( self.config.server.web_client_location ) + elif WEB_CLIENT_PREFIX in resources: + root_resource = RootOptionsRedirectResource(WEB_CLIENT_PREFIX) elif STATIC_PREFIX in resources: root_resource = RootOptionsRedirectResource(STATIC_PREFIX) else: From bfe6d5553abc5d812bce22488d9ea5137d2b9416 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 20 Jan 2022 19:19:40 +0100 Subject: [PATCH 124/474] Correctly await on_logged_out callbacks (#11786) --- changelog.d/11786.bugfix | 1 + synapse/handlers/auth.py | 2 +- tests/handlers/test_password_providers.py | 28 ++++++++++++++++++++++- 3 files changed, 29 insertions(+), 2 deletions(-) create mode 100644 changelog.d/11786.bugfix diff --git a/changelog.d/11786.bugfix b/changelog.d/11786.bugfix new file mode 100644 index 000000000000..306875f2dd63 --- /dev/null +++ b/changelog.d/11786.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.46.0 that prevented `on_logged_out` module callbacks from being correctly awaited by Synapse. diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 2389c9ac5214..bd1a3225638a 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -2281,7 +2281,7 @@ async def on_logged_out( # call all of the on_logged_out callbacks for callback in self.on_logged_out_callbacks: try: - callback(user_id, device_id, access_token) + await callback(user_id, device_id, access_token) except Exception as e: logger.warning("Failed to run module API callback %s: %s", callback, e) continue diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py index 08e9730d4dfa..2add72b28a3a 100644 --- a/tests/handlers/test_password_providers.py +++ b/tests/handlers/test_password_providers.py @@ -22,7 +22,7 @@ import synapse from synapse.handlers.auth import load_legacy_password_auth_providers from synapse.module_api import ModuleApi -from synapse.rest.client import devices, login +from synapse.rest.client import devices, login, logout from synapse.types import JsonDict from tests import unittest @@ -155,6 +155,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): synapse.rest.admin.register_servlets, login.register_servlets, devices.register_servlets, + logout.register_servlets, ] def setUp(self): @@ -719,6 +720,31 @@ def custom_auth_no_local_user_fallback_test_body(self): channel = self._send_password_login("localuser", "localpass") self.assertEqual(channel.code, 400, channel.result) + def test_on_logged_out(self): + """Tests that the on_logged_out callback is called when the user logs out.""" + self.register_user("rin", "password") + tok = self.login("rin", "password") + + self.called = False + + async def on_logged_out(user_id, device_id, access_token): + self.called = True + + on_logged_out = Mock(side_effect=on_logged_out) + self.hs.get_password_auth_provider().on_logged_out_callbacks.append( + on_logged_out + ) + + channel = self.make_request( + "POST", + "/_matrix/client/v3/logout", + {}, + access_token=tok, + ) + self.assertEqual(channel.code, 200) + on_logged_out.assert_called_once() + self.assertTrue(self.called) + def _get_login_flows(self) -> JsonDict: channel = self.make_request("GET", "/_matrix/client/r0/login") self.assertEqual(channel.code, 200, channel.result) From e83520cc42cb174a5d3dc5ca1dcce299ad4abb25 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Fri, 21 Jan 2022 08:01:37 +0000 Subject: [PATCH 
125/474] Make `get_account_data_for_room_and_type` a tree cache (#11789) --- changelog.d/11789.feature | 1 + synapse/storage/databases/main/account_data.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11789.feature diff --git a/changelog.d/11789.feature b/changelog.d/11789.feature new file mode 100644 index 000000000000..dc426fb658ac --- /dev/null +++ b/changelog.d/11789.feature @@ -0,0 +1 @@ +Remove account data (including client config, push rules and ignored users) upon user deactivation. \ No newline at end of file diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index ef475e18c788..bb3740711e3b 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -210,7 +210,7 @@ def get_account_data_for_room_txn( "get_account_data_for_room", get_account_data_for_room_txn ) - @cached(num_args=3, max_entries=5000) + @cached(num_args=3, max_entries=5000, tree=True) async def get_account_data_for_room_and_type( self, user_id: str, room_id: str, account_data_type: str ) -> Optional[JsonDict]: From 4c2096599c9780290703e14df63963e77d058dda Mon Sep 17 00:00:00 2001 From: reivilibre Date: Fri, 21 Jan 2022 08:38:36 +0000 Subject: [PATCH 126/474] Make the `get_global_account_data_by_type_for_user` cache be a tree-cache whose key is prefixed with the user ID (#11788) --- changelog.d/11788.feature | 1 + synapse/handlers/sync.py | 2 +- synapse/rest/client/account_data.py | 2 +- synapse/storage/databases/main/account_data.py | 8 ++++---- synapse/visibility.py | 2 +- tests/replication/slave/storage/test_account_data.py | 4 ++-- 6 files changed, 10 insertions(+), 9 deletions(-) create mode 100644 changelog.d/11788.feature diff --git a/changelog.d/11788.feature b/changelog.d/11788.feature new file mode 100644 index 000000000000..dc426fb658ac --- /dev/null +++ b/changelog.d/11788.feature @@ -0,0 +1 @@ +Remove account data (including client config, push rules and ignored users) upon user deactivation. \ No newline at end of file diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index ffc6b748e84e..7e2a892b63ae 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1619,7 +1619,7 @@ async def _get_ignored_users(self, user_id: str) -> FrozenSet[str]: # TODO: Can we `SELECT ignored_user_id FROM ignored_users WHERE ignorer_user_id=?;` instead? 
ignored_account_data = ( await self.store.get_global_account_data_by_type_for_user( - AccountDataTypes.IGNORED_USER_LIST, user_id=user_id + user_id=user_id, data_type=AccountDataTypes.IGNORED_USER_LIST ) ) diff --git a/synapse/rest/client/account_data.py b/synapse/rest/client/account_data.py index d1badbdf3bec..58b8adbd3245 100644 --- a/synapse/rest/client/account_data.py +++ b/synapse/rest/client/account_data.py @@ -66,7 +66,7 @@ async def on_GET( raise AuthError(403, "Cannot get account data for other users.") event = await self.store.get_global_account_data_by_type_for_user( - account_data_type, user_id + user_id, account_data_type ) if event is None: diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index bb3740711e3b..9c19f0965f39 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -158,9 +158,9 @@ def get_account_data_for_user_txn( "get_account_data_for_user", get_account_data_for_user_txn ) - @cached(num_args=2, max_entries=5000) + @cached(num_args=2, max_entries=5000, tree=True) async def get_global_account_data_by_type_for_user( - self, data_type: str, user_id: str + self, user_id: str, data_type: str ) -> Optional[JsonDict]: """ Returns: @@ -392,7 +392,7 @@ def process_replication_rows( for row in rows: if not row.room_id: self.get_global_account_data_by_type_for_user.invalidate( - (row.data_type, row.user_id) + (row.user_id, row.data_type) ) self.get_account_data_for_user.invalidate((row.user_id,)) self.get_account_data_for_room.invalidate((row.user_id, row.room_id)) @@ -476,7 +476,7 @@ async def add_account_data_for_user( self._account_data_stream_cache.entity_has_changed(user_id, next_id) self.get_account_data_for_user.invalidate((user_id,)) self.get_global_account_data_by_type_for_user.invalidate( - (account_data_type, user_id) + (user_id, account_data_type) ) return self._account_data_id_gen.get_current_token() diff --git a/synapse/visibility.py b/synapse/visibility.py index 17532059e9f8..1b970ce479d0 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -87,7 +87,7 @@ async def filter_events_for_client( ) ignore_dict_content = await storage.main.get_global_account_data_by_type_for_user( - AccountDataTypes.IGNORED_USER_LIST, user_id + user_id, AccountDataTypes.IGNORED_USER_LIST ) ignore_list: FrozenSet[str] = frozenset() diff --git a/tests/replication/slave/storage/test_account_data.py b/tests/replication/slave/storage/test_account_data.py index 43e3248703c9..1524087c43d4 100644 --- a/tests/replication/slave/storage/test_account_data.py +++ b/tests/replication/slave/storage/test_account_data.py @@ -30,7 +30,7 @@ def test_user_account_data(self): ) self.replicate() self.check( - "get_global_account_data_by_type_for_user", [TYPE, USER_ID], {"a": 1} + "get_global_account_data_by_type_for_user", [USER_ID, TYPE], {"a": 1} ) self.get_success( @@ -38,5 +38,5 @@ def test_user_account_data(self): ) self.replicate() self.check( - "get_global_account_data_by_type_for_user", [TYPE, USER_ID], {"a": 2} + "get_global_account_data_by_type_for_user", [USER_ID, TYPE], {"a": 2} ) From c027bc0e4b071d3d40c7b0c1e7011ad5c8c3d0a0 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 21 Jan 2022 09:10:01 +0000 Subject: [PATCH 127/474] Add `FrozenEvent.get_state_key` and use it in a couple of places (#11793) This is more efficient, since we only have to look up `state_key` in the event dict once, rather than three 
(!) times. --- changelog.d/11793.misc | 1 + synapse/events/__init__.py | 13 +++++++++---- synapse/events/snapshot.py | 2 +- 3 files changed, 11 insertions(+), 5 deletions(-) create mode 100644 changelog.d/11793.misc diff --git a/changelog.d/11793.misc b/changelog.d/11793.misc new file mode 100644 index 000000000000..fc0530bf2cb6 --- /dev/null +++ b/changelog.d/11793.misc @@ -0,0 +1 @@ +Add `FrozenEvent.get_state_key` and use it in a couple of places. diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 38f3cf4d330d..9acb3c0cc454 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -315,10 +315,11 @@ def __init__( redacts: DefaultDictProperty[Optional[str]] = DefaultDictProperty("redacts", None) room_id: DictProperty[str] = DictProperty("room_id") sender: DictProperty[str] = DictProperty("sender") - # TODO state_key should be Optional[str], this is generally asserted in Synapse - # by calling is_state() first (which ensures this), but it is hard (not possible?) + # TODO state_key should be Optional[str]. This is generally asserted in Synapse + # by calling is_state() first (which ensures it is not None), but it is hard (not possible?) # to properly annotate that calling is_state() asserts that state_key exists - # and is non-None. + # and is non-None. It would be better to replace such direct references with + # get_state_key() (and a check for None). state_key: DictProperty[str] = DictProperty("state_key") type: DictProperty[str] = DictProperty("type") user_id: DictProperty[str] = DictProperty("sender") @@ -332,7 +333,11 @@ def membership(self) -> str: return self.content["membership"] def is_state(self) -> bool: - return hasattr(self, "state_key") and self.state_key is not None + return self.get_state_key() is not None + + def get_state_key(self) -> Optional[str]: + """Get the state key of this event, or None if it's not a state event""" + return self._dict.get("state_key") def get_dict(self) -> JsonDict: d = dict(self._dict) diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index 0eab1aefd637..5833fee25fce 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -163,7 +163,7 @@ async def serialize(self, event: EventBase, store: "DataStore") -> JsonDict: return { "prev_state_id": prev_state_id, "event_type": event.type, - "event_state_key": event.state_key if event.is_state() else None, + "event_state_key": event.get_state_key(), "state_group": self._state_group, "state_group_before_event": self.state_group_before_event, "rejected": self.rejected, From 227727548546c12e644721d0380be056eead48b0 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 21 Jan 2022 09:18:10 +0000 Subject: [PATCH 128/474] Stop reading from `event_reference_hashes` (#11794) Preparation for dropping this table altogether. Part of #6574. --- changelog.d/11794.misc | 1 + synapse/replication/slave/storage/events.py | 2 +- .../databases/main/event_federation.py | 2 +- synapse/storage/databases/main/signatures.py | 54 +++++++++---------- synapse/storage/schema/__init__.py | 5 +- 5 files changed, 31 insertions(+), 33 deletions(-) create mode 100644 changelog.d/11794.misc diff --git a/changelog.d/11794.misc b/changelog.d/11794.misc new file mode 100644 index 000000000000..29826bc0e5ab --- /dev/null +++ b/changelog.d/11794.misc @@ -0,0 +1 @@ +Preparation for database schema simplifications: stop reading from `event_reference_hashes`. 
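The premise of the diffs below is that a reference hash never needs to be read from a table, because it can always be recomputed from the event via `compute_event_reference_hash`. Roughly, that function digests a canonical-JSON encoding of the event with volatile fields stripped; the following is an illustrative sketch only (the authoritative implementation lives in `synapse.crypto.event_signing`):

```python
import hashlib
from typing import Dict, Tuple

from canonicaljson import encode_canonical_json


def reference_hash(event_dict: Dict) -> Tuple[str, bytes]:
    """Illustrative: hash the event's canonical JSON with fields that vary
    between servers (signatures, unsigned data) removed, so every server
    derives the same hash for the same event."""
    stripped = dict(event_dict)
    for key in ("signatures", "unsigned", "age_ts"):
        stripped.pop(key, None)
    digest = hashlib.sha256(encode_canonical_json(stripped))
    return digest.name, digest.digest()
```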
diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py index 0f0837269486..a72dad7464d7 100644 --- a/synapse/replication/slave/storage/events.py +++ b/synapse/replication/slave/storage/events.py @@ -52,8 +52,8 @@ class SlavedEventStore( EventPushActionsWorkerStore, StreamWorkerStore, StateGroupWorkerStore, - EventsWorkerStore, SignatureWorkerStore, + EventsWorkerStore, UserErasureWorkerStore, RelationsWorkerStore, BaseSlavedStore, diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index 270b30800bf2..0856a9332aa4 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -65,7 +65,7 @@ def __init__(self, room_id: str): super().__init__("Unexpectedly no chain cover for events in %s" % (room_id,)) -class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBaseStore): +class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBaseStore): def __init__( self, database: DatabasePool, diff --git a/synapse/storage/databases/main/signatures.py b/synapse/storage/databases/main/signatures.py index 3201623fe415..0518b8b910e0 100644 --- a/synapse/storage/databases/main/signatures.py +++ b/synapse/storage/databases/main/signatures.py @@ -12,16 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict, Iterable, List, Tuple +from typing import Collection, Dict, List, Tuple from unpaddedbase64 import encode_base64 -from synapse.storage._base import SQLBaseStore -from synapse.storage.types import Cursor +from synapse.crypto.event_signing import compute_event_reference_hash +from synapse.storage.databases.main.events_worker import ( + EventRedactBehaviour, + EventsWorkerStore, +) from synapse.util.caches.descriptors import cached, cachedList -class SignatureWorkerStore(SQLBaseStore): +class SignatureWorkerStore(EventsWorkerStore): @cached() def get_event_reference_hash(self, event_id): # This is a dummy function to allow get_event_reference_hashes @@ -32,7 +35,7 @@ def get_event_reference_hash(self, event_id): cached_method_name="get_event_reference_hash", list_name="event_ids", num_args=1 ) async def get_event_reference_hashes( - self, event_ids: Iterable[str] + self, event_ids: Collection[str] ) -> Dict[str, Dict[str, bytes]]: """Get all hashes for given events. @@ -41,18 +44,27 @@ async def get_event_reference_hashes( Returns: A mapping of event ID to a mapping of algorithm to hash. + Returns an empty dict for a given event id if that event is unknown. 
""" + events = await self.get_events( + event_ids, + redact_behaviour=EventRedactBehaviour.AS_IS, + allow_rejected=True, + ) - def f(txn): - return { - event_id: self._get_event_reference_hashes_txn(txn, event_id) - for event_id in event_ids - } + hashes: Dict[str, Dict[str, bytes]] = {} + for event_id in event_ids: + event = events.get(event_id) + if event is None: + hashes[event_id] = {} + else: + ref_alg, ref_hash_bytes = compute_event_reference_hash(event) + hashes[event_id] = {ref_alg: ref_hash_bytes} - return await self.db_pool.runInteraction("get_event_reference_hashes", f) + return hashes async def add_event_hashes( - self, event_ids: Iterable[str] + self, event_ids: Collection[str] ) -> List[Tuple[str, Dict[str, str]]]: """ @@ -70,24 +82,6 @@ async def add_event_hashes( return list(encoded_hashes.items()) - def _get_event_reference_hashes_txn( - self, txn: Cursor, event_id: str - ) -> Dict[str, bytes]: - """Get all the hashes for a given PDU. - Args: - txn: - event_id: Id for the Event. - Returns: - A mapping of algorithm -> hash. - """ - query = ( - "SELECT algorithm, hash" - " FROM event_reference_hashes" - " WHERE event_id = ?" - ) - txn.execute(query, (event_id,)) - return {k: v for k, v in txn} - class SignatureStore(SignatureWorkerStore): """Persistence for event signatures and hashes""" diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index 2a3d47185ae5..166173ba376c 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -SCHEMA_VERSION = 67 # remember to update the list below when updating +SCHEMA_VERSION = 68 # remember to update the list below when updating """Represents the expectations made by the codebase about the database schema This should be incremented whenever the codebase changes its requirements on the @@ -53,6 +53,9 @@ Changes in SCHEMA_VERSION = 67: - state_events.prev_state is no longer written to. + +Changes in SCHEMA_VERSION = 68: + - event_reference_hashes is no longer read. """ From 9f2016e96e800460c390c2f2de85797910954ca6 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 21 Jan 2022 09:19:56 +0000 Subject: [PATCH 129/474] Drop unused table `public_room_list_stream`. (#11795) This is a follow-up to #10565. --- changelog.d/11795.misc | 1 + synapse/storage/databases/main/purge_events.py | 1 - synapse/storage/schema/__init__.py | 4 +++- .../67/01drop_public_room_list_stream.sql | 18 ++++++++++++++++++ tests/rest/admin/test_room.py | 1 - 5 files changed, 22 insertions(+), 3 deletions(-) create mode 100644 changelog.d/11795.misc create mode 100644 synapse/storage/schema/main/delta/67/01drop_public_room_list_stream.sql diff --git a/changelog.d/11795.misc b/changelog.d/11795.misc new file mode 100644 index 000000000000..aeba317670e9 --- /dev/null +++ b/changelog.d/11795.misc @@ -0,0 +1 @@ +Drop unused table `public_room_list_stream`. 
diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py
index 91b0576b8568..e87a8fb85dce 100644
--- a/synapse/storage/databases/main/purge_events.py
+++ b/synapse/storage/databases/main/purge_events.py
@@ -390,7 +390,6 @@ def _purge_room_txn(self, txn, room_id: str) -> List[int]:
             "event_search",
             "events",
             "group_rooms",
-            "public_room_list_stream",
             "receipts_graph",
             "receipts_linearized",
             "room_aliases",
diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py
index 166173ba376c..75659f931c68 100644
--- a/synapse/storage/schema/__init__.py
+++ b/synapse/storage/schema/__init__.py
@@ -60,7 +60,9 @@
 SCHEMA_COMPAT_VERSION = (
-    61  # 61: Remove unused tables `user_stats_historical` and `room_stats_historical`
+    # we have removed the public_room_list_stream table, so are now incompatible with
+    # synapses with SCHEMA_VERSION < 63.
+    63
 )
 """Limit on how far the synapse codebase can be rolled back without breaking db compat
diff --git a/synapse/storage/schema/main/delta/67/01drop_public_room_list_stream.sql b/synapse/storage/schema/main/delta/67/01drop_public_room_list_stream.sql
new file mode 100644
index 000000000000..1eb8de9907a5
--- /dev/null
+++ b/synapse/storage/schema/main/delta/67/01drop_public_room_list_stream.sql
@@ -0,0 +1,18 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- this table is unused as of Synapse 1.41
+DROP TABLE public_room_list_stream;
+
diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py
index 3495a0366ad3..23da0ad73679 100644
--- a/tests/rest/admin/test_room.py
+++ b/tests/rest/admin/test_room.py
@@ -2468,7 +2468,6 @@ def _block_room(self, room_id: str) -> None:
             "event_search",
             "events",
             "group_rooms",
-            "public_room_list_stream",
             "receipts_graph",
             "receipts_linearized",
             "room_aliases",

From b784299cbc121d27d7dadd0a4a96f4657244a4e9 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Fri, 21 Jan 2022 05:31:31 -0500
Subject: [PATCH 130/474] Do not try to serialize raw aggregations dict.
 (#11791)

---
 changelog.d/11612.bugfix            |   1 +
 changelog.d/11612.misc              |   1 -
 changelog.d/11791.bugfix            |   1 +
 synapse/events/utils.py             |   4 +-
 synapse/rest/admin/rooms.py         |  13 ++--
 synapse/rest/client/room.py         |  11 ++-
 tests/rest/client/test_relations.py | 108 +++++++++++++++++++---------
 7 files changed, 85 insertions(+), 54 deletions(-)
 create mode 100644 changelog.d/11612.bugfix
 create mode 100644 changelog.d/11791.bugfix

diff --git a/changelog.d/11612.bugfix b/changelog.d/11612.bugfix
new file mode 100644
index 000000000000..842f6892fda3
--- /dev/null
+++ b/changelog.d/11612.bugfix
@@ -0,0 +1 @@
+Include the bundled aggregations in the `/sync` response, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675).
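The recurring pattern in the handler diffs below: the raw `aggregations` map is popped out of the results dict, rather than merely read, so it can never be serialized into the response as-is. A hedged sketch of that shape, with `render_context` and `serializer.serialize` as hypothetical stand-ins:

```python
def render_context(results: dict, serializer, now: int) -> dict:
    """Hypothetical helper distilling the fix: pop the raw aggregation map
    out of `results` so it is never emitted verbatim, then pass it to the
    serializer for each event field instead."""
    aggregations = results.pop("aggregations", None)
    for key in ("events_before", "event", "events_after"):
        results[key] = serializer.serialize(
            results[key], now, bundle_aggregations=aggregations
        )
    return results
```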
diff --git a/changelog.d/11612.misc b/changelog.d/11612.misc deleted file mode 100644 index 2d886169c547..000000000000 --- a/changelog.d/11612.misc +++ /dev/null @@ -1 +0,0 @@ -Avoid database access in the JSON serialization process. diff --git a/changelog.d/11791.bugfix b/changelog.d/11791.bugfix new file mode 100644 index 000000000000..842f6892fda3 --- /dev/null +++ b/changelog.d/11791.bugfix @@ -0,0 +1 @@ +Include the bundled aggregations in the `/sync` response, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). diff --git a/synapse/events/utils.py b/synapse/events/utils.py index de0e0c17312a..918adeecf8cd 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -402,7 +402,7 @@ def serialize_event( if bundle_aggregations: event_aggregations = bundle_aggregations.get(event.event_id) if event_aggregations: - self._injected_bundled_aggregations( + self._inject_bundled_aggregations( event, time_now, bundle_aggregations[event.event_id], @@ -411,7 +411,7 @@ def serialize_event( return serialized_event - def _injected_bundled_aggregations( + def _inject_bundled_aggregations( self, event: EventBase, time_now: int, diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index 2e714ac87bfe..efe25fe7ebf7 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -744,20 +744,15 @@ async def on_GET( ) time_now = self.clock.time_msec() + aggregations = results.pop("aggregations", None) results["events_before"] = self._event_serializer.serialize_events( - results["events_before"], - time_now, - bundle_aggregations=results["aggregations"], + results["events_before"], time_now, bundle_aggregations=aggregations ) results["event"] = self._event_serializer.serialize_event( - results["event"], - time_now, - bundle_aggregations=results["aggregations"], + results["event"], time_now, bundle_aggregations=aggregations ) results["events_after"] = self._event_serializer.serialize_events( - results["events_after"], - time_now, - bundle_aggregations=results["aggregations"], + results["events_after"], time_now, bundle_aggregations=aggregations ) results["state"] = self._event_serializer.serialize_events( results["state"], time_now diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 31fd329a3862..90bb9142a098 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -714,18 +714,15 @@ async def on_GET( raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND) time_now = self.clock.time_msec() + aggregations = results.pop("aggregations", None) results["events_before"] = self._event_serializer.serialize_events( - results["events_before"], - time_now, - bundle_aggregations=results["aggregations"], + results["events_before"], time_now, bundle_aggregations=aggregations ) results["event"] = self._event_serializer.serialize_event( - results["event"], time_now, bundle_aggregations=results["aggregations"] + results["event"], time_now, bundle_aggregations=aggregations ) results["events_after"] = self._event_serializer.serialize_events( - results["events_after"], - time_now, - bundle_aggregations=results["aggregations"], + results["events_after"], time_now, bundle_aggregations=aggregations ) results["state"] = self._event_serializer.serialize_events( results["state"], time_now diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index 4b20ab0e3e57..c9b220e73d1a 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -21,6 +21,7 @@ from 
synapse.api.constants import EventTypes, RelationTypes from synapse.rest import admin from synapse.rest.client import login, register, relations, room, sync +from synapse.types import JsonDict from tests import unittest from tests.server import FakeChannel @@ -454,7 +455,14 @@ def test_aggregation_must_be_annotation(self): @unittest.override_config({"experimental_features": {"msc3440_enabled": True}}) def test_bundled_aggregations(self): - """Test that annotations, references, and threads get correctly bundled.""" + """ + Test that annotations, references, and threads get correctly bundled. + + Note that this doesn't test against /relations since only thread relations + get bundled via that API. See test_aggregation_get_event_for_thread. + + See test_edit for a similar test for edits. + """ # Setup by sending a variety of relations. channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") self.assertEquals(200, channel.code, channel.json_body) @@ -482,12 +490,13 @@ def test_bundled_aggregations(self): self.assertEquals(200, channel.code, channel.json_body) thread_2 = channel.json_body["event_id"] - def assert_bundle(actual): + def assert_bundle(event_json: JsonDict) -> None: """Assert the expected values of the bundled aggregations.""" + relations_dict = event_json["unsigned"].get("m.relations") # Ensure the fields are as expected. self.assertCountEqual( - actual.keys(), + relations_dict.keys(), ( RelationTypes.ANNOTATION, RelationTypes.REFERENCE, @@ -503,20 +512,20 @@ def assert_bundle(actual): {"type": "m.reaction", "key": "b", "count": 1}, ] }, - actual[RelationTypes.ANNOTATION], + relations_dict[RelationTypes.ANNOTATION], ) self.assertEquals( {"chunk": [{"event_id": reply_1}, {"event_id": reply_2}]}, - actual[RelationTypes.REFERENCE], + relations_dict[RelationTypes.REFERENCE], ) self.assertEquals( 2, - actual[RelationTypes.THREAD].get("count"), + relations_dict[RelationTypes.THREAD].get("count"), ) self.assertTrue( - actual[RelationTypes.THREAD].get("current_user_participated") + relations_dict[RelationTypes.THREAD].get("current_user_participated") ) # The latest thread event has some fields that don't matter. self.assert_dict( @@ -533,20 +542,9 @@ def assert_bundle(actual): "type": "m.room.test", "user_id": self.user_id, }, - actual[RelationTypes.THREAD].get("latest_event"), + relations_dict[RelationTypes.THREAD].get("latest_event"), ) - def _find_and_assert_event(events): - """ - Find the parent event in a chunk of events and assert that it has the proper bundled aggregations. - """ - for event in events: - if event["event_id"] == self.parent_id: - break - else: - raise AssertionError(f"Event {self.parent_id} not found in chunk") - assert_bundle(event["unsigned"].get("m.relations")) - # Request the event directly. channel = self.make_request( "GET", @@ -554,7 +552,7 @@ def _find_and_assert_event(events): access_token=self.user_token, ) self.assertEquals(200, channel.code, channel.json_body) - assert_bundle(channel.json_body["unsigned"].get("m.relations")) + assert_bundle(channel.json_body) # Request the room messages. channel = self.make_request( @@ -563,7 +561,7 @@ def _find_and_assert_event(events): access_token=self.user_token, ) self.assertEquals(200, channel.code, channel.json_body) - _find_and_assert_event(channel.json_body["chunk"]) + assert_bundle(self._find_event_in_chunk(channel.json_body["chunk"])) # Request the room context. 
channel = self.make_request( @@ -572,17 +570,14 @@ def _find_and_assert_event(events): access_token=self.user_token, ) self.assertEquals(200, channel.code, channel.json_body) - assert_bundle(channel.json_body["event"]["unsigned"].get("m.relations")) + assert_bundle(channel.json_body["event"]) # Request sync. channel = self.make_request("GET", "/sync", access_token=self.user_token) self.assertEquals(200, channel.code, channel.json_body) room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] self.assertTrue(room_timeline["limited"]) - _find_and_assert_event(room_timeline["events"]) - - # Note that /relations is tested separately in test_aggregation_get_event_for_thread - # since it needs different data configured. + self._find_event_in_chunk(room_timeline["events"]) def test_aggregation_get_event_for_annotation(self): """Test that annotations do not get bundled aggregations included @@ -777,25 +772,58 @@ def test_edit(self): edit_event_id = channel.json_body["event_id"] + def assert_bundle(event_json: JsonDict) -> None: + """Assert the expected values of the bundled aggregations.""" + relations_dict = event_json["unsigned"].get("m.relations") + self.assertIn(RelationTypes.REPLACE, relations_dict) + + m_replace_dict = relations_dict[RelationTypes.REPLACE] + for key in ["event_id", "sender", "origin_server_ts"]: + self.assertIn(key, m_replace_dict) + + self.assert_dict( + {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict + ) + channel = self.make_request( "GET", - "/rooms/%s/event/%s" % (self.room, self.parent_id), + f"/rooms/{self.room}/event/{self.parent_id}", access_token=self.user_token, ) self.assertEquals(200, channel.code, channel.json_body) - self.assertEquals(channel.json_body["content"], new_body) + assert_bundle(channel.json_body) - relations_dict = channel.json_body["unsigned"].get("m.relations") - self.assertIn(RelationTypes.REPLACE, relations_dict) + # Request the room messages. + channel = self.make_request( + "GET", + f"/rooms/{self.room}/messages?dir=b", + access_token=self.user_token, + ) + self.assertEquals(200, channel.code, channel.json_body) + assert_bundle(self._find_event_in_chunk(channel.json_body["chunk"])) - m_replace_dict = relations_dict[RelationTypes.REPLACE] - for key in ["event_id", "sender", "origin_server_ts"]: - self.assertIn(key, m_replace_dict) + # Request the room context. + channel = self.make_request( + "GET", + f"/rooms/{self.room}/context/{self.parent_id}", + access_token=self.user_token, + ) + self.assertEquals(200, channel.code, channel.json_body) + assert_bundle(channel.json_body["event"]) - self.assert_dict( - {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict + # Request sync, but limit the timeline so it becomes limited (and includes + # bundled aggregations). 
+ filter = urllib.parse.quote_plus(
+ '{"room": {"timeline": {"limit": 2}}}'.encode()
+ )
+ channel = self.make_request(
+ "GET", f"/sync?filter={filter}", access_token=self.user_token
 )
+ self.assertEquals(200, channel.code, channel.json_body)
+ room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"]
+ self.assertTrue(room_timeline["limited"])
+ assert_bundle(self._find_event_in_chunk(room_timeline["events"]))

 def test_multi_edit(self):
 """Test that multiple edits, including attempts by people who
@@ -1102,6 +1130,16 @@ def test_unknown_relations(self):
 self.assertEquals(200, channel.code, channel.json_body)
 self.assertEquals(channel.json_body["chunk"], [])

+ def _find_event_in_chunk(self, events: List[JsonDict]) -> JsonDict:
+ """
+ Find the parent event in a chunk of events and assert that it has the proper bundled aggregations.
+ """
+ for event in events:
+ if event["event_id"] == self.parent_id:
+ return event
+
+ raise AssertionError(f"Event {self.parent_id} not found in chunk")
+
 def _send_relation(
 self,
 relation_type: str,
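The sync assertions above rely on an inline filter that caps the timeline at two events, which marks the room timeline as `limited` and exercises the code path where bundled aggregations are attached during `/sync`. Outside the test harness, the equivalent request looks roughly like this (a sketch using the `requests` library, with placeholder homeserver URL and access token):

    import json
    import urllib.parse

    import requests

    BASE_URL = "https://homeserver.example"  # placeholder
    ACCESS_TOKEN = "..."                     # placeholder

    # Cap the timeline at two events; the response is then marked `limited`,
    # and any returned event that has relations should carry its bundle under
    # unsigned["m.relations"].
    inline_filter = urllib.parse.quote_plus(
        json.dumps({"room": {"timeline": {"limit": 2}}})
    )
    resp = requests.get(
        f"{BASE_URL}/_matrix/client/v3/sync?filter={inline_filter}",
        headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
    )
    resp.raise_for_status()
    for room in resp.json().get("rooms", {}).get("join", {}).values():
        for event in room["timeline"]["events"]:
            relations = event.get("unsigned", {}).get("m.relations")
            if relations:
                print(event["event_id"], relations)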
diff --git a/synapse/events/utils.py b/synapse/events/utils.py index de0e0c17312a..918adeecf8cd 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -402,7 +402,7 @@ def serialize_event( if bundle_aggregations: event_aggregations = bundle_aggregations.get(event.event_id) if event_aggregations: - self._injected_bundled_aggregations( + self._inject_bundled_aggregations( event, time_now, bundle_aggregations[event.event_id], @@ -411,7 +411,7 @@ def serialize_event( return serialized_event - def _injected_bundled_aggregations( + def _inject_bundled_aggregations( self, event: EventBase, time_now: int, diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index 2e714ac87bfe..efe25fe7ebf7 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -744,20 +744,15 @@ async def on_GET( ) time_now = self.clock.time_msec() + aggregations = results.pop("aggregations", None) results["events_before"] = self._event_serializer.serialize_events( - results["events_before"], - time_now, - bundle_aggregations=results["aggregations"], + results["events_before"], time_now, bundle_aggregations=aggregations ) results["event"] = self._event_serializer.serialize_event( - results["event"], - time_now, - bundle_aggregations=results["aggregations"], + results["event"], time_now, bundle_aggregations=aggregations ) results["events_after"] = self._event_serializer.serialize_events( - results["events_after"], - time_now, - bundle_aggregations=results["aggregations"], + results["events_after"], time_now, bundle_aggregations=aggregations ) results["state"] = self._event_serializer.serialize_events( results["state"], time_now diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 31fd329a3862..90bb9142a098 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -714,18 +714,15 @@ async def on_GET( raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND) time_now = self.clock.time_msec() + aggregations = results.pop("aggregations", None) results["events_before"] = self._event_serializer.serialize_events( - results["events_before"], - time_now, - bundle_aggregations=results["aggregations"], + results["events_before"], time_now, bundle_aggregations=aggregations ) results["event"] = self._event_serializer.serialize_event( - results["event"], time_now, bundle_aggregations=results["aggregations"] + results["event"], time_now, bundle_aggregations=aggregations ) results["events_after"] = self._event_serializer.serialize_events( - results["events_after"], - time_now, - bundle_aggregations=results["aggregations"], + results["events_after"], time_now, bundle_aggregations=aggregations ) results["state"] = self._event_serializer.serialize_events( results["state"], time_now diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index 4b20ab0e3e57..c9b220e73d1a 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -21,6 +21,7 @@ from synapse.api.constants import EventTypes, RelationTypes from synapse.rest import admin from synapse.rest.client import login, register, relations, room, sync +from synapse.types import JsonDict from tests import unittest from tests.server import FakeChannel @@ -454,7 +455,14 @@ def test_aggregation_must_be_annotation(self): @unittest.override_config({"experimental_features": {"msc3440_enabled": True}}) def test_bundled_aggregations(self): - """Test that annotations, references, and threads get correctly bundled.""" + """ + Test that 
annotations, references, and threads get correctly bundled. + + Note that this doesn't test against /relations since only thread relations + get bundled via that API. See test_aggregation_get_event_for_thread. + + See test_edit for a similar test for edits. + """ # Setup by sending a variety of relations. channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") self.assertEquals(200, channel.code, channel.json_body) @@ -482,12 +490,13 @@ def test_bundled_aggregations(self): self.assertEquals(200, channel.code, channel.json_body) thread_2 = channel.json_body["event_id"] - def assert_bundle(actual): + def assert_bundle(event_json: JsonDict) -> None: """Assert the expected values of the bundled aggregations.""" + relations_dict = event_json["unsigned"].get("m.relations") # Ensure the fields are as expected. self.assertCountEqual( - actual.keys(), + relations_dict.keys(), ( RelationTypes.ANNOTATION, RelationTypes.REFERENCE, @@ -503,20 +512,20 @@ def assert_bundle(actual): {"type": "m.reaction", "key": "b", "count": 1}, ] }, - actual[RelationTypes.ANNOTATION], + relations_dict[RelationTypes.ANNOTATION], ) self.assertEquals( {"chunk": [{"event_id": reply_1}, {"event_id": reply_2}]}, - actual[RelationTypes.REFERENCE], + relations_dict[RelationTypes.REFERENCE], ) self.assertEquals( 2, - actual[RelationTypes.THREAD].get("count"), + relations_dict[RelationTypes.THREAD].get("count"), ) self.assertTrue( - actual[RelationTypes.THREAD].get("current_user_participated") + relations_dict[RelationTypes.THREAD].get("current_user_participated") ) # The latest thread event has some fields that don't matter. self.assert_dict( @@ -533,20 +542,9 @@ def assert_bundle(actual): "type": "m.room.test", "user_id": self.user_id, }, - actual[RelationTypes.THREAD].get("latest_event"), + relations_dict[RelationTypes.THREAD].get("latest_event"), ) - def _find_and_assert_event(events): - """ - Find the parent event in a chunk of events and assert that it has the proper bundled aggregations. - """ - for event in events: - if event["event_id"] == self.parent_id: - break - else: - raise AssertionError(f"Event {self.parent_id} not found in chunk") - assert_bundle(event["unsigned"].get("m.relations")) - # Request the event directly. channel = self.make_request( "GET", @@ -554,7 +552,7 @@ def _find_and_assert_event(events): access_token=self.user_token, ) self.assertEquals(200, channel.code, channel.json_body) - assert_bundle(channel.json_body["unsigned"].get("m.relations")) + assert_bundle(channel.json_body) # Request the room messages. channel = self.make_request( @@ -563,7 +561,7 @@ def _find_and_assert_event(events): access_token=self.user_token, ) self.assertEquals(200, channel.code, channel.json_body) - _find_and_assert_event(channel.json_body["chunk"]) + assert_bundle(self._find_event_in_chunk(channel.json_body["chunk"])) # Request the room context. channel = self.make_request( @@ -572,17 +570,14 @@ def _find_and_assert_event(events): access_token=self.user_token, ) self.assertEquals(200, channel.code, channel.json_body) - assert_bundle(channel.json_body["event"]["unsigned"].get("m.relations")) + assert_bundle(channel.json_body["event"]) # Request sync. 
channel = self.make_request("GET", "/sync", access_token=self.user_token) self.assertEquals(200, channel.code, channel.json_body) room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] self.assertTrue(room_timeline["limited"]) - _find_and_assert_event(room_timeline["events"]) - - # Note that /relations is tested separately in test_aggregation_get_event_for_thread - # since it needs different data configured. + self._find_event_in_chunk(room_timeline["events"]) def test_aggregation_get_event_for_annotation(self): """Test that annotations do not get bundled aggregations included @@ -777,25 +772,58 @@ def test_edit(self): edit_event_id = channel.json_body["event_id"] + def assert_bundle(event_json: JsonDict) -> None: + """Assert the expected values of the bundled aggregations.""" + relations_dict = event_json["unsigned"].get("m.relations") + self.assertIn(RelationTypes.REPLACE, relations_dict) + + m_replace_dict = relations_dict[RelationTypes.REPLACE] + for key in ["event_id", "sender", "origin_server_ts"]: + self.assertIn(key, m_replace_dict) + + self.assert_dict( + {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict + ) + channel = self.make_request( "GET", - "/rooms/%s/event/%s" % (self.room, self.parent_id), + f"/rooms/{self.room}/event/{self.parent_id}", access_token=self.user_token, ) self.assertEquals(200, channel.code, channel.json_body) - self.assertEquals(channel.json_body["content"], new_body) + assert_bundle(channel.json_body) - relations_dict = channel.json_body["unsigned"].get("m.relations") - self.assertIn(RelationTypes.REPLACE, relations_dict) + # Request the room messages. + channel = self.make_request( + "GET", + f"/rooms/{self.room}/messages?dir=b", + access_token=self.user_token, + ) + self.assertEquals(200, channel.code, channel.json_body) + assert_bundle(self._find_event_in_chunk(channel.json_body["chunk"])) - m_replace_dict = relations_dict[RelationTypes.REPLACE] - for key in ["event_id", "sender", "origin_server_ts"]: - self.assertIn(key, m_replace_dict) + # Request the room context. + channel = self.make_request( + "GET", + f"/rooms/{self.room}/context/{self.parent_id}", + access_token=self.user_token, + ) + self.assertEquals(200, channel.code, channel.json_body) + assert_bundle(channel.json_body["event"]) - self.assert_dict( - {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict + # Request sync, but limit the timeline so it becomes limited (and includes + # bundled aggregations). + filter = urllib.parse.quote_plus( + '{"room": {"timeline": {"limit": 2}}}'.encode() + ) + channel = self.make_request( + "GET", f"/sync?filter={filter}", access_token=self.user_token ) + self.assertEquals(200, channel.code, channel.json_body) + room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] + self.assertTrue(room_timeline["limited"]) + assert_bundle(self._find_event_in_chunk(room_timeline["events"])) def test_multi_edit(self): """Test that multiple edits, including attempts by people who @@ -1102,6 +1130,16 @@ def test_unknown_relations(self): self.assertEquals(200, channel.code, channel.json_body) self.assertEquals(channel.json_body["chunk"], []) + def _find_event_in_chunk(self, events: List[JsonDict]) -> JsonDict: + """ + Find the parent event in a chunk of events and assert that it has the proper bundled aggregations. 
+ """ + for event in events: + if event["event_id"] == self.parent_id: + return event + + raise AssertionError(f"Event {self.parent_id} not found in chunk") + def _send_relation( self, relation_type: str, From 266df5c90811a48f41ae683d184dc5365728b714 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Fri, 21 Jan 2022 10:47:03 +0000 Subject: [PATCH 132/474] 1.51.0rc1 --- CHANGES.md | 76 +++++++++++++++++++++++++++++++++++++++ changelog.d/11530.bugfix | 2 -- changelog.d/11561.feature | 1 - changelog.d/11576.feature | 1 - changelog.d/11577.feature | 1 - changelog.d/11587.bugfix | 1 - changelog.d/11593.bugfix | 1 - changelog.d/11612.bugfix | 1 - changelog.d/11659.bugfix | 1 - changelog.d/11667.bugfix | 1 - changelog.d/11669.bugfix | 1 - changelog.d/11672.feature | 1 - changelog.d/11675.feature | 1 - changelog.d/11682.removal | 1 - changelog.d/11685.misc | 1 - changelog.d/11686.doc | 1 - changelog.d/11691.misc | 1 - changelog.d/11692.misc | 1 - changelog.d/11693.misc | 1 - changelog.d/11695.bugfix | 1 - changelog.d/11699.misc | 1 - changelog.d/11701.misc | 1 - changelog.d/11702.misc | 1 - changelog.d/11707.misc | 1 - changelog.d/11710.bugfix | 1 - changelog.d/11714.misc | 1 - changelog.d/11715.doc | 1 - changelog.d/11716.misc | 1 - changelog.d/11718.misc | 1 - changelog.d/11723.misc | 1 - changelog.d/11724.misc | 1 - changelog.d/11724.removal | 1 - changelog.d/11725.doc | 1 - changelog.d/11735.doc | 1 - changelog.d/11737.bugfix | 1 - changelog.d/11739.doc | 1 - changelog.d/11740.doc | 1 - changelog.d/11742.misc | 1 - changelog.d/11744.misc | 1 - changelog.d/11745.bugfix | 1 - changelog.d/11749.feature | 1 - changelog.d/11755.doc | 1 - changelog.d/11757.feature | 1 - changelog.d/11760.misc | 1 - changelog.d/11761.misc | 1 - changelog.d/11765.misc | 1 - changelog.d/11766.misc | 1 - changelog.d/11768.misc | 1 - changelog.d/11770.feature | 1 - changelog.d/11771.misc | 1 - changelog.d/11774.misc | 1 - changelog.d/11775.bugfix | 1 - changelog.d/11776.misc | 1 - changelog.d/11781.doc | 1 - changelog.d/11783.misc | 1 - changelog.d/11786.bugfix | 1 - changelog.d/11791.bugfix | 1 - debian/changelog | 6 ++++ synapse/__init__.py | 2 +- 59 files changed, 83 insertions(+), 58 deletions(-) delete mode 100644 changelog.d/11530.bugfix delete mode 100644 changelog.d/11561.feature delete mode 100644 changelog.d/11576.feature delete mode 100644 changelog.d/11577.feature delete mode 100644 changelog.d/11587.bugfix delete mode 100644 changelog.d/11593.bugfix delete mode 100644 changelog.d/11612.bugfix delete mode 100644 changelog.d/11659.bugfix delete mode 100644 changelog.d/11667.bugfix delete mode 100644 changelog.d/11669.bugfix delete mode 100644 changelog.d/11672.feature delete mode 100644 changelog.d/11675.feature delete mode 100644 changelog.d/11682.removal delete mode 100644 changelog.d/11685.misc delete mode 100644 changelog.d/11686.doc delete mode 100644 changelog.d/11691.misc delete mode 100644 changelog.d/11692.misc delete mode 100644 changelog.d/11693.misc delete mode 100644 changelog.d/11695.bugfix delete mode 100644 changelog.d/11699.misc delete mode 100644 changelog.d/11701.misc delete mode 100644 changelog.d/11702.misc delete mode 100644 changelog.d/11707.misc delete mode 100644 changelog.d/11710.bugfix delete mode 100644 changelog.d/11714.misc delete mode 100644 changelog.d/11715.doc delete mode 100644 changelog.d/11716.misc delete mode 100644 changelog.d/11718.misc delete mode 100644 changelog.d/11723.misc delete mode 100644 changelog.d/11724.misc delete mode 100644 
changelog.d/11724.removal delete mode 100644 changelog.d/11725.doc delete mode 100644 changelog.d/11735.doc delete mode 100644 changelog.d/11737.bugfix delete mode 100644 changelog.d/11739.doc delete mode 100644 changelog.d/11740.doc delete mode 100644 changelog.d/11742.misc delete mode 100644 changelog.d/11744.misc delete mode 100644 changelog.d/11745.bugfix delete mode 100644 changelog.d/11749.feature delete mode 100644 changelog.d/11755.doc delete mode 100644 changelog.d/11757.feature delete mode 100644 changelog.d/11760.misc delete mode 100644 changelog.d/11761.misc delete mode 100644 changelog.d/11765.misc delete mode 100644 changelog.d/11766.misc delete mode 100644 changelog.d/11768.misc delete mode 100644 changelog.d/11770.feature delete mode 100644 changelog.d/11771.misc delete mode 100644 changelog.d/11774.misc delete mode 100644 changelog.d/11775.bugfix delete mode 100644 changelog.d/11776.misc delete mode 100644 changelog.d/11781.doc delete mode 100644 changelog.d/11783.misc delete mode 100644 changelog.d/11786.bugfix delete mode 100644 changelog.d/11791.bugfix diff --git a/CHANGES.md b/CHANGES.md index ced1dcc0db80..2ca978bae186 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,79 @@ +Synapse 1.51.0rc1 (2022-01-21) +============================== + +Features +-------- + +- Add `track_puppeted_user_ips` config flag to record client IP addresses against puppeted users, and include the puppeted users in monthly active user counts. ([\#11561](https://github.com/matrix-org/synapse/issues/11561), [\#11749](https://github.com/matrix-org/synapse/issues/11749), [\#11757](https://github.com/matrix-org/synapse/issues/11757)) +- Remove the `"password_hash"` field from the response dictionaries of the [Users Admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html). ([\#11576](https://github.com/matrix-org/synapse/issues/11576)) +- Include whether the requesting user has participated in a thread when generating a summary for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440). ([\#11577](https://github.com/matrix-org/synapse/issues/11577)) +- Return an `M_FORBIDDEN` error code instead of `M_UNKNOWN` when a spam checker module prevents a user from creating a room. ([\#11672](https://github.com/matrix-org/synapse/issues/11672)) +- Add a flag to the `synapse_review_recent_signups` script to ignore and filter appservice users. ([\#11675](https://github.com/matrix-org/synapse/issues/11675), [\#11770](https://github.com/matrix-org/synapse/issues/11770)) + + +Bugfixes +-------- + +- Fix a long-standing issue which could cause Synapse to incorrectly accept data in the unsigned field of events + received over federation. ([\#11530](https://github.com/matrix-org/synapse/issues/11530)) +- Fix a long-standing bug where Synapse wouldn't cache a response indicating that a remote user has no devices. ([\#11587](https://github.com/matrix-org/synapse/issues/11587)) +- Fix an error in to get federation status of a destination server even if no error has occurred. This admin API was new introduced in Synapse 1.49.0. ([\#11593](https://github.com/matrix-org/synapse/issues/11593)) +- Include the bundled aggregations in the `/sync` response, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). 
([\#11612](https://github.com/matrix-org/synapse/issues/11612), [\#11659](https://github.com/matrix-org/synapse/issues/11659), [\#11791](https://github.com/matrix-org/synapse/issues/11791))
+- Fix `/_matrix/client/v1/room/{roomId}/hierarchy` endpoint returning incorrect fields which have been present since Synapse 1.49.0. ([\#11667](https://github.com/matrix-org/synapse/issues/11667))
+- Fix preview of some gif URLs (like tenor.com). Contributed by Philippe Daouadi. ([\#11669](https://github.com/matrix-org/synapse/issues/11669))
+- Fix a bug where the only the first 50 rooms from a space were returned from the `/hierarchy` API. This has existed since the introduction of the API in Synapse v1.41.0. ([\#11695](https://github.com/matrix-org/synapse/issues/11695))
+- Fix a bug introduced in Synapse v1.18.0 where password reset and address validation emails would not be sent if their subject was configured to use the 'app' template variable. Contributed by @br4nnigan. ([\#11710](https://github.com/matrix-org/synapse/issues/11710), [\#11745](https://github.com/matrix-org/synapse/issues/11745))
+- Make the list rooms admin api sort stable. Contributed by Daniël Sonck. ([\#11737](https://github.com/matrix-org/synapse/issues/11737))
+- Fix a long-standing bug where space hierarchy over federation would only work correctly some of the time. ([\#11775](https://github.com/matrix-org/synapse/issues/11775))
+- Fix a bug introduced in Synapse 1.46.0 that prevented `on_logged_out` module callbacks from being correctly awaited by Synapse. ([\#11786](https://github.com/matrix-org/synapse/issues/11786))
+
+
+Improved Documentation
+----------------------
+
+- Warn against using a Let's Encrypt certificate for TLS/DTLS TURN server client connections, and suggest using ZeroSSL certificate instead. This bypasses client-side connectivity errors caused by WebRTC libraries that reject Let's Encrypt certificates. Contributed by @AndrewFerr. ([\#11686](https://github.com/matrix-org/synapse/issues/11686))
+- Document the new `SYNAPSE_TEST_PERSIST_SQLITE_DB` environment variable in the contributing guide. ([\#11715](https://github.com/matrix-org/synapse/issues/11715))
+- Document that now the minimum supported PostgreSQL version is 10. ([\#11725](https://github.com/matrix-org/synapse/issues/11725))
+- Fix typo in demo docs: differnt. ([\#11735](https://github.com/matrix-org/synapse/issues/11735))
+- Update room spec url in config files. ([\#11739](https://github.com/matrix-org/synapse/issues/11739))
+- Mention python3-venv and libpq-dev dependencies in contribution guide. ([\#11740](https://github.com/matrix-org/synapse/issues/11740))
+- Update documentation for configuring login with Facebook. ([\#11755](https://github.com/matrix-org/synapse/issues/11755))
+- Update installation instructions to note that Python 3.6 is no longer supported. ([\#11781](https://github.com/matrix-org/synapse/issues/11781))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the unstable `/send_relation` endpoint. ([\#11682](https://github.com/matrix-org/synapse/issues/11682))
+- Remove `python_twisted_reactor_pending_calls` prometheus metric. ([\#11724](https://github.com/matrix-org/synapse/issues/11724))
+
+
+Internal Changes
+----------------
+
+- Run `pyupgrade --py37-plus --keep-percent-format` on Synapse. ([\#11685](https://github.com/matrix-org/synapse/issues/11685))
+- Use buildkit's cache feature to speed up docker builds.
([\#11691](https://github.com/matrix-org/synapse/issues/11691))
+- Use `auto_attribs` and native type hints for attrs classes. ([\#11692](https://github.com/matrix-org/synapse/issues/11692), [\#11768](https://github.com/matrix-org/synapse/issues/11768))
+- Remove debug logging for #4422, which has been closed since Synapse 0.99. ([\#11693](https://github.com/matrix-org/synapse/issues/11693))
+- Remove fallback code for Python 2. ([\#11699](https://github.com/matrix-org/synapse/issues/11699))
+- Add a test for [an edge case](https://github.com/matrix-org/synapse/pull/11532#discussion_r769104461) in the `/sync` logic. ([\#11701](https://github.com/matrix-org/synapse/issues/11701))
+- Add the option to write sqlite test dbs to disk when running tests. ([\#11702](https://github.com/matrix-org/synapse/issues/11702))
+- Improve Complement test output for GitHub Actions. ([\#11707](https://github.com/matrix-org/synapse/issues/11707))
+- Fix a typechecker problem related to our (ab)use of `nacl.signing.SigningKey`s. ([\#11714](https://github.com/matrix-org/synapse/issues/11714))
+- Fix docstring on `add_account_data_for_user`. ([\#11716](https://github.com/matrix-org/synapse/issues/11716))
+- Complement environment variable name change and update `.gitignore`. ([\#11718](https://github.com/matrix-org/synapse/issues/11718))
+- Simplify calculation of prometheus metrics for garbage collection. ([\#11723](https://github.com/matrix-org/synapse/issues/11723))
+- Improve accuracy of `python_twisted_reactor_tick_time` prometheus metric. ([\#11724](https://github.com/matrix-org/synapse/issues/11724), [\#11771](https://github.com/matrix-org/synapse/issues/11771))
+- Minor efficiency improvements when inserting many values into the database. ([\#11742](https://github.com/matrix-org/synapse/issues/11742))
+- Invite PR authors to give themselves credit in the changelog. ([\#11744](https://github.com/matrix-org/synapse/issues/11744))
+- Add optional debugging to investigate [issue 8631](https://github.com/matrix-org/synapse/issues/8631). ([\#11760](https://github.com/matrix-org/synapse/issues/11760))
+- Remove `log_function` utility function and its uses. ([\#11761](https://github.com/matrix-org/synapse/issues/11761))
+- Add a unit test that checks both `client` and `webclient` resources will function when simultaneously enabled. ([\#11765](https://github.com/matrix-org/synapse/issues/11765))
+- Allow overriding complement commit using `COMPLEMENT_REF`. ([\#11766](https://github.com/matrix-org/synapse/issues/11766))
+- Deprecate support for `webclient` listeners and non-HTTP(S) `web_client_location` configuration. ([\#11774](https://github.com/matrix-org/synapse/issues/11774), [\#11783](https://github.com/matrix-org/synapse/issues/11783))
+- Add some comments and type annotations for `_update_outliers_txn`. ([\#11776](https://github.com/matrix-org/synapse/issues/11776))
+
+
 Synapse 1.50.1 (2022-01-18)
 ===========================
 
diff --git a/changelog.d/11530.bugfix b/changelog.d/11530.bugfix deleted file mode 100644 index 7ea9ba4e4988..000000000000 --- a/changelog.d/11530.bugfix +++ /dev/null @@ -1,2 +0,0 @@ -Fix a long-standing issue which could cause Synapse to incorrectly accept data in the unsigned field of events -received over federation.
\ No newline at end of file diff --git a/changelog.d/11561.feature b/changelog.d/11561.feature deleted file mode 100644 index 3d4f2159c0b3..000000000000 --- a/changelog.d/11561.feature +++ /dev/null @@ -1 +0,0 @@ -Add `track_puppeted_user_ips` config flag to record client IP addresses against puppeted users, and include the puppeted users in monthly active user counts. diff --git a/changelog.d/11576.feature b/changelog.d/11576.feature deleted file mode 100644 index 5be836ae022d..000000000000 --- a/changelog.d/11576.feature +++ /dev/null @@ -1 +0,0 @@ -Remove the `"password_hash"` field from the response dictionaries of the [Users Admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html). \ No newline at end of file diff --git a/changelog.d/11577.feature b/changelog.d/11577.feature deleted file mode 100644 index f9c8a0d5f40a..000000000000 --- a/changelog.d/11577.feature +++ /dev/null @@ -1 +0,0 @@ -Include whether the requesting user has participated in a thread when generating a summary for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440). diff --git a/changelog.d/11587.bugfix b/changelog.d/11587.bugfix deleted file mode 100644 index ad2b83edf7c7..000000000000 --- a/changelog.d/11587.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where Synapse wouldn't cache a response indicating that a remote user has no devices. \ No newline at end of file diff --git a/changelog.d/11593.bugfix b/changelog.d/11593.bugfix deleted file mode 100644 index 963fd0e58ecf..000000000000 --- a/changelog.d/11593.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix an error in to get federation status of a destination server even if no error has occurred. This admin API was new introduced in Synapse 1.49.0. diff --git a/changelog.d/11612.bugfix b/changelog.d/11612.bugfix deleted file mode 100644 index 842f6892fda3..000000000000 --- a/changelog.d/11612.bugfix +++ /dev/null @@ -1 +0,0 @@ -Include the bundled aggregations in the `/sync` response, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). diff --git a/changelog.d/11659.bugfix b/changelog.d/11659.bugfix deleted file mode 100644 index 842f6892fda3..000000000000 --- a/changelog.d/11659.bugfix +++ /dev/null @@ -1 +0,0 @@ -Include the bundled aggregations in the `/sync` response, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). diff --git a/changelog.d/11667.bugfix b/changelog.d/11667.bugfix deleted file mode 100644 index bf65fd4c8b27..000000000000 --- a/changelog.d/11667.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix `/_matrix/client/v1/room/{roomId}/hierarchy` endpoint returning incorrect fields which have been present since Synapse 1.49.0. diff --git a/changelog.d/11669.bugfix b/changelog.d/11669.bugfix deleted file mode 100644 index 10d913aaceed..000000000000 --- a/changelog.d/11669.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix preview of some gif URLs (like tenor.com). Contributed by Philippe Daouadi. diff --git a/changelog.d/11672.feature b/changelog.d/11672.feature deleted file mode 100644 index ce8b3e9547f1..000000000000 --- a/changelog.d/11672.feature +++ /dev/null @@ -1 +0,0 @@ -Return an `M_FORBIDDEN` error code instead of `M_UNKNOWN` when a spam checker module prevents a user from creating a room. diff --git a/changelog.d/11675.feature b/changelog.d/11675.feature deleted file mode 100644 index 9a276f9542cf..000000000000 --- a/changelog.d/11675.feature +++ /dev/null @@ -1 +0,0 @@ -Add a flag to the `synapse_review_recent_signups` script to ignore and filter appservice users. 
diff --git a/changelog.d/11682.removal b/changelog.d/11682.removal deleted file mode 100644 index 50bdf35b2003..000000000000 --- a/changelog.d/11682.removal +++ /dev/null @@ -1 +0,0 @@ -Remove the unstable `/send_relation` endpoint. diff --git a/changelog.d/11685.misc b/changelog.d/11685.misc deleted file mode 100644 index c4566b2012e3..000000000000 --- a/changelog.d/11685.misc +++ /dev/null @@ -1 +0,0 @@ -Run `pyupgrade --py37-plus --keep-percent-format` on Synapse. diff --git a/changelog.d/11686.doc b/changelog.d/11686.doc deleted file mode 100644 index 41bc7799d46a..000000000000 --- a/changelog.d/11686.doc +++ /dev/null @@ -1 +0,0 @@ -Warn against using a Let's Encrypt certificate for TLS/DTLS TURN server client connections, and suggest using ZeroSSL certificate instead. This bypasses client-side connectivity errors caused by WebRTC libraries that reject Let's Encrypt certificates. Contributed by @AndrewFerr. diff --git a/changelog.d/11691.misc b/changelog.d/11691.misc deleted file mode 100644 index 383d0b306488..000000000000 --- a/changelog.d/11691.misc +++ /dev/null @@ -1 +0,0 @@ -Use buildkit's cache feature to speed up docker builds. diff --git a/changelog.d/11692.misc b/changelog.d/11692.misc deleted file mode 100644 index 0cdfca54e7d6..000000000000 --- a/changelog.d/11692.misc +++ /dev/null @@ -1 +0,0 @@ -Use `auto_attribs` and native type hints for attrs classes. diff --git a/changelog.d/11693.misc b/changelog.d/11693.misc deleted file mode 100644 index 521a1796b8eb..000000000000 --- a/changelog.d/11693.misc +++ /dev/null @@ -1 +0,0 @@ -Remove debug logging for #4422, which has been closed since Synapse 0.99. \ No newline at end of file diff --git a/changelog.d/11695.bugfix b/changelog.d/11695.bugfix deleted file mode 100644 index 7799aefb8258..000000000000 --- a/changelog.d/11695.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug where the only the first 50 rooms from a space were returned from the `/hierarchy` API. This has existed since the introduction of the API in Synapse v1.41.0. diff --git a/changelog.d/11699.misc b/changelog.d/11699.misc deleted file mode 100644 index ffae5f296061..000000000000 --- a/changelog.d/11699.misc +++ /dev/null @@ -1 +0,0 @@ -Remove fallback code for Python 2. diff --git a/changelog.d/11701.misc b/changelog.d/11701.misc deleted file mode 100644 index 68905e041240..000000000000 --- a/changelog.d/11701.misc +++ /dev/null @@ -1 +0,0 @@ -Add a test for [an edge case](https://github.com/matrix-org/synapse/pull/11532#discussion_r769104461) in the `/sync` logic. \ No newline at end of file diff --git a/changelog.d/11702.misc b/changelog.d/11702.misc deleted file mode 100644 index fc1069cae034..000000000000 --- a/changelog.d/11702.misc +++ /dev/null @@ -1 +0,0 @@ -Add the option to write sqlite test dbs to disk when running tests. \ No newline at end of file diff --git a/changelog.d/11707.misc b/changelog.d/11707.misc deleted file mode 100644 index ef1e01cac82f..000000000000 --- a/changelog.d/11707.misc +++ /dev/null @@ -1 +0,0 @@ -Improve Complement test output for GitHub Actions. diff --git a/changelog.d/11710.bugfix b/changelog.d/11710.bugfix deleted file mode 100644 index 6521a37f6eb9..000000000000 --- a/changelog.d/11710.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse v1.18.0 where password reset and address validation emails would not be sent if their subject was configured to use the 'app' template variable. Contributed by @br4nnigan.
diff --git a/changelog.d/11714.misc b/changelog.d/11714.misc deleted file mode 100644 index 7f39bf0e3dca..000000000000 --- a/changelog.d/11714.misc +++ /dev/null @@ -1 +0,0 @@ -Fix a typechecker problem related to our (ab)use of `nacl.signing.SigningKey`s. \ No newline at end of file diff --git a/changelog.d/11715.doc b/changelog.d/11715.doc deleted file mode 100644 index 32b7c10b0bf9..000000000000 --- a/changelog.d/11715.doc +++ /dev/null @@ -1 +0,0 @@ -Document the new `SYNAPSE_TEST_PERSIST_SQLITE_DB` environment variable in the contributing guide. diff --git a/changelog.d/11716.misc b/changelog.d/11716.misc deleted file mode 100644 index 08f731049806..000000000000 --- a/changelog.d/11716.misc +++ /dev/null @@ -1 +0,0 @@ -Fix docstring on `add_account_data_for_user`. \ No newline at end of file diff --git a/changelog.d/11718.misc b/changelog.d/11718.misc deleted file mode 100644 index 91dc5b5874f9..000000000000 --- a/changelog.d/11718.misc +++ /dev/null @@ -1 +0,0 @@ -Complement environment variable name change and update `.gitignore`. diff --git a/changelog.d/11723.misc b/changelog.d/11723.misc deleted file mode 100644 index f99e02070a93..000000000000 --- a/changelog.d/11723.misc +++ /dev/null @@ -1 +0,0 @@ -Simplify calculation of prometheus metrics for garbage collection. diff --git a/changelog.d/11724.misc b/changelog.d/11724.misc deleted file mode 100644 index e9d5dae857b2..000000000000 --- a/changelog.d/11724.misc +++ /dev/null @@ -1 +0,0 @@ -Improve accuracy of `python_twisted_reactor_tick_time` prometheus metric. diff --git a/changelog.d/11724.removal b/changelog.d/11724.removal deleted file mode 100644 index 088c3ff31ff8..000000000000 --- a/changelog.d/11724.removal +++ /dev/null @@ -1 +0,0 @@ -Remove `python_twisted_reactor_pending_calls` prometheus metric. diff --git a/changelog.d/11725.doc b/changelog.d/11725.doc deleted file mode 100644 index 46eb9b814fc3..000000000000 --- a/changelog.d/11725.doc +++ /dev/null @@ -1 +0,0 @@ -Document that now the minimum supported PostgreSQL version is 10. diff --git a/changelog.d/11735.doc b/changelog.d/11735.doc deleted file mode 100644 index d8822f6b52aa..000000000000 --- a/changelog.d/11735.doc +++ /dev/null @@ -1 +0,0 @@ -Fix typo in demo docs: differnt. diff --git a/changelog.d/11737.bugfix b/changelog.d/11737.bugfix deleted file mode 100644 index a293d1cfec0c..000000000000 --- a/changelog.d/11737.bugfix +++ /dev/null @@ -1 +0,0 @@ -Make the list rooms admin api sort stable. Contributed by Daniël Sonck. \ No newline at end of file diff --git a/changelog.d/11739.doc b/changelog.d/11739.doc deleted file mode 100644 index 3d64f473f535..000000000000 --- a/changelog.d/11739.doc +++ /dev/null @@ -1 +0,0 @@ -Update room spec url in config files. \ No newline at end of file diff --git a/changelog.d/11740.doc b/changelog.d/11740.doc deleted file mode 100644 index dce080a5e9a1..000000000000 --- a/changelog.d/11740.doc +++ /dev/null @@ -1 +0,0 @@ -Mention python3-venv and libpq-dev dependencies in contribution guide. diff --git a/changelog.d/11742.misc b/changelog.d/11742.misc deleted file mode 100644 index f65ccdf30a5f..000000000000 --- a/changelog.d/11742.misc +++ /dev/null @@ -1 +0,0 @@ -Minor efficiency improvements when inserting many values into the database. diff --git a/changelog.d/11744.misc b/changelog.d/11744.misc deleted file mode 100644 index b7df14657ac1..000000000000 --- a/changelog.d/11744.misc +++ /dev/null @@ -1 +0,0 @@ -Invite PR authors to give themselves credit in the changelog. 
diff --git a/changelog.d/11745.bugfix b/changelog.d/11745.bugfix deleted file mode 100644 index 6521a37f6eb9..000000000000 --- a/changelog.d/11745.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse v1.18.0 where password reset and address validation emails would not be sent if their subject was configured to use the 'app' template variable. Contributed by @br4nnigan. diff --git a/changelog.d/11749.feature b/changelog.d/11749.feature deleted file mode 100644 index 3d4f2159c0b3..000000000000 --- a/changelog.d/11749.feature +++ /dev/null @@ -1 +0,0 @@ -Add `track_puppeted_user_ips` config flag to record client IP addresses against puppeted users, and include the puppeted users in monthly active user counts. diff --git a/changelog.d/11755.doc b/changelog.d/11755.doc deleted file mode 100644 index 5dd8feea63e7..000000000000 --- a/changelog.d/11755.doc +++ /dev/null @@ -1 +0,0 @@ -Update documentation for configuring login with Facebook. diff --git a/changelog.d/11757.feature b/changelog.d/11757.feature deleted file mode 100644 index 3d4f2159c0b3..000000000000 --- a/changelog.d/11757.feature +++ /dev/null @@ -1 +0,0 @@ -Add `track_puppeted_user_ips` config flag to record client IP addresses against puppeted users, and include the puppeted users in monthly active user counts. diff --git a/changelog.d/11760.misc b/changelog.d/11760.misc deleted file mode 100644 index 6cb1b5dd49ac..000000000000 --- a/changelog.d/11760.misc +++ /dev/null @@ -1 +0,0 @@ -Add optional debugging to investigate [issue 8631](https://github.com/matrix-org/synapse/issues/8631). \ No newline at end of file diff --git a/changelog.d/11761.misc b/changelog.d/11761.misc deleted file mode 100644 index d4d997a7b9bc..000000000000 --- a/changelog.d/11761.misc +++ /dev/null @@ -1 +0,0 @@ -Remove `log_function` utility function and its uses. diff --git a/changelog.d/11765.misc b/changelog.d/11765.misc deleted file mode 100644 index a6c946e45237..000000000000 --- a/changelog.d/11765.misc +++ /dev/null @@ -1 +0,0 @@ -Add a unit test that checks both `client` and `webclient` resources will function when simultaneously enabled. \ No newline at end of file diff --git a/changelog.d/11766.misc b/changelog.d/11766.misc deleted file mode 100644 index 3c9e5f95ffcc..000000000000 --- a/changelog.d/11766.misc +++ /dev/null @@ -1 +0,0 @@ -Allow overriding complement commit using `COMPLEMENT_REF`. diff --git a/changelog.d/11768.misc b/changelog.d/11768.misc deleted file mode 100644 index 1cac1f7446f8..000000000000 --- a/changelog.d/11768.misc +++ /dev/null @@ -1 +0,0 @@ -Use `auto_attribs` and native type hints for attrs classes. \ No newline at end of file diff --git a/changelog.d/11770.feature b/changelog.d/11770.feature deleted file mode 100644 index 72777075cb33..000000000000 --- a/changelog.d/11770.feature +++ /dev/null @@ -1 +0,0 @@ -Add a flag to the `synapse_review_recent_signups` script to ignore and filter appservice users. \ No newline at end of file diff --git a/changelog.d/11771.misc b/changelog.d/11771.misc deleted file mode 100644 index e9d5dae857b2..000000000000 --- a/changelog.d/11771.misc +++ /dev/null @@ -1 +0,0 @@ -Improve accuracy of `python_twisted_reactor_tick_time` prometheus metric. diff --git a/changelog.d/11774.misc b/changelog.d/11774.misc deleted file mode 100644 index 136ba57f9410..000000000000 --- a/changelog.d/11774.misc +++ /dev/null @@ -1 +0,0 @@ -Deprecate support for `webclient` listeners and non-HTTP(S) `web_client_location` configuration.
diff --git a/changelog.d/11775.bugfix b/changelog.d/11775.bugfix deleted file mode 100644 index 2c548dbf3096..000000000000 --- a/changelog.d/11775.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where space hierarchy over federation would only work correctly some of the time. diff --git a/changelog.d/11776.misc b/changelog.d/11776.misc deleted file mode 100644 index 572ccda84741..000000000000 --- a/changelog.d/11776.misc +++ /dev/null @@ -1 +0,0 @@ -Add some comments and type annotations for `_update_outliers_txn`. diff --git a/changelog.d/11781.doc b/changelog.d/11781.doc deleted file mode 100644 index b68e861d6772..000000000000 --- a/changelog.d/11781.doc +++ /dev/null @@ -1 +0,0 @@ -Update installation instructions to note that Python 3.6 is no longer supported. diff --git a/changelog.d/11783.misc b/changelog.d/11783.misc deleted file mode 100644 index 136ba57f9410..000000000000 --- a/changelog.d/11783.misc +++ /dev/null @@ -1 +0,0 @@ -Deprecate support for `webclient` listeners and non-HTTP(S) `web_client_location` configuration. diff --git a/changelog.d/11786.bugfix b/changelog.d/11786.bugfix deleted file mode 100644 index 306875f2dd63..000000000000 --- a/changelog.d/11786.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.46.0 that prevented `on_logged_out` module callbacks from being correctly awaited by Synapse. diff --git a/changelog.d/11791.bugfix b/changelog.d/11791.bugfix deleted file mode 100644 index 842f6892fda3..000000000000 --- a/changelog.d/11791.bugfix +++ /dev/null @@ -1 +0,0 @@ -Include the bundled aggregations in the `/sync` response, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). diff --git a/debian/changelog b/debian/changelog index 18983f5da672..a013580e4f74 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.51.0~rc1) stable; urgency=medium + + * New synapse release 1.51.0~rc1. + + -- Synapse Packaging team Fri, 21 Jan 2022 10:46:02 +0000 + matrix-synapse-py3 (1.50.1) stable; urgency=medium * New synapse release 1.50.1. diff --git a/synapse/__init__.py b/synapse/__init__.py index 5ec9f941741a..3d0d165f48d2 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.50.1" +__version__ = "1.51.0rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From ea579a478aba4965b74d889612456ae1eb7958b6 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Fri, 21 Jan 2022 11:44:02 +0000 Subject: [PATCH 133/474] Edit the changelog for grammar and clarity --- CHANGES.md | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 2ca978bae186..a5dbf366697a 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,7 +5,6 @@ Features -------- - Add `track_puppeted_user_ips` config flag to record client IP addresses against puppeted users, and include the puppeted users in monthly active user counts. ([\#11561](https://github.com/matrix-org/synapse/issues/11561), [\#11749](https://github.com/matrix-org/synapse/issues/11749), [\#11757](https://github.com/matrix-org/synapse/issues/11757)) -- Remove the `"password_hash"` field from the response dictionaries of the [Users Admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html). 
([\#11576](https://github.com/matrix-org/synapse/issues/11576))
 - Include whether the requesting user has participated in a thread when generating a summary for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440). ([\#11577](https://github.com/matrix-org/synapse/issues/11577))
 - Return an `M_FORBIDDEN` error code instead of `M_UNKNOWN` when a spam checker module prevents a user from creating a room. ([\#11672](https://github.com/matrix-org/synapse/issues/11672))
 - Add a flag to the `synapse_review_recent_signups` script to ignore and filter appservice users. ([\#11675](https://github.com/matrix-org/synapse/issues/11675), [\#11770](https://github.com/matrix-org/synapse/issues/11770))
@@ -17,26 +16,26 @@ Bugfixes
 - Fix a long-standing issue which could cause Synapse to incorrectly accept data in the unsigned field of events
 received over federation. ([\#11530](https://github.com/matrix-org/synapse/issues/11530))
 - Fix a long-standing bug where Synapse wouldn't cache a response indicating that a remote user has no devices. ([\#11587](https://github.com/matrix-org/synapse/issues/11587))
-- Fix an error in to get federation status of a destination server even if no error has occurred. This admin API was new introduced in Synapse 1.49.0. ([\#11593](https://github.com/matrix-org/synapse/issues/11593))
+- Fix an error that occurs whilst trying to get the federation status of a destination server that was working normally. This admin API was newly introduced in Synapse v1.49.0. ([\#11593](https://github.com/matrix-org/synapse/issues/11593))
 - Include the bundled aggregations in the `/sync` response, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#11612](https://github.com/matrix-org/synapse/issues/11612), [\#11659](https://github.com/matrix-org/synapse/issues/11659), [\#11791](https://github.com/matrix-org/synapse/issues/11791))
-- Fix `/_matrix/client/v1/room/{roomId}/hierarchy` endpoint returning incorrect fields which have been present since Synapse 1.49.0. ([\#11667](https://github.com/matrix-org/synapse/issues/11667))
+- Fix the `/_matrix/client/v1/room/{roomId}/hierarchy` endpoint returning incorrect fields which have been present since Synapse 1.49.0. ([\#11667](https://github.com/matrix-org/synapse/issues/11667))
-- Fix preview of some gif URLs (like tenor.com). Contributed by Philippe Daouadi. ([\#11669](https://github.com/matrix-org/synapse/issues/11669))
+- Fix preview of some GIF URLs (like tenor.com). Contributed by Philippe Daouadi. ([\#11669](https://github.com/matrix-org/synapse/issues/11669))
-- Fix a bug where the only the first 50 rooms from a space were returned from the `/hierarchy` API. This has existed since the introduction of the API in Synapse v1.41.0. ([\#11695](https://github.com/matrix-org/synapse/issues/11695))
+- Fix a bug where only the first 50 rooms from a space were returned from the `/hierarchy` API. This has existed since the introduction of the API in Synapse v1.41.0. ([\#11695](https://github.com/matrix-org/synapse/issues/11695))
 - Fix a bug introduced in Synapse v1.18.0 where password reset and address validation emails would not be sent if their subject was configured to use the 'app' template variable. Contributed by @br4nnigan. ([\#11710](https://github.com/matrix-org/synapse/issues/11710), [\#11745](https://github.com/matrix-org/synapse/issues/11745))
-- Make the list rooms admin api sort stable. Contributed by Daniël Sonck. ([\#11737](https://github.com/matrix-org/synapse/issues/11737))
+- Make the 'List Rooms' Admin API sort stable. Contributed by Daniël Sonck. ([\#11737](https://github.com/matrix-org/synapse/issues/11737))
 - Fix a long-standing bug where space hierarchy over federation would only work correctly some of the time. ([\#11775](https://github.com/matrix-org/synapse/issues/11775))
-- Fix a bug introduced in Synapse 1.46.0 that prevented `on_logged_out` module callbacks from being correctly awaited by Synapse. ([\#11786](https://github.com/matrix-org/synapse/issues/11786))
+- Fix a bug introduced in Synapse v1.46.0 that prevented `on_logged_out` module callbacks from being correctly awaited by Synapse. ([\#11786](https://github.com/matrix-org/synapse/issues/11786))
 
 Improved Documentation
 ----------------------
-- Warn against using a Let's Encrypt certificate for TLS/DTLS TURN server client connections, and suggest using ZeroSSL certificate instead. This bypasses client-side connectivity errors caused by WebRTC libraries that reject Let's Encrypt certificates. Contributed by @AndrewFerr. ([\#11686](https://github.com/matrix-org/synapse/issues/11686))
+- Warn against using a Let's Encrypt certificate for TLS/DTLS TURN server client connections, and suggest using ZeroSSL certificate instead. This works around client-side connectivity errors caused by WebRTC libraries that reject Let's Encrypt certificates. Contributed by @AndrewFerr. ([\#11686](https://github.com/matrix-org/synapse/issues/11686))
 - Document the new `SYNAPSE_TEST_PERSIST_SQLITE_DB` environment variable in the contributing guide. ([\#11715](https://github.com/matrix-org/synapse/issues/11715))
-- Document that now the minimum supported PostgreSQL version is 10. ([\#11725](https://github.com/matrix-org/synapse/issues/11725))
+- Document that the minimum supported PostgreSQL version is now 10. ([\#11725](https://github.com/matrix-org/synapse/issues/11725))
 - Fix typo in demo docs: differnt. ([\#11735](https://github.com/matrix-org/synapse/issues/11735))
-- Update room spec url in config files. ([\#11739](https://github.com/matrix-org/synapse/issues/11739))
+- Update room spec URL in config files. ([\#11739](https://github.com/matrix-org/synapse/issues/11739))
-- Mention python3-venv and libpq-dev dependencies in contribution guide. ([\#11740](https://github.com/matrix-org/synapse/issues/11740))
+- Mention `python3-venv` and `libpq-dev` dependencies in the contribution guide. ([\#11740](https://github.com/matrix-org/synapse/issues/11740))
 - Update documentation for configuring login with Facebook. ([\#11755](https://github.com/matrix-org/synapse/issues/11755))
 - Update installation instructions to note that Python 3.6 is no longer supported. ([\#11781](https://github.com/matrix-org/synapse/issues/11781))
@@ -45,7 +44,8 @@ Deprecations and Removals
 -------------------------
 - Remove the unstable `/send_relation` endpoint. ([\#11682](https://github.com/matrix-org/synapse/issues/11682))
-- Remove `python_twisted_reactor_pending_calls` prometheus metric. ([\#11724](https://github.com/matrix-org/synapse/issues/11724))
+- Remove `python_twisted_reactor_pending_calls` Prometheus metric. ([\#11724](https://github.com/matrix-org/synapse/issues/11724))
+- Remove the `password_hash` field from the response dictionaries of the [Users Admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html).
([\#11737](https://github.com/matrix-org/synapse/issues/11737))
+- Make the 'List Rooms' Admin API sort stable. Contributed by Daniël Sonck. ([\#11737](https://github.com/matrix-org/synapse/issues/11737))
 - Fix a long-standing bug where space hierarchy over federation would only work correctly some of the time. ([\#11775](https://github.com/matrix-org/synapse/issues/11775))
-- Fix a bug introduced in Synapse 1.46.0 that prevented `on_logged_out` module callbacks from being correctly awaited by Synapse. ([\#11786](https://github.com/matrix-org/synapse/issues/11786))
+- Fix a bug introduced in Synapse v1.46.0 that prevented `on_logged_out` module callbacks from being correctly awaited by Synapse. ([\#11786](https://github.com/matrix-org/synapse/issues/11786))


 Improved Documentation
 ----------------------

-- Warn against using a Let's Encrypt certificate for TLS/DTLS TURN server client connections, and suggest using ZeroSSL certificate instead. This bypasses client-side connectivity errors caused by WebRTC libraries that reject Let's Encrypt certificates. Contibuted by @AndrewFerr. ([\#11686](https://github.com/matrix-org/synapse/issues/11686))
+- Warn against using a Let's Encrypt certificate for TLS/DTLS TURN server client connections, and suggest using ZeroSSL certificate instead. This works around client-side connectivity errors caused by WebRTC libraries that reject Let's Encrypt certificates. Contributed by @AndrewFerr. ([\#11686](https://github.com/matrix-org/synapse/issues/11686))
 - Document the new `SYNAPSE_TEST_PERSIST_SQLITE_DB` environment variable in the contributing guide. ([\#11715](https://github.com/matrix-org/synapse/issues/11715))
-- Document that now the minimum supported PostgreSQL version is 10. ([\#11725](https://github.com/matrix-org/synapse/issues/11725))
+- Document that the minimum supported PostgreSQL version is now 10. ([\#11725](https://github.com/matrix-org/synapse/issues/11725))
 - Fix typo in demo docs: differnt. ([\#11735](https://github.com/matrix-org/synapse/issues/11735))
-- Update room spec url in config files. ([\#11739](https://github.com/matrix-org/synapse/issues/11739))
-- Mention python3-venv and libpq-dev dependencies in contribution guide. ([\#11740](https://github.com/matrix-org/synapse/issues/11740))
+- Update room spec URL in config files. ([\#11739](https://github.com/matrix-org/synapse/issues/11739))
+- Mention `python3-venv` and `libpq-dev` dependencies in the contribution guide. ([\#11740](https://github.com/matrix-org/synapse/issues/11740))
 - Update documentation for configuring login with facebook. ([\#11755](https://github.com/matrix-org/synapse/issues/11755))
 - Update installation instructions to note that Python 3.6 is no longer supported. ([\#11781](https://github.com/matrix-org/synapse/issues/11781))

@@ -45,7 +44,8 @@ Deprecations and Removals
 -------------------------

 - Remove the unstable `/send_relation` endpoint. ([\#11682](https://github.com/matrix-org/synapse/issues/11682))
-- Remove `python_twisted_reactor_pending_calls` prometheus metric. ([\#11724](https://github.com/matrix-org/synapse/issues/11724))
+- Remove `python_twisted_reactor_pending_calls` Prometheus metric. ([\#11724](https://github.com/matrix-org/synapse/issues/11724))
+- Remove the `password_hash` field from the response dictionaries of the [Users Admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html).
([\#11576](https://github.com/matrix-org/synapse/issues/11576)) Internal Changes @@ -57,13 +57,12 @@ Internal Changes - Remove debug logging for #4422, which has been closed since Synapse 0.99. ([\#11693](https://github.com/matrix-org/synapse/issues/11693)) - Remove fallback code for Python 2. ([\#11699](https://github.com/matrix-org/synapse/issues/11699)) - Add a test for [an edge case](https://github.com/matrix-org/synapse/pull/11532#discussion_r769104461) in the `/sync` logic. ([\#11701](https://github.com/matrix-org/synapse/issues/11701)) -- Add the option to write sqlite test dbs to disk when running tests. ([\#11702](https://github.com/matrix-org/synapse/issues/11702)) +- Add the option to write SQLite test dbs to disk when running tests. ([\#11702](https://github.com/matrix-org/synapse/issues/11702)) - Improve Complement test output for Gitub Actions. ([\#11707](https://github.com/matrix-org/synapse/issues/11707)) -- Fix a typechecker problem related to our (ab)use of `nacl.signing.SigningKey`s. ([\#11714](https://github.com/matrix-org/synapse/issues/11714)) - Fix docstring on `add_account_data_for_user`. ([\#11716](https://github.com/matrix-org/synapse/issues/11716)) - Complement environment variable name change and update `.gitignore`. ([\#11718](https://github.com/matrix-org/synapse/issues/11718)) -- Simplify calculation of prometheus metrics for garbage collection. ([\#11723](https://github.com/matrix-org/synapse/issues/11723)) -- Improve accuracy of `python_twisted_reactor_tick_time` prometheus metric. ([\#11724](https://github.com/matrix-org/synapse/issues/11724), [\#11771](https://github.com/matrix-org/synapse/issues/11771)) +- Simplify calculation of Prometheus metrics for garbage collection. ([\#11723](https://github.com/matrix-org/synapse/issues/11723)) +- Improve accuracy of `python_twisted_reactor_tick_time` Prometheus metric. ([\#11724](https://github.com/matrix-org/synapse/issues/11724), [\#11771](https://github.com/matrix-org/synapse/issues/11771)) - Minor efficiency improvements when inserting many values into the database. ([\#11742](https://github.com/matrix-org/synapse/issues/11742)) - Invite PR authors to give themselves credit in the changelog. ([\#11744](https://github.com/matrix-org/synapse/issues/11744)) - Add optional debugging to investigate [issue 8631](https://github.com/matrix-org/synapse/issues/8631). ([\#11760](https://github.com/matrix-org/synapse/issues/11760)) From 2aa37a4250675f6d9feb57ec0dce65b2a6a3cde6 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 21 Jan 2022 12:21:28 +0000 Subject: [PATCH 134/474] Add `state_key` and `rejection_reason` to `events` (#11792) ... and start populating them for new events --- changelog.d/11792.misc | 1 + synapse/storage/databases/main/events.py | 7 ++++- synapse/storage/schema/__init__.py | 8 +++--- .../schema/main/delta/68/01event_columns.sql | 26 +++++++++++++++++++ tests/storage/test_event_chain.py | 5 +++- 5 files changed, 42 insertions(+), 5 deletions(-) create mode 100644 changelog.d/11792.misc create mode 100644 synapse/storage/schema/main/delta/68/01event_columns.sql diff --git a/changelog.d/11792.misc b/changelog.d/11792.misc new file mode 100644 index 000000000000..6aa1cd61c354 --- /dev/null +++ b/changelog.d/11792.misc @@ -0,0 +1 @@ +Preparation for database schema simplifications: add `state_key` and `rejection_reason` columns to `events` table. 
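As an illustrative aside (a sketch, not part of the patch): the values the two new columns receive per event, mirroring the insert in `_store_event_txn` below. `state_key` stays NULL for non-state events, and `rejection_reason` stays NULL for accepted events.

```python
# Sketch of the per-event values written to the new columns; `event` and
# `context` stand in for the EventBase/EventContext pairs used below.
def new_event_columns(event, context):
    # get_state_key() returns None for non-state events, so the column
    # stays NULL for them.
    state_key = event.get_state_key()
    # context.rejected is falsy for accepted events; `or None` coerces
    # that to NULL instead of storing an empty value.
    rejection_reason = context.rejected or None
    return state_key, rejection_reason
```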
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index 1ae1ebe10879..b7554154ac19 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -1389,6 +1389,8 @@ def event_dict(event):
                 "received_ts",
                 "sender",
                 "contains_url",
+                "state_key",
+                "rejection_reason",
             ),
             values=(
                 (
@@ -1405,8 +1407,10 @@ def event_dict(event):
                     self._clock.time_msec(),
                     event.sender,
                     "url" in event.content and isinstance(event.content["url"], str),
+                    event.get_state_key(),
+                    context.rejected or None,
                 )
-                for event, _ in events_and_contexts
+                for event, context in events_and_contexts
             ),
         )
@@ -1456,6 +1460,7 @@ def _store_rejected_events_txn(self, txn, events_and_contexts):
         for event, context in events_and_contexts:
             if context.rejected:
                 # Insert the event_id into the rejections table
+                # (events.rejection_reason has already been done)
                 self._store_rejections_txn(txn, event.event_id, context.rejected)
                 to_remove.add(event)
diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py
index 75659f931c68..7b21c1b96db9 100644
--- a/synapse/storage/schema/__init__.py
+++ b/synapse/storage/schema/__init__.py
@@ -56,13 +56,15 @@
 Changes in SCHEMA_VERSION = 68:
     - event_reference_hashes is no longer read.
+    - `events` has `state_key` and `rejection_reason` columns, which are populated for
+      new events.
 """

 SCHEMA_COMPAT_VERSION = (
-    # we have removed the public_room_list_stream table, so are now incompatible with
-    # synapses wth SCHEMA_VERSION < 63.
-    63
+    # we now have `state_key` columns in both `events` and `state_events`, so
+    # now incompatible with synapses with SCHEMA_VERSION < 66.
+    66
 )
 """Limit on how far the synapse codebase can be rolled back without breaking db compat
diff --git a/synapse/storage/schema/main/delta/68/01event_columns.sql b/synapse/storage/schema/main/delta/68/01event_columns.sql
new file mode 100644
index 000000000000..7c072f972ef7
--- /dev/null
+++ b/synapse/storage/schema/main/delta/68/01event_columns.sql
@@ -0,0 +1,26 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Add new columns to the `events` table which will (one day) make the `state_events`
+-- and `rejections` tables redundant.
+
+ALTER TABLE events
+    -- if this event is a state event, its state key
+    ADD COLUMN state_key TEXT DEFAULT NULL;
+
+
+ALTER TABLE events
+    -- if this event was rejected, the reason it was rejected.
+ ADD COLUMN rejection_reason TEXT DEFAULT NULL; diff --git a/tests/storage/test_event_chain.py b/tests/storage/test_event_chain.py index 7b7f6c349e1c..e3273a93f9a2 100644 --- a/tests/storage/test_event_chain.py +++ b/tests/storage/test_event_chain.py @@ -19,6 +19,7 @@ from synapse.api.constants import EventTypes from synapse.api.room_versions import RoomVersions from synapse.events import EventBase +from synapse.events.snapshot import EventContext from synapse.rest import admin from synapse.rest.client import login, room from synapse.storage.databases.main.events import _LinkMap @@ -391,7 +392,9 @@ def persist( def _persist(txn): # We need to persist the events to the events and state_events # tables. - persist_events_store._store_event_txn(txn, [(e, {}) for e in events]) + persist_events_store._store_event_txn( + txn, [(e, EventContext()) for e in events] + ) # Actually call the function that calculates the auth chain stuff. persist_events_store._persist_event_auth_chain_txn(txn, events) From 2d295a4be92894d18d71512548db8629a3ed4b50 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Fri, 21 Jan 2022 13:15:13 +0000 Subject: [PATCH 135/474] Edit the changelog according to feedback --- CHANGES.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index a5dbf366697a..ddf57f1494f7 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -17,7 +17,7 @@ Bugfixes received over federation. ([\#11530](https://github.com/matrix-org/synapse/issues/11530)) - Fix a long-standing bug where Synapse wouldn't cache a response indicating that a remote user has no devices. ([\#11587](https://github.com/matrix-org/synapse/issues/11587)) - Fix an error that occurs whilst trying to get the federation status of a destination server that was working normally. This admin API was newly introduced in Synapse v1.49.0. ([\#11593](https://github.com/matrix-org/synapse/issues/11593)) -- Include the bundled aggregations in the `/sync` response, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#11612](https://github.com/matrix-org/synapse/issues/11612), [\#11659](https://github.com/matrix-org/synapse/issues/11659), [\#11791](https://github.com/matrix-org/synapse/issues/11791)) +- Fix bundled aggregations not being included in the `/sync` response, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#11612](https://github.com/matrix-org/synapse/issues/11612), [\#11659](https://github.com/matrix-org/synapse/issues/11659), [\#11791](https://github.com/matrix-org/synapse/issues/11791)) - Fix the `/_matrix/client/v1/room/{roomId}/hierarchy` endpoint returning incorrect fields which have been present since Synapse 1.49.0. ([\#11667](https://github.com/matrix-org/synapse/issues/11667)) - Fix preview of some GIF URLs (like tenor.com). Contributed by Philippe Daouadi. ([\#11669](https://github.com/matrix-org/synapse/issues/11669)) - Fix a bug where only the first 50 rooms from a space were returned from the `/hierarchy` API. This has existed since the introduction of the API in Synapse v1.41.0. ([\#11695](https://github.com/matrix-org/synapse/issues/11695)) @@ -36,7 +36,7 @@ Improved Documentation - Fix typo in demo docs: differnt. ([\#11735](https://github.com/matrix-org/synapse/issues/11735)) - Update room spec URL in config files. ([\#11739](https://github.com/matrix-org/synapse/issues/11739)) - Mention `python3-venv` and `libpq-dev` dependencies in the contribution guide. 
([\#11740](https://github.com/matrix-org/synapse/issues/11740)) -- Update documentation for configuring login with facebook. ([\#11755](https://github.com/matrix-org/synapse/issues/11755)) +- Update documentation for configuring login with Facebook. ([\#11755](https://github.com/matrix-org/synapse/issues/11755)) - Update installation instructions to note that Python 3.6 is no longer supported. ([\#11781](https://github.com/matrix-org/synapse/issues/11781)) @@ -46,6 +46,7 @@ Deprecations and Removals - Remove the unstable `/send_relation` endpoint. ([\#11682](https://github.com/matrix-org/synapse/issues/11682)) - Remove `python_twisted_reactor_pending_calls` Prometheus metric. ([\#11724](https://github.com/matrix-org/synapse/issues/11724)) - Remove the `password_hash` field from the response dictionaries of the [Users Admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html). ([\#11576](https://github.com/matrix-org/synapse/issues/11576)) +- Deprecate support for `webclient` listeners and non-HTTP(S) `web_client_location` configuration. ([\#11774](https://github.com/matrix-org/synapse/issues/11774), [\#11783](https://github.com/matrix-org/synapse/issues/11783)) Internal Changes @@ -69,7 +70,6 @@ Internal Changes - Remove `log_function` utility function and its uses. ([\#11761](https://github.com/matrix-org/synapse/issues/11761)) - Add a unit test that checks both `client` and `webclient` resources will function when simultaneously enabled. ([\#11765](https://github.com/matrix-org/synapse/issues/11765)) - Allow overriding complement commit using `COMPLEMENT_REF`. ([\#11766](https://github.com/matrix-org/synapse/issues/11766)) -- Deprecate support for `webclient` listeners and non-HTTP(S) `web_client_location` configuration. ([\#11774](https://github.com/matrix-org/synapse/issues/11774), [\#11783](https://github.com/matrix-org/synapse/issues/11783)) - Add some comments and type annotations for `_update_outliers_txn`. ([\#11776](https://github.com/matrix-org/synapse/issues/11776)) From f8cf02b200dc56c3de857b05ee7ef4a58d23d254 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Fri, 21 Jan 2022 14:05:27 +0000 Subject: [PATCH 136/474] Remove obsolete newsfile The PR was cherrypicked into v1.51.0rc1. --- changelog.d/11791.bugfix | 1 - 1 file changed, 1 deletion(-) delete mode 100644 changelog.d/11791.bugfix diff --git a/changelog.d/11791.bugfix b/changelog.d/11791.bugfix deleted file mode 100644 index 842f6892fda3..000000000000 --- a/changelog.d/11791.bugfix +++ /dev/null @@ -1 +0,0 @@ -Include the bundled aggregations in the `/sync` response, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). From 9006ee36d1d3d83ffaf1cce2ac9d70ff2d29df51 Mon Sep 17 00:00:00 2001 From: Shay Date: Fri, 21 Jan 2022 14:23:26 -0800 Subject: [PATCH 137/474] Drop support for and remove references to EOL Python 3.6 (#11683) * remove reference in comments to python3.6 * upgrade tox python env in script * bump python version in example for completeness * upgrade python version requirement in setup doc * upgrade necessary python version in __init__.py * upgrade python version in setup.py * newsfragment * drops refs to bionic and replace with focal * bump refs to postgres 9.6 to 10 * fix hanging ci * try installing tzdata first * revert change made in b979f336 * ignore new random mypy error while debugging other error * fix lint error for temporary workaround * revert change to install list * try passing env var * export debian frontend var? 
* move line and add comment * bump pillow dependency * bump lxml depenency * install libjpeg-dev for pillow * bump automat version to one compatible with py3.8 * add libwebp for pillow * bump twisted trunk python version * change suffix of newsfragment * remove redundant python 3.7 checks * lint --- .ci/scripts/test_old_deps.sh | 8 +++++--- .github/workflows/tests.yml | 8 ++++---- .github/workflows/twisted_trunk.yml | 2 +- changelog.d/11683.removal | 1 + docker/Dockerfile-pgtests | 2 +- docker/run_pg_tests.sh | 2 +- docs/admin_api/version_api.md | 2 +- setup.py | 2 +- synapse/__init__.py | 4 ++-- synapse/app/_base.py | 11 +++-------- synapse/python_dependencies.py | 4 ++-- synapse/storage/engines/postgres.py | 4 ++-- tox.ini | 3 +-- 13 files changed, 25 insertions(+), 28 deletions(-) create mode 100644 changelog.d/11683.removal diff --git a/.ci/scripts/test_old_deps.sh b/.ci/scripts/test_old_deps.sh index 8b473936f8c3..a54aa86fbc0f 100755 --- a/.ci/scripts/test_old_deps.sh +++ b/.ci/scripts/test_old_deps.sh @@ -1,12 +1,14 @@ #!/usr/bin/env bash - -# this script is run by GitHub Actions in a plain `bionic` container; it installs the +# this script is run by GitHub Actions in a plain `focal` container; it installs the # minimal requirements for tox and hands over to the py3-old tox environment. +# Prevent tzdata from asking for user input +export DEBIAN_FRONTEND=noninteractive + set -ex apt-get update -apt-get install -y python3 python3-dev python3-pip libxml2-dev libxslt-dev xmlsec1 zlib1g-dev tox +apt-get install -y python3 python3-dev python3-pip libxml2-dev libxslt-dev xmlsec1 zlib1g-dev tox libjpeg-dev libwebp-dev export LANG="C.UTF-8" diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 4f58069702dc..e47671102ec9 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -141,7 +141,7 @@ jobs: steps: - uses: actions/checkout@v2 - name: Test with old deps - uses: docker://ubuntu:bionic # For old python and sqlite + uses: docker://ubuntu:focal # For old python and sqlite with: workdir: /github/workspace entrypoint: .ci/scripts/test_old_deps.sh @@ -213,15 +213,15 @@ jobs: fail-fast: false matrix: include: - - sytest-tag: bionic + - sytest-tag: focal - - sytest-tag: bionic + - sytest-tag: focal postgres: postgres - sytest-tag: testing postgres: postgres - - sytest-tag: bionic + - sytest-tag: focal postgres: multi-postgres workers: workers diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index e974ac7aba37..fb9d46b7bfdd 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -25,7 +25,7 @@ jobs: - run: sudo apt-get -qq install xmlsec1 - uses: actions/setup-python@v2 with: - python-version: 3.6 + python-version: 3.7 - run: .ci/patch_for_twisted_trunk.sh - run: pip install tox - run: tox -e py diff --git a/changelog.d/11683.removal b/changelog.d/11683.removal new file mode 100644 index 000000000000..b1f048f7f52d --- /dev/null +++ b/changelog.d/11683.removal @@ -0,0 +1 @@ +Drop support for Python 3.6, which is EOL. 
\ No newline at end of file diff --git a/docker/Dockerfile-pgtests b/docker/Dockerfile-pgtests index 92b804d1937f..b94484ea7fd6 100644 --- a/docker/Dockerfile-pgtests +++ b/docker/Dockerfile-pgtests @@ -1,6 +1,6 @@ # Use the Sytest image that comes with a lot of the build dependencies # pre-installed -FROM matrixdotorg/sytest:bionic +FROM matrixdotorg/sytest:focal # The Sytest image doesn't come with python, so install that RUN apt-get update && apt-get -qq install -y python3 python3-dev python3-pip diff --git a/docker/run_pg_tests.sh b/docker/run_pg_tests.sh index 58e2177d34c2..b22b6ef16b7e 100755 --- a/docker/run_pg_tests.sh +++ b/docker/run_pg_tests.sh @@ -16,4 +16,4 @@ sudo -u postgres /usr/lib/postgresql/10/bin/pg_ctl -w -D /var/lib/postgresql/dat # Run the tests cd /src export TRIAL_FLAGS="-j 4" -tox --workdir=./.tox-pg-container -e py36-postgres "$@" +tox --workdir=./.tox-pg-container -e py37-postgres "$@" diff --git a/docs/admin_api/version_api.md b/docs/admin_api/version_api.md index efb4a0c0f7aa..27977de0d379 100644 --- a/docs/admin_api/version_api.md +++ b/docs/admin_api/version_api.md @@ -16,6 +16,6 @@ It returns a JSON body like the following: ```json { "server_version": "0.99.2rc1 (b=develop, abcdef123)", - "python_version": "3.6.8" + "python_version": "3.7.8" } ``` diff --git a/setup.py b/setup.py index e618ff898b46..d0511c767f98 100755 --- a/setup.py +++ b/setup.py @@ -150,7 +150,7 @@ def exec_file(path_segments): zip_safe=False, long_description=long_description, long_description_content_type="text/x-rst", - python_requires="~=3.6", + python_requires="~=3.7", entry_points={ "console_scripts": [ "synapse_homeserver = synapse.app.homeserver:main", diff --git a/synapse/__init__.py b/synapse/__init__.py index 3d0d165f48d2..4ef87280180c 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -21,8 +21,8 @@ import sys # Check that we're not running on an unsupported Python version. -if sys.version_info < (3, 6): - print("Synapse requires Python 3.6 or above.") +if sys.version_info < (3, 7): + print("Synapse requires Python 3.7 or above.") sys.exit(1) # Twisted and canonicaljson will fail to import when this file is executed to diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 579adbbca02d..e5ee03b79ff9 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -16,7 +16,6 @@ import gc import logging import os -import platform import signal import socket import sys @@ -468,16 +467,12 @@ def run_sighup(*args: Any, **kwargs: Any) -> None: # everything currently allocated are things that will be used for the # rest of time. Doing so means less work each GC (hopefully). # - # This only works on Python 3.7 - if platform.python_implementation() == "CPython" and sys.version_info >= (3, 7): - gc.collect() - gc.freeze() + gc.collect() + gc.freeze() # Speed up shutdowns by freezing all allocated objects. This moves everything # into the permanent generation and excludes them from the final GC. 
- # Unfortunately only works on Python 3.7 - if platform.python_implementation() == "CPython" and sys.version_info >= (3, 7): - atexit.register(gc.freeze) + atexit.register(gc.freeze) def setup_sentry(hs: "HomeServer") -> None: diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index d844fbb3b3d6..22b4606ae0ea 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -70,7 +70,7 @@ "pyasn1>=0.1.9", "pyasn1-modules>=0.0.7", "bcrypt>=3.1.0", - "pillow>=4.3.0", + "pillow>=5.4.0", "sortedcontainers>=1.4.4", "pymacaroons>=0.13.0", "msgpack>=0.5.2", @@ -107,7 +107,7 @@ # `systemd.journal.JournalHandler`, as is documented in # `contrib/systemd/log_config.yaml`. "systemd": ["systemd-python>=231"], - "url_preview": ["lxml>=3.5.0"], + "url_preview": ["lxml>=4.2.0"], "sentry": ["sentry-sdk>=0.7.2"], "opentracing": ["jaeger-client>=4.0.0", "opentracing>=2.2.0"], "jwt": ["pyjwt>=1.6.4"], diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index 30f948a0f77d..b3d71f661c03 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -46,8 +46,8 @@ def check_database(self, db_conn, allow_outdated_version: bool = False): self._version = db_conn.server_version # Are we on a supported PostgreSQL version? - if not allow_outdated_version and self._version < 90600: - raise RuntimeError("Synapse requires PostgreSQL 9.6 or above.") + if not allow_outdated_version and self._version < 100000: + raise RuntimeError("Synapse requires PostgreSQL 10 or above.") with db_conn.cursor() as txn: txn.execute("SHOW SERVER_ENCODING") diff --git a/tox.ini b/tox.ini index 2ffca14b2278..32679e9106c8 100644 --- a/tox.ini +++ b/tox.ini @@ -117,8 +117,7 @@ usedevelop=true skip_install = true usedevelop = false deps = - # Old automat version for Twisted - Automat == 0.3.0 + Automat == 0.8.0 lxml {[base]deps} From dc671d3ea7e869cf765c08c755d4045feee98fe7 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 24 Jan 2022 12:20:01 +0000 Subject: [PATCH 138/474] Fix logic for dropping old events in fed queue (#11806) Co-authored-by: Brendan Abolivier Co-authored-by: Richard van der Hoff --- changelog.d/11806.bugfix | 1 + .../databases/main/event_federation.py | 5 +++- tests/storage/test_event_federation.py | 30 +++++++++++++++---- 3 files changed, 29 insertions(+), 7 deletions(-) create mode 100644 changelog.d/11806.bugfix diff --git a/changelog.d/11806.bugfix b/changelog.d/11806.bugfix new file mode 100644 index 000000000000..e4beaf103b04 --- /dev/null +++ b/changelog.d/11806.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.40.0 that caused Synapse to fail to process incoming federation traffic after handling a large amount of events in a v1 room. 
\ No newline at end of file diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index 270b30800bf2..a556f17dac15 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -1432,7 +1432,10 @@ async def prune_staged_events_in_room( if room_version.event_format == EventFormatVersions.V1: for prev_event_tuple in prev_events: - if not isinstance(prev_event_tuple, list) or len(prev_events) != 2: + if ( + not isinstance(prev_event_tuple, list) + or len(prev_event_tuple) != 2 + ): logger.info("Invalid prev_events for %s", event_id) break diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py index 632bbc9de73e..2bc89512f8ae 100644 --- a/tests/storage/test_event_federation.py +++ b/tests/storage/test_event_federation.py @@ -12,10 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Tuple, Union + import attr from parameterized import parameterized -from synapse.api.room_versions import RoomVersions +from synapse.api.room_versions import ( + KNOWN_ROOM_VERSIONS, + EventFormatVersions, + RoomVersion, +) from synapse.events import _EventInternalMetadata from synapse.util import json_encoder @@ -506,11 +512,21 @@ def insert_event(txn): ) self.assertSetEqual(difference, set()) - def test_prune_inbound_federation_queue(self): - "Test that pruning of inbound federation queues work" + @parameterized.expand( + [(room_version,) for room_version in KNOWN_ROOM_VERSIONS.values()] + ) + def test_prune_inbound_federation_queue(self, room_version: RoomVersion): + """Test that pruning of inbound federation queues work""" room_id = "some_room_id" + def prev_event_format(prev_event_id: str) -> Union[Tuple[str, dict], str]: + """Account for differences in prev_events format across room versions""" + if room_version.event_format == EventFormatVersions.V1: + return prev_event_id, {} + + return prev_event_id + # Insert a bunch of events that all reference the previous one. self.get_success( self.store.db_pool.simple_insert_many( @@ -529,7 +545,9 @@ def test_prune_inbound_federation_queue(self): room_id, 0, f"$fake_event_id_{i + 1}", - json_encoder.encode({"prev_events": [f"$fake_event_id_{i}"]}), + json_encoder.encode( + {"prev_events": [prev_event_format(f"$fake_event_id_{i}")]} + ), "{}", ) for i in range(500) @@ -541,12 +559,12 @@ def test_prune_inbound_federation_queue(self): # Calling prune once should return True, i.e. a prune happen. The second # time it shouldn't. 
pruned = self.get_success( - self.store.prune_staged_events_in_room(room_id, RoomVersions.V6) + self.store.prune_staged_events_in_room(room_id, room_version) ) self.assertTrue(pruned) pruned = self.get_success( - self.store.prune_staged_events_in_room(room_id, RoomVersions.V6) + self.store.prune_staged_events_in_room(room_id, room_version) ) self.assertFalse(pruned) From 14b45b25dda963ecc4bc18946db50443a1b4411d Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Mon, 24 Jan 2022 12:25:18 +0000 Subject: [PATCH 139/474] 1.51.0rc2 --- CHANGES.md | 9 +++++++++ changelog.d/11806.bugfix | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/11806.bugfix diff --git a/CHANGES.md b/CHANGES.md index ddf57f1494f7..ce3d657c98cc 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.51.0rc2 (2022-01-24) +============================== + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.40.0 that caused Synapse to fail to process incoming federation traffic after handling a large amount of events in a v1 room. ([\#11806](https://github.com/matrix-org/synapse/issues/11806)) + + Synapse 1.51.0rc1 (2022-01-21) ============================== diff --git a/changelog.d/11806.bugfix b/changelog.d/11806.bugfix deleted file mode 100644 index e4beaf103b04..000000000000 --- a/changelog.d/11806.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.40.0 that caused Synapse to fail to process incoming federation traffic after handling a large amount of events in a v1 room. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index a013580e4f74..a07bf4f2efb1 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.51.0~rc2) stable; urgency=medium + + * New synapse release 1.51.0~rc2. + + -- Synapse Packaging team Mon, 24 Jan 2022 12:25:00 +0000 + matrix-synapse-py3 (1.51.0~rc1) stable; urgency=medium * New synapse release 1.51.0~rc1. diff --git a/synapse/__init__.py b/synapse/__init__.py index 3d0d165f48d2..ef859c9deddb 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.51.0rc1" +__version__ = "1.51.0rc2" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 8ff465d20685ba5e1d446e336dc11de571df6a97 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 24 Jan 2022 12:20:01 +0000 Subject: [PATCH 140/474] Fix logic for dropping old events in fed queue (#11806) Co-authored-by: Brendan Abolivier Co-authored-by: Richard van der Hoff --- changelog.d/11806.bugfix | 1 + .../databases/main/event_federation.py | 5 +++- tests/storage/test_event_federation.py | 28 +++++++++++++++---- 3 files changed, 27 insertions(+), 7 deletions(-) create mode 100644 changelog.d/11806.bugfix diff --git a/changelog.d/11806.bugfix b/changelog.d/11806.bugfix new file mode 100644 index 000000000000..e4beaf103b04 --- /dev/null +++ b/changelog.d/11806.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.40.0 that caused Synapse to fail to process incoming federation traffic after handling a large amount of events in a v1 room. 
\ No newline at end of file diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index 270b30800bf2..a556f17dac15 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -1432,7 +1432,10 @@ async def prune_staged_events_in_room( if room_version.event_format == EventFormatVersions.V1: for prev_event_tuple in prev_events: - if not isinstance(prev_event_tuple, list) or len(prev_events) != 2: + if ( + not isinstance(prev_event_tuple, list) + or len(prev_event_tuple) != 2 + ): logger.info("Invalid prev_events for %s", event_id) break diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py index ecfda7677e0a..bf7808486970 100644 --- a/tests/storage/test_event_federation.py +++ b/tests/storage/test_event_federation.py @@ -12,10 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Tuple, Union + import attr from parameterized import parameterized -from synapse.api.room_versions import RoomVersions +from synapse.api.room_versions import ( + KNOWN_ROOM_VERSIONS, + EventFormatVersions, + RoomVersion, +) from synapse.events import _EventInternalMetadata from synapse.util import json_encoder @@ -506,11 +512,21 @@ def insert_event(txn): ) self.assertSetEqual(difference, set()) - def test_prune_inbound_federation_queue(self): - "Test that pruning of inbound federation queues work" + @parameterized.expand( + [(room_version,) for room_version in KNOWN_ROOM_VERSIONS.values()] + ) + def test_prune_inbound_federation_queue(self, room_version: RoomVersion): + """Test that pruning of inbound federation queues work""" room_id = "some_room_id" + def prev_event_format(prev_event_id: str) -> Union[Tuple[str, dict], str]: + """Account for differences in prev_events format across room versions""" + if room_version.event_format == EventFormatVersions.V1: + return prev_event_id, {} + + return prev_event_id + # Insert a bunch of events that all reference the previous one. self.get_success( self.store.db_pool.simple_insert_many( @@ -522,7 +538,7 @@ def test_prune_inbound_federation_queue(self): "received_ts": 0, "event_id": f"$fake_event_id_{i + 1}", "event_json": json_encoder.encode( - {"prev_events": [f"$fake_event_id_{i}"]} + {"prev_events": [prev_event_format(f"$fake_event_id_{i}")]} ), "internal_metadata": "{}", } @@ -535,12 +551,12 @@ def test_prune_inbound_federation_queue(self): # Calling prune once should return True, i.e. a prune happen. The second # time it shouldn't. pruned = self.get_success( - self.store.prune_staged_events_in_room(room_id, RoomVersions.V6) + self.store.prune_staged_events_in_room(room_id, room_version) ) self.assertTrue(pruned) pruned = self.get_success( - self.store.prune_staged_events_in_room(room_id, RoomVersions.V6) + self.store.prune_staged_events_in_room(room_id, room_version) ) self.assertFalse(pruned) From df54c8485a286dbefaa038319399ef8985d5344e Mon Sep 17 00:00:00 2001 From: reivilibre Date: Mon, 24 Jan 2022 13:37:00 +0000 Subject: [PATCH 141/474] Remove account data (including client config, push rules and ignored users) upon user deactivation. 
(#11621) Co-authored-by: Patrick Cloke --- changelog.d/11621.feature | 1 + docs/admin_api/user_admin_api.md | 6 +- synapse/handlers/deactivate_account.py | 3 + .../storage/databases/main/account_data.py | 73 +++++- tests/handlers/test_deactivate_account.py | 219 ++++++++++++++++++ 5 files changed, 299 insertions(+), 3 deletions(-) create mode 100644 changelog.d/11621.feature create mode 100644 tests/handlers/test_deactivate_account.py diff --git a/changelog.d/11621.feature b/changelog.d/11621.feature new file mode 100644 index 000000000000..dc426fb658ac --- /dev/null +++ b/changelog.d/11621.feature @@ -0,0 +1 @@ +Remove account data (including client config, push rules and ignored users) upon user deactivation. \ No newline at end of file diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md index c514cadb9dae..fdc1f2d1cfd1 100644 --- a/docs/admin_api/user_admin_api.md +++ b/docs/admin_api/user_admin_api.md @@ -353,6 +353,11 @@ The following actions are performed when deactivating an user: - Remove the user from the user directory - Reject all pending invites - Remove all account validity information related to the user +- Remove the arbitrary data store known as *account data*. For example, this includes: + - list of ignored users; + - push rules; + - secret storage keys; and + - cross-signing keys. The following additional actions are performed during deactivation if `erase` is set to `true`: @@ -366,7 +371,6 @@ The following actions are **NOT** performed. The list may be incomplete. - Remove mappings of SSO IDs - [Delete media uploaded](#delete-media-uploaded-by-a-user) by user (included avatar images) - Delete sent and received messages -- Delete E2E cross-signing keys - Remove the user's creation (registration) timestamp - [Remove rate limit overrides](#override-ratelimiting-for-users) - Remove from monthly active users diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index bee62cf36088..7a13d76a687f 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -157,6 +157,9 @@ async def deactivate_account( # Mark the user as deactivated. await self.store.set_user_deactivated_status(user_id, True) + # Remove account data (including ignored users and push rules). 
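+        # (purge_account_data_for_user also clears room account data, the
+        # denormalised ignored_users rows and the user's push rules; see
+        # its definition in account_data.py below.)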
+ await self.store.purge_account_data_for_user(user_id) + return identity_server_supports_unbinding async def _reject_pending_invites_for_user(self, user_id: str) -> None: diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 9c19f0965f39..5bfa408f74b0 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -26,6 +26,7 @@ LoggingTransaction, ) from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore +from synapse.storage.databases.main.push_rule import PushRulesWorkerStore from synapse.storage.engines import PostgresEngine from synapse.storage.util.id_generators import ( AbstractStreamIdGenerator, @@ -44,7 +45,7 @@ logger = logging.getLogger(__name__) -class AccountDataWorkerStore(CacheInvalidationWorkerStore): +class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore): def __init__( self, database: DatabasePool, @@ -179,7 +180,7 @@ async def get_global_account_data_by_type_for_user( else: return None - @cached(num_args=2) + @cached(num_args=2, tree=True) async def get_account_data_for_room( self, user_id: str, room_id: str ) -> Dict[str, JsonDict]: @@ -546,6 +547,74 @@ def _add_account_data_for_user( for ignored_user_id in previously_ignored_users ^ currently_ignored_users: self._invalidate_cache_and_stream(txn, self.ignored_by, (ignored_user_id,)) + async def purge_account_data_for_user(self, user_id: str) -> None: + """ + Removes the account data for a user. + + This is intended to be used upon user deactivation and also removes any + derived information from account data (e.g. push rules and ignored users). + + Args: + user_id: The user ID to remove data for. + """ + + def purge_account_data_for_user_txn(txn: LoggingTransaction) -> None: + # Purge from the primary account_data tables. + self.db_pool.simple_delete_txn( + txn, table="account_data", keyvalues={"user_id": user_id} + ) + + self.db_pool.simple_delete_txn( + txn, table="room_account_data", keyvalues={"user_id": user_id} + ) + + # Purge from ignored_users where this user is the ignorer. + # N.B. We don't purge where this user is the ignoree, because that + # interferes with other users' account data. + # It's also not this user's data to delete! + self.db_pool.simple_delete_txn( + txn, table="ignored_users", keyvalues={"ignorer_user_id": user_id} + ) + + # Remove the push rules + self.db_pool.simple_delete_txn( + txn, table="push_rules", keyvalues={"user_name": user_id} + ) + self.db_pool.simple_delete_txn( + txn, table="push_rules_enable", keyvalues={"user_name": user_id} + ) + self.db_pool.simple_delete_txn( + txn, table="push_rules_stream", keyvalues={"user_id": user_id} + ) + + # Invalidate caches as appropriate + self._invalidate_cache_and_stream( + txn, self.get_account_data_for_room_and_type, (user_id,) + ) + self._invalidate_cache_and_stream( + txn, self.get_account_data_for_user, (user_id,) + ) + self._invalidate_cache_and_stream( + txn, self.get_global_account_data_by_type_for_user, (user_id,) + ) + self._invalidate_cache_and_stream( + txn, self.get_account_data_for_room, (user_id,) + ) + self._invalidate_cache_and_stream( + txn, self.get_push_rules_for_user, (user_id,) + ) + self._invalidate_cache_and_stream( + txn, self.get_push_rules_enabled_for_user, (user_id,) + ) + # This user might be contained in the ignored_by cache for other users, + # so we have to invalidate it all. 
+ self._invalidate_all_cache_and_stream(txn, self.ignored_by) + + await self.db_pool.runInteraction( + "purge_account_data_for_user_txn", + purge_account_data_for_user_txn, + ) + class AccountDataStore(AccountDataWorkerStore): pass diff --git a/tests/handlers/test_deactivate_account.py b/tests/handlers/test_deactivate_account.py new file mode 100644 index 000000000000..3da597768c62 --- /dev/null +++ b/tests/handlers/test_deactivate_account.py @@ -0,0 +1,219 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from http import HTTPStatus +from typing import Any, Dict + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.api.constants import AccountDataTypes +from synapse.push.rulekinds import PRIORITY_CLASS_MAP +from synapse.rest import admin +from synapse.rest.client import account, login +from synapse.server import HomeServer +from synapse.util import Clock + +from tests.unittest import HomeserverTestCase + + +class DeactivateAccountTestCase(HomeserverTestCase): + servlets = [ + login.register_servlets, + admin.register_servlets, + account.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self._store = hs.get_datastore() + + self.user = self.register_user("user", "pass") + self.token = self.login("user", "pass") + + def _deactivate_my_account(self): + """ + Deactivates the account `self.user` using `self.token` and asserts + that it returns a 200 success code. + """ + req = self.get_success( + self.make_request( + "POST", + "account/deactivate", + { + "auth": { + "type": "m.login.password", + "user": self.user, + "password": "pass", + }, + "erase": True, + }, + access_token=self.token, + ) + ) + self.assertEqual(req.code, HTTPStatus.OK, req) + + def test_global_account_data_deleted_upon_deactivation(self) -> None: + """ + Tests that global account data is removed upon deactivation. + """ + # Add some account data + self.get_success( + self._store.add_account_data_for_user( + self.user, + AccountDataTypes.DIRECT, + {"@someone:remote": ["!somewhere:remote"]}, + ) + ) + + # Check that we actually added some. + self.assertIsNotNone( + self.get_success( + self._store.get_global_account_data_by_type_for_user( + self.user, AccountDataTypes.DIRECT + ) + ), + ) + + # Request the deactivation of our account + self._deactivate_my_account() + + # Check that the account data does not persist. + self.assertIsNone( + self.get_success( + self._store.get_global_account_data_by_type_for_user( + self.user, AccountDataTypes.DIRECT + ) + ), + ) + + def test_room_account_data_deleted_upon_deactivation(self) -> None: + """ + Tests that room account data is removed upon deactivation. + """ + room_id = "!room:test" + + # Add some room account data + self.get_success( + self._store.add_account_data_to_room( + self.user, + room_id, + "m.fully_read", + {"event_id": "$aaaa:test"}, + ) + ) + + # Check that we actually added some. 
+ self.assertIsNotNone( + self.get_success( + self._store.get_account_data_for_room_and_type( + self.user, room_id, "m.fully_read" + ) + ), + ) + + # Request the deactivation of our account + self._deactivate_my_account() + + # Check that the account data does not persist. + self.assertIsNone( + self.get_success( + self._store.get_account_data_for_room_and_type( + self.user, room_id, "m.fully_read" + ) + ), + ) + + def _is_custom_rule(self, push_rule: Dict[str, Any]) -> bool: + """ + Default rules start with a dot: such as .m.rule and .im.vector. + This function returns true iff a rule is custom (not default). + """ + return "/." not in push_rule["rule_id"] + + def test_push_rules_deleted_upon_account_deactivation(self) -> None: + """ + Push rules are a special case of account data. + They are stored separately but get sent to the client as account data in /sync. + This tests that deactivating a user deletes push rules along with the rest + of their account data. + """ + + # Add a push rule + self.get_success( + self._store.add_push_rule( + self.user, + "personal.override.rule1", + PRIORITY_CLASS_MAP["override"], + [], + [], + ) + ) + + # Test the rule exists + push_rules = self.get_success(self._store.get_push_rules_for_user(self.user)) + # Filter out default rules; we don't care + push_rules = list(filter(self._is_custom_rule, push_rules)) + # Check our rule made it + self.assertEqual( + push_rules, + [ + { + "user_name": "@user:test", + "rule_id": "personal.override.rule1", + "priority_class": 5, + "priority": 0, + "conditions": [], + "actions": [], + "default": False, + } + ], + push_rules, + ) + + # Request the deactivation of our account + self._deactivate_my_account() + + push_rules = self.get_success(self._store.get_push_rules_for_user(self.user)) + # Filter out default rules; we don't care + push_rules = list(filter(self._is_custom_rule, push_rules)) + # Check our rule no longer exists + self.assertEqual(push_rules, [], push_rules) + + def test_ignored_users_deleted_upon_deactivation(self) -> None: + """ + Ignored users are a special case of account data. + They get denormalised into the `ignored_users` table upon being stored as + account data. + Test that a user's list of ignored users is deleted upon deactivation. 
+ """ + + # Add an ignored user + self.get_success( + self._store.add_account_data_for_user( + self.user, + AccountDataTypes.IGNORED_USER_LIST, + {"ignored_users": {"@sheltie:test": {}}}, + ) + ) + + # Test the user is ignored + self.assertEqual( + self.get_success(self._store.ignored_by("@sheltie:test")), {self.user} + ) + + # Request the deactivation of our account + self._deactivate_my_account() + + # Test the user is no longer ignored by the user that was deactivated + self.assertEqual( + self.get_success(self._store.ignored_by("@sheltie:test")), set() + ) From 36f37acf538ae7b2e9ca9927fd3c561f581d4ee0 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Mon, 24 Jan 2022 13:37:20 +0000 Subject: [PATCH 142/474] 1.50.2 --- CHANGES.md | 9 +++++++++ changelog.d/11806.bugfix | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/11806.bugfix diff --git a/CHANGES.md b/CHANGES.md index ced1dcc0db80..b8f3588ec940 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.50.2 (2022-01-24) +=========================== + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.40.0 that caused Synapse to fail to process incoming federation traffic after handling a large amount of events in a v1 room. ([\#11806](https://github.com/matrix-org/synapse/issues/11806)) + + Synapse 1.50.1 (2022-01-18) =========================== diff --git a/changelog.d/11806.bugfix b/changelog.d/11806.bugfix deleted file mode 100644 index e4beaf103b04..000000000000 --- a/changelog.d/11806.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.40.0 that caused Synapse to fail to process incoming federation traffic after handling a large amount of events in a v1 room. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index 18983f5da672..c790c2187701 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.50.2) stable; urgency=medium + + * New synapse release 1.50.2. + + -- Synapse Packaging team Mon, 24 Jan 2022 13:37:11 +0000 + matrix-synapse-py3 (1.50.1) stable; urgency=medium * New synapse release 1.50.1. diff --git a/synapse/__init__.py b/synapse/__init__.py index 5ec9f941741a..5ef294cd422e 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.50.1" +__version__ = "1.50.2" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 807efd26aec9b65c6a2f02d10fd139095a5b3387 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 24 Jan 2022 08:58:18 -0500 Subject: [PATCH 143/474] Support rendering previews with data: URLs in them (#11767) Images which are data URLs will no longer break URL previews and will properly be "downloaded" and thumbnailed. 
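As a minimal standalone sketch of the mechanism this relies on (illustrative only, not part of the patch itself): Python's `urllib` default opener already handles `data:` URLs, decoding the payload and exposing the embedded media type. The new `_parse_data_url` helper below builds on exactly this behaviour.

```python
# Minimal sketch: urllib's default opener includes a DataHandler, so a
# data: URL can be opened like any other URL.
from urllib.request import urlopen

with urlopen("data:text/plain;base64,aGVsbG8=") as resp:
    payload = resp.read()
    # The media type embedded in the URL is surfaced via the synthetic
    # headers object attached to the response.
    media_type = resp.headers.get_content_type()

assert payload == b"hello"
assert media_type == "text/plain"
```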
---
 changelog.d/11767.bugfix | 1 +
 synapse/rest/media/v1/preview_html.py | 31 ++-
 synapse/rest/media/v1/preview_url_resource.py | 224 +++++++++++++-----
 .../media/v1/test_html_preview.py} | 34 ++-
 tests/rest/media/v1/test_url_preview.py | 81 ++++++-
 tests/server.py | 2 +-
 6 files changed, 299 insertions(+), 74 deletions(-)
 create mode 100644 changelog.d/11767.bugfix
 rename tests/{test_preview.py => rest/media/v1/test_html_preview.py} (94%)

diff --git a/changelog.d/11767.bugfix b/changelog.d/11767.bugfix
new file mode 100644
index 000000000000..3e344747f4a2
--- /dev/null
+++ b/changelog.d/11767.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug when previewing Reddit URLs which do not contain an image.
diff --git a/synapse/rest/media/v1/preview_html.py b/synapse/rest/media/v1/preview_html.py
index 30b067dd4271..872a9e72e818 100644
--- a/synapse/rest/media/v1/preview_html.py
+++ b/synapse/rest/media/v1/preview_html.py
@@ -321,14 +321,33 @@ def _iterate_over_text(


 def rebase_url(url: str, base: str) -> str:
-    base_parts = list(urlparse.urlparse(base))
+    """
+    Resolves a potentially relative `url` against an absolute `base` URL.
+
+    For example:
+
+    >>> rebase_url("subpage", "https://example.com/foo/")
+    'https://example.com/foo/subpage'
+    >>> rebase_url("sibling", "https://example.com/foo")
+    'https://example.com/sibling'
+    >>> rebase_url("/bar", "https://example.com/foo/")
+    'https://example.com/bar'
+    >>> rebase_url("https://alice.com/a/", "https://example.com/foo/")
+    'https://alice.com/a/'
+    """
+    base_parts = urlparse.urlparse(base)
+    # Convert the parsed URL to a list for (potential) modification.
     url_parts = list(urlparse.urlparse(url))
-    if not url_parts[0]:  # fix up schema
-        url_parts[0] = base_parts[0] or "http"
-    if not url_parts[1]:  # fix up hostname
-        url_parts[1] = base_parts[1]
+    # Add a scheme, if one does not exist.
+    if not url_parts[0]:
+        url_parts[0] = base_parts.scheme or "http"
+    # Fix up the hostname, if this is not a data URL.
+    if url_parts[0] != "data" and not url_parts[1]:
+        url_parts[1] = base_parts.netloc
+    # If the path does not start with a /, nest it under the base path's last
+    # directory.
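+    # e.g. the re.sub below keeps everything up to the base path's final "/",
+    # so rebasing "sibling" against a base path of "/foo/bar" yields "/foo/sibling".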
if not url_parts[2].startswith("/"): - url_parts[2] = re.sub(r"/[^/]+$", "/", base_parts[2]) + url_parts[2] + url_parts[2] = re.sub(r"/[^/]+$", "/", base_parts.path) + url_parts[2] return urlparse.urlunparse(url_parts) diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index e8881bc8709e..efd84ced8f54 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -21,8 +21,9 @@ import shutil import sys import traceback -from typing import TYPE_CHECKING, Iterable, Optional, Tuple +from typing import TYPE_CHECKING, BinaryIO, Iterable, Optional, Tuple from urllib import parse as urlparse +from urllib.request import urlopen import attr @@ -70,6 +71,17 @@ IMAGE_CACHE_EXPIRY_MS = 2 * ONE_DAY +@attr.s(slots=True, frozen=True, auto_attribs=True) +class DownloadResult: + length: int + uri: str + response_code: int + media_type: str + download_name: Optional[str] + expires: int + etag: Optional[str] + + @attr.s(slots=True, frozen=True, auto_attribs=True) class MediaInfo: """ @@ -256,7 +268,7 @@ async def _do_preview(self, url: str, user: UserID, ts: int) -> bytes: if oembed_url: url_to_download = oembed_url - media_info = await self._download_url(url_to_download, user) + media_info = await self._handle_url(url_to_download, user) logger.debug("got media_info of '%s'", media_info) @@ -297,7 +309,9 @@ async def _do_preview(self, url: str, user: UserID, ts: int) -> bytes: oembed_url = self._oembed.autodiscover_from_html(tree) og_from_oembed: JsonDict = {} if oembed_url: - oembed_info = await self._download_url(oembed_url, user) + oembed_info = await self._handle_url( + oembed_url, user, allow_data_urls=True + ) ( og_from_oembed, author_name, @@ -367,7 +381,135 @@ async def _do_preview(self, url: str, user: UserID, ts: int) -> bytes: return jsonog.encode("utf8") - async def _download_url(self, url: str, user: UserID) -> MediaInfo: + async def _download_url(self, url: str, output_stream: BinaryIO) -> DownloadResult: + """ + Fetches a remote URL and parses the headers. + + Args: + url: The URL to fetch. + output_stream: The stream to write the content to. + + Returns: + A tuple of: + Media length, URL downloaded, the HTTP response code, + the media type, the downloaded file name, the number of + milliseconds the result is valid for, the etag header. + """ + + try: + logger.debug("Trying to get preview for url '%s'", url) + length, headers, uri, code = await self.client.get_file( + url, + output_stream=output_stream, + max_size=self.max_spider_size, + headers={"Accept-Language": self.url_preview_accept_language}, + ) + except SynapseError: + # Pass SynapseErrors through directly, so that the servlet + # handler will return a SynapseError to the client instead of + # blank data or a 500. 
+            raise
+        except DNSLookupError:
+            # DNS lookup returned no results
+            # Note: This will also be the case if one of the resolved IP
+            # addresses is blacklisted
+            raise SynapseError(
+                502,
+                "DNS resolution failure during URL preview generation",
+                Codes.UNKNOWN,
+            )
+        except Exception as e:
+            # FIXME: pass through 404s and other error messages nicely
+            logger.warning("Error downloading %s: %r", url, e)
+
+            raise SynapseError(
+                500,
+                "Failed to download content: %s"
+                % (traceback.format_exception_only(sys.exc_info()[0], e),),
+                Codes.UNKNOWN,
+            )
+
+        if b"Content-Type" in headers:
+            media_type = headers[b"Content-Type"][0].decode("ascii")
+        else:
+            media_type = "application/octet-stream"
+
+        download_name = get_filename_from_headers(headers)
+
+        # FIXME: we should calculate a proper expiration based on the
+        # Cache-Control and Expire headers. But for now, assume 1 hour.
+        expires = ONE_HOUR
+        etag = headers[b"ETag"][0].decode("ascii") if b"ETag" in headers else None
+
+        return DownloadResult(
+            length, uri, code, media_type, download_name, expires, etag
+        )
+
+    async def _parse_data_url(
+        self, url: str, output_stream: BinaryIO
+    ) -> DownloadResult:
+        """
+        Parses a data: URL.
+
+        Args:
+            url: The URL to parse.
+            output_stream: The stream to write the content to.
+
+        Returns:
+            A tuple of:
+                Media length, URL downloaded, the HTTP response code,
+                the media type, the downloaded file name, the number of
+                milliseconds the result is valid for, the etag header.
+        """
+
+        try:
+            logger.debug("Trying to parse data url '%s'", url)
+            with urlopen(url) as url_info:
+                # TODO: Can this be made more efficient?
+                output_stream.write(url_info.read())
+        except Exception as e:
+            logger.warning("Error parsing data: URL %s: %r", url, e)
+
+            raise SynapseError(
+                500,
+                "Failed to parse data URL: %s"
+                % (traceback.format_exception_only(sys.exc_info()[0], e),),
+                Codes.UNKNOWN,
+            )
+
+        return DownloadResult(
+            # Read back the length that has been written.
+            length=output_stream.tell(),
+            uri=url,
+            # If it was parsed, consider this a 200 OK.
+            response_code=200,
+            # urlopen shoves the media-type from the data URL into the content type
+            # header object.
+            media_type=url_info.headers.get_content_type(),
+            # Some features are not supported by data: URLs.
+            download_name=None,
+            expires=ONE_HOUR,
+            etag=None,
+        )
+
+    async def _handle_url(
+        self, url: str, user: UserID, allow_data_urls: bool = False
+    ) -> MediaInfo:
+        """
+        Fetches content from a URL and parses the result to generate a MediaInfo.
+
+        It uses the media storage provider to persist the fetched content and
+        stores the mapping into the database.
+
+        Args:
+            url: The URL to fetch.
+            user: The user who has requested this URL.
+            allow_data_urls: True if data URLs should be allowed.
+
+        Returns:
+            A MediaInfo object describing the fetched content.
+        """
+
         # TODO: we should probably honour robots.txt... except in practice
         # we're most likely being explicitly triggered by a human rather than a
         # bot, so are we really a robot?
@@ -377,61 +519,27 @@ async def _download_url(self, url: str, user: UserID) -> MediaInfo: file_info = FileInfo(server_name=None, file_id=file_id, url_cache=True) with self.media_storage.store_into_file(file_info) as (f, fname, finish): - try: - logger.debug("Trying to get preview for url '%s'", url) - length, headers, uri, code = await self.client.get_file( - url, - output_stream=f, - max_size=self.max_spider_size, - headers={"Accept-Language": self.url_preview_accept_language}, - ) - except SynapseError: - # Pass SynapseErrors through directly, so that the servlet - # handler will return a SynapseError to the client instead of - # blank data or a 500. - raise - except DNSLookupError: - # DNS lookup returned no results - # Note: This will also be the case if one of the resolved IP - # addresses is blacklisted - raise SynapseError( - 502, - "DNS resolution failure during URL preview generation", - Codes.UNKNOWN, - ) - except Exception as e: - # FIXME: pass through 404s and other error messages nicely - logger.warning("Error downloading %s: %r", url, e) - - raise SynapseError( - 500, - "Failed to download content: %s" - % (traceback.format_exception_only(sys.exc_info()[0], e),), - Codes.UNKNOWN, - ) - await finish() + if url.startswith("data:"): + if not allow_data_urls: + raise SynapseError( + 500, "Previewing of data: URLs is forbidden", Codes.UNKNOWN + ) - if b"Content-Type" in headers: - media_type = headers[b"Content-Type"][0].decode("ascii") + download_result = await self._parse_data_url(url, f) else: - media_type = "application/octet-stream" + download_result = await self._download_url(url, f) - download_name = get_filename_from_headers(headers) - - # FIXME: we should calculate a proper expiration based on the - # Cache-Control and Expire headers. But for now, assume 1 hour. - expires = ONE_HOUR - etag = headers[b"ETag"][0].decode("ascii") if b"ETag" in headers else None + await finish() try: time_now_ms = self.clock.time_msec() await self.store.store_local_media( media_id=file_id, - media_type=media_type, + media_type=download_result.media_type, time_now_ms=time_now_ms, - upload_name=download_name, - media_length=length, + upload_name=download_result.download_name, + media_length=download_result.length, user_id=user, url_cache=url, ) @@ -444,16 +552,16 @@ async def _download_url(self, url: str, user: UserID) -> MediaInfo: raise return MediaInfo( - media_type=media_type, - media_length=length, - download_name=download_name, + media_type=download_result.media_type, + media_length=download_result.length, + download_name=download_result.download_name, created_ts_ms=time_now_ms, filesystem_id=file_id, filename=fname, - uri=uri, - response_code=code, - expires=expires, - etag=etag, + uri=download_result.uri, + response_code=download_result.response_code, + expires=download_result.expires, + etag=download_result.etag, ) async def _precache_image_url( @@ -474,8 +582,8 @@ async def _precache_image_url( # FIXME: it might be cleaner to use the same flow as the main /preview_url # request itself and benefit from the same caching etc. But for now we # just rely on the caching on the master request to speed things up. 
- image_info = await self._download_url( - rebase_url(og["og:image"], media_info.uri), user + image_info = await self._handle_url( + rebase_url(og["og:image"], media_info.uri), user, allow_data_urls=True ) if _is_media(image_info.media_type): diff --git a/tests/test_preview.py b/tests/rest/media/v1/test_html_preview.py similarity index 94% rename from tests/test_preview.py rename to tests/rest/media/v1/test_html_preview.py index 46e02f483fef..a4b57e3d1feb 100644 --- a/tests/test_preview.py +++ b/tests/rest/media/v1/test_html_preview.py @@ -16,10 +16,11 @@ _get_html_media_encodings, decode_body, parse_html_to_open_graph, + rebase_url, summarize_paragraphs, ) -from . import unittest +from tests import unittest try: import lxml @@ -447,3 +448,34 @@ def test_unknown_invalid(self): 'text/html; charset="invalid"', ) self.assertEqual(list(encodings), ["utf-8", "cp1252"]) + + +class RebaseUrlTestCase(unittest.TestCase): + def test_relative(self): + """Relative URLs should be resolved based on the context of the base URL.""" + self.assertEqual( + rebase_url("subpage", "https://example.com/foo/"), + "https://example.com/foo/subpage", + ) + self.assertEqual( + rebase_url("sibling", "https://example.com/foo"), + "https://example.com/sibling", + ) + self.assertEqual( + rebase_url("/bar", "https://example.com/foo/"), + "https://example.com/bar", + ) + + def test_absolute(self): + """Absolute URLs should not be modified.""" + self.assertEqual( + rebase_url("https://alice.com/a/", "https://example.com/foo/"), + "https://alice.com/a/", + ) + + def test_data(self): + """Data URLs should not be modified.""" + self.assertEqual( + rebase_url("data:,Hello%2C%20World%21", "https://example.com/foo/"), + "data:,Hello%2C%20World%21", + ) diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py index 16e904f15b45..53f618621305 100644 --- a/tests/rest/media/v1/test_url_preview.py +++ b/tests/rest/media/v1/test_url_preview.py @@ -12,9 +12,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import base64 import json import os import re +from urllib.parse import urlencode from twisted.internet._resolver import HostResolution from twisted.internet.address import IPv4Address, IPv6Address @@ -23,6 +25,7 @@ from synapse.config.oembed import OEmbedEndpointConfig from synapse.rest.media.v1.preview_url_resource import IMAGE_CACHE_EXPIRY_MS +from synapse.types import JsonDict from synapse.util.stringutils import parse_and_validate_mxc_uri from tests import unittest @@ -142,6 +145,14 @@ def resolveHostName( def create_test_resource(self): return self.hs.get_media_repository_resource() + def _assert_small_png(self, json_body: JsonDict) -> None: + """Assert properties from the SMALL_PNG test image.""" + self.assertTrue(json_body["og:image"].startswith("mxc://")) + self.assertEqual(json_body["og:image:height"], 1) + self.assertEqual(json_body["og:image:width"], 1) + self.assertEqual(json_body["og:image:type"], "image/png") + self.assertEqual(json_body["matrix:image:size"], 67) + def test_cache_returns_correct_type(self): self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")] @@ -569,6 +580,66 @@ def test_accept_language_config_option(self): server.data, ) + def test_data_url(self): + """ + Requesting to preview a data URL is not supported. 
+        """
+        self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
+
+        data = base64.b64encode(SMALL_PNG).decode()
+
+        query_params = urlencode(
+            {
+                "url": f'<html><head><img src="data:image/png;base64,{data}" /></head></html>'
+            }
+        )
+
+        channel = self.make_request(
+            "GET",
+            f"preview_url?{query_params}",
+            shorthand=False,
+        )
+        self.pump()
+
+        self.assertEqual(channel.code, 500)
+
+    def test_inline_data_url(self):
+        """
+        An inline image (as a data URL) should be parsed properly.
+        """
+        self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")]
+
+        data = base64.b64encode(SMALL_PNG)
+
+        end_content = (
+            b"<html><head>" b'<img src="data:image/png;base64,%s">' b"</head></html>"
+        ) % (data,)
+
+        channel = self.make_request(
+            "GET",
+            "preview_url?url=http://matrix.org",
+            shorthand=False,
+            await_result=False,
+        )
+        self.pump()
+
+        client = self.reactor.tcpClients[0][2].buildProtocol(None)
+        server = AccumulatingProtocol()
+        server.makeConnection(FakeTransport(client, self.reactor))
+        client.makeConnection(FakeTransport(server, self.reactor))
+        client.dataReceived(
+            (
+                b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
+                b'Content-Type: text/html; charset="utf8"\r\n\r\n'
+            )
+            % (len(end_content),)
+            + end_content
+        )
+
+        self.pump()
+        self.assertEqual(channel.code, 200)
+        self._assert_small_png(channel.json_body)
+
     def test_oembed_photo(self):
         """Test an oEmbed endpoint which returns a 'photo' type which redirects the preview to a new URL."""
         self.lookups["publish.twitter.com"] = [(IPv4Address, "10.1.2.3")]
@@ -626,10 +697,7 @@ def test_oembed_photo(self):
         self.assertEqual(channel.code, 200)
         body = channel.json_body
         self.assertEqual(body["og:url"], "http://twitter.com/matrixdotorg/status/12345")
-        self.assertTrue(body["og:image"].startswith("mxc://"))
-        self.assertEqual(body["og:image:height"], 1)
-        self.assertEqual(body["og:image:width"], 1)
-        self.assertEqual(body["og:image:type"], "image/png")
+        self._assert_small_png(body)
 
     def test_oembed_rich(self):
         """Test an oEmbed endpoint which returns HTML content via the 'rich' type."""
@@ -820,10 +888,7 @@ def test_oembed_autodiscovery(self):
         self.assertEqual(
             body["og:url"], "http://www.twitter.com/matrixdotorg/status/12345"
         )
-        self.assertTrue(body["og:image"].startswith("mxc://"))
-        self.assertEqual(body["og:image:height"], 1)
-        self.assertEqual(body["og:image:width"], 1)
-        self.assertEqual(body["og:image:type"], "image/png")
+        self._assert_small_png(body)
 
     def _download_image(self):
         """Downloads an image into the URL cache.
diff --git a/tests/server.py b/tests/server.py
index a0cd14ea45b4..82990c2eb9df 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -313,7 +313,7 @@ def make_request(
     req = request(channel, site)
     req.content = BytesIO(content)
     # Twisted expects to be at the end of the content when parsing the request.
-    req.content.seek(SEEK_END)
+    req.content.seek(0, SEEK_END)
 
     if access_token:
         req.requestHeaders.addRawHeader(

From 02d99f044efbed3c347c19a430d56cfbb41c87a7 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Mon, 24 Jan 2022 09:38:37 -0500
Subject: [PATCH 144/474] Apply a timeout to reading the body when fetching a file. (#11784)

This prevents the URL preview code from reading a stream forever.
---
 changelog.d/11784.bugfix |  1 +
 synapse/http/client.py   | 15 ++++++++++++---
 2 files changed, 13 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/11784.bugfix

diff --git a/changelog.d/11784.bugfix b/changelog.d/11784.bugfix
new file mode 100644
index 000000000000..6569a8c2997a
--- /dev/null
+++ b/changelog.d/11784.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug that media streams could cause long-lived connections when generating URL previews.
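The patch below wraps the body read in Synapse's `timeout_deferred` helper so that a stalled media stream cannot hold a connection open forever. As a rough sketch of the same idea in stock Twisted (illustrative only; Synapse's helper differs in details such as log-context handling):

```python
from twisted.internet import defer, reactor

d: defer.Deferred = defer.Deferred()
# Errback with defer.TimeoutError if nothing fires `d` within 30 seconds.
d.addTimeout(30, reactor)
d.addErrback(lambda failure: print("body read timed out:", failure.value))
```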
diff --git a/synapse/http/client.py b/synapse/http/client.py index ca33b45cb21f..743a7ffcb159 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -731,15 +731,24 @@ async def get_file( # straight back in again try: - length = await make_deferred_yieldable( - read_body_with_max_size(response, output_stream, max_size) - ) + d = read_body_with_max_size(response, output_stream, max_size) + + # Ensure that the body is not read forever. + d = timeout_deferred(d, 30, self.hs.get_reactor()) + + length = await make_deferred_yieldable(d) except BodyExceededMaxSize: raise SynapseError( HTTPStatus.BAD_GATEWAY, "Requested file is too large > %r bytes" % (max_size,), Codes.TOO_LARGE, ) + except defer.TimeoutError: + raise SynapseError( + HTTPStatus.BAD_GATEWAY, + "Requested file took too long to download", + Codes.TOO_LARGE, + ) except Exception as e: raise SynapseError( HTTPStatus.BAD_GATEWAY, ("Failed to download remote body: %s" % e) From 2d327d25bf5d48460714ca8860dca881b6bfa72c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 24 Jan 2022 18:31:23 +0000 Subject: [PATCH 145/474] Skip the initial amd64-only Docker build (#11810) PyNaCl's recent 1.5.0 release on PyPi includes arm64 wheels, which means our arm64 docker images now build in a sensible amount of time, so we can skip the amd64-only build. --- .github/workflows/docker.yml | 14 ++------------ changelog.d/11810.misc | 1 + 2 files changed, 3 insertions(+), 12 deletions(-) create mode 100644 changelog.d/11810.misc diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 3276d1e122e2..124b17458f45 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -34,6 +34,8 @@ jobs: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} + # TODO: consider using https://github.com/docker/metadata-action instead of this + # custom magic - name: Calculate docker image tag id: set-tag run: | @@ -53,18 +55,6 @@ jobs: esac echo "::set-output name=tag::$tag" - # for release builds, we want to get the amd64 image out asap, so first - # we do an amd64-only build, before following up with a multiarch build. - - name: Build and push amd64 - uses: docker/build-push-action@v2 - if: "${{ startsWith(github.ref, 'refs/tags/v') }}" - with: - push: true - labels: "gitsha1=${{ github.sha }}" - tags: "matrixdotorg/synapse:${{ steps.set-tag.outputs.tag }}" - file: "docker/Dockerfile" - platforms: linux/amd64 - - name: Build and push all platforms uses: docker/build-push-action@v2 with: diff --git a/changelog.d/11810.misc b/changelog.d/11810.misc new file mode 100644 index 000000000000..5579b85979b1 --- /dev/null +++ b/changelog.d/11810.misc @@ -0,0 +1 @@ +Docker: skip the initial amd64-only build and go straight to multiarch. From 15c2a6a1067f57707688cc59f2efa7ff0000dcd2 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 25 Jan 2022 06:07:10 -0500 Subject: [PATCH 146/474] Ignore the jsonschema type. (#11817) --- changelog.d/11817.misc | 1 + synapse/events/validator.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11817.misc diff --git a/changelog.d/11817.misc b/changelog.d/11817.misc new file mode 100644 index 000000000000..bd29d8d6ebf3 --- /dev/null +++ b/changelog.d/11817.misc @@ -0,0 +1 @@ +Compatibility with updated type hints for jsonschema 4.4.0. 
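Context for the annotation below: `jsonschema.validators.validator_for` returns the validator class matching a schema's `$schema` key (falling back to the library's latest supported draft), which is what the updated type hints trip over. A small sketch of that API, using a made-up schema rather than Synapse's `POWER_LEVELS_SCHEMA`:

```python
import jsonschema

# Invented schema for illustration.
schema = {"type": "object", "properties": {"ban": {"type": "integer"}}}

# Pick the validator class declared by the schema's $schema key.
ValidatorClass = jsonschema.validators.validator_for(schema)
ValidatorClass.check_schema(schema)           # SchemaError if the schema is malformed
ValidatorClass(schema).validate({"ban": 50})  # ValidationError on non-conforming input
```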
diff --git a/synapse/events/validator.py b/synapse/events/validator.py index cf8693496846..424557301708 100644 --- a/synapse/events/validator.py +++ b/synapse/events/validator.py @@ -246,7 +246,9 @@ def _ensure_state_event(self, event: Union[EventBase, EventBuilder]) -> None: # This could return something newer than Draft 7, but that's the current "latest" # validator. -def _create_power_level_validator() -> jsonschema.Draft7Validator: +# +# See https://github.com/python/typeshed/issues/7028 for the ignored return type. +def _create_power_level_validator() -> jsonschema.Draft7Validator: # type: ignore[valid-type] validator = jsonschema.validators.validator_for(POWER_LEVELS_SCHEMA) # by default jsonschema does not consider a frozendict to be an object so From 874365fc05349d5fbbc613eea25ddca6e1b41f35 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 25 Jan 2022 11:30:02 +0000 Subject: [PATCH 147/474] 1.51.0 --- CHANGES.md | 6 ++++++ debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index ce3d657c98cc..aebda124562f 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 1.51.0 (2022-01-25) +=========================== + +No significant changes since 1.51.0rc2. + + Synapse 1.51.0rc2 (2022-01-24) ============================== diff --git a/debian/changelog b/debian/changelog index a07bf4f2efb1..7b51a0b2d6fa 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.51.0) stable; urgency=medium + + * New synapse release 1.51.0. + + -- Synapse Packaging team Tue, 25 Jan 2022 11:28:51 +0000 + matrix-synapse-py3 (1.51.0~rc2) stable; urgency=medium * New synapse release 1.51.0~rc2. diff --git a/synapse/__init__.py b/synapse/__init__.py index ef859c9deddb..26bdfec33ae3 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.51.0rc2" +__version__ = "1.51.0" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 6e9e923ed5c038f845621e99f9e20813438c1f7e Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 25 Jan 2022 11:41:31 +0000 Subject: [PATCH 148/474] Call out deprecation --- CHANGES.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index aebda124562f..651d1c5a6d78 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -3,6 +3,7 @@ Synapse 1.51.0 (2022-01-25) No significant changes since 1.51.0rc2. +Synapse 1.51 deprecates `webclient` listeners and non-HTTP(S) `web_client_location`s. Support for these will be removed in Synapse 1.53.0, at which point Synapse will not be capable of directly serving a web client for Matrix. Synapse 1.51.0rc2 (2022-01-24) ============================== @@ -61,7 +62,7 @@ Deprecations and Removals - Remove the unstable `/send_relation` endpoint. ([\#11682](https://github.com/matrix-org/synapse/issues/11682)) - Remove `python_twisted_reactor_pending_calls` Prometheus metric. ([\#11724](https://github.com/matrix-org/synapse/issues/11724)) - Remove the `password_hash` field from the response dictionaries of the [Users Admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html). ([\#11576](https://github.com/matrix-org/synapse/issues/11576)) -- Deprecate support for `webclient` listeners and non-HTTP(S) `web_client_location` configuration. 
([\#11774](https://github.com/matrix-org/synapse/issues/11774), [\#11783](https://github.com/matrix-org/synapse/issues/11783))
+- **Deprecate support for `webclient` listeners and non-HTTP(S) `web_client_location` configuration. ([\#11774](https://github.com/matrix-org/synapse/issues/11774), [\#11783](https://github.com/matrix-org/synapse/issues/11783))**
 
 
 Internal Changes

From 343d4f13d8cb9f813097267216eaa0a74935b4ca Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Tue, 25 Jan 2022 11:42:32 +0000
Subject: [PATCH 149/474] Correct version number

---
 CHANGES.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGES.md b/CHANGES.md
index 651d1c5a6d78..d3dcd0ed2ef4 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -3,7 +3,7 @@ Synapse 1.51.0 (2022-01-25)
 
 No significant changes since 1.51.0rc2.
 
-Synapse 1.51 deprecates `webclient` listeners and non-HTTP(S) `web_client_location`s. Support for these will be removed in Synapse 1.53.0, at which point Synapse will not be capable of directly serving a web client for Matrix.
+Synapse 1.51.0 deprecates `webclient` listeners and non-HTTP(S) `web_client_location`s. Support for these will be removed in Synapse 1.53.0, at which point Synapse will not be capable of directly serving a web client for Matrix.
 
 Synapse 1.51.0rc2 (2022-01-24)
 ==============================

From 0d6cfea9b867a14fa0fa885b04c8cbfdb4a7c4a9 Mon Sep 17 00:00:00 2001
From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com>
Date: Tue, 25 Jan 2022 13:06:29 +0100
Subject: [PATCH 150/474] Add admin API to reset connection timeouts for remote server (#11639)

* Fix get federation status of destination if no error occurred

---
 changelog.d/11639.feature                     |  1 +
 .../administration/admin_api/federation.md    | 40 +++++++++++++-
 .../federation/transport/server/__init__.py   | 16 +++---
 synapse/federation/transport/server/_base.py  | 14 +++--
 .../federation/transport/server/federation.py | 24 ++++++--
 .../transport/server/groups_local.py          |  8 ++-
 .../transport/server/groups_server.py         |  8 ++-
 synapse/rest/admin/__init__.py                |  6 +-
 synapse/rest/admin/federation.py              | 44 ++++++++++++++-
 tests/rest/admin/test_federation.py           | 55 +++++++++++++++++--
 10 files changed, 183 insertions(+), 33 deletions(-)
 create mode 100644 changelog.d/11639.feature

diff --git a/changelog.d/11639.feature b/changelog.d/11639.feature
new file mode 100644
index 000000000000..e9f6704f7a1f
--- /dev/null
+++ b/changelog.d/11639.feature
@@ -0,0 +1 @@
+Add admin API to reset connection timeouts for remote server.
\ No newline at end of file
diff --git a/docs/usage/administration/admin_api/federation.md b/docs/usage/administration/admin_api/federation.md
index 8f9535f57b09..5e609561a64d 100644
--- a/docs/usage/administration/admin_api/federation.md
+++ b/docs/usage/administration/admin_api/federation.md
@@ -86,7 +86,7 @@ The following fields are returned in the JSON response body:
 - `next_token`: string representing a positive integer - Indication for pagination. See above.
 - `total` - integer - Total number of destinations.
 
-# Destination Details API
+## Destination Details API
 
 This API gets the retry timing info for a specific remote server.
 
@@ -108,7 +108,45 @@ A response body like the following is returned:
 }
 ```
 
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `destination` - Name of the remote server.
+
 **Response**
 
 The response fields are the same as in the
 `destinations` array in [List of destinations](#list-of-destinations) response.
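For illustration, a request to the endpoint documented above might look like the following; the homeserver address is hypothetical and `<admin_access_token>` stands in for a real admin access token:

```sh
curl --header "Authorization: Bearer <admin_access_token>" \
    "http://localhost:8008/_synapse/admin/v1/federation/destinations/matrix.org"
```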
+ +## Reset connection timeout + +Synapse makes federation requests to other homeservers. If a federation request fails, +Synapse will mark the destination homeserver as offline, preventing any future requests +to that server for a "cooldown" period. This period grows over time if the server +continues to fail its responses +([exponential backoff](https://en.wikipedia.org/wiki/Exponential_backoff)). + +Admins can cancel the cooldown period with this API. + +This API resets the retry timing for a specific remote server and tries to connect to +the remote server again. It does not wait for the next `retry_interval`. +The connection must have previously run into an error and `retry_last_ts` +([Destination Details API](#destination-details-api)) must not be equal to `0`. + +The connection attempt is carried out in the background and can take a while +even if the API already returns the http status 200. + +The API is: + +``` +POST /_synapse/admin/v1/federation/destinations//reset_connection + +{} +``` + +**Parameters** + +The following parameters should be set in the URL: + +- `destination` - Name of the remote server. diff --git a/synapse/federation/transport/server/__init__.py b/synapse/federation/transport/server/__init__.py index 77b936361a4a..db4fe2c79803 100644 --- a/synapse/federation/transport/server/__init__.py +++ b/synapse/federation/transport/server/__init__.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import Dict, Iterable, List, Optional, Tuple, Type +from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Type from typing_extensions import Literal @@ -36,17 +36,19 @@ parse_integer_from_args, parse_string_from_args, ) -from synapse.server import HomeServer from synapse.types import JsonDict, ThirdPartyInstanceID from synapse.util.ratelimitutils import FederationRateLimiter +if TYPE_CHECKING: + from synapse.server import HomeServer + logger = logging.getLogger(__name__) class TransportLayerServer(JsonResource): """Handles incoming federation HTTP requests""" - def __init__(self, hs: HomeServer, servlet_groups: Optional[List[str]] = None): + def __init__(self, hs: "HomeServer", servlet_groups: Optional[List[str]] = None): """Initialize the TransportLayerServer Will by default register all servlets. 
For custom behaviour, pass in @@ -113,7 +115,7 @@ class PublicRoomList(BaseFederationServlet): def __init__( self, - hs: HomeServer, + hs: "HomeServer", authenticator: Authenticator, ratelimiter: FederationRateLimiter, server_name: str, @@ -203,7 +205,7 @@ class FederationGroupsRenewAttestaionServlet(BaseFederationServlet): def __init__( self, - hs: HomeServer, + hs: "HomeServer", authenticator: Authenticator, ratelimiter: FederationRateLimiter, server_name: str, @@ -251,7 +253,7 @@ class OpenIdUserInfo(BaseFederationServlet): def __init__( self, - hs: HomeServer, + hs: "HomeServer", authenticator: Authenticator, ratelimiter: FederationRateLimiter, server_name: str, @@ -297,7 +299,7 @@ async def on_GET( def register_servlets( - hs: HomeServer, + hs: "HomeServer", resource: HttpServer, authenticator: Authenticator, ratelimiter: FederationRateLimiter, diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py index da1fbf8b6361..2ca7c0583501 100644 --- a/synapse/federation/transport/server/_base.py +++ b/synapse/federation/transport/server/_base.py @@ -15,7 +15,7 @@ import functools import logging import re -from typing import Any, Awaitable, Callable, Optional, Tuple, cast +from typing import TYPE_CHECKING, Any, Awaitable, Callable, Optional, Tuple, cast from synapse.api.errors import Codes, FederationDeniedError, SynapseError from synapse.api.urls import FEDERATION_V1_PREFIX @@ -29,11 +29,13 @@ start_active_span_follows_from, whitelisted_homeserver, ) -from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util.ratelimitutils import FederationRateLimiter from synapse.util.stringutils import parse_and_validate_server_name +if TYPE_CHECKING: + from synapse.server import HomeServer + logger = logging.getLogger(__name__) @@ -46,7 +48,7 @@ class NoAuthenticationError(AuthenticationError): class Authenticator: - def __init__(self, hs: HomeServer): + def __init__(self, hs: "HomeServer"): self._clock = hs.get_clock() self.keyring = hs.get_keyring() self.server_name = hs.hostname @@ -114,11 +116,11 @@ async def authenticate_request( # alive retry_timings = await self.store.get_destination_retry_timings(origin) if retry_timings and retry_timings.retry_last_ts: - run_in_background(self._reset_retry_timings, origin) + run_in_background(self.reset_retry_timings, origin) return origin - async def _reset_retry_timings(self, origin: str) -> None: + async def reset_retry_timings(self, origin: str) -> None: try: logger.info("Marking origin %r as up", origin) await self.store.set_destination_retry_timings(origin, None, 0, 0) @@ -227,7 +229,7 @@ class BaseFederationServlet: def __init__( self, - hs: HomeServer, + hs: "HomeServer", authenticator: Authenticator, ratelimiter: FederationRateLimiter, server_name: str, diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py index beadfa422ba3..9c1ad5851f69 100644 --- a/synapse/federation/transport/server/federation.py +++ b/synapse/federation/transport/server/federation.py @@ -12,7 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging -from typing import Dict, List, Mapping, Optional, Sequence, Tuple, Type, Union +from typing import ( + TYPE_CHECKING, + Dict, + List, + Mapping, + Optional, + Sequence, + Tuple, + Type, + Union, +) from typing_extensions import Literal @@ -30,11 +40,13 @@ parse_string_from_args, parse_strings_from_args, ) -from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util.ratelimitutils import FederationRateLimiter from synapse.util.versionstring import get_version_string +if TYPE_CHECKING: + from synapse.server import HomeServer + logger = logging.getLogger(__name__) issue_8631_logger = logging.getLogger("synapse.8631_debug") @@ -47,7 +59,7 @@ class BaseFederationServerServlet(BaseFederationServlet): def __init__( self, - hs: HomeServer, + hs: "HomeServer", authenticator: Authenticator, ratelimiter: FederationRateLimiter, server_name: str, @@ -596,7 +608,7 @@ class FederationSpaceSummaryServlet(BaseFederationServlet): def __init__( self, - hs: HomeServer, + hs: "HomeServer", authenticator: Authenticator, ratelimiter: FederationRateLimiter, server_name: str, @@ -670,7 +682,7 @@ class FederationRoomHierarchyServlet(BaseFederationServlet): def __init__( self, - hs: HomeServer, + hs: "HomeServer", authenticator: Authenticator, ratelimiter: FederationRateLimiter, server_name: str, @@ -706,7 +718,7 @@ class RoomComplexityServlet(BaseFederationServlet): def __init__( self, - hs: HomeServer, + hs: "HomeServer", authenticator: Authenticator, ratelimiter: FederationRateLimiter, server_name: str, diff --git a/synapse/federation/transport/server/groups_local.py b/synapse/federation/transport/server/groups_local.py index a12cd18d5806..496472e1dcd8 100644 --- a/synapse/federation/transport/server/groups_local.py +++ b/synapse/federation/transport/server/groups_local.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict, List, Tuple, Type +from typing import TYPE_CHECKING, Dict, List, Tuple, Type from synapse.api.errors import SynapseError from synapse.federation.transport.server._base import ( @@ -19,10 +19,12 @@ BaseFederationServlet, ) from synapse.handlers.groups_local import GroupsLocalHandler -from synapse.server import HomeServer from synapse.types import JsonDict, get_domain_from_id from synapse.util.ratelimitutils import FederationRateLimiter +if TYPE_CHECKING: + from synapse.server import HomeServer + class BaseGroupsLocalServlet(BaseFederationServlet): """Abstract base class for federation servlet classes which provides a groups local handler. @@ -32,7 +34,7 @@ class BaseGroupsLocalServlet(BaseFederationServlet): def __init__( self, - hs: HomeServer, + hs: "HomeServer", authenticator: Authenticator, ratelimiter: FederationRateLimiter, server_name: str, diff --git a/synapse/federation/transport/server/groups_server.py b/synapse/federation/transport/server/groups_server.py index b30e92a5eb74..851b50152ec5 100644 --- a/synapse/federation/transport/server/groups_server.py +++ b/synapse/federation/transport/server/groups_server.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Dict, List, Tuple, Type +from typing import TYPE_CHECKING, Dict, List, Tuple, Type from typing_extensions import Literal @@ -22,10 +22,12 @@ BaseFederationServlet, ) from synapse.http.servlet import parse_string_from_args -from synapse.server import HomeServer from synapse.types import JsonDict, get_domain_from_id from synapse.util.ratelimitutils import FederationRateLimiter +if TYPE_CHECKING: + from synapse.server import HomeServer + class BaseGroupsServerServlet(BaseFederationServlet): """Abstract base class for federation servlet classes which provides a groups server handler. @@ -35,7 +37,7 @@ class BaseGroupsServerServlet(BaseFederationServlet): def __init__( self, - hs: HomeServer, + hs: "HomeServer", authenticator: Authenticator, ratelimiter: FederationRateLimiter, server_name: str, diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 465e06772b26..b1e49d51b7de 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -41,7 +41,8 @@ EventReportsRestServlet, ) from synapse.rest.admin.federation import ( - DestinationsRestServlet, + DestinationResetConnectionRestServlet, + DestinationRestServlet, ListDestinationsRestServlet, ) from synapse.rest.admin.groups import DeleteGroupAdminRestServlet @@ -267,7 +268,8 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: ListRegistrationTokensRestServlet(hs).register(http_server) NewRegistrationTokenRestServlet(hs).register(http_server) RegistrationTokenRestServlet(hs).register(http_server) - DestinationsRestServlet(hs).register(http_server) + DestinationResetConnectionRestServlet(hs).register(http_server) + DestinationRestServlet(hs).register(http_server) ListDestinationsRestServlet(hs).register(http_server) # Some servlets only get registered for the main process. diff --git a/synapse/rest/admin/federation.py b/synapse/rest/admin/federation.py index 8cd3fa189e8d..0f33f9e4da88 100644 --- a/synapse/rest/admin/federation.py +++ b/synapse/rest/admin/federation.py @@ -16,6 +16,7 @@ from typing import TYPE_CHECKING, Tuple from synapse.api.errors import Codes, NotFoundError, SynapseError +from synapse.federation.transport.server import Authenticator from synapse.http.servlet import RestServlet, parse_integer, parse_string from synapse.http.site import SynapseRequest from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin @@ -90,7 +91,7 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: return HTTPStatus.OK, response -class DestinationsRestServlet(RestServlet): +class DestinationRestServlet(RestServlet): """Get details of a destination. This needs user to have administrator access in Synapse. @@ -145,3 +146,44 @@ async def on_GET( } return HTTPStatus.OK, response + + +class DestinationResetConnectionRestServlet(RestServlet): + """Reset destinations' connection timeouts and wake it up. + This needs user to have administrator access in Synapse. + + POST /_synapse/admin/v1/federation/destinations//reset_connection + {} + + returns: + 200 OK otherwise an error. 
+ """ + + PATTERNS = admin_patterns( + "/federation/destinations/(?P[^/]+)/reset_connection$" + ) + + def __init__(self, hs: "HomeServer"): + self._auth = hs.get_auth() + self._store = hs.get_datastore() + self._authenticator = Authenticator(hs) + + async def on_POST( + self, request: SynapseRequest, destination: str + ) -> Tuple[int, JsonDict]: + await assert_requester_is_admin(self._auth, request) + + if not await self._store.is_destination_known(destination): + raise NotFoundError("Unknown destination") + + retry_timings = await self._store.get_destination_retry_timings(destination) + if not (retry_timings and retry_timings.retry_last_ts): + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "The retry timing does not need to be reset for this destination.", + ) + + # reset timings and wake up + await self._authenticator.reset_retry_timings(destination) + + return HTTPStatus.OK, {} diff --git a/tests/rest/admin/test_federation.py b/tests/rest/admin/test_federation.py index b70350b6f1d7..e2d3cff2a3d8 100644 --- a/tests/rest/admin/test_federation.py +++ b/tests/rest/admin/test_federation.py @@ -43,11 +43,15 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: @parameterized.expand( [ - ("/_synapse/admin/v1/federation/destinations",), - ("/_synapse/admin/v1/federation/destinations/dummy",), + ("GET", "/_synapse/admin/v1/federation/destinations"), + ("GET", "/_synapse/admin/v1/federation/destinations/dummy"), + ( + "POST", + "/_synapse/admin/v1/federation/destinations/dummy/reset_connection", + ), ] ) - def test_requester_is_no_admin(self, url: str) -> None: + def test_requester_is_no_admin(self, method: str, url: str) -> None: """ If the user is not a server admin, an error 403 is returned. """ @@ -56,7 +60,7 @@ def test_requester_is_no_admin(self, url: str) -> None: other_user_tok = self.login("user", "pass") channel = self.make_request( - "GET", + method, url, content={}, access_token=other_user_tok, @@ -120,6 +124,16 @@ def test_invalid_parameter(self) -> None: self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) + # invalid destination + channel = self.make_request( + "POST", + self.url + "/dummy/reset_connection", + access_token=self.admin_user_tok, + ) + + self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) + self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) + def test_limit(self) -> None: """ Testing list of destinations with limit @@ -444,6 +458,39 @@ def test_get_single_destination_no_retry_timings(self) -> None: self.assertIsNone(channel.json_body["failure_ts"]) self.assertIsNone(channel.json_body["last_successful_stream_ordering"]) + def test_destination_reset_connection(self) -> None: + """Reset timeouts and wake up destination.""" + self._create_destination("sub0.example.com", 100, 100, 100) + + channel = self.make_request( + "POST", + self.url + "/sub0.example.com/reset_connection", + access_token=self.admin_user_tok, + ) + + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + + retry_timings = self.get_success( + self.store.get_destination_retry_timings("sub0.example.com") + ) + self.assertIsNone(retry_timings) + + def test_destination_reset_connection_not_required(self) -> None: + """Try to reset timeouts of a destination with no timeouts and get an error.""" + self._create_destination("sub0.example.com", None, 0, 0) + + channel = self.make_request( + "POST", + self.url + 
"/sub0.example.com/reset_connection", + access_token=self.admin_user_tok, + ) + + self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual( + "The retry timing does not need to be reset for this destination.", + channel.json_body["error"], + ) + def _create_destination( self, destination: str, From 105fbce55c4fe33298e768d8becee249f5f500b8 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 25 Jan 2022 12:28:30 +0000 Subject: [PATCH 151/474] Point to upgrade notes in changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index d3dcd0ed2ef4..1334bd68fb38 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -3,7 +3,7 @@ Synapse 1.51.0 (2022-01-25) No significant changes since 1.51.0rc2. -Synapse 1.51.0 deprecates `webclient` listeners and non-HTTP(S) `web_client_location`s. Support for these will be removed in Synapse 1.53.0, at which point Synapse will not be capable of directly serving a web client for Matrix. +Synapse 1.51.0 deprecates `webclient` listeners and non-HTTP(S) `web_client_location`s. Support for these will be removed in Synapse 1.53.0, at which point Synapse will not be capable of directly serving a web client for Matrix. See the [upgrade notes](https://matrix-org.github.io/synapse/develop/upgrade#upgrading-to-v1510). Synapse 1.51.0rc2 (2022-01-24) ============================== From 4e09d727b699c3bdf6b0288e9436f472631c01b8 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 25 Jan 2022 12:53:29 +0000 Subject: [PATCH 152/474] Use changelog from develop It had already accounted for 1.50.2 (ordered chronologically rather than sem-ver-ically); it just seems this wasn't merged into master when we released 1.50.2. --- CHANGES.md | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index ba76b5017e9b..37b9e6bb9613 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -14,6 +14,17 @@ Bugfixes - Fix a bug introduced in Synapse 1.40.0 that caused Synapse to fail to process incoming federation traffic after handling a large amount of events in a v1 room. ([\#11806](https://github.com/matrix-org/synapse/issues/11806)) +Synapse 1.50.2 (2022-01-24) +=========================== + +This release includes the same bugfix as Synapse 1.51.0rc2. + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.40.0 that caused Synapse to fail to process incoming federation traffic after handling a large amount of events in a v1 room. ([\#11806](https://github.com/matrix-org/synapse/issues/11806)) + + Synapse 1.51.0rc1 (2022-01-21) ============================== @@ -89,15 +100,6 @@ Internal Changes - Add some comments and type annotations for `_update_outliers_txn`. ([\#11776](https://github.com/matrix-org/synapse/issues/11776)) -Synapse 1.50.2 (2022-01-24) -=========================== - -Bugfixes --------- - -- Backport the sole fix from v1.51.0rc2. This fixes a bug introduced in Synapse 1.40.0 that caused Synapse to fail to process incoming federation traffic after handling a large amount of events in a v1 room. 
([\#11806](https://github.com/matrix-org/synapse/issues/11806)) - - Synapse 1.50.1 (2022-01-18) =========================== From 4210143f535d0bb0df5e3836bb3f6b0857631b46 Mon Sep 17 00:00:00 2001 From: Forest Johnson Date: Tue, 25 Jan 2022 14:09:56 +0000 Subject: [PATCH 153/474] Docs: add missing PR submission process how-tos (#11821) * Docs: add missing PR submission process how-tos The documentation says that in order to submit a pull request you have to run the linter and links to [Run the linters](https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#run-the-linters). IMO "Run the linters" should explain that development dependencies are a pre-requisite. I also included `pip install wheel` which I had to run inside my virtual environment on ubuntu before I `pip install -e ".[all,dev]"` would succeed. --- changelog.d/11821.doc | 1 + docs/development/contributing_guide.md | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11821.doc diff --git a/changelog.d/11821.doc b/changelog.d/11821.doc new file mode 100644 index 000000000000..a16a6ef956e2 --- /dev/null +++ b/changelog.d/11821.doc @@ -0,0 +1 @@ +Add missing steps to the contribution submission process in the documentation. Contributed by @sequentialread. diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index c14298169351..8448685952dc 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -55,6 +55,7 @@ setup a *virtualenv*, as follows: cd path/where/you/have/cloned/the/repository python3 -m venv ./env source ./env/bin/activate +pip install wheel pip install -e ".[all,dev]" pip install tox ``` @@ -116,7 +117,7 @@ The linters look at your code and do two things: - ensure that your code follows the coding style adopted by the project; - catch a number of errors in your code. -They're pretty fast, don't hesitate! +The linter takes no time at all to run as soon as you've [downloaded the dependencies into your python virtual environment](#4-install-the-dependencies). ```sh source ./env/bin/activate From fc8598bc87d5bcc7e8526492f309e73c8dcff3f6 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 25 Jan 2022 14:11:13 +0000 Subject: [PATCH 154/474] Minor updates, and docs, for schema delta files (#11823) * Make functions in python deltas optional It's annoying to always have to write stubs for these. * Documentation for delta files * changelog --- changelog.d/11823.misc | 1 + docs/development/database_schema.md | 54 +++++++++++++++++++++++++++++ synapse/storage/prepare_database.py | 9 +++-- 3 files changed, 61 insertions(+), 3 deletions(-) create mode 100644 changelog.d/11823.misc diff --git a/changelog.d/11823.misc b/changelog.d/11823.misc new file mode 100644 index 000000000000..2d153eae4ade --- /dev/null +++ b/changelog.d/11823.misc @@ -0,0 +1 @@ +Minor updates and documentation for database schema delta files. diff --git a/docs/development/database_schema.md b/docs/development/database_schema.md index 256a62921059..a767d3af9fd3 100644 --- a/docs/development/database_schema.md +++ b/docs/development/database_schema.md @@ -96,6 +96,60 @@ Ensure postgres is installed, then run: NB at the time of writing, this script predates the split into separate `state`/`main` databases so will require updates to handle that correctly. +## Delta files + +Delta files define the steps required to upgrade the database from an earlier version. 
+They can be written as either a file containing a series of SQL statements, or a Python
+module.
+
+Synapse remembers which delta files it has applied to a database (they are stored in the
+`applied_schema_deltas` table) and will not re-apply them (even if a given file is
+subsequently updated).
+
+Delta files should be placed in a directory named `synapse/storage/schema/<database>/delta/<version>/`.
+They are applied in alphanumeric order, so by convention the first two characters
+of the filename should be an integer such as `01`, to put the file in the right order.
+
+### SQL delta files
+
+These should be named `*.sql`, or — for changes which should only be applied for a
+given database engine — `*.sql.postgres` or `*.sql.sqlite`. For example, a delta which
+adds a new column to the `foo` table might be called `01add_bar_to_foo.sql`.
+
+Note that our SQL parser is a bit simple - it understands comments (`--` and `/*...*/`),
+but complex statements which require a `;` in the middle of them (such as `CREATE
+TRIGGER`) are beyond it and you'll have to use a Python delta file.
+
+### Python delta files
+
+For more flexibility, a delta file can take the form of a Python module. These should
+be named `*.py`. Note that database-engine-specific modules are not supported here –
+instead you can write `if isinstance(database_engine, PostgresEngine)` or similar.
+
+A Python delta module should define either or both of the following functions:
+
+```python
+import synapse.config.homeserver
+import synapse.storage.engines
+import synapse.storage.types
+
+
+def run_create(
+    cur: synapse.storage.types.Cursor,
+    database_engine: synapse.storage.engines.BaseDatabaseEngine,
+) -> None:
+    """Called whenever an existing or new database is to be upgraded"""
+    ...
+
+def run_upgrade(
+    cur: synapse.storage.types.Cursor,
+    database_engine: synapse.storage.engines.BaseDatabaseEngine,
+    config: synapse.config.homeserver.HomeServerConfig,
+) -> None:
+    """Called whenever an existing database is to be upgraded."""
+    ...
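+
+# Note that both functions are optional: prepare_database only invokes
+# run_create / run_upgrade when the module defines them, so empty stubs
+# are no longer needed.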
+``` + ## Boolean columns Boolean columns require special treatment, since SQLite treats booleans the diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index 1823e1872049..e3153d1a4aa9 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -499,9 +499,12 @@ def _upgrade_existing_database( module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) # type: ignore - logger.info("Running script %s", relative_path) - module.run_create(cur, database_engine) # type: ignore - if not is_empty: + if hasattr(module, "run_create"): + logger.info("Running %s:run_create", relative_path) + module.run_create(cur, database_engine) # type: ignore + + if not is_empty and hasattr(module, "run_upgrade"): + logger.info("Running %s:run_upgrade", relative_path) module.run_upgrade(cur, database_engine, config=config) # type: ignore elif ext == ".pyc" or file_name == "__pycache__": # Sometimes .pyc files turn up anyway even though we've From b59d285f7c2ffce56a273686e63bcb34a461317b Mon Sep 17 00:00:00 2001 From: Nick Barrett Date: Tue, 25 Jan 2022 14:14:46 +0000 Subject: [PATCH 155/474] Db txn set isolation level (#11799) Co-authored-by: Brendan Abolivier --- changelog.d/11799.misc | 1 + synapse/storage/database.py | 10 ++++++++++ synapse/storage/engines/_base.py | 19 ++++++++++++++++++- synapse/storage/engines/postgres.py | 29 +++++++++++++++++++++++++---- synapse/storage/engines/sqlite.py | 7 +++++++ 5 files changed, 61 insertions(+), 5 deletions(-) create mode 100644 changelog.d/11799.misc diff --git a/changelog.d/11799.misc b/changelog.d/11799.misc new file mode 100644 index 000000000000..5c3b2bcaf4e5 --- /dev/null +++ b/changelog.d/11799.misc @@ -0,0 +1 @@ +Preparation for reducing Postgres serialization errors: allow setting transaction isolation level. Contributed by Nick @ Beeper. diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 57cc1d76e02f..7455326ed331 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -702,6 +702,7 @@ async def runInteraction( func: Callable[..., R], *args: Any, db_autocommit: bool = False, + isolation_level: Optional[int] = None, **kwargs: Any, ) -> R: """Starts a transaction on the database and runs a given function @@ -724,6 +725,7 @@ async def runInteraction( called multiple times if the transaction is retried, so must correctly handle that case. + isolation_level: Set the server isolation level for this transaction. args: positional args to pass to `func` kwargs: named args to pass to `func` @@ -763,6 +765,7 @@ async def runWithConnection( func: Callable[..., R], *args: Any, db_autocommit: bool = False, + isolation_level: Optional[int] = None, **kwargs: Any, ) -> R: """Wraps the .runWithConnection() method on the underlying db_pool. @@ -775,6 +778,7 @@ async def runWithConnection( db_autocommit: Whether to run the function in "autocommit" mode, i.e. outside of a transaction. This is useful for transaction that are only a single query. Currently only affects postgres. + isolation_level: Set the server isolation level for this transaction. 
kwargs: named args to pass to `func` Returns: @@ -834,6 +838,10 @@ def inner_func(conn, *args, **kwargs): try: if db_autocommit: self.engine.attempt_to_set_autocommit(conn, True) + if isolation_level is not None: + self.engine.attempt_to_set_isolation_level( + conn, isolation_level + ) db_conn = LoggingDatabaseConnection( conn, self.engine, "runWithConnection" @@ -842,6 +850,8 @@ def inner_func(conn, *args, **kwargs): finally: if db_autocommit: self.engine.attempt_to_set_autocommit(conn, False) + if isolation_level: + self.engine.attempt_to_set_isolation_level(conn, None) return await make_deferred_yieldable( self._db_pool.runWithConnection(inner_func, *args, **kwargs) diff --git a/synapse/storage/engines/_base.py b/synapse/storage/engines/_base.py index 20cd63c3300a..143cd98ca292 100644 --- a/synapse/storage/engines/_base.py +++ b/synapse/storage/engines/_base.py @@ -12,11 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. import abc -from typing import Generic, TypeVar +from enum import IntEnum +from typing import Generic, Optional, TypeVar from synapse.storage.types import Connection +class IsolationLevel(IntEnum): + READ_COMMITTED: int = 1 + REPEATABLE_READ: int = 2 + SERIALIZABLE: int = 3 + + class IncorrectDatabaseSetup(RuntimeError): pass @@ -109,3 +116,13 @@ def attempt_to_set_autocommit(self, conn: Connection, autocommit: bool): commit/rollback the connections. """ ... + + @abc.abstractmethod + def attempt_to_set_isolation_level( + self, conn: Connection, isolation_level: Optional[int] + ): + """Attempt to set the connections isolation level. + + Note: This has no effect on SQLite3, as transactions are SERIALIZABLE by default. + """ + ... diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index b3d71f661c03..808342fafbbc 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -13,8 +13,13 @@ # limitations under the License. 
 import logging
+from typing import Mapping, Optional
 
-from synapse.storage.engines._base import BaseDatabaseEngine, IncorrectDatabaseSetup
+from synapse.storage.engines._base import (
+    BaseDatabaseEngine,
+    IncorrectDatabaseSetup,
+    IsolationLevel,
+)
 from synapse.storage.types import Connection
 
 logger = logging.getLogger(__name__)
@@ -34,6 +39,15 @@ def _disable_bytes_adapter(_):
         self.synchronous_commit = database_config.get("synchronous_commit", True)
         self._version = None  # unknown as yet
 
+        self.isolation_level_map: Mapping[int, int] = {
+            IsolationLevel.READ_COMMITTED: self.module.extensions.ISOLATION_LEVEL_READ_COMMITTED,
+            IsolationLevel.REPEATABLE_READ: self.module.extensions.ISOLATION_LEVEL_REPEATABLE_READ,
+            IsolationLevel.SERIALIZABLE: self.module.extensions.ISOLATION_LEVEL_SERIALIZABLE,
+        }
+        self.default_isolation_level = (
+            self.module.extensions.ISOLATION_LEVEL_REPEATABLE_READ
+        )
+
     @property
     def single_threaded(self) -> bool:
         return False
@@ -104,9 +118,7 @@ def convert_param_style(self, sql):
         return sql.replace("?", "%s")
 
     def on_new_connection(self, db_conn):
-        db_conn.set_isolation_level(
-            self.module.extensions.ISOLATION_LEVEL_REPEATABLE_READ
-        )
+        db_conn.set_isolation_level(self.default_isolation_level)
 
         # Set the bytea output to escape, vs the default of hex
         cursor = db_conn.cursor()
@@ -175,3 +187,12 @@ def in_transaction(self, conn: Connection) -> bool:
 
     def attempt_to_set_autocommit(self, conn: Connection, autocommit: bool):
         return conn.set_session(autocommit=autocommit)  # type: ignore
+
+    def attempt_to_set_isolation_level(
+        self, conn: Connection, isolation_level: Optional[int]
+    ):
+        if isolation_level is None:
+            isolation_level = self.default_isolation_level
+        else:
+            isolation_level = self.isolation_level_map[isolation_level]
+        return conn.set_isolation_level(isolation_level)  # type: ignore
diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py
index 70d17d4f2cd8..6c19e55999bd 100644
--- a/synapse/storage/engines/sqlite.py
+++ b/synapse/storage/engines/sqlite.py
@@ -15,6 +15,7 @@
 import struct
 import threading
 import typing
+from typing import Optional
 
 from synapse.storage.engines import BaseDatabaseEngine
 from synapse.storage.types import Connection
@@ -122,6 +123,12 @@ def attempt_to_set_autocommit(self, conn: Connection, autocommit: bool):
         # set the connection to autocommit mode.
         pass
 
+    def attempt_to_set_isolation_level(
+        self, conn: Connection, isolation_level: Optional[int]
+    ):
+        # All transactions are SERIALIZABLE by default in SQLite
+        pass
+
 
 # Following functions taken from: https://github.com/coleifer/peewee

From 1d5f7b2cc622a6ed91b9bf5b61c1c243b015c495 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Tue, 25 Jan 2022 14:35:35 +0000
Subject: [PATCH 156/474] Log modules at startup (#11813)

---
 changelog.d/11813.misc | 1 +
 synapse/app/_base.py   | 3 ++-
 2 files changed, 3 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/11813.misc

diff --git a/changelog.d/11813.misc b/changelog.d/11813.misc
new file mode 100644
index 000000000000..f90d183b45cb
--- /dev/null
+++ b/changelog.d/11813.misc
@@ -0,0 +1 @@
+Log module names at startup.
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index e5ee03b79ff9..9efdd071cce3 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -435,7 +435,8 @@ def run_sighup(*args: Any, **kwargs: Any) -> None:
     # before we start the listeners.
module_api = hs.get_module_api() for module, config in hs.config.modules.loaded_modules: - module(config=config, api=module_api) + m = module(config=config, api=module_api) + logger.info("Loaded module %s", m) load_legacy_spam_checkers(hs) load_legacy_third_party_event_rules(hs) From 0938f32e937f6e37002fc87c711ec7a5515eb880 Mon Sep 17 00:00:00 2001 From: kegsay Date: Tue, 25 Jan 2022 15:05:22 +0000 Subject: [PATCH 157/474] CI: run Complement on the VM, not inside Docker (#11811) * CI: run Complement on the VM, not inside Docker This requires https://github.com/matrix-org/complement/pull/289 We now run Complement on the VM instead of inside a Docker container. This is to allow Complement to bind to any high-numbered port when it starts up its own federation servers. We want to do this to allow for more concurrency when running complement tests. Previously, Complement only ever bound to `:8448` when running its own federation server. This prevented multiple federation tests running at the same time as they would fight each other on the port. This did however allow Complement to run in Docker, as the host could just port forward `:8448` to allow homeserver containers to communicate to Complement. Now that we are using random ports however, we cannot use Docker to run Complement. This ends up being a good thing because: - Running Complement tests locally is closer to how they run in CI. - Allows the `CI` env var to be removed in Complement. - Slightly speeds up runs as we don't need to pull down the Complement image prior to running tests. This assumes GHA caches actions sensibly. * Changelog * Full stop * Update .github/workflows/tests.yml Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> * Review comments * Update .github/workflows/tests.yml Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- .github/workflows/tests.yml | 28 ++++++++++++++++++---------- changelog.d/11811.misc | 1 + 2 files changed, 19 insertions(+), 10 deletions(-) create mode 100644 changelog.d/11811.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index e47671102ec9..e0f80aaaa783 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -323,17 +323,22 @@ jobs: if: ${{ !failure() && !cancelled() }} needs: linting-done runs-on: ubuntu-latest - container: - # https://github.com/matrix-org/complement/blob/master/dockerfiles/ComplementCIBuildkite.Dockerfile - image: matrixdotorg/complement:latest - env: - CI: true - ports: - - 8448:8448 - volumes: - - /var/run/docker.sock:/var/run/docker.sock steps: + # The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement. 
+ # See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path + - name: "Set Go Version" + run: | + # Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2 + echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH + # Add the Go path to the PATH: We need this so we can call gotestfmt + echo "~/go/bin" >> $GITHUB_PATH + + - name: "Install Complement Dependencies" + run: | + sudo apt-get update && sudo apt-get install -y libolm3 libolm-dev + go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest + - name: Run actions/checkout@v2 for synapse uses: actions/checkout@v2 with: @@ -376,8 +381,11 @@ jobs: working-directory: complement/dockerfiles # Run Complement - - run: set -o pipefail && go test -v -json -tags synapse_blacklist,msc2403 ./tests/... 2>&1 | gotestfmt + - run: | + set -o pipefail + go test -v -json -tags synapse_blacklist,msc2403 ./tests/... 2>&1 | gotestfmt shell: bash + name: Run Complement Tests env: COMPLEMENT_BASE_IMAGE: complement-synapse:latest working-directory: complement diff --git a/changelog.d/11811.misc b/changelog.d/11811.misc new file mode 100644 index 000000000000..b911a2d042d2 --- /dev/null +++ b/changelog.d/11811.misc @@ -0,0 +1 @@ +Run Complement on the Github Actions VM and not inside a Docker container. \ No newline at end of file From 6a72c910f180ee8b4bd78223775af48492769472 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Tue, 25 Jan 2022 17:11:40 +0100 Subject: [PATCH 158/474] Add admin API to get a list of federated rooms (#11658) --- changelog.d/11658.feature | 1 + .../administration/admin_api/federation.md | 60 ++++ synapse/rest/admin/__init__.py | 2 + synapse/rest/admin/federation.py | 56 ++++ .../storage/databases/main/transactions.py | 48 +++ tests/rest/admin/test_federation.py | 302 ++++++++++++++++-- 6 files changed, 444 insertions(+), 25 deletions(-) create mode 100644 changelog.d/11658.feature diff --git a/changelog.d/11658.feature b/changelog.d/11658.feature new file mode 100644 index 000000000000..2ec9fb5eecf2 --- /dev/null +++ b/changelog.d/11658.feature @@ -0,0 +1 @@ +Add an admin API to get a list of rooms that federate with a given remote homeserver. \ No newline at end of file diff --git a/docs/usage/administration/admin_api/federation.md b/docs/usage/administration/admin_api/federation.md index 5e609561a64d..60cbc5265ef3 100644 --- a/docs/usage/administration/admin_api/federation.md +++ b/docs/usage/administration/admin_api/federation.md @@ -119,6 +119,66 @@ The following parameters should be set in the URL: The response fields are the same like in the `destinations` array in [List of destinations](#list-of-destinations) response. +## Destination rooms + +This API gets the rooms that federate with a specific remote server. + +The API is: + +``` +GET /_synapse/admin/v1/federation/destinations//rooms +``` + +A response body like the following is returned: + +```json +{ + "rooms":[ + { + "room_id": "!OGEhHVWSdvArJzumhm:matrix.org", + "stream_ordering": 8326 + }, + { + "room_id": "!xYvNcQPhnkrdUmYczI:matrix.org", + "stream_ordering": 93534 + } + ], + "total": 2 +} +``` + +To paginate, check for `next_token` and if present, call the endpoint again +with `from` set to the value of `next_token`. This will return a new page. + +If the endpoint does not return a `next_token` then there are no more destinations +to paginate through. 
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `destination` - Name of the remote server.
+
+The following query parameters are available:
+
+- `from` - Offset in the returned list. Defaults to `0`.
+- `limit` - Maximum number of rooms to return. Defaults to `100`.
+- `dir` - Direction of room order by `room_id`. Either `f` for forwards or `b` for
+  backwards. Defaults to `f`.
+
+**Response**
+
+The following fields are returned in the JSON response body:
+
+- `rooms` - An array of objects, each containing information about a room.
+  Room objects contain the following fields:
+  - `room_id` - string - The ID of the room.
+  - `stream_ordering` - integer - The stream ordering of the most recent
+    successfully-sent [PDU](understanding_synapse_through_grafana_graphs.md#federation)
+    to this destination in this room.
+- `next_token`: string representing a positive integer - Indication for pagination. See above.
+- `total` - integer - Total number of rooms.
+
 ## Reset connection timeout
 
 Synapse makes federation requests to other homeservers. If a federation request fails,
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index b1e49d51b7de..9be9e33c8eff 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -41,6 +41,7 @@
     EventReportsRestServlet,
 )
 from synapse.rest.admin.federation import (
+    DestinationMembershipRestServlet,
     DestinationResetConnectionRestServlet,
     DestinationRestServlet,
     ListDestinationsRestServlet,
@@ -268,6 +269,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
     ListRegistrationTokensRestServlet(hs).register(http_server)
     NewRegistrationTokenRestServlet(hs).register(http_server)
     RegistrationTokenRestServlet(hs).register(http_server)
+    DestinationMembershipRestServlet(hs).register(http_server)
     DestinationResetConnectionRestServlet(hs).register(http_server)
     DestinationRestServlet(hs).register(http_server)
     ListDestinationsRestServlet(hs).register(http_server)
diff --git a/synapse/rest/admin/federation.py b/synapse/rest/admin/federation.py
index 0f33f9e4da88..d162e0081ea1 100644
--- a/synapse/rest/admin/federation.py
+++ b/synapse/rest/admin/federation.py
@@ -148,6 +148,62 @@ async def on_GET(
         return HTTPStatus.OK, response
 
 
+class DestinationMembershipRestServlet(RestServlet):
+    """Get the list of rooms of a destination.
+    This needs the user to have administrator access in Synapse.
+
+    GET /_synapse/admin/v1/federation/destinations/<destination>/rooms?from=0&limit=10
+
+    returns:
+        200 OK with a list of rooms on success, otherwise an error.
+
+    The parameters `from` and `limit` are required only for pagination.
+    By default, a `limit` of 100 is used.
+    """
+
+    PATTERNS = admin_patterns("/federation/destinations/(?P<destination>[^/]*)/rooms$")
+
+    def __init__(self, hs: "HomeServer"):
+        self._auth = hs.get_auth()
+        self._store = hs.get_datastore()
+
+    async def on_GET(
+        self, request: SynapseRequest, destination: str
+    ) -> Tuple[int, JsonDict]:
+        await assert_requester_is_admin(self._auth, request)
+
+        if not await self._store.is_destination_known(destination):
+            raise NotFoundError("Unknown destination")
+
+        start = parse_integer(request, "from", default=0)
+        limit = parse_integer(request, "limit", default=100)
+
+        if start < 0:
+            raise SynapseError(
+                HTTPStatus.BAD_REQUEST,
+                "Query parameter from must be a string representing a positive integer.",
+                errcode=Codes.INVALID_PARAM,
+            )
+
+        if limit < 0:
+            raise SynapseError(
+                HTTPStatus.BAD_REQUEST,
+                "Query parameter limit must be a string representing a positive integer.",
+                errcode=Codes.INVALID_PARAM,
+            )
+
+        direction = parse_string(request, "dir", default="f", allowed_values=("f", "b"))
+
+        rooms, total = await self._store.get_destination_rooms_paginate(
+            destination, start, limit, direction
+        )
+        response = {"rooms": rooms, "total": total}
+        if (start + limit) < total:
+            response["next_token"] = str(start + len(rooms))
+
+        return HTTPStatus.OK, response
+
+
 class DestinationResetConnectionRestServlet(RestServlet):
     """Reset destinations' connection timeouts and wake it up.
     This needs user to have administrator access in Synapse.
 
diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py
index 4b78b4d098a9..ba79e19f7fe8 100644
--- a/synapse/storage/databases/main/transactions.py
+++ b/synapse/storage/databases/main/transactions.py
@@ -561,6 +561,54 @@ def get_destinations_paginate_txn(
             "get_destinations_paginate_txn", get_destinations_paginate_txn
         )
 
+    async def get_destination_rooms_paginate(
+        self, destination: str, start: int, limit: int, direction: str = "f"
+    ) -> Tuple[List[JsonDict], int]:
+        """Function to retrieve a paginated list of a destination's rooms.
+        This will return a json list of rooms and the
+        total number of rooms.
+
+        Args:
+            destination: the destination to query
+            start: start number to begin the query from
+            limit: number of rows to retrieve
+            direction: sort ascending or descending by room_id
+        Returns:
+            A tuple of a list of room dicts and a count of total rooms.
+        """
+
+        def get_destination_rooms_paginate_txn(
+            txn: LoggingTransaction,
+        ) -> Tuple[List[JsonDict], int]:
+
+            if direction == "b":
+                order = "DESC"
+            else:
+                order = "ASC"
+
+            sql = """
+                SELECT COUNT(*) as total_rooms
+                FROM destination_rooms
+                WHERE destination = ?
+ """ + txn.execute(sql, [destination]) + count = cast(Tuple[int], txn.fetchone())[0] + + rooms = self.db_pool.simple_select_list_paginate_txn( + txn=txn, + table="destination_rooms", + orderby="room_id", + start=start, + limit=limit, + retcols=("room_id", "stream_ordering"), + order_direction=order, + ) + return rooms, count + + return await self.db_pool.runInteraction( + "get_destination_rooms_paginate_txn", get_destination_rooms_paginate_txn + ) + async def is_destination_known(self, destination: str) -> bool: """Check if a destination is known to the server.""" result = await self.db_pool.simple_select_one_onecol( diff --git a/tests/rest/admin/test_federation.py b/tests/rest/admin/test_federation.py index e2d3cff2a3d8..71068d16cd18 100644 --- a/tests/rest/admin/test_federation.py +++ b/tests/rest/admin/test_federation.py @@ -20,7 +20,7 @@ import synapse.rest.admin from synapse.api.errors import Codes -from synapse.rest.client import login +from synapse.rest.client import login, room from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util import Clock @@ -52,9 +52,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: ] ) def test_requester_is_no_admin(self, method: str, url: str) -> None: - """ - If the user is not a server admin, an error 403 is returned. - """ + """If the user is not a server admin, an error 403 is returned.""" self.register_user("user", "pass", admin=False) other_user_tok = self.login("user", "pass") @@ -70,9 +68,7 @@ def test_requester_is_no_admin(self, method: str, url: str) -> None: self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) def test_invalid_parameter(self) -> None: - """ - If parameters are invalid, an error is returned. - """ + """If parameters are invalid, an error is returned.""" # negative limit channel = self.make_request( @@ -135,9 +131,7 @@ def test_invalid_parameter(self) -> None: self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) def test_limit(self) -> None: - """ - Testing list of destinations with limit - """ + """Testing list of destinations with limit""" number_destinations = 20 self._create_destinations(number_destinations) @@ -155,9 +149,7 @@ def test_limit(self) -> None: self._check_fields(channel.json_body["destinations"]) def test_from(self) -> None: - """ - Testing list of destinations with a defined starting point (from) - """ + """Testing list of destinations with a defined starting point (from)""" number_destinations = 20 self._create_destinations(number_destinations) @@ -175,9 +167,7 @@ def test_from(self) -> None: self._check_fields(channel.json_body["destinations"]) def test_limit_and_from(self) -> None: - """ - Testing list of destinations with a defined starting point and limit - """ + """Testing list of destinations with a defined starting point and limit""" number_destinations = 20 self._create_destinations(number_destinations) @@ -195,9 +185,7 @@ def test_limit_and_from(self) -> None: self._check_fields(channel.json_body["destinations"]) def test_next_token(self) -> None: - """ - Testing that `next_token` appears at the right place - """ + """Testing that `next_token` appears at the right place""" number_destinations = 20 self._create_destinations(number_destinations) @@ -256,9 +244,7 @@ def test_next_token(self) -> None: self.assertNotIn("next_token", channel.json_body) def test_list_all_destinations(self) -> None: - """ - List all destinations. 
- """ + """List all destinations.""" number_destinations = 5 self._create_destinations(number_destinations) @@ -277,9 +263,7 @@ def test_list_all_destinations(self) -> None: self._check_fields(channel.json_body["destinations"]) def test_order_by(self) -> None: - """ - Testing order list with parameter `order_by` - """ + """Testing order list with parameter `order_by`""" def _order_test( expected_destination_list: List[str], @@ -543,3 +527,271 @@ def _check_fields(self, content: List[JsonDict]) -> None: self.assertIn("retry_interval", c) self.assertIn("failure_ts", c) self.assertIn("last_successful_stream_ordering", c) + + +class DestinationMembershipTestCase(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastore() + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + self.dest = "sub0.example.com" + self.url = f"/_synapse/admin/v1/federation/destinations/{self.dest}/rooms" + + # Record that we successfully contacted a destination in the DB. + self.get_success( + self.store.set_destination_retry_timings(self.dest, None, 0, 0) + ) + + def test_requester_is_no_admin(self) -> None: + """If the user is not a server admin, an error 403 is returned.""" + + self.register_user("user", "pass", admin=False) + other_user_tok = self.login("user", "pass") + + channel = self.make_request( + "GET", + self.url, + access_token=other_user_tok, + ) + + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + def test_invalid_parameter(self) -> None: + """If parameters are invalid, an error is returned.""" + + # negative limit + channel = self.make_request( + "GET", + self.url + "?limit=-5", + access_token=self.admin_user_tok, + ) + + self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + + # negative from + channel = self.make_request( + "GET", + self.url + "?from=-5", + access_token=self.admin_user_tok, + ) + + self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + + # invalid search order + channel = self.make_request( + "GET", + self.url + "?dir=bar", + access_token=self.admin_user_tok, + ) + + self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + + # invalid destination + channel = self.make_request( + "GET", + "/_synapse/admin/v1/federation/destinations/%s/rooms" % ("invalid",), + access_token=self.admin_user_tok, + ) + + self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) + self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) + + def test_limit(self) -> None: + """Testing list of destinations with limit""" + + number_rooms = 5 + self._create_destination_rooms(number_rooms) + + channel = self.make_request( + "GET", + self.url + "?limit=3", + access_token=self.admin_user_tok, + ) + + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(channel.json_body["total"], number_rooms) + self.assertEqual(len(channel.json_body["rooms"]), 3) + self.assertEqual(channel.json_body["next_token"], "3") + 
self._check_fields(channel.json_body["rooms"]) + + def test_from(self) -> None: + """Testing list of rooms with a defined starting point (from)""" + + number_rooms = 10 + self._create_destination_rooms(number_rooms) + + channel = self.make_request( + "GET", + self.url + "?from=5", + access_token=self.admin_user_tok, + ) + + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(channel.json_body["total"], number_rooms) + self.assertEqual(len(channel.json_body["rooms"]), 5) + self.assertNotIn("next_token", channel.json_body) + self._check_fields(channel.json_body["rooms"]) + + def test_limit_and_from(self) -> None: + """Testing list of rooms with a defined starting point and limit""" + + number_rooms = 10 + self._create_destination_rooms(number_rooms) + + channel = self.make_request( + "GET", + self.url + "?from=3&limit=5", + access_token=self.admin_user_tok, + ) + + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(channel.json_body["total"], number_rooms) + self.assertEqual(channel.json_body["next_token"], "8") + self.assertEqual(len(channel.json_body["rooms"]), 5) + self._check_fields(channel.json_body["rooms"]) + + def test_order_direction(self) -> None: + """Testing order list with parameter `dir`""" + number_rooms = 4 + self._create_destination_rooms(number_rooms) + + # get list in forward direction + channel_asc = self.make_request( + "GET", + self.url + "?dir=f", + access_token=self.admin_user_tok, + ) + + self.assertEqual(HTTPStatus.OK, channel_asc.code, msg=channel_asc.json_body) + self.assertEqual(channel_asc.json_body["total"], number_rooms) + self.assertEqual(number_rooms, len(channel_asc.json_body["rooms"])) + self._check_fields(channel_asc.json_body["rooms"]) + + # get list in backward direction + channel_desc = self.make_request( + "GET", + self.url + "?dir=b", + access_token=self.admin_user_tok, + ) + + self.assertEqual(HTTPStatus.OK, channel_desc.code, msg=channel_desc.json_body) + self.assertEqual(channel_desc.json_body["total"], number_rooms) + self.assertEqual(number_rooms, len(channel_desc.json_body["rooms"])) + self._check_fields(channel_desc.json_body["rooms"]) + + # test that both lists have different directions + for i in range(0, number_rooms): + self.assertEqual( + channel_asc.json_body["rooms"][i]["room_id"], + channel_desc.json_body["rooms"][number_rooms - 1 - i]["room_id"], + ) + + def test_next_token(self) -> None: + """Testing that `next_token` appears at the right place""" + + number_rooms = 5 + self._create_destination_rooms(number_rooms) + + # `next_token` does not appear + # Number of results is the number of entries + channel = self.make_request( + "GET", + self.url + "?limit=5", + access_token=self.admin_user_tok, + ) + + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(channel.json_body["total"], number_rooms) + self.assertEqual(len(channel.json_body["rooms"]), number_rooms) + self.assertNotIn("next_token", channel.json_body) + + # `next_token` does not appear + # Number of max results is larger than the number of entries + channel = self.make_request( + "GET", + self.url + "?limit=6", + access_token=self.admin_user_tok, + ) + + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(channel.json_body["total"], number_rooms) + self.assertEqual(len(channel.json_body["rooms"]), number_rooms) + self.assertNotIn("next_token", channel.json_body) + + # `next_token` does appear + # Number of max results is smaller than 
the number of entries
+        channel = self.make_request(
+            "GET",
+            self.url + "?limit=4",
+            access_token=self.admin_user_tok,
+        )
+
+        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(channel.json_body["total"], number_rooms)
+        self.assertEqual(len(channel.json_body["rooms"]), 4)
+        self.assertEqual(channel.json_body["next_token"], "4")
+
+        # Check:
+        # Set `from` to the value of `next_token` to request the remaining entries
+        # `next_token` does not appear
+        channel = self.make_request(
+            "GET",
+            self.url + "?from=4",
+            access_token=self.admin_user_tok,
+        )
+
+        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(channel.json_body["total"], number_rooms)
+        self.assertEqual(len(channel.json_body["rooms"]), 1)
+        self.assertNotIn("next_token", channel.json_body)
+
+    def test_destination_rooms(self) -> None:
+        """Testing that requesting the list of rooms is successful."""
+        number_rooms = 3
+        self._create_destination_rooms(number_rooms)
+
+        channel = self.make_request(
+            "GET",
+            self.url,
+            access_token=self.admin_user_tok,
+        )
+
+        self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
+        self.assertEqual(channel.json_body["total"], number_rooms)
+        self.assertEqual(number_rooms, len(channel.json_body["rooms"]))
+        self._check_fields(channel.json_body["rooms"])
+
+    def _create_destination_rooms(self, number_rooms: int) -> None:
+        """Create a number of rooms for a destination
+
+        Args:
+            number_rooms: Number of rooms to be created
+        """
+        for _ in range(0, number_rooms):
+            room_id = self.helper.create_room_as(
+                self.admin_user, tok=self.admin_user_tok
+            )
+            self.get_success(
+                self.store.store_destination_rooms_entries((self.dest,), room_id, 1234)
+            )
+
+    def _check_fields(self, content: List[JsonDict]) -> None:
+        """Checks that the expected room attributes are present in content
+
+        Args:
+            content: List that is checked for content
+        """
+        for c in content:
+            self.assertIn("room_id", c)
+            self.assertIn("stream_ordering", c)
From b8bf6007002131b8931f4b5f49a77bf5aba85067 Mon Sep 17 00:00:00 2001
From: Shay
Date: Tue, 25 Jan 2022 10:35:18 -0800
Subject: [PATCH 159/474] Check that `gc` method is available before using in
 `synapse/app/_base` (#11816)

* add check that gc.freeze is available before calling

* newsfragment

* lint

* Update comment

Co-authored-by: Dan Callahan

Co-authored-by: Dan Callahan
---
 changelog.d/11816.misc |  1 +
 synapse/app/_base.py   | 14 ++++++++------
 2 files changed, 9 insertions(+), 6 deletions(-)
 create mode 100644 changelog.d/11816.misc

diff --git a/changelog.d/11816.misc b/changelog.d/11816.misc
new file mode 100644
index 000000000000..b1f048f7f52d
--- /dev/null
+++ b/changelog.d/11816.misc
@@ -0,0 +1 @@
+Drop support for Python 3.6, which is EOL.
\ No newline at end of file
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 9efdd071cce3..bbab8a052a93 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -468,12 +468,14 @@ def run_sighup(*args: Any, **kwargs: Any) -> None:
     # everything currently allocated are things that will be used for the
     # rest of time. Doing so means less work each GC (hopefully).
     #
-    gc.collect()
-    gc.freeze()
-
-    # Speed up shutdowns by freezing all allocated objects. This moves everything
-    # into the permanent generation and excludes them from the final GC.
-    atexit.register(gc.freeze)
+    # PyPy does not (yet?) implement gc.freeze()
+    if hasattr(gc, "freeze"):
+        gc.collect()
+        gc.freeze()
+
+        # Speed up shutdowns by freezing all allocated objects. This moves everything
+        # into the permanent generation and excludes them from the final GC.
+        atexit.register(gc.freeze)
 
 
 def setup_sentry(hs: "HomeServer") -> None:
From 74e4419eb4b18ef7d2a3b4416290a4f103042436 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Tue, 25 Jan 2022 20:29:28 +0000
Subject: [PATCH 160/474] Fix another jsonschema typecheck error (#11830)

Similar to #11817.

In `_create_power_level_validator` we
- retrieve `validator`. This is a class implementing the
  `jsonschema.protocols.Validator` interface. In other words,
  `validator: Type[jsonschema.protocols.Validator]`.
- we then create a second validator class by modifying the original
  `validator`. We return that class, which is also of type
  `Type[jsonschema.protocols.Validator]`.

So the original annotation was incorrect: it claimed we were returning an
instance of jsonschema.Draft7Validator, not the class (or a subclass) itself.

(Strictly speaking this is incorrect, because `POWER_LEVELS_SCHEMA` isn't
pinned to a particular version of JSON Schema. But there are other
complications with the type stubs if you try to fix this; I felt like the
change herein was a decent compromise that better expresses intent).

(I suspect/hope the typeshed project would welcome an effort to improve the
jsonschema stubs. Let's see if I get some spare time.)
---
 changelog.d/11817.misc      | 2 +-
 changelog.d/11830.misc      | 1 +
 synapse/events/validator.py | 6 ++----
 3 files changed, 4 insertions(+), 5 deletions(-)
 create mode 100644 changelog.d/11830.misc

diff --git a/changelog.d/11817.misc b/changelog.d/11817.misc
index bd29d8d6ebf3..3d6b2ea4d485 100644
--- a/changelog.d/11817.misc
+++ b/changelog.d/11817.misc
@@ -1 +1 @@
-Compatibility with updated type hints for jsonschema 4.4.0.
+Correct a type annotation in the event validation logic.
diff --git a/changelog.d/11830.misc b/changelog.d/11830.misc
new file mode 100644
index 000000000000..fe248d00ab94
--- /dev/null
+++ b/changelog.d/11830.misc
@@ -0,0 +1 @@
+Correct a type annotation in the event validation logic.
\ No newline at end of file
diff --git a/synapse/events/validator.py b/synapse/events/validator.py
index 424557301708..360d24274a52 100644
--- a/synapse/events/validator.py
+++ b/synapse/events/validator.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import collections.abc
-from typing import Iterable, Union
+from typing import Iterable, Type, Union
 
 import jsonschema
 
@@ -246,9 +246,7 @@ def _ensure_state_event(self, event: Union[EventBase, EventBuilder]) -> None:
 
 # This could return something newer than Draft 7, but that's the current "latest"
 # validator.
-#
-# See https://github.com/python/typeshed/issues/7028 for the ignored return type.
-def _create_power_level_validator() -> jsonschema.Draft7Validator:  # type: ignore[valid-type]
+def _create_power_level_validator() -> Type[jsonschema.Draft7Validator]:
     validator = jsonschema.validators.validator_for(POWER_LEVELS_SCHEMA)
 
     # by default jsonschema does not consider a frozendict to be an object so
From 95b3f952fa43e51feae166fa1678761c5e32d900 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Wed, 26 Jan 2022 12:02:54 +0000
Subject: [PATCH 161/474] Add a config flag to inhibit `M_USER_IN_USE` during
 registration (#11743)

This is mostly motivated by the Tchap use case, where usernames are
automatically generated from the user's email address (in a way that allows
figuring out the email address from the username).
Therefore, it's an issue if we respond to requests on /register and
/register/available with M_USER_IN_USE, because it can potentially leak
email addresses (which include the user's real name and place of work).

This commit adds a flag to inhibit the M_USER_IN_USE errors that are raised
both by /register/available, and when providing a username early into the
registration process.

This error will still be raised if the user completes the registration
process but the username conflicts. This is particularly useful when using
modules (https://github.com/matrix-org/synapse/pull/11790 adds a module
callback to set the username of users at registration) or SSO, since they
can ensure the username is unique.

More context is available in the PR that introduced this behaviour to
synapse-dinsic: matrix-org/synapse-dinsic#48 - as well as the issue in the
matrix-dinsic repo: matrix-org/matrix-dinsic#476
---
 changelog.d/11743.feature          |  1 +
 docs/sample_config.yaml            | 10 ++++++++
 synapse/config/registration.py     | 12 +++++++++
 synapse/handlers/register.py       | 26 ++++++++++---------
 synapse/rest/client/register.py    | 11 ++++++++
 tests/rest/client/test_register.py | 41 ++++++++++++++++++++++++++++++
 6 files changed, 89 insertions(+), 12 deletions(-)
 create mode 100644 changelog.d/11743.feature

diff --git a/changelog.d/11743.feature b/changelog.d/11743.feature
new file mode 100644
index 000000000000..9809f48b9688
--- /dev/null
+++ b/changelog.d/11743.feature
@@ -0,0 +1 @@
+Add a config flag to inhibit M_USER_IN_USE during registration.
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 1b86d0295d73..b38e6d6c88da 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -1428,6 +1428,16 @@ account_threepid_delegates:
 #
 #auto_join_rooms_for_guests: false
 
+# Whether to inhibit errors raised when registering a new account if the user ID
+# already exists. If turned on, requests to /register/available will always
+# show a user ID as available, and Synapse won't raise an error when starting
+# a registration with a user ID that already exists. However, Synapse will still
+# raise an error if the registration completes and the username conflicts.
+#
+# Defaults to false.
+#
+#inhibit_user_in_use_error: true
+
 
 ## Metrics ###
 
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index 7a059c6decc0..ea9b50fe97e4 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -190,6 +190,8 @@ def read_config(self, config, **kwargs):
         # The success template used during fallback auth.
         self.fallback_success_template = self.read_template("auth_success.html")
 
+        self.inhibit_user_in_use_error = config.get("inhibit_user_in_use_error", False)
+
     def generate_config_section(self, generate_secrets=False, **kwargs):
         if generate_secrets:
             registration_shared_secret = 'registration_shared_secret: "%s"' % (
@@ -446,6 +448,16 @@ def generate_config_section(self, generate_secrets=False, **kwargs):
         # Defaults to true.
         #
         #auto_join_rooms_for_guests: false
+
+        # Whether to inhibit errors raised when registering a new account if the user ID
+        # already exists. If turned on, requests to /register/available will always
+        # show a user ID as available, and Synapse won't raise an error when starting
+        # a registration with a user ID that already exists. However, Synapse will still
+        # raise an error if the registration completes and the username conflicts.
+        #
+        # Defaults to false.
+ # + #inhibit_user_in_use_error: true """ % locals() ) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index f08a516a7588..a719d5eef351 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -132,6 +132,7 @@ async def check_username( localpart: str, guest_access_token: Optional[str] = None, assigned_user_id: Optional[str] = None, + inhibit_user_in_use_error: bool = False, ) -> None: if types.contains_invalid_mxid_characters(localpart): raise SynapseError( @@ -171,21 +172,22 @@ async def check_username( users = await self.store.get_users_by_id_case_insensitive(user_id) if users: - if not guest_access_token: + if not inhibit_user_in_use_error and not guest_access_token: raise SynapseError( 400, "User ID already taken.", errcode=Codes.USER_IN_USE ) - user_data = await self.auth.get_user_by_access_token(guest_access_token) - if ( - not user_data.is_guest - or UserID.from_string(user_data.user_id).localpart != localpart - ): - raise AuthError( - 403, - "Cannot register taken user ID without valid guest " - "credentials for that user.", - errcode=Codes.FORBIDDEN, - ) + if guest_access_token: + user_data = await self.auth.get_user_by_access_token(guest_access_token) + if ( + not user_data.is_guest + or UserID.from_string(user_data.user_id).localpart != localpart + ): + raise AuthError( + 403, + "Cannot register taken user ID without valid guest " + "credentials for that user.", + errcode=Codes.FORBIDDEN, + ) if guest_access_token is None: try: diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index 8b56c76aed66..c59dae7c0370 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -339,12 +339,19 @@ def __init__(self, hs: "HomeServer"): ), ) + self.inhibit_user_in_use_error = ( + hs.config.registration.inhibit_user_in_use_error + ) + async def on_GET(self, request: Request) -> Tuple[int, JsonDict]: if not self.hs.config.registration.enable_registration: raise SynapseError( 403, "Registration has been disabled", errcode=Codes.FORBIDDEN ) + if self.inhibit_user_in_use_error: + return 200, {"available": True} + ip = request.getClientIP() with self.ratelimiter.ratelimit(ip) as wait_deferred: await wait_deferred @@ -422,6 +429,9 @@ def __init__(self, hs: "HomeServer"): self._refresh_tokens_enabled = ( hs.config.registration.refreshable_access_token_lifetime is not None ) + self._inhibit_user_in_use_error = ( + hs.config.registration.inhibit_user_in_use_error + ) self._registration_flows = _calculate_registration_flows( hs.config, self.auth_handler @@ -564,6 +574,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: desired_username, guest_access_token=guest_access_token, assigned_user_id=registered_user_id, + inhibit_user_in_use_error=self._inhibit_user_in_use_error, ) # Check if the user-interactive authentication flows are complete, if diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py index 6e7c0f11df3e..407dd32a7383 100644 --- a/tests/rest/client/test_register.py +++ b/tests/rest/client/test_register.py @@ -726,6 +726,47 @@ def test_reject_invalid_email(self): {"errcode": "M_UNKNOWN", "error": "Unable to parse email address"}, ) + @override_config( + { + "inhibit_user_in_use_error": True, + } + ) + def test_inhibit_user_in_use_error(self): + """Tests that the 'inhibit_user_in_use_error' configuration flag behaves + correctly. 
+ """ + username = "arthur" + + # Manually register the user, so we know the test isn't passing because of a lack + # of clashing. + reg_handler = self.hs.get_registration_handler() + self.get_success(reg_handler.register_user(username)) + + # Check that /available correctly ignores the username provided despite the + # username being already registered. + channel = self.make_request("GET", "register/available?username=" + username) + self.assertEquals(200, channel.code, channel.result) + + # Test that when starting a UIA registration flow the request doesn't fail because + # of a conflicting username + channel = self.make_request( + "POST", + "register", + {"username": username, "type": "m.login.password", "password": "foo"}, + ) + self.assertEqual(channel.code, 401) + self.assertIn("session", channel.json_body) + + # Test that finishing the registration fails because of a conflicting username. + session = channel.json_body["session"] + channel = self.make_request( + "POST", + "register", + {"auth": {"session": session, "type": LoginType.DUMMY}}, + ) + self.assertEqual(channel.code, 400, channel.json_body) + self.assertEqual(channel.json_body["errcode"], Codes.USER_IN_USE) + class AccountValidityTestCase(unittest.HomeserverTestCase): From c5815567a40a3f50de24b73f9cf150c9b5dbfd42 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 26 Jan 2022 12:06:56 +0000 Subject: [PATCH 162/474] Avoid type annotation problems in prom-client (#11834) --- changelog.d/11834.misc | 1 + synapse/python_dependencies.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11834.misc diff --git a/changelog.d/11834.misc b/changelog.d/11834.misc new file mode 100644 index 000000000000..29a5635f7a1e --- /dev/null +++ b/changelog.d/11834.misc @@ -0,0 +1 @@ +Workaround a type annotation problem in `prometheus_client` 0.13.0. \ No newline at end of file diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 22b4606ae0ea..80786464c2fb 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -76,7 +76,8 @@ "msgpack>=0.5.2", "phonenumbers>=8.2.0", # we use GaugeHistogramMetric, which was added in prom-client 0.4.0. - "prometheus_client>=0.4.0", + # 0.13.0 has an incorrect type annotation, see #11832. + "prometheus_client>=0.4.0,<0.13.0", # we use `order`, which arrived in attrs 19.2.0. # Note: 21.1.0 broke `/sync`, see #9936 "attrs>=19.2.0,!=21.1.0", From d8df8e6c1432d25ea1c0310a5f2dc48d1688345f Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 26 Jan 2022 12:47:34 +0000 Subject: [PATCH 163/474] Don't print HTTPStatus.* in "Processed..." logs (#11827) * Don't print HTTPStatus.* in "Processed..." logs Fixes #11812. See also #7118 and https://github.com/matrix-org/synapse/pull/7188#r401719326 in particular. Co-authored-by: Brendan Abolivier --- changelog.d/11827.bugfix | 1 + synapse/http/site.py | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11827.bugfix diff --git a/changelog.d/11827.bugfix b/changelog.d/11827.bugfix new file mode 100644 index 000000000000..30222dfb6269 --- /dev/null +++ b/changelog.d/11827.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 0.33.3 causing requests to sometimes log strings such as `HTTPStatus.OK` instead of integer status codes. 
\ No newline at end of file diff --git a/synapse/http/site.py b/synapse/http/site.py index c180a1d3231b..40f6c0489451 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -407,7 +407,10 @@ def _finished_processing(self) -> None: user_agent = get_request_user_agent(self, "-") - code = str(self.code) + # int(self.code) looks redundant, because self.code is already an int. + # But self.code might be an HTTPStatus (which inherits from int)---which has + # a different string representation. So ensure we really have an integer. + code = str(int(self.code)) if not self.finished: # we didn't send the full response before we gave up (presumably because # the connection dropped) From 2897fb6b4fb8bdaea0e919233d5ccaf5dea12742 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 26 Jan 2022 08:27:04 -0500 Subject: [PATCH 164/474] Improvements to bundling aggregations. (#11815) This is some odds and ends found during the review of #11791 and while continuing to work in this code: * Return attrs classes instead of dictionaries from some methods to improve type safety. * Call `get_bundled_aggregations` fewer times. * Adds a missing assertion in the tests. * Do not return empty bundled aggregations for an event (preferring to not include the bundle at all, as the docstring states). --- changelog.d/11815.misc | 1 + synapse/events/utils.py | 57 ++++++++++----- synapse/handlers/room.py | 77 +++++++++++---------- synapse/handlers/search.py | 45 ++++++------ synapse/handlers/sync.py | 3 +- synapse/push/mailer.py | 2 +- synapse/rest/admin/rooms.py | 39 +++++++---- synapse/rest/client/room.py | 39 +++++++---- synapse/rest/client/sync.py | 3 +- synapse/storage/databases/main/relations.py | 61 ++++++++++------ synapse/storage/databases/main/stream.py | 22 ++++-- tests/rest/client/test_relations.py | 2 +- 12 files changed, 212 insertions(+), 139 deletions(-) create mode 100644 changelog.d/11815.misc diff --git a/changelog.d/11815.misc b/changelog.d/11815.misc new file mode 100644 index 000000000000..83aa6d6eb046 --- /dev/null +++ b/changelog.d/11815.misc @@ -0,0 +1 @@ +Improve type safety of bundled aggregations code. diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 918adeecf8cd..243696b35724 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -14,7 +14,17 @@ # limitations under the License. import collections.abc import re -from typing import Any, Callable, Dict, Iterable, List, Mapping, Optional, Union +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Iterable, + List, + Mapping, + Optional, + Union, +) from frozendict import frozendict @@ -26,6 +36,10 @@ from . import EventBase +if TYPE_CHECKING: + from synapse.storage.databases.main.relations import BundledAggregations + + # Split strings on "." but not "\." This uses a negative lookbehind assertion for '\' # (? JsonDict: """Serializes a single event. @@ -415,7 +429,7 @@ def _inject_bundled_aggregations( self, event: EventBase, time_now: int, - aggregations: JsonDict, + aggregations: "BundledAggregations", serialized_event: JsonDict, ) -> None: """Potentially injects bundled aggregations into the unsigned portion of the serialized event. @@ -427,13 +441,18 @@ def _inject_bundled_aggregations( serialized_event: The serialized event which may be modified. """ - # Make a copy in-case the object is cached. 
- aggregations = aggregations.copy() + serialized_aggregations = {} + + if aggregations.annotations: + serialized_aggregations[RelationTypes.ANNOTATION] = aggregations.annotations + + if aggregations.references: + serialized_aggregations[RelationTypes.REFERENCE] = aggregations.references - if RelationTypes.REPLACE in aggregations: + if aggregations.replace: # If there is an edit replace the content, preserving existing # relations. - edit = aggregations[RelationTypes.REPLACE] + edit = aggregations.replace # Ensure we take copies of the edit content, otherwise we risk modifying # the original event. @@ -451,24 +470,28 @@ def _inject_bundled_aggregations( else: serialized_event["content"].pop("m.relates_to", None) - aggregations[RelationTypes.REPLACE] = { + serialized_aggregations[RelationTypes.REPLACE] = { "event_id": edit.event_id, "origin_server_ts": edit.origin_server_ts, "sender": edit.sender, } # If this event is the start of a thread, include a summary of the replies. - if RelationTypes.THREAD in aggregations: - # Serialize the latest thread event. - latest_thread_event = aggregations[RelationTypes.THREAD]["latest_event"] - - # Don't bundle aggregations as this could recurse forever. - aggregations[RelationTypes.THREAD]["latest_event"] = self.serialize_event( - latest_thread_event, time_now, bundle_aggregations=None - ) + if aggregations.thread: + serialized_aggregations[RelationTypes.THREAD] = { + # Don't bundle aggregations as this could recurse forever. + "latest_event": self.serialize_event( + aggregations.thread.latest_event, time_now, bundle_aggregations=None + ), + "count": aggregations.thread.count, + "current_user_participated": aggregations.thread.current_user_participated, + } # Include the bundled aggregations in the event. - serialized_event["unsigned"].setdefault("m.relations", {}).update(aggregations) + if serialized_aggregations: + serialized_event["unsigned"].setdefault("m.relations", {}).update( + serialized_aggregations + ) def serialize_events( self, events: Iterable[Union[JsonDict, EventBase]], time_now: int, **kwargs: Any diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index f963078e596c..1420d6772955 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -30,6 +30,7 @@ Tuple, ) +import attr from typing_extensions import TypedDict from synapse.api.constants import ( @@ -60,6 +61,7 @@ from synapse.federation.federation_client import InvalidResponseError from synapse.handlers.federation import get_domains_from_state from synapse.rest.admin._base import assert_user_is_admin +from synapse.storage.databases.main.relations import BundledAggregations from synapse.storage.state import StateFilter from synapse.streams import EventSource from synapse.types import ( @@ -90,6 +92,17 @@ FIVE_MINUTES_IN_MS = 5 * 60 * 1000 +@attr.s(slots=True, frozen=True, auto_attribs=True) +class EventContext: + events_before: List[EventBase] + event: EventBase + events_after: List[EventBase] + state: List[EventBase] + aggregations: Dict[str, BundledAggregations] + start: str + end: str + + class RoomCreationHandler: def __init__(self, hs: "HomeServer"): self.store = hs.get_datastore() @@ -1119,7 +1132,7 @@ async def get_event_context( limit: int, event_filter: Optional[Filter], use_admin_priviledge: bool = False, - ) -> Optional[JsonDict]: + ) -> Optional[EventContext]: """Retrieves events, pagination tokens and state around a given event in a room. 
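
An illustrative aside, not part of the patch: the hunk above changes
`get_event_context` to return the frozen `EventContext` attrs class instead of
a loosely-typed dict. A minimal sketch of why that is a type-safety win, using
a made-up two-field class rather than the real one:

```python
# Sketch: attrs classes surface mistakes that dict-based results defer to runtime.
from typing import List

import attr


@attr.s(slots=True, frozen=True, auto_attribs=True)
class Context:
    events_before: List[str]
    events_after: List[str]


ctx = Context(events_before=["$event_a"], events_after=[])

# Attribute access is checked statically; a typo such as `ctx.events_befor`
# is a mypy error, whereas `result["events_befor"]` on a dict only fails with
# a KeyError once the line actually runs.
print(ctx.events_before)

# frozen=True also turns accidental mutation into an explicit error:
try:
    ctx.events_before = []  # type: ignore[misc]
except attr.exceptions.FrozenInstanceError:
    print("frozen attrs instances cannot be mutated in place")
```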
@@ -1167,38 +1180,28 @@ async def filter_evts(events: List[EventBase]) -> List[EventBase]: results = await self.store.get_events_around( room_id, event_id, before_limit, after_limit, event_filter ) + events_before = results.events_before + events_after = results.events_after if event_filter: - results["events_before"] = await event_filter.filter( - results["events_before"] - ) - results["events_after"] = await event_filter.filter(results["events_after"]) + events_before = await event_filter.filter(events_before) + events_after = await event_filter.filter(events_after) - results["events_before"] = await filter_evts(results["events_before"]) - results["events_after"] = await filter_evts(results["events_after"]) + events_before = await filter_evts(events_before) + events_after = await filter_evts(events_after) # filter_evts can return a pruned event in case the user is allowed to see that # there's something there but not see the content, so use the event that's in # `filtered` rather than the event we retrieved from the datastore. - results["event"] = filtered[0] + event = filtered[0] # Fetch the aggregations. aggregations = await self.store.get_bundled_aggregations( - [results["event"]], user.to_string() + itertools.chain(events_before, (event,), events_after), + user.to_string(), ) - aggregations.update( - await self.store.get_bundled_aggregations( - results["events_before"], user.to_string() - ) - ) - aggregations.update( - await self.store.get_bundled_aggregations( - results["events_after"], user.to_string() - ) - ) - results["aggregations"] = aggregations - if results["events_after"]: - last_event_id = results["events_after"][-1].event_id + if events_after: + last_event_id = events_after[-1].event_id else: last_event_id = event_id @@ -1206,9 +1209,9 @@ async def filter_evts(events: List[EventBase]) -> List[EventBase]: state_filter = StateFilter.from_lazy_load_member_list( ev.sender for ev in itertools.chain( - results["events_before"], - (results["event"],), - results["events_after"], + events_before, + (event,), + events_after, ) ) else: @@ -1226,21 +1229,23 @@ async def filter_evts(events: List[EventBase]) -> List[EventBase]: if event_filter: state_events = await event_filter.filter(state_events) - results["state"] = await filter_evts(state_events) - # We use a dummy token here as we only care about the room portion of # the token, which we replace. 
token = StreamToken.START - results["start"] = await token.copy_and_replace( - "room_key", results["start"] - ).to_string(self.store) - - results["end"] = await token.copy_and_replace( - "room_key", results["end"] - ).to_string(self.store) - - return results + return EventContext( + events_before=events_before, + event=event, + events_after=events_after, + state=await filter_evts(state_events), + aggregations=aggregations, + start=await token.copy_and_replace("room_key", results.start).to_string( + self.store + ), + end=await token.copy_and_replace("room_key", results.end).to_string( + self.store + ), + ) class TimestampLookupHandler: diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index 0b153a682261..02bb5ae72f51 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -361,36 +361,37 @@ async def search( logger.info( "Context for search returned %d and %d events", - len(res["events_before"]), - len(res["events_after"]), + len(res.events_before), + len(res.events_after), ) - res["events_before"] = await filter_events_for_client( - self.storage, user.to_string(), res["events_before"] + events_before = await filter_events_for_client( + self.storage, user.to_string(), res.events_before ) - res["events_after"] = await filter_events_for_client( - self.storage, user.to_string(), res["events_after"] + events_after = await filter_events_for_client( + self.storage, user.to_string(), res.events_after ) - res["start"] = await now_token.copy_and_replace( - "room_key", res["start"] - ).to_string(self.store) - - res["end"] = await now_token.copy_and_replace( - "room_key", res["end"] - ).to_string(self.store) + context = { + "events_before": events_before, + "events_after": events_after, + "start": await now_token.copy_and_replace( + "room_key", res.start + ).to_string(self.store), + "end": await now_token.copy_and_replace( + "room_key", res.end + ).to_string(self.store), + } if include_profile: senders = { ev.sender - for ev in itertools.chain( - res["events_before"], [event], res["events_after"] - ) + for ev in itertools.chain(events_before, [event], events_after) } - if res["events_after"]: - last_event_id = res["events_after"][-1].event_id + if events_after: + last_event_id = events_after[-1].event_id else: last_event_id = event.event_id @@ -402,7 +403,7 @@ async def search( last_event_id, state_filter ) - res["profile_info"] = { + context["profile_info"] = { s.state_key: { "displayname": s.content.get("displayname", None), "avatar_url": s.content.get("avatar_url", None), @@ -411,7 +412,7 @@ async def search( if s.type == EventTypes.Member and s.state_key in senders } - contexts[event.event_id] = res + contexts[event.event_id] = context else: contexts = {} @@ -421,10 +422,10 @@ async def search( for context in contexts.values(): context["events_before"] = self._event_serializer.serialize_events( - context["events_before"], time_now + context["events_before"], time_now # type: ignore[arg-type] ) context["events_after"] = self._event_serializer.serialize_events( - context["events_after"], time_now + context["events_after"], time_now # type: ignore[arg-type] ) state_results = {} diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 7e2a892b63ae..c72ed7c2907a 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -37,6 +37,7 @@ from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, start_active_span from synapse.push.clientformat import format_push_rules_for_user from 
synapse.storage.databases.main.event_push_actions import NotifCounts +from synapse.storage.databases.main.relations import BundledAggregations from synapse.storage.roommember import MemberSummary from synapse.storage.state import StateFilter from synapse.types import ( @@ -100,7 +101,7 @@ class TimelineBatch: limited: bool # A mapping of event ID to the bundled aggregations for the above events. # This is only calculated if limited is true. - bundled_aggregations: Optional[Dict[str, Dict[str, Any]]] = None + bundled_aggregations: Optional[Dict[str, BundledAggregations]] = None def __bool__(self) -> bool: """Make the result appear empty if there are no updates. This is used diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index dadfc574134c..3df8452eecf7 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -455,7 +455,7 @@ async def _get_notif_vars( } the_events = await filter_events_for_client( - self.storage, user_id, results["events_before"] + self.storage, user_id, results.events_before ) the_events.append(notif_event) diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index efe25fe7ebf7..5b706efbcff0 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -729,7 +729,7 @@ async def on_GET( else: event_filter = None - results = await self.room_context_handler.get_event_context( + event_context = await self.room_context_handler.get_event_context( requester, room_id, event_id, @@ -738,25 +738,34 @@ async def on_GET( use_admin_priviledge=True, ) - if not results: + if not event_context: raise SynapseError( HTTPStatus.NOT_FOUND, "Event not found.", errcode=Codes.NOT_FOUND ) time_now = self.clock.time_msec() - aggregations = results.pop("aggregations", None) - results["events_before"] = self._event_serializer.serialize_events( - results["events_before"], time_now, bundle_aggregations=aggregations - ) - results["event"] = self._event_serializer.serialize_event( - results["event"], time_now, bundle_aggregations=aggregations - ) - results["events_after"] = self._event_serializer.serialize_events( - results["events_after"], time_now, bundle_aggregations=aggregations - ) - results["state"] = self._event_serializer.serialize_events( - results["state"], time_now - ) + results = { + "events_before": self._event_serializer.serialize_events( + event_context.events_before, + time_now, + bundle_aggregations=event_context.aggregations, + ), + "event": self._event_serializer.serialize_event( + event_context.event, + time_now, + bundle_aggregations=event_context.aggregations, + ), + "events_after": self._event_serializer.serialize_events( + event_context.events_after, + time_now, + bundle_aggregations=event_context.aggregations, + ), + "state": self._event_serializer.serialize_events( + event_context.state, time_now + ), + "start": event_context.start, + "end": event_context.end, + } return HTTPStatus.OK, results diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 90bb9142a098..90355e44b25e 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -706,27 +706,36 @@ async def on_GET( else: event_filter = None - results = await self.room_context_handler.get_event_context( + event_context = await self.room_context_handler.get_event_context( requester, room_id, event_id, limit, event_filter ) - if not results: + if not event_context: raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND) time_now = self.clock.time_msec() - aggregations = results.pop("aggregations", None) - 
results["events_before"] = self._event_serializer.serialize_events( - results["events_before"], time_now, bundle_aggregations=aggregations - ) - results["event"] = self._event_serializer.serialize_event( - results["event"], time_now, bundle_aggregations=aggregations - ) - results["events_after"] = self._event_serializer.serialize_events( - results["events_after"], time_now, bundle_aggregations=aggregations - ) - results["state"] = self._event_serializer.serialize_events( - results["state"], time_now - ) + results = { + "events_before": self._event_serializer.serialize_events( + event_context.events_before, + time_now, + bundle_aggregations=event_context.aggregations, + ), + "event": self._event_serializer.serialize_event( + event_context.event, + time_now, + bundle_aggregations=event_context.aggregations, + ), + "events_after": self._event_serializer.serialize_events( + event_context.events_after, + time_now, + bundle_aggregations=event_context.aggregations, + ), + "state": self._event_serializer.serialize_events( + event_context.state, time_now + ), + "start": event_context.start, + "end": event_context.end, + } return 200, results diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index d20ae1421e19..f9615da52583 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -48,6 +48,7 @@ from synapse.http.servlet import RestServlet, parse_boolean, parse_integer, parse_string from synapse.http.site import SynapseRequest from synapse.logging.opentracing import trace +from synapse.storage.databases.main.relations import BundledAggregations from synapse.types import JsonDict, StreamToken from synapse.util import json_decoder @@ -526,7 +527,7 @@ async def encode_room( def serialize( events: Iterable[EventBase], - aggregations: Optional[Dict[str, Dict[str, Any]]] = None, + aggregations: Optional[Dict[str, BundledAggregations]] = None, ) -> List[JsonDict]: return self._event_serializer.serialize_events( events, diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 2cb5d06c1352..a9a5dd5f03b6 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -13,17 +13,7 @@ # limitations under the License. import logging -from typing import ( - TYPE_CHECKING, - Any, - Dict, - Iterable, - List, - Optional, - Tuple, - Union, - cast, -) +from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Union, cast import attr from frozendict import frozendict @@ -43,6 +33,7 @@ PaginationChunk, RelationPaginationToken, ) +from synapse.types import JsonDict from synapse.util.caches.descriptors import cached if TYPE_CHECKING: @@ -51,6 +42,30 @@ logger = logging.getLogger(__name__) +@attr.s(slots=True, frozen=True, auto_attribs=True) +class _ThreadAggregation: + latest_event: EventBase + count: int + current_user_participated: bool + + +@attr.s(slots=True, auto_attribs=True) +class BundledAggregations: + """ + The bundled aggregations for an event. + + Some values require additional processing during serialization. 
+ """ + + annotations: Optional[JsonDict] = None + references: Optional[JsonDict] = None + replace: Optional[EventBase] = None + thread: Optional[_ThreadAggregation] = None + + def __bool__(self) -> bool: + return bool(self.annotations or self.references or self.replace or self.thread) + + class RelationsWorkerStore(SQLBaseStore): def __init__( self, @@ -585,7 +600,7 @@ def _get_if_user_has_annotated_event(txn: LoggingTransaction) -> bool: async def _get_bundled_aggregation_for_event( self, event: EventBase, user_id: str - ) -> Optional[Dict[str, Any]]: + ) -> Optional[BundledAggregations]: """Generate bundled aggregations for an event. Note that this does not use a cache, but depends on cached methods. @@ -616,24 +631,24 @@ async def _get_bundled_aggregation_for_event( # The bundled aggregations to include, a mapping of relation type to a # type-specific value. Some types include the direct return type here # while others need more processing during serialization. - aggregations: Dict[str, Any] = {} + aggregations = BundledAggregations() annotations = await self.get_aggregation_groups_for_event(event_id, room_id) if annotations.chunk: - aggregations[RelationTypes.ANNOTATION] = annotations.to_dict() + aggregations.annotations = annotations.to_dict() references = await self.get_relations_for_event( event_id, room_id, RelationTypes.REFERENCE, direction="f" ) if references.chunk: - aggregations[RelationTypes.REFERENCE] = references.to_dict() + aggregations.references = references.to_dict() edit = None if event.type == EventTypes.Message: edit = await self.get_applicable_edit(event_id, room_id) if edit: - aggregations[RelationTypes.REPLACE] = edit + aggregations.replace = edit # If this event is the start of a thread, include a summary of the replies. if self._msc3440_enabled: @@ -644,11 +659,11 @@ async def _get_bundled_aggregation_for_event( event_id, room_id, user_id ) if latest_thread_event: - aggregations[RelationTypes.THREAD] = { - "latest_event": latest_thread_event, - "count": thread_count, - "current_user_participated": participated, - } + aggregations.thread = _ThreadAggregation( + latest_event=latest_thread_event, + count=thread_count, + current_user_participated=participated, + ) # Store the bundled aggregations in the event metadata for later use. return aggregations @@ -657,7 +672,7 @@ async def get_bundled_aggregations( self, events: Iterable[EventBase], user_id: str, - ) -> Dict[str, Dict[str, Any]]: + ) -> Dict[str, BundledAggregations]: """Generate bundled aggregations for events. 
Args: @@ -676,7 +691,7 @@ async def get_bundled_aggregations( results = {} for event in events: event_result = await self._get_bundled_aggregation_for_event(event, user_id) - if event_result is not None: + if event_result: results[event.event_id] = event_result return results diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 319464b1fa83..a898f847e7d5 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -81,6 +81,14 @@ class _EventDictReturn: stream_ordering: int +@attr.s(slots=True, frozen=True, auto_attribs=True) +class _EventsAround: + events_before: List[EventBase] + events_after: List[EventBase] + start: RoomStreamToken + end: RoomStreamToken + + def generate_pagination_where_clause( direction: str, column_names: Tuple[str, str], @@ -846,7 +854,7 @@ async def get_events_around( before_limit: int, after_limit: int, event_filter: Optional[Filter] = None, - ) -> dict: + ) -> _EventsAround: """Retrieve events and pagination tokens around a given event in a room. """ @@ -869,12 +877,12 @@ async def get_events_around( list(results["after"]["event_ids"]), get_prev_content=True ) - return { - "events_before": events_before, - "events_after": events_after, - "start": results["before"]["token"], - "end": results["after"]["token"], - } + return _EventsAround( + events_before=events_before, + events_after=events_after, + start=results["before"]["token"], + end=results["after"]["token"], + ) def _get_events_around_txn( self, diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index c9b220e73d1a..96ae7790bb15 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -577,7 +577,7 @@ def assert_bundle(event_json: JsonDict) -> None: self.assertEquals(200, channel.code, channel.json_body) room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] self.assertTrue(room_timeline["limited"]) - self._find_event_in_chunk(room_timeline["events"]) + assert_bundle(self._find_event_in_chunk(room_timeline["events"])) def test_aggregation_get_event_for_annotation(self): """Test that annotations do not get bundled aggregations included From 2d3bd9aa670eedd299cc03093459929adec41918 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 26 Jan 2022 14:21:13 +0000 Subject: [PATCH 165/474] Add a module callback to set username at registration (#11790) This is in the context of mainlining the Tchap fork of Synapse. Currently in Tchap usernames are derived from the user's email address (extracted from the UIA results, more specifically the m.login.email.identity step). This change also exports the check_username method from the registration handler as part of the module API, so that a module can check if the username it's trying to generate is correct and doesn't conflict with an existing one, and fallback gracefully if not. 
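
To make the new hook concrete, here is an illustrative module sketch (not part
of the patch): it derives the localpart from the email address verified during
UIA, in the spirit of the Tchap use case described above. The class name and
the derivation rule are invented for this example; the callback signature and
the `register_password_auth_provider_callbacks` hook match the patch below.

```python
from typing import Any, Dict, Optional


class UsernameFromEmailModule:
    def __init__(self, config: dict, api: Any):
        # `api` is Synapse's ModuleApi; this patch wires the new
        # get_username_for_registration callback into it.
        api.register_password_auth_provider_callbacks(
            get_username_for_registration=self.get_username_for_registration,
        )

    async def get_username_for_registration(
        self,
        uia_results: Dict[str, Any],
        params: Dict[str, Any],
    ) -> Optional[str]:
        threepid = uia_results.get("m.login.email.identity")
        if not threepid:
            # Returning None falls through to the next module, or to the
            # username supplied by the client.
            return None
        # "alice@example.com" -> "alice-example.com" (made-up derivation rule,
        # for illustration only).
        return threepid["address"].replace("@", "-")
```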
Co-authored-by: David Robertson --- changelog.d/11790.feature | 1 + .../password_auth_provider_callbacks.md | 62 +++++++++++++++ synapse/handlers/auth.py | 58 ++++++++++++++ synapse/module_api/__init__.py | 22 ++++++ synapse/rest/client/register.py | 12 ++- tests/handlers/test_password_providers.py | 79 ++++++++++++++++++- 6 files changed, 231 insertions(+), 3 deletions(-) create mode 100644 changelog.d/11790.feature diff --git a/changelog.d/11790.feature b/changelog.d/11790.feature new file mode 100644 index 000000000000..4a5cc8ec37db --- /dev/null +++ b/changelog.d/11790.feature @@ -0,0 +1 @@ +Add a module callback to set username at registration. diff --git a/docs/modules/password_auth_provider_callbacks.md b/docs/modules/password_auth_provider_callbacks.md index e53abf640989..ec8324d292d8 100644 --- a/docs/modules/password_auth_provider_callbacks.md +++ b/docs/modules/password_auth_provider_callbacks.md @@ -105,6 +105,68 @@ device ID), and the (now deactivated) access token. If multiple modules implement this callback, Synapse runs them all in order. +### `get_username_for_registration` + +_First introduced in Synapse v1.52.0_ + +```python +async def get_username_for_registration( + uia_results: Dict[str, Any], + params: Dict[str, Any], +) -> Optional[str] +``` + +Called when registering a new user. The module can return a username to set for the user +being registered by returning it as a string, or `None` if it doesn't wish to force a +username for this user. If a username is returned, it will be used as the local part of a +user's full Matrix ID (e.g. it's `alice` in `@alice:example.com`). + +This callback is called once [User-Interactive Authentication](https://spec.matrix.org/latest/client-server-api/#user-interactive-authentication-api) +has been completed by the user. It is not called when registering a user via SSO. It is +passed two dictionaries, which include the information that the user has provided during +the registration process. + +The first dictionary contains the results of the [User-Interactive Authentication](https://spec.matrix.org/latest/client-server-api/#user-interactive-authentication-api) +flow followed by the user. Its keys are the identifiers of every step involved in the flow, +associated with either a boolean value indicating whether the step was correctly completed, +or additional information (e.g. email address, phone number...). A list of most existing +identifiers can be found in the [Matrix specification](https://spec.matrix.org/v1.1/client-server-api/#authentication-types). +Here's an example featuring all currently supported keys: + +```python +{ + "m.login.dummy": True, # Dummy authentication + "m.login.terms": True, # User has accepted the terms of service for the homeserver + "m.login.recaptcha": True, # User has completed the recaptcha challenge + "m.login.email.identity": { # User has provided and verified an email address + "medium": "email", + "address": "alice@example.com", + "validated_at": 1642701357084, + }, + "m.login.msisdn": { # User has provided and verified a phone number + "medium": "msisdn", + "address": "33123456789", + "validated_at": 1642701357084, + }, + "org.matrix.msc3231.login.registration_token": "sometoken", # User has registered through the flow described in MSC3231 +} +``` + +The second dictionary contains the parameters provided by the user's client in the request +to `/_matrix/client/v3/register`. 
See the [Matrix specification](https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3register) +for a complete list of these parameters. + +If the module cannot, or does not wish to, generate a username for this user, it must +return `None`. + +If multiple modules implement this callback, they will be considered in order. If a +callback returns `None`, Synapse falls through to the next one. The value of the first +callback that does not return `None` will be used. If this happens, Synapse will not call +any of the subsequent implementations of this callback. If every callback return `None`, +the username provided by the user is used, if any (otherwise one is automatically +generated). + + ## Example The example module below implements authentication checkers for two different login types: diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index bd1a3225638a..e32c93e234d8 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -2060,6 +2060,10 @@ def run(*args: Tuple, **kwargs: Dict) -> Awaitable: Optional[Tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]] ], ] +GET_USERNAME_FOR_REGISTRATION_CALLBACK = Callable[ + [JsonDict, JsonDict], + Awaitable[Optional[str]], +] class PasswordAuthProvider: @@ -2072,6 +2076,9 @@ def __init__(self) -> None: # lists of callbacks self.check_3pid_auth_callbacks: List[CHECK_3PID_AUTH_CALLBACK] = [] self.on_logged_out_callbacks: List[ON_LOGGED_OUT_CALLBACK] = [] + self.get_username_for_registration_callbacks: List[ + GET_USERNAME_FOR_REGISTRATION_CALLBACK + ] = [] # Mapping from login type to login parameters self._supported_login_types: Dict[str, Iterable[str]] = {} @@ -2086,6 +2093,9 @@ def register_password_auth_provider_callbacks( auth_checkers: Optional[ Dict[Tuple[str, Tuple[str, ...]], CHECK_AUTH_CALLBACK] ] = None, + get_username_for_registration: Optional[ + GET_USERNAME_FOR_REGISTRATION_CALLBACK + ] = None, ) -> None: # Register check_3pid_auth callback if check_3pid_auth is not None: @@ -2130,6 +2140,11 @@ def register_password_auth_provider_callbacks( # Add the new method to the list of auth_checker_callbacks for this login type self.auth_checker_callbacks.setdefault(login_type, []).append(callback) + if get_username_for_registration is not None: + self.get_username_for_registration_callbacks.append( + get_username_for_registration, + ) + def get_supported_login_types(self) -> Mapping[str, Iterable[str]]: """Get the login types supported by this password provider @@ -2285,3 +2300,46 @@ async def on_logged_out( except Exception as e: logger.warning("Failed to run module API callback %s: %s", callback, e) continue + + async def get_username_for_registration( + self, + uia_results: JsonDict, + params: JsonDict, + ) -> Optional[str]: + """Defines the username to use when registering the user, using the credentials + and parameters provided during the UIA flow. + + Stops at the first callback that returns a string. + + Args: + uia_results: The credentials provided during the UIA flow. + params: The parameters provided by the registration request. + + Returns: + The localpart to use when registering this user, or None if no module + returned a localpart. + """ + for callback in self.get_username_for_registration_callbacks: + try: + res = await callback(uia_results, params) + + if isinstance(res, str): + return res + elif res is not None: + # mypy complains that this line is unreachable because it assumes the + # data returned by the module fits the expected type. 
We just want + # to make sure this is the case. + logger.warning( # type: ignore[unreachable] + "Ignoring non-string value returned by" + " get_username_for_registration callback %s: %s", + callback, + res, + ) + except Exception as e: + logger.error( + "Module raised an exception in get_username_for_registration: %s", + e, + ) + raise SynapseError(code=500, msg="Internal Server Error") + + return None diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 662e60bc3394..788b2e47d523 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -71,6 +71,7 @@ from synapse.handlers.auth import ( CHECK_3PID_AUTH_CALLBACK, CHECK_AUTH_CALLBACK, + GET_USERNAME_FOR_REGISTRATION_CALLBACK, ON_LOGGED_OUT_CALLBACK, AuthHandler, ) @@ -177,6 +178,7 @@ def __init__(self, hs: "HomeServer", auth_handler: AuthHandler) -> None: self._presence_stream = hs.get_event_sources().sources.presence self._state = hs.get_state_handler() self._clock: Clock = hs.get_clock() + self._registration_handler = hs.get_registration_handler() self._send_email_handler = hs.get_send_email_handler() self.custom_template_dir = hs.config.server.custom_template_directory @@ -310,6 +312,9 @@ def register_password_auth_provider_callbacks( auth_checkers: Optional[ Dict[Tuple[str, Tuple[str, ...]], CHECK_AUTH_CALLBACK] ] = None, + get_username_for_registration: Optional[ + GET_USERNAME_FOR_REGISTRATION_CALLBACK + ] = None, ) -> None: """Registers callbacks for password auth provider capabilities. @@ -319,6 +324,7 @@ def register_password_auth_provider_callbacks( check_3pid_auth=check_3pid_auth, on_logged_out=on_logged_out, auth_checkers=auth_checkers, + get_username_for_registration=get_username_for_registration, ) def register_background_update_controller_callbacks( @@ -1202,6 +1208,22 @@ async def defer_to_thread( """ return await defer_to_thread(self._hs.get_reactor(), f, *args, **kwargs) + async def check_username(self, username: str) -> None: + """Checks if the provided username uses the grammar defined in the Matrix + specification, and is already being used by an existing user. + + Added in Synapse v1.52.0. + + Args: + username: The username to check. This is the local part of the user's full + Matrix user ID, i.e. it's "alice" if the full user ID is "@alice:foo.com". + + Raises: + SynapseError with the errcode "M_USER_IN_USE" if the username is already in + use. 
+ """ + await self._registration_handler.check_username(username) + class PublicRoomListManager: """Contains methods for adding to, removing from and querying whether a room diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index c59dae7c0370..e3492f9f9399 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -425,6 +425,7 @@ def __init__(self, hs: "HomeServer"): self.ratelimiter = hs.get_registration_ratelimiter() self.password_policy_handler = hs.get_password_policy_handler() self.clock = hs.get_clock() + self.password_auth_provider = hs.get_password_auth_provider() self._registration_enabled = self.hs.config.registration.enable_registration self._refresh_tokens_enabled = ( hs.config.registration.refreshable_access_token_lifetime is not None @@ -638,7 +639,16 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: if not password_hash: raise SynapseError(400, "Missing params: password", Codes.MISSING_PARAM) - desired_username = params.get("username", None) + desired_username = await ( + self.password_auth_provider.get_username_for_registration( + auth_result, + params, + ) + ) + + if desired_username is None: + desired_username = params.get("username", None) + guest_access_token = params.get("guest_access_token", None) if desired_username is not None: diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py index 2add72b28a3a..94809cb8bea3 100644 --- a/tests/handlers/test_password_providers.py +++ b/tests/handlers/test_password_providers.py @@ -20,10 +20,11 @@ from twisted.internet import defer import synapse +from synapse.api.constants import LoginType from synapse.handlers.auth import load_legacy_password_auth_providers from synapse.module_api import ModuleApi -from synapse.rest.client import devices, login, logout -from synapse.types import JsonDict +from synapse.rest.client import devices, login, logout, register +from synapse.types import JsonDict, UserID from tests import unittest from tests.server import FakeChannel @@ -156,6 +157,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): login.register_servlets, devices.register_servlets, logout.register_servlets, + register.register_servlets, ] def setUp(self): @@ -745,6 +747,79 @@ async def on_logged_out(user_id, device_id, access_token): on_logged_out.assert_called_once() self.assertTrue(self.called) + def test_username(self): + """Tests that the get_username_for_registration callback can define the username + of a user when registering. + """ + self._setup_get_username_for_registration() + + username = "rin" + channel = self.make_request( + "POST", + "/register", + { + "username": username, + "password": "bar", + "auth": {"type": LoginType.DUMMY}, + }, + ) + self.assertEqual(channel.code, 200) + + # Our callback takes the username and appends "-foo" to it, check that's what we + # have. + mxid = channel.json_body["user_id"] + self.assertEqual(UserID.from_string(mxid).localpart, username + "-foo") + + def test_username_uia(self): + """Tests that the get_username_for_registration callback is only called at the + end of the UIA flow. + """ + m = self._setup_get_username_for_registration() + + # Initiate the UIA flow. 
+ username = "rin" + channel = self.make_request( + "POST", + "register", + {"username": username, "type": "m.login.password", "password": "bar"}, + ) + self.assertEqual(channel.code, 401) + self.assertIn("session", channel.json_body) + + # Check that the callback hasn't been called yet. + m.assert_not_called() + + # Finish the UIA flow. + session = channel.json_body["session"] + channel = self.make_request( + "POST", + "register", + {"auth": {"session": session, "type": LoginType.DUMMY}}, + ) + self.assertEqual(channel.code, 200, channel.json_body) + mxid = channel.json_body["user_id"] + self.assertEqual(UserID.from_string(mxid).localpart, username + "-foo") + + # Check that the callback has been called. + m.assert_called_once() + + def _setup_get_username_for_registration(self) -> Mock: + """Registers a get_username_for_registration callback that appends "-foo" to the + username the client is trying to register. + """ + + async def get_username_for_registration(uia_results, params): + self.assertIn(LoginType.DUMMY, uia_results) + username = params["username"] + return username + "-foo" + + m = Mock(side_effect=get_username_for_registration) + + password_auth_provider = self.hs.get_password_auth_provider() + password_auth_provider.get_username_for_registration_callbacks.append(m) + + return m + def _get_login_flows(self) -> JsonDict: channel = self.make_request("GET", "/_matrix/client/r0/login") self.assertEqual(channel.code, 200, channel.result) From cef0d5d90a4782096c03ec79f825d114b8d61b6e Mon Sep 17 00:00:00 2001 From: Vaishnav Nair <64798176+totallynotvaishnav@users.noreply.github.com> Date: Wed, 26 Jan 2022 20:18:27 +0530 Subject: [PATCH 166/474] Include `prev_content` field in AS events (#11798) * Include 'prev_content' field in AS events Signed-off-by: Vaishnav Nair Co-authored-by: Brendan Abolivier --- changelog.d/11798.bugfix | 1 + synapse/storage/databases/main/appservice.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11798.bugfix diff --git a/changelog.d/11798.bugfix b/changelog.d/11798.bugfix new file mode 100644 index 000000000000..2651fd11cff0 --- /dev/null +++ b/changelog.d/11798.bugfix @@ -0,0 +1 @@ +Include a `prev_content` field in state events sent to Application Services. Contributed by @totallynotvaishnav. \ No newline at end of file diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py index 92c95a41d793..2bb52884315e 100644 --- a/synapse/storage/databases/main/appservice.py +++ b/synapse/storage/databases/main/appservice.py @@ -384,7 +384,7 @@ def get_new_events_for_appservice_txn(txn): "get_new_events_for_appservice", get_new_events_for_appservice_txn ) - events = await self.get_events_as_list(event_ids) + events = await self.get_events_as_list(event_ids, get_prev_content=True) return upper_bound, events From ec07062e314308e221b1739e80d8472f7bbb3628 Mon Sep 17 00:00:00 2001 From: Shay Date: Wed, 26 Jan 2022 16:05:29 -0800 Subject: [PATCH 167/474] Update installation docs to indicate that we support Python 3.10 (#11820) --- changelog.d/11820.doc | 1 + docs/setup/installation.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11820.doc diff --git a/changelog.d/11820.doc b/changelog.d/11820.doc new file mode 100644 index 000000000000..4f563b9b569b --- /dev/null +++ b/changelog.d/11820.doc @@ -0,0 +1 @@ +Update pypi installation docs to indicate that we now support Python 3.10. 
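To make the `get_username_for_registration` callback added above concrete, here is a minimal sketch of a third-party module wiring it up through the module API from the same patch. The module and class names, and the rule of deriving the localpart from the verified email address (the Tchap use case described in the commit message), are illustrative assumptions rather than part of the patch itself.

```python
# Hypothetical module, shown for illustration only. It registers the new
# get_username_for_registration callback and uses the check_username()
# helper that this patch exports on ModuleApi.
from typing import Any, Dict, Optional

from synapse.module_api import ModuleApi


class EmailBasedUsernameModule:
    def __init__(self, config: dict, api: ModuleApi):
        self._api = api
        api.register_password_auth_provider_callbacks(
            get_username_for_registration=self.get_username_for_registration,
        )

    async def get_username_for_registration(
        self,
        uia_results: Dict[str, Any],
        params: Dict[str, Any],
    ) -> Optional[str]:
        # Only act when the user verified an email address during UIA.
        threepid = uia_results.get("m.login.email.identity")
        if not threepid:
            return None

        # Derive a candidate localpart, e.g. "alice@example.com" -> "alice".
        localpart = threepid["address"].split("@", 1)[0].lower()

        try:
            # Raises a SynapseError (e.g. with errcode M_USER_IN_USE) if the
            # localpart is already taken or fails the spec's grammar check.
            await self._api.check_username(localpart)
        except Exception:
            # Returning None makes Synapse fall through to the next callback,
            # or to the username supplied by the client.
            return None

        return localpart
```

Returning `None` on a conflict matches the fallback behaviour documented above: Synapse then consults any remaining callbacks and finally the client-supplied username.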
diff --git a/docs/setup/installation.md b/docs/setup/installation.md index fe657a15dfa6..69ade036c39d 100644 --- a/docs/setup/installation.md +++ b/docs/setup/installation.md @@ -194,7 +194,7 @@ When following this route please make sure that the [Platform-specific prerequis System requirements: - POSIX-compliant system (tested on Linux & OS X) -- Python 3.7 or later, up to Python 3.9. +- Python 3.7 or later, up to Python 3.10. - At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org To install the Synapse homeserver run: From fd65139714433763e8ef5be6017ab6ba5988e766 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Thu, 27 Jan 2022 11:06:29 +0100 Subject: [PATCH 168/474] Fix some indentation inconsistencies in the sample config (modules) (#11838) --- changelog.d/11838.misc | 1 + docs/sample_config.yaml | 10 +++++----- synapse/config/modules.py | 10 +++++----- 3 files changed, 11 insertions(+), 10 deletions(-) create mode 100644 changelog.d/11838.misc diff --git a/changelog.d/11838.misc b/changelog.d/11838.misc new file mode 100644 index 000000000000..4747bbe88b0d --- /dev/null +++ b/changelog.d/11838.misc @@ -0,0 +1 @@ +Fix some indentation inconsistencies in the sample config. \ No newline at end of file diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index b38e6d6c88da..abf28e449089 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -41,11 +41,11 @@ # documentation on how to configure or create custom modules for Synapse. # modules: - # - module: my_super_module.MySuperClass - # config: - # do_thing: true - # - module: my_other_super_module.SomeClass - # config: {} + #- module: my_super_module.MySuperClass + # config: + # do_thing: true + #- module: my_other_super_module.SomeClass + # config: {} ## Server ## diff --git a/synapse/config/modules.py b/synapse/config/modules.py index 85fb05890d7c..2ef02b8f5566 100644 --- a/synapse/config/modules.py +++ b/synapse/config/modules.py @@ -41,9 +41,9 @@ def generate_config_section(self, **kwargs): # documentation on how to configure or create custom modules for Synapse. # modules: - # - module: my_super_module.MySuperClass - # config: - # do_thing: true - # - module: my_other_super_module.SomeClass - # config: {} + #- module: my_super_module.MySuperClass + # config: + # do_thing: true + #- module: my_other_super_module.SomeClass + # config: {} """ From 57e4786e907c390502f4ec6fb915e24cf5124351 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 27 Jan 2022 10:54:27 +0000 Subject: [PATCH 169/474] Create singletons for `StateFilter.{all,none}()` (#11836) No point recreating these for each call, since they are frozen --- changelog.d/11836.misc | 1 + synapse/storage/state.py | 14 +++++++++----- 2 files changed, 10 insertions(+), 5 deletions(-) create mode 100644 changelog.d/11836.misc diff --git a/changelog.d/11836.misc b/changelog.d/11836.misc new file mode 100644 index 000000000000..be7e331c639c --- /dev/null +++ b/changelog.d/11836.misc @@ -0,0 +1 @@ +Minor performance improvement in room state lookup. diff --git a/synapse/storage/state.py b/synapse/storage/state.py index df8b2f108877..913448f0f95e 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -74,21 +74,21 @@ def __attrs_post_init__(self): @staticmethod def all() -> "StateFilter": - """Creates a filter that fetches everything. + """Returns a filter that fetches everything. Returns: - The new state filter. 
+ The state filter. """ - return StateFilter(types=frozendict(), include_others=True) + return _ALL_STATE_FILTER @staticmethod def none() -> "StateFilter": - """Creates a filter that fetches nothing. + """Returns a filter that fetches nothing. Returns: The new state filter. """ - return StateFilter(types=frozendict(), include_others=False) + return _NONE_STATE_FILTER @staticmethod def from_types(types: Iterable[Tuple[str, Optional[str]]]) -> "StateFilter": @@ -527,6 +527,10 @@ def approx_difference(self, other: "StateFilter") -> "StateFilter": ) +_ALL_STATE_FILTER = StateFilter(types=frozendict(), include_others=True) +_NONE_STATE_FILTER = StateFilter(types=frozendict(), include_others=False) + + class StateGroupStorage: """High level interface to fetching state for event.""" From 6d482ba259a55947678390c06b24a7ba91f0ebab Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 27 Jan 2022 17:45:39 +0000 Subject: [PATCH 170/474] Pass `isolation_level` to `runWithConnection` (#11847) This was missed in https://github.com/matrix-org/synapse/pull/11799 --- changelog.d/11847.misc | 1 + synapse/storage/database.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/11847.misc diff --git a/changelog.d/11847.misc b/changelog.d/11847.misc new file mode 100644 index 000000000000..5c3b2bcaf4e5 --- /dev/null +++ b/changelog.d/11847.misc @@ -0,0 +1 @@ +Preparation for reducing Postgres serialization errors: allow setting transaction isolation level. Contributed by Nick @ Beeper. diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 7455326ed331..99802228c9f7 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -748,6 +748,7 @@ async def runInteraction( func, *args, db_autocommit=db_autocommit, + isolation_level=isolation_level, **kwargs, ) From bf60da1a60096fac5fb778b732ff2214862ac808 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 28 Jan 2022 14:41:33 +0000 Subject: [PATCH 171/474] Configurable limits on avatars (#11846) Only allow files which file size and content types match configured limits to be set as avatar. Most of the inspiration from the non-test code comes from matrix-org/synapse-dinsic#19 --- changelog.d/11846.feature | 1 + docs/sample_config.yaml | 14 +++ synapse/config/server.py | 27 ++++++ synapse/handlers/profile.py | 67 +++++++++++++ synapse/handlers/room_member.py | 6 ++ tests/handlers/test_profile.py | 94 +++++++++++++++++- tests/rest/client/test_profile.py | 156 ++++++++++++++++++++++++++++++ 7 files changed, 363 insertions(+), 2 deletions(-) create mode 100644 changelog.d/11846.feature diff --git a/changelog.d/11846.feature b/changelog.d/11846.feature new file mode 100644 index 000000000000..fcf6affdb53d --- /dev/null +++ b/changelog.d/11846.feature @@ -0,0 +1 @@ +Allow configuring a maximum file size as well as a list of allowed content types for avatars. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index abf28e449089..689b207fc0c0 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -471,6 +471,20 @@ limit_remote_rooms: # #allow_per_room_profiles: false +# The largest allowed file size for a user avatar. Defaults to no restriction. +# +# Note that user avatar changes will not work if this is set without +# using Synapse's media repository. +# +#max_avatar_size: 10M + +# The MIME types allowed for user avatars. Defaults to no restriction. +# +# Note that user avatar changes will not work if this is set without +# using Synapse's media repository. 
+# +#allowed_avatar_mimetypes: ["image/png", "image/jpeg", "image/gif"] + # How long to keep redacted events in unredacted form in the database. After # this period redacted events get replaced with their redacted form in the DB. # diff --git a/synapse/config/server.py b/synapse/config/server.py index f200d0c1f1cf..a460cf25b42f 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -489,6 +489,19 @@ def read_config(self, config, **kwargs): # events with profile information that differ from the target's global profile. self.allow_per_room_profiles = config.get("allow_per_room_profiles", True) + # The maximum size an avatar can have, in bytes. + self.max_avatar_size = config.get("max_avatar_size") + if self.max_avatar_size is not None: + self.max_avatar_size = self.parse_size(self.max_avatar_size) + + # The MIME types allowed for an avatar. + self.allowed_avatar_mimetypes = config.get("allowed_avatar_mimetypes") + if self.allowed_avatar_mimetypes and not isinstance( + self.allowed_avatar_mimetypes, + list, + ): + raise ConfigError("allowed_avatar_mimetypes must be a list") + self.listeners = [parse_listener_def(x) for x in config.get("listeners", [])] # no_tls is not really supported any more, but let's grandfather it in @@ -1168,6 +1181,20 @@ def generate_config_section( # #allow_per_room_profiles: false + # The largest allowed file size for a user avatar. Defaults to no restriction. + # + # Note that user avatar changes will not work if this is set without + # using Synapse's media repository. + # + #max_avatar_size: 10M + + # The MIME types allowed for user avatars. Defaults to no restriction. + # + # Note that user avatar changes will not work if this is set without + # using Synapse's media repository. + # + #allowed_avatar_mimetypes: ["image/png", "image/jpeg", "image/gif"] + # How long to keep redacted events in unredacted form in the database. After # this period redacted events get replaced with their redacted form in the DB. # diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 6b5a6ded8b29..36e3ad2ba971 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -31,6 +31,8 @@ create_requester, get_domain_from_id, ) +from synapse.util.caches.descriptors import cached +from synapse.util.stringutils import parse_and_validate_mxc_uri if TYPE_CHECKING: from synapse.server import HomeServer @@ -64,6 +66,11 @@ def __init__(self, hs: "HomeServer"): self.user_directory_handler = hs.get_user_directory_handler() self.request_ratelimiter = hs.get_request_ratelimiter() + self.max_avatar_size = hs.config.server.max_avatar_size + self.allowed_avatar_mimetypes = hs.config.server.allowed_avatar_mimetypes + + self.server_name = hs.config.server.server_name + if hs.config.worker.run_background_tasks: self.clock.looping_call( self._update_remote_profile_cache, self.PROFILE_UPDATE_MS @@ -286,6 +293,9 @@ async def set_avatar_url( 400, "Avatar URL is too long (max %i)" % (MAX_AVATAR_URL_LEN,) ) + if not await self.check_avatar_size_and_mime_type(new_avatar_url): + raise SynapseError(403, "This avatar is not allowed", Codes.FORBIDDEN) + avatar_url_to_set: Optional[str] = new_avatar_url if new_avatar_url == "": avatar_url_to_set = None @@ -307,6 +317,63 @@ async def set_avatar_url( await self._update_join_states(requester, target_user) + @cached() + async def check_avatar_size_and_mime_type(self, mxc: str) -> bool: + """Check that the size and content type of the avatar at the given MXC URI are + within the configured limits. 
+ + Args: + mxc: The MXC URI at which the avatar can be found. + + Returns: + A boolean indicating whether the file can be allowed to be set as an avatar. + """ + if not self.max_avatar_size and not self.allowed_avatar_mimetypes: + return True + + server_name, _, media_id = parse_and_validate_mxc_uri(mxc) + + if server_name == self.server_name: + media_info = await self.store.get_local_media(media_id) + else: + media_info = await self.store.get_cached_remote_media(server_name, media_id) + + if media_info is None: + # Both configuration options need to access the file's metadata, and + # retrieving remote avatars just for this becomes a bit of a faff, especially + # if e.g. the file is too big. It's also generally safe to assume most files + # used as avatar are uploaded locally, or if the upload didn't happen as part + # of a PUT request on /avatar_url that the file was at least previewed by the + # user locally (and therefore downloaded to the remote media cache). + logger.warning("Forbidding avatar change to %s: avatar not on server", mxc) + return False + + if self.max_avatar_size: + # Ensure avatar does not exceed max allowed avatar size + if media_info["media_length"] > self.max_avatar_size: + logger.warning( + "Forbidding avatar change to %s: %d bytes is above the allowed size " + "limit", + mxc, + media_info["media_length"], + ) + return False + + if self.allowed_avatar_mimetypes: + # Ensure the avatar's file type is allowed + if ( + self.allowed_avatar_mimetypes + and media_info["media_type"] not in self.allowed_avatar_mimetypes + ): + logger.warning( + "Forbidding avatar change to %s: mimetype %s not allowed", + mxc, + media_info["media_type"], + ) + return False + + return True + async def on_profile_query(self, args: JsonDict) -> JsonDict: """Handles federation profile query requests.""" diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 6aa910dd10f9..3dd5e1b6e4dc 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -590,6 +590,12 @@ async def update_membership_locked( errcode=Codes.BAD_JSON, ) + if "avatar_url" in content: + if not await self.profile_handler.check_avatar_size_and_mime_type( + content["avatar_url"], + ): + raise SynapseError(403, "This avatar is not allowed", Codes.FORBIDDEN) + # The event content should *not* include the authorising user as # it won't be properly signed. Strip it out since it might come # back from a client updating a display name / avatar. diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index c153018fd81b..60235e569987 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py @@ -11,12 +11,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- +from typing import Any, Dict from unittest.mock import Mock import synapse.types from synapse.api.errors import AuthError, SynapseError from synapse.rest import admin +from synapse.server import HomeServer from synapse.types import UserID from tests import unittest @@ -46,7 +47,7 @@ def register_query_handler(query_type, handler): ) return hs - def prepare(self, reactor, clock, hs): + def prepare(self, reactor, clock, hs: HomeServer): self.store = hs.get_datastore() self.frank = UserID.from_string("@1234abcd:test") @@ -248,3 +249,92 @@ def test_set_my_avatar_if_disabled(self): ), SynapseError, ) + + def test_avatar_constraints_no_config(self): + """Tests that the method to check an avatar against configured constraints skips + all of its check if no constraint is configured. + """ + # The first check that's done by this method is whether the file exists; if we + # don't get an error on a non-existing file then it means all of the checks were + # successfully skipped. + res = self.get_success( + self.handler.check_avatar_size_and_mime_type("mxc://test/unknown_file") + ) + self.assertTrue(res) + + @unittest.override_config({"max_avatar_size": 50}) + def test_avatar_constraints_missing(self): + """Tests that an avatar isn't allowed if the file at the given MXC URI couldn't + be found. + """ + res = self.get_success( + self.handler.check_avatar_size_and_mime_type("mxc://test/unknown_file") + ) + self.assertFalse(res) + + @unittest.override_config({"max_avatar_size": 50}) + def test_avatar_constraints_file_size(self): + """Tests that a file that's above the allowed file size is forbidden but one + that's below it is allowed. + """ + self._setup_local_files( + { + "small": {"size": 40}, + "big": {"size": 60}, + } + ) + + res = self.get_success( + self.handler.check_avatar_size_and_mime_type("mxc://test/small") + ) + self.assertTrue(res) + + res = self.get_success( + self.handler.check_avatar_size_and_mime_type("mxc://test/big") + ) + self.assertFalse(res) + + @unittest.override_config({"allowed_avatar_mimetypes": ["image/png"]}) + def test_avatar_constraint_mime_type(self): + """Tests that a file with an unauthorised MIME type is forbidden but one with + an authorised content type is allowed. + """ + self._setup_local_files( + { + "good": {"mimetype": "image/png"}, + "bad": {"mimetype": "application/octet-stream"}, + } + ) + + res = self.get_success( + self.handler.check_avatar_size_and_mime_type("mxc://test/good") + ) + self.assertTrue(res) + + res = self.get_success( + self.handler.check_avatar_size_and_mime_type("mxc://test/bad") + ) + self.assertFalse(res) + + def _setup_local_files(self, names_and_props: Dict[str, Dict[str, Any]]): + """Stores metadata about files in the database. + + Args: + names_and_props: A dictionary with one entry per file, with the key being the + file's name, and the value being a dictionary of properties. Supported + properties are "mimetype" (for the file's type) and "size" (for the + file's size). 
+ """ + store = self.hs.get_datastore() + + for name, props in names_and_props.items(): + self.get_success( + store.store_local_media( + media_id=name, + media_type=props.get("mimetype", "image/png"), + time_now_ms=self.clock.time_msec(), + upload_name=None, + media_length=props.get("size", 50), + user_id=UserID.from_string("@rin:test"), + ) + ) diff --git a/tests/rest/client/test_profile.py b/tests/rest/client/test_profile.py index 2860579c2e54..ead883ded886 100644 --- a/tests/rest/client/test_profile.py +++ b/tests/rest/client/test_profile.py @@ -13,8 +13,12 @@ # limitations under the License. """Tests REST events for /profile paths.""" +from typing import Any, Dict + +from synapse.api.errors import Codes from synapse.rest import admin from synapse.rest.client import login, profile, room +from synapse.types import UserID from tests import unittest @@ -25,6 +29,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): admin.register_servlets_for_client_rest_resource, login.register_servlets, profile.register_servlets, + room.register_servlets, ] def make_homeserver(self, reactor, clock): @@ -150,6 +155,157 @@ def _get_avatar_url(self, name=None): self.assertEqual(channel.code, 200, channel.result) return channel.json_body.get("avatar_url") + @unittest.override_config({"max_avatar_size": 50}) + def test_avatar_size_limit_global(self): + """Tests that the maximum size limit for avatars is enforced when updating a + global profile. + """ + self._setup_local_files( + { + "small": {"size": 40}, + "big": {"size": 60}, + } + ) + + channel = self.make_request( + "PUT", + f"/profile/{self.owner}/avatar_url", + content={"avatar_url": "mxc://test/big"}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 403, channel.result) + self.assertEqual( + channel.json_body["errcode"], Codes.FORBIDDEN, channel.json_body + ) + + channel = self.make_request( + "PUT", + f"/profile/{self.owner}/avatar_url", + content={"avatar_url": "mxc://test/small"}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + @unittest.override_config({"max_avatar_size": 50}) + def test_avatar_size_limit_per_room(self): + """Tests that the maximum size limit for avatars is enforced when updating a + per-room profile. + """ + self._setup_local_files( + { + "small": {"size": 40}, + "big": {"size": 60}, + } + ) + + room_id = self.helper.create_room_as(tok=self.owner_tok) + + channel = self.make_request( + "PUT", + f"/rooms/{room_id}/state/m.room.member/{self.owner}", + content={"membership": "join", "avatar_url": "mxc://test/big"}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 403, channel.result) + self.assertEqual( + channel.json_body["errcode"], Codes.FORBIDDEN, channel.json_body + ) + + channel = self.make_request( + "PUT", + f"/rooms/{room_id}/state/m.room.member/{self.owner}", + content={"membership": "join", "avatar_url": "mxc://test/small"}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + @unittest.override_config({"allowed_avatar_mimetypes": ["image/png"]}) + def test_avatar_allowed_mime_type_global(self): + """Tests that the MIME type whitelist for avatars is enforced when updating a + global profile. 
+ """ + self._setup_local_files( + { + "good": {"mimetype": "image/png"}, + "bad": {"mimetype": "application/octet-stream"}, + } + ) + + channel = self.make_request( + "PUT", + f"/profile/{self.owner}/avatar_url", + content={"avatar_url": "mxc://test/bad"}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 403, channel.result) + self.assertEqual( + channel.json_body["errcode"], Codes.FORBIDDEN, channel.json_body + ) + + channel = self.make_request( + "PUT", + f"/profile/{self.owner}/avatar_url", + content={"avatar_url": "mxc://test/good"}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + @unittest.override_config({"allowed_avatar_mimetypes": ["image/png"]}) + def test_avatar_allowed_mime_type_per_room(self): + """Tests that the MIME type whitelist for avatars is enforced when updating a + per-room profile. + """ + self._setup_local_files( + { + "good": {"mimetype": "image/png"}, + "bad": {"mimetype": "application/octet-stream"}, + } + ) + + room_id = self.helper.create_room_as(tok=self.owner_tok) + + channel = self.make_request( + "PUT", + f"/rooms/{room_id}/state/m.room.member/{self.owner}", + content={"membership": "join", "avatar_url": "mxc://test/bad"}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 403, channel.result) + self.assertEqual( + channel.json_body["errcode"], Codes.FORBIDDEN, channel.json_body + ) + + channel = self.make_request( + "PUT", + f"/rooms/{room_id}/state/m.room.member/{self.owner}", + content={"membership": "join", "avatar_url": "mxc://test/good"}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + def _setup_local_files(self, names_and_props: Dict[str, Dict[str, Any]]): + """Stores metadata about files in the database. + + Args: + names_and_props: A dictionary with one entry per file, with the key being the + file's name, and the value being a dictionary of properties. Supported + properties are "mimetype" (for the file's type) and "size" (for the + file's size). + """ + store = self.hs.get_datastore() + + for name, props in names_and_props.items(): + self.get_success( + store.store_local_media( + media_id=name, + media_type=props.get("mimetype", "image/png"), + time_now_ms=self.clock.time_msec(), + upload_name=None, + media_length=props.get("size", 50), + user_id=UserID.from_string("@rin:test"), + ) + ) + class ProfilesRestrictedTestCase(unittest.HomeserverTestCase): From 7eb198ddc8f6dd7c6aa4984656db995de4358158 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Mon, 31 Jan 2022 15:40:20 +0100 Subject: [PATCH 172/474] Remove not needed old table of contents in documentation (#11860) --- changelog.d/11860.doc | 1 + docs/MSC1711_certificates_FAQ.md | 21 --------------------- docs/admin_api/media_admin_api.md | 17 ----------------- docs/admin_api/rooms.md | 15 --------------- 4 files changed, 1 insertion(+), 53 deletions(-) create mode 100644 changelog.d/11860.doc diff --git a/changelog.d/11860.doc b/changelog.d/11860.doc new file mode 100644 index 000000000000..04b88c5f2c30 --- /dev/null +++ b/changelog.d/11860.doc @@ -0,0 +1 @@ +Remove not needed old table of contents in documentation. 
diff --git a/docs/MSC1711_certificates_FAQ.md b/docs/MSC1711_certificates_FAQ.md index 086899a9d836..32ba15652d1a 100644 --- a/docs/MSC1711_certificates_FAQ.md +++ b/docs/MSC1711_certificates_FAQ.md @@ -44,27 +44,6 @@ For more details and context on the release of the r0.1 Server/Server API and imminent Matrix 1.0 release, you can also see our [main talk from FOSDEM 2019](https://matrix.org/blog/2019/02/04/matrix-at-fosdem-2019/). -## Contents -* Timeline -* Configuring certificates for compatibility with Synapse 1.0 -* FAQ - * Synapse 0.99.0 has just been released, what do I need to do right now? - * How do I upgrade? - * What will happen if I do not set up a valid federation certificate - immediately? - * What will happen if I do nothing at all? - * When do I need a SRV record or .well-known URI? - * Can I still use an SRV record? - * I have created a .well-known URI. Do I still need an SRV record? - * It used to work just fine, why are you breaking everything? - * Can I manage my own certificates rather than having Synapse renew - certificates itself? - * Do you still recommend against using a reverse proxy on the federation port? - * Do I still need to give my TLS certificates to Synapse if I am using a - reverse proxy? - * Do I need the same certificate for the client and federation port? - * How do I tell Synapse to reload my keys/certificates after I replace them? - ## Timeline **5th Feb 2019 - Synapse 0.99.0 is released.** diff --git a/docs/admin_api/media_admin_api.md b/docs/admin_api/media_admin_api.md index 60b8bc7379a2..2c44e1f758d8 100644 --- a/docs/admin_api/media_admin_api.md +++ b/docs/admin_api/media_admin_api.md @@ -1,20 +1,3 @@ -# Contents -- [Querying media](#querying-media) - * [List all media in a room](#list-all-media-in-a-room) - * [List all media uploaded by a user](#list-all-media-uploaded-by-a-user) -- [Quarantine media](#quarantine-media) - * [Quarantining media by ID](#quarantining-media-by-id) - * [Remove media from quarantine by ID](#remove-media-from-quarantine-by-id) - * [Quarantining media in a room](#quarantining-media-in-a-room) - * [Quarantining all media of a user](#quarantining-all-media-of-a-user) - * [Protecting media from being quarantined](#protecting-media-from-being-quarantined) - * [Unprotecting media from being quarantined](#unprotecting-media-from-being-quarantined) -- [Delete local media](#delete-local-media) - * [Delete a specific local media](#delete-a-specific-local-media) - * [Delete local media by date or size](#delete-local-media-by-date-or-size) - * [Delete media uploaded by a user](#delete-media-uploaded-by-a-user) -- [Purge Remote Media API](#purge-remote-media-api) - # Querying media These APIs allow extracting media information from the homeserver. 
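As a sketch of driving one of the media admin APIs listed above — here the room-media listing endpoint documented in this file — with a server admin's access token; the homeserver URL, token and room ID are placeholders:

```python
# Illustrative call to the "List all media in a room" admin API.
import requests

HS = "https://matrix.example.com"
ADMIN_TOKEN = "<server_admin_access_token>"
ROOM_ID = "!room:example.com"

resp = requests.get(
    f"{HS}/_synapse/admin/v1/room/{ROOM_ID}/media",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
)
resp.raise_for_status()
media = resp.json()
# Per the response documented in this file, the body separates media stored
# locally from cached remote media.
print(media["local"], media["remote"])
```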
diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md index 0f1a74134ff9..daf2522f6ab9 100644 --- a/docs/admin_api/rooms.md +++ b/docs/admin_api/rooms.md @@ -1,18 +1,3 @@ -# Contents -- [List Room API](#list-room-api) -- [Room Details API](#room-details-api) -- [Room Members API](#room-members-api) -- [Room State API](#room-state-api) -- [Block Room API](#block-room-api) -- [Delete Room API](#delete-room-api) - * [Version 1 (old version)](#version-1-old-version) - * [Version 2 (new version)](#version-2-new-version) - * [Status of deleting rooms](#status-of-deleting-rooms) - * [Undoing room shutdowns](#undoing-room-shutdowns) -- [Make Room Admin API](#make-room-admin-api) -- [Forward Extremities Admin API](#forward-extremities-admin-api) -- [Event Context API](#event-context-api) - # List Room API The List Room admin API allows server admins to get a list of rooms on their From 02755c31882b75ae6e71d9033ed1e72d0fb7c00b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 31 Jan 2022 10:13:32 -0500 Subject: [PATCH 173/474] Remove the obsolete MSC1849 configuration flag. (#11843) MSC1849 was replaced by MSC2675, which was merged. The configuration flag, which defaulted to true, is no longer useful. --- changelog.d/11843.removal | 1 + synapse/config/experimental.py | 2 -- synapse/storage/databases/main/relations.py | 4 ---- 3 files changed, 1 insertion(+), 6 deletions(-) create mode 100644 changelog.d/11843.removal diff --git a/changelog.d/11843.removal b/changelog.d/11843.removal new file mode 100644 index 000000000000..d6d49b4ad50b --- /dev/null +++ b/changelog.d/11843.removal @@ -0,0 +1 @@ +Remove the `experimental_msc1849_support_enabled` flag as the features are now stable. diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index dbaeb1091861..65c807a19af7 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -24,8 +24,6 @@ class ExperimentalConfig(Config): def read_config(self, config: JsonDict, **kwargs): experimental = config.get("experimental_features") or {} - # Whether to enable experimental MSC1849 (aka relations) support - self.msc1849_enabled = config.get("experimental_msc1849_support_enabled", True) # MSC3440 (thread relation) self.msc3440_enabled: bool = experimental.get("msc3440_enabled", False) diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index a9a5dd5f03b6..37468a518381 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -75,7 +75,6 @@ def __init__( ): super().__init__(database, db_conn, hs) - self._msc1849_enabled = hs.config.experimental.msc1849_enabled self._msc3440_enabled = hs.config.experimental.msc3440_enabled @cached(tree=True) @@ -683,9 +682,6 @@ async def get_bundled_aggregations( A map of event ID to the bundled aggregation for the event. Not all events may have bundled aggregations in the results. """ - # If bundled aggregations are disabled, nothing to do. - if not self._msc1849_enabled: - return {} # TODO Parallelize. 
results = {} From 0da2301b21399b85a955d92dc355024a67b8cb40 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Mon, 31 Jan 2022 17:24:29 +0100 Subject: [PATCH 174/474] Consolidate the `access_token` information in the admin api (#11861) Co-authored-by: reivilibre --- changelog.d/11861.doc | 1 + docs/admin_api/account_validity.md | 3 ++ docs/admin_api/delete_group.md | 6 +-- docs/admin_api/event_reports.md | 7 ++- docs/admin_api/media_admin_api.md | 8 ++-- docs/admin_api/purge_history_api.md | 9 ++-- docs/admin_api/room_membership.md | 6 +-- docs/admin_api/rooms.md | 6 +-- docs/admin_api/statistics.md | 6 +-- docs/admin_api/user_admin_api.md | 73 ++--------------------------- 10 files changed, 30 insertions(+), 95 deletions(-) create mode 100644 changelog.d/11861.doc diff --git a/changelog.d/11861.doc b/changelog.d/11861.doc new file mode 100644 index 000000000000..53c75a088378 --- /dev/null +++ b/changelog.d/11861.doc @@ -0,0 +1 @@ +Consolidate the `access_token` information at the top of each relevant page in the Admin API documentation. diff --git a/docs/admin_api/account_validity.md b/docs/admin_api/account_validity.md index b74b5d0c1a96..d878bf7451e3 100644 --- a/docs/admin_api/account_validity.md +++ b/docs/admin_api/account_validity.md @@ -4,6 +4,9 @@ This API allows a server administrator to manage the validity of an account. To use it, you must enable the account validity feature (under `account_validity`) in Synapse's configuration. +To use it, you will need to authenticate by providing an `access_token` +for a server admin: see [Admin API](../usage/administration/admin_api). + ## Renew account This API extends the validity of an account by as much time as configured in the diff --git a/docs/admin_api/delete_group.md b/docs/admin_api/delete_group.md index 2e0a1d24741f..73a96842ac72 100644 --- a/docs/admin_api/delete_group.md +++ b/docs/admin_api/delete_group.md @@ -4,11 +4,11 @@ This API lets a server admin delete a local group. Doing so will kick all users out of the group so that their clients will correctly handle the group being deleted. +To use it, you will need to authenticate by providing an `access_token` +for a server admin: see [Admin API](../usage/administration/admin_api). + The API is: ``` POST /_synapse/admin/v1/delete_group/ ``` - -To use it, you will need to authenticate by providing an `access_token` for a -server admin: see [Admin API](../usage/administration/admin_api). diff --git a/docs/admin_api/event_reports.md b/docs/admin_api/event_reports.md index f523774ba8e3..be6f0961bfcb 100644 --- a/docs/admin_api/event_reports.md +++ b/docs/admin_api/event_reports.md @@ -2,12 +2,13 @@ This API returns information about reported events. +To use it, you will need to authenticate by providing an `access_token` +for a server admin: see [Admin API](../usage/administration/admin_api). + The api is: ``` GET /_synapse/admin/v1/event_reports?from=0&limit=10 ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: see [Admin API](../usage/administration/admin_api). It returns a JSON body like the following: @@ -94,8 +95,6 @@ The api is: ``` GET /_synapse/admin/v1/event_reports/ ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: see [Admin API](../usage/administration/admin_api). 
It returns a JSON body like the following: diff --git a/docs/admin_api/media_admin_api.md b/docs/admin_api/media_admin_api.md index 2c44e1f758d8..a8cdf1972726 100644 --- a/docs/admin_api/media_admin_api.md +++ b/docs/admin_api/media_admin_api.md @@ -2,6 +2,9 @@ These APIs allow extracting media information from the homeserver. +To use it, you will need to authenticate by providing an `access_token` +for a server admin: see [Admin API](../usage/administration/admin_api). + ## List all media in a room This API gets a list of known media in a room. @@ -11,8 +14,6 @@ The API is: ``` GET /_synapse/admin/v1/room//media ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: see [Admin API](../usage/administration/admin_api). The API returns a JSON body like the following: ```json @@ -300,8 +301,5 @@ The following fields are returned in the JSON response body: * `deleted`: integer - The number of media items successfully deleted -To use it, you will need to authenticate by providing an `access_token` for a -server admin: see [Admin API](../usage/administration/admin_api). - If the user re-requests purged remote media, synapse will re-request the media from the originating server. diff --git a/docs/admin_api/purge_history_api.md b/docs/admin_api/purge_history_api.md index 277e28d9cbcb..2527e2758ba3 100644 --- a/docs/admin_api/purge_history_api.md +++ b/docs/admin_api/purge_history_api.md @@ -10,15 +10,15 @@ paginate further back in the room from the point being purged from. Note that Synapse requires at least one message in each room, so it will never delete the last message in a room. +To use it, you will need to authenticate by providing an `access_token` +for a server admin: see [Admin API](../usage/administration/admin_api). + The API is: ``` POST /_synapse/admin/v1/purge_history/[/] ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - By default, events sent by local users are not deleted, as they may represent the only copies of this content in existence. (Events sent by remote users are deleted.) @@ -57,9 +57,6 @@ It is possible to poll for updates on recent purges with a second API; GET /_synapse/admin/v1/purge_history_status/ ``` -Again, you will need to authenticate by providing an `access_token` for a -server admin. - This API returns a JSON body like the following: ```json diff --git a/docs/admin_api/room_membership.md b/docs/admin_api/room_membership.md index 548b790a5c0c..310d6ae628fa 100644 --- a/docs/admin_api/room_membership.md +++ b/docs/admin_api/room_membership.md @@ -5,6 +5,9 @@ to a room with a given `room_id_or_alias`. You can only modify the membership of local users. The server administrator must be in the room and have permission to invite users. +To use it, you will need to authenticate by providing an `access_token` +for a server admin: see [Admin API](../usage/administration/admin_api). + ## Parameters The following parameters are available: @@ -23,9 +26,6 @@ POST /_synapse/admin/v1/join/ } ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: see [Admin API](../usage/administration/admin_api). - Response: ```json diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md index daf2522f6ab9..d4873f94905f 100644 --- a/docs/admin_api/rooms.md +++ b/docs/admin_api/rooms.md @@ -4,6 +4,9 @@ The List Room admin API allows server admins to get a list of rooms on their server. 
There are various parameters available that allow for filtering and sorting the returned list. This API supports pagination. +To use it, you will need to authenticate by providing an `access_token` +for a server admin: see [Admin API](../usage/administration/admin_api). + **Parameters** The following query parameters are available: @@ -478,9 +481,6 @@ several minutes or longer. The local server will only have the power to move local user and room aliases to the new room. Users on other servers will be unaffected. -To use it, you will need to authenticate by providing an ``access_token`` for a -server admin: see [Admin API](../usage/administration/admin_api). - ## Version 1 (old version) This version works synchronously. That means you only get the response once the server has diff --git a/docs/admin_api/statistics.md b/docs/admin_api/statistics.md index 1901f1eea025..a26c76f9f317 100644 --- a/docs/admin_api/statistics.md +++ b/docs/admin_api/statistics.md @@ -3,15 +3,15 @@ Returns information about all local media usage of users. Gives the possibility to filter them by time and user. +To use it, you will need to authenticate by providing an `access_token` +for a server admin: see [Admin API](../usage/administration/admin_api). + The API is: ``` GET /_synapse/admin/v1/statistics/users/media ``` -To use it, you will need to authenticate by providing an `access_token` -for a server admin: see [Admin API](../usage/administration/admin_api). - A response body like the following is returned: ```json diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md index fdc1f2d1cfd1..4f5f377b3808 100644 --- a/docs/admin_api/user_admin_api.md +++ b/docs/admin_api/user_admin_api.md @@ -1,5 +1,8 @@ # User Admin API +To use it, you will need to authenticate by providing an `access_token` +for a server admin: see [Admin API](../usage/administration/admin_api). + ## Query User Account This API returns information about a specific user account. @@ -10,9 +13,6 @@ The api is: GET /_synapse/admin/v2/users/ ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - It returns a JSON body like the following: ```jsonc @@ -104,9 +104,6 @@ with a body of: } ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - Returns HTTP status code: - `201` - When a new user object was created. - `200` - When a user was modified. @@ -156,9 +153,6 @@ By default, the response is ordered by ascending user ID. GET /_synapse/admin/v2/users?from=0&limit=10&guests=false ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - A response body like the following is returned: ```json @@ -278,9 +272,6 @@ GET /_matrix/client/r0/admin/whois/ See also: [Client Server API Whois](https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid). -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - It returns a JSON body like the following: ```json @@ -335,9 +326,6 @@ with a body of: } ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - The erase parameter is optional and defaults to `false`. An empty body may be passed for backwards compatibility. 
@@ -394,9 +382,6 @@ with a body of: } ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - The parameter `new_password` is required. The parameter `logout_devices` is optional and defaults to `true`. @@ -409,9 +394,6 @@ The api is: GET /_synapse/admin/v1/users//admin ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - A response body like the following is returned: ```json @@ -439,10 +421,6 @@ with a body of: } ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - - ## List room memberships of a user Gets a list of all `room_id` that a specific `user_id` is member. @@ -453,9 +431,6 @@ The API is: GET /_synapse/admin/v1/users//joined_rooms ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - A response body like the following is returned: ```json @@ -574,9 +549,6 @@ The API is: GET /_synapse/admin/v1/users//media ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - A response body like the following is returned: ```json @@ -691,9 +663,6 @@ The API is: DELETE /_synapse/admin/v1/users//media ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - A response body like the following is returned: ```json @@ -766,9 +735,6 @@ The API is: GET /_synapse/admin/v2/users//devices ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - A response body like the following is returned: ```json @@ -834,9 +800,6 @@ POST /_synapse/admin/v2/users//delete_devices } ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - An empty JSON dict is returned. **Parameters** @@ -858,9 +821,6 @@ The API is: GET /_synapse/admin/v2/users//devices/ ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - A response body like the following is returned: ```json @@ -906,9 +866,6 @@ PUT /_synapse/admin/v2/users//devices/ } ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - An empty JSON dict is returned. **Parameters** @@ -935,9 +892,6 @@ DELETE /_synapse/admin/v2/users//devices/ {} ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - An empty JSON dict is returned. 
**Parameters** @@ -956,9 +910,6 @@ The API is: GET /_synapse/admin/v1/users//pushers ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - A response body like the following is returned: ```json @@ -1053,9 +1004,6 @@ To un-shadow-ban a user the API is: DELETE /_synapse/admin/v1/users//shadow_ban ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - An empty JSON dict is returned in both cases. **Parameters** @@ -1078,9 +1026,6 @@ The API is: GET /_synapse/admin/v1/users//override_ratelimit ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - A response body like the following is returned: ```json @@ -1120,9 +1065,6 @@ The API is: POST /_synapse/admin/v1/users//override_ratelimit ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - A response body like the following is returned: ```json @@ -1165,9 +1107,6 @@ The API is: DELETE /_synapse/admin/v1/users//override_ratelimit ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - An empty JSON dict is returned. ```json @@ -1196,7 +1135,5 @@ The API is: GET /_synapse/admin/v1/username_available?username=$localpart ``` -The request and response format is the same as the [/_matrix/client/r0/register/available](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) API. - -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) +The request and response format is the same as the +[/_matrix/client/r0/register/available](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) API. From 901b264c0c88f39cbfb8b2229e0dc57968882658 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Mon, 31 Jan 2022 20:20:05 +0100 Subject: [PATCH 175/474] Add type hints to `tests/rest/admin` (#11851) --- changelog.d/11851.misc | 1 + mypy.ini | 3 - tests/rest/admin/test_admin.py | 134 +++------- tests/rest/admin/test_user.py | 262 ++++++++++---------- tests/rest/admin/test_username_available.py | 16 +- 5 files changed, 184 insertions(+), 232 deletions(-) create mode 100644 changelog.d/11851.misc diff --git a/changelog.d/11851.misc b/changelog.d/11851.misc new file mode 100644 index 000000000000..ccc3ec3482b1 --- /dev/null +++ b/changelog.d/11851.misc @@ -0,0 +1 @@ +Add type hints to `tests/rest/admin`. 
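The shape of this change is the same across all three test files: previously-untyped test hooks and test methods gain explicit annotations so that mypy can check their bodies, and the matching exclusions are dropped from `mypy.ini` below. A minimal sketch of the pattern — the class and test names here are invented for illustration, not taken from the patch:

```python
from twisted.test.proto_helpers import MemoryReactor

from synapse.server import HomeServer
from synapse.util import Clock

from tests import unittest


class ExampleAdminTestCase(unittest.HomeserverTestCase):
    # Before this patch the hook read `def prepare(self, reactor, clock, hs):`,
    # so mypy skipped the body; the annotated form below is what the patch
    # converges on everywhere.
    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.admin_user = self.register_user("admin", "pass", admin=True)
        self.admin_user_tok = self.login("admin", "pass")

    def test_annotated(self) -> None:
        # Runtime behaviour is unchanged; only static checking is affected.
        self.assertTrue(self.admin_user_tok)
```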
diff --git a/mypy.ini b/mypy.ini index 85fa22d28f2b..2884078d0ad3 100644 --- a/mypy.ini +++ b/mypy.ini @@ -77,9 +77,6 @@ exclude = (?x) |tests/push/test_http.py |tests/push/test_presentable_names.py |tests/push/test_push_rule_evaluator.py - |tests/rest/admin/test_admin.py - |tests/rest/admin/test_user.py - |tests/rest/admin/test_username_available.py |tests/rest/client/test_account.py |tests/rest/client/test_events.py |tests/rest/client/test_filter.py diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index 3adadcb46bc4..849d00ab4d94 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -12,18 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. -import os import urllib.parse from http import HTTPStatus -from unittest.mock import Mock +from typing import List -from twisted.internet.defer import Deferred +from parameterized import parameterized + +from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin from synapse.http.server import JsonResource -from synapse.logging.context import make_deferred_yieldable from synapse.rest.admin import VersionServlet from synapse.rest.client import groups, login, room +from synapse.server import HomeServer +from synapse.util import Clock from tests import unittest from tests.server import FakeSite, make_request @@ -33,12 +35,12 @@ class VersionTestCase(unittest.HomeserverTestCase): url = "/_synapse/admin/v1/server_version" - def create_test_resource(self): + def create_test_resource(self) -> JsonResource: resource = JsonResource(self.hs) VersionServlet(self.hs).register(resource) return resource - def test_version_string(self): + def test_version_string(self) -> None: channel = self.make_request("GET", self.url, shorthand=False) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) @@ -54,14 +56,14 @@ class DeleteGroupTestCase(unittest.HomeserverTestCase): groups.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") self.other_user = self.register_user("user", "pass") self.other_user_token = self.login("user", "pass") - def test_delete_group(self): + def test_delete_group(self) -> None: # Create a new group channel = self.make_request( "POST", @@ -112,7 +114,7 @@ def test_delete_group(self): self.assertNotIn(group_id, self._get_groups_user_is_in(self.admin_user_tok)) self.assertNotIn(group_id, self._get_groups_user_is_in(self.other_user_token)) - def _check_group(self, group_id, expect_code): + def _check_group(self, group_id: str, expect_code: int) -> None: """Assert that trying to fetch the given group results in the given HTTP status code """ @@ -124,7 +126,7 @@ def _check_group(self, group_id, expect_code): self.assertEqual(expect_code, channel.code, msg=channel.json_body) - def _get_groups_user_is_in(self, access_token): + def _get_groups_user_is_in(self, access_token: str) -> List[str]: """Returns the list of groups the user is in (given their access token)""" channel = self.make_request("GET", b"/joined_groups", access_token=access_token) @@ -143,59 +145,15 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): room.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: # Allow for uploading and 
downloading to/from the media repo self.media_repo = hs.get_media_repository_resource() self.download_resource = self.media_repo.children[b"download"] self.upload_resource = self.media_repo.children[b"upload"] - def make_homeserver(self, reactor, clock): - - self.fetches = [] - - async def get_file(destination, path, output_stream, args=None, max_size=None): - """ - Returns tuple[int,dict,str,int] of file length, response headers, - absolute URI, and response code. - """ - - def write_to(r): - data, response = r - output_stream.write(data) - return response - - d = Deferred() - d.addCallback(write_to) - self.fetches.append((d, destination, path, args)) - return await make_deferred_yieldable(d) - - client = Mock() - client.get_file = get_file - - self.storage_path = self.mktemp() - self.media_store_path = self.mktemp() - os.mkdir(self.storage_path) - os.mkdir(self.media_store_path) - - config = self.default_config() - config["media_store_path"] = self.media_store_path - config["thumbnail_requirements"] = {} - config["max_image_pixels"] = 2000000 - - provider_config = { - "module": "synapse.rest.media.v1.storage_provider.FileStorageProviderBackend", - "store_local": True, - "store_synchronous": False, - "store_remote": True, - "config": {"directory": self.storage_path}, - } - config["media_storage_providers"] = [provider_config] - - hs = self.setup_test_homeserver(config=config, federation_http_client=client) - - return hs - - def _ensure_quarantined(self, admin_user_tok, server_and_media_id): + def _ensure_quarantined( + self, admin_user_tok: str, server_and_media_id: str + ) -> None: """Ensure a piece of media is quarantined when trying to access it.""" channel = make_request( self.reactor, @@ -216,12 +174,18 @@ def _ensure_quarantined(self, admin_user_tok, server_and_media_id): ), ) - def test_quarantine_media_requires_admin(self): + @parameterized.expand( + [ + # Attempt quarantine media APIs as non-admin + "/_synapse/admin/v1/media/quarantine/example.org/abcde12345", + # And the roomID/userID endpoint + "/_synapse/admin/v1/room/!room%3Aexample.com/media/quarantine", + ] + ) + def test_quarantine_media_requires_admin(self, url: str) -> None: self.register_user("nonadmin", "pass", admin=False) non_admin_user_tok = self.login("nonadmin", "pass") - # Attempt quarantine media APIs as non-admin - url = "/_synapse/admin/v1/media/quarantine/example.org/abcde12345" channel = self.make_request( "POST", url.encode("ascii"), @@ -235,22 +199,7 @@ def test_quarantine_media_requires_admin(self): msg="Expected forbidden on quarantining media as a non-admin", ) - # And the roomID/userID endpoint - url = "/_synapse/admin/v1/room/!room%3Aexample.com/media/quarantine" - channel = self.make_request( - "POST", - url.encode("ascii"), - access_token=non_admin_user_tok, - ) - - # Expect a forbidden error - self.assertEqual( - HTTPStatus.FORBIDDEN, - channel.code, - msg="Expected forbidden on quarantining media as a non-admin", - ) - - def test_quarantine_media_by_id(self): + def test_quarantine_media_by_id(self) -> None: self.register_user("id_admin", "pass", admin=True) admin_user_tok = self.login("id_admin", "pass") @@ -295,7 +244,15 @@ def test_quarantine_media_by_id(self): # Attempt to access the media self._ensure_quarantined(admin_user_tok, server_name_and_media_id) - def test_quarantine_all_media_in_room(self, override_url_template=None): + @parameterized.expand( + [ + # regular API path + "/_synapse/admin/v1/room/%s/media/quarantine", + # deprecated API path + "/_synapse/admin/v1/quarantine_media/%s", + 
] + ) + def test_quarantine_all_media_in_room(self, url: str) -> None: self.register_user("room_admin", "pass", admin=True) admin_user_tok = self.login("room_admin", "pass") @@ -333,16 +290,9 @@ def test_quarantine_all_media_in_room(self, override_url_template=None): tok=non_admin_user_tok, ) - # Quarantine all media in the room - if override_url_template: - url = override_url_template % urllib.parse.quote(room_id) - else: - url = "/_synapse/admin/v1/room/%s/media/quarantine" % urllib.parse.quote( - room_id - ) channel = self.make_request( "POST", - url, + url % urllib.parse.quote(room_id), access_token=admin_user_tok, ) self.pump(1.0) @@ -359,11 +309,7 @@ def test_quarantine_all_media_in_room(self, override_url_template=None): self._ensure_quarantined(admin_user_tok, server_and_media_id_1) self._ensure_quarantined(admin_user_tok, server_and_media_id_2) - def test_quarantine_all_media_in_room_deprecated_api_path(self): - # Perform the above test with the deprecated API path - self.test_quarantine_all_media_in_room("/_synapse/admin/v1/quarantine_media/%s") - - def test_quarantine_all_media_by_user(self): + def test_quarantine_all_media_by_user(self) -> None: self.register_user("user_admin", "pass", admin=True) admin_user_tok = self.login("user_admin", "pass") @@ -401,7 +347,7 @@ def test_quarantine_all_media_by_user(self): self._ensure_quarantined(admin_user_tok, server_and_media_id_1) self._ensure_quarantined(admin_user_tok, server_and_media_id_2) - def test_cannot_quarantine_safe_media(self): + def test_cannot_quarantine_safe_media(self) -> None: self.register_user("user_admin", "pass", admin=True) admin_user_tok = self.login("user_admin", "pass") @@ -475,7 +421,7 @@ class PurgeHistoryTestCase(unittest.HomeserverTestCase): room.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") @@ -488,7 +434,7 @@ def prepare(self, reactor, clock, hs): self.url = f"/_synapse/admin/v1/purge_history/{self.room_id}" self.url_status = "/_synapse/admin/v1/purge_history_status/" - def test_purge_history(self): + def test_purge_history(self) -> None: """ Simple test of purge history API. Test only that is is possible to call, get status HTTPStatus.OK and purge_id. 
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 9711405735db..272637e96575 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -23,13 +23,17 @@ from parameterized import parameterized, parameterized_class +from twisted.test.proto_helpers import MemoryReactor + import synapse.rest.admin from synapse.api.constants import UserTypes from synapse.api.errors import Codes, HttpResponseException, ResourceLimitError from synapse.api.room_versions import RoomVersions from synapse.rest.client import devices, login, logout, profile, room, sync from synapse.rest.media.v1.filepath import MediaFilePaths +from synapse.server import HomeServer from synapse.types import JsonDict, UserID +from synapse.util import Clock from tests import unittest from tests.server import FakeSite, make_request @@ -44,7 +48,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): profile.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.url = "/_synapse/admin/v1/register" @@ -61,12 +65,12 @@ def make_homeserver(self, reactor, clock): self.hs.config.registration.registration_shared_secret = "shared" - self.hs.get_media_repository = Mock() - self.hs.get_deactivate_account_handler = Mock() + self.hs.get_media_repository = Mock() # type: ignore[assignment] + self.hs.get_deactivate_account_handler = Mock() # type: ignore[assignment] return self.hs - def test_disabled(self): + def test_disabled(self) -> None: """ If there is no shared secret, registration through this method will be prevented. @@ -80,7 +84,7 @@ def test_disabled(self): "Shared secret registration is not enabled", channel.json_body["error"] ) - def test_get_nonce(self): + def test_get_nonce(self) -> None: """ Calling GET on the endpoint will return a randomised nonce, using the homeserver's secrets provider. @@ -93,7 +97,7 @@ def test_get_nonce(self): self.assertEqual(channel.json_body, {"nonce": "abcd"}) - def test_expired_nonce(self): + def test_expired_nonce(self) -> None: """ Calling GET on the endpoint will return a randomised nonce, which will only last for SALT_TIMEOUT (60s). @@ -118,7 +122,7 @@ def test_expired_nonce(self): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual("unrecognised nonce", channel.json_body["error"]) - def test_register_incorrect_nonce(self): + def test_register_incorrect_nonce(self) -> None: """ Only the provided nonce can be used, as it's checked in the MAC. """ @@ -141,7 +145,7 @@ def test_register_incorrect_nonce(self): self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) self.assertEqual("HMAC incorrect", channel.json_body["error"]) - def test_register_correct_nonce(self): + def test_register_correct_nonce(self) -> None: """ When the correct nonce is provided, and the right key is provided, the user is registered. @@ -168,7 +172,7 @@ def test_register_correct_nonce(self): self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertEqual("@bob:test", channel.json_body["user_id"]) - def test_nonce_reuse(self): + def test_nonce_reuse(self) -> None: """ A valid unrecognised nonce. 
""" @@ -197,14 +201,14 @@ def test_nonce_reuse(self): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual("unrecognised nonce", channel.json_body["error"]) - def test_missing_parts(self): + def test_missing_parts(self) -> None: """ Synapse will complain if you don't give nonce, username, password, and mac. Admin and user_types are optional. Additional checks are done for length and type. """ - def nonce(): + def nonce() -> str: channel = self.make_request("GET", self.url) return channel.json_body["nonce"] @@ -297,7 +301,7 @@ def nonce(): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual("Invalid user type", channel.json_body["error"]) - def test_displayname(self): + def test_displayname(self) -> None: """ Test that displayname of new user is set """ @@ -400,7 +404,7 @@ def test_displayname(self): @override_config( {"limit_usage_by_mau": True, "max_mau_value": 2, "mau_trial_days": 0} ) - def test_register_mau_limit_reached(self): + def test_register_mau_limit_reached(self) -> None: """ Check we can register a user via the shared secret registration API even if the MAU limit is reached. @@ -450,13 +454,13 @@ class UsersListTestCase(unittest.HomeserverTestCase): ] url = "/_synapse/admin/v2/users" - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastore() self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") - def test_no_auth(self): + def test_no_auth(self) -> None: """ Try to list users without authentication. """ @@ -465,7 +469,7 @@ def test_no_auth(self): self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) - def test_requester_is_no_admin(self): + def test_requester_is_no_admin(self) -> None: """ If the user is not a server admin, an error is returned. """ @@ -477,7 +481,7 @@ def test_requester_is_no_admin(self): self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) - def test_all_users(self): + def test_all_users(self) -> None: """ List all users, including deactivated users. """ @@ -497,7 +501,7 @@ def test_all_users(self): # Check that all fields are available self._check_fields(channel.json_body["users"]) - def test_search_term(self): + def test_search_term(self) -> None: """Test that searching for a users works correctly""" def _search_test( @@ -505,7 +509,7 @@ def _search_test( search_term: str, search_field: Optional[str] = "name", expected_http_code: Optional[int] = HTTPStatus.OK, - ): + ) -> None: """Search for a user and check that the returned user's id is a match Args: @@ -575,7 +579,7 @@ def _search_test( _search_test(None, "foo", "user_id") _search_test(None, "bar", "user_id") - def test_invalid_parameter(self): + def test_invalid_parameter(self) -> None: """ If parameters are invalid, an error is returned. 
""" @@ -640,7 +644,7 @@ def test_invalid_parameter(self): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) - def test_limit(self): + def test_limit(self) -> None: """ Testing list of users with limit """ @@ -661,7 +665,7 @@ def test_limit(self): self.assertEqual(channel.json_body["next_token"], "5") self._check_fields(channel.json_body["users"]) - def test_from(self): + def test_from(self) -> None: """ Testing list of users with a defined starting point (from) """ @@ -682,7 +686,7 @@ def test_from(self): self.assertNotIn("next_token", channel.json_body) self._check_fields(channel.json_body["users"]) - def test_limit_and_from(self): + def test_limit_and_from(self) -> None: """ Testing list of users with a defined starting point and limit """ @@ -703,7 +707,7 @@ def test_limit_and_from(self): self.assertEqual(len(channel.json_body["users"]), 10) self._check_fields(channel.json_body["users"]) - def test_next_token(self): + def test_next_token(self) -> None: """ Testing that `next_token` appears at the right place """ @@ -765,7 +769,7 @@ def test_next_token(self): self.assertEqual(len(channel.json_body["users"]), 1) self.assertNotIn("next_token", channel.json_body) - def test_order_by(self): + def test_order_by(self) -> None: """ Testing order list with parameter `order_by` """ @@ -843,7 +847,7 @@ def _order_test( expected_user_list: List[str], order_by: Optional[str], dir: Optional[str] = None, - ): + ) -> None: """Request the list of users in a certain order. Assert that order is what we expect Args: @@ -870,7 +874,7 @@ def _order_test( self.assertEqual(expected_user_list, returned_order) self._check_fields(channel.json_body["users"]) - def _check_fields(self, content: List[JsonDict]): + def _check_fields(self, content: List[JsonDict]) -> None: """Checks that the expected user attributes are present in content Args: content: List that is checked for content @@ -886,7 +890,7 @@ def _check_fields(self, content: List[JsonDict]): self.assertIn("avatar_url", u) self.assertIn("creation_ts", u) - def _create_users(self, number_users: int): + def _create_users(self, number_users: int) -> None: """ Create a number of users Args: @@ -908,7 +912,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastore() self.admin_user = self.register_user("admin", "pass", admin=True) @@ -931,7 +935,7 @@ def prepare(self, reactor, clock, hs): self.store.user_add_threepid("@user:test", "email", "foo@bar.com", 0, 0) ) - def test_no_auth(self): + def test_no_auth(self) -> None: """ Try to deactivate users without authentication. """ @@ -940,7 +944,7 @@ def test_no_auth(self): self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) - def test_requester_is_not_admin(self): + def test_requester_is_not_admin(self) -> None: """ If the user is not a server admin, an error is returned. 
""" @@ -961,7 +965,7 @@ def test_requester_is_not_admin(self): self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) self.assertEqual("You are not a server admin", channel.json_body["error"]) - def test_user_does_not_exist(self): + def test_user_does_not_exist(self) -> None: """ Tests that deactivation for a user that does not exist returns a HTTPStatus.NOT_FOUND """ @@ -975,7 +979,7 @@ def test_user_does_not_exist(self): self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) - def test_erase_is_not_bool(self): + def test_erase_is_not_bool(self) -> None: """ If parameter `erase` is not boolean, return an error """ @@ -990,7 +994,7 @@ def test_erase_is_not_bool(self): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual(Codes.BAD_JSON, channel.json_body["errcode"]) - def test_user_is_not_local(self): + def test_user_is_not_local(self) -> None: """ Tests that deactivation for a user that is not a local returns a HTTPStatus.BAD_REQUEST """ @@ -1001,7 +1005,7 @@ def test_user_is_not_local(self): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual("Can only deactivate local users", channel.json_body["error"]) - def test_deactivate_user_erase_true(self): + def test_deactivate_user_erase_true(self) -> None: """ Test deactivating a user and set `erase` to `true` """ @@ -1046,7 +1050,7 @@ def test_deactivate_user_erase_true(self): self._is_erased("@user:test", True) - def test_deactivate_user_erase_false(self): + def test_deactivate_user_erase_false(self) -> None: """ Test deactivating a user and set `erase` to `false` """ @@ -1091,7 +1095,7 @@ def test_deactivate_user_erase_false(self): self._is_erased("@user:test", False) - def test_deactivate_user_erase_true_no_profile(self): + def test_deactivate_user_erase_true_no_profile(self) -> None: """ Test deactivating a user and set `erase` to `true` if user has no profile information (stored in the database table `profiles`). @@ -1162,7 +1166,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): sync.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastore() self.auth_handler = hs.get_auth_handler() @@ -1185,7 +1189,7 @@ def prepare(self, reactor, clock, hs): self.url_prefix = "/_synapse/admin/v2/users/%s" self.url_other_user = self.url_prefix % self.other_user - def test_requester_is_no_admin(self): + def test_requester_is_no_admin(self) -> None: """ If the user is not a server admin, an error is returned. """ @@ -1210,7 +1214,7 @@ def test_requester_is_no_admin(self): self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) self.assertEqual("You are not a server admin", channel.json_body["error"]) - def test_user_does_not_exist(self): + def test_user_does_not_exist(self) -> None: """ Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND """ @@ -1224,7 +1228,7 @@ def test_user_does_not_exist(self): self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) self.assertEqual("M_NOT_FOUND", channel.json_body["errcode"]) - def test_invalid_parameter(self): + def test_invalid_parameter(self) -> None: """ If parameters are invalid, an error is returned. 
""" @@ -1319,7 +1323,7 @@ def test_invalid_parameter(self): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"]) - def test_get_user(self): + def test_get_user(self) -> None: """ Test a simple get of a user. """ @@ -1334,7 +1338,7 @@ def test_get_user(self): self.assertEqual("User", channel.json_body["displayname"]) self._check_fields(channel.json_body) - def test_create_server_admin(self): + def test_create_server_admin(self) -> None: """ Check that a new admin user is created successfully. """ @@ -1383,7 +1387,7 @@ def test_create_server_admin(self): self.assertEqual("mxc://fibble/wibble", channel.json_body["avatar_url"]) self._check_fields(channel.json_body) - def test_create_user(self): + def test_create_user(self) -> None: """ Check that a new regular user is created successfully. """ @@ -1450,7 +1454,7 @@ def test_create_user(self): @override_config( {"limit_usage_by_mau": True, "max_mau_value": 2, "mau_trial_days": 0} ) - def test_create_user_mau_limit_reached_active_admin(self): + def test_create_user_mau_limit_reached_active_admin(self) -> None: """ Check that an admin can register a new user via the admin API even if the MAU limit is reached. @@ -1496,7 +1500,7 @@ def test_create_user_mau_limit_reached_active_admin(self): @override_config( {"limit_usage_by_mau": True, "max_mau_value": 2, "mau_trial_days": 0} ) - def test_create_user_mau_limit_reached_passive_admin(self): + def test_create_user_mau_limit_reached_passive_admin(self) -> None: """ Check that an admin can register a new user via the admin API even if the MAU limit is reached. @@ -1541,7 +1545,7 @@ def test_create_user_mau_limit_reached_passive_admin(self): "public_baseurl": "https://example.com", } ) - def test_create_user_email_notif_for_new_users(self): + def test_create_user_email_notif_for_new_users(self) -> None: """ Check that a new regular user is created successfully and got an email pusher. @@ -1584,7 +1588,7 @@ def test_create_user_email_notif_for_new_users(self): "public_baseurl": "https://example.com", } ) - def test_create_user_email_no_notif_for_new_users(self): + def test_create_user_email_no_notif_for_new_users(self) -> None: """ Check that a new regular user is created successfully and got not an email pusher. @@ -1615,7 +1619,7 @@ def test_create_user_email_no_notif_for_new_users(self): pushers = list(pushers) self.assertEqual(len(pushers), 0) - def test_set_password(self): + def test_set_password(self) -> None: """ Test setting a new password for another user. """ @@ -1631,7 +1635,7 @@ def test_set_password(self): self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self._check_fields(channel.json_body) - def test_set_displayname(self): + def test_set_displayname(self) -> None: """ Test setting the displayname of another user. """ @@ -1659,7 +1663,7 @@ def test_set_displayname(self): self.assertEqual("@user:test", channel.json_body["name"]) self.assertEqual("foobar", channel.json_body["displayname"]) - def test_set_threepid(self): + def test_set_threepid(self) -> None: """ Test setting threepid for an other user. """ @@ -1740,7 +1744,7 @@ def test_set_threepid(self): self.assertEqual(0, len(channel.json_body["threepids"])) self._check_fields(channel.json_body) - def test_set_duplicate_threepid(self): + def test_set_duplicate_threepid(self) -> None: """ Test setting the same threepid for a second user. First user loses and second user gets mapping of this threepid. 
@@ -1827,7 +1831,7 @@ def test_set_duplicate_threepid(self): self.assertEqual(0, len(channel.json_body["threepids"])) self._check_fields(channel.json_body) - def test_set_external_id(self): + def test_set_external_id(self) -> None: """ Test setting external id for an other user. """ @@ -1925,7 +1929,7 @@ def test_set_external_id(self): self.assertEqual("@user:test", channel.json_body["name"]) self.assertEqual(0, len(channel.json_body["external_ids"])) - def test_set_duplicate_external_id(self): + def test_set_duplicate_external_id(self) -> None: """ Test that setting the same external id for a second user fails and external id from user must not be changed. @@ -2048,7 +2052,7 @@ def test_set_duplicate_external_id(self): ) self._check_fields(channel.json_body) - def test_deactivate_user(self): + def test_deactivate_user(self) -> None: """ Test deactivating another user. """ @@ -2113,7 +2117,7 @@ def test_deactivate_user(self): self.assertNotIn("password_hash", channel.json_body) @override_config({"user_directory": {"enabled": True, "search_all_users": True}}) - def test_change_name_deactivate_user_user_directory(self): + def test_change_name_deactivate_user_user_directory(self) -> None: """ Test change profile information of a deactivated user and check that it does not appear in user directory @@ -2156,7 +2160,7 @@ def test_change_name_deactivate_user_user_directory(self): profile = self.get_success(self.store.get_user_in_directory(self.other_user)) self.assertIsNone(profile) - def test_reactivate_user(self): + def test_reactivate_user(self) -> None: """ Test reactivating another user. """ @@ -2189,7 +2193,7 @@ def test_reactivate_user(self): self.assertNotIn("password_hash", channel.json_body) @override_config({"password_config": {"localdb_enabled": False}}) - def test_reactivate_user_localdb_disabled(self): + def test_reactivate_user_localdb_disabled(self) -> None: """ Test reactivating another user when using SSO. """ @@ -2223,7 +2227,7 @@ def test_reactivate_user_localdb_disabled(self): self.assertNotIn("password_hash", channel.json_body) @override_config({"password_config": {"enabled": False}}) - def test_reactivate_user_password_disabled(self): + def test_reactivate_user_password_disabled(self) -> None: """ Test reactivating another user when using SSO. """ @@ -2256,7 +2260,7 @@ def test_reactivate_user_password_disabled(self): # This key was removed intentionally. Ensure it is not accidentally re-included. self.assertNotIn("password_hash", channel.json_body) - def test_set_user_as_admin(self): + def test_set_user_as_admin(self) -> None: """ Test setting the admin flag on a user. """ @@ -2284,7 +2288,7 @@ def test_set_user_as_admin(self): self.assertEqual("@user:test", channel.json_body["name"]) self.assertTrue(channel.json_body["admin"]) - def test_set_user_type(self): + def test_set_user_type(self) -> None: """ Test changing user type. """ @@ -2335,7 +2339,7 @@ def test_set_user_type(self): self.assertEqual("@user:test", channel.json_body["name"]) self.assertIsNone(channel.json_body["user_type"]) - def test_accidental_deactivation_prevention(self): + def test_accidental_deactivation_prevention(self) -> None: """ Ensure an account can't accidentally be deactivated by using a str value for the deactivated body parameter @@ -2418,7 +2422,7 @@ def _deactivate_user(self, user_id: str) -> None: # This key was removed intentionally. Ensure it is not accidentally re-included. 
self.assertNotIn("password_hash", channel.json_body) - def _check_fields(self, content: JsonDict): + def _check_fields(self, content: JsonDict) -> None: """Checks that the expected user attributes are present in content Args: @@ -2448,7 +2452,7 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase): room.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") @@ -2457,7 +2461,7 @@ def prepare(self, reactor, clock, hs): self.other_user ) - def test_no_auth(self): + def test_no_auth(self) -> None: """ Try to list rooms of an user without authentication. """ @@ -2466,7 +2470,7 @@ def test_no_auth(self): self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) - def test_requester_is_no_admin(self): + def test_requester_is_no_admin(self) -> None: """ If the user is not a server admin, an error is returned. """ @@ -2481,7 +2485,7 @@ def test_requester_is_no_admin(self): self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) - def test_user_does_not_exist(self): + def test_user_does_not_exist(self) -> None: """ Tests that a lookup for a user that does not exist returns an empty list """ @@ -2496,7 +2500,7 @@ def test_user_does_not_exist(self): self.assertEqual(0, channel.json_body["total"]) self.assertEqual(0, len(channel.json_body["joined_rooms"])) - def test_user_is_not_local(self): + def test_user_is_not_local(self) -> None: """ Tests that a lookup for a user that is not a local and participates in no conversation returns an empty list """ @@ -2512,7 +2516,7 @@ def test_user_is_not_local(self): self.assertEqual(0, channel.json_body["total"]) self.assertEqual(0, len(channel.json_body["joined_rooms"])) - def test_no_memberships(self): + def test_no_memberships(self) -> None: """ Tests that a normal lookup for rooms is successfully if user has no memberships @@ -2528,7 +2532,7 @@ def test_no_memberships(self): self.assertEqual(0, channel.json_body["total"]) self.assertEqual(0, len(channel.json_body["joined_rooms"])) - def test_get_rooms(self): + def test_get_rooms(self) -> None: """ Tests that a normal lookup for rooms is successfully """ @@ -2549,7 +2553,7 @@ def test_get_rooms(self): self.assertEqual(number_rooms, channel.json_body["total"]) self.assertEqual(number_rooms, len(channel.json_body["joined_rooms"])) - def test_get_rooms_with_nonlocal_user(self): + def test_get_rooms_with_nonlocal_user(self) -> None: """ Tests that a normal lookup for rooms is successful with a non-local user """ @@ -2604,7 +2608,7 @@ class PushersRestTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastore() self.admin_user = self.register_user("admin", "pass", admin=True) @@ -2615,7 +2619,7 @@ def prepare(self, reactor, clock, hs): self.other_user ) - def test_no_auth(self): + def test_no_auth(self) -> None: """ Try to list pushers of an user without authentication. 
""" @@ -2624,7 +2628,7 @@ def test_no_auth(self): self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) - def test_requester_is_no_admin(self): + def test_requester_is_no_admin(self) -> None: """ If the user is not a server admin, an error is returned. """ @@ -2639,7 +2643,7 @@ def test_requester_is_no_admin(self): self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) - def test_user_does_not_exist(self): + def test_user_does_not_exist(self) -> None: """ Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND """ @@ -2653,7 +2657,7 @@ def test_user_does_not_exist(self): self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) - def test_user_is_not_local(self): + def test_user_is_not_local(self) -> None: """ Tests that a lookup for a user that is not a local returns a HTTPStatus.BAD_REQUEST """ @@ -2668,7 +2672,7 @@ def test_user_is_not_local(self): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual("Can only look up local users", channel.json_body["error"]) - def test_get_pushers(self): + def test_get_pushers(self) -> None: """ Tests that a normal lookup for pushers is successfully """ @@ -2732,7 +2736,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastore() self.media_repo = hs.get_media_repository_resource() self.filepaths = MediaFilePaths(hs.config.media.media_store_path) @@ -2746,7 +2750,7 @@ def prepare(self, reactor, clock, hs): ) @parameterized.expand(["GET", "DELETE"]) - def test_no_auth(self, method: str): + def test_no_auth(self, method: str) -> None: """Try to list media of an user without authentication.""" channel = self.make_request(method, self.url, {}) @@ -2754,7 +2758,7 @@ def test_no_auth(self, method: str): self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) @parameterized.expand(["GET", "DELETE"]) - def test_requester_is_no_admin(self, method: str): + def test_requester_is_no_admin(self, method: str) -> None: """If the user is not a server admin, an error is returned.""" other_user_token = self.login("user", "pass") @@ -2768,7 +2772,7 @@ def test_requester_is_no_admin(self, method: str): self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) @parameterized.expand(["GET", "DELETE"]) - def test_user_does_not_exist(self, method: str): + def test_user_does_not_exist(self, method: str) -> None: """Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND""" url = "/_synapse/admin/v1/users/@unknown_person:test/media" channel = self.make_request( @@ -2781,7 +2785,7 @@ def test_user_does_not_exist(self, method: str): self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) @parameterized.expand(["GET", "DELETE"]) - def test_user_is_not_local(self, method: str): + def test_user_is_not_local(self, method: str) -> None: """Tests that a lookup for a user that is not a local returns a HTTPStatus.BAD_REQUEST""" url = "/_synapse/admin/v1/users/@unknown_person:unknown_domain/media" @@ -2794,7 +2798,7 @@ def test_user_is_not_local(self, method: str): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, 
msg=channel.json_body) self.assertEqual("Can only look up local users", channel.json_body["error"]) - def test_limit_GET(self): + def test_limit_GET(self) -> None: """Testing list of media with limit""" number_media = 20 @@ -2813,7 +2817,7 @@ def test_limit_GET(self): self.assertEqual(channel.json_body["next_token"], 5) self._check_fields(channel.json_body["media"]) - def test_limit_DELETE(self): + def test_limit_DELETE(self) -> None: """Testing delete of media with limit""" number_media = 20 @@ -2830,7 +2834,7 @@ def test_limit_DELETE(self): self.assertEqual(channel.json_body["total"], 5) self.assertEqual(len(channel.json_body["deleted_media"]), 5) - def test_from_GET(self): + def test_from_GET(self) -> None: """Testing list of media with a defined starting point (from)""" number_media = 20 @@ -2849,7 +2853,7 @@ def test_from_GET(self): self.assertNotIn("next_token", channel.json_body) self._check_fields(channel.json_body["media"]) - def test_from_DELETE(self): + def test_from_DELETE(self) -> None: """Testing delete of media with a defined starting point (from)""" number_media = 20 @@ -2866,7 +2870,7 @@ def test_from_DELETE(self): self.assertEqual(channel.json_body["total"], 15) self.assertEqual(len(channel.json_body["deleted_media"]), 15) - def test_limit_and_from_GET(self): + def test_limit_and_from_GET(self) -> None: """Testing list of media with a defined starting point and limit""" number_media = 20 @@ -2885,7 +2889,7 @@ def test_limit_and_from_GET(self): self.assertEqual(len(channel.json_body["media"]), 10) self._check_fields(channel.json_body["media"]) - def test_limit_and_from_DELETE(self): + def test_limit_and_from_DELETE(self) -> None: """Testing delete of media with a defined starting point and limit""" number_media = 20 @@ -2903,7 +2907,7 @@ def test_limit_and_from_DELETE(self): self.assertEqual(len(channel.json_body["deleted_media"]), 10) @parameterized.expand(["GET", "DELETE"]) - def test_invalid_parameter(self, method: str): + def test_invalid_parameter(self, method: str) -> None: """If parameters are invalid, an error is returned.""" # unkown order_by channel = self.make_request( @@ -2945,7 +2949,7 @@ def test_invalid_parameter(self, method: str): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) - def test_next_token(self): + def test_next_token(self) -> None: """ Testing that `next_token` appears at the right place @@ -3010,7 +3014,7 @@ def test_next_token(self): self.assertEqual(len(channel.json_body["media"]), 1) self.assertNotIn("next_token", channel.json_body) - def test_user_has_no_media_GET(self): + def test_user_has_no_media_GET(self) -> None: """ Tests that a normal lookup for media is successfully if user has no media created @@ -3026,7 +3030,7 @@ def test_user_has_no_media_GET(self): self.assertEqual(0, channel.json_body["total"]) self.assertEqual(0, len(channel.json_body["media"])) - def test_user_has_no_media_DELETE(self): + def test_user_has_no_media_DELETE(self) -> None: """ Tests that a delete is successful if user has no media """ @@ -3041,7 +3045,7 @@ def test_user_has_no_media_DELETE(self): self.assertEqual(0, channel.json_body["total"]) self.assertEqual(0, len(channel.json_body["deleted_media"])) - def test_get_media(self): + def test_get_media(self) -> None: """Tests that a normal lookup for media is successful""" number_media = 5 @@ -3060,7 +3064,7 @@ def test_get_media(self): self.assertNotIn("next_token", channel.json_body) 
self._check_fields(channel.json_body["media"]) - def test_delete_media(self): + def test_delete_media(self) -> None: """Tests that a normal delete of media is successful""" number_media = 5 @@ -3089,7 +3093,7 @@ def test_delete_media(self): for local_path in local_paths: self.assertFalse(os.path.exists(local_path)) - def test_order_by(self): + def test_order_by(self) -> None: """ Testing order list with parameter `order_by` """ @@ -3252,7 +3256,7 @@ def _create_media_and_access( return media_id - def _check_fields(self, content: List[JsonDict]): + def _check_fields(self, content: List[JsonDict]) -> None: """Checks that the expected user attributes are present in content Args: content: List that is checked for content @@ -3272,7 +3276,7 @@ def _order_test( expected_media_list: List[str], order_by: Optional[str], dir: Optional[str] = None, - ): + ) -> None: """Request the list of media in a certain order. Assert that order is what we expect Args: @@ -3312,7 +3316,7 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase): logout.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastore() self.admin_user = self.register_user("admin", "pass", admin=True) @@ -3331,14 +3335,14 @@ def _get_token(self) -> str: self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) return channel.json_body["access_token"] - def test_no_auth(self): + def test_no_auth(self) -> None: """Try to login as a user without authentication.""" channel = self.make_request("POST", self.url, b"{}") self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) - def test_not_admin(self): + def test_not_admin(self) -> None: """Try to login as a user as a non-admin user.""" channel = self.make_request( "POST", self.url, b"{}", access_token=self.other_user_tok @@ -3346,7 +3350,7 @@ def test_not_admin(self): self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) - def test_send_event(self): + def test_send_event(self) -> None: """Test that sending event as a user works.""" # Create a room. room_id = self.helper.create_room_as(self.other_user, tok=self.other_user_tok) @@ -3360,7 +3364,7 @@ def test_send_event(self): event = self.get_success(self.store.get_event(event_id)) self.assertEqual(event.sender, self.other_user) - def test_devices(self): + def test_devices(self) -> None: """Tests that logging in as a user doesn't create a new device for them.""" # Login in as the user self._get_token() @@ -3374,7 +3378,7 @@ def test_devices(self): # We should only see the one device (from the login in `prepare`) self.assertEqual(len(channel.json_body["devices"]), 1) - def test_logout(self): + def test_logout(self) -> None: """Test that calling `/logout` with the token works.""" # Login in as the user puppet_token = self._get_token() @@ -3397,7 +3401,7 @@ def test_logout(self): ) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) - def test_user_logout_all(self): + def test_user_logout_all(self) -> None: """Tests that the target user calling `/logout/all` does *not* expire the token. """ @@ -3424,7 +3428,7 @@ def test_user_logout_all(self): ) self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body) - def test_admin_logout_all(self): + def test_admin_logout_all(self) -> None: """Tests that the admin user calling `/logout/all` does expire the token. 
""" @@ -3464,7 +3468,7 @@ def test_admin_logout_all(self): "form_secret": "123secret", } ) - def test_consent(self): + def test_consent(self) -> None: """Test that sending a message is not subject to the privacy policies.""" # Have the admin user accept the terms. self.get_success(self.store.user_set_consent_version(self.admin_user, "1.0")) @@ -3492,7 +3496,7 @@ def test_consent(self): @override_config( {"limit_usage_by_mau": True, "max_mau_value": 1, "mau_trial_days": 0} ) - def test_mau_limit(self): + def test_mau_limit(self) -> None: # Create a room as the admin user. This will bump the monthly active users to 1. room_id = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok) @@ -3524,14 +3528,14 @@ class WhoisRestTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") self.other_user = self.register_user("user", "pass") - self.url = self.url_prefix % self.other_user + self.url = self.url_prefix % self.other_user # type: ignore[attr-defined] - def test_no_auth(self): + def test_no_auth(self) -> None: """ Try to get information of an user without authentication. """ @@ -3539,7 +3543,7 @@ def test_no_auth(self): self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) - def test_requester_is_not_admin(self): + def test_requester_is_not_admin(self) -> None: """ If the user is not a server admin, an error is returned. """ @@ -3554,11 +3558,11 @@ def test_requester_is_not_admin(self): self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) - def test_user_is_not_local(self): + def test_user_is_not_local(self) -> None: """ Tests that a lookup for a user that is not a local returns a HTTPStatus.BAD_REQUEST """ - url = self.url_prefix % "@unknown_person:unknown_domain" + url = self.url_prefix % "@unknown_person:unknown_domain" # type: ignore[attr-defined] channel = self.make_request( "GET", @@ -3568,7 +3572,7 @@ def test_user_is_not_local(self): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual("Can only whois a local user", channel.json_body["error"]) - def test_get_whois_admin(self): + def test_get_whois_admin(self) -> None: """ The lookup should succeed for an admin. """ @@ -3581,7 +3585,7 @@ def test_get_whois_admin(self): self.assertEqual(self.other_user, channel.json_body["user_id"]) self.assertIn("devices", channel.json_body) - def test_get_whois_user(self): + def test_get_whois_user(self) -> None: """ The lookup should succeed for a normal user looking up their own information. """ @@ -3604,7 +3608,7 @@ class ShadowBanRestTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastore() self.admin_user = self.register_user("admin", "pass", admin=True) @@ -3617,7 +3621,7 @@ def prepare(self, reactor, clock, hs): ) @parameterized.expand(["POST", "DELETE"]) - def test_no_auth(self, method: str): + def test_no_auth(self, method: str) -> None: """ Try to get information of an user without authentication. 
""" @@ -3626,7 +3630,7 @@ def test_no_auth(self, method: str): self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) @parameterized.expand(["POST", "DELETE"]) - def test_requester_is_not_admin(self, method: str): + def test_requester_is_not_admin(self, method: str) -> None: """ If the user is not a server admin, an error is returned. """ @@ -3637,7 +3641,7 @@ def test_requester_is_not_admin(self, method: str): self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) @parameterized.expand(["POST", "DELETE"]) - def test_user_is_not_local(self, method: str): + def test_user_is_not_local(self, method: str) -> None: """ Tests that shadow-banning for a user that is not a local returns a HTTPStatus.BAD_REQUEST """ @@ -3646,7 +3650,7 @@ def test_user_is_not_local(self, method: str): channel = self.make_request(method, url, access_token=self.admin_user_tok) self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) - def test_success(self): + def test_success(self) -> None: """ Shadow-banning should succeed for an admin. """ @@ -3682,7 +3686,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastore() self.admin_user = self.register_user("admin", "pass", admin=True) @@ -3695,7 +3699,7 @@ def prepare(self, reactor, clock, hs): ) @parameterized.expand(["GET", "POST", "DELETE"]) - def test_no_auth(self, method: str): + def test_no_auth(self, method: str) -> None: """ Try to get information of a user without authentication. """ @@ -3705,7 +3709,7 @@ def test_no_auth(self, method: str): self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) @parameterized.expand(["GET", "POST", "DELETE"]) - def test_requester_is_no_admin(self, method: str): + def test_requester_is_no_admin(self, method: str) -> None: """ If the user is not a server admin, an error is returned. """ @@ -3721,7 +3725,7 @@ def test_requester_is_no_admin(self, method: str): self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) @parameterized.expand(["GET", "POST", "DELETE"]) - def test_user_does_not_exist(self, method: str): + def test_user_does_not_exist(self, method: str) -> None: """ Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND """ @@ -3743,7 +3747,7 @@ def test_user_does_not_exist(self, method: str): ("DELETE", "Only local users can be ratelimited"), ] ) - def test_user_is_not_local(self, method: str, error_msg: str): + def test_user_is_not_local(self, method: str, error_msg: str) -> None: """ Tests that a lookup for a user that is not a local returns a HTTPStatus.BAD_REQUEST """ @@ -3760,7 +3764,7 @@ def test_user_is_not_local(self, method: str, error_msg: str): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual(error_msg, channel.json_body["error"]) - def test_invalid_parameter(self): + def test_invalid_parameter(self) -> None: """ If parameters are invalid, an error is returned. 
""" @@ -3808,7 +3812,7 @@ def test_invalid_parameter(self): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) - def test_return_zero_when_null(self): + def test_return_zero_when_null(self) -> None: """ If values in database are `null` API should return an int `0` """ @@ -3834,7 +3838,7 @@ def test_return_zero_when_null(self): self.assertEqual(0, channel.json_body["messages_per_second"]) self.assertEqual(0, channel.json_body["burst_count"]) - def test_success(self): + def test_success(self) -> None: """ Rate-limiting (set/update/delete) should succeed for an admin. """ @@ -3908,7 +3912,7 @@ class AccountDataTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def prepare(self, reactor, clock, hs) -> None: + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastore() self.admin_user = self.register_user("admin", "pass", admin=True) diff --git a/tests/rest/admin/test_username_available.py b/tests/rest/admin/test_username_available.py index 7978626e7197..b21f6d4689f8 100644 --- a/tests/rest/admin/test_username_available.py +++ b/tests/rest/admin/test_username_available.py @@ -14,9 +14,13 @@ from http import HTTPStatus +from twisted.test.proto_helpers import MemoryReactor + import synapse.rest.admin from synapse.api.errors import Codes, SynapseError from synapse.rest.client import login +from synapse.server import HomeServer +from synapse.util import Clock from tests import unittest @@ -28,11 +32,11 @@ class UsernameAvailableTestCase(unittest.HomeserverTestCase): ] url = "/_synapse/admin/v1/username_available" - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") - async def check_username(username): + async def check_username(username: str) -> bool: if username == "allowed": return True raise SynapseError( @@ -44,24 +48,24 @@ async def check_username(username): handler = self.hs.get_registration_handler() handler.check_username = check_username - def test_username_available(self): + def test_username_available(self) -> None: """ The endpoint should return a HTTPStatus.OK response if the username does not exist """ url = "%s?username=%s" % (self.url, "allowed") - channel = self.make_request("GET", url, None, self.admin_user_tok) + channel = self.make_request("GET", url, access_token=self.admin_user_tok) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertTrue(channel.json_body["available"]) - def test_username_unavailable(self): + def test_username_unavailable(self) -> None: """ The endpoint should return a HTTPStatus.OK response if the username does not exist """ url = "%s?username=%s" % (self.url, "disallowed") - channel = self.make_request("GET", url, None, self.admin_user_tok) + channel = self.make_request("GET", url, access_token=self.admin_user_tok) self.assertEqual( HTTPStatus.BAD_REQUEST, From a35e9db9be8666d9ce1164bec3f30b7623ecdfb5 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 1 Feb 2022 11:04:17 +0000 Subject: [PATCH 176/474] 1.52.0rc1 --- CHANGES.md | 61 +++++++++++++++++++++++++++++++++++++++ changelog.d/11612.bugfix | 1 - changelog.d/11621.feature | 1 - changelog.d/11639.feature | 1 - changelog.d/11658.feature | 1 - changelog.d/11683.removal | 1 - changelog.d/11743.feature | 1 - changelog.d/11767.bugfix | 1 - 
changelog.d/11784.bugfix | 1 - changelog.d/11788.feature | 1 - changelog.d/11789.feature | 1 - changelog.d/11790.feature | 1 - changelog.d/11792.misc | 1 - changelog.d/11793.misc | 1 - changelog.d/11794.misc | 1 - changelog.d/11795.misc | 1 - changelog.d/11798.bugfix | 1 - changelog.d/11799.misc | 1 - changelog.d/11810.misc | 1 - changelog.d/11811.misc | 1 - changelog.d/11813.misc | 1 - changelog.d/11815.misc | 1 - changelog.d/11816.misc | 1 - changelog.d/11817.misc | 1 - changelog.d/11820.doc | 1 - changelog.d/11821.doc | 1 - changelog.d/11823.misc | 1 - changelog.d/11827.bugfix | 1 - changelog.d/11830.misc | 1 - changelog.d/11834.misc | 1 - changelog.d/11836.misc | 1 - changelog.d/11838.misc | 1 - changelog.d/11843.removal | 1 - changelog.d/11846.feature | 1 - changelog.d/11847.misc | 1 - changelog.d/11851.misc | 1 - changelog.d/11860.doc | 1 - changelog.d/11861.doc | 1 - debian/changelog | 6 ++++ synapse/__init__.py | 2 +- 40 files changed, 68 insertions(+), 38 deletions(-) delete mode 100644 changelog.d/11612.bugfix delete mode 100644 changelog.d/11621.feature delete mode 100644 changelog.d/11639.feature delete mode 100644 changelog.d/11658.feature delete mode 100644 changelog.d/11683.removal delete mode 100644 changelog.d/11743.feature delete mode 100644 changelog.d/11767.bugfix delete mode 100644 changelog.d/11784.bugfix delete mode 100644 changelog.d/11788.feature delete mode 100644 changelog.d/11789.feature delete mode 100644 changelog.d/11790.feature delete mode 100644 changelog.d/11792.misc delete mode 100644 changelog.d/11793.misc delete mode 100644 changelog.d/11794.misc delete mode 100644 changelog.d/11795.misc delete mode 100644 changelog.d/11798.bugfix delete mode 100644 changelog.d/11799.misc delete mode 100644 changelog.d/11810.misc delete mode 100644 changelog.d/11811.misc delete mode 100644 changelog.d/11813.misc delete mode 100644 changelog.d/11815.misc delete mode 100644 changelog.d/11816.misc delete mode 100644 changelog.d/11817.misc delete mode 100644 changelog.d/11820.doc delete mode 100644 changelog.d/11821.doc delete mode 100644 changelog.d/11823.misc delete mode 100644 changelog.d/11827.bugfix delete mode 100644 changelog.d/11830.misc delete mode 100644 changelog.d/11834.misc delete mode 100644 changelog.d/11836.misc delete mode 100644 changelog.d/11838.misc delete mode 100644 changelog.d/11843.removal delete mode 100644 changelog.d/11846.feature delete mode 100644 changelog.d/11847.misc delete mode 100644 changelog.d/11851.misc delete mode 100644 changelog.d/11860.doc delete mode 100644 changelog.d/11861.doc diff --git a/CHANGES.md b/CHANGES.md index 37b9e6bb9613..6a8ce170dc54 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,64 @@ +Synapse 1.52.0rc1 (2022-02-01) +============================== + +Features +-------- + +- Remove account data (including client config, push rules and ignored users) upon user deactivation. ([\#11621](https://github.com/matrix-org/synapse/issues/11621), [\#11788](https://github.com/matrix-org/synapse/issues/11788), [\#11789](https://github.com/matrix-org/synapse/issues/11789)) +- Add admin API to reset connection timeouts for remote server. ([\#11639](https://github.com/matrix-org/synapse/issues/11639)) +- Add an admin API to get a list of rooms that federate with a given remote homeserver. ([\#11658](https://github.com/matrix-org/synapse/issues/11658)) +- Add a config flag to inhibit M_USER_IN_USE during registration. 
([\#11743](https://github.com/matrix-org/synapse/issues/11743)) +- Add a module callback to set username at registration. ([\#11790](https://github.com/matrix-org/synapse/issues/11790)) +- Allow configuring a maximum file size as well as a list of allowed content types for avatars. ([\#11846](https://github.com/matrix-org/synapse/issues/11846)) + + +Bugfixes +-------- + +- Include the bundled aggregations in the `/sync` response, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#11612](https://github.com/matrix-org/synapse/issues/11612)) +- Fix a long-standing bug when previewing Reddit URLs which do not contain an image. ([\#11767](https://github.com/matrix-org/synapse/issues/11767)) +- Fix a long-standing bug that media streams could cause long-lived connections when generating URL previews. ([\#11784](https://github.com/matrix-org/synapse/issues/11784)) +- Include a `prev_content` field in state events sent to Application Services. Contributed by @totallynotvaishnav. ([\#11798](https://github.com/matrix-org/synapse/issues/11798)) +- Fix a bug introduced in Synapse 0.33.3 causing requests to sometimes log strings such as `HTTPStatus.OK` instead of integer status codes. ([\#11827](https://github.com/matrix-org/synapse/issues/11827)) + + +Improved Documentation +---------------------- + +- Update pypi installation docs to indicate that we now support Python 3.10. ([\#11820](https://github.com/matrix-org/synapse/issues/11820)) +- Add missing steps to the contribution submission process in the documentation. Contributed by @sequentialread. ([\#11821](https://github.com/matrix-org/synapse/issues/11821)) +- Remove not needed old table of contents in documentation. ([\#11860](https://github.com/matrix-org/synapse/issues/11860)) +- Consolidate the `access_token` information at the top of each relevant page in the Admin API documentation. ([\#11861](https://github.com/matrix-org/synapse/issues/11861)) + + +Deprecations and Removals +------------------------- + +- Drop support for Python 3.6, which is EOL. ([\#11683](https://github.com/matrix-org/synapse/issues/11683)) +- Remove the `experimental_msc1849_support_enabled` flag as the features are now stable. ([\#11843](https://github.com/matrix-org/synapse/issues/11843)) + + +Internal Changes +---------------- + +- Preparation for database schema simplifications: add `state_key` and `rejection_reason` columns to `events` table. ([\#11792](https://github.com/matrix-org/synapse/issues/11792)) +- Add `FrozenEvent.get_state_key` and use it in a couple of places. ([\#11793](https://github.com/matrix-org/synapse/issues/11793)) +- Preparation for database schema simplifications: stop reading from `event_reference_hashes`. ([\#11794](https://github.com/matrix-org/synapse/issues/11794)) +- Drop unused table `public_room_list_stream`. ([\#11795](https://github.com/matrix-org/synapse/issues/11795)) +- Preparation for reducing Postgres serialization errors: allow setting transaction isolation level. Contributed by Nick @ Beeper. ([\#11799](https://github.com/matrix-org/synapse/issues/11799), [\#11847](https://github.com/matrix-org/synapse/issues/11847)) +- Docker: skip the initial amd64-only build and go straight to multiarch. ([\#11810](https://github.com/matrix-org/synapse/issues/11810)) +- Run Complement on the Github Actions VM and not inside a Docker container. ([\#11811](https://github.com/matrix-org/synapse/issues/11811)) +- Log module names at startup. 
([\#11813](https://github.com/matrix-org/synapse/issues/11813)) +- Improve type safety of bundled aggregations code. ([\#11815](https://github.com/matrix-org/synapse/issues/11815)) +- Drop support for Python 3.6, which is EOL. ([\#11816](https://github.com/matrix-org/synapse/issues/11816)) +- Correct a type annotation in the event validation logic. ([\#11817](https://github.com/matrix-org/synapse/issues/11817), [\#11830](https://github.com/matrix-org/synapse/issues/11830)) +- Minor updates and documentation for database schema delta files. ([\#11823](https://github.com/matrix-org/synapse/issues/11823)) +- Workaround a type annotation problem in `prometheus_client` 0.13.0. ([\#11834](https://github.com/matrix-org/synapse/issues/11834)) +- Minor performance improvement in room state lookup. ([\#11836](https://github.com/matrix-org/synapse/issues/11836)) +- Fix some indentation inconsistencies in the sample config. ([\#11838](https://github.com/matrix-org/synapse/issues/11838)) +- Add type hints to `tests/rest/admin`. ([\#11851](https://github.com/matrix-org/synapse/issues/11851)) + + Synapse 1.51.0 (2022-01-25) =========================== diff --git a/changelog.d/11612.bugfix b/changelog.d/11612.bugfix deleted file mode 100644 index 842f6892fda3..000000000000 --- a/changelog.d/11612.bugfix +++ /dev/null @@ -1 +0,0 @@ -Include the bundled aggregations in the `/sync` response, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). diff --git a/changelog.d/11621.feature b/changelog.d/11621.feature deleted file mode 100644 index dc426fb658ac..000000000000 --- a/changelog.d/11621.feature +++ /dev/null @@ -1 +0,0 @@ -Remove account data (including client config, push rules and ignored users) upon user deactivation. \ No newline at end of file diff --git a/changelog.d/11639.feature b/changelog.d/11639.feature deleted file mode 100644 index e9f6704f7a1f..000000000000 --- a/changelog.d/11639.feature +++ /dev/null @@ -1 +0,0 @@ -Add admin API to reset connection timeouts for remote server. \ No newline at end of file diff --git a/changelog.d/11658.feature b/changelog.d/11658.feature deleted file mode 100644 index 2ec9fb5eecf2..000000000000 --- a/changelog.d/11658.feature +++ /dev/null @@ -1 +0,0 @@ -Add an admin API to get a list of rooms that federate with a given remote homeserver. \ No newline at end of file diff --git a/changelog.d/11683.removal b/changelog.d/11683.removal deleted file mode 100644 index b1f048f7f52d..000000000000 --- a/changelog.d/11683.removal +++ /dev/null @@ -1 +0,0 @@ -Drop support for Python 3.6, which is EOL. \ No newline at end of file diff --git a/changelog.d/11743.feature b/changelog.d/11743.feature deleted file mode 100644 index 9809f48b9688..000000000000 --- a/changelog.d/11743.feature +++ /dev/null @@ -1 +0,0 @@ -Add a config flag to inhibit M_USER_IN_USE during registration. diff --git a/changelog.d/11767.bugfix b/changelog.d/11767.bugfix deleted file mode 100644 index 3e344747f4a2..000000000000 --- a/changelog.d/11767.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug when previewing Reddit URLs which do not contain an image. diff --git a/changelog.d/11784.bugfix b/changelog.d/11784.bugfix deleted file mode 100644 index 6569a8c2997a..000000000000 --- a/changelog.d/11784.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug that media streams could cause long-lived connections when generating URL previews. 
diff --git a/changelog.d/11788.feature b/changelog.d/11788.feature deleted file mode 100644 index dc426fb658ac..000000000000 --- a/changelog.d/11788.feature +++ /dev/null @@ -1 +0,0 @@ -Remove account data (including client config, push rules and ignored users) upon user deactivation. \ No newline at end of file diff --git a/changelog.d/11789.feature b/changelog.d/11789.feature deleted file mode 100644 index dc426fb658ac..000000000000 --- a/changelog.d/11789.feature +++ /dev/null @@ -1 +0,0 @@ -Remove account data (including client config, push rules and ignored users) upon user deactivation. \ No newline at end of file diff --git a/changelog.d/11790.feature b/changelog.d/11790.feature deleted file mode 100644 index 4a5cc8ec37db..000000000000 --- a/changelog.d/11790.feature +++ /dev/null @@ -1 +0,0 @@ -Add a module callback to set username at registration. diff --git a/changelog.d/11792.misc b/changelog.d/11792.misc deleted file mode 100644 index 6aa1cd61c354..000000000000 --- a/changelog.d/11792.misc +++ /dev/null @@ -1 +0,0 @@ -Preparation for database schema simplifications: add `state_key` and `rejection_reason` columns to `events` table. diff --git a/changelog.d/11793.misc b/changelog.d/11793.misc deleted file mode 100644 index fc0530bf2cb6..000000000000 --- a/changelog.d/11793.misc +++ /dev/null @@ -1 +0,0 @@ -Add `FrozenEvent.get_state_key` and use it in a couple of places. diff --git a/changelog.d/11794.misc b/changelog.d/11794.misc deleted file mode 100644 index 29826bc0e5ab..000000000000 --- a/changelog.d/11794.misc +++ /dev/null @@ -1 +0,0 @@ -Preparation for database schema simplifications: stop reading from `event_reference_hashes`. diff --git a/changelog.d/11795.misc b/changelog.d/11795.misc deleted file mode 100644 index aeba317670e9..000000000000 --- a/changelog.d/11795.misc +++ /dev/null @@ -1 +0,0 @@ -Drop unused table `public_room_list_stream`. diff --git a/changelog.d/11798.bugfix b/changelog.d/11798.bugfix deleted file mode 100644 index 2651fd11cff0..000000000000 --- a/changelog.d/11798.bugfix +++ /dev/null @@ -1 +0,0 @@ -Include a `prev_content` field in state events sent to Application Services. Contributed by @totallynotvaishnav. \ No newline at end of file diff --git a/changelog.d/11799.misc b/changelog.d/11799.misc deleted file mode 100644 index 5c3b2bcaf4e5..000000000000 --- a/changelog.d/11799.misc +++ /dev/null @@ -1 +0,0 @@ -Preparation for reducing Postgres serialization errors: allow setting transaction isolation level. Contributed by Nick @ Beeper. diff --git a/changelog.d/11810.misc b/changelog.d/11810.misc deleted file mode 100644 index 5579b85979b1..000000000000 --- a/changelog.d/11810.misc +++ /dev/null @@ -1 +0,0 @@ -Docker: skip the initial amd64-only build and go straight to multiarch. diff --git a/changelog.d/11811.misc b/changelog.d/11811.misc deleted file mode 100644 index b911a2d042d2..000000000000 --- a/changelog.d/11811.misc +++ /dev/null @@ -1 +0,0 @@ -Run Complement on the Github Actions VM and not inside a Docker container. \ No newline at end of file diff --git a/changelog.d/11813.misc b/changelog.d/11813.misc deleted file mode 100644 index f90d183b45cb..000000000000 --- a/changelog.d/11813.misc +++ /dev/null @@ -1 +0,0 @@ -Log module names at startup. diff --git a/changelog.d/11815.misc b/changelog.d/11815.misc deleted file mode 100644 index 83aa6d6eb046..000000000000 --- a/changelog.d/11815.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type safety of bundled aggregations code. 
diff --git a/changelog.d/11816.misc b/changelog.d/11816.misc deleted file mode 100644 index b1f048f7f52d..000000000000 --- a/changelog.d/11816.misc +++ /dev/null @@ -1 +0,0 @@ -Drop support for Python 3.6, which is EOL. \ No newline at end of file diff --git a/changelog.d/11817.misc b/changelog.d/11817.misc deleted file mode 100644 index 3d6b2ea4d485..000000000000 --- a/changelog.d/11817.misc +++ /dev/null @@ -1 +0,0 @@ -Correct a type annotation in the event validation logic. diff --git a/changelog.d/11820.doc b/changelog.d/11820.doc deleted file mode 100644 index 4f563b9b569b..000000000000 --- a/changelog.d/11820.doc +++ /dev/null @@ -1 +0,0 @@ -Update pypi installation docs to indicate that we now support Python 3.10. diff --git a/changelog.d/11821.doc b/changelog.d/11821.doc deleted file mode 100644 index a16a6ef956e2..000000000000 --- a/changelog.d/11821.doc +++ /dev/null @@ -1 +0,0 @@ -Add missing steps to the contribution submission process in the documentation. Contributed by @sequentialread. diff --git a/changelog.d/11823.misc b/changelog.d/11823.misc deleted file mode 100644 index 2d153eae4ade..000000000000 --- a/changelog.d/11823.misc +++ /dev/null @@ -1 +0,0 @@ -Minor updates and documentation for database schema delta files. diff --git a/changelog.d/11827.bugfix b/changelog.d/11827.bugfix deleted file mode 100644 index 30222dfb6269..000000000000 --- a/changelog.d/11827.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 0.33.3 causing requests to sometimes log strings such as `HTTPStatus.OK` instead of integer status codes. \ No newline at end of file diff --git a/changelog.d/11830.misc b/changelog.d/11830.misc deleted file mode 100644 index fe248d00ab94..000000000000 --- a/changelog.d/11830.misc +++ /dev/null @@ -1 +0,0 @@ -Correct a type annotation in the event validation logic. \ No newline at end of file diff --git a/changelog.d/11834.misc b/changelog.d/11834.misc deleted file mode 100644 index 29a5635f7a1e..000000000000 --- a/changelog.d/11834.misc +++ /dev/null @@ -1 +0,0 @@ -Workaround a type annotation problem in `prometheus_client` 0.13.0. \ No newline at end of file diff --git a/changelog.d/11836.misc b/changelog.d/11836.misc deleted file mode 100644 index be7e331c639c..000000000000 --- a/changelog.d/11836.misc +++ /dev/null @@ -1 +0,0 @@ -Minor performance improvement in room state lookup. diff --git a/changelog.d/11838.misc b/changelog.d/11838.misc deleted file mode 100644 index 4747bbe88b0d..000000000000 --- a/changelog.d/11838.misc +++ /dev/null @@ -1 +0,0 @@ -Fix some indentation inconsistencies in the sample config. \ No newline at end of file diff --git a/changelog.d/11843.removal b/changelog.d/11843.removal deleted file mode 100644 index d6d49b4ad50b..000000000000 --- a/changelog.d/11843.removal +++ /dev/null @@ -1 +0,0 @@ -Remove the `experimental_msc1849_support_enabled` flag as the features are now stable. diff --git a/changelog.d/11846.feature b/changelog.d/11846.feature deleted file mode 100644 index fcf6affdb53d..000000000000 --- a/changelog.d/11846.feature +++ /dev/null @@ -1 +0,0 @@ -Allow configuring a maximum file size as well as a list of allowed content types for avatars. diff --git a/changelog.d/11847.misc b/changelog.d/11847.misc deleted file mode 100644 index 5c3b2bcaf4e5..000000000000 --- a/changelog.d/11847.misc +++ /dev/null @@ -1 +0,0 @@ -Preparation for reducing Postgres serialization errors: allow setting transaction isolation level. Contributed by Nick @ Beeper. 
diff --git a/changelog.d/11851.misc b/changelog.d/11851.misc deleted file mode 100644 index ccc3ec3482b1..000000000000 --- a/changelog.d/11851.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to `tests/rest/admin`. diff --git a/changelog.d/11860.doc b/changelog.d/11860.doc deleted file mode 100644 index 04b88c5f2c30..000000000000 --- a/changelog.d/11860.doc +++ /dev/null @@ -1 +0,0 @@ -Remove not needed old table of contents in documentation. diff --git a/changelog.d/11861.doc b/changelog.d/11861.doc deleted file mode 100644 index 53c75a088378..000000000000 --- a/changelog.d/11861.doc +++ /dev/null @@ -1 +0,0 @@ -Consolidate the `access_token` information at the top of each relevant page in the Admin API documentation. diff --git a/debian/changelog b/debian/changelog index 3a598c4148c9..a45888565554 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.52.0~rc1) stable; urgency=medium + + * New synapse release 1.52.0~rc1. + + -- Synapse Packaging team Tue, 01 Feb 2022 11:04:09 +0000 + matrix-synapse-py3 (1.51.0) stable; urgency=medium * New synapse release 1.51.0. diff --git a/synapse/__init__.py b/synapse/__init__.py index 603dbb27e150..5e6503306101 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.51.0" +__version__ = "1.52.0rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From b7282fe7d17ec767a30725dcb1f66107ef0bd838 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 1 Feb 2022 11:07:12 +0000 Subject: [PATCH 177/474] Don't mention 3.6 EOL under misc It's already under deps & removals --- CHANGES.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 6a8ce170dc54..36707db03bf7 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,7 +5,7 @@ Features -------- - Remove account data (including client config, push rules and ignored users) upon user deactivation. ([\#11621](https://github.com/matrix-org/synapse/issues/11621), [\#11788](https://github.com/matrix-org/synapse/issues/11788), [\#11789](https://github.com/matrix-org/synapse/issues/11789)) -- Add admin API to reset connection timeouts for remote server. ([\#11639](https://github.com/matrix-org/synapse/issues/11639)) +- Add an admin API to reset connection timeouts for remote server. ([\#11639](https://github.com/matrix-org/synapse/issues/11639)) - Add an admin API to get a list of rooms that federate with a given remote homeserver. ([\#11658](https://github.com/matrix-org/synapse/issues/11658)) - Add a config flag to inhibit M_USER_IN_USE during registration. ([\#11743](https://github.com/matrix-org/synapse/issues/11743)) - Add a module callback to set username at registration. ([\#11790](https://github.com/matrix-org/synapse/issues/11790)) @@ -50,7 +50,6 @@ Internal Changes - Run Complement on the Github Actions VM and not inside a Docker container. ([\#11811](https://github.com/matrix-org/synapse/issues/11811)) - Log module names at startup. ([\#11813](https://github.com/matrix-org/synapse/issues/11813)) - Improve type safety of bundled aggregations code. ([\#11815](https://github.com/matrix-org/synapse/issues/11815)) -- Drop support for Python 3.6, which is EOL. ([\#11816](https://github.com/matrix-org/synapse/issues/11816)) - Correct a type annotation in the event validation logic. 
([\#11817](https://github.com/matrix-org/synapse/issues/11817), [\#11830](https://github.com/matrix-org/synapse/issues/11830))
- Minor updates and documentation for database schema delta files. ([\#11823](https://github.com/matrix-org/synapse/issues/11823))
- Workaround a type annotation problem in `prometheus_client` 0.13.0. ([\#11834](https://github.com/matrix-org/synapse/issues/11834))
- Minor performance improvement in room state lookup. ([\#11836](https://github.com/matrix-org/synapse/issues/11836))
- Fix some indentation inconsistencies in the sample config. ([\#11838](https://github.com/matrix-org/synapse/issues/11838))
- Add type hints to `tests/rest/admin`. ([\#11851](https://github.com/matrix-org/synapse/issues/11851))

From 64ec45fc1b0856dc7daacca7d3ab75d50bd89f84 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Tue, 1 Feb 2022 14:13:38 +0000
Subject: [PATCH 178/474] Send to-device messages to application services (#11215)

Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
---
 changelog.d/11215.feature                     |   1 +
 synapse/appservice/__init__.py                |   3 +
 synapse/appservice/api.py                     |  29 +-
 synapse/appservice/scheduler.py               |  97 ++++--
 synapse/config/experimental.py                |   7 +
 synapse/handlers/appservice.py                | 136 +++++++--
 synapse/handlers/sync.py                      |   4 +-
 synapse/notifier.py                           |   4 +-
 synapse/storage/databases/main/appservice.py  |  24 +-
 synapse/storage/databases/main/deviceinbox.py | 276 ++++++++++++++---
 ...9_add_device_id_appservice_stream_type.sql |  21 ++
 tests/appservice/test_scheduler.py            | 109 ++++---
 tests/handlers/test_appservice.py             | 281 +++++++++++++++++-
 tests/storage/test_appservice.py              |  26 +-
 14 files changed, 856 insertions(+), 162 deletions(-)
 create mode 100644 changelog.d/11215.feature
 create mode 100644 synapse/storage/schema/main/delta/68/02_msc2409_add_device_id_appservice_stream_type.sql

diff --git a/changelog.d/11215.feature b/changelog.d/11215.feature
new file mode 100644
index 000000000000..468020834b3d
--- /dev/null
+++ b/changelog.d/11215.feature
@@ -0,0 +1 @@
+Add experimental support for sending to-device messages to application services, as specified by [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409). Disabled by default.
diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py
index 8c9ff93b2c13..7dbebd97b5d3 100644
--- a/synapse/appservice/__init__.py
+++ b/synapse/appservice/__init__.py
@@ -351,11 +351,13 @@ def __init__(
         id: int,
         events: List[EventBase],
         ephemeral: List[JsonDict],
+        to_device_messages: List[JsonDict],
     ):
         self.service = service
         self.id = id
         self.events = events
         self.ephemeral = ephemeral
+        self.to_device_messages = to_device_messages
 
     async def send(self, as_api: "ApplicationServiceApi") -> bool:
         """Sends this transaction using the provided AS API interface.
@@ -369,6 +371,7 @@ async def send(self, as_api: "ApplicationServiceApi") -> bool:
             service=self.service,
             events=self.events,
             ephemeral=self.ephemeral,
+            to_device_messages=self.to_device_messages,
             txn_id=self.id,
         )
diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py
index def4424af0ee..73be7ff3d4a1 100644
--- a/synapse/appservice/api.py
+++ b/synapse/appservice/api.py
@@ -218,8 +218,23 @@ async def push_bulk(
         service: "ApplicationService",
         events: List[EventBase],
         ephemeral: List[JsonDict],
+        to_device_messages: List[JsonDict],
         txn_id: Optional[int] = None,
     ) -> bool:
+        """
+        Push data to an application service.
+
+        Args:
+            service: The application service to send to.
+            events: The persistent events to send.
+            ephemeral: The ephemeral events to send.
+            to_device_messages: The to-device messages to send.
+            txn_id: A unique ID to assign to this transaction. Application services should
+                deduplicate transactions received with identical IDs.
+
+        Returns:
+            True if the task succeeded, False if it failed.
+ """ if service.url is None: return True @@ -237,13 +252,15 @@ async def push_bulk( uri = service.url + ("/transactions/%s" % urllib.parse.quote(str(txn_id))) # Never send ephemeral events to appservices that do not support it + body: Dict[str, List[JsonDict]] = {"events": serialized_events} if service.supports_ephemeral: - body = { - "events": serialized_events, - "de.sorunome.msc2409.ephemeral": ephemeral, - } - else: - body = {"events": serialized_events} + body.update( + { + # TODO: Update to stable prefixes once MSC2409 completes FCP merge. + "de.sorunome.msc2409.ephemeral": ephemeral, + "de.sorunome.msc2409.to_device": to_device_messages, + } + ) try: await self.put_json( diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index 185e3a527815..c42fa32fff32 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -48,7 +48,16 @@ components. """ import logging -from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional, Set +from typing import ( + TYPE_CHECKING, + Awaitable, + Callable, + Collection, + Dict, + List, + Optional, + Set, +) from synapse.appservice import ApplicationService, ApplicationServiceState from synapse.appservice.api import ApplicationServiceApi @@ -71,6 +80,9 @@ # Maximum number of ephemeral events to provide in an AS transaction. MAX_EPHEMERAL_EVENTS_PER_TRANSACTION = 100 +# Maximum number of to-device messages to provide in an AS transaction. +MAX_TO_DEVICE_MESSAGES_PER_TRANSACTION = 100 + class ApplicationServiceScheduler: """Public facing API for this module. Does the required DI to tie the @@ -97,15 +109,40 @@ async def start(self) -> None: for service in services: self.txn_ctrl.start_recoverer(service) - def submit_event_for_as( - self, service: ApplicationService, event: EventBase + def enqueue_for_appservice( + self, + appservice: ApplicationService, + events: Optional[Collection[EventBase]] = None, + ephemeral: Optional[Collection[JsonDict]] = None, + to_device_messages: Optional[Collection[JsonDict]] = None, ) -> None: - self.queuer.enqueue_event(service, event) + """ + Enqueue some data to be sent off to an application service. - def submit_ephemeral_events_for_as( - self, service: ApplicationService, events: List[JsonDict] - ) -> None: - self.queuer.enqueue_ephemeral(service, events) + Args: + appservice: The application service to create and send a transaction to. + events: The persistent room events to send. + ephemeral: The ephemeral events to send. + to_device_messages: The to-device messages to send. These differ from normal + to-device messages sent to clients, as they have 'to_device_id' and + 'to_user_id' fields. + """ + # We purposefully allow this method to run with empty events/ephemeral + # collections, so that callers do not need to check iterable size themselves. 
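        # A purely illustrative example of what a caller passes here (the real call
        # sites live in the appservice handler later in this patch; the event type,
        # user IDs and device ID below are made up):
        #
        #     scheduler.enqueue_for_appservice(
        #         appservice,
        #         to_device_messages=[
        #             {
        #                 "type": "m.room_key_request",
        #                 "sender": "@alice:example.org",
        #                 "content": {"action": "request"},
        #                 "to_user_id": "@as_user:example.org",
        #                 "to_device_id": "ABCDEFGHIJ",
        #             },
        #         ],
        #     )
        #
        # Messages queued this way are ultimately pushed to the appservice under the
        # unstable "de.sorunome.msc2409.to_device" key built in push_bulk above.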
+ if not events and not ephemeral and not to_device_messages: + return + + if events: + self.queuer.queued_events.setdefault(appservice.id, []).extend(events) + if ephemeral: + self.queuer.queued_ephemeral.setdefault(appservice.id, []).extend(ephemeral) + if to_device_messages: + self.queuer.queued_to_device_messages.setdefault(appservice.id, []).extend( + to_device_messages + ) + + # Kick off a new application service transaction + self.queuer.start_background_request(appservice) class _ServiceQueuer: @@ -121,13 +158,15 @@ def __init__(self, txn_ctrl: "_TransactionController", clock: Clock): self.queued_events: Dict[str, List[EventBase]] = {} # dict of {service_id: [events]} self.queued_ephemeral: Dict[str, List[JsonDict]] = {} + # dict of {service_id: [to_device_message_json]} + self.queued_to_device_messages: Dict[str, List[JsonDict]] = {} # the appservices which currently have a transaction in flight self.requests_in_flight: Set[str] = set() self.txn_ctrl = txn_ctrl self.clock = clock - def _start_background_request(self, service: ApplicationService) -> None: + def start_background_request(self, service: ApplicationService) -> None: # start a sender for this appservice if we don't already have one if service.id in self.requests_in_flight: return @@ -136,16 +175,6 @@ def _start_background_request(self, service: ApplicationService) -> None: "as-sender-%s" % (service.id,), self._send_request, service ) - def enqueue_event(self, service: ApplicationService, event: EventBase) -> None: - self.queued_events.setdefault(service.id, []).append(event) - self._start_background_request(service) - - def enqueue_ephemeral( - self, service: ApplicationService, events: List[JsonDict] - ) -> None: - self.queued_ephemeral.setdefault(service.id, []).extend(events) - self._start_background_request(service) - async def _send_request(self, service: ApplicationService) -> None: # sanity-check: we shouldn't get here if this service already has a sender # running. @@ -162,11 +191,21 @@ async def _send_request(self, service: ApplicationService) -> None: ephemeral = all_events_ephemeral[:MAX_EPHEMERAL_EVENTS_PER_TRANSACTION] del all_events_ephemeral[:MAX_EPHEMERAL_EVENTS_PER_TRANSACTION] - if not events and not ephemeral: + all_to_device_messages = self.queued_to_device_messages.get( + service.id, [] + ) + to_device_messages_to_send = all_to_device_messages[ + :MAX_TO_DEVICE_MESSAGES_PER_TRANSACTION + ] + del all_to_device_messages[:MAX_TO_DEVICE_MESSAGES_PER_TRANSACTION] + + if not events and not ephemeral and not to_device_messages_to_send: return try: - await self.txn_ctrl.send(service, events, ephemeral) + await self.txn_ctrl.send( + service, events, ephemeral, to_device_messages_to_send + ) except Exception: logger.exception("AS request failed") finally: @@ -198,10 +237,24 @@ async def send( service: ApplicationService, events: List[EventBase], ephemeral: Optional[List[JsonDict]] = None, + to_device_messages: Optional[List[JsonDict]] = None, ) -> None: + """ + Create a transaction with the given data and send to the provided + application service. + + Args: + service: The application service to send the transaction to. + events: The persistent events to include in the transaction. + ephemeral: The ephemeral events to include in the transaction. + to_device_messages: The to-device messages to include in the transaction. 
+ """ try: txn = await self.store.create_appservice_txn( - service=service, events=events, ephemeral=ephemeral or [] + service=service, + events=events, + ephemeral=ephemeral or [], + to_device_messages=to_device_messages or [], ) service_is_up = await self._is_service_up(service) if service_is_up: diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 65c807a19af7..e4719d19b857 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -52,3 +52,10 @@ def read_config(self, config: JsonDict, **kwargs): self.msc3202_device_masquerading_enabled: bool = experimental.get( "msc3202_device_masquerading", False ) + + # MSC2409 (this setting only relates to optionally sending to-device messages). + # Presence, typing and read receipt EDUs are already sent to application services that + # have opted in to receive them. If enabled, this adds to-device messages to that list. + self.msc2409_to_device_messages_enabled: bool = experimental.get( + "msc2409_to_device_messages_enabled", False + ) diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 7833e77e2b6b..0fb919acf672 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -55,6 +55,9 @@ def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() self.notify_appservices = hs.config.appservice.notify_appservices self.event_sources = hs.get_event_sources() + self._msc2409_to_device_messages_enabled = ( + hs.config.experimental.msc2409_to_device_messages_enabled + ) self.current_max = 0 self.is_processing = False @@ -132,7 +135,9 @@ async def start_scheduler() -> None: # Fork off pushes to these services for service in services: - self.scheduler.submit_event_for_as(service, event) + self.scheduler.enqueue_for_appservice( + service, events=[event] + ) now = self.clock.time_msec() ts = await self.store.get_received_ts(event.event_id) @@ -199,8 +204,9 @@ def notify_interested_services_ephemeral( Args: stream_key: The stream the event came from. - `stream_key` can be "typing_key", "receipt_key" or "presence_key". Any other - value for `stream_key` will cause this function to return early. + `stream_key` can be "typing_key", "receipt_key", "presence_key" or + "to_device_key". Any other value for `stream_key` will cause this function + to return early. Ephemeral events will only be pushed to appservices that have opted into receiving them by setting `push_ephemeral` to true in their registration @@ -216,8 +222,15 @@ def notify_interested_services_ephemeral( if not self.notify_appservices: return - # Ignore any unsupported streams - if stream_key not in ("typing_key", "receipt_key", "presence_key"): + # Notify appservices of updates in ephemeral event streams. + # Only the following streams are currently supported. + # FIXME: We should use constants for these values. + if stream_key not in ( + "typing_key", + "receipt_key", + "presence_key", + "to_device_key", + ): return # Assert that new_token is an integer (and not a RoomStreamToken). @@ -233,6 +246,13 @@ def notify_interested_services_ephemeral( # Additional context: https://github.com/matrix-org/synapse/pull/11137 assert isinstance(new_token, int) + # Ignore to-device messages if the feature flag is not enabled + if ( + stream_key == "to_device_key" + and not self._msc2409_to_device_messages_enabled + ): + return + # Check whether there are any appservices which have registered to receive # ephemeral events. 
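        # As a usage note for the feature flag checked above: assuming the standard
        # `experimental_features` section of homeserver.yaml (an illustrative
        # snippet, not taken from this patch), a deployment would opt in with:
        #
        #     experimental_features:
        #       msc2409_to_device_messages_enabled: true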
# @@ -266,7 +286,7 @@ async def _notify_interested_services_ephemeral( with Measure(self.clock, "notify_interested_services_ephemeral"): for service in services: if stream_key == "typing_key": - # Note that we don't persist the token (via set_type_stream_id_for_appservice) + # Note that we don't persist the token (via set_appservice_stream_type_pos) # for typing_key due to performance reasons and due to their highly # ephemeral nature. # @@ -274,7 +294,7 @@ async def _notify_interested_services_ephemeral( # and, if they apply to this application service, send it off. events = await self._handle_typing(service, new_token) if events: - self.scheduler.submit_ephemeral_events_for_as(service, events) + self.scheduler.enqueue_for_appservice(service, ephemeral=events) continue # Since we read/update the stream position for this AS/stream @@ -285,28 +305,37 @@ async def _notify_interested_services_ephemeral( ): if stream_key == "receipt_key": events = await self._handle_receipts(service, new_token) - if events: - self.scheduler.submit_ephemeral_events_for_as( - service, events - ) + self.scheduler.enqueue_for_appservice(service, ephemeral=events) # Persist the latest handled stream token for this appservice - await self.store.set_type_stream_id_for_appservice( + await self.store.set_appservice_stream_type_pos( service, "read_receipt", new_token ) elif stream_key == "presence_key": events = await self._handle_presence(service, users, new_token) - if events: - self.scheduler.submit_ephemeral_events_for_as( - service, events - ) + self.scheduler.enqueue_for_appservice(service, ephemeral=events) # Persist the latest handled stream token for this appservice - await self.store.set_type_stream_id_for_appservice( + await self.store.set_appservice_stream_type_pos( service, "presence", new_token ) + elif stream_key == "to_device_key": + # Retrieve a list of to-device message events, as well as the + # maximum stream token of the messages we were able to retrieve. + to_device_messages = await self._get_to_device_messages( + service, new_token, users + ) + self.scheduler.enqueue_for_appservice( + service, to_device_messages=to_device_messages + ) + + # Persist the latest handled stream token for this appservice + await self.store.set_appservice_stream_type_pos( + service, "to_device", new_token + ) + async def _handle_typing( self, service: ApplicationService, new_token: int ) -> List[JsonDict]: @@ -440,6 +469,79 @@ async def _handle_presence( return events + async def _get_to_device_messages( + self, + service: ApplicationService, + new_token: int, + users: Collection[Union[str, UserID]], + ) -> List[JsonDict]: + """ + Given an application service, determine which events it should receive + from those between the last-recorded to-device message stream token for this + appservice and the given stream token. + + Args: + service: The application service to check for which events it should receive. + new_token: The latest to-device event stream token. + users: The users to be notified for the new to-device messages + (ie, the recipients of the messages). + + Returns: + A list of JSON dictionaries containing data derived from the to-device events + that should be sent to the given application service. 
+        """
+        # Get the stream token that this application service has processed up until
+        from_key = await self.store.get_type_stream_id_for_appservice(
+            service, "to_device"
+        )
+
+        # Filter out users that this appservice is not interested in
+        users_appservice_is_interested_in: List[str] = []
+        for user in users:
+            # FIXME: We should do this farther up the call stack. We currently repeat
+            # this operation in _handle_presence.
+            if isinstance(user, UserID):
+                user = user.to_string()
+
+            if service.is_interested_in_user(user):
+                users_appservice_is_interested_in.append(user)
+
+        if not users_appservice_is_interested_in:
+            # Return early if the AS was not interested in any of these users
+            return []
+
+        # Retrieve the to-device messages for each user
+        recipient_device_to_messages = await self.store.get_messages_for_user_devices(
+            users_appservice_is_interested_in,
+            from_key,
+            new_token,
+        )
+
+        # According to MSC2409, we'll need to add 'to_user_id' and 'to_device_id' fields
+        # to the event JSON so that the application service will know which user/device
+        # combination this message was intended for.
+        #
+        # So we mangle this dict into a flat list of to-device messages with the relevant
+        # user ID and device ID embedded inside each message dict.
+        message_payload: List[JsonDict] = []
+        for (
+            user_id,
+            device_id,
+        ), messages in recipient_device_to_messages.items():
+            for message_json in messages:
+                # Remove 'message_id' from the to-device message, as it's an internal ID
+                message_json.pop("message_id", None)
+
+                message_payload.append(
+                    {
+                        "to_user_id": user_id,
+                        "to_device_id": device_id,
+                        **message_json,
+                    }
+                )
+
+        return message_payload
+
     async def query_user_exists(self, user_id: str) -> bool:
         """Check if any application service knows this user_id exists.
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index c72ed7c2907a..aa9a76f8a968 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -1348,8 +1348,8 @@ async def _generate_sync_entry_for_to_device(
 
         if sync_result_builder.since_token is not None:
             since_stream_id = int(sync_result_builder.since_token.to_device_key)
 
-        if since_stream_id != int(now_token.to_device_key):
-            messages, stream_id = await self.store.get_new_messages_for_device(
+        if device_id is not None and since_stream_id != int(now_token.to_device_key):
+            messages, stream_id = await self.store.get_messages_for_device(
                 user_id, device_id, since_stream_id, now_token.to_device_key
             )
 
diff --git a/synapse/notifier.py b/synapse/notifier.py
index 632b2245ef55..5988c67d9076 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -461,7 +461,9 @@ def on_new_event(
                     users,
                 )
             except Exception:
-                logger.exception("Error notifying application services of event")
+                logger.exception(
+                    "Error notifying application services of ephemeral events"
+                )
 
     def on_new_replication_data(self) -> None:
         """Used to inform replication listeners that something has happened
diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py
index 2bb52884315e..304814af5dc5 100644
--- a/synapse/storage/databases/main/appservice.py
+++ b/synapse/storage/databases/main/appservice.py
@@ -198,6 +198,7 @@ async def create_appservice_txn(
         service: ApplicationService,
         events: List[EventBase],
         ephemeral: List[JsonDict],
+        to_device_messages: List[JsonDict],
     ) -> AppServiceTransaction:
         """Atomically creates a new transaction for this application service
         with the given list of events.
Ephemeral events are NOT persisted to the @@ -207,6 +208,7 @@ async def create_appservice_txn( service: The service who the transaction is for. events: A list of persistent events to put in the transaction. ephemeral: A list of ephemeral events to put in the transaction. + to_device_messages: A list of to-device messages to put in the transaction. Returns: A new transaction. @@ -237,7 +239,11 @@ def _create_appservice_txn(txn): (service.id, new_txn_id, event_ids), ) return AppServiceTransaction( - service=service, id=new_txn_id, events=events, ephemeral=ephemeral + service=service, + id=new_txn_id, + events=events, + ephemeral=ephemeral, + to_device_messages=to_device_messages, ) return await self.db_pool.runInteraction( @@ -330,7 +336,11 @@ def _get_oldest_unsent_txn(txn): events = await self.get_events_as_list(event_ids) return AppServiceTransaction( - service=service, id=entry["txn_id"], events=events, ephemeral=[] + service=service, + id=entry["txn_id"], + events=events, + ephemeral=[], + to_device_messages=[], ) def _get_last_txn(self, txn, service_id: Optional[str]) -> int: @@ -391,7 +401,7 @@ def get_new_events_for_appservice_txn(txn): async def get_type_stream_id_for_appservice( self, service: ApplicationService, type: str ) -> int: - if type not in ("read_receipt", "presence"): + if type not in ("read_receipt", "presence", "to_device"): raise ValueError( "Expected type to be a valid application stream id type, got %s" % (type,) @@ -415,16 +425,16 @@ def get_type_stream_id_for_appservice_txn(txn): "get_type_stream_id_for_appservice", get_type_stream_id_for_appservice_txn ) - async def set_type_stream_id_for_appservice( + async def set_appservice_stream_type_pos( self, service: ApplicationService, stream_type: str, pos: Optional[int] ) -> None: - if stream_type not in ("read_receipt", "presence"): + if stream_type not in ("read_receipt", "presence", "to_device"): raise ValueError( "Expected type to be a valid application stream id type, got %s" % (stream_type,) ) - def set_type_stream_id_for_appservice_txn(txn): + def set_appservice_stream_type_pos_txn(txn): stream_id_type = "%s_stream_id" % stream_type txn.execute( "UPDATE application_services_state SET %s = ? WHERE as_id=?" @@ -433,7 +443,7 @@ def set_type_stream_id_for_appservice_txn(txn): ) await self.db_pool.runInteraction( - "set_type_stream_id_for_appservice", set_type_stream_id_for_appservice_txn + "set_appservice_stream_type_pos", set_appservice_stream_type_pos_txn ) diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 4eca97189bef..8801b7b2dd83 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -14,7 +14,7 @@ # limitations under the License. 
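# A sketch of how the storage API from this patch hangs together (hypothetical
# driver code for illustration only; the real orchestration lives in
# synapse/handlers/appservice.py, shown earlier in this patch):
#
#     # Resume from wherever this appservice last got to on the to-device stream
#     from_key = await store.get_type_stream_id_for_appservice(service, "to_device")
#
#     # Fetch everything its users received since then (from_key < stream_id <= new_token)
#     by_device = await store.get_messages_for_user_devices(user_ids, from_key, new_token)
#
#     # Record the new position once the transaction has been enqueued
#     await store.set_appservice_stream_type_pos(service, "to_device", new_token)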
import logging -from typing import TYPE_CHECKING, List, Optional, Tuple, cast +from typing import TYPE_CHECKING, Collection, Dict, List, Optional, Set, Tuple, cast from synapse.logging import issue9533_logger from synapse.logging.opentracing import log_kv, set_tag, trace @@ -24,6 +24,7 @@ DatabasePool, LoggingDatabaseConnection, LoggingTransaction, + make_in_list_sql_clause, ) from synapse.storage.engines import PostgresEngine from synapse.storage.util.id_generators import ( @@ -136,63 +137,260 @@ def process_replication_rows(self, stream_name, instance_name, token, rows): def get_to_device_stream_token(self): return self._device_inbox_id_gen.get_current_token() - async def get_new_messages_for_device( + async def get_messages_for_user_devices( + self, + user_ids: Collection[str], + from_stream_id: int, + to_stream_id: int, + ) -> Dict[Tuple[str, str], List[JsonDict]]: + """ + Retrieve to-device messages for a given set of users. + + Only to-device messages with stream ids between the given boundaries + (from < X <= to) are returned. + + Args: + user_ids: The users to retrieve to-device messages for. + from_stream_id: The lower boundary of stream id to filter with (exclusive). + to_stream_id: The upper boundary of stream id to filter with (inclusive). + + Returns: + A dictionary of (user id, device id) -> list of to-device messages. + """ + # We expect the stream ID returned by _get_device_messages to always + # be to_stream_id. So, no need to return it from this function. + ( + user_id_device_id_to_messages, + last_processed_stream_id, + ) = await self._get_device_messages( + user_ids=user_ids, + from_stream_id=from_stream_id, + to_stream_id=to_stream_id, + ) + + assert ( + last_processed_stream_id == to_stream_id + ), "Expected _get_device_messages to process all to-device messages up to `to_stream_id`" + + return user_id_device_id_to_messages + + async def get_messages_for_device( self, user_id: str, - device_id: Optional[str], - last_stream_id: int, - current_stream_id: int, + device_id: str, + from_stream_id: int, + to_stream_id: int, limit: int = 100, - ) -> Tuple[List[dict], int]: + ) -> Tuple[List[JsonDict], int]: """ + Retrieve to-device messages for a single user device. + + Only to-device messages with stream ids between the given boundaries + (from < X <= to) are returned. + Args: - user_id: The recipient user_id. - device_id: The recipient device_id. - last_stream_id: The last stream ID checked. - current_stream_id: The current position of the to device - message stream. - limit: The maximum number of messages to retrieve. + user_id: The ID of the user to retrieve messages for. + device_id: The ID of the device to retrieve to-device messages for. + from_stream_id: The lower boundary of stream id to filter with (exclusive). + to_stream_id: The upper boundary of stream id to filter with (inclusive). + limit: A limit on the number of to-device messages returned. Returns: A tuple containing: - * A list of messages for the device. - * The max stream token of these messages. There may be more to retrieve - if the given limit was reached. + * A list of to-device messages within the given stream id range intended for + the given user / device combo. + * The last-processed stream ID. Subsequent calls of this function with the + same device should pass this value as 'from_stream_id'. 
""" - has_changed = self._device_inbox_stream_cache.has_entity_changed( - user_id, last_stream_id + ( + user_id_device_id_to_messages, + last_processed_stream_id, + ) = await self._get_device_messages( + user_ids=[user_id], + device_id=device_id, + from_stream_id=from_stream_id, + to_stream_id=to_stream_id, + limit=limit, ) - if not has_changed: - return [], current_stream_id - def get_new_messages_for_device_txn(txn): - sql = ( - "SELECT stream_id, message_json FROM device_inbox" - " WHERE user_id = ? AND device_id = ?" - " AND ? < stream_id AND stream_id <= ?" - " ORDER BY stream_id ASC" - " LIMIT ?" + if not user_id_device_id_to_messages: + # There were no messages! + return [], to_stream_id + + # Extract the messages, no need to return the user and device ID again + to_device_messages = user_id_device_id_to_messages.get((user_id, device_id), []) + + return to_device_messages, last_processed_stream_id + + async def _get_device_messages( + self, + user_ids: Collection[str], + from_stream_id: int, + to_stream_id: int, + device_id: Optional[str] = None, + limit: Optional[int] = None, + ) -> Tuple[Dict[Tuple[str, str], List[JsonDict]], int]: + """ + Retrieve pending to-device messages for a collection of user devices. + + Only to-device messages with stream ids between the given boundaries + (from < X <= to) are returned. + + Note that a stream ID can be shared by multiple copies of the same message with + different recipient devices. Stream IDs are only unique in the context of a single + user ID / device ID pair. Thus, applying a limit (of messages to return) when working + with a sliding window of stream IDs is only possible when querying messages of a + single user device. + + Finally, note that device IDs are not unique across users. + + Args: + user_ids: The user IDs to filter device messages by. + from_stream_id: The lower boundary of stream id to filter with (exclusive). + to_stream_id: The upper boundary of stream id to filter with (inclusive). + device_id: A device ID to query to-device messages for. If not provided, to-device + messages from all device IDs for the given user IDs will be queried. May not be + provided if `user_ids` contains more than one entry. + limit: The maximum number of to-device messages to return. Can only be used when + passing a single user ID / device ID tuple. + + Returns: + A tuple containing: + * A dict of (user_id, device_id) -> list of to-device messages + * The last-processed stream ID. If this is less than `to_stream_id`, then + there may be more messages to retrieve. If `limit` is not set, then this + is always equal to 'to_stream_id'. + """ + if not user_ids: + logger.warning("No users provided upon querying for device IDs") + return {}, to_stream_id + + # Prevent a query for one user's device also retrieving another user's device with + # the same device ID (device IDs are not unique across users). + if len(user_ids) > 1 and device_id is not None: + raise AssertionError( + "Programming error: 'device_id' cannot be supplied to " + "_get_device_messages when >1 user_id has been provided" ) - txn.execute( - sql, (user_id, device_id, last_stream_id, current_stream_id, limit) + + # A limit can only be applied when querying for a single user ID / device ID tuple. + # See the docstring of this function for more details. 
+        if limit is not None and device_id is None:
+            raise AssertionError(
+                "Programming error: _get_device_messages was passed 'limit' "
+                "without a specific user_id/device_id"
+            )
 
-            messages = []
-            stream_pos = current_stream_id
+        user_ids_to_query: Set[str] = set()
+        device_ids_to_query: Set[str] = set()
+
+        # Note that a device ID could be an empty str
+        if device_id is not None:
+            # If a device ID was passed, use it to filter results.
+            # Otherwise, device IDs will be derived from the given collection of user IDs.
+            device_ids_to_query.add(device_id)
+
+        # Determine which users have devices with pending messages
+        for user_id in user_ids:
+            if self._device_inbox_stream_cache.has_entity_changed(
+                user_id, from_stream_id
+            ):
+                # This user has new messages sent to them. Query messages for them
+                user_ids_to_query.add(user_id)
+
+        def get_device_messages_txn(txn: LoggingTransaction):
+            # Build a query to select messages from any of the given devices that
+            # are between the given stream id bounds.
+
+            # If a list of device IDs was not provided, retrieve all devices IDs
+            # for the given users. We explicitly do not query hidden devices, as
+            # hidden devices should not receive to-device messages.
+            # Note that this is more efficient than just dropping `device_id` from the query,
+            # since device_inbox has an index on `(user_id, device_id, stream_id)`
+            if not device_ids_to_query:
+                user_device_dicts = self.db_pool.simple_select_many_txn(
+                    txn,
+                    table="devices",
+                    column="user_id",
+                    iterable=user_ids_to_query,
+                    keyvalues={"hidden": False},
+                    retcols=("device_id",),
+                )
 
-            for row in txn:
-                stream_pos = row[0]
-                messages.append(db_to_json(row[1]))
+                device_ids_to_query.update(
+                    {row["device_id"] for row in user_device_dicts}
+                )
 
-            # If the limit was not reached we know that there's no more data for this
-            # user/device pair up to current_stream_id.
-            if len(messages) < limit:
-                stream_pos = current_stream_id
+            if not device_ids_to_query:
+                # We've ended up with no devices to query.
+                return {}, to_stream_id
 
-            return messages, stream_pos
+            # We include both user IDs and device IDs in this query, as we have an index
+            # (device_inbox_user_stream_id) for them.
+            user_id_many_clause_sql, user_id_many_clause_args = make_in_list_sql_clause(
+                self.database_engine, "user_id", user_ids_to_query
+            )
+            (
+                device_id_many_clause_sql,
+                device_id_many_clause_args,
+            ) = make_in_list_sql_clause(
+                self.database_engine, "device_id", device_ids_to_query
+            )
+
+            sql = f"""
+                SELECT stream_id, user_id, device_id, message_json FROM device_inbox
+                WHERE {user_id_many_clause_sql}
+                AND {device_id_many_clause_sql}
+                AND ? < stream_id AND stream_id <= ?
+                ORDER BY stream_id ASC
+            """
+            sql_args = (
+                *user_id_many_clause_args,
+                *device_id_many_clause_args,
+                from_stream_id,
+                to_stream_id,
+            )
+
+            # If a limit was provided, limit the data retrieved from the database
+            if limit is not None:
+                sql += "LIMIT ?"
+                sql_args += (limit,)
+
+            txn.execute(sql, sql_args)
+
+            # Create and fill a dictionary of (user ID, device ID) -> list of messages
+            # intended for each device.
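            # For example, the mapping built below might look like (illustrative
            # values only):
            #
            #     {("@alice:example.org", "DEVICEID"): [message_dict, ...], ...}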
+ last_processed_stream_pos = to_stream_id + recipient_device_to_messages: Dict[Tuple[str, str], List[JsonDict]] = {} + for row in txn: + last_processed_stream_pos = row[0] + recipient_user_id = row[1] + recipient_device_id = row[2] + message_dict = db_to_json(row[3]) + + # Store the device details + recipient_device_to_messages.setdefault( + (recipient_user_id, recipient_device_id), [] + ).append(message_dict) + + if limit is not None and txn.rowcount == limit: + # We ended up bumping up against the message limit. There may be more messages + # to retrieve. Return what we have, as well as the last stream position that + # was processed. + # + # The caller is expected to set this as the lower (exclusive) bound + # for the next query of this device. + return recipient_device_to_messages, last_processed_stream_pos + + # The limit was not reached, thus we know that recipient_device_to_messages + # contains all to-device messages for the given device and stream id range. + # + # We return to_stream_id, which the caller should then provide as the lower + # (exclusive) bound on the next query of this device. + return recipient_device_to_messages, to_stream_id return await self.db_pool.runInteraction( - "get_new_messages_for_device", get_new_messages_for_device_txn + "get_device_messages", get_device_messages_txn ) @trace diff --git a/synapse/storage/schema/main/delta/68/02_msc2409_add_device_id_appservice_stream_type.sql b/synapse/storage/schema/main/delta/68/02_msc2409_add_device_id_appservice_stream_type.sql new file mode 100644 index 000000000000..bbf0af53110f --- /dev/null +++ b/synapse/storage/schema/main/delta/68/02_msc2409_add_device_id_appservice_stream_type.sql @@ -0,0 +1,21 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Add a column to track what to_device stream id that this application +-- service has been caught up to. + +-- NULL indicates that this appservice has never received any to_device messages. This +-- can be used, for example, to avoid sending a huge dump of messages at startup. +ALTER TABLE application_services_state ADD COLUMN to_device_stream_id BIGINT; \ No newline at end of file diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py index 55f0899bae7d..8fb6687f89eb 100644 --- a/tests/appservice/test_scheduler.py +++ b/tests/appservice/test_scheduler.py @@ -11,23 +11,29 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from typing import TYPE_CHECKING from unittest.mock import Mock from twisted.internet import defer from synapse.appservice import ApplicationServiceState from synapse.appservice.scheduler import ( + ApplicationServiceScheduler, _Recoverer, - _ServiceQueuer, _TransactionController, ) from synapse.logging.context import make_deferred_yieldable +from synapse.server import HomeServer +from synapse.util import Clock from tests import unittest from tests.test_utils import simple_async_mock from ..utils import MockClock +if TYPE_CHECKING: + from twisted.internet.testing import MemoryReactor + class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase): def setUp(self): @@ -58,7 +64,10 @@ def test_single_service_up_txn_sent(self): self.successResultOf(defer.ensureDeferred(self.txnctrl.send(service, events))) self.store.create_appservice_txn.assert_called_once_with( - service=service, events=events, ephemeral=[] # txn made and saved + service=service, + events=events, + ephemeral=[], + to_device_messages=[], # txn made and saved ) self.assertEquals(0, len(self.txnctrl.recoverers)) # no recoverer made txn.complete.assert_called_once_with(self.store) # txn completed @@ -79,7 +88,10 @@ def test_single_service_down(self): self.successResultOf(defer.ensureDeferred(self.txnctrl.send(service, events))) self.store.create_appservice_txn.assert_called_once_with( - service=service, events=events, ephemeral=[] # txn made and saved + service=service, + events=events, + ephemeral=[], + to_device_messages=[], # txn made and saved ) self.assertEquals(0, txn.send.call_count) # txn not sent though self.assertEquals(0, txn.complete.call_count) # or completed @@ -102,7 +114,7 @@ def test_single_service_up_txn_not_sent(self): self.successResultOf(defer.ensureDeferred(self.txnctrl.send(service, events))) self.store.create_appservice_txn.assert_called_once_with( - service=service, events=events, ephemeral=[] + service=service, events=events, ephemeral=[], to_device_messages=[] ) self.assertEquals(1, self.recoverer_fn.call_count) # recoverer made self.assertEquals(1, self.recoverer.recover.call_count) # and invoked @@ -189,38 +201,41 @@ def take_txn(*args, **kwargs): self.callback.assert_called_once_with(self.recoverer) -class ApplicationServiceSchedulerQueuerTestCase(unittest.TestCase): - def setUp(self): +class ApplicationServiceSchedulerQueuerTestCase(unittest.HomeserverTestCase): + def prepare(self, reactor: "MemoryReactor", clock: Clock, hs: HomeServer): + self.scheduler = ApplicationServiceScheduler(hs) self.txn_ctrl = Mock() self.txn_ctrl.send = simple_async_mock() - self.queuer = _ServiceQueuer(self.txn_ctrl, MockClock()) + + # Replace instantiated _TransactionController instances with our Mock + self.scheduler.txn_ctrl = self.txn_ctrl + self.scheduler.queuer.txn_ctrl = self.txn_ctrl def test_send_single_event_no_queue(self): # Expect the event to be sent immediately. 
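        # (Throughout these tests, the four arguments asserted on txn_ctrl.send
        # mirror the updated _TransactionController.send signature:
        # service, events, ephemeral, to_device_messages.)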
service = Mock(id=4) event = Mock() - self.queuer.enqueue_event(service, event) - self.txn_ctrl.send.assert_called_once_with(service, [event], []) + self.scheduler.enqueue_for_appservice(service, events=[event]) + self.txn_ctrl.send.assert_called_once_with(service, [event], [], []) def test_send_single_event_with_queue(self): d = defer.Deferred() - self.txn_ctrl.send = Mock( - side_effect=lambda x, y, z: make_deferred_yieldable(d) - ) + self.txn_ctrl.send = Mock(return_value=make_deferred_yieldable(d)) service = Mock(id=4) event = Mock(event_id="first") event2 = Mock(event_id="second") event3 = Mock(event_id="third") # Send an event and don't resolve it just yet. - self.queuer.enqueue_event(service, event) + self.scheduler.enqueue_for_appservice(service, events=[event]) # Send more events: expect send() to NOT be called multiple times. - self.queuer.enqueue_event(service, event2) - self.queuer.enqueue_event(service, event3) - self.txn_ctrl.send.assert_called_with(service, [event], []) + # (call enqueue_for_appservice multiple times deliberately) + self.scheduler.enqueue_for_appservice(service, events=[event2]) + self.scheduler.enqueue_for_appservice(service, events=[event3]) + self.txn_ctrl.send.assert_called_with(service, [event], [], []) self.assertEquals(1, self.txn_ctrl.send.call_count) # Resolve the send event: expect the queued events to be sent d.callback(service) - self.txn_ctrl.send.assert_called_with(service, [event2, event3], []) + self.txn_ctrl.send.assert_called_with(service, [event2, event3], [], []) self.assertEquals(2, self.txn_ctrl.send.call_count) def test_multiple_service_queues(self): @@ -238,23 +253,23 @@ def test_multiple_service_queues(self): send_return_list = [srv_1_defer, srv_2_defer] - def do_send(x, y, z): + def do_send(*args, **kwargs): return make_deferred_yieldable(send_return_list.pop(0)) self.txn_ctrl.send = Mock(side_effect=do_send) # send events for different ASes and make sure they are sent - self.queuer.enqueue_event(srv1, srv_1_event) - self.queuer.enqueue_event(srv1, srv_1_event2) - self.txn_ctrl.send.assert_called_with(srv1, [srv_1_event], []) - self.queuer.enqueue_event(srv2, srv_2_event) - self.queuer.enqueue_event(srv2, srv_2_event2) - self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event], []) + self.scheduler.enqueue_for_appservice(srv1, events=[srv_1_event]) + self.scheduler.enqueue_for_appservice(srv1, events=[srv_1_event2]) + self.txn_ctrl.send.assert_called_with(srv1, [srv_1_event], [], []) + self.scheduler.enqueue_for_appservice(srv2, events=[srv_2_event]) + self.scheduler.enqueue_for_appservice(srv2, events=[srv_2_event2]) + self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event], [], []) # make sure callbacks for a service only send queued events for THAT # service srv_2_defer.callback(srv2) - self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event2], []) + self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event2], [], []) self.assertEquals(3, self.txn_ctrl.send.call_count) def test_send_large_txns(self): @@ -262,7 +277,7 @@ def test_send_large_txns(self): srv_2_defer = defer.Deferred() send_return_list = [srv_1_defer, srv_2_defer] - def do_send(x, y, z): + def do_send(*args, **kwargs): return make_deferred_yieldable(send_return_list.pop(0)) self.txn_ctrl.send = Mock(side_effect=do_send) @@ -270,67 +285,65 @@ def do_send(x, y, z): service = Mock(id=4, name="service") event_list = [Mock(name="event%i" % (i + 1)) for i in range(200)] for event in event_list: - self.queuer.enqueue_event(service, event) + 
self.scheduler.enqueue_for_appservice(service, [event], []) # Expect the first event to be sent immediately. - self.txn_ctrl.send.assert_called_with(service, [event_list[0]], []) + self.txn_ctrl.send.assert_called_with(service, [event_list[0]], [], []) srv_1_defer.callback(service) # Then send the next 100 events - self.txn_ctrl.send.assert_called_with(service, event_list[1:101], []) + self.txn_ctrl.send.assert_called_with(service, event_list[1:101], [], []) srv_2_defer.callback(service) # Then the final 99 events - self.txn_ctrl.send.assert_called_with(service, event_list[101:], []) + self.txn_ctrl.send.assert_called_with(service, event_list[101:], [], []) self.assertEquals(3, self.txn_ctrl.send.call_count) def test_send_single_ephemeral_no_queue(self): # Expect the event to be sent immediately. service = Mock(id=4, name="service") event_list = [Mock(name="event")] - self.queuer.enqueue_ephemeral(service, event_list) - self.txn_ctrl.send.assert_called_once_with(service, [], event_list) + self.scheduler.enqueue_for_appservice(service, ephemeral=event_list) + self.txn_ctrl.send.assert_called_once_with(service, [], event_list, []) def test_send_multiple_ephemeral_no_queue(self): # Expect the event to be sent immediately. service = Mock(id=4, name="service") event_list = [Mock(name="event1"), Mock(name="event2"), Mock(name="event3")] - self.queuer.enqueue_ephemeral(service, event_list) - self.txn_ctrl.send.assert_called_once_with(service, [], event_list) + self.scheduler.enqueue_for_appservice(service, ephemeral=event_list) + self.txn_ctrl.send.assert_called_once_with(service, [], event_list, []) def test_send_single_ephemeral_with_queue(self): d = defer.Deferred() - self.txn_ctrl.send = Mock( - side_effect=lambda x, y, z: make_deferred_yieldable(d) - ) + self.txn_ctrl.send = Mock(return_value=make_deferred_yieldable(d)) service = Mock(id=4) event_list_1 = [Mock(event_id="event1"), Mock(event_id="event2")] event_list_2 = [Mock(event_id="event3"), Mock(event_id="event4")] event_list_3 = [Mock(event_id="event5"), Mock(event_id="event6")] # Send an event and don't resolve it just yet. - self.queuer.enqueue_ephemeral(service, event_list_1) + self.scheduler.enqueue_for_appservice(service, ephemeral=event_list_1) # Send more events: expect send() to NOT be called multiple times. - self.queuer.enqueue_ephemeral(service, event_list_2) - self.queuer.enqueue_ephemeral(service, event_list_3) - self.txn_ctrl.send.assert_called_with(service, [], event_list_1) + self.scheduler.enqueue_for_appservice(service, ephemeral=event_list_2) + self.scheduler.enqueue_for_appservice(service, ephemeral=event_list_3) + self.txn_ctrl.send.assert_called_with(service, [], event_list_1, []) self.assertEquals(1, self.txn_ctrl.send.call_count) # Resolve txn_ctrl.send d.callback(service) # Expect the queued events to be sent - self.txn_ctrl.send.assert_called_with(service, [], event_list_2 + event_list_3) + self.txn_ctrl.send.assert_called_with( + service, [], event_list_2 + event_list_3, [] + ) self.assertEquals(2, self.txn_ctrl.send.call_count) def test_send_large_txns_ephemeral(self): d = defer.Deferred() - self.txn_ctrl.send = Mock( - side_effect=lambda x, y, z: make_deferred_yieldable(d) - ) + self.txn_ctrl.send = Mock(return_value=make_deferred_yieldable(d)) # Expect the event to be sent immediately. 
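        # (The scheduler caps each ephemeral batch at 100 events, so the 150
        # events queued below should arrive as a chunk of 100 followed by a
        # chunk of 50.)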
service = Mock(id=4, name="service") first_chunk = [Mock(name="event%i" % (i + 1)) for i in range(100)] second_chunk = [Mock(name="event%i" % (i + 101)) for i in range(50)] event_list = first_chunk + second_chunk - self.queuer.enqueue_ephemeral(service, event_list) - self.txn_ctrl.send.assert_called_once_with(service, [], first_chunk) + self.scheduler.enqueue_for_appservice(service, ephemeral=event_list) + self.txn_ctrl.send.assert_called_once_with(service, [], first_chunk, []) d.callback(service) - self.txn_ctrl.send.assert_called_with(service, [], second_chunk) + self.txn_ctrl.send.assert_called_with(service, [], second_chunk, []) self.assertEquals(2, self.txn_ctrl.send.call_count) diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index d6f14e2dba79..fe57ff267119 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -1,4 +1,4 @@ -# Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2015-2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,18 +12,23 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Dict, Iterable, List, Optional from unittest.mock import Mock from twisted.internet import defer +import synapse.rest.admin +import synapse.storage +from synapse.appservice import ApplicationService from synapse.handlers.appservice import ApplicationServicesHandler +from synapse.rest.client import login, receipts, room, sendtodevice from synapse.types import RoomStreamToken +from synapse.util.stringutils import random_string -from tests.test_utils import make_awaitable +from tests import unittest +from tests.test_utils import make_awaitable, simple_async_mock from tests.utils import MockClock -from .. 
import unittest - class AppServiceHandlerTestCase(unittest.TestCase): """Tests the ApplicationServicesHandler.""" @@ -36,6 +41,9 @@ def setUp(self): hs.get_datastore.return_value = self.mock_store self.mock_store.get_received_ts.return_value = make_awaitable(0) self.mock_store.set_appservice_last_pos.return_value = make_awaitable(None) + self.mock_store.set_appservice_stream_type_pos.return_value = make_awaitable( + None + ) hs.get_application_service_api.return_value = self.mock_as_api hs.get_application_service_scheduler.return_value = self.mock_scheduler hs.get_clock.return_value = MockClock() @@ -63,8 +71,8 @@ def test_notify_interested_services(self): ] self.handler.notify_interested_services(RoomStreamToken(None, 1)) - self.mock_scheduler.submit_event_for_as.assert_called_once_with( - interested_service, event + self.mock_scheduler.enqueue_for_appservice.assert_called_once_with( + interested_service, events=[event] ) def test_query_user_exists_unknown_user(self): @@ -261,7 +269,6 @@ def test_notify_interested_services_ephemeral(self): """ interested_service = self._mkservice(is_interested=True) services = [interested_service] - self.mock_store.get_app_services.return_value = services self.mock_store.get_type_stream_id_for_appservice.return_value = make_awaitable( 579 @@ -275,10 +282,10 @@ def test_notify_interested_services_ephemeral(self): self.handler.notify_interested_services_ephemeral( "receipt_key", 580, ["@fakerecipient:example.com"] ) - self.mock_scheduler.submit_ephemeral_events_for_as.assert_called_once_with( - interested_service, [event] + self.mock_scheduler.enqueue_for_appservice.assert_called_once_with( + interested_service, ephemeral=[event] ) - self.mock_store.set_type_stream_id_for_appservice.assert_called_once_with( + self.mock_store.set_appservice_stream_type_pos.assert_called_once_with( interested_service, "read_receipt", 580, @@ -305,7 +312,10 @@ def test_notify_interested_services_ephemeral_out_of_order(self): self.handler.notify_interested_services_ephemeral( "receipt_key", 580, ["@fakerecipient:example.com"] ) - self.mock_scheduler.submit_ephemeral_events_for_as.assert_not_called() + # This method will be called, but with an empty list of events + self.mock_scheduler.enqueue_for_appservice.assert_called_once_with( + interested_service, ephemeral=[] + ) def _mkservice(self, is_interested, protocols=None): service = Mock() @@ -321,3 +331,252 @@ def _mkservice_alias(self, is_interested_in_alias): service.token = "mock_service_token" service.url = "mock_service_url" return service + + +class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase): + """ + Tests that the ApplicationServicesHandler sends events to application + services correctly. + """ + + servlets = [ + synapse.rest.admin.register_servlets_for_client_rest_resource, + login.register_servlets, + room.register_servlets, + sendtodevice.register_servlets, + receipts.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + # Mock the ApplicationServiceScheduler's _TransactionController's send method so that + # we can track any outgoing ephemeral events + self.send_mock = simple_async_mock() + hs.get_application_service_handler().scheduler.txn_ctrl.send = self.send_mock + + # Mock out application services, and allow defining our own in tests + self._services: List[ApplicationService] = [] + self.hs.get_datastore().get_app_services = Mock(return_value=self._services) + + # A user on the homeserver. 
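+        # (A fixed device ID is supplied at login so that the to-device tests
+        # below can address this user's device directly.)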
+ self.local_user_device_id = "local_device" + self.local_user = self.register_user("local_user", "password") + self.local_user_token = self.login( + "local_user", "password", self.local_user_device_id + ) + + # A user on the homeserver which lies within an appservice's exclusive user namespace. + self.exclusive_as_user_device_id = "exclusive_as_device" + self.exclusive_as_user = self.register_user("exclusive_as_user", "password") + self.exclusive_as_user_token = self.login( + "exclusive_as_user", "password", self.exclusive_as_user_device_id + ) + + @unittest.override_config( + {"experimental_features": {"msc2409_to_device_messages_enabled": True}} + ) + def test_application_services_receive_local_to_device(self): + """ + Test that when a user sends a to-device message to another user + that is an application service's user namespace, the + application service will receive it. + """ + interested_appservice = self._register_application_service( + namespaces={ + ApplicationService.NS_USERS: [ + { + "regex": "@exclusive_as_user:.+", + "exclusive": True, + } + ], + }, + ) + + # Have local_user send a to-device message to exclusive_as_user + message_content = {"some_key": "some really interesting value"} + chan = self.make_request( + "PUT", + "/_matrix/client/r0/sendToDevice/m.room_key_request/3", + content={ + "messages": { + self.exclusive_as_user: { + self.exclusive_as_user_device_id: message_content + } + } + }, + access_token=self.local_user_token, + ) + self.assertEqual(chan.code, 200, chan.result) + + # Have exclusive_as_user send a to-device message to local_user + chan = self.make_request( + "PUT", + "/_matrix/client/r0/sendToDevice/m.room_key_request/4", + content={ + "messages": { + self.local_user: {self.local_user_device_id: message_content} + } + }, + access_token=self.exclusive_as_user_token, + ) + self.assertEqual(chan.code, 200, chan.result) + + # Check if our application service - that is interested in exclusive_as_user - received + # the to-device message as part of an AS transaction. + # Only the local_user -> exclusive_as_user to-device message should have been forwarded to the AS. + # + # The uninterested application service should not have been notified at all. + self.send_mock.assert_called_once() + service, _events, _ephemeral, to_device_messages = self.send_mock.call_args[0] + + # Assert that this was the same to-device message that local_user sent + self.assertEqual(service, interested_appservice) + self.assertEqual(to_device_messages[0]["type"], "m.room_key_request") + self.assertEqual(to_device_messages[0]["sender"], self.local_user) + + # Additional fields 'to_user_id' and 'to_device_id' specifically for + # to-device messages via the AS API + self.assertEqual(to_device_messages[0]["to_user_id"], self.exclusive_as_user) + self.assertEqual( + to_device_messages[0]["to_device_id"], self.exclusive_as_user_device_id + ) + self.assertEqual(to_device_messages[0]["content"], message_content) + + @unittest.override_config( + {"experimental_features": {"msc2409_to_device_messages_enabled": True}} + ) + def test_application_services_receive_bursts_of_to_device(self): + """ + Test that when a user sends >100 to-device messages at once, any + interested AS's will receive them in separate transactions. + + Also tests that uninterested application services do not receive messages. 
+ """ + # Register two application services with exclusive interest in a user + interested_appservices = [] + for _ in range(2): + appservice = self._register_application_service( + namespaces={ + ApplicationService.NS_USERS: [ + { + "regex": "@exclusive_as_user:.+", + "exclusive": True, + } + ], + }, + ) + interested_appservices.append(appservice) + + # ...and an application service which does not have any user interest. + self._register_application_service() + + to_device_message_content = { + "some key": "some interesting value", + } + + # We need to send a large burst of to-device messages. We also would like to + # include them all in the same application service transaction so that we can + # test large transactions. + # + # To do this, we can send a single to-device message to many user devices at + # once. + # + # We insert number_of_messages - 1 messages into the database directly. We'll then + # send a final to-device message to the real device, which will also kick off + # an AS transaction (as just inserting messages into the DB won't). + number_of_messages = 150 + fake_device_ids = [f"device_{num}" for num in range(number_of_messages - 1)] + messages = { + self.exclusive_as_user: { + device_id: to_device_message_content for device_id in fake_device_ids + } + } + + # Create a fake device per message. We can't send to-device messages to + # a device that doesn't exist. + self.get_success( + self.hs.get_datastore().db_pool.simple_insert_many( + desc="test_application_services_receive_burst_of_to_device", + table="devices", + keys=("user_id", "device_id"), + values=[ + ( + self.exclusive_as_user, + device_id, + ) + for device_id in fake_device_ids + ], + ) + ) + + # Seed the device_inbox table with our fake messages + self.get_success( + self.hs.get_datastore().add_messages_to_device_inbox(messages, {}) + ) + + # Now have local_user send a final to-device message to exclusive_as_user. All unsent + # to-device messages should be sent to any application services + # interested in exclusive_as_user. + chan = self.make_request( + "PUT", + "/_matrix/client/r0/sendToDevice/m.room_key_request/4", + content={ + "messages": { + self.exclusive_as_user: { + self.exclusive_as_user_device_id: to_device_message_content + } + } + }, + access_token=self.local_user_token, + ) + self.assertEqual(chan.code, 200, chan.result) + + self.send_mock.assert_called() + + # Count the total number of to-device messages that were sent out per-service. + # Ensure that we only sent to-device messages to interested services, and that + # each interested service received the full count of to-device messages. + service_id_to_message_count: Dict[str, int] = {} + + for call in self.send_mock.call_args_list: + service, _events, _ephemeral, to_device_messages = call[0] + + # Check that this was made to an interested service + self.assertIn(service, interested_appservices) + + # Add to the count of messages for this application service + service_id_to_message_count.setdefault(service.id, 0) + service_id_to_message_count[service.id] += len(to_device_messages) + + # Assert that each interested service received the full count of messages + for count in service_id_to_message_count.values(): + self.assertEqual(count, number_of_messages) + + def _register_application_service( + self, + namespaces: Optional[Dict[str, Iterable[Dict]]] = None, + ) -> ApplicationService: + """ + Register a new application service, with the given namespaces of interest. 
+ + Args: + namespaces: A dictionary containing any user, room or alias namespaces that + the application service is interested in. + + Returns: + The registered application service. + """ + # Create an application service + appservice = ApplicationService( + token=random_string(10), + hostname="example.com", + id=random_string(10), + sender="@as:example.com", + rate_limited=False, + namespaces=namespaces, + supports_ephemeral=True, + ) + + # Register the application service + self._services.append(appservice) + + return appservice diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index 329490caad53..ddcb7f554918 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -266,7 +266,9 @@ def test_create_appservice_txn_first( service = Mock(id=self.as_list[0]["id"]) events = cast(List[EventBase], [Mock(event_id="e1"), Mock(event_id="e2")]) txn = self.get_success( - defer.ensureDeferred(self.store.create_appservice_txn(service, events, [])) + defer.ensureDeferred( + self.store.create_appservice_txn(service, events, [], []) + ) ) self.assertEquals(txn.id, 1) self.assertEquals(txn.events, events) @@ -280,7 +282,9 @@ def test_create_appservice_txn_older_last_txn( self.get_success(self._set_last_txn(service.id, 9643)) # AS is falling behind self.get_success(self._insert_txn(service.id, 9644, events)) self.get_success(self._insert_txn(service.id, 9645, events)) - txn = self.get_success(self.store.create_appservice_txn(service, events, [])) + txn = self.get_success( + self.store.create_appservice_txn(service, events, [], []) + ) self.assertEquals(txn.id, 9646) self.assertEquals(txn.events, events) self.assertEquals(txn.service, service) @@ -291,7 +295,9 @@ def test_create_appservice_txn_up_to_date_last_txn( service = Mock(id=self.as_list[0]["id"]) events = cast(List[EventBase], [Mock(event_id="e1"), Mock(event_id="e2")]) self.get_success(self._set_last_txn(service.id, 9643)) - txn = self.get_success(self.store.create_appservice_txn(service, events, [])) + txn = self.get_success( + self.store.create_appservice_txn(service, events, [], []) + ) self.assertEquals(txn.id, 9644) self.assertEquals(txn.events, events) self.assertEquals(txn.service, service) @@ -313,7 +319,9 @@ def test_create_appservice_txn_up_fuzzing( self.get_success(self._insert_txn(self.as_list[2]["id"], 10, events)) self.get_success(self._insert_txn(self.as_list[3]["id"], 9643, events)) - txn = self.get_success(self.store.create_appservice_txn(service, events, [])) + txn = self.get_success( + self.store.create_appservice_txn(service, events, [], []) + ) self.assertEquals(txn.id, 9644) self.assertEquals(txn.events, events) self.assertEquals(txn.service, service) @@ -481,10 +489,10 @@ def test_get_type_stream_id_for_appservice_invalid_type(self) -> None: ValueError, ) - def test_set_type_stream_id_for_appservice(self) -> None: + def test_set_appservice_stream_type_pos(self) -> None: read_receipt_value = 1024 self.get_success( - self.store.set_type_stream_id_for_appservice( + self.store.set_appservice_stream_type_pos( self.service, "read_receipt", read_receipt_value ) ) @@ -494,7 +502,7 @@ def test_set_type_stream_id_for_appservice(self) -> None: self.assertEqual(result, read_receipt_value) self.get_success( - self.store.set_type_stream_id_for_appservice( + self.store.set_appservice_stream_type_pos( self.service, "presence", read_receipt_value ) ) @@ -503,9 +511,9 @@ def test_set_type_stream_id_for_appservice(self) -> None: ) self.assertEqual(result, read_receipt_value) - def 
test_set_type_stream_id_for_appservice_invalid_type(self) -> None: + def test_set_appservice_stream_type_pos_invalid_type(self) -> None: self.get_failure( - self.store.set_type_stream_id_for_appservice(self.service, "foobar", 1024), + self.store.set_appservice_stream_type_pos(self.service, "foobar", 1024), ValueError, ) From 5c16c3302125f2a5d8a006ad42792f22b320c737 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 1 Feb 2022 16:23:55 +0100 Subject: [PATCH 179/474] Allow modules to retrieve server and worker names (#11868) Fixes #10701 --- changelog.d/11868.feature | 1 + synapse/module_api/__init__.py | 26 ++++++++++++++++++++++++++ 2 files changed, 27 insertions(+) create mode 100644 changelog.d/11868.feature diff --git a/changelog.d/11868.feature b/changelog.d/11868.feature new file mode 100644 index 000000000000..3723dac4ea10 --- /dev/null +++ b/changelog.d/11868.feature @@ -0,0 +1 @@ +Allow modules to retrieve the current instance's server name and worker name. diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 788b2e47d523..29fbc73c971d 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -401,6 +401,32 @@ def email_app_name(self) -> str: """ return self._hs.config.email.email_app_name + @property + def server_name(self) -> str: + """The server name for the local homeserver. + + Added in Synapse v1.53.0. + """ + return self._server_name + + @property + def worker_name(self) -> Optional[str]: + """The name of the worker this specific instance is running as per the + "worker_name" configuration setting, or None if it's the main process. + + Added in Synapse v1.53.0. + """ + return self._hs.config.worker.worker_name + + @property + def worker_app(self) -> Optional[str]: + """The name of the worker app this specific instance is running as per the + "worker_app" configuration setting, or None if it's the main process. + + Added in Synapse v1.53.0. + """ + return self._hs.config.worker.worker_app + async def get_userinfo_by_id(self, user_id: str) -> Optional[UserInfo]: """Get user info by user_id From 3f72c2a322af0d9d45452aa9c78723793bea2432 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 1 Feb 2022 17:45:13 +0000 Subject: [PATCH 180/474] Convert `ApplicationServiceTestCase` to use `simple_async_mock` (#11880) --- changelog.d/11880.misc | 1 + tests/appservice/test_appservice.py | 19 +++++++++---------- 2 files changed, 10 insertions(+), 10 deletions(-) create mode 100644 changelog.d/11880.misc diff --git a/changelog.d/11880.misc b/changelog.d/11880.misc new file mode 100644 index 000000000000..8125947b2a17 --- /dev/null +++ b/changelog.d/11880.misc @@ -0,0 +1 @@ +Convert `ApplicationServiceTestCase` to use `simple_async_mock`. 
\ No newline at end of file diff --git a/tests/appservice/test_appservice.py b/tests/appservice/test_appservice.py index ba2a2bfd64ad..07d8105f41d6 100644 --- a/tests/appservice/test_appservice.py +++ b/tests/appservice/test_appservice.py @@ -19,6 +19,7 @@ from synapse.appservice import ApplicationService, Namespace from tests import unittest +from tests.test_utils import simple_async_mock def _regex(regex: str, exclusive: bool = True) -> Namespace: @@ -91,10 +92,10 @@ def test_regex_alias_match(self): self.service.namespaces[ApplicationService.NS_ALIASES].append( _regex("#irc_.*:matrix.org") ) - self.store.get_aliases_for_room.return_value = defer.succeed( + self.store.get_aliases_for_room = simple_async_mock( ["#irc_foobar:matrix.org", "#athing:matrix.org"] ) - self.store.get_users_in_room.return_value = defer.succeed([]) + self.store.get_users_in_room = simple_async_mock([]) self.assertTrue( ( yield defer.ensureDeferred( @@ -144,10 +145,10 @@ def test_regex_alias_no_match(self): self.service.namespaces[ApplicationService.NS_ALIASES].append( _regex("#irc_.*:matrix.org") ) - self.store.get_aliases_for_room.return_value = defer.succeed( + self.store.get_aliases_for_room = simple_async_mock( ["#xmpp_foobar:matrix.org", "#athing:matrix.org"] ) - self.store.get_users_in_room.return_value = defer.succeed([]) + self.store.get_users_in_room = simple_async_mock([]) self.assertFalse( ( yield defer.ensureDeferred( @@ -163,10 +164,8 @@ def test_regex_multiple_matches(self): ) self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) self.event.sender = "@irc_foobar:matrix.org" - self.store.get_aliases_for_room.return_value = defer.succeed( - ["#irc_barfoo:matrix.org"] - ) - self.store.get_users_in_room.return_value = defer.succeed([]) + self.store.get_aliases_for_room = simple_async_mock(["#irc_barfoo:matrix.org"]) + self.store.get_users_in_room = simple_async_mock([]) self.assertTrue( ( yield defer.ensureDeferred( @@ -191,10 +190,10 @@ def test_interested_in_self(self): def test_member_list_match(self): self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) # Note that @irc_fo:here is the AS user. - self.store.get_users_in_room.return_value = defer.succeed( + self.store.get_users_in_room = simple_async_mock( ["@alice:here", "@irc_fo:here", "@bob:here"] ) - self.store.get_aliases_for_room.return_value = defer.succeed([]) + self.store.get_aliases_for_room = simple_async_mock([]) self.event.sender = "@xmpp_foobar:matrix.org" self.assertTrue( From 513913cc6ba77833082178b7d2b2f0565daf3c99 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 2 Feb 2022 09:59:55 +0000 Subject: [PATCH 181/474] Expose the registered device ID from the `register_appservice_user` test helper. (#11615) --- changelog.d/11615.misc | 1 + tests/handlers/test_user_directory.py | 6 ++++-- tests/rest/client/test_room_batch.py | 2 +- tests/storage/test_user_directory.py | 4 +++- tests/unittest.py | 9 +++++---- 5 files changed, 14 insertions(+), 8 deletions(-) create mode 100644 changelog.d/11615.misc diff --git a/changelog.d/11615.misc b/changelog.d/11615.misc new file mode 100644 index 000000000000..0aa953650493 --- /dev/null +++ b/changelog.d/11615.misc @@ -0,0 +1 @@ +Expose the registered device ID from the `register_appservice_user` test helper. 
\ No newline at end of file
diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py
index 70c621b825f4..482c90ef68c5 100644
--- a/tests/handlers/test_user_directory.py
+++ b/tests/handlers/test_user_directory.py
@@ -169,7 +169,9 @@ def test_excludes_appservices_user(self) -> None:
         # Register an AS user.
         user = self.register_user("user", "pass")
         token = self.login(user, "pass")
-        as_user = self.register_appservice_user("as_user_potato", self.appservice.token)
+        as_user, _ = self.register_appservice_user(
+            "as_user_potato", self.appservice.token
+        )

         # Join the AS user to rooms owned by the normal user.
         public, private = self._create_rooms_and_inject_memberships(
@@ -388,7 +390,7 @@ def test_handle_local_profile_change_with_deactivated_user(self) -> None:

     def test_handle_local_profile_change_with_appservice_user(self) -> None:
         # create user
-        as_user_id = self.register_appservice_user(
+        as_user_id, _ = self.register_appservice_user(
             "as_user_alice", self.appservice.token
         )

diff --git a/tests/rest/client/test_room_batch.py b/tests/rest/client/test_room_batch.py
index 721454c1875f..e9f870403536 100644
--- a/tests/rest/client/test_room_batch.py
+++ b/tests/rest/client/test_room_batch.py
@@ -89,7 +89,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.clock = clock
         self.storage = hs.get_storage()

-        self.virtual_user_id = self.register_appservice_user(
+        self.virtual_user_id, _ = self.register_appservice_user(
             "as_user_potato", self.appservice.token
         )

diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py
index 7f5b28aed8c4..48f1e9d8411b 100644
--- a/tests/storage/test_user_directory.py
+++ b/tests/storage/test_user_directory.py
@@ -341,7 +341,9 @@ def test_population_excludes_appservice_user(self) -> None:
         # Register an AS user.
         user = self.register_user("user", "pass")
         token = self.login(user, "pass")
-        as_user = self.register_appservice_user("as_user_potato", self.appservice.token)
+        as_user, _ = self.register_appservice_user(
+            "as_user_potato", self.appservice.token
+        )

         # Join the AS user to rooms owned by the normal user.
         public, private = self._create_rooms_and_inject_memberships(
diff --git a/tests/unittest.py b/tests/unittest.py
index 14318483674e..6fc617601a40 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -620,18 +620,19 @@ def register_appservice_user(
         self,
         username: str,
         appservice_token: str,
-    ) -> str:
+    ) -> Tuple[str, str]:
         """Register an appservice user as an application service.
         Requires the client-facing registration API be registered.

         Args:
             username: the user to be registered by an application service.
-                Should be a full username, i.e. ""@localpart:hostname" as opposed to just "localpart"
+                Should NOT be a full username, i.e. just "localpart" as opposed to "@localpart:hostname"
             appservice_token: the access token for that application service.

         Raises: if the request to '/register' does not return 200 OK.

-        Returns: the MXID of the new user.
+        Returns:
+            The MXID of the new user, the device ID of the new user's first device.
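+
+        Example (within a HomeserverTestCase, assuming `self.appservice` has
+        already been registered):
+            user_id, device_id = self.register_appservice_user(
+                "as_user_potato", self.appservice.token
+            )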
""" channel = self.make_request( "POST", @@ -643,7 +644,7 @@ def register_appservice_user( access_token=appservice_token, ) self.assertEqual(channel.code, 200, channel.json_body) - return channel.json_body["user_id"] + return channel.json_body["user_id"], channel.json_body["device_id"] def login( self, From af795173bec447da44b65b739d05d0083b02229d Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 2 Feb 2022 11:37:18 +0000 Subject: [PATCH 182/474] Add a background database update to purge account data for deactivated users. (#11655) --- changelog.d/11655.feature | 1 + scripts/synapse_port_db | 4 + .../storage/databases/main/account_data.py | 164 ++++++++++++------ ..._account_data_for_deactivated_accounts.sql | 20 +++ tests/handlers/test_deactivate_account.py | 106 +++++++++++ 5 files changed, 240 insertions(+), 55 deletions(-) create mode 100644 changelog.d/11655.feature create mode 100644 synapse/storage/schema/main/delta/68/03_delete_account_data_for_deactivated_accounts.sql diff --git a/changelog.d/11655.feature b/changelog.d/11655.feature new file mode 100644 index 000000000000..dc426fb658ac --- /dev/null +++ b/changelog.d/11655.feature @@ -0,0 +1 @@ +Remove account data (including client config, push rules and ignored users) upon user deactivation. \ No newline at end of file diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index 640ff15277db..70ee4e5c7f8e 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -36,6 +36,8 @@ from synapse.logging.context import ( run_in_background, ) from synapse.storage.database import DatabasePool, make_conn +from synapse.storage.databases.main import PushRuleStore +from synapse.storage.databases.main.account_data import AccountDataWorkerStore from synapse.storage.databases.main.client_ips import ClientIpBackgroundUpdateStore from synapse.storage.databases.main.deviceinbox import DeviceInboxBackgroundUpdateStore from synapse.storage.databases.main.devices import DeviceBackgroundUpdateStore @@ -180,6 +182,8 @@ class Store( UserDirectoryBackgroundUpdateStore, EndToEndKeyBackgroundStore, StatsStore, + AccountDataWorkerStore, + PushRuleStore, PusherWorkerStore, PresenceBackgroundUpdateStore, GroupServerWorkerStore, diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 5bfa408f74b0..52146aacc8c0 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -106,6 +106,11 @@ def __init__( "AccountDataAndTagsChangeCache", account_max ) + self.db_pool.updates.register_background_update_handler( + "delete_account_data_for_deactivated_users", + self._delete_account_data_for_deactivated_users, + ) + def get_max_account_data_stream_id(self) -> int: """Get the current max stream ID for account data stream @@ -549,72 +554,121 @@ def _add_account_data_for_user( async def purge_account_data_for_user(self, user_id: str) -> None: """ - Removes the account data for a user. + Removes ALL the account data for a user. + Intended to be used upon user deactivation. - This is intended to be used upon user deactivation and also removes any - derived information from account data (e.g. push rules and ignored users). + Also purges the user from the ignored_users cache table + and the push_rules cache tables. + """ - Args: - user_id: The user ID to remove data for. 
+ await self.db_pool.runInteraction( + "purge_account_data_for_user_txn", + self._purge_account_data_for_user_txn, + user_id, + ) + + def _purge_account_data_for_user_txn( + self, txn: LoggingTransaction, user_id: str + ) -> None: """ + See `purge_account_data_for_user`. + """ + # Purge from the primary account_data tables. + self.db_pool.simple_delete_txn( + txn, table="account_data", keyvalues={"user_id": user_id} + ) - def purge_account_data_for_user_txn(txn: LoggingTransaction) -> None: - # Purge from the primary account_data tables. - self.db_pool.simple_delete_txn( - txn, table="account_data", keyvalues={"user_id": user_id} - ) + self.db_pool.simple_delete_txn( + txn, table="room_account_data", keyvalues={"user_id": user_id} + ) - self.db_pool.simple_delete_txn( - txn, table="room_account_data", keyvalues={"user_id": user_id} - ) + # Purge from ignored_users where this user is the ignorer. + # N.B. We don't purge where this user is the ignoree, because that + # interferes with other users' account data. + # It's also not this user's data to delete! + self.db_pool.simple_delete_txn( + txn, table="ignored_users", keyvalues={"ignorer_user_id": user_id} + ) - # Purge from ignored_users where this user is the ignorer. - # N.B. We don't purge where this user is the ignoree, because that - # interferes with other users' account data. - # It's also not this user's data to delete! - self.db_pool.simple_delete_txn( - txn, table="ignored_users", keyvalues={"ignorer_user_id": user_id} - ) + # Remove the push rules + self.db_pool.simple_delete_txn( + txn, table="push_rules", keyvalues={"user_name": user_id} + ) + self.db_pool.simple_delete_txn( + txn, table="push_rules_enable", keyvalues={"user_name": user_id} + ) + self.db_pool.simple_delete_txn( + txn, table="push_rules_stream", keyvalues={"user_id": user_id} + ) - # Remove the push rules - self.db_pool.simple_delete_txn( - txn, table="push_rules", keyvalues={"user_name": user_id} - ) - self.db_pool.simple_delete_txn( - txn, table="push_rules_enable", keyvalues={"user_name": user_id} - ) - self.db_pool.simple_delete_txn( - txn, table="push_rules_stream", keyvalues={"user_id": user_id} - ) + # Invalidate caches as appropriate + self._invalidate_cache_and_stream( + txn, self.get_account_data_for_room_and_type, (user_id,) + ) + self._invalidate_cache_and_stream( + txn, self.get_account_data_for_user, (user_id,) + ) + self._invalidate_cache_and_stream( + txn, self.get_global_account_data_by_type_for_user, (user_id,) + ) + self._invalidate_cache_and_stream( + txn, self.get_account_data_for_room, (user_id,) + ) + self._invalidate_cache_and_stream(txn, self.get_push_rules_for_user, (user_id,)) + self._invalidate_cache_and_stream( + txn, self.get_push_rules_enabled_for_user, (user_id,) + ) + # This user might be contained in the ignored_by cache for other users, + # so we have to invalidate it all. 
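+        # (Invalidating individual entries would require knowing exactly which
+        # users this user had ignored, so clearing the whole cache is the
+        # simpler option here.)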
+ self._invalidate_all_cache_and_stream(txn, self.ignored_by) - # Invalidate caches as appropriate - self._invalidate_cache_and_stream( - txn, self.get_account_data_for_room_and_type, (user_id,) - ) - self._invalidate_cache_and_stream( - txn, self.get_account_data_for_user, (user_id,) - ) - self._invalidate_cache_and_stream( - txn, self.get_global_account_data_by_type_for_user, (user_id,) - ) - self._invalidate_cache_and_stream( - txn, self.get_account_data_for_room, (user_id,) - ) - self._invalidate_cache_and_stream( - txn, self.get_push_rules_for_user, (user_id,) - ) - self._invalidate_cache_and_stream( - txn, self.get_push_rules_enabled_for_user, (user_id,) - ) - # This user might be contained in the ignored_by cache for other users, - # so we have to invalidate it all. - self._invalidate_all_cache_and_stream(txn, self.ignored_by) + async def _delete_account_data_for_deactivated_users( + self, progress: dict, batch_size: int + ) -> int: + """ + Retroactively purges account data for users that have already been deactivated. + Gets run as a background update caused by a schema delta. + """ - await self.db_pool.runInteraction( - "purge_account_data_for_user_txn", - purge_account_data_for_user_txn, + last_user: str = progress.get("last_user", "") + + def _delete_account_data_for_deactivated_users_txn( + txn: LoggingTransaction, + ) -> int: + sql = """ + SELECT name FROM users + WHERE deactivated = ? and name > ? + ORDER BY name ASC + LIMIT ? + """ + + txn.execute(sql, (1, last_user, batch_size)) + users = [row[0] for row in txn] + + for user in users: + self._purge_account_data_for_user_txn(txn, user_id=user) + + if users: + self.db_pool.updates._background_update_progress_txn( + txn, + "delete_account_data_for_deactivated_users", + {"last_user": users[-1]}, + ) + + return len(users) + + number_deleted = await self.db_pool.runInteraction( + "_delete_account_data_for_deactivated_users", + _delete_account_data_for_deactivated_users_txn, ) + if number_deleted < batch_size: + await self.db_pool.updates._end_background_update( + "delete_account_data_for_deactivated_users" + ) + + return number_deleted + class AccountDataStore(AccountDataWorkerStore): pass diff --git a/synapse/storage/schema/main/delta/68/03_delete_account_data_for_deactivated_accounts.sql b/synapse/storage/schema/main/delta/68/03_delete_account_data_for_deactivated_accounts.sql new file mode 100644 index 000000000000..e1249338438e --- /dev/null +++ b/synapse/storage/schema/main/delta/68/03_delete_account_data_for_deactivated_accounts.sql @@ -0,0 +1,20 @@ +/* Copyright 2021 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +-- We want to retroactively delete account data for users that were already +-- deactivated. 
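+-- The ordering value 6803 mirrors this delta's position in the schema
+-- (68/03), and the update name matches the handler registered in
+-- AccountDataWorkerStore above.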
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (6803, 'delete_account_data_for_deactivated_users', '{}'); diff --git a/tests/handlers/test_deactivate_account.py b/tests/handlers/test_deactivate_account.py index 3da597768c62..01096a1581e3 100644 --- a/tests/handlers/test_deactivate_account.py +++ b/tests/handlers/test_deactivate_account.py @@ -217,3 +217,109 @@ def test_ignored_users_deleted_upon_deactivation(self) -> None: self.assertEqual( self.get_success(self._store.ignored_by("@sheltie:test")), set() ) + + def _rerun_retroactive_account_data_deletion_update(self) -> None: + # Reset the 'all done' flag + self._store.db_pool.updates._all_done = False + + self.get_success( + self._store.db_pool.simple_insert( + "background_updates", + { + "update_name": "delete_account_data_for_deactivated_users", + "progress_json": "{}", + }, + ) + ) + + self.wait_for_background_updates() + + def test_account_data_deleted_retroactively_by_background_update_if_deactivated( + self, + ) -> None: + """ + Tests that a user, who deactivated their account before account data was + deleted automatically upon deactivation, has their account data retroactively + scrubbed by the background update. + """ + + # Request the deactivation of our account + self._deactivate_my_account() + + # Add some account data + # (we do this after the deactivation so that the act of deactivating doesn't + # clear it out. This emulates a user that was deactivated before this was cleared + # upon deactivation.) + self.get_success( + self._store.add_account_data_for_user( + self.user, + AccountDataTypes.DIRECT, + {"@someone:remote": ["!somewhere:remote"]}, + ) + ) + + # Check that the account data is there. + self.assertIsNotNone( + self.get_success( + self._store.get_global_account_data_by_type_for_user( + self.user, + AccountDataTypes.DIRECT, + ) + ), + ) + + # Re-run the retroactive deletion update + self._rerun_retroactive_account_data_deletion_update() + + # Check that the account data was cleared. + self.assertIsNone( + self.get_success( + self._store.get_global_account_data_by_type_for_user( + self.user, + AccountDataTypes.DIRECT, + ) + ), + ) + + def test_account_data_preserved_by_background_update_if_not_deactivated( + self, + ) -> None: + """ + Tests that the background update does not scrub account data for users that have + not been deactivated. + """ + + # Add some account data + # (we do this after the deactivation so that the act of deactivating doesn't + # clear it out. This emulates a user that was deactivated before this was cleared + # upon deactivation.) + self.get_success( + self._store.add_account_data_for_user( + self.user, + AccountDataTypes.DIRECT, + {"@someone:remote": ["!somewhere:remote"]}, + ) + ) + + # Check that the account data is there. + self.assertIsNotNone( + self.get_success( + self._store.get_global_account_data_by_type_for_user( + self.user, + AccountDataTypes.DIRECT, + ) + ), + ) + + # Re-run the retroactive deletion update + self._rerun_retroactive_account_data_deletion_update() + + # Check that the account data was NOT cleared. + self.assertIsNotNone( + self.get_success( + self._store.get_global_account_data_by_type_for_user( + self.user, + AccountDataTypes.DIRECT, + ) + ), + ) From acda9f07c858902ab072d49f6ad3e26df874d06e Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 2 Feb 2022 09:49:31 -0500 Subject: [PATCH 183/474] Revert experimental push rules from #7997. (#11884) Manually reverts the merge from cdbb8e6d6e36e0b6bc36e676d8fe66c96986b399. 
--- changelog.d/11884.misc | 1 + synapse/config/server.py | 13 -- synapse/push/baserules.py | 219 +------------------- synapse/rest/client/push_rule.py | 13 +- synapse/storage/databases/main/push_rule.py | 20 +- 5 files changed, 14 insertions(+), 252 deletions(-) create mode 100644 changelog.d/11884.misc diff --git a/changelog.d/11884.misc b/changelog.d/11884.misc new file mode 100644 index 000000000000..d679d6038fe9 --- /dev/null +++ b/changelog.d/11884.misc @@ -0,0 +1 @@ +Remove experimental changes to the default push rules which were introduced in Synapse 1.19.0 but never enabled. diff --git a/synapse/config/server.py b/synapse/config/server.py index a460cf25b42f..a0a00a9798da 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -656,19 +656,6 @@ def read_config(self, config, **kwargs): False, ) - # List of users trialing the new experimental default push rules. This setting is - # not included in the sample configuration file on purpose as it's a temporary - # hack, so that some users can trial the new defaults without impacting every - # user on the homeserver. - users_new_default_push_rules: list = ( - config.get("users_new_default_push_rules") or [] - ) - if not isinstance(users_new_default_push_rules, list): - raise ConfigError("'users_new_default_push_rules' must be a list") - - # Turn the list into a set to improve lookup speed. - self.users_new_default_push_rules: set = set(users_new_default_push_rules) - # Whitelist of domain names that given next_link parameters must have next_link_domain_whitelist: Optional[List[str]] = config.get( "next_link_domain_whitelist" diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py index 621150699069..910b05c0da32 100644 --- a/synapse/push/baserules.py +++ b/synapse/push/baserules.py @@ -20,15 +20,11 @@ from synapse.push.rulekinds import PRIORITY_CLASS_INVERSE_MAP, PRIORITY_CLASS_MAP -def list_with_base_rules( - rawrules: List[Dict[str, Any]], use_new_defaults: bool = False -) -> List[Dict[str, Any]]: +def list_with_base_rules(rawrules: List[Dict[str, Any]]) -> List[Dict[str, Any]]: """Combine the list of rules set by the user with the default push rules Args: rawrules: The rules the user has modified or set. - use_new_defaults: Whether to use the new experimental default rules when - appending or prepending default rules. Returns: A new list with the rules set by the user combined with the defaults. 
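
For reference while reading this revert: the interleaving in `list_with_base_rules` walks priority classes from highest to lowest using the mapping imported from `synapse.push.rulekinds`. A sketch of that mapping as of this series (values assumed from that module; they are not part of this diff):

    PRIORITY_CLASS_MAP = {
        "underride": 1,
        "sender": 2,
        "room": 3,
        "content": 4,
        "override": 5,
    }
    # Reverse lookup, used when walking from priority class back to rule kind.
    PRIORITY_CLASS_INVERSE_MAP = {v: k for k, v in PRIORITY_CLASS_MAP.items()}
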
@@ -48,9 +44,7 @@ def list_with_base_rules( ruleslist.extend( make_base_prepend_rules( - PRIORITY_CLASS_INVERSE_MAP[current_prio_class], - modified_base_rules, - use_new_defaults, + PRIORITY_CLASS_INVERSE_MAP[current_prio_class], modified_base_rules ) ) @@ -61,7 +55,6 @@ def list_with_base_rules( make_base_append_rules( PRIORITY_CLASS_INVERSE_MAP[current_prio_class], modified_base_rules, - use_new_defaults, ) ) current_prio_class -= 1 @@ -70,7 +63,6 @@ def list_with_base_rules( make_base_prepend_rules( PRIORITY_CLASS_INVERSE_MAP[current_prio_class], modified_base_rules, - use_new_defaults, ) ) @@ -79,18 +71,14 @@ def list_with_base_rules( while current_prio_class > 0: ruleslist.extend( make_base_append_rules( - PRIORITY_CLASS_INVERSE_MAP[current_prio_class], - modified_base_rules, - use_new_defaults, + PRIORITY_CLASS_INVERSE_MAP[current_prio_class], modified_base_rules ) ) current_prio_class -= 1 if current_prio_class > 0: ruleslist.extend( make_base_prepend_rules( - PRIORITY_CLASS_INVERSE_MAP[current_prio_class], - modified_base_rules, - use_new_defaults, + PRIORITY_CLASS_INVERSE_MAP[current_prio_class], modified_base_rules ) ) @@ -98,24 +86,14 @@ def list_with_base_rules( def make_base_append_rules( - kind: str, - modified_base_rules: Dict[str, Dict[str, Any]], - use_new_defaults: bool = False, + kind: str, modified_base_rules: Dict[str, Dict[str, Any]] ) -> List[Dict[str, Any]]: rules = [] if kind == "override": - rules = ( - NEW_APPEND_OVERRIDE_RULES - if use_new_defaults - else BASE_APPEND_OVERRIDE_RULES - ) + rules = BASE_APPEND_OVERRIDE_RULES elif kind == "underride": - rules = ( - NEW_APPEND_UNDERRIDE_RULES - if use_new_defaults - else BASE_APPEND_UNDERRIDE_RULES - ) + rules = BASE_APPEND_UNDERRIDE_RULES elif kind == "content": rules = BASE_APPEND_CONTENT_RULES @@ -134,7 +112,6 @@ def make_base_append_rules( def make_base_prepend_rules( kind: str, modified_base_rules: Dict[str, Dict[str, Any]], - use_new_defaults: bool = False, ) -> List[Dict[str, Any]]: rules = [] @@ -301,135 +278,6 @@ def make_base_prepend_rules( ] -NEW_APPEND_OVERRIDE_RULES = [ - { - "rule_id": "global/override/.m.rule.encrypted", - "conditions": [ - { - "kind": "event_match", - "key": "type", - "pattern": "m.room.encrypted", - "_id": "_encrypted", - } - ], - "actions": ["notify"], - }, - { - "rule_id": "global/override/.m.rule.suppress_notices", - "conditions": [ - { - "kind": "event_match", - "key": "type", - "pattern": "m.room.message", - "_id": "_suppress_notices_type", - }, - { - "kind": "event_match", - "key": "content.msgtype", - "pattern": "m.notice", - "_id": "_suppress_notices", - }, - ], - "actions": [], - }, - { - "rule_id": "global/underride/.m.rule.suppress_edits", - "conditions": [ - { - "kind": "event_match", - "key": "m.relates_to.m.rel_type", - "pattern": "m.replace", - "_id": "_suppress_edits", - } - ], - "actions": [], - }, - { - "rule_id": "global/override/.m.rule.invite_for_me", - "conditions": [ - { - "kind": "event_match", - "key": "type", - "pattern": "m.room.member", - "_id": "_member", - }, - { - "kind": "event_match", - "key": "content.membership", - "pattern": "invite", - "_id": "_invite_member", - }, - {"kind": "event_match", "key": "state_key", "pattern_type": "user_id"}, - ], - "actions": ["notify", {"set_tweak": "sound", "value": "default"}], - }, - { - "rule_id": "global/override/.m.rule.contains_display_name", - "conditions": [{"kind": "contains_display_name"}], - "actions": [ - "notify", - {"set_tweak": "sound", "value": "default"}, - {"set_tweak": "highlight"}, - ], - }, - { 
- "rule_id": "global/override/.m.rule.tombstone", - "conditions": [ - { - "kind": "event_match", - "key": "type", - "pattern": "m.room.tombstone", - "_id": "_tombstone", - }, - { - "kind": "event_match", - "key": "state_key", - "pattern": "", - "_id": "_tombstone_statekey", - }, - ], - "actions": [ - "notify", - {"set_tweak": "sound", "value": "default"}, - {"set_tweak": "highlight"}, - ], - }, - { - "rule_id": "global/override/.m.rule.roomnotif", - "conditions": [ - { - "kind": "event_match", - "key": "content.body", - "pattern": "@room", - "_id": "_roomnotif_content", - }, - { - "kind": "sender_notification_permission", - "key": "room", - "_id": "_roomnotif_pl", - }, - ], - "actions": [ - "notify", - {"set_tweak": "highlight"}, - {"set_tweak": "sound", "value": "default"}, - ], - }, - { - "rule_id": "global/override/.m.rule.call", - "conditions": [ - { - "kind": "event_match", - "key": "type", - "pattern": "m.call.invite", - "_id": "_call", - } - ], - "actions": ["notify", {"set_tweak": "sound", "value": "ring"}], - }, -] - - BASE_APPEND_UNDERRIDE_RULES = [ { "rule_id": "global/underride/.m.rule.call", @@ -538,36 +386,6 @@ def make_base_prepend_rules( ] -NEW_APPEND_UNDERRIDE_RULES = [ - { - "rule_id": "global/underride/.m.rule.room_one_to_one", - "conditions": [ - {"kind": "room_member_count", "is": "2", "_id": "member_count"}, - { - "kind": "event_match", - "key": "content.body", - "pattern": "*", - "_id": "body", - }, - ], - "actions": ["notify", {"set_tweak": "sound", "value": "default"}], - }, - { - "rule_id": "global/underride/.m.rule.message", - "conditions": [ - { - "kind": "event_match", - "key": "content.body", - "pattern": "*", - "_id": "body", - }, - ], - "actions": ["notify"], - "enabled": False, - }, -] - - BASE_RULE_IDS = set() for r in BASE_APPEND_CONTENT_RULES: @@ -589,26 +407,3 @@ def make_base_prepend_rules( r["priority_class"] = PRIORITY_CLASS_MAP["underride"] r["default"] = True BASE_RULE_IDS.add(r["rule_id"]) - - -NEW_RULE_IDS = set() - -for r in BASE_APPEND_CONTENT_RULES: - r["priority_class"] = PRIORITY_CLASS_MAP["content"] - r["default"] = True - NEW_RULE_IDS.add(r["rule_id"]) - -for r in BASE_PREPEND_OVERRIDE_RULES: - r["priority_class"] = PRIORITY_CLASS_MAP["override"] - r["default"] = True - NEW_RULE_IDS.add(r["rule_id"]) - -for r in NEW_APPEND_OVERRIDE_RULES: - r["priority_class"] = PRIORITY_CLASS_MAP["override"] - r["default"] = True - NEW_RULE_IDS.add(r["rule_id"]) - -for r in NEW_APPEND_UNDERRIDE_RULES: - r["priority_class"] = PRIORITY_CLASS_MAP["underride"] - r["default"] = True - NEW_RULE_IDS.add(r["rule_id"]) diff --git a/synapse/rest/client/push_rule.py b/synapse/rest/client/push_rule.py index 6f796d5e5096..8fe75bd750bd 100644 --- a/synapse/rest/client/push_rule.py +++ b/synapse/rest/client/push_rule.py @@ -29,7 +29,7 @@ parse_string, ) from synapse.http.site import SynapseRequest -from synapse.push.baserules import BASE_RULE_IDS, NEW_RULE_IDS +from synapse.push.baserules import BASE_RULE_IDS from synapse.push.clientformat import format_push_rules_for_user from synapse.push.rulekinds import PRIORITY_CLASS_MAP from synapse.rest.client._base import client_patterns @@ -61,10 +61,6 @@ def __init__(self, hs: "HomeServer"): self.notifier = hs.get_notifier() self._is_worker = hs.config.worker.worker_app is not None - self._users_new_default_push_rules = ( - hs.config.server.users_new_default_push_rules - ) - async def on_PUT(self, request: SynapseRequest, path: str) -> Tuple[int, JsonDict]: if self._is_worker: raise Exception("Cannot handle PUT /push_rules on 
worker") @@ -217,12 +213,7 @@ async def set_rule_attr( rule_id = spec.rule_id is_default_rule = rule_id.startswith(".") if is_default_rule: - if user_id in self._users_new_default_push_rules: - rule_ids = NEW_RULE_IDS - else: - rule_ids = BASE_RULE_IDS - - if namespaced_rule_id not in rule_ids: + if namespaced_rule_id not in BASE_RULE_IDS: raise SynapseError(404, "Unknown rule %r" % (namespaced_rule_id,)) await self.store.set_push_rule_actions( user_id, namespaced_rule_id, actions, is_default_rule diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index e01c94930aed..92539f5d41b1 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -42,7 +42,7 @@ logger = logging.getLogger(__name__) -def _load_rules(rawrules, enabled_map, use_new_defaults=False): +def _load_rules(rawrules, enabled_map): ruleslist = [] for rawrule in rawrules: rule = dict(rawrule) @@ -52,7 +52,7 @@ def _load_rules(rawrules, enabled_map, use_new_defaults=False): ruleslist.append(rule) # We're going to be mutating this a lot, so do a deep copy - rules = list(list_with_base_rules(ruleslist, use_new_defaults)) + rules = list(list_with_base_rules(ruleslist)) for i, rule in enumerate(rules): rule_id = rule["rule_id"] @@ -112,10 +112,6 @@ def __init__( prefilled_cache=push_rules_prefill, ) - self._users_new_default_push_rules = ( - hs.config.server.users_new_default_push_rules - ) - @abc.abstractmethod def get_max_push_rules_stream_id(self): """Get the position of the push rules stream. @@ -145,9 +141,7 @@ async def get_push_rules_for_user(self, user_id): enabled_map = await self.get_push_rules_enabled_for_user(user_id) - use_new_defaults = user_id in self._users_new_default_push_rules - - return _load_rules(rows, enabled_map, use_new_defaults) + return _load_rules(rows, enabled_map) @cached(max_entries=5000) async def get_push_rules_enabled_for_user(self, user_id) -> Dict[str, bool]: @@ -206,13 +200,7 @@ async def bulk_get_push_rules(self, user_ids): enabled_map_by_user = await self.bulk_get_push_rules_enabled(user_ids) for user_id, rules in results.items(): - use_new_defaults = user_id in self._users_new_default_push_rules - - results[user_id] = _load_rules( - rules, - enabled_map_by_user.get(user_id, {}), - use_new_defaults, - ) + results[user_id] = _load_rules(rules, enabled_map_by_user.get(user_id, {})) return results From f510fba4ba9d0ebf0c062f0a561da19b2ea0c2d6 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 2 Feb 2022 15:11:23 +0000 Subject: [PATCH 184/474] Describe `prune_unread_entries` in docstrings (#11876) Should have been caught in #10826. --- changelog.d/11876.misc | 1 + synapse/util/caches/deferred_cache.py | 5 +++-- synapse/util/caches/descriptors.py | 8 ++++++++ synapse/util/caches/lrucache.py | 6 ++++++ 4 files changed, 18 insertions(+), 2 deletions(-) create mode 100644 changelog.d/11876.misc diff --git a/changelog.d/11876.misc b/changelog.d/11876.misc new file mode 100644 index 000000000000..09f2d0b67fd5 --- /dev/null +++ b/changelog.d/11876.misc @@ -0,0 +1 @@ +Improve internal docstrings in `synapse.util.caches`. 
diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py index 377c9a282a69..1d6ec22191a0 100644 --- a/synapse/util/caches/deferred_cache.py +++ b/synapse/util/caches/deferred_cache.py @@ -81,13 +81,14 @@ def __init__( Args: name: The name of the cache max_entries: Maximum amount of entries that the cache will hold - keylen: The length of the tuple used as the cache key. Ignored unless - `tree` is True. tree: Use a TreeCache instead of a dict as the underlying cache type iterable: If True, count each item in the cached object as an entry, rather than each cached object apply_cache_factor_from_config: Whether cache factors specified in the config file affect `max_entries` + prune_unread_entries: If True, cache entries that haven't been read recently + will be evicted from the cache in the background. Set to False to + opt-out of this behaviour. """ cache_type = TreeCache if tree else dict diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 375cd443f14e..df4fb156c2d0 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -254,9 +254,17 @@ def foo(self, key, cache_context): return r1 + r2 Args: + orig: + max_entries: num_args: number of positional arguments (excluding ``self`` and ``cache_context``) to use as cache keys. Defaults to all named args of the function. + tree: + cache_context: + iterable: + prune_unread_entries: If True, cache entries that haven't been read recently + will be evicted from the cache in the background. Set to False to opt-out + of this behaviour. """ def __init__( diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 3f11a2f9dd5c..7548b38548bd 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -340,6 +340,12 @@ def __init__( apply_cache_factor_from_config (bool): If true, `max_size` will be multiplied by a cache factor derived from the homeserver config + + clock: + + prune_unread_entries: If True, cache entries that haven't been read recently + will be evicted from the cache in the background. Set to False to + opt-out of this behaviour. """ # Default `clock` to something sensible. Note that we rename it to # `real_clock` so that mypy doesn't think its still `Optional`. From 23a698f5e6b69ae6b13afd439a6d4e867d842046 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 2 Feb 2022 15:59:33 +0000 Subject: [PATCH 185/474] Disable coverage calculation for olddeps build. (#11888) We disabled coverage calculation for most of CI in #11017, but the olddeps build uses a separate script and got forgotten. --- .ci/scripts/test_old_deps.sh | 2 +- changelog.d/11888.misc | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11888.misc diff --git a/.ci/scripts/test_old_deps.sh b/.ci/scripts/test_old_deps.sh index a54aa86fbc0f..54ec3c8b0dce 100755 --- a/.ci/scripts/test_old_deps.sh +++ b/.ci/scripts/test_old_deps.sh @@ -15,4 +15,4 @@ export LANG="C.UTF-8" # Prevent virtualenv from auto-updating pip to an incompatible version export VIRTUALENV_NO_DOWNLOAD=1 -exec tox -e py3-old,combine +exec tox -e py3-old diff --git a/changelog.d/11888.misc b/changelog.d/11888.misc new file mode 100644 index 000000000000..db1c9b8bbd0b --- /dev/null +++ b/changelog.d/11888.misc @@ -0,0 +1 @@ +Disable coverage calculation for olddeps build. 
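
A usage note on the `prune_unread_entries` docstrings added in the patch above: the flag is on by default, and individual caches opt out. A minimal sketch of opting a cached store method out of background eviction; the store class, method name and cache size here are illustrative, not from the patch:

    from synapse.util.caches.descriptors import cached

    class ExampleStore:
        @cached(max_entries=5000, prune_unread_entries=False)
        async def get_thing(self, thing_id: str) -> str:
            # With pruning disabled, entries are evicted only when the cache
            # exceeds max_entries, not merely for having gone unread.
            ...
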
From dd7f825118f1b8f2789eee9498912ce362a79224 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 2 Feb 2022 16:25:17 +0000 Subject: [PATCH 186/474] Fix losing incoming EDUs if debug logging enabled (#11890) * Fix losing incoming EDUs if debug logging enabled Fixes #11889. Homeservers should only be affected if the `synapse.8631_debug` logger was enabled for DEBUG mode. I am not sure if this merits a bugfix release: I think the logging can be disabled in config if anyone is affected? But it is still pretty bad. --- changelog.d/11890.bugfix | 1 + synapse/federation/transport/server/federation.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/11890.bugfix diff --git a/changelog.d/11890.bugfix b/changelog.d/11890.bugfix new file mode 100644 index 000000000000..6b696692e332 --- /dev/null +++ b/changelog.d/11890.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.51.0rc1 where incoming federation transactions containing at least one EDU would be dropped if debug logging was enabled for `synapse.8631_debug`. \ No newline at end of file diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py index 9c1ad5851f69..d86dfede4e89 100644 --- a/synapse/federation/transport/server/federation.py +++ b/synapse/federation/transport/server/federation.py @@ -109,11 +109,11 @@ async def on_PUT( ) if issue_8631_logger.isEnabledFor(logging.DEBUG): - DEVICE_UPDATE_EDUS = {"m.device_list_update", "m.signing_key_update"} + DEVICE_UPDATE_EDUS = ["m.device_list_update", "m.signing_key_update"] device_list_updates = [ edu.content for edu in transaction_data.get("edus", []) - if edu.edu_type in DEVICE_UPDATE_EDUS + if edu.get("edu_type") in DEVICE_UPDATE_EDUS ] if device_list_updates: issue_8631_logger.debug( From 41818cda1fcb086fc41cab7b60da46954ec1dd4a Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 2 Feb 2022 16:51:00 +0000 Subject: [PATCH 187/474] Fix type errors introduced by new annotations in the Prometheus Client library. (#11832) Co-authored-by: David Robertson --- changelog.d/11832.misc | 1 + synapse/metrics/__init__.py | 10 +++++++++- synapse/python_dependencies.py | 3 +-- 3 files changed, 11 insertions(+), 3 deletions(-) create mode 100644 changelog.d/11832.misc diff --git a/changelog.d/11832.misc b/changelog.d/11832.misc new file mode 100644 index 000000000000..5ff117d93326 --- /dev/null +++ b/changelog.d/11832.misc @@ -0,0 +1 @@ +Fix type errors introduced by new annotations in the Prometheus Client library. \ No newline at end of file diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 9e6c1b2f3b54..cca084c18c21 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -30,6 +30,7 @@ Type, TypeVar, Union, + cast, ) import attr @@ -60,7 +61,7 @@ HAVE_PROC_SELF_STAT = os.path.exists("/proc/self/stat") -class RegistryProxy: +class _RegistryProxy: @staticmethod def collect() -> Iterable[Metric]: for metric in REGISTRY.collect(): @@ -68,6 +69,13 @@ def collect() -> Iterable[Metric]: yield metric +# A little bit nasty, but collect() above is static so a Protocol doesn't work. +# _RegistryProxy matches the signature of a CollectorRegistry instance enough +# for it to be usable in the contexts in which we use it. +# TODO Do something nicer about this. 
+RegistryProxy = cast(CollectorRegistry, _RegistryProxy) + + @attr.s(slots=True, hash=True, auto_attribs=True) class LaterGauge: diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 80786464c2fb..22b4606ae0ea 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -76,8 +76,7 @@ "msgpack>=0.5.2", "phonenumbers>=8.2.0", # we use GaugeHistogramMetric, which was added in prom-client 0.4.0. - # 0.13.0 has an incorrect type annotation, see #11832. - "prometheus_client>=0.4.0,<0.13.0", + "prometheus_client>=0.4.0", # we use `order`, which arrived in attrs 19.2.0. # Note: 21.1.0 broke `/sync`, see #9936 "attrs>=19.2.0,!=21.1.0", From a8da0469070771b0b63c97a96f3221afea2aa2e7 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 2 Feb 2022 12:24:07 -0500 Subject: [PATCH 188/474] Invalidate the get_users_in_room{_with_profile} caches only when necessary. (#11878) The get_users_in_room and get_users_in_room_with_profiles are now only invalidated when the membership of a room changes, instead of during any state change in the room. --- changelog.d/11878.misc | 1 + synapse/storage/_base.py | 11 ++++++++--- synapse/storage/databases/main/cache.py | 16 +++++++++++----- 3 files changed, 20 insertions(+), 8 deletions(-) create mode 100644 changelog.d/11878.misc diff --git a/changelog.d/11878.misc b/changelog.d/11878.misc new file mode 100644 index 000000000000..74915a47dd25 --- /dev/null +++ b/changelog.d/11878.misc @@ -0,0 +1 @@ +Do not needlessly clear the `get_users_in_room` and `get_users_in_room_with_profiles` caches when any room state changes. diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 7967011afdc0..8df80664a27e 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -57,7 +57,7 @@ def process_replication_rows( pass def _invalidate_state_caches( - self, room_id: str, members_changed: Iterable[str] + self, room_id: str, members_changed: Collection[str] ) -> None: """Invalidates caches that are based on the current state, but does not stream invalidations down replication. @@ -66,11 +66,16 @@ def _invalidate_state_caches( room_id: Room where state changed members_changed: The user_ids of members that have changed """ + # If there were any membership changes, purge the appropriate caches. for host in {get_domain_from_id(u) for u in members_changed}: self._attempt_to_invalidate_cache("is_host_joined", (room_id, host)) + if members_changed: + self._attempt_to_invalidate_cache("get_users_in_room", (room_id,)) + self._attempt_to_invalidate_cache( + "get_users_in_room_with_profiles", (room_id,) + ) - self._attempt_to_invalidate_cache("get_users_in_room", (room_id,)) - self._attempt_to_invalidate_cache("get_users_in_room_with_profiles", (room_id,)) + # Purge other caches based on room state. 
self._attempt_to_invalidate_cache("get_room_summary", (room_id,)) self._attempt_to_invalidate_cache("get_current_state_ids", (room_id,)) diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index 0024348067d5..c428dd5596af 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -15,7 +15,7 @@ import itertools import logging -from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple +from typing import TYPE_CHECKING, Any, Collection, Iterable, List, Optional, Tuple from synapse.api.constants import EventTypes from synapse.replication.tcp.streams import BackfillStream, CachesStream @@ -25,7 +25,11 @@ EventsStreamEventRow, ) from synapse.storage._base import SQLBaseStore -from synapse.storage.database import DatabasePool, LoggingDatabaseConnection +from synapse.storage.database import ( + DatabasePool, + LoggingDatabaseConnection, + LoggingTransaction, +) from synapse.storage.engines import PostgresEngine from synapse.util.iterutils import batch_iter @@ -236,7 +240,9 @@ def _invalidate_all_cache_and_stream(self, txn, cache_func): txn.call_after(cache_func.invalidate_all) self._send_invalidation_to_replication(txn, cache_func.__name__, None) - def _invalidate_state_caches_and_stream(self, txn, room_id, members_changed): + def _invalidate_state_caches_and_stream( + self, txn: LoggingTransaction, room_id: str, members_changed: Collection[str] + ) -> None: """Special case invalidation of caches based on current state. We special case this so that we can batch the cache invalidations into a @@ -244,8 +250,8 @@ def _invalidate_state_caches_and_stream(self, txn, room_id, members_changed): Args: txn - room_id (str): Room where state changed - members_changed (iterable[str]): The user_ids of members that have changed + room_id: Room where state changed + members_changed: The user_ids of members that have changed """ txn.call_after(self._invalidate_state_caches, room_id, members_changed) From 31b554c2976612ce5fd983517615906261c39cea Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 2 Feb 2022 22:41:57 +0000 Subject: [PATCH 189/474] Fixes for opentracing scopes (#11869) `start_active_span` was inconsistent as to whether it would activate the span immediately, or wait for `scope.__enter__` to happen (it depended on whether the current logcontext already had an associated scope). The inconsistency was rather confusing if you were hoping to set up a couple of separate spans before activating either. Looking at the other implementations of opentracing `ScopeManager`s, the intention is that it *should* be activated immediately, as the name implies. Indeed, the idea is that you don't have to use the scope as a contextmanager at all - you can just call `.close` on the result. Hence, our cleanup has to happen in `.close` rather than `.__exit__`. So, the main change here is to ensure that `start_active_span` does activate the span, and that `scope.close()` does close the scope. We also add some tests, which requires a `tracer` param so that we don't have to rely on the global variable in unit tests. 
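[Editor's note] A minimal sketch of the semantics described above, assuming tracing is actually enabled (otherwise `start_active_span` returns a no-op context manager); the span and context names are illustrative:

```python
# Sketch: start_active_span activates the span immediately, and
# scope.close() finishes the span and restores the previous scope --
# no `with` block is required.
from synapse.logging.context import LoggingContext
from synapse.logging.opentracing import start_active_span

with LoggingContext("request"):
    outer = start_active_span("outer")  # "outer" becomes the active span at once
    inner = start_active_span("inner")  # child of "outer", now active
    inner.close()                       # finishes "inner"; "outer" is active again
    outer.close()                       # finishes "outer"
```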
--- changelog.d/11869.misc | 1 + synapse/logging/opentracing.py | 29 +++- synapse/logging/scopecontextmanager.py | 76 ++++++---- tests/logging/test_opentracing.py | 184 +++++++++++++++++++++++++ 4 files changed, 255 insertions(+), 35 deletions(-) create mode 100644 changelog.d/11869.misc create mode 100644 tests/logging/test_opentracing.py diff --git a/changelog.d/11869.misc b/changelog.d/11869.misc new file mode 100644 index 000000000000..054fbf610140 --- /dev/null +++ b/changelog.d/11869.misc @@ -0,0 +1 @@ +Ensure that `opentracing` scopes are activated and closed at the right time. diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index b240d2d21da2..d25f25ecb5a8 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -443,10 +443,14 @@ def start_active_span( start_time=None, ignore_active_span=False, finish_on_close=True, + *, + tracer=None, ): - """Starts an active opentracing span. Note, the scope doesn't become active - until it has been entered, however, the span starts from the time this - message is called. + """Starts an active opentracing span. + + Records the start time for the span, and sets it as the "active span" in the + scope manager. + Args: See opentracing.tracer Returns: @@ -456,7 +460,11 @@ def start_active_span( if opentracing is None: return noop_context_manager() # type: ignore[unreachable] - return opentracing.tracer.start_active_span( + if tracer is None: + # use the global tracer by default + tracer = opentracing.tracer + + return tracer.start_active_span( operation_name, child_of=child_of, references=references, @@ -468,7 +476,11 @@ def start_active_span( def start_active_span_follows_from( - operation_name: str, contexts: Collection, inherit_force_tracing=False + operation_name: str, + contexts: Collection, + *, + inherit_force_tracing=False, + tracer=None, ): """Starts an active opentracing span, with additional references to previous spans @@ -477,12 +489,17 @@ def start_active_span_follows_from( contexts: the previous spans to inherit from inherit_force_tracing: if set, and any of the previous contexts have had tracing forced, the new span will also have tracing forced. + tracer: override the opentracing tracer. By default the global tracer is used. """ if opentracing is None: return noop_context_manager() # type: ignore[unreachable] references = [opentracing.follows_from(context) for context in contexts] - scope = start_active_span(operation_name, references=references) + scope = start_active_span( + operation_name, + references=references, + tracer=tracer, + ) if inherit_force_tracing and any( is_context_forced_tracing(ctx) for ctx in contexts diff --git a/synapse/logging/scopecontextmanager.py b/synapse/logging/scopecontextmanager.py index db8ca2c0497b..d57e7c5324f8 100644 --- a/synapse/logging/scopecontextmanager.py +++ b/synapse/logging/scopecontextmanager.py @@ -28,8 +28,9 @@ class LogContextScopeManager(ScopeManager): The LogContextScopeManager tracks the active scope in opentracing by using the log contexts which are native to synapse. This is so that the basic opentracing api can be used across twisted defereds. - (I would love to break logcontexts and this into an OS package. but - let's wait for twisted's contexts to be released.) + + It would be nice just to use opentracing's ContextVarsScopeManager, + but currently that doesn't work due to https://twistedmatrix.com/trac/ticket/10301. 
""" def __init__(self, config): @@ -65,29 +66,45 @@ def activate(self, span, finish_on_close): Scope.close() on the returned instance. """ - enter_logcontext = False ctx = current_context() if not ctx: - # We don't want this scope to affect. logger.error("Tried to activate scope outside of loggingcontext") return Scope(None, span) # type: ignore[arg-type] - elif ctx.scope is not None: - # We want the logging scope to look exactly the same so we give it - # a blank suffix + + if ctx.scope is not None: + # start a new logging context as a child of the existing one. + # Doing so -- rather than updating the existing logcontext -- means that + # creating several concurrent spans under the same logcontext works + # correctly. ctx = nested_logging_context("") enter_logcontext = True + else: + # if there is no span currently associated with the current logcontext, we + # just store the scope in it. + # + # This feels a bit dubious, but it does hack around a problem where a + # span outlasts its parent logcontext (which would otherwise lead to + # "Re-starting finished log context" errors). + enter_logcontext = False scope = _LogContextScope(self, span, ctx, enter_logcontext, finish_on_close) ctx.scope = scope + if enter_logcontext: + ctx.__enter__() + return scope class _LogContextScope(Scope): """ - A custom opentracing scope. The only significant difference is that it will - close the log context it's related to if the logcontext was created specifically - for this scope. + A custom opentracing scope, associated with a LogContext + + * filters out _DefGen_Return exceptions which arise from calling + `defer.returnValue` in Twisted code + + * When the scope is closed, the logcontext's active scope is reset to None. + and - if enter_logcontext was set - the logcontext is finished too. """ def __init__(self, manager, span, logcontext, enter_logcontext, finish_on_close): @@ -101,8 +118,7 @@ def __init__(self, manager, span, logcontext, enter_logcontext, finish_on_close) logcontext (LogContext): the logcontext to which this scope is attached. 
enter_logcontext (Boolean): - if True the logcontext will be entered and exited when the scope - is entered and exited respectively + if True the logcontext will be exited when the scope is finished finish_on_close (Boolean): if True finish the span when the scope is closed """ @@ -111,26 +127,28 @@ def __init__(self, manager, span, logcontext, enter_logcontext, finish_on_close) self._finish_on_close = finish_on_close self._enter_logcontext = enter_logcontext - def __enter__(self): - if self._enter_logcontext: - self.logcontext.__enter__() + def __exit__(self, exc_type, value, traceback): + if exc_type == twisted.internet.defer._DefGen_Return: + # filter out defer.returnValue() calls + exc_type = value = traceback = None + super().__exit__(exc_type, value, traceback) - return self - - def __exit__(self, type, value, traceback): - if type == twisted.internet.defer._DefGen_Return: - super().__exit__(None, None, None) - else: - super().__exit__(type, value, traceback) - if self._enter_logcontext: - self.logcontext.__exit__(type, value, traceback) - else: # the logcontext existed before the creation of the scope - self.logcontext.scope = None + def __str__(self): + return f"Scope<{self.span}>" def close(self): - if self.manager.active is not self: - logger.error("Tried to close a non-active scope!") - return + active_scope = self.manager.active + if active_scope is not self: + logger.error( + "Closing scope %s which is not the currently-active one %s", + self, + active_scope, + ) if self._finish_on_close: self.span.finish() + + self.logcontext.scope = None + + if self._enter_logcontext: + self.logcontext.__exit__(None, None, None) diff --git a/tests/logging/test_opentracing.py b/tests/logging/test_opentracing.py new file mode 100644 index 000000000000..e430941d27ce --- /dev/null +++ b/tests/logging/test_opentracing.py @@ -0,0 +1,184 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from twisted.internet import defer +from twisted.test.proto_helpers import MemoryReactorClock + +from synapse.logging.context import ( + LoggingContext, + make_deferred_yieldable, + run_in_background, +) +from synapse.logging.opentracing import ( + start_active_span, + start_active_span_follows_from, +) +from synapse.util import Clock + +try: + from synapse.logging.scopecontextmanager import LogContextScopeManager +except ImportError: + LogContextScopeManager = None # type: ignore + +try: + import jaeger_client +except ImportError: + jaeger_client = None # type: ignore + +from tests.unittest import TestCase + + +class LogContextScopeManagerTestCase(TestCase): + if LogContextScopeManager is None: + skip = "Requires opentracing" # type: ignore[unreachable] + if jaeger_client is None: + skip = "Requires jaeger_client" # type: ignore[unreachable] + + def setUp(self) -> None: + # since this is a unit test, we don't really want to mess around with the + # global variables that power opentracing. We create our own tracer instance + # and test with it. 
+ + scope_manager = LogContextScopeManager({}) + config = jaeger_client.config.Config( + config={}, service_name="test", scope_manager=scope_manager + ) + + self._reporter = jaeger_client.reporter.InMemoryReporter() + + self._tracer = config.create_tracer( + sampler=jaeger_client.ConstSampler(True), + reporter=self._reporter, + ) + + def test_start_active_span(self) -> None: + # the scope manager assumes a logging context of some sort. + with LoggingContext("root context"): + self.assertIsNone(self._tracer.active_span) + + # start_active_span should start and activate a span. + scope = start_active_span("span", tracer=self._tracer) + span = scope.span + self.assertEqual(self._tracer.active_span, span) + self.assertIsNotNone(span.start_time) + + # entering the context doesn't actually do a whole lot. + with scope as ctx: + self.assertIs(ctx, scope) + self.assertEqual(self._tracer.active_span, span) + + # ... but leaving it unsets the active span, and finishes the span. + self.assertIsNone(self._tracer.active_span) + self.assertIsNotNone(span.end_time) + + # the span should have been reported + self.assertEqual(self._reporter.get_spans(), [span]) + + def test_nested_spans(self) -> None: + """Starting two spans off inside each other should work""" + + with LoggingContext("root context"): + with start_active_span("root span", tracer=self._tracer) as root_scope: + self.assertEqual(self._tracer.active_span, root_scope.span) + + scope1 = start_active_span( + "child1", + tracer=self._tracer, + ) + self.assertEqual( + self._tracer.active_span, scope1.span, "child1 was not activated" + ) + self.assertEqual( + scope1.span.context.parent_id, root_scope.span.context.span_id + ) + + scope2 = start_active_span_follows_from( + "child2", + contexts=(scope1,), + tracer=self._tracer, + ) + self.assertEqual(self._tracer.active_span, scope2.span) + self.assertEqual( + scope2.span.context.parent_id, scope1.span.context.span_id + ) + + with scope1, scope2: + pass + + # the root scope should be restored + self.assertEqual(self._tracer.active_span, root_scope.span) + self.assertIsNotNone(scope2.span.end_time) + self.assertIsNotNone(scope1.span.end_time) + + self.assertIsNone(self._tracer.active_span) + + # the spans should be reported in order of their finishing. + self.assertEqual( + self._reporter.get_spans(), [scope2.span, scope1.span, root_scope.span] + ) + + def test_overlapping_spans(self) -> None: + """Overlapping spans which are not neatly nested should work""" + reactor = MemoryReactorClock() + clock = Clock(reactor) + + scopes = [] + + async def task(i: int): + scope = start_active_span( + f"task{i}", + tracer=self._tracer, + ) + scopes.append(scope) + + self.assertEqual(self._tracer.active_span, scope.span) + await clock.sleep(4) + self.assertEqual(self._tracer.active_span, scope.span) + scope.close() + + async def root(): + with start_active_span("root span", tracer=self._tracer) as root_scope: + self.assertEqual(self._tracer.active_span, root_scope.span) + scopes.append(root_scope) + + d1 = run_in_background(task, 1) + await clock.sleep(2) + d2 = run_in_background(task, 2) + + # because we did run_in_background, the active span should still be the + # root. 
+ self.assertEqual(self._tracer.active_span, root_scope.span) + + await make_deferred_yieldable( + defer.gatherResults([d1, d2], consumeErrors=True) + ) + + self.assertEqual(self._tracer.active_span, root_scope.span) + + with LoggingContext("root context"): + # start the test off + d1 = defer.ensureDeferred(root()) + + # let the tasks complete + reactor.pump((2,) * 8) + + self.successResultOf(d1) + self.assertIsNone(self._tracer.active_span) + + # the spans should be reported in order of their finishing: task 1, task 2, + # root. + self.assertEqual( + self._reporter.get_spans(), + [scopes[1].span, scopes[2].span, scopes[0].span], + ) From 964f5b9324f7d18c70a45e6d4049eba879778043 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 3 Feb 2022 12:29:16 +0000 Subject: [PATCH 190/474] Improve opentracing for federation requests (#11870) The idea here is to set the parent span for incoming federation requests to the *outgoing* span on the other end. That means that you can see (most of) the full end-to-end flow when you have a process that includes federation requests. However, in order not to lose information, we still want a link to the `incoming-federation-request` span from the servlet, so we have to create another span to do exactly that. --- changelog.d/11870.misc | 1 + synapse/federation/transport/server/_base.py | 67 ++++++++++++++------ synapse/logging/opentracing.py | 12 ++++ 3 files changed, 61 insertions(+), 19 deletions(-) create mode 100644 changelog.d/11870.misc diff --git a/changelog.d/11870.misc b/changelog.d/11870.misc new file mode 100644 index 000000000000..2cb0efdb456c --- /dev/null +++ b/changelog.d/11870.misc @@ -0,0 +1 @@ +Improve opentracing for incoming federation requests. diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py index 2ca7c0583501..dff2b68359b2 100644 --- a/synapse/federation/transport/server/_base.py +++ b/synapse/federation/transport/server/_base.py @@ -15,6 +15,7 @@ import functools import logging import re +import time from typing import TYPE_CHECKING, Any, Awaitable, Callable, Optional, Tuple, cast from synapse.api.errors import Codes, FederationDeniedError, SynapseError @@ -24,8 +25,10 @@ from synapse.http.site import SynapseRequest from synapse.logging.context import run_in_background from synapse.logging.opentracing import ( + active_span, set_tag, span_context_from_request, + start_active_span, start_active_span_follows_from, whitelisted_homeserver, ) @@ -265,9 +268,10 @@ async def new_func( content = parse_json_object_from_request(request) try: - origin: Optional[str] = await authenticator.authenticate_request( - request, content - ) + with start_active_span("authenticate_request"): + origin: Optional[str] = await authenticator.authenticate_request( + request, content + ) except NoAuthenticationError: origin = None if self.REQUIRE_AUTH: @@ -282,32 +286,57 @@ async def new_func( # update the active opentracing span with the authenticated entity set_tag("authenticated_entity", origin) - # if the origin is authenticated and whitelisted, link to its span context + # if the origin is authenticated and whitelisted, use its span context + # as the parent. 
context = None if origin and whitelisted_homeserver(origin): context = span_context_from_request(request) - scope = start_active_span_follows_from( - "incoming-federation-request", contexts=(context,) if context else () - ) + if context: + servlet_span = active_span() + # a scope which uses the origin's context as a parent + processing_start_time = time.time() + scope = start_active_span_follows_from( + "incoming-federation-request", + child_of=context, + contexts=(servlet_span,), + start_time=processing_start_time, + ) - with scope: - if origin and self.RATELIMIT: - with ratelimiter.ratelimit(origin) as d: - await d - if request._disconnected: - logger.warning( - "client disconnected before we started processing " - "request" + else: + # just use our context as a parent + scope = start_active_span( + "incoming-federation-request", + ) + + try: + with scope: + if origin and self.RATELIMIT: + with ratelimiter.ratelimit(origin) as d: + await d + if request._disconnected: + logger.warning( + "client disconnected before we started processing " + "request" + ) + return None + response = await func( + origin, content, request.args, *args, **kwargs ) - return None + else: response = await func( origin, content, request.args, *args, **kwargs ) - else: - response = await func( - origin, content, request.args, *args, **kwargs + finally: + # if we used the origin's context as the parent, add a new span using + # the servlet span as a parent, so that we have a link + if context: + scope2 = start_active_span_follows_from( + "process-federation_request", + contexts=(scope.span,), + start_time=processing_start_time, ) + scope2.close() return response diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index d25f25ecb5a8..3ebed5c16130 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -478,6 +478,8 @@ def start_active_span( def start_active_span_follows_from( operation_name: str, contexts: Collection, + child_of=None, + start_time: Optional[float] = None, *, inherit_force_tracing=False, tracer=None, @@ -487,6 +489,14 @@ def start_active_span_follows_from( Args: operation_name: name of the operation represented by the new span contexts: the previous spans to inherit from + + child_of: optionally override the parent span. If unset, the currently active + span will be the parent. (If there is no currently active span, the first + span in `contexts` will be the parent.) + + start_time: optional override for the start time of the created span. Seconds + since the epoch. + inherit_force_tracing: if set, and any of the previous contexts have had tracing forced, the new span will also have tracing forced. tracer: override the opentracing tracer. By default the global tracer is used. @@ -497,7 +507,9 @@ def start_active_span_follows_from( references = [opentracing.follows_from(context) for context in contexts] scope = start_active_span( operation_name, + child_of=child_of, references=references, + start_time=start_time, tracer=tracer, ) From 833247553f40d7725f252b081016f8b40a513047 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Thu, 3 Feb 2022 13:09:22 +0000 Subject: [PATCH 191/474] Allow specifying the application service-specific `user_id` parameter in the `join` test helper. 
(#11616) --- changelog.d/11615.misc | 2 +- changelog.d/11616.misc | 1 + tests/rest/client/utils.py | 31 ++++++++++++++++++++++++++----- 3 files changed, 28 insertions(+), 6 deletions(-) create mode 100644 changelog.d/11616.misc diff --git a/changelog.d/11615.misc b/changelog.d/11615.misc index 0aa953650493..bbc551698d45 100644 --- a/changelog.d/11615.misc +++ b/changelog.d/11615.misc @@ -1 +1 @@ -Expose the registered device ID from the `register_appservice_user` test helper. \ No newline at end of file +Enhance user registration test helpers to make them more useful for tests involving Application Services and devices. diff --git a/changelog.d/11616.misc b/changelog.d/11616.misc new file mode 100644 index 000000000000..bbc551698d45 --- /dev/null +++ b/changelog.d/11616.misc @@ -0,0 +1 @@ +Enhance user registration test helpers to make them more useful for tests involving Application Services and devices. diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py index 842438358021..1c0cb0cf4f34 100644 --- a/tests/rest/client/utils.py +++ b/tests/rest/client/utils.py @@ -31,6 +31,7 @@ overload, ) from unittest.mock import patch +from urllib.parse import urlencode import attr from typing_extensions import Literal @@ -147,12 +148,20 @@ def invite(self, room=None, src=None, targ=None, expect_code=200, tok=None): expect_code=expect_code, ) - def join(self, room=None, user=None, expect_code=200, tok=None): + def join( + self, + room: str, + user: Optional[str] = None, + expect_code: int = 200, + tok: Optional[str] = None, + appservice_user_id: Optional[str] = None, + ) -> None: self.change_membership( room=room, src=user, targ=user, tok=tok, + appservice_user_id=appservice_user_id, membership=Membership.JOIN, expect_code=expect_code, ) @@ -209,11 +218,12 @@ def ban(self, room: str, src: str, targ: str, **kwargs: object): def change_membership( self, room: str, - src: str, - targ: str, + src: Optional[str], + targ: Optional[str], membership: str, extra_data: Optional[dict] = None, tok: Optional[str] = None, + appservice_user_id: Optional[str] = None, expect_code: int = 200, expect_errcode: Optional[str] = None, ) -> None: @@ -227,15 +237,26 @@ def change_membership( membership: The type of membership event extra_data: Extra information to include in the content of the event tok: The user access token to use + appservice_user_id: The `user_id` URL parameter to pass. + This allows driving an application service user + using an application service access token in `tok`. expect_code: The expected HTTP response code expect_errcode: The expected Matrix error code """ temp_id = self.auth_user_id self.auth_user_id = src - path = "/_matrix/client/r0/rooms/%s/state/m.room.member/%s" % (room, targ) + path = f"/_matrix/client/r0/rooms/{room}/state/m.room.member/{targ}" + url_params: Dict[str, str] = {} + if tok: - path = path + "?access_token=%s" % tok + url_params["access_token"] = tok + + if appservice_user_id: + url_params["user_id"] = appservice_user_id + + if url_params: + path += "?" 
+ urlencode(url_params) data = {"membership": membership} data.update(extra_data or {}) From d80d39b0359d3891d0078e9c8b53e189986d417f Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 3 Feb 2022 14:28:15 +0100 Subject: [PATCH 192/474] Add a ratelimiter for 3pid invite (#11892) --- changelog.d/11892.feature | 1 + docs/sample_config.yaml | 7 +++++++ synapse/config/ratelimiting.py | 15 +++++++++++++++ synapse/handlers/room_member.py | 9 ++++++++- 4 files changed, 31 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11892.feature diff --git a/changelog.d/11892.feature b/changelog.d/11892.feature new file mode 100644 index 000000000000..86e21a7f8454 --- /dev/null +++ b/changelog.d/11892.feature @@ -0,0 +1 @@ +Use a dedicated configurable rate limiter for 3PID invites. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 689b207fc0c0..946cd281d265 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -857,6 +857,9 @@ log_config: "CONFDIR/SERVERNAME.log.config" # - one for ratelimiting how often a user or IP can attempt to validate a 3PID. # - two for ratelimiting how often invites can be sent in a room or to a # specific user. +# - one for ratelimiting 3PID invites (i.e. invites sent to a third-party ID +# such as an email address or a phone number) based on the account that's +# sending the invite. # # The defaults are as shown below. # @@ -906,6 +909,10 @@ log_config: "CONFDIR/SERVERNAME.log.config" # per_user: # per_second: 0.003 # burst_count: 5 +# +#rc_third_party_invite: +# per_second: 0.2 +# burst_count: 10 # Ratelimiting settings for incoming federation # diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py index 36636ab07e40..e9ccf1bd624e 100644 --- a/synapse/config/ratelimiting.py +++ b/synapse/config/ratelimiting.py @@ -134,6 +134,14 @@ def read_config(self, config, **kwargs): defaults={"per_second": 0.003, "burst_count": 5}, ) + self.rc_third_party_invite = RateLimitConfig( + config.get("rc_third_party_invite", {}), + defaults={ + "per_second": self.rc_message.per_second, + "burst_count": self.rc_message.burst_count, + }, + ) + def generate_config_section(self, **kwargs): return """\ ## Ratelimiting ## @@ -168,6 +176,9 @@ def generate_config_section(self, **kwargs): # - one for ratelimiting how often a user or IP can attempt to validate a 3PID. # - two for ratelimiting how often invites can be sent in a room or to a # specific user. + # - one for ratelimiting 3PID invites (i.e. invites sent to a third-party ID + # such as an email address or a phone number) based on the account that's + # sending the invite. # # The defaults are as shown below. 
# @@ -217,6 +228,10 @@ def generate_config_section(self, **kwargs): # per_user: # per_second: 0.003 # burst_count: 5 + # + #rc_third_party_invite: + # per_second: 0.2 + # burst_count: 10 # Ratelimiting settings for incoming federation # diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 3dd5e1b6e4dc..efe6b4c9aaf3 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -116,6 +116,13 @@ def __init__(self, hs: "HomeServer"): burst_count=hs.config.ratelimiting.rc_invites_per_user.burst_count, ) + self._third_party_invite_limiter = Ratelimiter( + store=self.store, + clock=self.clock, + rate_hz=hs.config.ratelimiting.rc_third_party_invite.per_second, + burst_count=hs.config.ratelimiting.rc_third_party_invite.burst_count, + ) + self.request_ratelimiter = hs.get_request_ratelimiter() @abc.abstractmethod @@ -1295,7 +1302,7 @@ async def do_3pid_invite( # We need to rate limit *before* we send out any 3PID invites, so we # can't just rely on the standard ratelimiting of events. - await self.request_ratelimiter.ratelimit(requester) + await self._third_party_invite_limiter.ratelimit(requester) can_invite = await self.third_party_event_rules.check_threepid_can_be_invited( medium, address, room_id From 6b1c265c21c0e923cc21e87c5257176160d9fb45 Mon Sep 17 00:00:00 2001 From: Christian Paul Date: Thu, 3 Feb 2022 18:20:44 +0100 Subject: [PATCH 193/474] Fix typo: unpind -> unbind (#11859) Co-authored-by: reivilibre --- changelog.d/11859.doc | 1 + docs/admin_api/user_admin_api.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11859.doc diff --git a/changelog.d/11859.doc b/changelog.d/11859.doc new file mode 100644 index 000000000000..d903c8ddafa9 --- /dev/null +++ b/changelog.d/11859.doc @@ -0,0 +1 @@ +Fix typo in User Admin API: unpind -> unbind. diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md index 4f5f377b3808..995782c6bc1f 100644 --- a/docs/admin_api/user_admin_api.md +++ b/docs/admin_api/user_admin_api.md @@ -331,7 +331,7 @@ An empty body may be passed for backwards compatibility. The following actions are performed when deactivating an user: -- Try to unpind 3PIDs from the identity server +- Try to unbind 3PIDs from the identity server - Remove all 3PIDs from the homeserver - Delete all devices and E2EE keys - Delete all access tokens From 119edf51eb3e4f5ed5139dc370f5d7aed46edc1c Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 3 Feb 2022 13:36:49 -0500 Subject: [PATCH 194/474] Remove support for the webclient listener. (#11895) Also remove support for non-HTTP(S) web_client_location. --- changelog.d/11895.removal | 1 + docs/upgrade.md | 13 +++++ synapse/api/urls.py | 1 - synapse/app/homeserver.py | 34 +---------- synapse/config/server.py | 48 ++++------------ tests/http/test_webclient.py | 108 ----------------------------------- 6 files changed, 29 insertions(+), 176 deletions(-) create mode 100644 changelog.d/11895.removal delete mode 100644 tests/http/test_webclient.py diff --git a/changelog.d/11895.removal b/changelog.d/11895.removal new file mode 100644 index 000000000000..5973d96a3340 --- /dev/null +++ b/changelog.d/11895.removal @@ -0,0 +1 @@ +Drop support for `webclient` listeners and configuring `web_client_location` to a non-HTTP(S) URL. Deprecated configurations are a configuration error. 
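[Editor's note] For admins affected by this removal, a minimal `homeserver.yaml` sketch of the only form the setting may still take (the URL is illustrative); the diffs below add the enforcement:

```yaml
# Valid: an HTTP(S) URL. Requests to the root path `/` are redirected here.
web_client_location: https://element.example.com/

# No longer valid (now a configuration error): a local filesystem path, e.g.
#web_client_location: /var/www/webclient
```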
diff --git a/docs/upgrade.md b/docs/upgrade.md
index f455d257babf..7d582af0a794 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -85,6 +85,19 @@ process, for example:
     dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
     ```
 
+# Upgrading to v1.53.0
+
+## Dropping support for `webclient` listeners and non-HTTP(S) `web_client_location`
+
+Per the deprecation notice in Synapse v1.51.0, listeners of type `webclient`
+are no longer supported and configuring them is now a configuration error.
+
+Configuring a non-HTTP(S) `web_client_location` is now a
+configuration error. Since the `webclient` listener is no longer supported, this
+setting only applies to the root path `/` of Synapse's web server and no longer
+the `/_matrix/client/` path.
+
+
 # Upgrading to v1.51.0
 
 ## Deprecation of `webclient` listeners and non-HTTP(S) `web_client_location`
diff --git a/synapse/api/urls.py b/synapse/api/urls.py
index f9f9467dc1d7..bd49fa6a5f03 100644
--- a/synapse/api/urls.py
+++ b/synapse/api/urls.py
@@ -28,7 +28,6 @@
 FEDERATION_V2_PREFIX = FEDERATION_PREFIX + "/v2"
 FEDERATION_UNSTABLE_PREFIX = FEDERATION_PREFIX + "/unstable"
 STATIC_PREFIX = "/_matrix/static"
-WEB_CLIENT_PREFIX = "/_matrix/client"
 SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
 MEDIA_R0_PREFIX = "/_matrix/media/r0"
 MEDIA_V3_PREFIX = "/_matrix/media/v3"
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index efedcc88894b..24d55b049488 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -21,7 +21,6 @@
 from twisted.internet.tcp import Port
 from twisted.web.resource import EncodingResourceWrapper, Resource
 from twisted.web.server import GzipEncoderFactory
-from twisted.web.static import File
 
 import synapse
 import synapse.config.logger
@@ -33,7 +32,6 @@
     MEDIA_V3_PREFIX,
     SERVER_KEY_V2_PREFIX,
     STATIC_PREFIX,
-    WEB_CLIENT_PREFIX,
 )
 from synapse.app import _base
 from synapse.app._base import (
@@ -53,7 +51,6 @@
 from synapse.http.server import (
     OptionsResource,
     RootOptionsRedirectResource,
-    RootRedirect,
     StaticResource,
 )
 from synapse.http.site import SynapseSite
@@ -134,15 +131,12 @@ def _listener_http(
         # Try to find something useful to serve at '/':
         #
         # 1. Redirect to the web client if it is an HTTP(S) URL.
-        # 2. Redirect to the web client served via Synapse.
-        # 3. Redirect to the static "Synapse is running" page.
-        # 4. Do not redirect and use a blank resource.
-        if self.config.server.web_client_location_is_redirect:
+        # 2. Redirect to the static "Synapse is running" page.
+        # 3. Do not redirect and use a blank resource.
+        if self.config.server.web_client_location:
             root_resource: Resource = RootOptionsRedirectResource(
                 self.config.server.web_client_location
             )
-        elif WEB_CLIENT_PREFIX in resources:
-            root_resource = RootOptionsRedirectResource(WEB_CLIENT_PREFIX)
         elif STATIC_PREFIX in resources:
             root_resource = RootOptionsRedirectResource(STATIC_PREFIX)
         else:
@@ -270,28 +264,6 @@ def _configure_named_resource(
             if name in ["keys", "federation"]:
                 resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
 
-        if name == "webclient":
-            # webclient listeners are deprecated as of Synapse v1.51.0, remove it
-            # in > v1.53.0.
-            webclient_loc = self.config.server.web_client_location
-
-            if webclient_loc is None:
-                logger.warning(
-                    "Not enabling webclient resource, as web_client_location is unset."
- ) - elif self.config.server.web_client_location_is_redirect: - resources[WEB_CLIENT_PREFIX] = RootRedirect(webclient_loc) - else: - logger.warning( - "Running webclient on the same domain is not recommended: " - "https://github.com/matrix-org/synapse#security-note - " - "after you move webclient to different host you can set " - "web_client_location to its full URL to enable redirection." - ) - # GZip is disabled here due to - # https://twistedmatrix.com/trac/ticket/7678 - resources[WEB_CLIENT_PREFIX] = File(webclient_loc) - if name == "metrics" and self.config.metrics.enable_metrics: resources[METRICS_PREFIX] = MetricsResource(RegistryProxy) diff --git a/synapse/config/server.py b/synapse/config/server.py index a0a00a9798da..7bc962454684 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -179,7 +179,6 @@ def generate_ip_set( "openid", "replication", "static", - "webclient", } @@ -519,16 +518,12 @@ def read_config(self, config, **kwargs): self.listeners = l2 self.web_client_location = config.get("web_client_location", None) - self.web_client_location_is_redirect = self.web_client_location and ( + # Non-HTTP(S) web client location is not supported. + if self.web_client_location and not ( self.web_client_location.startswith("http://") or self.web_client_location.startswith("https://") - ) - # A non-HTTP(S) web client location is deprecated. - if self.web_client_location and not self.web_client_location_is_redirect: - logger.warning(NO_MORE_NONE_HTTP_WEB_CLIENT_LOCATION_WARNING) - - # Warn if webclient is configured for a worker. - _warn_if_webclient_configured(self.listeners) + ): + raise ConfigError("web_client_location must point to a HTTP(S) URL.") self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None)) self.gc_seconds = self.read_gc_intervals(config.get("gc_min_interval", None)) @@ -1351,11 +1346,16 @@ def parse_listener_def(listener: Any) -> ListenerConfig: http_config = None if listener_type == "http": + try: + resources = [ + HttpResourceConfig(**res) for res in listener.get("resources", []) + ] + except ValueError as e: + raise ConfigError("Unknown listener resource") from e + http_config = HttpListenerConfig( x_forwarded=listener.get("x_forwarded", False), - resources=[ - HttpResourceConfig(**res) for res in listener.get("resources", []) - ], + resources=resources, additional_resources=listener.get("additional_resources", {}), tag=listener.get("tag"), ) @@ -1363,30 +1363,6 @@ def parse_listener_def(listener: Any) -> ListenerConfig: return ListenerConfig(port, bind_addresses, listener_type, tls, http_config) -NO_MORE_NONE_HTTP_WEB_CLIENT_LOCATION_WARNING = """ -Synapse no longer supports serving a web client. To remove this warning, -configure 'web_client_location' with an HTTP(S) URL. -""" - - -NO_MORE_WEB_CLIENT_WARNING = """ -Synapse no longer includes a web client. To redirect the root resource to a web client, configure -'web_client_location'. To remove this warning, remove 'webclient' from the 'listeners' -configuration. 
-""" - - -def _warn_if_webclient_configured(listeners: Iterable[ListenerConfig]) -> None: - for listener in listeners: - if not listener.http_options: - continue - for res in listener.http_options.resources: - for name in res.names: - if name == "webclient": - logger.warning(NO_MORE_WEB_CLIENT_WARNING) - return - - _MANHOLE_SETTINGS_SCHEMA = { "type": "object", "properties": { diff --git a/tests/http/test_webclient.py b/tests/http/test_webclient.py deleted file mode 100644 index ee5cf299f64c..000000000000 --- a/tests/http/test_webclient.py +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright 2022 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from http import HTTPStatus -from typing import Dict - -from twisted.web.resource import Resource - -from synapse.app.homeserver import SynapseHomeServer -from synapse.config.server import HttpListenerConfig, HttpResourceConfig, ListenerConfig -from synapse.http.site import SynapseSite - -from tests.server import make_request -from tests.unittest import HomeserverTestCase, create_resource_tree, override_config - - -class WebClientTests(HomeserverTestCase): - @override_config( - { - "web_client_location": "https://example.org", - } - ) - def test_webclient_resolves_with_client_resource(self): - """ - Tests that both client and webclient resources can be accessed simultaneously. - - This is a regression test created in response to https://github.com/matrix-org/synapse/issues/11763. - """ - for resource_name_order_list in [ - ["webclient", "client"], - ["client", "webclient"], - ]: - # Create a dictionary from path regex -> resource - resource_dict: Dict[str, Resource] = {} - - for resource_name in resource_name_order_list: - resource_dict.update( - SynapseHomeServer._configure_named_resource(self.hs, resource_name) - ) - - # Create a root resource which ties the above resources together into one - root_resource = Resource() - create_resource_tree(resource_dict, root_resource) - - # Create a site configured with this resource to make HTTP requests against - listener_config = ListenerConfig( - port=8008, - bind_addresses=["127.0.0.1"], - type="http", - http_options=HttpListenerConfig( - resources=[HttpResourceConfig(names=resource_name_order_list)] - ), - ) - test_site = SynapseSite( - logger_name="synapse.access.http.fake", - site_tag=self.hs.config.server.server_name, - config=listener_config, - resource=root_resource, - server_version_string="1", - max_request_body_size=1234, - reactor=self.reactor, - ) - - # Attempt to make requests to endpoints on both the webclient and client resources - # on test_site. - self._request_client_and_webclient_resources(test_site) - - def _request_client_and_webclient_resources(self, test_site: SynapseSite) -> None: - """Make a request to an endpoint on both the webclient and client-server resources - of the given SynapseSite. - - Args: - test_site: The SynapseSite object to make requests against. 
- """ - - # Ensure that the *webclient* resource is behaving as expected (we get redirected to - # the configured web_client_location) - channel = make_request( - self.reactor, - site=test_site, - method="GET", - path="/_matrix/client", - ) - # Check that we are being redirected to the webclient location URI. - self.assertEqual(channel.code, HTTPStatus.FOUND) - self.assertEqual( - channel.headers.getRawHeaders("Location"), ["https://example.org"] - ) - - # Ensure that a request to the *client* resource works. - channel = make_request( - self.reactor, - site=test_site, - method="GET", - path="/_matrix/client/v3/login", - ) - self.assertEqual(channel.code, HTTPStatus.OK) - self.assertIn("flows", channel.json_body) From b3d155a749a7881806d985fe85d8ed20b16dbaf0 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 4 Feb 2022 11:27:00 +0000 Subject: [PATCH 195/474] Delete MSC1711_certificates_FAQ.md (#11907) This document isn't really relevant any more, and its existence is more confusing than helpful. --- changelog.d/11907.doc | 1 + docs/MSC1711_certificates_FAQ.md | 314 ------------------------------- docs/SUMMARY.md | 3 +- docs/upgrade.md | 6 +- 4 files changed, 3 insertions(+), 321 deletions(-) create mode 100644 changelog.d/11907.doc delete mode 100644 docs/MSC1711_certificates_FAQ.md diff --git a/changelog.d/11907.doc b/changelog.d/11907.doc new file mode 100644 index 000000000000..345cb900548d --- /dev/null +++ b/changelog.d/11907.doc @@ -0,0 +1 @@ +Remove outdated MSC1711 FAQ document. diff --git a/docs/MSC1711_certificates_FAQ.md b/docs/MSC1711_certificates_FAQ.md deleted file mode 100644 index 32ba15652d1a..000000000000 --- a/docs/MSC1711_certificates_FAQ.md +++ /dev/null @@ -1,314 +0,0 @@ -# MSC1711 Certificates FAQ - -## Historical Note -This document was originally written to guide server admins through the upgrade -path towards Synapse 1.0. Specifically, -[MSC1711](https://github.com/matrix-org/matrix-doc/blob/main/proposals/1711-x509-for-federation.md) -required that all servers present valid TLS certificates on their federation -API. Admins were encouraged to achieve compliance from version 0.99.0 (released -in February 2019) ahead of version 1.0 (released June 2019) enforcing the -certificate checks. - -Much of what follows is now outdated since most admins will have already -upgraded, however it may be of use to those with old installs returning to the -project. - -If you are setting up a server from scratch you almost certainly should look at -the [installation guide](setup/installation.md) instead. - -## Introduction -The goal of Synapse 0.99.0 is to act as a stepping stone to Synapse 1.0.0. It -supports the r0.1 release of the server to server specification, but is -compatible with both the legacy Matrix federation behaviour (pre-r0.1) as well -as post-r0.1 behaviour, in order to allow for a smooth upgrade across the -federation. - -The most important thing to know is that Synapse 1.0.0 will require a valid TLS -certificate on federation endpoints. Self signed certificates will not be -sufficient. - -Synapse 0.99.0 makes it easy to configure TLS certificates and will -interoperate with both >= 1.0.0 servers as well as existing servers yet to -upgrade. - -**It is critical that all admins upgrade to 0.99.0 and configure a valid TLS -certificate.** Admins will have 1 month to do so, after which 1.0.0 will be -released and those servers without a valid certificate will not longer be able -to federate with >= 1.0.0 servers. 
- -Full details on how to carry out this configuration change is given -[below](#configuring-certificates-for-compatibility-with-synapse-100). A -timeline and some frequently asked questions are also given below. - -For more details and context on the release of the r0.1 Server/Server API and -imminent Matrix 1.0 release, you can also see our -[main talk from FOSDEM 2019](https://matrix.org/blog/2019/02/04/matrix-at-fosdem-2019/). - -## Timeline - -**5th Feb 2019 - Synapse 0.99.0 is released.** - -All server admins are encouraged to upgrade. - -0.99.0: - -- provides support for ACME to make setting up Let's Encrypt certs easy, as - well as .well-known support. - -- does not enforce that a valid CA cert is present on the federation API, but - rather makes it easy to set one up. - -- provides support for .well-known - -Admins should upgrade and configure a valid CA cert. Homeservers that require a -.well-known entry (see below), should retain their SRV record and use it -alongside their .well-known record. - -**10th June 2019 - Synapse 1.0.0 is released** - -1.0.0 is scheduled for release on 10th June. In -accordance with the the [S2S spec](https://matrix.org/docs/spec/server_server/r0.1.0.html) -1.0.0 will enforce certificate validity. This means that any homeserver without a -valid certificate after this point will no longer be able to federate with -1.0.0 servers. - -## Configuring certificates for compatibility with Synapse 1.0.0 - -### If you do not currently have an SRV record - -In this case, your `server_name` points to the host where your Synapse is -running. There is no need to create a `.well-known` URI or an SRV record, but -you will need to give Synapse a valid, signed, certificate. - -### If you do have an SRV record currently - -If you are using an SRV record, your matrix domain (`server_name`) may not -point to the same host that your Synapse is running on (the 'target -domain'). (If it does, you can follow the recommendation above; otherwise, read -on.) - -Let's assume that your `server_name` is `example.com`, and your Synapse is -hosted at a target domain of `customer.example.net`. Currently you should have -an SRV record which looks like: - -``` -_matrix._tcp.example.com. IN SRV 10 5 8000 customer.example.net. -``` - -In this situation, you have three choices for how to proceed: - -#### Option 1: give Synapse a certificate for your matrix domain - -Synapse 1.0 will expect your server to present a TLS certificate for your -`server_name` (`example.com` in the above example). You can achieve this by acquiring a -certificate for the `server_name` yourself (for example, using `certbot`), and giving it -and the key to Synapse via `tls_certificate_path` and `tls_private_key_path`. - -#### Option 2: run Synapse behind a reverse proxy - -If you have an existing reverse proxy set up with correct TLS certificates for -your domain, you can simply route all traffic through the reverse proxy by -updating the SRV record appropriately (or removing it, if the proxy listens on -8448). - -See [the reverse proxy documentation](reverse_proxy.md) for information on setting up a -reverse proxy. - -#### Option 3: add a .well-known file to delegate your matrix traffic - -This will allow you to keep Synapse on a separate domain, without having to -give it a certificate for the matrix domain. - -You can do this with a `.well-known` file as follows: - - 1. Keep the SRV record in place - it is needed for backwards compatibility - with Synapse 0.34 and earlier. - - 2. 
Give Synapse a certificate corresponding to the target domain - (`customer.example.net` in the above example). You can do this by acquire a - certificate for the target domain and giving it to Synapse via `tls_certificate_path` - and `tls_private_key_path`. - - 3. Restart Synapse to ensure the new certificate is loaded. - - 4. Arrange for a `.well-known` file at - `https:///.well-known/matrix/server` with contents: - - ```json - {"m.server": ""} - ``` - - where the target server name is resolved as usual (i.e. SRV lookup, falling - back to talking to port 8448). - - In the above example, where synapse is listening on port 8000, - `https://example.com/.well-known/matrix/server` should have `m.server` set to one of: - - 1. `customer.example.net` ─ with a SRV record on - `_matrix._tcp.customer.example.com` pointing to port 8000, or: - - 2. `customer.example.net` ─ updating synapse to listen on the default port - 8448, or: - - 3. `customer.example.net:8000` ─ ensuring that if there is a reverse proxy - on `customer.example.net:8000` it correctly handles HTTP requests with - Host header set to `customer.example.net:8000`. - -## FAQ - -### Synapse 0.99.0 has just been released, what do I need to do right now? - -Upgrade as soon as you can in preparation for Synapse 1.0.0, and update your -TLS certificates as [above](#configuring-certificates-for-compatibility-with-synapse-100). - -### What will happen if I do not set up a valid federation certificate immediately? - -Nothing initially, but once 1.0.0 is in the wild it will not be possible to -federate with 1.0.0 servers. - -### What will happen if I do nothing at all? - -If the admin takes no action at all, and remains on a Synapse < 0.99.0 then the -homeserver will be unable to federate with those who have implemented -.well-known. Then, as above, once the month upgrade window has expired the -homeserver will not be able to federate with any Synapse >= 1.0.0 - -### When do I need a SRV record or .well-known URI? - -If your homeserver listens on the default federation port (8448), and your -`server_name` points to the host that your homeserver runs on, you do not need an -SRV record or `.well-known/matrix/server` URI. - -For instance, if you registered `example.com` and pointed its DNS A record at a -fresh Upcloud VPS or similar, you could install Synapse 0.99 on that host, -giving it a server_name of `example.com`, and it would automatically generate a -valid TLS certificate for you via Let's Encrypt and no SRV record or -`.well-known` URI would be needed. - -This is the common case, although you can add an SRV record or -`.well-known/matrix/server` URI for completeness if you wish. - -**However**, if your server does not listen on port 8448, or if your `server_name` -does not point to the host that your homeserver runs on, you will need to let -other servers know how to find it. - -In this case, you should see ["If you do have an SRV record -currently"](#if-you-do-have-an-srv-record-currently) above. - -### Can I still use an SRV record? - -Firstly, if you didn't need an SRV record before (because your server is -listening on port 8448 of your server_name), you certainly don't need one now: -the defaults are still the same. - -If you previously had an SRV record, you can keep using it provided you are -able to give Synapse a TLS certificate corresponding to your server name. For -example, suppose you had the following SRV record, which directs matrix traffic -for example.com to matrix.example.com:443: - -``` -_matrix._tcp.example.com. 
IN SRV 10 5 443 matrix.example.com
-```
-
-In this case, Synapse must be given a certificate for example.com - or be
-configured to acquire one from Let's Encrypt.
-
-If you are unable to give Synapse a certificate for your server_name, you will
-need to use a .well-known URI instead. However, see also "I have created a
-.well-known URI. Do I still need an SRV record?".
-
-### I have created a .well-known URI. Do I still need an SRV record?
-
-As of Synapse 0.99, Synapse will first check for the existence of a `.well-known`
-URI and follow any delegation it suggests. It will only then check for the
-existence of an SRV record.
-
-That means that the SRV record will often be redundant. However, you should
-remember that there may still be older versions of Synapse in the federation
-which do not understand `.well-known` URIs, so if you removed your SRV record you
-would no longer be able to federate with them.
-
-It is therefore best to leave the SRV record in place for now. Synapse 0.34 and
-earlier will follow the SRV record (and not care about the invalid
-certificate). Synapse 0.99 and later will follow the .well-known URI, with the
-correct certificate chain.
-
-### It used to work just fine, why are you breaking everything?
-
-We have always wanted Matrix servers to be as easy to set up as possible, and
-so back when we started federation in 2014 we didn't want admins to have to go
-through the cumbersome process of buying a valid TLS certificate to run a
-server. This was before Let's Encrypt came along and made getting a free and
-valid TLS certificate straightforward. So instead, we adopted a system based on
-[Perspectives](https://en.wikipedia.org/wiki/Convergence_(SSL)): an approach
-where you check a set of "notary servers" (in practice, homeservers) to vouch
-for the validity of a certificate rather than having it signed by a CA. As long
-as enough different notaries agree on the certificate's validity, then it is
-trusted.
-
-However, in practice this has never worked properly. Most people only use the
-default notary server (matrix.org), leading to inadvertent centralisation which
-we want to eliminate. Meanwhile, we never implemented the full consensus
-algorithm to query the servers participating in a room and agree on whether a
-given certificate is valid. This is fiddly to get right
-(especially in the face of Sybil attacks), and we found ourselves questioning
-whether it was worth the effort to finish the work and commit to maintaining a
-secure certificate validation system as opposed to focusing on core Matrix
-development.
-
-Meanwhile, Let's Encrypt came along in 2016, and put the final nail in the
-coffin of the Perspectives project (which was already pretty dead). So, the
-Spec Core Team decided that a better approach would be to mandate valid TLS
-certificates for federation alongside the rest of the Web. More details can be
-found in
-[MSC1711](https://github.com/matrix-org/matrix-doc/blob/main/proposals/1711-x509-for-federation.md#background-the-failure-of-the-perspectives-approach).
-
-This results in a breaking change, which is disruptive, but absolutely critical
-for the security model. However, the existence of Let's Encrypt as a trivial
-way to replace the old self-signed certificates with valid CA-signed ones helps
-smooth things over massively, especially as Synapse can now automate Let's
-Encrypt certificate generation if needed.
-
-### Can I manage my own certificates rather than having Synapse renew certificates itself? 
-
-Yes, you are welcome to manage your certificates yourself. Synapse will only
-attempt to obtain certificates from Let's Encrypt if you configure it to do
-so. The only requirement is that there is a valid TLS cert present for
-federation endpoints.
-
-### Do you still recommend against using a reverse proxy on the federation port?
-
-We no longer actively recommend against using a reverse proxy. Many admins will
-find it easier to direct federation traffic to a reverse proxy and manage their
-own TLS certificates, and this is a supported configuration.
-
-See [the reverse proxy documentation](reverse_proxy.md) for information on setting up a
-reverse proxy.
-
-### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy?
-
-Practically speaking, this is no longer necessary.
-
-If you are using a reverse proxy for all of your TLS traffic, then you can set
-`no_tls: True`. In that case, the only reason Synapse needs the certificate is
-to populate a legacy 'tls_fingerprints' field in the federation API. This is
-ignored by Synapse 0.99.0 and later, and the only time pre-0.99 Synapses will
-check it is when attempting to fetch the server keys - and generally this is
-delegated via `matrix.org`, which is on 0.99.0.
-
-However, there is a bug in Synapse 0.99.0
-[#4554](https://github.com/matrix-org/synapse/issues/4554) which prevents
-Synapse from starting if you do not give it a TLS certificate. To work around
-this, you can give it any TLS certificate at all. This will be fixed soon.
-
-### Do I need the same certificate for the client and federation port?
-
-No. There is nothing stopping you from using different certificates,
-particularly if you are using a reverse proxy. However, Synapse will use the
-same certificate on any ports where TLS is configured.
-
-### How do I tell Synapse to reload my keys/certificates after I replace them?
-
-Synapse will reload the keys and certificates when it receives a SIGHUP - for
-example `kill -HUP $(cat homeserver.pid)`. Alternatively, simply restart
-Synapse, though this will result in downtime while it restarts.
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index 11f597b3edb8..3eeb1a2799d3 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -13,7 +13,6 @@
 
 # Upgrading
   - [Upgrading between Synapse Versions](upgrade.md)
-  - [Upgrading from pre-Synapse 1.0](MSC1711_certificates_FAQ.md)
 
 # Usage
   - [Federation](federate.md)
@@ -72,7 +71,7 @@
     - [Understanding Synapse Through Grafana Graphs](usage/administration/understanding_synapse_through_grafana_graphs.md)
     - [Useful SQL for Admins](usage/administration/useful_sql_for_admins.md)
     - [Database Maintenance Tools](usage/administration/database_maintenance_tools.md)
-    - [State Groups](usage/administration/state_groups.md) 
+    - [State Groups](usage/administration/state_groups.md)
     - [Request log format](usage/administration/request_log.md)
     - [Admin FAQ](usage/administration/admin_faq.md)
   - [Scripts]()
diff --git a/docs/upgrade.md b/docs/upgrade.md
index 7d582af0a794..75febb4adfcf 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -1142,8 +1142,7 @@ more details on upgrading your database.
 
 Synapse v1.0 is the first release to enforce
 validation of TLS certificates for the federation API. It is therefore
 essential that your
-certificates are correctly configured. See the
-[FAQ](MSC1711_certificates_FAQ.md) for more information.
+certificates are correctly configured.
 
 Note, v1.0 installations will also no longer be able to federate with servers
 that have not correctly configured their certificates. 
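A quick way to sanity-check that a federation endpoint presents a certificate that validates against the system CA bundle is a short TLS handshake probe. The following is an illustrative sketch only; the host and port are placeholders for your own `server_name` and federation port:

```python
import socket
import ssl

host, port = "example.com", 8448  # placeholders: your server_name / federation port

# create_default_context() verifies the peer's chain against the system CA
# bundle, which is the kind of validation Synapse 1.0 requires of peers.
context = ssl.create_default_context()
try:
    with socket.create_connection((host, port), timeout=10) as sock:
        with context.wrap_socket(sock, server_hostname=host) as tls:
            cert = tls.getpeercert()
            print("certificate OK, expires:", cert["notAfter"])
except ssl.SSLCertVerificationError as exc:
    print("certificate problem:", exc)
```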
@@ -1208,9 +1207,6 @@ you will need to replace any self-signed certificates with those verified by a
 root CA. Information on how to do so can be found at the ACME docs.
 
-For more information on configuring TLS certificates see the
-[FAQ](MSC1711_certificates_FAQ.md).
-
 # Upgrading to v0.34.0
 
 1. This release is the first to fully support Python 3. Synapse will

From 02632b3504ad4512c5f5a4f859b3fe326b19c788 Mon Sep 17 00:00:00 2001
From: Jonathan de Jong
Date: Fri, 4 Feb 2022 13:15:13 +0100
Subject: [PATCH 196/474] Stabilise MSC3231 (Token Based Registration)
 (#11867)

---
 changelog.d/11867.feature                        |  5 +++++
 docs/modules/password_auth_provider_callbacks.md |  2 +-
 docs/upgrade.md                                  | 15 +++++++++++++++
 docs/workers.md                                  |  2 +-
 synapse/api/constants.py                         |  2 +-
 synapse/handlers/ui_auth/__init__.py             |  2 +-
 synapse/rest/client/register.py                  |  7 +++----
 tests/rest/client/test_register.py               |  2 +-
 8 files changed, 28 insertions(+), 9 deletions(-)
 create mode 100644 changelog.d/11867.feature

diff --git a/changelog.d/11867.feature b/changelog.d/11867.feature
new file mode 100644
index 000000000000..dbd9de0e4cf7
--- /dev/null
+++ b/changelog.d/11867.feature
@@ -0,0 +1,5 @@
+Stabilize [MSC3231](https://github.com/matrix-org/matrix-doc/pull/3231).
+
+Client implementations using `m.login.registration_token` should switch to the stable identifiers:
+* `org.matrix.msc3231.login.registration_token` in query parameters and request/response bodies becomes `m.login.registration_token`.
+* `/_matrix/client/unstable/org.matrix.msc3231/register/org.matrix.msc3231.login.registration_token/validity` becomes `/_matrix/client/v1/register/m.login.registration_token/validity`.
\ No newline at end of file
diff --git a/docs/modules/password_auth_provider_callbacks.md b/docs/modules/password_auth_provider_callbacks.md
index ec8324d292d8..3697e3782ec3 100644
--- a/docs/modules/password_auth_provider_callbacks.md
+++ b/docs/modules/password_auth_provider_callbacks.md
@@ -148,7 +148,7 @@ Here's an example featuring all currently supported keys:
         "address": "33123456789",
         "validated_at": 1642701357084,
     },
-    "org.matrix.msc3231.login.registration_token": "sometoken",  # User has registered through the flow described in MSC3231
+    "m.login.registration_token": "sometoken",  # User has registered through a registration token
 }
 ```
 
diff --git a/docs/upgrade.md b/docs/upgrade.md
index 75febb4adfcf..8ce37bcdee4f 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -84,6 +84,21 @@ process, for example:
     wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
     dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 ```
+# Upgrading to v1.(next)
+
+## Stabilisation of MSC3231
+
+The unstable validity-check endpoint for the
+[Registration Tokens](https://spec.matrix.org/v1.2/client-server-api/#get_matrixclientv1registermloginregistration_tokenvalidity)
+feature has been stabilised and moved from:
+
+`/_matrix/client/unstable/org.matrix.msc3231/register/org.matrix.msc3231.login.registration_token/validity`
+
+to:
+
+`/_matrix/client/v1/register/m.login.registration_token/validity`
+
+Please update any relevant reverse proxy or firewall configurations appropriately. 
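For clients checking their migration, the stable endpoint can be exercised with a few lines of Python. This is a hedged sketch: the homeserver base URL and token below are placeholders, and the `{"valid": true}` response shape is the one documented for this endpoint.

```python
import json
import urllib.request

homeserver = "https://matrix.example.com"  # placeholder homeserver base URL
token = "abcd"                             # placeholder registration token

url = (
    f"{homeserver}/_matrix/client/v1/register/"
    f"m.login.registration_token/validity?token={token}"
)
with urllib.request.urlopen(url) as resp:
    body = json.load(resp)  # e.g. {"valid": true}

print("token is valid" if body["valid"] else "token is invalid")
```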
# Upgrading to v1.53.0 diff --git a/docs/workers.md b/docs/workers.md index fd83e2ddeb1f..dadde4d7265d 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -241,7 +241,7 @@ expressions: # Registration/login requests ^/_matrix/client/(api/v1|r0|v3|unstable)/login$ ^/_matrix/client/(r0|v3|unstable)/register$ - ^/_matrix/client/unstable/org.matrix.msc3231/register/org.matrix.msc3231.login.registration_token/validity$ + ^/_matrix/client/v1/register/m.login.registration_token/validity$ # Event sending requests ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/redact diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 52c083a20b9c..36ace7c6134f 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -81,7 +81,7 @@ class LoginType: TERMS: Final = "m.login.terms" SSO: Final = "m.login.sso" DUMMY: Final = "m.login.dummy" - REGISTRATION_TOKEN: Final = "org.matrix.msc3231.login.registration_token" + REGISTRATION_TOKEN: Final = "m.login.registration_token" # This is used in the `type` parameter for /register when called by diff --git a/synapse/handlers/ui_auth/__init__.py b/synapse/handlers/ui_auth/__init__.py index 13b0c61d2e20..56eee4057f44 100644 --- a/synapse/handlers/ui_auth/__init__.py +++ b/synapse/handlers/ui_auth/__init__.py @@ -38,4 +38,4 @@ class UIAuthSessionDataConstants: # used during registration to store the registration token used (if required) so that: # - we can prevent a token being used twice by one session # - we can 'use up' the token after registration has successfully completed - REGISTRATION_TOKEN = "org.matrix.msc3231.login.registration_token" + REGISTRATION_TOKEN = "m.login.registration_token" diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index e3492f9f9399..c283313e8da3 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -368,7 +368,7 @@ class RegistrationTokenValidityRestServlet(RestServlet): Example: - GET /_matrix/client/unstable/org.matrix.msc3231/register/org.matrix.msc3231.login.registration_token/validity?token=abcd + GET /_matrix/client/v1/register/m.login.registration_token/validity?token=abcd 200 OK @@ -378,9 +378,8 @@ class RegistrationTokenValidityRestServlet(RestServlet): """ PATTERNS = client_patterns( - f"/org.matrix.msc3231/register/{LoginType.REGISTRATION_TOKEN}/validity", - releases=(), - unstable=True, + f"/register/{LoginType.REGISTRATION_TOKEN}/validity", + releases=("v1",), ) def __init__(self, hs: "HomeServer"): diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py index 407dd32a7383..0f1c47dcbb87 100644 --- a/tests/rest/client/test_register.py +++ b/tests/rest/client/test_register.py @@ -1154,7 +1154,7 @@ def test_background_job(self): class RegistrationTokenValidityRestServletTestCase(unittest.HomeserverTestCase): servlets = [register.register_servlets] - url = "/_matrix/client/unstable/org.matrix.msc3231/register/org.matrix.msc3231.login.registration_token/validity" + url = "/_matrix/client/v1/register/m.login.registration_token/validity" def default_config(self): config = super().default_config() From a3865ed525db76eda31780cf6e03574b53e97e11 Mon Sep 17 00:00:00 2001 From: kegsay Date: Fri, 4 Feb 2022 13:04:57 +0000 Subject: [PATCH 197/474] Run Complement tests sequentially (#11910) Since #11811 there has been general Complement flakiness around networking. It seems like tests are hitting the wrong containers. 
In an effort to diagnose the cause of this, as well as reduce its impact on
this project, set the parallelism to 1 (no parallelism) when running tests.

If this fixes the flakiness then this indicates the cause and I can diagnose
this further. If this doesn't fix the flakiness then that implies some kind of
test pollution which also helps to diagnose this further.
---
 .github/workflows/tests.yml | 2 +-
 changelog.d/11910.misc      | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/11910.misc

diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index e0f80aaaa783..c395f3e1c2fb 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -383,7 +383,7 @@ jobs:
       # Run Complement
       - run: |
           set -o pipefail
-          go test -v -json -tags synapse_blacklist,msc2403 ./tests/... 2>&1 | gotestfmt
+          go test -v -json -p 1 -tags synapse_blacklist,msc2403 ./tests/... 2>&1 | gotestfmt
         shell: bash
         name: Run Complement Tests
         env:
diff --git a/changelog.d/11910.misc b/changelog.d/11910.misc
new file mode 100644
index 000000000000..d05130969f5b
--- /dev/null
+++ b/changelog.d/11910.misc
@@ -0,0 +1 @@
+Run Complement tests sequentially.

From 65ef21b1c7b4297d6a7b2888de4cbfe198ac0324 Mon Sep 17 00:00:00 2001
From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com>
Date: Fri, 4 Feb 2022 15:39:14 +0100
Subject: [PATCH 198/474] Clarify that users' media are also preview images
 (#11862)

---
 changelog.d/11862.doc             | 1 +
 docs/admin_api/media_admin_api.md | 3 +++
 docs/admin_api/user_admin_api.md  | 9 ++++++++-
 3 files changed, 12 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/11862.doc

diff --git a/changelog.d/11862.doc b/changelog.d/11862.doc
new file mode 100644
index 000000000000..98e32e7325bd
--- /dev/null
+++ b/changelog.d/11862.doc
@@ -0,0 +1 @@
+Document that images returned by the User List Media Admin API can include those generated by URL previews.
diff --git a/docs/admin_api/media_admin_api.md b/docs/admin_api/media_admin_api.md
index a8cdf1972726..96b3668f2a08 100644
--- a/docs/admin_api/media_admin_api.md
+++ b/docs/admin_api/media_admin_api.md
@@ -2,6 +2,9 @@
 
 These APIs allow extracting media information from the homeserver.
 
+Details about the format of the `media_id` and storage of the media in the file system
+are documented under [media repository](../media_repository.md).
+
 To use it, you will need to authenticate by providing an `access_token`
 for a server admin: see [Admin API](../usage/administration/admin_api).
 
diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md
index 995782c6bc1f..1bbe23708055 100644
--- a/docs/admin_api/user_admin_api.md
+++ b/docs/admin_api/user_admin_api.md
@@ -539,6 +539,11 @@ The following fields are returned in the JSON response body:
 ### List media uploaded by a user
 Gets a list of all local media that a specific `user_id` has created.
+These are media that the user has uploaded themselves
+([local media](../media_repository.md#local-media)), as well as
+[URL preview images](../media_repository.md#url-previews) requested by the user if the
+[feature is enabled](../development/url_previews.md).
+
 By default, the response is ordered by descending creation date and ascending media ID.
 The newest media is on top. You can change the order with parameters
 `order_by` and `dir`.
@@ -635,7 +640,9 @@ The following fields are returned in the JSON response body:
 
 Media objects contain the following fields:
 
  - `created_ts` - integer - Timestamp when the content was uploaded in ms. 
- `last_access_ts` - integer - Timestamp when the content was last accessed in ms. - - `media_id` - string - The id used to refer to the media. + - `media_id` - string - The id used to refer to the media. Details about the format + are documented under + [media repository](../media_repository.md). - `media_length` - integer - Length of the media in bytes. - `media_type` - string - The MIME-type of the media. - `quarantined_by` - string - The user ID that initiated the quarantine request From 0c4878caf2ba6d7e67f373b8ef80a445920f0dcd Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 7 Feb 2022 13:21:19 +0000 Subject: [PATCH 199/474] Add a unit test for users receiving their own device list updates (#11909) --- changelog.d/11909.misc | 1 + tests/rest/client/test_sync.py | 57 +++++++++++++++++++++++++++++++++- 2 files changed, 57 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11909.misc diff --git a/changelog.d/11909.misc b/changelog.d/11909.misc new file mode 100644 index 000000000000..ffd3e5c6397c --- /dev/null +++ b/changelog.d/11909.misc @@ -0,0 +1 @@ +Add a test that checks users receive their own device list updates down `/sync`. \ No newline at end of file diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index c42768637678..cd4af2b1f358 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -23,7 +23,7 @@ ReadReceiptEventFields, RelationTypes, ) -from synapse.rest.client import knock, login, read_marker, receipts, room, sync +from synapse.rest.client import devices, knock, login, read_marker, receipts, room, sync from tests import unittest from tests.federation.transport.test_knocking import ( @@ -710,3 +710,58 @@ def test_noop_sync_does_not_tightloop(self): channel.await_result(timeout_ms=9900) channel.await_result(timeout_ms=200) self.assertEqual(channel.code, 200, channel.json_body) + + +class DeviceListSyncTestCase(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + sync.register_servlets, + devices.register_servlets, + ] + + def test_user_with_no_rooms_receives_self_device_list_updates(self): + """Tests that a user with no rooms still receives their own device list updates""" + device_id = "TESTDEVICE" + + # Register a user and login, creating a device + self.user_id = self.register_user("kermit", "monkey") + self.tok = self.login("kermit", "monkey", device_id=device_id) + + # Request an initial sync + channel = self.make_request("GET", "/sync", access_token=self.tok) + self.assertEqual(channel.code, 200, channel.json_body) + next_batch = channel.json_body["next_batch"] + + # Now, make an incremental sync request. 
+ # It won't return until something has happened + incremental_sync_channel = self.make_request( + "GET", + f"/sync?since={next_batch}&timeout=30000", + access_token=self.tok, + await_result=False, + ) + + # Change our device's display name + channel = self.make_request( + "PUT", + f"devices/{device_id}", + { + "display_name": "freeze ray", + }, + access_token=self.tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # The sync should now have returned + incremental_sync_channel.await_result(timeout_ms=20000) + self.assertEqual(incremental_sync_channel.code, 200, channel.json_body) + + # We should have received notification that the (user's) device has changed + device_list_changes = incremental_sync_channel.json_body.get( + "device_lists", {} + ).get("changed", []) + + self.assertIn( + self.user_id, device_list_changes, incremental_sync_channel.json_body + ) From e03dde259b741ae824a2569d23ba8bdbc336a54a Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 7 Feb 2022 13:25:09 +0000 Subject: [PATCH 200/474] Clean up an indirect reference to the homeserver datastore (#11914) --- changelog.d/11914.misc | 1 + synapse/handlers/typing.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/11914.misc diff --git a/changelog.d/11914.misc b/changelog.d/11914.misc new file mode 100644 index 000000000000..c288d43455f8 --- /dev/null +++ b/changelog.d/11914.misc @@ -0,0 +1 @@ +Various refactors to the typing notifications code. \ No newline at end of file diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index e43c22832da6..e4bed1c93758 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -446,7 +446,7 @@ def process_replication_rows( class TypingNotificationEventSource(EventSource[int, JsonDict]): def __init__(self, hs: "HomeServer"): - self.hs = hs + self._main_store = hs.get_datastore() self.clock = hs.get_clock() # We can't call get_typing_handler here because there's a cycle: # @@ -487,7 +487,7 @@ async def get_new_events_as( continue if not await service.matches_user_in_member_list( - room_id, handler.store + room_id, self._main_store ): continue From 314ca4c86d59777b98efe7da6c2a0e6ec3203b47 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 7 Feb 2022 10:06:52 -0500 Subject: [PATCH 201/474] Pass the proper type when uploading files. (#11927) The Content-Length header should be treated as an int, not a string. This shouldn't have any user-facing change. --- changelog.d/11927.misc | 1 + synapse/rest/media/v1/upload_resource.py | 13 +++++++++---- 2 files changed, 10 insertions(+), 4 deletions(-) create mode 100644 changelog.d/11927.misc diff --git a/changelog.d/11927.misc b/changelog.d/11927.misc new file mode 100644 index 000000000000..22c58521c92c --- /dev/null +++ b/changelog.d/11927.misc @@ -0,0 +1 @@ +Use the proper type for the Content-Length header in the `UploadResource`. 
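In isolation, the parsing pattern this patch applies looks like the following sketch (not the Synapse code itself; the helper name is made up for illustration): header values arrive as strings, so both the integer conversion and the size comparison need explicit handling.

```python
from typing import Optional

def parse_content_length(raw: Optional[str], max_upload_size: int) -> int:
    # A missing or malformed header maps to HTTP 400; an oversized body to 413.
    if raw is None:
        raise ValueError("Request must specify a Content-Length")
    try:
        content_length = int(raw)
    except ValueError:
        raise ValueError("Content-Length value is invalid")
    if content_length > max_upload_size:
        raise ValueError("Upload request body is too large")
    return content_length

print(parse_content_length("512", max_upload_size=50 * 1024 * 1024))  # 512
```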
diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/v1/upload_resource.py index 8162094cf688..fde28d08cb8e 100644 --- a/synapse/rest/media/v1/upload_resource.py +++ b/synapse/rest/media/v1/upload_resource.py @@ -49,10 +49,14 @@ async def _async_render_OPTIONS(self, request: SynapseRequest) -> None: async def _async_render_POST(self, request: SynapseRequest) -> None: requester = await self.auth.get_user_by_req(request) - content_length = request.getHeader("Content-Length") - if content_length is None: + raw_content_length = request.getHeader("Content-Length") + if raw_content_length is None: raise SynapseError(msg="Request must specify a Content-Length", code=400) - if int(content_length) > self.max_upload_size: + try: + content_length = int(raw_content_length) + except ValueError: + raise SynapseError(msg="Content-Length value is invalid", code=400) + if content_length > self.max_upload_size: raise SynapseError( msg="Upload request body is too large", code=413, @@ -66,7 +70,8 @@ async def _async_render_POST(self, request: SynapseRequest) -> None: upload_name: Optional[str] = upload_name_bytes.decode("utf8") except UnicodeDecodeError: raise SynapseError( - msg="Invalid UTF-8 filename parameter: %r" % (upload_name), code=400 + msg="Invalid UTF-8 filename parameter: %r" % (upload_name_bytes,), + code=400, ) # If the name is falsey (e.g. an empty byte string) ensure it is None. From cf06783d54b2b9090aef595a9094ddd857c1155b Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 7 Feb 2022 18:26:42 +0000 Subject: [PATCH 202/474] Remove optional state of `ApplicationService.is_interested`'s `store` parameter (#11911) --- changelog.d/11911.misc | 1 + synapse/appservice/__init__.py | 23 ++++------------- synapse/handlers/appservice.py | 2 +- tests/appservice/test_appservice.py | 38 ++++++++++++++++++++++++----- 4 files changed, 39 insertions(+), 25 deletions(-) create mode 100644 changelog.d/11911.misc diff --git a/changelog.d/11911.misc b/changelog.d/11911.misc new file mode 100644 index 000000000000..805588c2e963 --- /dev/null +++ b/changelog.d/11911.misc @@ -0,0 +1 @@ +Various refactors to the application service notifier code. 
\ No newline at end of file diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index 7dbebd97b5d3..a340a8c9c703 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py @@ -165,23 +165,16 @@ def _is_exclusive(self, namespace_key: str, test_string: str) -> bool: return namespace.exclusive return False - async def _matches_user( - self, event: Optional[EventBase], store: Optional["DataStore"] = None - ) -> bool: - if not event: - return False - + async def _matches_user(self, event: EventBase, store: "DataStore") -> bool: if self.is_interested_in_user(event.sender): return True + # also check m.room.member state key if event.type == EventTypes.Member and self.is_interested_in_user( event.state_key ): return True - if not store: - return False - does_match = await self.matches_user_in_member_list(event.room_id, store) return does_match @@ -216,21 +209,15 @@ def _matches_room_id(self, event: EventBase) -> bool: return self.is_interested_in_room(event.room_id) return False - async def _matches_aliases( - self, event: EventBase, store: Optional["DataStore"] = None - ) -> bool: - if not store or not event: - return False - + async def _matches_aliases(self, event: EventBase, store: "DataStore") -> bool: alias_list = await store.get_aliases_for_room(event.room_id) for alias in alias_list: if self.is_interested_in_alias(alias): return True + return False - async def is_interested( - self, event: EventBase, store: Optional["DataStore"] = None - ) -> bool: + async def is_interested(self, event: EventBase, store: "DataStore") -> bool: """Check if this service is interested in this event. Args: diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 0fb919acf672..a42c3558e42b 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -649,7 +649,7 @@ async def _get_services_for_event( """Retrieve a list of application services interested in this event. Args: - event: The event to check. Can be None if alias_list is not. + event: The event to check. Returns: A list of services interested in this event based on the service regex. 
""" diff --git a/tests/appservice/test_appservice.py b/tests/appservice/test_appservice.py index 07d8105f41d6..9bd6275e92db 100644 --- a/tests/appservice/test_appservice.py +++ b/tests/appservice/test_appservice.py @@ -40,13 +40,19 @@ def setUp(self): ) self.store = Mock() + self.store.get_aliases_for_room = simple_async_mock([]) + self.store.get_users_in_room = simple_async_mock([]) @defer.inlineCallbacks def test_regex_user_id_prefix_match(self): self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) self.event.sender = "@irc_foobar:matrix.org" self.assertTrue( - (yield defer.ensureDeferred(self.service.is_interested(self.event))) + ( + yield defer.ensureDeferred( + self.service.is_interested(self.event, self.store) + ) + ) ) @defer.inlineCallbacks @@ -54,7 +60,11 @@ def test_regex_user_id_prefix_no_match(self): self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) self.event.sender = "@someone_else:matrix.org" self.assertFalse( - (yield defer.ensureDeferred(self.service.is_interested(self.event))) + ( + yield defer.ensureDeferred( + self.service.is_interested(self.event, self.store) + ) + ) ) @defer.inlineCallbacks @@ -64,7 +74,11 @@ def test_regex_room_member_is_checked(self): self.event.type = "m.room.member" self.event.state_key = "@irc_foobar:matrix.org" self.assertTrue( - (yield defer.ensureDeferred(self.service.is_interested(self.event))) + ( + yield defer.ensureDeferred( + self.service.is_interested(self.event, self.store) + ) + ) ) @defer.inlineCallbacks @@ -74,7 +88,11 @@ def test_regex_room_id_match(self): ) self.event.room_id = "!some_prefixs0m3th1nGsome_suffix:matrix.org" self.assertTrue( - (yield defer.ensureDeferred(self.service.is_interested(self.event))) + ( + yield defer.ensureDeferred( + self.service.is_interested(self.event, self.store) + ) + ) ) @defer.inlineCallbacks @@ -84,7 +102,11 @@ def test_regex_room_id_no_match(self): ) self.event.room_id = "!XqBunHwQIXUiqCaoxq:matrix.org" self.assertFalse( - (yield defer.ensureDeferred(self.service.is_interested(self.event))) + ( + yield defer.ensureDeferred( + self.service.is_interested(self.event, self.store) + ) + ) ) @defer.inlineCallbacks @@ -183,7 +205,11 @@ def test_interested_in_self(self): self.event.content = {"membership": "invite"} self.event.state_key = self.service.sender self.assertTrue( - (yield defer.ensureDeferred(self.service.is_interested(self.event))) + ( + yield defer.ensureDeferred( + self.service.is_interested(self.event, self.store) + ) + ) ) @defer.inlineCallbacks From fef2e792beec9c126953b4f6b6d2d9f6e31ed96f Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 7 Feb 2022 15:54:13 -0600 Subject: [PATCH 203/474] Fix historical messages backfilling in random order on remote homeservers (MSC2716) (#11114) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix https://github.com/matrix-org/synapse/issues/11091 Fix https://github.com/matrix-org/synapse/issues/10764 (side-stepping the issue because we no longer have to deal with `fake_prev_event_id`) 1. 
Made the `/backfill` response return messages in `(depth, stream_ordering)` order (previously only sorted by `depth`) - Technically, it shouldn't really matter how `/backfill` returns things but I'm just trying to make the `stream_ordering` a little more consistent from the origin to the remote homeservers in order to get the order of messages from `/messages` consistent ([sorted by `(topological_ordering, stream_ordering)`](https://github.com/matrix-org/synapse/blob/develop/docs/development/room-dag-concepts.md#depth-and-stream-ordering)). - Even now that we return backfilled messages in order, it still doesn't guarantee the same `stream_ordering` (and more importantly the [`/messages` order](https://github.com/matrix-org/synapse/blob/develop/docs/development/room-dag-concepts.md#depth-and-stream-ordering)) on the other server. For example, if a room has a bunch of history imported and someone visits a permalink to a historical message back in time, their homeserver will skip over the historical messages in between and insert the permalink as the next message in the `stream_order` and totally throw off the sort. - This will be even more the case when we add the [MSC3030 jump to date API endpoint](https://github.com/matrix-org/matrix-doc/pull/3030) so the static archives can navigate and jump to a certain date. - We're solving this in the future by switching to [online topological ordering](https://github.com/matrix-org/gomatrixserverlib/issues/187) and [chunking](https://github.com/matrix-org/synapse/issues/3785) which by its nature will apply retroactively to fix any inconsistencies introduced by people permalinking 2. As we're navigating `prev_events` to return in `/backfill`, we order by `depth` first (newest -> oldest) and now also tie-break based on the `stream_ordering` (newest -> oldest). This is technically important because MSC2716 inserts a bunch of historical messages at the same `depth` so it's best to be prescriptive about which ones we should process first. In reality, I think the code already looped over the historical messages as expected because the database is already in order. 3. Making the historical state chain and historical event chain float on their own by having no `prev_events` instead of a fake `prev_event` which caused backfill to get clogged with an unresolvable event. Fixes https://github.com/matrix-org/synapse/issues/11091 and https://github.com/matrix-org/synapse/issues/10764 4. We no longer find connected insertion events by finding a potential `prev_event` connection to the current event we're iterating over. We now solely rely on marker events which when processed, add the insertion event as an extremity and the federating homeserver can ask about it when time calls. - Related discussion, https://github.com/matrix-org/synapse/pull/11114#discussion_r741514793 Before | After --- | --- ![](https://user-images.githubusercontent.com/558581/139218681-b465c862-5c49-4702-a59e-466733b0cf45.png) | ![](https://user-images.githubusercontent.com/558581/146453159-a1609e0a-8324-439d-ae44-e4bce43ac6d1.png) #### Why aren't we sorting topologically when receiving backfill events? > The main reason we're going to opt to not sort topologically when receiving backfill events is because it's probably best to do whatever is easiest to make it just work. People will probably have opinions once they look at [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) which could change whatever implementation anyway. 
> > As mentioned, ideally we would do this but code necessary to make the fake edges but it gets confusing and gives an impression of “just whyyyy” (feels icky). This problem also dissolves with online topological ordering. > > -- https://github.com/matrix-org/synapse/pull/11114#discussion_r741517138 See https://github.com/matrix-org/synapse/pull/11114#discussion_r739610091 for the technical difficulties --- changelog.d/11114.bugfix | 1 + synapse/handlers/federation.py | 27 +- synapse/handlers/federation_event.py | 34 +- synapse/handlers/message.py | 20 +- synapse/handlers/room_batch.py | 44 +-- synapse/handlers/room_member.py | 22 +- synapse/rest/client/room_batch.py | 17 +- .../databases/main/event_federation.py | 313 ++++++++++++------ synapse/storage/databases/main/events.py | 13 +- 9 files changed, 342 insertions(+), 149 deletions(-) create mode 100644 changelog.d/11114.bugfix diff --git a/changelog.d/11114.bugfix b/changelog.d/11114.bugfix new file mode 100644 index 000000000000..c6e65df97f90 --- /dev/null +++ b/changelog.d/11114.bugfix @@ -0,0 +1 @@ +Fix [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) historical messages backfilling in random order on remote homeservers. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index a37ae0ca094f..c0f642005ff4 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -166,9 +166,14 @@ async def _maybe_backfill_inner( oldest_events_with_depth = ( await self.store.get_oldest_event_ids_with_depth_in_room(room_id) ) - insertion_events_to_be_backfilled = ( - await self.store.get_insertion_event_backwards_extremities_in_room(room_id) - ) + + insertion_events_to_be_backfilled: Dict[str, int] = {} + if self.hs.config.experimental.msc2716_enabled: + insertion_events_to_be_backfilled = ( + await self.store.get_insertion_event_backward_extremities_in_room( + room_id + ) + ) logger.debug( "_maybe_backfill_inner: extremities oldest_events_with_depth=%s insertion_events_to_be_backfilled=%s", oldest_events_with_depth, @@ -271,11 +276,12 @@ async def _maybe_backfill_inner( ] logger.debug( - "room_id: %s, backfill: current_depth: %s, limit: %s, max_depth: %s, extrems: %s filtered_sorted_extremeties_tuple: %s", + "room_id: %s, backfill: current_depth: %s, limit: %s, max_depth: %s, extrems (%d): %s filtered_sorted_extremeties_tuple: %s", room_id, current_depth, limit, max_depth, + len(sorted_extremeties_tuple), sorted_extremeties_tuple, filtered_sorted_extremeties_tuple, ) @@ -1047,6 +1053,19 @@ async def on_backfill_request( limit = min(limit, 100) events = await self.store.get_backfill_events(room_id, pdu_list, limit) + logger.debug( + "on_backfill_request: backfill events=%s", + [ + "event_id=%s,depth=%d,body=%s,prevs=%s\n" + % ( + event.event_id, + event.depth, + event.content.get("body", event.type), + event.prev_event_ids(), + ) + for event in events + ], + ) events = await filter_events_for_server(self.storage, origin, events) diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 3905f60b3a78..9edc7369d6fe 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -508,7 +508,11 @@ async def backfill( f"room {ev.room_id}, when we were backfilling in {room_id}" ) - await self._process_pulled_events(dest, events, backfilled=True) + await self._process_pulled_events( + dest, + events, + backfilled=True, + ) async def _get_missing_events_for_pdu( self, origin: str, pdu: EventBase, prevs: Set[str], min_depth: int 
@@ -626,11 +630,24 @@ async def _process_pulled_events(
             backfilled: True if this is part of a historical batch of events (inhibits
                 notification to clients, and validation of device keys.)
         """
+        logger.debug(
+            "processing pulled backfilled=%s events=%s",
+            backfilled,
+            [
+                "event_id=%s,depth=%d,body=%s,prevs=%s\n"
+                % (
+                    event.event_id,
+                    event.depth,
+                    event.content.get("body", event.type),
+                    event.prev_event_ids(),
+                )
+                for event in events
+            ],
+        )
 
         # We want to sort these by depth so we process them and
         # tell clients about them in order.
         sorted_events = sorted(events, key=lambda x: x.depth)
-
         for ev in sorted_events:
             with nested_logging_context(ev.event_id):
                 await self._process_pulled_event(origin, ev, backfilled=backfilled)
@@ -992,6 +1009,8 @@ async def _process_received_pdu(
 
         await self._run_push_actions_and_persist_event(event, context, backfilled)
 
+        await self._handle_marker_event(origin, event)
+
         if backfilled or context.rejected:
             return
@@ -1071,8 +1090,6 @@ async def _process_received_pdu(
             event.sender,
         )
 
-        await self._handle_marker_event(origin, event)
-
     async def _resync_device(self, sender: str) -> None:
         """We have detected that the device list for the given user may be out of
         sync, so we try and resync them.
@@ -1323,7 +1340,14 @@ def prep(event: EventBase) -> Optional[Tuple[EventBase, EventContext]]:
             return event, context
 
         events_to_persist = (x for x in (prep(event) for event in fetched_events) if x)
-        await self.persist_events_and_notify(room_id, tuple(events_to_persist))
+        await self.persist_events_and_notify(
+            room_id,
+            tuple(events_to_persist),
+            # Mark these events backfilled as they're historic events that will
+            # eventually be backfilled. For example, missing events we fetch
+            # during backfill should be marked as backfilled as well.
+            backfilled=True,
+        )
 
     async def _check_event_auth(
         self,
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index b37250aa3895..9267e586a83d 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -490,12 +490,12 @@ async def create_event(
         requester: Requester,
         event_dict: dict,
         txn_id: Optional[str] = None,
+        allow_no_prev_events: bool = False,
         prev_event_ids: Optional[List[str]] = None,
         auth_event_ids: Optional[List[str]] = None,
         require_consent: bool = True,
         outlier: bool = False,
         historical: bool = False,
-        allow_no_prev_events: bool = False,
         depth: Optional[int] = None,
     ) -> Tuple[EventBase, EventContext]:
         """
@@ -510,6 +510,10 @@ async def create_event(
             requester
             event_dict: An entire event
             txn_id
+            allow_no_prev_events: Whether to allow this event to be created with an empty
+                list of prev_events. Normally this is prohibited just because most
+                events should have a prev_event and we should only use this in special
+                cases like MSC2716.
             prev_event_ids:
                 the forward extremities to use as the prev_events for the
                 new event.
@@ -604,10 +608,10 @@ async def create_event(
         event, context = await self.create_new_client_event(
             builder=builder,
             requester=requester,
+            allow_no_prev_events=allow_no_prev_events,
             prev_event_ids=prev_event_ids,
             auth_event_ids=auth_event_ids,
             depth=depth,
-            allow_no_prev_events=allow_no_prev_events,
         )
 
         # In an ideal world we wouldn't need the second part of this condition. However, 
However, @@ -764,6 +768,7 @@ async def create_and_send_nonmember_event( self, requester: Requester, event_dict: dict, + allow_no_prev_events: bool = False, prev_event_ids: Optional[List[str]] = None, auth_event_ids: Optional[List[str]] = None, ratelimit: bool = True, @@ -781,6 +786,10 @@ async def create_and_send_nonmember_event( Args: requester: The requester sending the event. event_dict: An entire event. + allow_no_prev_events: Whether to allow this event to be created an empty + list of prev_events. Normally this is prohibited just because most + events should have a prev_event and we should only use this in special + cases like MSC2716. prev_event_ids: The event IDs to use as the prev events. Should normally be left as None to automatically request them @@ -880,16 +889,20 @@ async def create_new_client_event( self, builder: EventBuilder, requester: Optional[Requester] = None, + allow_no_prev_events: bool = False, prev_event_ids: Optional[List[str]] = None, auth_event_ids: Optional[List[str]] = None, depth: Optional[int] = None, - allow_no_prev_events: bool = False, ) -> Tuple[EventBase, EventContext]: """Create a new event for a local client Args: builder: requester: + allow_no_prev_events: Whether to allow this event to be created an empty + list of prev_events. Normally this is prohibited just because most + events should have a prev_event and we should only use this in special + cases like MSC2716. prev_event_ids: the forward extremities to use as the prev_events for the new event. @@ -908,7 +921,6 @@ async def create_new_client_event( Returns: Tuple of created event, context """ - # Strip down the auth_event_ids to only what we need to auth the event. # For example, we don't need extra m.room.member that don't match event.sender full_state_ids_at_event = None diff --git a/synapse/handlers/room_batch.py b/synapse/handlers/room_batch.py index f880aa93d29a..f8137ec04cc5 100644 --- a/synapse/handlers/room_batch.py +++ b/synapse/handlers/room_batch.py @@ -13,10 +13,6 @@ logger = logging.getLogger(__name__) -def generate_fake_event_id() -> str: - return "$fake_" + random_string(43) - - class RoomBatchHandler: def __init__(self, hs: "HomeServer"): self.hs = hs @@ -182,11 +178,12 @@ async def persist_state_events_at_start( state_event_ids_at_start = [] auth_event_ids = initial_auth_event_ids.copy() - # Make the state events float off on their own so we don't have a - # bunch of `@mxid joined the room` noise between each batch - prev_event_id_for_state_chain = generate_fake_event_id() + # Make the state events float off on their own by specifying no + # prev_events for the first one in the chain so we don't have a bunch of + # `@mxid joined the room` noise between each batch. + prev_event_ids_for_state_chain: List[str] = [] - for state_event in state_events_at_start: + for index, state_event in enumerate(state_events_at_start): assert_params_in_dict( state_event, ["type", "origin_server_ts", "content", "sender"] ) @@ -222,7 +219,10 @@ async def persist_state_events_at_start( content=event_dict["content"], outlier=True, historical=True, - prev_event_ids=[prev_event_id_for_state_chain], + # Only the first event in the chain should be floating. + # The rest should hang off each other in a chain. + allow_no_prev_events=index == 0, + prev_event_ids=prev_event_ids_for_state_chain, # Make sure to use a copy of this list because we modify it # later in the loop here. Otherwise it will be the same # reference and also update in the event when we append later. 
@@ -242,7 +242,10 @@ async def persist_state_events_at_start(
                     event_dict,
                     outlier=True,
                     historical=True,
-                    prev_event_ids=[prev_event_id_for_state_chain],
+                    # Only the first event in the chain should be floating.
+                    # The rest should hang off each other in a chain.
+                    allow_no_prev_events=index == 0,
+                    prev_event_ids=prev_event_ids_for_state_chain,
                     # Make sure to use a copy of this list because we modify it
                     # later in the loop here. Otherwise it will be the same
                     # reference and also update in the event when we append later.
@@ -253,7 +256,7 @@ async def persist_state_events_at_start(
             state_event_ids_at_start.append(event_id)
             auth_event_ids.append(event_id)
             # Connect all the state in a floating chain
-            prev_event_id_for_state_chain = event_id
+            prev_event_ids_for_state_chain = [event_id]
 
         return state_event_ids_at_start
 
@@ -261,7 +264,6 @@ async def persist_historical_events(
         self,
         events_to_create: List[JsonDict],
         room_id: str,
-        initial_prev_event_ids: List[str],
         inherited_depth: int,
         auth_event_ids: List[str],
         app_service_requester: Requester,
@@ -277,9 +279,6 @@ async def persist_historical_events(
             events_to_create: List of historical events to create in JSON
                 dictionary format.
             room_id: Room where you want the events persisted in.
-            initial_prev_event_ids: These will be the prev_events for the first
-                event created. Each event created afterwards will point to the
-                previous event created.
             inherited_depth: The depth to create the events at (you will probably get
                 this by calling inherit_depth_from_prev_ids(...)).
             auth_event_ids: Define which events allow you to create the given
@@ -291,11 +290,14 @@ async def persist_historical_events(
         """
         assert app_service_requester.app_service
 
-        prev_event_ids = initial_prev_event_ids.copy()
+        # Make the historical event chain float off on its own by specifying no
+        # prev_events for the first event in the chain which causes the HS to
+        # ask for the state at the start of the batch later.
+        prev_event_ids: List[str] = []
 
         event_ids = []
         events_to_persist = []
-        for ev in events_to_create:
+        for index, ev in enumerate(events_to_create):
             assert_params_in_dict(ev, ["type", "origin_server_ts", "content", "sender"])
 
             assert self.hs.is_mine_id(ev["sender"]), "User must be our own: %s" % (
@@ -319,6 +321,9 @@ async def persist_historical_events(
                     ev["sender"], app_service_requester.app_service
                 ),
                 event_dict,
+                # Only the first event in the chain should be floating.
+                # The rest should hang off each other in a chain.
+                allow_no_prev_events=index == 0,
                 prev_event_ids=event_dict.get("prev_events"),
                 auth_event_ids=auth_event_ids,
                 historical=True,
@@ -370,7 +375,6 @@ async def handle_batch_of_events(
         events_to_create: List[JsonDict],
         room_id: str,
         batch_id_to_connect_to: str,
-        initial_prev_event_ids: List[str],
         inherited_depth: int,
         auth_event_ids: List[str],
         app_service_requester: Requester,
@@ -385,9 +389,6 @@ async def handle_batch_of_events(
             room_id: Room where you want the events created in.
             batch_id_to_connect_to: The batch_id from the insertion event you
                 want this batch to connect to.
-            initial_prev_event_ids: These will be the prev_events for the first
-                event created. Each event created afterwards will point to the
-                previous event created.
             inherited_depth: The depth to create the events at (you will probably get
                 this by calling inherit_depth_from_prev_ids(...)). 
            auth_event_ids: Define which events allow you to create the given
@@ -436,7 +437,6 @@ async def handle_batch_of_events(
         event_ids = await self.persist_historical_events(
             events_to_create=events_to_create,
             room_id=room_id,
-            initial_prev_event_ids=initial_prev_event_ids,
             inherited_depth=inherited_depth,
             auth_event_ids=auth_event_ids,
             app_service_requester=app_service_requester,
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index efe6b4c9aaf3..bf1a47efb020 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -268,7 +268,8 @@ async def _local_membership_update(
         target: UserID,
         room_id: str,
         membership: str,
-        prev_event_ids: List[str],
+        allow_no_prev_events: bool = False,
+        prev_event_ids: Optional[List[str]] = None,
         auth_event_ids: Optional[List[str]] = None,
         txn_id: Optional[str] = None,
         ratelimit: bool = True,
@@ -286,8 +287,12 @@ async def _local_membership_update(
             target:
             room_id:
             membership:
-            prev_event_ids: The event IDs to use as the prev events
+            allow_no_prev_events: Whether to allow this event to be created with an empty
+                list of prev_events. Normally this is prohibited just because most
+                events should have a prev_event and we should only use this in special
+                cases like MSC2716.
+
+            prev_event_ids: The event IDs to use as the prev events
             auth_event_ids:
                 The event ids to use as the auth_events for the new event.
                 Should normally be left as None, which will cause them to be calculated
@@ -344,6 +349,7 @@ async def _local_membership_update(
                 "membership": membership,
             },
             txn_id=txn_id,
+            allow_no_prev_events=allow_no_prev_events,
             prev_event_ids=prev_event_ids,
             auth_event_ids=auth_event_ids,
             require_consent=require_consent,
@@ -446,6 +452,7 @@ async def update_membership(
         require_consent: bool = True,
         outlier: bool = False,
         historical: bool = False,
+        allow_no_prev_events: bool = False,
         prev_event_ids: Optional[List[str]] = None,
         auth_event_ids: Optional[List[str]] = None,
     ) -> Tuple[str, int]:
@@ -470,6 +477,10 @@ async def update_membership(
             historical: Indicates whether the message is being inserted back in time
                 around some existing events. This is used to skip a few checks and
                 mark the event as backfilled.
+            allow_no_prev_events: Whether to allow this event to be created with an empty
+                list of prev_events. Normally this is prohibited just because most
+                events should have a prev_event and we should only use this in special
+                cases like MSC2716.
             prev_event_ids: The event IDs to use as the prev events
             auth_event_ids:
                 The event ids to use as the auth_events for the new event.
@@ -504,6 +515,7 @@ async def update_membership(
                 require_consent=require_consent,
                 outlier=outlier,
                 historical=historical,
+                allow_no_prev_events=allow_no_prev_events,
                 prev_event_ids=prev_event_ids,
                 auth_event_ids=auth_event_ids,
             )
@@ -525,6 +537,7 @@ async def update_membership_locked(
         require_consent: bool = True,
         outlier: bool = False,
         historical: bool = False,
+        allow_no_prev_events: bool = False,
         prev_event_ids: Optional[List[str]] = None,
         auth_event_ids: Optional[List[str]] = None,
     ) -> Tuple[str, int]:
@@ -551,6 +564,10 @@ async def update_membership_locked(
             historical: Indicates whether the message is being inserted back in time
                 around some existing events. This is used to skip a few checks and
                 mark the event as backfilled.
+            allow_no_prev_events: Whether to allow this event to be created with an empty
+                list of prev_events. Normally this is prohibited just because most 
Normally this is prohibited just because most + events should have a prev_event and we should only use this in special + cases like MSC2716. prev_event_ids: The event IDs to use as the prev events auth_event_ids: The event ids to use as the auth_events for the new event. @@ -680,6 +697,7 @@ async def update_membership_locked( membership=effective_membership_state, txn_id=txn_id, ratelimit=ratelimit, + allow_no_prev_events=allow_no_prev_events, prev_event_ids=prev_event_ids, auth_event_ids=auth_event_ids, content=content, diff --git a/synapse/rest/client/room_batch.py b/synapse/rest/client/room_batch.py index e4c9451ae06a..4b6be38327e0 100644 --- a/synapse/rest/client/room_batch.py +++ b/synapse/rest/client/room_batch.py @@ -131,6 +131,14 @@ async def on_POST( prev_event_ids_from_query ) + if not auth_event_ids: + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "No auth events found for given prev_event query parameter. The prev_event=%s probably does not exist." + % prev_event_ids_from_query, + errcode=Codes.INVALID_PARAM, + ) + state_event_ids_at_start = [] # Create and persist all of the state events that float off on their own # before the batch. These will most likely be all of the invite/member @@ -197,21 +205,12 @@ async def on_POST( EventContentFields.MSC2716_NEXT_BATCH_ID ] - # Also connect the historical event chain to the end of the floating - # state chain, which causes the HS to ask for the state at the start of - # the batch later. If there is no state chain to connect to, just make - # the insertion event float itself. - prev_event_ids = [] - if len(state_event_ids_at_start): - prev_event_ids = [state_event_ids_at_start[-1]] - # Create and persist all of the historical events as well as insertion # and batch meta events to make the batch navigable in the DAG. 
event_ids, next_batch_id = await self.room_batch_handler.handle_batch_of_events( events_to_create=events_to_create, room_id=room_id, batch_id_to_connect_to=batch_id_to_connect_to, - initial_prev_event_ids=prev_event_ids, inherited_depth=inherited_depth, auth_event_ids=auth_event_ids, app_service_requester=requester, diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index ca71f073fcbc..22f64741277a 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -16,9 +16,10 @@ from queue import Empty, PriorityQueue from typing import TYPE_CHECKING, Collection, Dict, Iterable, List, Optional, Set, Tuple +import attr from prometheus_client import Counter, Gauge -from synapse.api.constants import MAX_DEPTH +from synapse.api.constants import MAX_DEPTH, EventTypes from synapse.api.errors import StoreError from synapse.api.room_versions import EventFormatVersions, RoomVersion from synapse.events import EventBase, make_event_from_dict @@ -60,6 +61,15 @@ logger = logging.getLogger(__name__) +# All the info we need while iterating the DAG while backfilling +@attr.s(frozen=True, slots=True, auto_attribs=True) +class BackfillQueueNavigationItem: + depth: int + stream_ordering: int + event_id: str + type: str + + class _NoChainCoverIndex(Exception): def __init__(self, room_id: str): super().__init__("Unexpectedly no chain cover for events in %s" % (room_id,)) @@ -74,6 +84,8 @@ def __init__( ): super().__init__(database, db_conn, hs) + self.hs = hs + if hs.config.worker.run_background_tasks: hs.get_clock().looping_call( self._delete_old_forward_extrem_cache, 60 * 60 * 1000 @@ -737,7 +749,7 @@ def get_oldest_event_ids_with_depth_in_room_txn(txn, room_id): room_id, ) - async def get_insertion_event_backwards_extremities_in_room( + async def get_insertion_event_backward_extremities_in_room( self, room_id ) -> Dict[str, int]: """Get the insertion events we know about that we haven't backfilled yet. @@ -754,7 +766,7 @@ async def get_insertion_event_backwards_extremities_in_room( Map from event_id to depth """ - def get_insertion_event_backwards_extremities_in_room_txn(txn, room_id): + def get_insertion_event_backward_extremities_in_room_txn(txn, room_id): sql = """ SELECT b.event_id, MAX(e.depth) FROM insertion_events as i /* We only want insertion events that are also marked as backwards extremities */ @@ -770,8 +782,8 @@ def get_insertion_event_backwards_extremities_in_room_txn(txn, room_id): return dict(txn) return await self.db_pool.runInteraction( - "get_insertion_event_backwards_extremities_in_room", - get_insertion_event_backwards_extremities_in_room_txn, + "get_insertion_event_backward_extremities_in_room", + get_insertion_event_backward_extremities_in_room_txn, room_id, ) @@ -997,143 +1009,242 @@ def get_forward_extremeties_for_room_txn(txn): "get_forward_extremeties_for_room", get_forward_extremeties_for_room_txn ) - async def get_backfill_events(self, room_id: str, event_list: list, limit: int): - """Get a list of Events for a given topic that occurred before (and - including) the events in event_list. Return a list of max size `limit` + def _get_connected_batch_event_backfill_results_txn( + self, txn: LoggingTransaction, insertion_event_id: str, limit: int + ) -> List[BackfillQueueNavigationItem]: + """ + Find any batch connections of a given insertion event. 
+        A batch event points at an insertion event via:
+        batch_event.content[MSC2716_BATCH_ID] -> insertion_event.content[MSC2716_NEXT_BATCH_ID]
 
         Args:
-            room_id
-            event_list
-            limit
+            txn: The database transaction to use
+            insertion_event_id: The event ID to navigate from. We will find
+                batch events that point back at this insertion event.
+            limit: Max number of event IDs to query for and return
+
+        Returns:
+            List of batch events that the backfill queue can process
+        """
+        batch_connection_query = """
+            SELECT e.depth, e.stream_ordering, c.event_id, e.type FROM insertion_events AS i
+            /* Find the batch that connects to the given insertion event */
+            INNER JOIN batch_events AS c
+            ON i.next_batch_id = c.batch_id
+            /* Get the depth of the batch start event from the events table */
+            INNER JOIN events AS e USING (event_id)
+            /* Find an insertion event which matches the given event_id */
+            WHERE i.event_id = ?
+            LIMIT ?
         """
-        event_ids = await self.db_pool.runInteraction(
-            "get_backfill_events",
-            self._get_backfill_events,
-            room_id,
-            event_list,
-            limit,
-        )
-        events = await self.get_events_as_list(event_ids)
-        return sorted(events, key=lambda e: -e.depth)
 
-    def _get_backfill_events(self, txn, room_id, event_list, limit):
-        logger.debug("_get_backfill_events: %s, %r, %s", room_id, event_list, limit)
+        # Find any batch connections for the given insertion event
+        txn.execute(
+            batch_connection_query,
+            (insertion_event_id, limit),
+        )
+        return [
+            BackfillQueueNavigationItem(
+                depth=row[0],
+                stream_ordering=row[1],
+                event_id=row[2],
+                type=row[3],
+            )
+            for row in txn
+        ]
 
-        event_results = set()
+    def _get_connected_prev_event_backfill_results_txn(
+        self, txn: LoggingTransaction, event_id: str, limit: int
+    ) -> List[BackfillQueueNavigationItem]:
+        """
+        Find any events connected by prev_event to the specified event_id.
 
-        # We want to make sure that we do a breadth-first, "depth" ordered
-        # search.
+        Args:
+            txn: The database transaction to use
+            event_id: The event ID to navigate from
+            limit: Max number of event IDs to query for and return
 
+        Returns:
+            List of prev events that the backfill queue can process
+        """
         # Look for the prev_event_id connected to the given event_id
-        query = """
-            SELECT depth, prev_event_id FROM event_edges
-            /* Get the depth of the prev_event_id from the events table */
+        connected_prev_event_query = """
+            SELECT depth, stream_ordering, prev_event_id, events.type FROM event_edges
+            /* Get the depth and stream_ordering of the prev_event_id from the events table */
             INNER JOIN events
             ON prev_event_id = events.event_id
-            /* Find an event which matches the given event_id */
+            /* Look for an edge which matches the given event_id */
             WHERE event_edges.event_id = ?
             AND event_edges.is_state = ?
+            /* Because we can have many events at the same depth,
+             * we want to also tie-break and sort on stream_ordering */
+            ORDER BY depth DESC, stream_ordering DESC
             LIMIT ?
         """
 
-        # Look for the "insertion" events connected to the given event_id
-        connected_insertion_event_query = """
-            SELECT e.depth, i.event_id FROM insertion_event_edges AS i
-            /* Get the depth of the insertion event from the events table */
-            INNER JOIN events AS e USING (event_id)
-            /* Find an insertion event which points via prev_events to the given event_id */
-            WHERE i.insertion_prev_event_id = ?
-            LIMIT ? 
+ txn.execute(
+ connected_prev_event_query,
+ (event_id, False, limit),
+ )
+ return [
+ BackfillQueueNavigationItem(
+ depth=row[0],
+ stream_ordering=row[1],
+ event_id=row[2],
+ type=row[3],
+ )
+ for row in txn
+ ]
+
+ async def get_backfill_events(
+ self, room_id: str, seed_event_id_list: list, limit: int
+ ):
+ """Get a list of Events for a given room that occurred before (and
+ including) the events in seed_event_id_list. Return a list of max size `limit`
+
+ Args:
+ room_id
+ seed_event_id_list
+ limit
"""
+ event_ids = await self.db_pool.runInteraction(
+ "get_backfill_events",
+ self._get_backfill_events,
+ room_id,
+ seed_event_id_list,
+ limit,
+ )
+ events = await self.get_events_as_list(event_ids)
+ return sorted(
+ events, key=lambda e: (-e.depth, -e.internal_metadata.stream_ordering)
+ )

- batch_connection_query = """
- SELECT e.depth, c.event_id FROM insertion_events AS i
- /* Find the batch that connects to the given insertion event */
- INNER JOIN batch_events AS c
- ON i.next_batch_id = c.batch_id
- /* Get the depth of the batch start event from the events table */
- INNER JOIN events AS e USING (event_id)
- /* Find an insertion event which matches the given event_id */
- WHERE i.event_id = ?
- LIMIT ?
+ def _get_backfill_events(self, txn, room_id, seed_event_id_list, limit):
+ """
+ We want to make sure that we do a breadth-first, "depth" ordered search.
+ We also handle navigating historical branches of history connected by
+ insertion and batch events.
"""
+ logger.debug(
+ "_get_backfill_events(room_id=%s): seeding backfill with seed_event_id_list=%s limit=%s",
+ room_id,
+ seed_event_id_list,
+ limit,
+ )
+
+ event_id_results = set()

# In a PriorityQueue, the lowest valued entries are retrieved first.
- # We're using depth as the priority in the queue.
- # Depth is lowest at the oldest-in-time message and highest and
- # newest-in-time message. We add events to the queue with a negative depth so that
- # we process the newest-in-time messages first going backwards in time.
+ # We're using depth as the priority in the queue and tie-break based on
+ # stream_ordering. Depth is lowest at the oldest-in-time message and
+ # highest at the newest-in-time message. We add events to the queue with a
+ # negative depth so that we process the newest-in-time messages first
+ # going backwards in time. stream_ordering follows the same pattern.
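+ # (Editorial illustration, not part of the original patch: two events
+ # at the same depth, (depth=10, stream_ordering=5) and
+ # (depth=10, stream_ordering=7), are queued as (-10, -5, ...) and
+ # (-10, -7, ...); the PriorityQueue pops (-10, -7, ...) first, i.e. the
+ # newer of the two.)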
queue = PriorityQueue() - for event_id in event_list: - depth = self.db_pool.simple_select_one_onecol_txn( + for seed_event_id in seed_event_id_list: + event_lookup_result = self.db_pool.simple_select_one_txn( txn, table="events", - keyvalues={"event_id": event_id, "room_id": room_id}, - retcol="depth", + keyvalues={"event_id": seed_event_id, "room_id": room_id}, + retcols=( + "type", + "depth", + "stream_ordering", + ), allow_none=True, ) - if depth: - queue.put((-depth, event_id)) + if event_lookup_result is not None: + logger.debug( + "_get_backfill_events(room_id=%s): seed_event_id=%s depth=%s stream_ordering=%s type=%s", + room_id, + seed_event_id, + event_lookup_result["depth"], + event_lookup_result["stream_ordering"], + event_lookup_result["type"], + ) - while not queue.empty() and len(event_results) < limit: + if event_lookup_result["depth"]: + queue.put( + ( + -event_lookup_result["depth"], + -event_lookup_result["stream_ordering"], + seed_event_id, + event_lookup_result["type"], + ) + ) + + while not queue.empty() and len(event_id_results) < limit: try: - _, event_id = queue.get_nowait() + _, _, event_id, event_type = queue.get_nowait() except Empty: break - if event_id in event_results: + if event_id in event_id_results: continue - event_results.add(event_id) + event_id_results.add(event_id) # Try and find any potential historical batches of message history. - # - # First we look for an insertion event connected to the current - # event (by prev_event). If we find any, we need to go and try to - # find any batch events connected to the insertion event (by - # batch_id). If we find any, we'll add them to the queue and - # navigate up the DAG like normal in the next iteration of the loop. - txn.execute( - connected_insertion_event_query, (event_id, limit - len(event_results)) - ) - connected_insertion_event_id_results = txn.fetchall() - logger.debug( - "_get_backfill_events: connected_insertion_event_query %s", - connected_insertion_event_id_results, - ) - for row in connected_insertion_event_id_results: - connected_insertion_event_depth = row[0] - connected_insertion_event = row[1] - queue.put((-connected_insertion_event_depth, connected_insertion_event)) + if self.hs.config.experimental.msc2716_enabled: + # We need to go and try to find any batch events connected + # to a given insertion event (by batch_id). If we find any, we'll + # add them to the queue and navigate up the DAG like normal in the + # next iteration of the loop. 
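+ # (Editorial illustration, not part of the original patch: an insertion
+ # event I with content next_batch_id=B is pointed at by a batch event C
+ # with content batch_id=B, so popping I off the queue enqueues C, whose
+ # own prev_events then lead into the historical branch of the DAG.)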
+ if event_type == EventTypes.MSC2716_INSERTION:
+ # Find any batch connections for the given insertion event
+ connected_batch_event_backfill_results = (
+ self._get_connected_batch_event_backfill_results_txn(
+ txn, event_id, limit - len(event_id_results)
+ )
+ )
+ logger.debug(
+ "_get_backfill_events(room_id=%s): connected_batch_event_backfill_results=%s",
+ room_id,
+ connected_batch_event_backfill_results,
+ )
+ for (
+ connected_batch_event_backfill_item
+ ) in connected_batch_event_backfill_results:
+ if (
+ connected_batch_event_backfill_item.event_id
+ not in event_id_results
+ ):
+ queue.put(
+ (
+ -connected_batch_event_backfill_item.depth,
+ -connected_batch_event_backfill_item.stream_ordering,
+ connected_batch_event_backfill_item.event_id,
+ connected_batch_event_backfill_item.type,
+ )
+ )

- # Find any batch connections for the given insertion event
- txn.execute(
- batch_connection_query,
- (connected_insertion_event, limit - len(event_results)),
- )
- batch_start_event_id_results = txn.fetchall()
- logger.debug(
- "_get_backfill_events: batch_start_event_id_results %s",
- batch_start_event_id_results,
+ # Now we just look up the DAG by prev_events as normal
+ connected_prev_event_backfill_results = (
+ self._get_connected_prev_event_backfill_results_txn(
+ txn, event_id, limit - len(event_id_results)
)
- for row in batch_start_event_id_results:
- if row[1] not in event_results:
- queue.put((-row[0], row[1]))
-
- txn.execute(query, (event_id, False, limit - len(event_results)))
- prev_event_id_results = txn.fetchall()
+ )
logger.debug(
- "_get_backfill_events: prev_event_ids %s", prev_event_id_results
+ "_get_backfill_events(room_id=%s): connected_prev_event_backfill_results=%s",
+ room_id,
+ connected_prev_event_backfill_results,
)
+ for (
+ connected_prev_event_backfill_item
+ ) in connected_prev_event_backfill_results:
+ if connected_prev_event_backfill_item.event_id not in event_id_results:
+ queue.put(
+ (
+ -connected_prev_event_backfill_item.depth,
+ -connected_prev_event_backfill_item.stream_ordering,
+ connected_prev_event_backfill_item.event_id,
+ connected_prev_event_backfill_item.type,
+ )
+ )

- for row in prev_event_id_results:
- if row[1] not in event_results:
- queue.put((-row[0], row[1]))
-
- return event_results
+ return event_id_results

async def get_missing_events(self, room_id, earliest_events, latest_events, limit):
ids = await self.db_pool.runInteraction(
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index b7554154ac19..b804185c404e 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -2215,9 +2215,14 @@ def _update_backward_extremeties(self, txn, events):
" SELECT 1 FROM event_backward_extremities"
" WHERE event_id = ? AND room_id = ?"
" )"
+ # 1. Don't add an event as an extremity again if we already persisted it
+ # as a non-outlier.
+ # 2. Don't add an outlier as an extremity if it has no prev_events
" AND NOT EXISTS ("
- " SELECT 1 FROM events WHERE event_id = ? AND room_id = ? "
- " AND outlier = ?"
+ " SELECT 1 FROM events"
+ " LEFT JOIN event_edges edge"
+ " ON edge.event_id = events.event_id"
+ " WHERE events.event_id = ? AND events.room_id = ? AND (events.outlier = ?
OR edge.event_id IS NULL)" " )" ) @@ -2243,6 +2248,10 @@ def _update_backward_extremeties(self, txn, events): (ev.event_id, ev.room_id) for ev in events if not ev.internal_metadata.is_outlier() + # If we encountered an event with no prev_events, then we might + # as well remove it now because it won't ever have anything else + # to backfill from. + or len(ev.prev_event_ids()) == 0 ], ) From 0640f8ebaa34e10a69ad7481b738ae36fda1c103 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 8 Feb 2022 11:20:32 +0100 Subject: [PATCH 204/474] Add a callback to allow modules to deny 3PID (#11854) Part of the Tchap Synapse mainlining. This allows modules to implement extra logic to figure out whether a given 3PID can be added to the local homeserver. In the Tchap use case, this will allow a Synapse module to interface with the custom endpoint /internal_info. --- changelog.d/11854.feature | 1 + .../password_auth_provider_callbacks.md | 19 +++++ synapse/handlers/auth.py | 44 +++++++++++ synapse/module_api/__init__.py | 3 + synapse/rest/client/account.py | 4 +- synapse/rest/client/register.py | 8 +- synapse/util/threepids.py | 13 +++- tests/handlers/test_password_providers.py | 76 ++++++++++++++++++- 8 files changed, 161 insertions(+), 7 deletions(-) create mode 100644 changelog.d/11854.feature diff --git a/changelog.d/11854.feature b/changelog.d/11854.feature new file mode 100644 index 000000000000..975e95bc52e4 --- /dev/null +++ b/changelog.d/11854.feature @@ -0,0 +1 @@ +Add a callback to allow modules to allow or forbid a 3PID (email address, phone number) from being associated to a local account. diff --git a/docs/modules/password_auth_provider_callbacks.md b/docs/modules/password_auth_provider_callbacks.md index 3697e3782ec3..88b59bb09e61 100644 --- a/docs/modules/password_auth_provider_callbacks.md +++ b/docs/modules/password_auth_provider_callbacks.md @@ -166,6 +166,25 @@ any of the subsequent implementations of this callback. If every callback return the username provided by the user is used, if any (otherwise one is automatically generated). +## `is_3pid_allowed` + +_First introduced in Synapse v1.53.0_ + +```python +async def is_3pid_allowed(self, medium: str, address: str, registration: bool) -> bool +``` + +Called when attempting to bind a third-party identifier (i.e. an email address or a phone +number). The module is given the medium of the third-party identifier (which is `email` if +the identifier is an email address, or `msisdn` if the identifier is a phone number) and +its address, as well as a boolean indicating whether the attempt to bind is happening as +part of registering a new user. The module must return a boolean indicating whether the +identifier can be allowed to be bound to an account on the local homeserver. + +If multiple modules implement this callback, they will be considered in order. If a +callback returns `True`, Synapse falls through to the next one. The value of the first +callback that does not return `True` will be used. If this happens, Synapse will not call +any of the subsequent implementations of this callback. 
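+
+For instance, a module could use this callback to restrict which email domains may
+be bound. A minimal sketch is shown below; the module class name and the
+`allowed_email_domains` config key are illustrative assumptions, not part of
+Synapse:
+
+```python
+class ExampleThreepidPolicyModule:
+    def __init__(self, config: dict, api):
+        self._api = api
+        # Hypothetical config value: email domains we are willing to bind.
+        self._allowed_domains = config.get("allowed_email_domains", [])
+
+        self._api.register_password_auth_provider_callbacks(
+            is_3pid_allowed=self.is_3pid_allowed,
+        )
+
+    async def is_3pid_allowed(
+        self, medium: str, address: str, registration: bool
+    ) -> bool:
+        # Only restrict email addresses; returning True for other media
+        # (e.g. msisdn) falls through to any remaining callbacks.
+        if medium != "email":
+            return True
+        return address.split("@")[-1] in self._allowed_domains
+```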
## Example diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index e32c93e234d8..6959d1aa7e47 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -2064,6 +2064,7 @@ def run(*args: Tuple, **kwargs: Dict) -> Awaitable: [JsonDict, JsonDict], Awaitable[Optional[str]], ] +IS_3PID_ALLOWED_CALLBACK = Callable[[str, str, bool], Awaitable[bool]] class PasswordAuthProvider: @@ -2079,6 +2080,7 @@ def __init__(self) -> None: self.get_username_for_registration_callbacks: List[ GET_USERNAME_FOR_REGISTRATION_CALLBACK ] = [] + self.is_3pid_allowed_callbacks: List[IS_3PID_ALLOWED_CALLBACK] = [] # Mapping from login type to login parameters self._supported_login_types: Dict[str, Iterable[str]] = {} @@ -2090,6 +2092,7 @@ def register_password_auth_provider_callbacks( self, check_3pid_auth: Optional[CHECK_3PID_AUTH_CALLBACK] = None, on_logged_out: Optional[ON_LOGGED_OUT_CALLBACK] = None, + is_3pid_allowed: Optional[IS_3PID_ALLOWED_CALLBACK] = None, auth_checkers: Optional[ Dict[Tuple[str, Tuple[str, ...]], CHECK_AUTH_CALLBACK] ] = None, @@ -2145,6 +2148,9 @@ def register_password_auth_provider_callbacks( get_username_for_registration, ) + if is_3pid_allowed is not None: + self.is_3pid_allowed_callbacks.append(is_3pid_allowed) + def get_supported_login_types(self) -> Mapping[str, Iterable[str]]: """Get the login types supported by this password provider @@ -2343,3 +2349,41 @@ async def get_username_for_registration( raise SynapseError(code=500, msg="Internal Server Error") return None + + async def is_3pid_allowed( + self, + medium: str, + address: str, + registration: bool, + ) -> bool: + """Check if the user can be allowed to bind a 3PID on this homeserver. + + Args: + medium: The medium of the 3PID. + address: The address of the 3PID. + registration: Whether the 3PID is being bound when registering a new user. + + Returns: + Whether the 3PID is allowed to be bound on this homeserver + """ + for callback in self.is_3pid_allowed_callbacks: + try: + res = await callback(medium, address, registration) + + if res is False: + return res + elif not isinstance(res, bool): + # mypy complains that this line is unreachable because it assumes the + # data returned by the module fits the expected type. We just want + # to make sure this is the case. 
+ logger.warning( # type: ignore[unreachable]
+ "Ignoring non-boolean value returned by"
+ " is_3pid_allowed callback %s: %s",
+ callback,
+ res,
+ )
+ except Exception as e:
+ logger.error("Module raised an exception in is_3pid_allowed: %s", e)
+ raise SynapseError(code=500, msg="Internal Server Error")
+
+ return True
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index 29fbc73c971d..a91a7fa3ceb0 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -72,6 +72,7 @@
CHECK_3PID_AUTH_CALLBACK,
CHECK_AUTH_CALLBACK,
GET_USERNAME_FOR_REGISTRATION_CALLBACK,
+ IS_3PID_ALLOWED_CALLBACK,
ON_LOGGED_OUT_CALLBACK,
AuthHandler,
)
@@ -312,6 +313,7 @@ def register_password_auth_provider_callbacks(
auth_checkers: Optional[
Dict[Tuple[str, Tuple[str, ...]], CHECK_AUTH_CALLBACK]
] = None,
+ is_3pid_allowed: Optional[IS_3PID_ALLOWED_CALLBACK] = None,
get_username_for_registration: Optional[
GET_USERNAME_FOR_REGISTRATION_CALLBACK
] = None,
@@ -323,6 +325,7 @@ def register_password_auth_provider_callbacks(
return self._password_auth_provider.register_password_auth_provider_callbacks(
check_3pid_auth=check_3pid_auth,
on_logged_out=on_logged_out,
+ is_3pid_allowed=is_3pid_allowed,
auth_checkers=auth_checkers,
get_username_for_registration=get_username_for_registration,
)
diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py
index 6b272658fc3c..cfa2aee76d49 100644
--- a/synapse/rest/client/account.py
+++ b/synapse/rest/client/account.py
@@ -385,7 +385,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
send_attempt = body["send_attempt"]
next_link = body.get("next_link") # Optional param

- if not check_3pid_allowed(self.hs, "email", email):
+ if not await check_3pid_allowed(self.hs, "email", email):
raise SynapseError(
403,
"Your email domain is not authorized on this server",
@@ -468,7 +468,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:

msisdn = phone_number_to_msisdn(country, phone_number)

- if not check_3pid_allowed(self.hs, "msisdn", msisdn):
+ if not await check_3pid_allowed(self.hs, "msisdn", msisdn):
raise SynapseError(
403,
"Account phone numbers are not authorized on this server",
diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py
index c283313e8da3..c965e2bda2f9 100644
--- a/synapse/rest/client/register.py
+++ b/synapse/rest/client/register.py
@@ -112,7 +112,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
send_attempt = body["send_attempt"]
next_link = body.get("next_link") # Optional param

- if not check_3pid_allowed(self.hs, "email", email):
+ if not await check_3pid_allowed(self.hs, "email", email, registration=True):
raise SynapseError(
403,
"Your email domain is not authorized to register on this server",
@@ -192,7 +192,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:

msisdn = phone_number_to_msisdn(country, phone_number)

- if not check_3pid_allowed(self.hs, "msisdn", msisdn):
+ if not await check_3pid_allowed(self.hs, "msisdn", msisdn, registration=True):
raise SynapseError(
403,
"Phone numbers are not authorized to register on this server",
@@ -616,7 +616,9 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
medium = auth_result[login_type]["medium"]
address = auth_result[login_type]["address"]

- if not check_3pid_allowed(self.hs, medium, address):
+ if not await check_3pid_allowed(
+ self.hs, medium, address, registration=True
+ ):
raise
SynapseError(
403,
"Third party identifiers (email/phone numbers)"
diff --git a/synapse/util/threepids.py b/synapse/util/threepids.py
index 389adf00f619..1e9c2faa644a 100644
--- a/synapse/util/threepids.py
+++ b/synapse/util/threepids.py
@@ -32,7 +32,12 @@
MAX_EMAIL_ADDRESS_LENGTH = 500


-def check_3pid_allowed(hs: "HomeServer", medium: str, address: str) -> bool:
+async def check_3pid_allowed(
+ hs: "HomeServer",
+ medium: str,
+ address: str,
+ registration: bool = False,
+) -> bool:
"""Checks whether a given format of 3PID is allowed to be used on this HS

Args:
@@ -40,9 +45,15 @@ def check_3pid_allowed(hs: "HomeServer", medium: str, address: str) -> bool:
medium: 3pid medium - e.g. email, msisdn
address: address within that medium (e.g. "wotan@matrix.org")
msisdns need to first have been canonicalised
+ registration: whether we want to bind the 3PID as part of registering a new user.
+
Returns:
bool: whether the 3PID medium/address is allowed to be added to this HS
"""
+ if not await hs.get_password_auth_provider().is_3pid_allowed(
+ medium, address, registration
+ ):
+ return False

if hs.config.registration.allowed_local_3pids:
for constraint in hs.config.registration.allowed_local_3pids:
diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py
index 94809cb8bea3..4740dd0a65e3 100644
--- a/tests/handlers/test_password_providers.py
+++ b/tests/handlers/test_password_providers.py
@@ -21,13 +21,15 @@
import synapse
from synapse.api.constants import LoginType
+from synapse.api.errors import Codes
from synapse.handlers.auth import load_legacy_password_auth_providers
from synapse.module_api import ModuleApi
-from synapse.rest.client import devices, login, logout, register
+from synapse.rest.client import account, devices, login, logout, register
from synapse.types import JsonDict, UserID

from tests import unittest
from tests.server import FakeChannel
+from tests.test_utils import make_awaitable
from tests.unittest import override_config

# (possibly experimental) login flows we expect to appear in the list after the normal
@@ -158,6 +160,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase):
devices.register_servlets,
logout.register_servlets,
register.register_servlets,
+ account.register_servlets,
]

def setUp(self):
@@ -803,6 +806,77 @@ def test_username_uia(self):
# Check that the callback has been called.
m.assert_called_once()

+ # Set some email configuration so the test doesn't fail because of its absence.
+ @override_config({"email": {"notif_from": "noreply@test"}})
+ def test_3pid_allowed(self):
+ """Tests that an `is_3pid_allowed` callback forbidding a 3PID makes Synapse
+ refuse to bind the new 3PID, and that one allowing a 3PID makes Synapse accept
+ the binding. Also checks that the module is passed a boolean indicating whether
+ the user to bind this 3PID to is currently registering.
+ """
+ self._test_3pid_allowed("rin", False)
+ self._test_3pid_allowed("kitay", True)
+
+ def _test_3pid_allowed(self, username: str, registration: bool):
+ """Tests that the "is_3pid_allowed" module callback is called correctly, using
+ either /register or /account URLs depending on the arguments.
+
+ Args:
+ username: The username to use for the test.
+ registration: Whether to test with registration URLs.
+ """ + self.hs.get_identity_handler().send_threepid_validation = Mock( + return_value=make_awaitable(0), + ) + + m = Mock(return_value=make_awaitable(False)) + self.hs.get_password_auth_provider().is_3pid_allowed_callbacks = [m] + + self.register_user(username, "password") + tok = self.login(username, "password") + + if registration: + url = "/register/email/requestToken" + else: + url = "/account/3pid/email/requestToken" + + channel = self.make_request( + "POST", + url, + { + "client_secret": "foo", + "email": "foo@test.com", + "send_attempt": 0, + }, + access_token=tok, + ) + self.assertEqual(channel.code, 403, channel.result) + self.assertEqual( + channel.json_body["errcode"], + Codes.THREEPID_DENIED, + channel.json_body, + ) + + m.assert_called_once_with("email", "foo@test.com", registration) + + m = Mock(return_value=make_awaitable(True)) + self.hs.get_password_auth_provider().is_3pid_allowed_callbacks = [m] + + channel = self.make_request( + "POST", + url, + { + "client_secret": "foo", + "email": "bar@test.com", + "send_attempt": 0, + }, + access_token=tok, + ) + self.assertEqual(channel.code, 200, channel.result) + self.assertIn("sid", channel.json_body) + + m.assert_called_once_with("email", "bar@test.com", registration) + def _setup_get_username_for_registration(self) -> Mock: """Registers a get_username_for_registration callback that appends "-foo" to the username the client is trying to register. From 3655585e85bfebd9165151d9d69fd907d65d6db0 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 8 Feb 2022 10:52:22 +0000 Subject: [PATCH 205/474] Add a docstring to `add_device_change_to_streams` and fix some nearby types (#11912) --- changelog.d/11912.misc | 1 + synapse/storage/databases/main/devices.py | 22 ++++++++++++++++------ 2 files changed, 17 insertions(+), 6 deletions(-) create mode 100644 changelog.d/11912.misc diff --git a/changelog.d/11912.misc b/changelog.d/11912.misc new file mode 100644 index 000000000000..805588c2e963 --- /dev/null +++ b/changelog.d/11912.misc @@ -0,0 +1 @@ +Various refactors to the application service notifier code. \ No newline at end of file diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index b2a5cd9a6508..8d845fe9516f 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -1496,13 +1496,23 @@ def _update_remote_device_list_cache_txn( ) async def add_device_change_to_streams( - self, user_id: str, device_ids: Collection[str], hosts: List[str] - ) -> int: + self, user_id: str, device_ids: Collection[str], hosts: Collection[str] + ) -> Optional[int]: """Persist that a user's devices have been updated, and which hosts (if any) should be poked. + + Args: + user_id: The ID of the user whose device changed. + device_ids: The IDs of any changed devices. If empty, this function will + return None. + hosts: The remote destinations that should be notified of the change. + + Returns: + The maximum stream ID of device list updates that were added to the database, or + None if no updates were added. 
""" if not device_ids: - return + return None async with self._device_list_id_gen.get_next_mult( len(device_ids) @@ -1573,11 +1583,11 @@ def _add_device_outbound_poke_to_stream_txn( self, txn: LoggingTransaction, user_id: str, - device_ids: Collection[str], - hosts: List[str], + device_ids: Iterable[str], + hosts: Collection[str], stream_ids: List[str], context: Dict[str, str], - ): + ) -> None: for host in hosts: txn.call_after( self._device_list_federation_stream_cache.entity_has_changed, From 7d56b6c083d7d2eb683795d482453923c3e8be15 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 8 Feb 2022 11:35:05 +0000 Subject: [PATCH 206/474] 1.52.0 --- CHANGES.md | 6 ++++++ debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 36707db03bf7..cee0549036c5 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 1.52.0 (2022-02-08) +=========================== + +No significant changes. + + Synapse 1.52.0rc1 (2022-02-01) ============================== diff --git a/debian/changelog b/debian/changelog index a45888565554..64ea103f3e8f 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.52.0) stable; urgency=medium + + * New synapse release 1.52.0. + + -- Synapse Packaging team Tue, 08 Feb 2022 11:34:54 +0000 + matrix-synapse-py3 (1.52.0~rc1) stable; urgency=medium * New synapse release 1.52.0~rc1. diff --git a/synapse/__init__.py b/synapse/__init__.py index 5e6503306101..a23563937ad4 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.52.0rc1" +__version__ = "1.52.0" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 5cdd4913100961f943f6432d9fbdaa20907142c2 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 8 Feb 2022 11:47:35 +0000 Subject: [PATCH 207/474] Add words about the Twisted security fix --- CHANGES.md | 11 +++++++++-- docs/upgrade.md | 11 +++++++++++ 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index cee0549036c5..9bccf3f5ec2a 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,7 +1,14 @@ Synapse 1.52.0 (2022-02-08) =========================== -No significant changes. +No significant changes since 1.52.0rc1. + +During the making of this release, the developers of Twisted have released +[Twisted 22.1.0](https://github.com/twisted/twisted/releases/tag/twisted-22.1.0), which +fixes [a security issue](https://github.com/twisted/twisted/security/advisories/GHSA-92x2-jw7w-xvvx) +within Twisted. We do not believe Synapse to be vulnerable to any security problem caused +by this issue, though we advise server administrators to update their local version of +Twisted if they can. Synapse 1.52.0rc1 (2022-02-01) @@ -13,7 +20,7 @@ Features - Remove account data (including client config, push rules and ignored users) upon user deactivation. ([\#11621](https://github.com/matrix-org/synapse/issues/11621), [\#11788](https://github.com/matrix-org/synapse/issues/11788), [\#11789](https://github.com/matrix-org/synapse/issues/11789)) - Add an admin API to reset connection timeouts for remote server. ([\#11639](https://github.com/matrix-org/synapse/issues/11639)) - Add an admin API to get a list of rooms that federate with a given remote homeserver. 
([\#11658](https://github.com/matrix-org/synapse/issues/11658)) -- Add a config flag to inhibit M_USER_IN_USE during registration. ([\#11743](https://github.com/matrix-org/synapse/issues/11743)) +- Add a config flag to inhibit `M_USER_IN_USE` during registration. ([\#11743](https://github.com/matrix-org/synapse/issues/11743)) - Add a module callback to set username at registration. ([\#11790](https://github.com/matrix-org/synapse/issues/11790)) - Allow configuring a maximum file size as well as a list of allowed content types for avatars. ([\#11846](https://github.com/matrix-org/synapse/issues/11846)) diff --git a/docs/upgrade.md b/docs/upgrade.md index f455d257babf..913f97385d48 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -85,6 +85,17 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.52.0 + +## Twisted security release + +During the making of this release, the developers of Twisted have released +[Twisted 22.1.0](https://github.com/twisted/twisted/releases/tag/twisted-22.1.0), which +fixes [a security issue](https://github.com/twisted/twisted/security/advisories/GHSA-92x2-jw7w-xvvx) +within Twisted. We do not believe Synapse to be vulnerable to any security problem caused +by this issue, though we advise server administrators to update their local version of +Twisted if they can. + # Upgrading to v1.51.0 ## Deprecation of `webclient` listeners and non-HTTP(S) `web_client_location` From 1aa2231e271f1b0b08757fc6f94c7c69c2993b25 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 8 Feb 2022 11:55:46 +0000 Subject: [PATCH 208/474] Fix wording --- CHANGES.md | 13 +++++++------ docs/upgrade.md | 13 +++++++------ 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 9bccf3f5ec2a..958024ff0c84 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -3,12 +3,13 @@ Synapse 1.52.0 (2022-02-08) No significant changes since 1.52.0rc1. -During the making of this release, the developers of Twisted have released -[Twisted 22.1.0](https://github.com/twisted/twisted/releases/tag/twisted-22.1.0), which -fixes [a security issue](https://github.com/twisted/twisted/security/advisories/GHSA-92x2-jw7w-xvvx) -within Twisted. We do not believe Synapse to be vulnerable to any security problem caused -by this issue, though we advise server administrators to update their local version of -Twisted if they can. +Note that [Twisted 22.1.0](https://github.com/twisted/twisted/releases/tag/twisted-22.1.0) +has recently been released, which fixes a [security issue](https://github.com/twisted/twisted/security/advisories/GHSA-92x2-jw7w-xvvx) +within the Twisted library. We do not believe Synapse is affected by this vulnerability, +though we advise server administrators who installed Synapse via pip to upgrade Twisted +with `pip install --upgrade Twisted` as a matter of good practice. The Docker image +`matrixdotorg/synapse` and the Debian packages from `packages.matrix.org` are using the +updated library. Synapse 1.52.0rc1 (2022-02-01) diff --git a/docs/upgrade.md b/docs/upgrade.md index 913f97385d48..0105f87f90b7 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -89,12 +89,13 @@ process, for example: ## Twisted security release -During the making of this release, the developers of Twisted have released -[Twisted 22.1.0](https://github.com/twisted/twisted/releases/tag/twisted-22.1.0), which -fixes [a security issue](https://github.com/twisted/twisted/security/advisories/GHSA-92x2-jw7w-xvvx) -within Twisted. 
We do not believe Synapse to be vulnerable to any security problem caused -by this issue, though we advise server administrators to update their local version of -Twisted if they can. +Note that [Twisted 22.1.0](https://github.com/twisted/twisted/releases/tag/twisted-22.1.0) +has recently been released, which fixes a [security issue](https://github.com/twisted/twisted/security/advisories/GHSA-92x2-jw7w-xvvx) +within the Twisted library. We do not believe Synapse is affected by this vulnerability, +though we advise server administrators who installed Synapse via pip to upgrade Twisted +with `pip install --upgrade Twisted` as a matter of good practice. The Docker image +`matrixdotorg/synapse` and the Debian packages from `packages.matrix.org` are using the +updated library. # Upgrading to v1.51.0 From 380c3d40f4e48c9cb871b7fe3488c42e1cad3883 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 8 Feb 2022 07:06:25 -0500 Subject: [PATCH 209/474] Return JSON errors for unknown resources under /matrix/client. (#11930) Re-applies the changes from 3e0cfd447e17658a937fe62555db9e968f00b15b (#11602), reverting d93ec0a0ba5f6d2fbf2bc321086d4ad4c03136e0 (#11764) now that the conflict with the webclient listener was fixed in 119edf51eb3e4f5ed5139dc370f5d7aed46edc1c (#11895). --- changelog.d/11930.bugfix | 1 + synapse/app/homeserver.py | 9 ++------- 2 files changed, 3 insertions(+), 7 deletions(-) create mode 100644 changelog.d/11930.bugfix diff --git a/changelog.d/11930.bugfix b/changelog.d/11930.bugfix new file mode 100644 index 000000000000..e0dfbf1a1520 --- /dev/null +++ b/changelog.d/11930.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug that some unknown endpoints would return HTML error pages instead of JSON `M_UNRECOGNIZED` errors. diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 24d55b049488..66e1a213319c 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -26,6 +26,7 @@ import synapse.config.logger from synapse import events from synapse.api.urls import ( + CLIENT_API_PREFIX, FEDERATION_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_R0_PREFIX, @@ -195,13 +196,7 @@ def _configure_named_resource( resources.update( { - "/_matrix/client/api/v1": client_resource, - "/_matrix/client/r0": client_resource, - "/_matrix/client/v1": client_resource, - "/_matrix/client/v3": client_resource, - "/_matrix/client/unstable": client_resource, - "/_matrix/client/v2_alpha": client_resource, - "/_matrix/client/versions": client_resource, + CLIENT_API_PREFIX: client_resource, "/.well-known": well_known_resource(self), "/_synapse/admin": AdminRestResource(self), **build_synapse_client_resource_tree(self), From 8b309adb436c162510ed1402f33b8741d71fc058 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 8 Feb 2022 07:43:30 -0500 Subject: [PATCH 210/474] Fetch edits for multiple events in a single query. (#11660) This should reduce database usage when fetching bundled aggregations as the number of individual queries (and round trips to the database) are reduced. --- changelog.d/11660.misc | 1 + synapse/storage/databases/main/events.py | 4 +- synapse/storage/databases/main/relations.py | 150 +++++++++++++------- 3 files changed, 101 insertions(+), 54 deletions(-) create mode 100644 changelog.d/11660.misc diff --git a/changelog.d/11660.misc b/changelog.d/11660.misc new file mode 100644 index 000000000000..47e085e4d931 --- /dev/null +++ b/changelog.d/11660.misc @@ -0,0 +1 @@ +Improve performance when fetching bundled aggregations for multiple events. 
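Editorial note (not part of the patch): the diff below pairs a per-event `@cached`
method with a batched `@cachedList` method so that many events can be filled with
one query. A simplified sketch of that pattern, with made-up `get_thing` /
`_get_things` names:

    from typing import Collection, Dict, Optional
    from synapse.util.caches.descriptors import cached, cachedList

    class ThingStore:
        @cached()
        def get_thing(self, thing_id: str) -> Optional[str]:
            # Acts only as the per-key cache; lookups are served by the
            # batched method below, which populates this cache.
            raise NotImplementedError()

        @cachedList(cached_method_name="get_thing", list_name="thing_ids")
        async def _get_things(
            self, thing_ids: Collection[str]
        ) -> Dict[str, Optional[str]]:
            # Run ONE database query for all uncached thing_ids and return
            # a dict mapping each requested ID to its value (or None).
            return {thing_id: None for thing_id in thing_ids}

This mirrors how `get_applicable_edit` and `_get_applicable_edits` are paired in
the diff that follows.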
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index b804185c404e..2e44c7771591 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1801,9 +1801,7 @@ def _handle_event_relations( ) if rel_type == RelationTypes.REPLACE: - txn.call_after( - self.store.get_applicable_edit.invalidate, (parent_id, event.room_id) - ) + txn.call_after(self.store.get_applicable_edit.invalidate, (parent_id,)) if rel_type == RelationTypes.THREAD: txn.call_after( diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 37468a518381..6180b1729605 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -13,12 +13,22 @@ # limitations under the License. import logging -from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Union, cast +from typing import ( + TYPE_CHECKING, + Collection, + Dict, + Iterable, + List, + Optional, + Tuple, + Union, + cast, +) import attr from frozendict import frozendict -from synapse.api.constants import EventTypes, RelationTypes +from synapse.api.constants import RelationTypes from synapse.events import EventBase from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( @@ -28,13 +38,14 @@ make_in_list_sql_clause, ) from synapse.storage.databases.main.stream import generate_pagination_where_clause +from synapse.storage.engines import PostgresEngine from synapse.storage.relations import ( AggregationPaginationToken, PaginationChunk, RelationPaginationToken, ) from synapse.types import JsonDict -from synapse.util.caches.descriptors import cached +from synapse.util.caches.descriptors import cached, cachedList if TYPE_CHECKING: from synapse.server import HomeServer @@ -340,20 +351,24 @@ def _get_aggregation_groups_for_event_txn( ) @cached() - async def get_applicable_edit( - self, event_id: str, room_id: str - ) -> Optional[EventBase]: + def get_applicable_edit(self, event_id: str) -> Optional[EventBase]: + raise NotImplementedError() + + @cachedList(cached_method_name="get_applicable_edit", list_name="event_ids") + async def _get_applicable_edits( + self, event_ids: Collection[str] + ) -> Dict[str, Optional[EventBase]]: """Get the most recent edit (if any) that has happened for the given - event. + events. Correctly handles checking whether edits were allowed to happen. Args: - event_id: The original event ID - room_id: The original event's room ID + event_ids: The original event IDs Returns: - The most recent edit, if any. + A map of the most recent edit for each event. If there are no edits, + the event will map to None. """ # We only allow edits for `m.room.message` events that have the same sender @@ -362,37 +377,67 @@ async def get_applicable_edit( # Fetches latest edit that has the same type and sender as the # original, and is an `m.room.message`. - sql = """ - SELECT edit.event_id FROM events AS edit - INNER JOIN event_relations USING (event_id) - INNER JOIN events AS original ON - original.event_id = relates_to_id - AND edit.type = original.type - AND edit.sender = original.sender - WHERE - relates_to_id = ? - AND relation_type = ? - AND edit.room_id = ? 
- AND edit.type = 'm.room.message'
- ORDER by edit.origin_server_ts DESC, edit.event_id DESC
- LIMIT 1
- """
+ if isinstance(self.database_engine, PostgresEngine):
+ # The `DISTINCT ON` clause will pick the *first* row it encounters,
+ # so ordering by origin server ts + event ID desc will ensure we get
+ # the latest edit.
+ sql = """
+ SELECT DISTINCT ON (original.event_id) original.event_id, edit.event_id FROM events AS edit
+ INNER JOIN event_relations USING (event_id)
+ INNER JOIN events AS original ON
+ original.event_id = relates_to_id
+ AND edit.type = original.type
+ AND edit.sender = original.sender
+ AND edit.room_id = original.room_id
+ WHERE
+ %s
+ AND relation_type = ?
+ AND edit.type = 'm.room.message'
+ ORDER by original.event_id DESC, edit.origin_server_ts DESC, edit.event_id DESC
+ """
+ else:
+ # SQLite uses a simplified query which returns all edits for an
+ # original event. The results are then de-duplicated when turned into
+ # a dict. Due to the chosen ordering, the latest edit stomps on
+ # earlier edits.
+ sql = """
+ SELECT original.event_id, edit.event_id FROM events AS edit
+ INNER JOIN event_relations USING (event_id)
+ INNER JOIN events AS original ON
+ original.event_id = relates_to_id
+ AND edit.type = original.type
+ AND edit.sender = original.sender
+ AND edit.room_id = original.room_id
+ WHERE
+ %s
+ AND relation_type = ?
+ AND edit.type = 'm.room.message'
+ ORDER by edit.origin_server_ts, edit.event_id
+ """

- def _get_applicable_edit_txn(txn: LoggingTransaction) -> Optional[str]:
- txn.execute(sql, (event_id, RelationTypes.REPLACE, room_id))
- row = txn.fetchone()
- if row:
- return row[0]
- return None
+ def _get_applicable_edits_txn(txn: LoggingTransaction) -> Dict[str, str]:
+ clause, args = make_in_list_sql_clause(
+ txn.database_engine, "relates_to_id", event_ids
+ )
+ args.append(RelationTypes.REPLACE)

- edit_id = await self.db_pool.runInteraction(
- "get_applicable_edit", _get_applicable_edit_txn
+ txn.execute(sql % (clause,), args)
+ return dict(cast(Iterable[Tuple[str, str]], txn.fetchall()))
+
+ edit_ids = await self.db_pool.runInteraction(
+ "get_applicable_edits", _get_applicable_edits_txn
)

- if not edit_id:
- return None
+ edits = await self.get_events(edit_ids.values()) # type: ignore[attr-defined]

- return await self.get_event(edit_id, allow_none=True) # type: ignore[attr-defined]
+ # Map the original event IDs to the edit events.
+ #
+ # There might not be an edit event due to there being no edits or
+ # due to the event not being known; either case is treated the same.
+ return {
+ original_event_id: edits.get(edit_ids.get(original_event_id))
+ for original_event_id in event_ids
+ }

@cached()
async def get_thread_summary(
@@ -612,9 +657,6 @@ async def _get_bundled_aggregation_for_event(
The bundled aggregations for an event, if bundled aggregations are
enabled and the event can have bundled aggregations.
"""
- # State events and redacted events do not get bundled aggregations.
- if event.is_state() or event.internal_metadata.is_redacted():
- return None

# Do not bundle aggregations for an event which represents an edit or an
# annotation. It does not make sense for them to have related events.
@@ -642,13 +684,6 @@ async def _get_bundled_aggregation_for_event( if references.chunk: aggregations.references = references.to_dict() - edit = None - if event.type == EventTypes.Message: - edit = await self.get_applicable_edit(event_id, room_id) - - if edit: - aggregations.replace = edit - # If this event is the start of a thread, include a summary of the replies. if self._msc3440_enabled: thread_count, latest_thread_event = await self.get_thread_summary( @@ -668,9 +703,7 @@ async def _get_bundled_aggregation_for_event( return aggregations async def get_bundled_aggregations( - self, - events: Iterable[EventBase], - user_id: str, + self, events: Iterable[EventBase], user_id: str ) -> Dict[str, BundledAggregations]: """Generate bundled aggregations for events. @@ -683,13 +716,28 @@ async def get_bundled_aggregations( events may have bundled aggregations in the results. """ - # TODO Parallelize. - results = {} + # State events and redacted events do not get bundled aggregations. + events = [ + event + for event in events + if not event.is_state() and not event.internal_metadata.is_redacted() + ] + + # event ID -> bundled aggregation in non-serialized form. + results: Dict[str, BundledAggregations] = {} + + # Fetch other relations per event. for event in events: event_result = await self._get_bundled_aggregation_for_event(event, user_id) if event_result: results[event.event_id] = event_result + # Fetch any edits. + event_ids = [event.event_id for event in events] + edits = await self._get_applicable_edits(event_ids) + for event_id, edit in edits.items(): + results.setdefault(event_id, BundledAggregations()).replace = edit + return results From 63d90f10eccf804a6ca5bfa51cdb1b0d0233fe95 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 8 Feb 2022 07:44:39 -0500 Subject: [PATCH 211/474] Add missing type hints to synapse.replication.http. (#11856) --- changelog.d/11856.misc | 1 + synapse/replication/http/__init__.py | 2 +- synapse/replication/http/_base.py | 31 ++++++---- synapse/replication/http/account_data.py | 38 +++++++++--- synapse/replication/http/devices.py | 14 +++-- synapse/replication/http/federation.py | 65 ++++++++++++------- synapse/replication/http/login.py | 47 +++++++------- synapse/replication/http/membership.py | 31 +++++----- synapse/replication/http/presence.py | 38 +++++++----- synapse/replication/http/push.py | 14 +++-- synapse/replication/http/register.py | 79 +++++++++++++----------- synapse/replication/http/send_event.py | 44 ++++++++----- synapse/replication/http/streams.py | 16 +++-- 13 files changed, 258 insertions(+), 162 deletions(-) create mode 100644 changelog.d/11856.misc diff --git a/changelog.d/11856.misc b/changelog.d/11856.misc new file mode 100644 index 000000000000..1d3a0030f77f --- /dev/null +++ b/changelog.d/11856.misc @@ -0,0 +1 @@ +Add missing type hints to replication code. 
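Editorial note (not part of the patch): once typed, the endpoints below all follow
the same shape. A sketch with a made-up servlet name, matching the base-class
signatures in `_base.py`:

    from typing import Tuple
    from twisted.web.server import Request
    from synapse.replication.http._base import ReplicationEndpoint
    from synapse.types import JsonDict

    class ExampleReplicationServlet(ReplicationEndpoint):
        NAME = "example"
        PATH_ARGS = ("user_id",)

        @staticmethod
        async def _serialize_payload(user_id: str) -> JsonDict:  # type: ignore[override]
            # The base class declares `**kwargs`, so concrete overrides with
            # explicit parameters carry a `type: ignore[override]` marker.
            return {}

        async def _handle_request(  # type: ignore[override]
            self, request: Request, user_id: str
        ) -> Tuple[int, JsonDict]:
            # Handlers uniformly return (HTTP status code, JSON body).
            return 200, {}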
diff --git a/synapse/replication/http/__init__.py b/synapse/replication/http/__init__.py index 1457d9d59b1f..aec040ee19f6 100644 --- a/synapse/replication/http/__init__.py +++ b/synapse/replication/http/__init__.py @@ -40,7 +40,7 @@ def __init__(self, hs: "HomeServer"): super().__init__(hs, canonical_json=False, extract_context=True) self.register_servlets(hs) - def register_servlets(self, hs: "HomeServer"): + def register_servlets(self, hs: "HomeServer") -> None: send_event.register_servlets(hs, self) federation.register_servlets(hs, self) presence.register_servlets(hs, self) diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index 585332b244a4..bc1d28dd19b1 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -15,16 +15,20 @@ import abc import logging import re -import urllib +import urllib.parse from inspect import signature from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, List, Tuple from prometheus_client import Counter, Gauge +from twisted.web.server import Request + from synapse.api.errors import HttpResponseException, SynapseError from synapse.http import RequestTimedOutError +from synapse.http.server import HttpServer from synapse.logging import opentracing from synapse.logging.opentracing import trace +from synapse.types import JsonDict from synapse.util.caches.response_cache import ResponseCache from synapse.util.stringutils import random_string @@ -113,10 +117,12 @@ def __init__(self, hs: "HomeServer"): if hs.config.worker.worker_replication_secret: self._replication_secret = hs.config.worker.worker_replication_secret - def _check_auth(self, request) -> None: + def _check_auth(self, request: Request) -> None: # Get the authorization header. auth_headers = request.requestHeaders.getRawHeaders(b"Authorization") + if not auth_headers: + raise RuntimeError("Missing Authorization header.") if len(auth_headers) > 1: raise RuntimeError("Too many Authorization headers.") parts = auth_headers[0].split(b" ") @@ -129,7 +135,7 @@ def _check_auth(self, request) -> None: raise RuntimeError("Invalid Authorization header.") @abc.abstractmethod - async def _serialize_payload(**kwargs): + async def _serialize_payload(**kwargs) -> JsonDict: """Static method that is called when creating a request. Concrete implementations should have explicit parameters (rather than @@ -144,19 +150,20 @@ async def _serialize_payload(**kwargs): return {} @abc.abstractmethod - async def _handle_request(self, request, **kwargs): + async def _handle_request( + self, request: Request, **kwargs: Any + ) -> Tuple[int, JsonDict]: """Handle incoming request. This is called with the request object and PATH_ARGS. Returns: - tuple[int, dict]: HTTP status code and a JSON serialisable dict - to be used as response body of request. + HTTP status code and a JSON serialisable dict to be used as response + body of request. """ - pass @classmethod - def make_client(cls, hs: "HomeServer"): + def make_client(cls, hs: "HomeServer") -> Callable: """Create a client that makes requests. 
Returns a callable that accepts the same parameters as @@ -182,7 +189,7 @@ def make_client(cls, hs: "HomeServer"): ) @trace(opname="outgoing_replication_request") - async def send_request(*, instance_name="master", **kwargs): + async def send_request(*, instance_name: str = "master", **kwargs: Any) -> Any: with outgoing_gauge.track_inprogress(): if instance_name == local_instance_name: raise Exception("Trying to send HTTP request to self") @@ -268,7 +275,7 @@ async def send_request(*, instance_name="master", **kwargs): return send_request - def register(self, http_server): + def register(self, http_server: HttpServer) -> None: """Called by the server to register this as a handler to the appropriate path. """ @@ -289,7 +296,9 @@ def register(self, http_server): self.__class__.__name__, ) - async def _check_auth_and_handle(self, request, **kwargs): + async def _check_auth_and_handle( + self, request: Request, **kwargs: Any + ) -> Tuple[int, JsonDict]: """Called on new incoming requests when caching is enabled. Checks if there is a cached response for the request and returns that, otherwise calls `_handle_request` and caches its response. diff --git a/synapse/replication/http/account_data.py b/synapse/replication/http/account_data.py index 5f0f225aa953..310f60915324 100644 --- a/synapse/replication/http/account_data.py +++ b/synapse/replication/http/account_data.py @@ -13,10 +13,14 @@ # limitations under the License. import logging -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Tuple +from twisted.web.server import Request + +from synapse.http.server import HttpServer from synapse.http.servlet import parse_json_object_from_request from synapse.replication.http._base import ReplicationEndpoint +from synapse.types import JsonDict if TYPE_CHECKING: from synapse.server import HomeServer @@ -48,14 +52,18 @@ def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() @staticmethod - async def _serialize_payload(user_id, account_data_type, content): + async def _serialize_payload( # type: ignore[override] + user_id: str, account_data_type: str, content: JsonDict + ) -> JsonDict: payload = { "content": content, } return payload - async def _handle_request(self, request, user_id, account_data_type): + async def _handle_request( # type: ignore[override] + self, request: Request, user_id: str, account_data_type: str + ) -> Tuple[int, JsonDict]: content = parse_json_object_from_request(request) max_stream_id = await self.handler.add_account_data_for_user( @@ -89,14 +97,18 @@ def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() @staticmethod - async def _serialize_payload(user_id, room_id, account_data_type, content): + async def _serialize_payload( # type: ignore[override] + user_id: str, room_id: str, account_data_type: str, content: JsonDict + ) -> JsonDict: payload = { "content": content, } return payload - async def _handle_request(self, request, user_id, room_id, account_data_type): + async def _handle_request( # type: ignore[override] + self, request: Request, user_id: str, room_id: str, account_data_type: str + ) -> Tuple[int, JsonDict]: content = parse_json_object_from_request(request) max_stream_id = await self.handler.add_account_data_to_room( @@ -130,14 +142,18 @@ def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() @staticmethod - async def _serialize_payload(user_id, room_id, tag, content): + async def _serialize_payload( # type: ignore[override] + user_id: str, room_id: str, tag: str, content: JsonDict + ) -> JsonDict: payload = { 
"content": content, } return payload - async def _handle_request(self, request, user_id, room_id, tag): + async def _handle_request( # type: ignore[override] + self, request: Request, user_id: str, room_id: str, tag: str + ) -> Tuple[int, JsonDict]: content = parse_json_object_from_request(request) max_stream_id = await self.handler.add_tag_to_room( @@ -173,11 +189,13 @@ def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() @staticmethod - async def _serialize_payload(user_id, room_id, tag): + async def _serialize_payload(user_id: str, room_id: str, tag: str) -> JsonDict: # type: ignore[override] return {} - async def _handle_request(self, request, user_id, room_id, tag): + async def _handle_request( # type: ignore[override] + self, request: Request, user_id: str, room_id: str, tag: str + ) -> Tuple[int, JsonDict]: max_stream_id = await self.handler.remove_tag_from_room( user_id, room_id, @@ -187,7 +205,7 @@ async def _handle_request(self, request, user_id, room_id, tag): return 200, {"max_stream_id": max_stream_id} -def register_servlets(hs: "HomeServer", http_server): +def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: ReplicationUserAccountDataRestServlet(hs).register(http_server) ReplicationRoomAccountDataRestServlet(hs).register(http_server) ReplicationAddTagRestServlet(hs).register(http_server) diff --git a/synapse/replication/http/devices.py b/synapse/replication/http/devices.py index 42dffb39cbef..f2f40129fe68 100644 --- a/synapse/replication/http/devices.py +++ b/synapse/replication/http/devices.py @@ -13,9 +13,13 @@ # limitations under the License. import logging -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Tuple +from twisted.web.server import Request + +from synapse.http.server import HttpServer from synapse.replication.http._base import ReplicationEndpoint +from synapse.types import JsonDict if TYPE_CHECKING: from synapse.server import HomeServer @@ -63,14 +67,16 @@ def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() @staticmethod - async def _serialize_payload(user_id): + async def _serialize_payload(user_id: str) -> JsonDict: # type: ignore[override] return {} - async def _handle_request(self, request, user_id): + async def _handle_request( # type: ignore[override] + self, request: Request, user_id: str + ) -> Tuple[int, JsonDict]: user_devices = await self.device_list_updater.user_device_resync(user_id) return 200, user_devices -def register_servlets(hs: "HomeServer", http_server): +def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: ReplicationUserDevicesResyncRestServlet(hs).register(http_server) diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py index 5ed535c90dea..d529c8a19fa2 100644 --- a/synapse/replication/http/federation.py +++ b/synapse/replication/http/federation.py @@ -13,17 +13,22 @@ # limitations under the License. 
import logging -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, List, Tuple -from synapse.api.room_versions import KNOWN_ROOM_VERSIONS -from synapse.events import make_event_from_dict +from twisted.web.server import Request + +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion +from synapse.events import EventBase, make_event_from_dict from synapse.events.snapshot import EventContext +from synapse.http.server import HttpServer from synapse.http.servlet import parse_json_object_from_request from synapse.replication.http._base import ReplicationEndpoint +from synapse.types import JsonDict from synapse.util.metrics import Measure if TYPE_CHECKING: from synapse.server import HomeServer + from synapse.storage.databases.main import DataStore logger = logging.getLogger(__name__) @@ -69,14 +74,18 @@ def __init__(self, hs: "HomeServer"): self.federation_event_handler = hs.get_federation_event_handler() @staticmethod - async def _serialize_payload(store, room_id, event_and_contexts, backfilled): + async def _serialize_payload( # type: ignore[override] + store: "DataStore", + room_id: str, + event_and_contexts: List[Tuple[EventBase, EventContext]], + backfilled: bool, + ) -> JsonDict: """ Args: store - room_id (str) - event_and_contexts (list[tuple[FrozenEvent, EventContext]]) - backfilled (bool): Whether or not the events are the result of - backfilling + room_id + event_and_contexts + backfilled: Whether or not the events are the result of backfilling """ event_payloads = [] for event, context in event_and_contexts: @@ -102,7 +111,7 @@ async def _serialize_payload(store, room_id, event_and_contexts, backfilled): return payload - async def _handle_request(self, request): + async def _handle_request(self, request: Request) -> Tuple[int, JsonDict]: # type: ignore[override] with Measure(self.clock, "repl_fed_send_events_parse"): content = parse_json_object_from_request(request) @@ -163,10 +172,14 @@ def __init__(self, hs: "HomeServer"): self.registry = hs.get_federation_registry() @staticmethod - async def _serialize_payload(edu_type, origin, content): + async def _serialize_payload( # type: ignore[override] + edu_type: str, origin: str, content: JsonDict + ) -> JsonDict: return {"origin": origin, "content": content} - async def _handle_request(self, request, edu_type): + async def _handle_request( # type: ignore[override] + self, request: Request, edu_type: str + ) -> Tuple[int, JsonDict]: with Measure(self.clock, "repl_fed_send_edu_parse"): content = parse_json_object_from_request(request) @@ -175,9 +188,9 @@ async def _handle_request(self, request, edu_type): logger.info("Got %r edu from %s", edu_type, origin) - result = await self.registry.on_edu(edu_type, origin, edu_content) + await self.registry.on_edu(edu_type, origin, edu_content) - return 200, result + return 200, {} class ReplicationGetQueryRestServlet(ReplicationEndpoint): @@ -206,15 +219,17 @@ def __init__(self, hs: "HomeServer"): self.registry = hs.get_federation_registry() @staticmethod - async def _serialize_payload(query_type, args): + async def _serialize_payload(query_type: str, args: JsonDict) -> JsonDict: # type: ignore[override] """ Args: - query_type (str) - args (dict): The arguments received for the given query type + query_type + args: The arguments received for the given query type """ return {"args": args} - async def _handle_request(self, request, query_type): + async def _handle_request( # type: ignore[override] + self, request: Request, query_type: str + ) -> Tuple[int, JsonDict]: 
with Measure(self.clock, "repl_fed_query_parse"): content = parse_json_object_from_request(request) @@ -248,14 +263,16 @@ def __init__(self, hs: "HomeServer"): self.store = hs.get_datastore() @staticmethod - async def _serialize_payload(room_id, args): + async def _serialize_payload(room_id: str) -> JsonDict: # type: ignore[override] """ Args: - room_id (str) + room_id """ return {} - async def _handle_request(self, request, room_id): + async def _handle_request( # type: ignore[override] + self, request: Request, room_id: str + ) -> Tuple[int, JsonDict]: await self.store.clean_room_for_join(room_id) return 200, {} @@ -283,17 +300,19 @@ def __init__(self, hs: "HomeServer"): self.store = hs.get_datastore() @staticmethod - async def _serialize_payload(room_id, room_version): + async def _serialize_payload(room_id: str, room_version: RoomVersion) -> JsonDict: # type: ignore[override] return {"room_version": room_version.identifier} - async def _handle_request(self, request, room_id): + async def _handle_request( # type: ignore[override] + self, request: Request, room_id: str + ) -> Tuple[int, JsonDict]: content = parse_json_object_from_request(request) room_version = KNOWN_ROOM_VERSIONS[content["room_version"]] await self.store.maybe_store_room_on_outlier_membership(room_id, room_version) return 200, {} -def register_servlets(hs: "HomeServer", http_server): +def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: ReplicationFederationSendEventsRestServlet(hs).register(http_server) ReplicationFederationSendEduRestServlet(hs).register(http_server) ReplicationGetQueryRestServlet(hs).register(http_server) diff --git a/synapse/replication/http/login.py b/synapse/replication/http/login.py index daacc34ceac4..c68e18da129b 100644 --- a/synapse/replication/http/login.py +++ b/synapse/replication/http/login.py @@ -13,10 +13,14 @@ # limitations under the License. import logging -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional, Tuple, cast +from twisted.web.server import Request + +from synapse.http.server import HttpServer from synapse.http.servlet import parse_json_object_from_request from synapse.replication.http._base import ReplicationEndpoint +from synapse.types import JsonDict if TYPE_CHECKING: from synapse.server import HomeServer @@ -39,25 +43,24 @@ def __init__(self, hs: "HomeServer"): self.registration_handler = hs.get_registration_handler() @staticmethod - async def _serialize_payload( - user_id, - device_id, - initial_display_name, - is_guest, - is_appservice_ghost, - should_issue_refresh_token, - auth_provider_id, - auth_provider_session_id, - ): + async def _serialize_payload( # type: ignore[override] + user_id: str, + device_id: Optional[str], + initial_display_name: Optional[str], + is_guest: bool, + is_appservice_ghost: bool, + should_issue_refresh_token: bool, + auth_provider_id: Optional[str], + auth_provider_session_id: Optional[str], + ) -> JsonDict: """ Args: - user_id (int) - device_id (str|None): Device ID to use, if None a new one is - generated. - initial_display_name (str|None) - is_guest (bool) - is_appservice_ghost (bool) - should_issue_refresh_token (bool) + user_id + device_id: Device ID to use, if None a new one is generated. 
+ initial_display_name + is_guest + is_appservice_ghost + should_issue_refresh_token """ return { "device_id": device_id, @@ -69,7 +72,9 @@ async def _serialize_payload( "auth_provider_session_id": auth_provider_session_id, } - async def _handle_request(self, request, user_id): + async def _handle_request( # type: ignore[override] + self, request: Request, user_id: str + ) -> Tuple[int, JsonDict]: content = parse_json_object_from_request(request) device_id = content["device_id"] @@ -91,8 +96,8 @@ async def _handle_request(self, request, user_id): auth_provider_session_id=auth_provider_session_id, ) - return 200, res + return 200, cast(JsonDict, res) -def register_servlets(hs: "HomeServer", http_server): +def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: RegisterDeviceReplicationServlet(hs).register(http_server) diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py index 7371c240b274..0145858e4719 100644 --- a/synapse/replication/http/membership.py +++ b/synapse/replication/http/membership.py @@ -16,6 +16,7 @@ from twisted.web.server import Request +from synapse.http.server import HttpServer from synapse.http.servlet import parse_json_object_from_request from synapse.http.site import SynapseRequest from synapse.replication.http._base import ReplicationEndpoint @@ -53,7 +54,7 @@ def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() @staticmethod - async def _serialize_payload( # type: ignore + async def _serialize_payload( # type: ignore[override] requester: Requester, room_id: str, user_id: str, @@ -77,7 +78,7 @@ async def _serialize_payload( # type: ignore "content": content, } - async def _handle_request( # type: ignore + async def _handle_request( # type: ignore[override] self, request: SynapseRequest, room_id: str, user_id: str ) -> Tuple[int, JsonDict]: content = parse_json_object_from_request(request) @@ -122,13 +123,13 @@ def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() @staticmethod - async def _serialize_payload( # type: ignore + async def _serialize_payload( # type: ignore[override] requester: Requester, room_id: str, user_id: str, remote_room_hosts: List[str], content: JsonDict, - ): + ) -> JsonDict: """ Args: requester: The user making the request, according to the access token. 
@@ -143,12 +144,12 @@ async def _serialize_payload( # type: ignore "content": content, } - async def _handle_request( # type: ignore + async def _handle_request( # type: ignore[override] self, request: SynapseRequest, room_id: str, user_id: str, - ): + ) -> Tuple[int, JsonDict]: content = parse_json_object_from_request(request) remote_room_hosts = content["remote_room_hosts"] @@ -192,7 +193,7 @@ def __init__(self, hs: "HomeServer"): self.member_handler = hs.get_room_member_handler() @staticmethod - async def _serialize_payload( # type: ignore + async def _serialize_payload( # type: ignore[override] invite_event_id: str, txn_id: Optional[str], requester: Requester, @@ -215,7 +216,7 @@ async def _serialize_payload( # type: ignore "content": content, } - async def _handle_request( # type: ignore + async def _handle_request( # type: ignore[override] self, request: SynapseRequest, invite_event_id: str ) -> Tuple[int, JsonDict]: content = parse_json_object_from_request(request) @@ -262,12 +263,12 @@ def __init__(self, hs: "HomeServer"): self.member_handler = hs.get_room_member_handler() @staticmethod - async def _serialize_payload( # type: ignore + async def _serialize_payload( # type: ignore[override] knock_event_id: str, txn_id: Optional[str], requester: Requester, content: JsonDict, - ): + ) -> JsonDict: """ Args: knock_event_id: The ID of the knock to be rescinded. @@ -281,11 +282,11 @@ async def _serialize_payload( # type: ignore "content": content, } - async def _handle_request( # type: ignore + async def _handle_request( # type: ignore[override] self, request: SynapseRequest, knock_event_id: str, - ): + ) -> Tuple[int, JsonDict]: content = parse_json_object_from_request(request) txn_id = content["txn_id"] @@ -329,7 +330,7 @@ def __init__(self, hs: "HomeServer"): self.distributor = hs.get_distributor() @staticmethod - async def _serialize_payload( # type: ignore + async def _serialize_payload( # type: ignore[override] room_id: str, user_id: str, change: str ) -> JsonDict: """ @@ -345,7 +346,7 @@ async def _serialize_payload( # type: ignore return {} - async def _handle_request( # type: ignore + async def _handle_request( # type: ignore[override] self, request: Request, room_id: str, user_id: str, change: str ) -> Tuple[int, JsonDict]: logger.info("user membership change: %s in %s", user_id, room_id) @@ -360,7 +361,7 @@ async def _handle_request( # type: ignore return 200, {} -def register_servlets(hs: "HomeServer", http_server): +def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: ReplicationRemoteJoinRestServlet(hs).register(http_server) ReplicationRemoteRejectInviteRestServlet(hs).register(http_server) ReplicationUserJoinedLeftRoomRestServlet(hs).register(http_server) diff --git a/synapse/replication/http/presence.py b/synapse/replication/http/presence.py index 63143085d521..4a5b08f56f73 100644 --- a/synapse/replication/http/presence.py +++ b/synapse/replication/http/presence.py @@ -13,11 +13,14 @@ # limitations under the License. 
import logging -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Tuple +from twisted.web.server import Request + +from synapse.http.server import HttpServer from synapse.http.servlet import parse_json_object_from_request from synapse.replication.http._base import ReplicationEndpoint -from synapse.types import UserID +from synapse.types import JsonDict, UserID if TYPE_CHECKING: from synapse.server import HomeServer @@ -49,18 +52,17 @@ def __init__(self, hs: "HomeServer"): self._presence_handler = hs.get_presence_handler() @staticmethod - async def _serialize_payload(user_id): + async def _serialize_payload(user_id: str) -> JsonDict: # type: ignore[override] return {} - async def _handle_request(self, request, user_id): + async def _handle_request( # type: ignore[override] + self, request: Request, user_id: str + ) -> Tuple[int, JsonDict]: await self._presence_handler.bump_presence_active_time( UserID.from_string(user_id) ) - return ( - 200, - {}, - ) + return (200, {}) class ReplicationPresenceSetState(ReplicationEndpoint): @@ -92,16 +94,21 @@ def __init__(self, hs: "HomeServer"): self._presence_handler = hs.get_presence_handler() @staticmethod - async def _serialize_payload( - user_id, state, ignore_status_msg=False, force_notify=False - ): + async def _serialize_payload( # type: ignore[override] + user_id: str, + state: JsonDict, + ignore_status_msg: bool = False, + force_notify: bool = False, + ) -> JsonDict: return { "state": state, "ignore_status_msg": ignore_status_msg, "force_notify": force_notify, } - async def _handle_request(self, request, user_id): + async def _handle_request( # type: ignore[override] + self, request: Request, user_id: str + ) -> Tuple[int, JsonDict]: content = parse_json_object_from_request(request) await self._presence_handler.set_state( @@ -111,12 +118,9 @@ async def _handle_request(self, request, user_id): content["force_notify"], ) - return ( - 200, - {}, - ) + return (200, {}) -def register_servlets(hs: "HomeServer", http_server): +def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: ReplicationBumpPresenceActiveTime(hs).register(http_server) ReplicationPresenceSetState(hs).register(http_server) diff --git a/synapse/replication/http/push.py b/synapse/replication/http/push.py index 6c8db3061ee2..af5c2f66a735 100644 --- a/synapse/replication/http/push.py +++ b/synapse/replication/http/push.py @@ -13,10 +13,14 @@ # limitations under the License. 
import logging -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Tuple +from twisted.web.server import Request + +from synapse.http.server import HttpServer from synapse.http.servlet import parse_json_object_from_request from synapse.replication.http._base import ReplicationEndpoint +from synapse.types import JsonDict if TYPE_CHECKING: from synapse.server import HomeServer @@ -48,7 +52,7 @@ def __init__(self, hs: "HomeServer"): self.pusher_pool = hs.get_pusherpool() @staticmethod - async def _serialize_payload(app_id, pushkey, user_id): + async def _serialize_payload(app_id: str, pushkey: str, user_id: str) -> JsonDict: # type: ignore[override] payload = { "app_id": app_id, "pushkey": pushkey, @@ -56,7 +60,9 @@ async def _serialize_payload(app_id, pushkey, user_id): return payload - async def _handle_request(self, request, user_id): + async def _handle_request( # type: ignore[override] + self, request: Request, user_id: str + ) -> Tuple[int, JsonDict]: content = parse_json_object_from_request(request) app_id = content["app_id"] @@ -67,5 +73,5 @@ async def _handle_request(self, request, user_id): return 200, {} -def register_servlets(hs: "HomeServer", http_server): +def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: ReplicationRemovePusherRestServlet(hs).register(http_server) diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py index 7adfbb666f39..c7f751b70d0d 100644 --- a/synapse/replication/http/register.py +++ b/synapse/replication/http/register.py @@ -13,10 +13,14 @@ # limitations under the License. import logging -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional, Tuple +from twisted.web.server import Request + +from synapse.http.server import HttpServer from synapse.http.servlet import parse_json_object_from_request from synapse.replication.http._base import ReplicationEndpoint +from synapse.types import JsonDict if TYPE_CHECKING: from synapse.server import HomeServer @@ -36,34 +40,34 @@ def __init__(self, hs: "HomeServer"): self.registration_handler = hs.get_registration_handler() @staticmethod - async def _serialize_payload( - user_id, - password_hash, - was_guest, - make_guest, - appservice_id, - create_profile_with_displayname, - admin, - user_type, - address, - shadow_banned, - ): + async def _serialize_payload( # type: ignore[override] + user_id: str, + password_hash: Optional[str], + was_guest: bool, + make_guest: bool, + appservice_id: Optional[str], + create_profile_with_displayname: Optional[str], + admin: bool, + user_type: Optional[str], + address: Optional[str], + shadow_banned: bool, + ) -> JsonDict: """ Args: - user_id (str): The desired user ID to register. - password_hash (str|None): Optional. The password hash for this user. - was_guest (bool): Optional. Whether this is a guest account being - upgraded to a non-guest account. - make_guest (boolean): True if the the new user should be guest, - false to add a regular user account. - appservice_id (str|None): The ID of the appservice registering the user. - create_profile_with_displayname (unicode|None): Optionally create a - profile for the user, setting their displayname to the given value - admin (boolean): is an admin user? - user_type (str|None): type of user. One of the values from - api.constants.UserTypes, or None for a normal user. - address (str|None): the IP address used to perform the regitration. - shadow_banned (bool): Whether to shadow-ban the user + user_id: The desired user ID to register. 
+            password_hash: Optional. The password hash for this user.
+            was_guest: Optional. Whether this is a guest account being upgraded
+                to a non-guest account.
+            make_guest: True if the new user should be a guest, false to add a
+                regular user account.
+            appservice_id: The ID of the appservice registering the user.
+            create_profile_with_displayname: Optionally create a profile for the
+                user, setting their displayname to the given value.
+            admin: Whether the user is an admin.
+            user_type: Type of user. One of the values from api.constants.UserTypes,
+                or None for a normal user.
+            address: The IP address used to perform the registration.
+            shadow_banned: Whether to shadow-ban the user.
         """
         return {
             "password_hash": password_hash,
@@ -77,7 +81,9 @@ async def _serialize_payload(
             "shadow_banned": shadow_banned,
         }

-    async def _handle_request(self, request, user_id):
+    async def _handle_request(  # type: ignore[override]
+        self, request: Request, user_id: str
+    ) -> Tuple[int, JsonDict]:
         content = parse_json_object_from_request(request)

         await self.registration_handler.check_registration_ratelimit(content["address"])
@@ -110,18 +116,21 @@ def __init__(self, hs: "HomeServer"):
         self.registration_handler = hs.get_registration_handler()

     @staticmethod
-    async def _serialize_payload(user_id, auth_result, access_token):
+    async def _serialize_payload(  # type: ignore[override]
+        user_id: str, auth_result: JsonDict, access_token: Optional[str]
+    ) -> JsonDict:
         """
         Args:
-            user_id (str): The user ID that consented
-            auth_result (dict): The authenticated credentials of the newly
-                registered user.
-            access_token (str|None): The access token of the newly logged in
+            user_id: The user ID that consented
+            auth_result: The authenticated credentials of the newly registered user.
+            access_token: The access token of the newly logged in
                 device, or None if `inhibit_login` enabled.
         """
         return {"auth_result": auth_result, "access_token": access_token}

-    async def _handle_request(self, request, user_id):
+    async def _handle_request(  # type: ignore[override]
+        self, request: Request, user_id: str
+    ) -> Tuple[int, JsonDict]:
         content = parse_json_object_from_request(request)

         auth_result = content["auth_result"]
@@ -134,6 +143,6 @@ async def _handle_request(self, request, user_id):
         return 200, {}


-def register_servlets(hs: "HomeServer", http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
     ReplicationRegisterServlet(hs).register(http_server)
     ReplicationPostRegisterActionsServlet(hs).register(http_server)
diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py
index 9f6851d0592e..33e98daf8aba 100644
--- a/synapse/replication/http/send_event.py
+++ b/synapse/replication/http/send_event.py
@@ -13,18 +13,22 @@
 # limitations under the License.
 import logging
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, List, Tuple
+
+from twisted.web.server import Request

 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
-from synapse.events import make_event_from_dict
+from synapse.events import EventBase, make_event_from_dict
 from synapse.events.snapshot import EventContext
+from synapse.http.server import HttpServer
 from synapse.http.servlet import parse_json_object_from_request
 from synapse.replication.http._base import ReplicationEndpoint
-from synapse.types import Requester, UserID
+from synapse.types import JsonDict, Requester, UserID
 from synapse.util.metrics import Measure

 if TYPE_CHECKING:
     from synapse.server import HomeServer
+    from synapse.storage.databases.main import DataStore

 logger = logging.getLogger(__name__)

@@ -70,18 +74,24 @@ def __init__(self, hs: "HomeServer"):
         self.clock = hs.get_clock()

     @staticmethod
-    async def _serialize_payload(
-        event_id, store, event, context, requester, ratelimit, extra_users
-    ):
+    async def _serialize_payload(  # type: ignore[override]
+        event_id: str,
+        store: "DataStore",
+        event: EventBase,
+        context: EventContext,
+        requester: Requester,
+        ratelimit: bool,
+        extra_users: List[UserID],
+    ) -> JsonDict:
         """
         Args:
-            event_id (str)
-            store (DataStore)
-            requester (Requester)
-            event (FrozenEvent)
-            context (EventContext)
-            ratelimit (bool)
-            extra_users (list(UserID)): Any extra users to notify about event
+            event_id
+            store
+            event
+            context
+            requester
+            ratelimit
+            extra_users: Any extra users to notify about the event
         """
         serialized_context = await context.serialize(event, store)

@@ -100,7 +110,9 @@ async def _serialize_payload(

         return payload

-    async def _handle_request(self, request, event_id):
+    async def _handle_request(  # type: ignore[override]
+        self, request: Request, event_id: str
+    ) -> Tuple[int, JsonDict]:
         with Measure(self.clock, "repl_send_event_parse"):
             content = parse_json_object_from_request(request)

@@ -120,8 +132,6 @@ async def _handle_request(self, request, event_id):
         ratelimit = content["ratelimit"]
         extra_users = [UserID.from_string(u) for u in content["extra_users"]]

-        request.requester = requester
-
         logger.info(
             "Got event to send with ID: %s into room: %s", event.event_id, event.room_id
         )
@@ -139,5 +149,5 @@ async def _handle_request(self, request, event_id):
         )


-def register_servlets(hs: "HomeServer", http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
     ReplicationSendEventRestServlet(hs).register(http_server)
diff --git a/synapse/replication/http/streams.py b/synapse/replication/http/streams.py
index 3223bc2432b2..c06522536254 100644
--- a/synapse/replication/http/streams.py
+++ b/synapse/replication/http/streams.py
@@ -13,11 +13,15 @@
 # limitations under the License.
 import logging
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Tuple
+
+from twisted.web.server import Request

 from synapse.api.errors import SynapseError
+from synapse.http.server import HttpServer
 from synapse.http.servlet import parse_integer
 from synapse.replication.http._base import ReplicationEndpoint
+from synapse.types import JsonDict

 if TYPE_CHECKING:
     from synapse.server import HomeServer

@@ -57,10 +61,14 @@ def __init__(self, hs: "HomeServer"):
         self.streams = hs.get_replication_streams()

     @staticmethod
-    async def _serialize_payload(stream_name, from_token, upto_token):
+    async def _serialize_payload(  # type: ignore[override]
+        stream_name: str, from_token: int, upto_token: int
+    ) -> JsonDict:
         return {"from_token": from_token, "upto_token": upto_token}

-    async def _handle_request(self, request, stream_name):
+    async def _handle_request(  # type: ignore[override]
+        self, request: Request, stream_name: str
+    ) -> Tuple[int, JsonDict]:
         stream = self.streams.get(stream_name)
         if stream is None:
             raise SynapseError(400, "Unknown stream")
@@ -78,5 +86,5 @@ async def _handle_request(self, request, stream_name):
         )


-def register_servlets(hs: "HomeServer", http_server):
+def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
     ReplicationGetStreamUpdates(hs).register(http_server)

From 6c0984e3f007de469af74d8b6a432c8704633b03 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Tue, 8 Feb 2022 09:15:59 -0500
Subject: [PATCH 212/474] Remove unnecessary ignores due to Twisted upgrade.
 (#11939)

Twisted 22.1.0 fixed some internal type hints, allowing Synapse
to remove `# type: ignore` comments on the parameters it passes
to connectTCP.
---
 changelog.d/11939.misc             | 1 +
 synapse/handlers/send_email.py     | 2 +-
 synapse/replication/tcp/handler.py | 4 ++--
 synapse/replication/tcp/redis.py   | 2 +-
 4 files changed, 5 insertions(+), 4 deletions(-)
 create mode 100644 changelog.d/11939.misc

diff --git a/changelog.d/11939.misc b/changelog.d/11939.misc
new file mode 100644
index 000000000000..317526f9efb4
--- /dev/null
+++ b/changelog.d/11939.misc
@@ -0,0 +1 @@
+Remove unnecessary `# type: ignore` comments made redundant by type hint fixes in upstream packages.
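For illustration only (a sketch, not part of the patch; the hostname, port, and
factory below are hypothetical): as the diffs that follow show, each affected
call site previously carried a `# type: ignore[arg-type]` on the host argument
passed to connectTCP, and with Twisted 22.1.0's corrected annotations the same
call type-checks as written.

    from twisted.internet import reactor
    from twisted.internet.protocol import Factory

    # Sketch of the pattern this patch cleans up. Before Twisted 22.1.0 the
    # host argument needed "# type: ignore[arg-type]"; it no longer does.
    factory = Factory()
    reactor.connectTCP("replication.example.com", 9092, factory, timeout=30)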
diff --git a/synapse/handlers/send_email.py b/synapse/handlers/send_email.py index 1a062a784cd4..a305a66860a5 100644 --- a/synapse/handlers/send_email.py +++ b/synapse/handlers/send_email.py @@ -106,7 +106,7 @@ def build_sender_factory(**kwargs: Any) -> ESMTPSenderFactory: factory = build_sender_factory(hostname=smtphost if enable_tls else None) reactor.connectTCP( - smtphost, # type: ignore[arg-type] + smtphost, smtpport, factory, timeout=30, diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index 21293038ef84..f7e6bc1e6282 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -318,7 +318,7 @@ def start_replication(self, hs: "HomeServer"): hs, outbound_redis_connection ) hs.get_reactor().connectTCP( - hs.config.redis.redis_host, # type: ignore[arg-type] + hs.config.redis.redis_host, hs.config.redis.redis_port, self._factory, timeout=30, @@ -330,7 +330,7 @@ def start_replication(self, hs: "HomeServer"): host = hs.config.worker.worker_replication_host port = hs.config.worker.worker_replication_port hs.get_reactor().connectTCP( - host, # type: ignore[arg-type] + host, port, self._factory, timeout=30, diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py index 8d28bd3f3fcc..5b37f379d040 100644 --- a/synapse/replication/tcp/redis.py +++ b/synapse/replication/tcp/redis.py @@ -373,7 +373,7 @@ def lazyConnection( reactor = hs.get_reactor() reactor.connectTCP( - host, # type: ignore[arg-type] + host, port, factory, timeout=30, From 8c94b3abe93fe8c3e2ddd29fa350f54f69714151 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 8 Feb 2022 09:21:20 -0500 Subject: [PATCH 213/474] Experimental support to include bundled aggregations in search results (MSC3666) (#11837) --- changelog.d/11837.feature | 1 + synapse/config/experimental.py | 2 ++ synapse/handlers/search.py | 29 ++++++++++++--- synapse/storage/databases/main/relations.py | 13 +++++-- tests/rest/client/test_relations.py | 39 ++++++++++++++++++++- 5 files changed, 76 insertions(+), 8 deletions(-) create mode 100644 changelog.d/11837.feature diff --git a/changelog.d/11837.feature b/changelog.d/11837.feature new file mode 100644 index 000000000000..62ef707123db --- /dev/null +++ b/changelog.d/11837.feature @@ -0,0 +1 @@ +Experimental support for [MSC3666](https://github.com/matrix-org/matrix-doc/pull/3666): including bundled aggregations in server side search results. diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index e4719d19b857..f05a803a7104 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -26,6 +26,8 @@ def read_config(self, config: JsonDict, **kwargs): # MSC3440 (thread relation) self.msc3440_enabled: bool = experimental.get("msc3440_enabled", False) + # MSC3666: including bundled relations in /search. 
+ self.msc3666_enabled: bool = experimental.get("msc3666_enabled", False) # MSC3026 (busy presence state) self.msc3026_enabled: bool = experimental.get("msc3026_enabled", False) diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index 02bb5ae72f51..41cb80907893 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -43,6 +43,8 @@ def __init__(self, hs: "HomeServer"): self.state_store = self.storage.state self.auth = hs.get_auth() + self._msc3666_enabled = hs.config.experimental.msc3666_enabled + async def get_old_rooms_from_upgraded_room(self, room_id: str) -> Iterable[str]: """Retrieves room IDs of old rooms in the history of an upgraded room. @@ -238,8 +240,6 @@ async def search( results = search_result["results"] - results_map = {r["event"].event_id: r for r in results} - rank_map.update({r["event"].event_id: r["rank"] for r in results}) filtered_events = await search_filter.filter([r["event"] for r in results]) @@ -420,12 +420,29 @@ async def search( time_now = self.clock.time_msec() + aggregations = None + if self._msc3666_enabled: + aggregations = await self.store.get_bundled_aggregations( + # Generate an iterable of EventBase for all the events that will be + # returned, including contextual events. + itertools.chain( + # The events_before and events_after for each context. + itertools.chain.from_iterable( + itertools.chain(context["events_before"], context["events_after"]) # type: ignore[arg-type] + for context in contexts.values() + ), + # The returned events. + allowed_events, + ), + user.to_string(), + ) + for context in contexts.values(): context["events_before"] = self._event_serializer.serialize_events( - context["events_before"], time_now # type: ignore[arg-type] + context["events_before"], time_now, bundle_aggregations=aggregations # type: ignore[arg-type] ) context["events_after"] = self._event_serializer.serialize_events( - context["events_after"], time_now # type: ignore[arg-type] + context["events_after"], time_now, bundle_aggregations=aggregations # type: ignore[arg-type] ) state_results = {} @@ -442,7 +459,9 @@ async def search( results.append( { "rank": rank_map[e.event_id], - "result": self._event_serializer.serialize_event(e, time_now), + "result": self._event_serializer.serialize_event( + e, time_now, bundle_aggregations=aggregations + ), "context": contexts.get(e.event_id, {}), } ) diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 6180b1729605..7718acbf1cb8 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -715,6 +715,9 @@ async def get_bundled_aggregations( A map of event ID to the bundled aggregation for the event. Not all events may have bundled aggregations in the results. """ + # The already processed event IDs. Tracked separately from the result + # since the result omits events which do not have bundled aggregations. + seen_event_ids = set() # State events and redacted events do not get bundled aggregations. events = [ @@ -728,13 +731,19 @@ async def get_bundled_aggregations( # Fetch other relations per event. for event in events: + # De-duplicate events by ID to handle the same event requested multiple + # times. The caches that _get_bundled_aggregation_for_event use should + # capture this, but best to reduce work. 
+ if event.event_id in seen_event_ids: + continue + seen_event_ids.add(event.event_id) + event_result = await self._get_bundled_aggregation_for_event(event, user_id) if event_result: results[event.event_id] = event_result # Fetch any edits. - event_ids = [event.event_id for event in events] - edits = await self._get_applicable_edits(event_ids) + edits = await self._get_applicable_edits(seen_event_ids) for event_id, edit in edits.items(): results.setdefault(event_id, BundledAggregations()).replace = edit diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index 96ae7790bb15..06721e67c9c0 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -453,7 +453,9 @@ def test_aggregation_must_be_annotation(self): ) self.assertEquals(400, channel.code, channel.json_body) - @unittest.override_config({"experimental_features": {"msc3440_enabled": True}}) + @unittest.override_config( + {"experimental_features": {"msc3440_enabled": True, "msc3666_enabled": True}} + ) def test_bundled_aggregations(self): """ Test that annotations, references, and threads get correctly bundled. @@ -579,6 +581,23 @@ def assert_bundle(event_json: JsonDict) -> None: self.assertTrue(room_timeline["limited"]) assert_bundle(self._find_event_in_chunk(room_timeline["events"])) + # Request search. + channel = self.make_request( + "POST", + "/search", + # Search term matches the parent message. + content={"search_categories": {"room_events": {"search_term": "Hi"}}}, + access_token=self.user_token, + ) + self.assertEquals(200, channel.code, channel.json_body) + chunk = [ + result["result"] + for result in channel.json_body["search_categories"]["room_events"][ + "results" + ] + ] + assert_bundle(self._find_event_in_chunk(chunk)) + def test_aggregation_get_event_for_annotation(self): """Test that annotations do not get bundled aggregations included when directly requested. @@ -759,6 +778,7 @@ def test_ignore_invalid_room(self): self.assertEquals(200, channel.code, channel.json_body) self.assertNotIn("m.relations", channel.json_body["unsigned"]) + @unittest.override_config({"experimental_features": {"msc3666_enabled": True}}) def test_edit(self): """Test that a simple edit works.""" @@ -825,6 +845,23 @@ def assert_bundle(event_json: JsonDict) -> None: self.assertTrue(room_timeline["limited"]) assert_bundle(self._find_event_in_chunk(room_timeline["events"])) + # Request search. + channel = self.make_request( + "POST", + "/search", + # Search term matches the parent message. + content={"search_categories": {"room_events": {"search_term": "Hi"}}}, + access_token=self.user_token, + ) + self.assertEquals(200, channel.code, channel.json_body) + chunk = [ + result["result"] + for result in channel.json_body["search_categories"]["room_events"][ + "results" + ] + ] + assert_bundle(self._find_event_in_chunk(chunk)) + def test_multi_edit(self): """Test that multiple edits, including attempts by people who shouldn't be allowed, are correctly handled. From d0e78af35e519ff76bd23e786007f3e7130d90f7 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 8 Feb 2022 11:03:08 -0500 Subject: [PATCH 214/474] Add missing type hints to synapse.replication. 
(#11938) --- changelog.d/11938.misc | 1 + mypy.ini | 3 + .../slave/storage/_slaved_id_tracker.py | 2 +- .../replication/slave/storage/client_ips.py | 4 +- synapse/replication/slave/storage/devices.py | 10 ++- synapse/replication/slave/storage/groups.py | 8 +- .../replication/slave/storage/push_rule.py | 7 +- synapse/replication/slave/storage/pushers.py | 6 +- synapse/replication/tcp/client.py | 45 ++++++----- synapse/replication/tcp/commands.py | 74 ++++++++++++------- synapse/replication/tcp/handler.py | 36 ++++----- synapse/replication/tcp/protocol.py | 68 ++++++++--------- synapse/replication/tcp/redis.py | 32 ++++---- synapse/replication/tcp/resource.py | 16 ++-- synapse/replication/tcp/streams/_base.py | 6 +- synapse/replication/tcp/streams/events.py | 23 ++++-- synapse/util/stringutils.py | 5 +- tests/replication/_base.py | 7 +- .../replication/tcp/test_remote_server_up.py | 3 +- 19 files changed, 209 insertions(+), 147 deletions(-) create mode 100644 changelog.d/11938.misc diff --git a/changelog.d/11938.misc b/changelog.d/11938.misc new file mode 100644 index 000000000000..1d3a0030f77f --- /dev/null +++ b/changelog.d/11938.misc @@ -0,0 +1 @@ +Add missing type hints to replication code. diff --git a/mypy.ini b/mypy.ini index 2884078d0ad3..cd28ac0dd2fe 100644 --- a/mypy.ini +++ b/mypy.ini @@ -169,6 +169,9 @@ disallow_untyped_defs = True [mypy-synapse.push.*] disallow_untyped_defs = True +[mypy-synapse.replication.*] +disallow_untyped_defs = True + [mypy-synapse.rest.*] disallow_untyped_defs = True diff --git a/synapse/replication/slave/storage/_slaved_id_tracker.py b/synapse/replication/slave/storage/_slaved_id_tracker.py index fa132d10b414..8f3f953ed474 100644 --- a/synapse/replication/slave/storage/_slaved_id_tracker.py +++ b/synapse/replication/slave/storage/_slaved_id_tracker.py @@ -40,7 +40,7 @@ def __init__( for table, column in extra_tables: self.advance(None, _load_current_id(db_conn, table, column)) - def advance(self, instance_name: Optional[str], new_id: int): + def advance(self, instance_name: Optional[str], new_id: int) -> None: self._current = (max if self.step > 0 else min)(self._current, new_id) def get_current_token(self) -> int: diff --git a/synapse/replication/slave/storage/client_ips.py b/synapse/replication/slave/storage/client_ips.py index bc888ce1a871..b5b84c09ae41 100644 --- a/synapse/replication/slave/storage/client_ips.py +++ b/synapse/replication/slave/storage/client_ips.py @@ -37,7 +37,9 @@ def __init__( cache_name="client_ip_last_seen", max_size=50000 ) - async def insert_client_ip(self, user_id, access_token, ip, user_agent, device_id): + async def insert_client_ip( + self, user_id: str, access_token: str, ip: str, user_agent: str, device_id: str + ) -> None: now = int(self._clock.time_msec()) key = (user_id, access_token, ip) diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py index a2aff75b7075..0ffd34f1dad0 100644 --- a/synapse/replication/slave/storage/devices.py +++ b/synapse/replication/slave/storage/devices.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Iterable from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker @@ -60,7 +60,9 @@ def __init__( def get_device_stream_token(self) -> int: return self._device_list_id_gen.get_current_token() - def process_replication_rows(self, stream_name, instance_name, token, rows): + def process_replication_rows( + self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any] + ) -> None: if stream_name == DeviceListsStream.NAME: self._device_list_id_gen.advance(instance_name, token) self._invalidate_caches_for_devices(token, rows) @@ -70,7 +72,9 @@ def process_replication_rows(self, stream_name, instance_name, token, rows): self._user_signature_stream_cache.entity_has_changed(row.user_id, token) return super().process_replication_rows(stream_name, instance_name, token, rows) - def _invalidate_caches_for_devices(self, token, rows): + def _invalidate_caches_for_devices( + self, token: int, rows: Iterable[DeviceListsStream.DeviceListsStreamRow] + ) -> None: for row in rows: # The entities are either user IDs (starting with '@') whose devices # have changed, or remote servers that we need to tell about diff --git a/synapse/replication/slave/storage/groups.py b/synapse/replication/slave/storage/groups.py index 9d90e26375f0..d6f37d7479ba 100644 --- a/synapse/replication/slave/storage/groups.py +++ b/synapse/replication/slave/storage/groups.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Iterable from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker @@ -44,10 +44,12 @@ def __init__( self._group_updates_id_gen.get_current_token(), ) - def get_group_stream_token(self): + def get_group_stream_token(self) -> int: return self._group_updates_id_gen.get_current_token() - def process_replication_rows(self, stream_name, instance_name, token, rows): + def process_replication_rows( + self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any] + ) -> None: if stream_name == GroupServerStream.NAME: self._group_updates_id_gen.advance(instance_name, token) for row in rows: diff --git a/synapse/replication/slave/storage/push_rule.py b/synapse/replication/slave/storage/push_rule.py index 7541e21de9dd..52ee3f7e58ea 100644 --- a/synapse/replication/slave/storage/push_rule.py +++ b/synapse/replication/slave/storage/push_rule.py @@ -12,6 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from typing import Any, Iterable from synapse.replication.tcp.streams import PushRulesStream from synapse.storage.databases.main.push_rule import PushRulesWorkerStore @@ -20,10 +21,12 @@ class SlavedPushRuleStore(SlavedEventStore, PushRulesWorkerStore): - def get_max_push_rules_stream_id(self): + def get_max_push_rules_stream_id(self) -> int: return self._push_rules_stream_id_gen.get_current_token() - def process_replication_rows(self, stream_name, instance_name, token, rows): + def process_replication_rows( + self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any] + ) -> None: if stream_name == PushRulesStream.NAME: self._push_rules_stream_id_gen.advance(instance_name, token) for row in rows: diff --git a/synapse/replication/slave/storage/pushers.py b/synapse/replication/slave/storage/pushers.py index cea90c0f1bf4..de642bba71b0 100644 --- a/synapse/replication/slave/storage/pushers.py +++ b/synapse/replication/slave/storage/pushers.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Iterable from synapse.replication.tcp.streams import PushersStream from synapse.storage.database import DatabasePool, LoggingDatabaseConnection @@ -41,8 +41,8 @@ def get_pushers_stream_token(self) -> int: return self._pushers_id_gen.get_current_token() def process_replication_rows( - self, stream_name: str, instance_name: str, token, rows + self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any] ) -> None: if stream_name == PushersStream.NAME: - self._pushers_id_gen.advance(instance_name, token) # type: ignore + self._pushers_id_gen.advance(instance_name, token) return super().process_replication_rows(stream_name, instance_name, token, rows) diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index e29ae1e375af..d59ce7ccf967 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -14,10 +14,12 @@ """A replication client for use by synapse workers. 
""" import logging -from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple +from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple from twisted.internet.defer import Deferred +from twisted.internet.interfaces import IAddress, IConnector from twisted.internet.protocol import ReconnectingClientFactory +from twisted.python.failure import Failure from synapse.api.constants import EventTypes from synapse.federation import send_queue @@ -79,10 +81,10 @@ def __init__( hs.get_reactor().addSystemEventTrigger("before", "shutdown", self.stopTrying) - def startedConnecting(self, connector): + def startedConnecting(self, connector: IConnector) -> None: logger.info("Connecting to replication: %r", connector.getDestination()) - def buildProtocol(self, addr): + def buildProtocol(self, addr: IAddress) -> ClientReplicationStreamProtocol: logger.info("Connected to replication: %r", addr) return ClientReplicationStreamProtocol( self.hs, @@ -92,11 +94,11 @@ def buildProtocol(self, addr): self.command_handler, ) - def clientConnectionLost(self, connector, reason): + def clientConnectionLost(self, connector: IConnector, reason: Failure) -> None: logger.error("Lost replication conn: %r", reason) ReconnectingClientFactory.clientConnectionLost(self, connector, reason) - def clientConnectionFailed(self, connector, reason): + def clientConnectionFailed(self, connector: IConnector, reason: Failure) -> None: logger.error("Failed to connect to replication: %r", reason) ReconnectingClientFactory.clientConnectionFailed(self, connector, reason) @@ -131,7 +133,7 @@ def __init__(self, hs: "HomeServer"): async def on_rdata( self, stream_name: str, instance_name: str, token: int, rows: list - ): + ) -> None: """Called to handle a batch of replication data with a given stream token. By default this just pokes the slave store. Can be overridden in subclasses to @@ -252,14 +254,16 @@ async def on_rdata( # loop. (This maintains the order so no need to resort) waiting_list[:] = waiting_list[index_of_first_deferred_not_called:] - async def on_position(self, stream_name: str, instance_name: str, token: int): + async def on_position( + self, stream_name: str, instance_name: str, token: int + ) -> None: await self.on_rdata(stream_name, instance_name, token, []) # We poke the generic "replication" notifier to wake anything up that # may be streaming. self.notifier.notify_replication() - def on_remote_server_up(self, server: str): + def on_remote_server_up(self, server: str) -> None: """Called when get a new REMOTE_SERVER_UP command.""" # Let's wake up the transaction queue for the server in case we have @@ -269,7 +273,7 @@ def on_remote_server_up(self, server: str): async def wait_for_stream_position( self, instance_name: str, stream_name: str, position: int - ): + ) -> None: """Wait until this instance has received updates up to and including the given stream position. 
""" @@ -304,7 +308,7 @@ async def wait_for_stream_position( "Finished waiting for repl stream %r to reach %s", stream_name, position ) - def stop_pusher(self, user_id, app_id, pushkey): + def stop_pusher(self, user_id: str, app_id: str, pushkey: str) -> None: if not self._notify_pushers: return @@ -316,13 +320,13 @@ def stop_pusher(self, user_id, app_id, pushkey): logger.info("Stopping pusher %r / %r", user_id, key) pusher.on_stop() - async def start_pusher(self, user_id, app_id, pushkey): + async def start_pusher(self, user_id: str, app_id: str, pushkey: str) -> None: if not self._notify_pushers: return key = "%s:%s" % (app_id, pushkey) logger.info("Starting pusher %r / %r", user_id, key) - return await self._pusher_pool.start_pusher_by_id(app_id, pushkey, user_id) + await self._pusher_pool.start_pusher_by_id(app_id, pushkey, user_id) class FederationSenderHandler: @@ -353,10 +357,12 @@ def __init__(self, hs: "HomeServer"): self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer") - def wake_destination(self, server: str): + def wake_destination(self, server: str) -> None: self.federation_sender.wake_destination(server) - async def process_replication_rows(self, stream_name, token, rows): + async def process_replication_rows( + self, stream_name: str, token: int, rows: list + ) -> None: # The federation stream contains things that we want to send out, e.g. # presence, typing, etc. if stream_name == "federation": @@ -384,11 +390,12 @@ async def process_replication_rows(self, stream_name, token, rows): for host in hosts: self.federation_sender.send_device_messages(host) - async def _on_new_receipts(self, rows): + async def _on_new_receipts( + self, rows: Iterable[ReceiptsStream.ReceiptsStreamRow] + ) -> None: """ Args: - rows (Iterable[synapse.replication.tcp.streams.ReceiptsStream.ReceiptsStreamRow]): - new receipts to be processed + rows: new receipts to be processed """ for receipt in rows: # we only want to send on receipts for our own users @@ -408,7 +415,7 @@ async def _on_new_receipts(self, rows): ) await self.federation_sender.send_read_receipt(receipt_info) - async def update_token(self, token): + async def update_token(self, token: int) -> None: """Update the record of where we have processed to in the federation stream. Called after we have processed a an update received over replication. Sends @@ -428,7 +435,7 @@ async def update_token(self, token): run_as_background_process("_save_and_send_ack", self._save_and_send_ack) - async def _save_and_send_ack(self): + async def _save_and_send_ack(self) -> None: """Save the current federation position in the database and send an ACK to master with where we're up to. """ diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py index 1311b013dae7..3654f6c03c7e 100644 --- a/synapse/replication/tcp/commands.py +++ b/synapse/replication/tcp/commands.py @@ -18,12 +18,15 @@ """ import abc import logging -from typing import Tuple, Type +from typing import Optional, Tuple, Type, TypeVar +from synapse.replication.tcp.streams._base import StreamRow from synapse.util import json_decoder, json_encoder logger = logging.getLogger(__name__) +T = TypeVar("T", bound="Command") + class Command(metaclass=abc.ABCMeta): """The base command class. @@ -38,7 +41,7 @@ class Command(metaclass=abc.ABCMeta): @classmethod @abc.abstractmethod - def from_line(cls, line): + def from_line(cls: Type[T], line: str) -> T: """Deserialises a line from the wire into this command. `line` does not include the command. 
""" @@ -49,21 +52,24 @@ def to_line(self) -> str: prefix. """ - def get_logcontext_id(self): + def get_logcontext_id(self) -> str: """Get a suitable string for the logcontext when processing this command""" # by default, we just use the command name. return self.NAME +SC = TypeVar("SC", bound="_SimpleCommand") + + class _SimpleCommand(Command): """An implementation of Command whose argument is just a 'data' string.""" - def __init__(self, data): + def __init__(self, data: str): self.data = data @classmethod - def from_line(cls, line): + def from_line(cls: Type[SC], line: str) -> SC: return cls(line) def to_line(self) -> str: @@ -109,14 +115,16 @@ class RdataCommand(Command): NAME = "RDATA" - def __init__(self, stream_name, instance_name, token, row): + def __init__( + self, stream_name: str, instance_name: str, token: Optional[int], row: StreamRow + ): self.stream_name = stream_name self.instance_name = instance_name self.token = token self.row = row @classmethod - def from_line(cls, line): + def from_line(cls: Type["RdataCommand"], line: str) -> "RdataCommand": stream_name, instance_name, token, row_json = line.split(" ", 3) return cls( stream_name, @@ -125,7 +133,7 @@ def from_line(cls, line): json_decoder.decode(row_json), ) - def to_line(self): + def to_line(self) -> str: return " ".join( ( self.stream_name, @@ -135,7 +143,7 @@ def to_line(self): ) ) - def get_logcontext_id(self): + def get_logcontext_id(self) -> str: return "RDATA-" + self.stream_name @@ -164,18 +172,20 @@ class PositionCommand(Command): NAME = "POSITION" - def __init__(self, stream_name, instance_name, prev_token, new_token): + def __init__( + self, stream_name: str, instance_name: str, prev_token: int, new_token: int + ): self.stream_name = stream_name self.instance_name = instance_name self.prev_token = prev_token self.new_token = new_token @classmethod - def from_line(cls, line): + def from_line(cls: Type["PositionCommand"], line: str) -> "PositionCommand": stream_name, instance_name, prev_token, new_token = line.split(" ", 3) return cls(stream_name, instance_name, int(prev_token), int(new_token)) - def to_line(self): + def to_line(self) -> str: return " ".join( ( self.stream_name, @@ -218,14 +228,14 @@ class ReplicateCommand(Command): NAME = "REPLICATE" - def __init__(self): + def __init__(self) -> None: pass @classmethod - def from_line(cls, line): + def from_line(cls: Type[T], line: str) -> T: return cls() - def to_line(self): + def to_line(self) -> str: return "" @@ -247,14 +257,16 @@ class UserSyncCommand(Command): NAME = "USER_SYNC" - def __init__(self, instance_id, user_id, is_syncing, last_sync_ms): + def __init__( + self, instance_id: str, user_id: str, is_syncing: bool, last_sync_ms: int + ): self.instance_id = instance_id self.user_id = user_id self.is_syncing = is_syncing self.last_sync_ms = last_sync_ms @classmethod - def from_line(cls, line): + def from_line(cls: Type["UserSyncCommand"], line: str) -> "UserSyncCommand": instance_id, user_id, state, last_sync_ms = line.split(" ", 3) if state not in ("start", "end"): @@ -262,7 +274,7 @@ def from_line(cls, line): return cls(instance_id, user_id, state == "start", int(last_sync_ms)) - def to_line(self): + def to_line(self) -> str: return " ".join( ( self.instance_id, @@ -286,14 +298,16 @@ class ClearUserSyncsCommand(Command): NAME = "CLEAR_USER_SYNC" - def __init__(self, instance_id): + def __init__(self, instance_id: str): self.instance_id = instance_id @classmethod - def from_line(cls, line): + def from_line( + cls: Type["ClearUserSyncsCommand"], line: 
str + ) -> "ClearUserSyncsCommand": return cls(line) - def to_line(self): + def to_line(self) -> str: return self.instance_id @@ -316,7 +330,9 @@ def __init__(self, instance_name: str, token: int): self.token = token @classmethod - def from_line(cls, line: str) -> "FederationAckCommand": + def from_line( + cls: Type["FederationAckCommand"], line: str + ) -> "FederationAckCommand": instance_name, token = line.split(" ") return cls(instance_name, int(token)) @@ -334,7 +350,15 @@ class UserIpCommand(Command): NAME = "USER_IP" - def __init__(self, user_id, access_token, ip, user_agent, device_id, last_seen): + def __init__( + self, + user_id: str, + access_token: str, + ip: str, + user_agent: str, + device_id: str, + last_seen: int, + ): self.user_id = user_id self.access_token = access_token self.ip = ip @@ -343,14 +367,14 @@ def __init__(self, user_id, access_token, ip, user_agent, device_id, last_seen): self.last_seen = last_seen @classmethod - def from_line(cls, line): + def from_line(cls: Type["UserIpCommand"], line: str) -> "UserIpCommand": user_id, jsn = line.split(" ", 1) access_token, ip, user_agent, device_id, last_seen = json_decoder.decode(jsn) return cls(user_id, access_token, ip, user_agent, device_id, last_seen) - def to_line(self): + def to_line(self) -> str: return ( self.user_id + " " diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index f7e6bc1e6282..17e157239336 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -261,7 +261,7 @@ def _add_command_to_stream_queue( "process-replication-data", self._unsafe_process_queue, stream_name ) - async def _unsafe_process_queue(self, stream_name: str): + async def _unsafe_process_queue(self, stream_name: str) -> None: """Processes the command queue for the given stream, until it is empty Does not check if there is already a thread processing the queue, hence "unsafe" @@ -294,7 +294,7 @@ async def _process_command( # This shouldn't be possible raise Exception("Unrecognised command %s in stream queue", cmd.NAME) - def start_replication(self, hs: "HomeServer"): + def start_replication(self, hs: "HomeServer") -> None: """Helper method to start a replication connection to the remote server using TCP. """ @@ -345,10 +345,10 @@ def get_streams_to_replicate(self) -> List[Stream]: """Get a list of streams that this instances replicates.""" return self._streams_to_replicate - def on_REPLICATE(self, conn: IReplicationConnection, cmd: ReplicateCommand): + def on_REPLICATE(self, conn: IReplicationConnection, cmd: ReplicateCommand) -> None: self.send_positions_to_connection(conn) - def send_positions_to_connection(self, conn: IReplicationConnection): + def send_positions_to_connection(self, conn: IReplicationConnection) -> None: """Send current position of all streams this process is source of to the connection. 
""" @@ -392,7 +392,7 @@ def on_CLEAR_USER_SYNC( def on_FEDERATION_ACK( self, conn: IReplicationConnection, cmd: FederationAckCommand - ): + ) -> None: federation_ack_counter.inc() if self._federation_sender: @@ -408,7 +408,7 @@ def on_USER_IP( else: return None - async def _handle_user_ip(self, cmd: UserIpCommand): + async def _handle_user_ip(self, cmd: UserIpCommand) -> None: await self._store.insert_client_ip( cmd.user_id, cmd.access_token, @@ -421,7 +421,7 @@ async def _handle_user_ip(self, cmd: UserIpCommand): assert self._server_notices_sender is not None await self._server_notices_sender.on_user_ip(cmd.user_id) - def on_RDATA(self, conn: IReplicationConnection, cmd: RdataCommand): + def on_RDATA(self, conn: IReplicationConnection, cmd: RdataCommand) -> None: if cmd.instance_name == self._instance_name: # Ignore RDATA that are just our own echoes return @@ -497,7 +497,7 @@ async def _process_rdata( async def on_rdata( self, stream_name: str, instance_name: str, token: int, rows: list - ): + ) -> None: """Called to handle a batch of replication data with a given stream token. Args: @@ -512,7 +512,7 @@ async def on_rdata( stream_name, instance_name, token, rows ) - def on_POSITION(self, conn: IReplicationConnection, cmd: PositionCommand): + def on_POSITION(self, conn: IReplicationConnection, cmd: PositionCommand) -> None: if cmd.instance_name == self._instance_name: # Ignore POSITION that are just our own echoes return @@ -581,7 +581,7 @@ async def _process_position( def on_REMOTE_SERVER_UP( self, conn: IReplicationConnection, cmd: RemoteServerUpCommand - ): + ) -> None: """Called when get a new REMOTE_SERVER_UP command.""" self._replication_data_handler.on_remote_server_up(cmd.data) @@ -604,7 +604,7 @@ def on_REMOTE_SERVER_UP( # between two instances, but that is not currently supported). self.send_command(cmd, ignore_conn=conn) - def new_connection(self, connection: IReplicationConnection): + def new_connection(self, connection: IReplicationConnection) -> None: """Called when we have a new connection.""" self._connections.append(connection) @@ -631,7 +631,7 @@ def new_connection(self, connection: IReplicationConnection): UserSyncCommand(self._instance_id, user_id, True, now) ) - def lost_connection(self, connection: IReplicationConnection): + def lost_connection(self, connection: IReplicationConnection) -> None: """Called when a connection is closed/lost.""" # we no longer need _streams_by_connection for this connection. streams = self._streams_by_connection.pop(connection, None) @@ -653,7 +653,7 @@ def connected(self) -> bool: def send_command( self, cmd: Command, ignore_conn: Optional[IReplicationConnection] = None - ): + ) -> None: """Send a command to all connected connections. Args: @@ -680,7 +680,7 @@ def send_command( else: logger.warning("Dropping command as not connected: %r", cmd.NAME) - def send_federation_ack(self, token: int): + def send_federation_ack(self, token: int) -> None: """Ack data for the federation stream. This allows the master to drop data stored purely in memory. 
""" @@ -688,7 +688,7 @@ def send_federation_ack(self, token: int): def send_user_sync( self, instance_id: str, user_id: str, is_syncing: bool, last_sync_ms: int - ): + ) -> None: """Poke the master that a user has started/stopped syncing.""" self.send_command( UserSyncCommand(instance_id, user_id, is_syncing, last_sync_ms) @@ -702,15 +702,15 @@ def send_user_ip( user_agent: str, device_id: str, last_seen: int, - ): + ) -> None: """Tell the master that the user made a request.""" cmd = UserIpCommand(user_id, access_token, ip, user_agent, device_id, last_seen) self.send_command(cmd) - def send_remote_server_up(self, server: str): + def send_remote_server_up(self, server: str) -> None: self.send_command(RemoteServerUpCommand(server)) - def stream_update(self, stream_name: str, token: str, data: Any): + def stream_update(self, stream_name: str, token: Optional[int], data: Any) -> None: """Called when a new update is available to stream to clients. We need to check if the client is interested in the stream or not diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index 7bae36db169b..7763ffb2d0c7 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -49,7 +49,7 @@ import logging import struct from inspect import isawaitable -from typing import TYPE_CHECKING, Collection, List, Optional +from typing import TYPE_CHECKING, Any, Collection, List, Optional from prometheus_client import Counter from zope.interface import Interface, implementer @@ -123,7 +123,7 @@ class ConnectionStates: class IReplicationConnection(Interface): """An interface for replication connections.""" - def send_command(cmd: Command): + def send_command(cmd: Command) -> None: """Send the command down the connection""" @@ -190,7 +190,7 @@ def __init__(self, clock: Clock, handler: "ReplicationCommandHandler"): "replication-conn", self.conn_id ) - def connectionMade(self): + def connectionMade(self) -> None: logger.info("[%s] Connection established", self.id()) self.state = ConnectionStates.ESTABLISHED @@ -207,11 +207,11 @@ def connectionMade(self): # Always send the initial PING so that the other side knows that they # can time us out. - self.send_command(PingCommand(self.clock.time_msec())) + self.send_command(PingCommand(str(self.clock.time_msec()))) self.command_handler.new_connection(self) - def send_ping(self): + def send_ping(self) -> None: """Periodically sends a ping and checks if we should close the connection due to the other side timing out. 
""" @@ -226,7 +226,7 @@ def send_ping(self): self.transport.abortConnection() else: if now - self.last_sent_command >= PING_TIME: - self.send_command(PingCommand(now)) + self.send_command(PingCommand(str(now))) if ( self.received_ping @@ -239,12 +239,12 @@ def send_ping(self): ) self.send_error("ping timeout") - def lineReceived(self, line: bytes): + def lineReceived(self, line: bytes) -> None: """Called when we've received a line""" with PreserveLoggingContext(self._logging_context): self._parse_and_dispatch_line(line) - def _parse_and_dispatch_line(self, line: bytes): + def _parse_and_dispatch_line(self, line: bytes) -> None: if line.strip() == "": # Ignore blank lines return @@ -309,24 +309,24 @@ def handle_command(self, cmd: Command) -> None: if not handled: logger.warning("Unhandled command: %r", cmd) - def close(self): + def close(self) -> None: logger.warning("[%s] Closing connection", self.id()) self.time_we_closed = self.clock.time_msec() assert self.transport is not None self.transport.loseConnection() self.on_connection_closed() - def send_error(self, error_string, *args): + def send_error(self, error_string: str, *args: Any) -> None: """Send an error to remote and close the connection.""" self.send_command(ErrorCommand(error_string % args)) self.close() - def send_command(self, cmd, do_buffer=True): + def send_command(self, cmd: Command, do_buffer: bool = True) -> None: """Send a command if connection has been established. Args: - cmd (Command) - do_buffer (bool): Whether to buffer the message or always attempt + cmd + do_buffer: Whether to buffer the message or always attempt to send the command. This is mostly used to send an error message if we're about to close the connection due our buffers becoming full. @@ -357,7 +357,7 @@ def send_command(self, cmd, do_buffer=True): self.last_sent_command = self.clock.time_msec() - def _queue_command(self, cmd): + def _queue_command(self, cmd: Command) -> None: """Queue the command until the connection is ready to write to again.""" logger.debug("[%s] Queueing as conn %r, cmd: %r", self.id(), self.state, cmd) self.pending_commands.append(cmd) @@ -370,20 +370,20 @@ def _queue_command(self, cmd): self.send_command(ErrorCommand("Failed to keep up"), do_buffer=False) self.close() - def _send_pending_commands(self): + def _send_pending_commands(self) -> None: """Send any queued commandes""" pending = self.pending_commands self.pending_commands = [] for cmd in pending: self.send_command(cmd) - def on_PING(self, line): + def on_PING(self, cmd: PingCommand) -> None: self.received_ping = True - def on_ERROR(self, cmd): + def on_ERROR(self, cmd: ErrorCommand) -> None: logger.error("[%s] Remote reported error: %r", self.id(), cmd.data) - def pauseProducing(self): + def pauseProducing(self) -> None: """This is called when both the kernel send buffer and the twisted tcp connection send buffers have become full. 
@@ -394,26 +394,26 @@ def pauseProducing(self): logger.info("[%s] Pause producing", self.id()) self.state = ConnectionStates.PAUSED - def resumeProducing(self): + def resumeProducing(self) -> None: """The remote has caught up after we started buffering!""" logger.info("[%s] Resume producing", self.id()) self.state = ConnectionStates.ESTABLISHED self._send_pending_commands() - def stopProducing(self): + def stopProducing(self) -> None: """We're never going to send any more data (normally because either we or the remote has closed the connection) """ logger.info("[%s] Stop producing", self.id()) self.on_connection_closed() - def connectionLost(self, reason): + def connectionLost(self, reason: Failure) -> None: # type: ignore[override] logger.info("[%s] Replication connection closed: %r", self.id(), reason) if isinstance(reason, Failure): assert reason.type is not None connection_close_counter.labels(reason.type.__name__).inc() else: - connection_close_counter.labels(reason.__class__.__name__).inc() + connection_close_counter.labels(reason.__class__.__name__).inc() # type: ignore[unreachable] try: # Remove us from list of connections to be monitored @@ -427,7 +427,7 @@ def connectionLost(self, reason): self.on_connection_closed() - def on_connection_closed(self): + def on_connection_closed(self) -> None: logger.info("[%s] Connection was closed", self.id()) self.state = ConnectionStates.CLOSED @@ -445,7 +445,7 @@ def on_connection_closed(self): # the sentinel context is now active, which may not be correct. # PreserveLoggingContext() will restore the correct logging context. - def __str__(self): + def __str__(self) -> str: addr = None if self.transport: addr = str(self.transport.getPeer()) @@ -455,10 +455,10 @@ def __str__(self): addr, ) - def id(self): + def id(self) -> str: return "%s-%s" % (self.name, self.conn_id) - def lineLengthExceeded(self, line): + def lineLengthExceeded(self, line: str) -> None: """Called when we receive a line that is above the maximum line length""" self.send_error("Line length exceeded") @@ -474,11 +474,11 @@ def __init__( self.server_name = server_name - def connectionMade(self): + def connectionMade(self) -> None: self.send_command(ServerCommand(self.server_name)) super().connectionMade() - def on_NAME(self, cmd): + def on_NAME(self, cmd: NameCommand) -> None: logger.info("[%s] Renamed to %r", self.id(), cmd.data) self.name = cmd.data @@ -500,19 +500,19 @@ def __init__( self.client_name = client_name self.server_name = server_name - def connectionMade(self): + def connectionMade(self) -> None: self.send_command(NameCommand(self.client_name)) super().connectionMade() # Once we've connected subscribe to the necessary streams self.replicate() - def on_SERVER(self, cmd): + def on_SERVER(self, cmd: ServerCommand) -> None: if cmd.data != self.server_name: logger.error("[%s] Connected to wrong remote: %r", self.id(), cmd.data) self.send_error("Wrong remote") - def replicate(self): + def replicate(self) -> None: """Send the subscription request to the server""" logger.info("[%s] Subscribing to replication streams", self.id()) @@ -529,7 +529,7 @@ def replicate(self): ) -def transport_buffer_size(protocol): +def transport_buffer_size(protocol: BaseReplicationStreamProtocol) -> int: if protocol.transport: size = len(protocol.transport.dataBuffer) + protocol.transport._tempDataLen return size @@ -544,7 +544,9 @@ def transport_buffer_size(protocol): ) -def transport_kernel_read_buffer_size(protocol, read=True): +def transport_kernel_read_buffer_size( + protocol: 
BaseReplicationStreamProtocol, read: bool = True +) -> int: SIOCINQ = 0x541B SIOCOUTQ = 0x5411 diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py index 5b37f379d040..3170f7c59b04 100644 --- a/synapse/replication/tcp/redis.py +++ b/synapse/replication/tcp/redis.py @@ -14,7 +14,7 @@ import logging from inspect import isawaitable -from typing import TYPE_CHECKING, Generic, Optional, Type, TypeVar, cast +from typing import TYPE_CHECKING, Any, Generic, Optional, Type, TypeVar, cast import attr import txredisapi @@ -62,7 +62,7 @@ class ConstantProperty(Generic[T, V]): def __get__(self, obj: Optional[T], objtype: Optional[Type[T]] = None) -> V: return self.constant - def __set__(self, obj: Optional[T], value: V): + def __set__(self, obj: Optional[T], value: V) -> None: pass @@ -95,7 +95,7 @@ class RedisSubscriber(txredisapi.SubscriberProtocol): synapse_stream_name: str synapse_outbound_redis_connection: txredisapi.RedisProtocol - def __init__(self, *args, **kwargs): + def __init__(self, *args: Any, **kwargs: Any): super().__init__(*args, **kwargs) # a logcontext which we use for processing incoming commands. We declare it as a @@ -108,12 +108,12 @@ def __init__(self, *args, **kwargs): "replication_command_handler" ) - def connectionMade(self): + def connectionMade(self) -> None: logger.info("Connected to redis") super().connectionMade() run_as_background_process("subscribe-replication", self._send_subscribe) - async def _send_subscribe(self): + async def _send_subscribe(self) -> None: # it's important to make sure that we only send the REPLICATE command once we # have successfully subscribed to the stream - otherwise we might miss the # POSITION response sent back by the other end. @@ -131,12 +131,12 @@ async def _send_subscribe(self): # otherside won't know we've connected and so won't issue a REPLICATE. self.synapse_handler.send_positions_to_connection(self) - def messageReceived(self, pattern: str, channel: str, message: str): + def messageReceived(self, pattern: str, channel: str, message: str) -> None: """Received a message from redis.""" with PreserveLoggingContext(self._logging_context): self._parse_and_dispatch_message(message) - def _parse_and_dispatch_message(self, message: str): + def _parse_and_dispatch_message(self, message: str) -> None: if message.strip() == "": # Ignore blank lines return @@ -181,7 +181,7 @@ def handle_command(self, cmd: Command) -> None: "replication-" + cmd.get_logcontext_id(), lambda: res ) - def connectionLost(self, reason): + def connectionLost(self, reason: Failure) -> None: # type: ignore[override] logger.info("Lost connection to redis") super().connectionLost(reason) self.synapse_handler.lost_connection(self) @@ -193,17 +193,17 @@ def connectionLost(self, reason): # the sentinel context is now active, which may not be correct. # PreserveLoggingContext() will restore the correct logging context. - def send_command(self, cmd: Command): + def send_command(self, cmd: Command) -> None: """Send a command if connection has been established. 
Args: - cmd (Command) + cmd: The command to send """ run_as_background_process( "send-cmd", self._async_send_command, cmd, bg_start_span=False ) - async def _async_send_command(self, cmd: Command): + async def _async_send_command(self, cmd: Command) -> None: """Encode a replication command and send it over our outbound connection""" string = "%s %s" % (cmd.NAME, cmd.to_line()) if "\n" in string: @@ -259,7 +259,7 @@ def __init__( hs.get_clock().looping_call(self._send_ping, 30 * 1000) @wrap_as_background_process("redis_ping") - async def _send_ping(self): + async def _send_ping(self) -> None: for connection in self.pool: try: await make_deferred_yieldable(connection.ping()) @@ -269,13 +269,13 @@ async def _send_ping(self): # ReconnectingClientFactory has some logging (if you enable `self.noisy`), but # it's rubbish. We add our own here. - def startedConnecting(self, connector: IConnector): + def startedConnecting(self, connector: IConnector) -> None: logger.info( "Connecting to redis server %s", format_address(connector.getDestination()) ) super().startedConnecting(connector) - def clientConnectionFailed(self, connector: IConnector, reason: Failure): + def clientConnectionFailed(self, connector: IConnector, reason: Failure) -> None: logger.info( "Connection to redis server %s failed: %s", format_address(connector.getDestination()), @@ -283,7 +283,7 @@ def clientConnectionFailed(self, connector: IConnector, reason: Failure): ) super().clientConnectionFailed(connector, reason) - def clientConnectionLost(self, connector: IConnector, reason: Failure): + def clientConnectionLost(self, connector: IConnector, reason: Failure) -> None: logger.info( "Connection to redis server %s lost: %s", format_address(connector.getDestination()), @@ -330,7 +330,7 @@ def __init__( self.synapse_outbound_redis_connection = outbound_redis_connection - def buildProtocol(self, addr): + def buildProtocol(self, addr: IAddress) -> RedisSubscriber: p = super().buildProtocol(addr) p = cast(RedisSubscriber, p) diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index a9d85f4f6cd0..ecd6190f5b9b 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -16,16 +16,18 @@ import logging import random -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, List, Optional, Tuple from prometheus_client import Counter +from twisted.internet.interfaces import IAddress from twisted.internet.protocol import ServerFactory from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.tcp.commands import PositionCommand from synapse.replication.tcp.protocol import ServerReplicationStreamProtocol from synapse.replication.tcp.streams import EventsStream +from synapse.replication.tcp.streams._base import StreamRow, Token from synapse.util.metrics import Measure if TYPE_CHECKING: @@ -56,7 +58,7 @@ def __init__(self, hs: "HomeServer"): # listener config again or always starting a `ReplicationStreamer`.) 
hs.get_replication_streamer() - def buildProtocol(self, addr): + def buildProtocol(self, addr: IAddress) -> ServerReplicationStreamProtocol: return ServerReplicationStreamProtocol( self.server_name, self.clock, self.command_handler ) @@ -105,7 +107,7 @@ def __init__(self, hs: "HomeServer"): if any(EventsStream.NAME == s.NAME for s in self.streams): self.clock.looping_call(self.on_notifier_poke, 1000) - def on_notifier_poke(self): + def on_notifier_poke(self) -> None: """Checks if there is actually any new data and sends it to the connections if there are. @@ -137,7 +139,7 @@ def on_notifier_poke(self): run_as_background_process("replication_notifier", self._run_notifier_loop) - async def _run_notifier_loop(self): + async def _run_notifier_loop(self) -> None: self.is_looping = True try: @@ -238,7 +240,9 @@ async def _run_notifier_loop(self): self.is_looping = False -def _batch_updates(updates): +def _batch_updates( + updates: List[Tuple[Token, StreamRow]] +) -> List[Tuple[Optional[Token], StreamRow]]: """Takes a list of updates of form [(token, row)] and sets the token to None for all rows where the next row has the same token. This is used to implement batching. @@ -254,7 +258,7 @@ def _batch_updates(updates): if not updates: return [] - new_updates = [] + new_updates: List[Tuple[Optional[Token], StreamRow]] = [] for i, update in enumerate(updates[:-1]): if update[0] == updates[i + 1][0]: new_updates.append((None, update[1])) diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index 5a2d90c5309f..914b9eae8492 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -90,7 +90,7 @@ class Stream: ROW_TYPE: Any = None @classmethod - def parse_row(cls, row: StreamRow): + def parse_row(cls, row: StreamRow) -> Any: """Parse a row received over replication By default, assumes that the row data is an array object and passes its contents @@ -139,7 +139,7 @@ def __init__( # The token from which we last asked for updates self.last_token = self.current_token(self.local_instance_name) - def discard_updates_and_advance(self): + def discard_updates_and_advance(self) -> None: """Called when the stream should advance but the updates would be discarded, e.g. when there are no currently connected workers. """ @@ -200,7 +200,7 @@ def current_token_without_instance( return lambda instance_name: current_token() -def make_http_update_function(hs, stream_name: str) -> UpdateFunction: +def make_http_update_function(hs: "HomeServer", stream_name: str) -> UpdateFunction: """Makes a suitable function for use as an `update_function` that queries the master process for updates. """ diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py index 4f4f1ad45378..50c4a5ba03a1 100644 --- a/synapse/replication/tcp/streams/events.py +++ b/synapse/replication/tcp/streams/events.py @@ -13,12 +13,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
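As a worked example of the `_batch_updates` helper typed above (with made-up tokens and rows): within a run of updates that share a token, every row but the last has its token blanked, so the receiver knows a batch ends at the first non-None token.

```python
# Worked example of the batching rule implemented by _batch_updates above.
# Tokens and rows here are made up for illustration.
from typing import List, Optional, Tuple

Token = int
StreamRow = str


def batch_updates(
    updates: List[Tuple[Token, StreamRow]]
) -> List[Tuple[Optional[Token], StreamRow]]:
    if not updates:
        return []
    new_updates: List[Tuple[Optional[Token], StreamRow]] = []
    for i, update in enumerate(updates[:-1]):
        if update[0] == updates[i + 1][0]:
            # Next row shares our token: blank it to signal "more coming".
            new_updates.append((None, update[1]))
        else:
            new_updates.append(update)
    new_updates.append(updates[-1])
    return new_updates


assert batch_updates([(1, "a"), (1, "b"), (2, "c")]) == [
    (None, "a"),
    (1, "b"),
    (2, "c"),
]
```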
import heapq -from collections.abc import Iterable -from typing import TYPE_CHECKING, Optional, Tuple, Type +from typing import TYPE_CHECKING, Iterable, Optional, Tuple, Type, TypeVar, cast import attr -from ._base import Stream, StreamUpdateResult, Token +from synapse.replication.tcp.streams._base import ( + Stream, + StreamRow, + StreamUpdateResult, + Token, +) if TYPE_CHECKING: from synapse.server import HomeServer @@ -58,6 +62,9 @@ class EventsStreamRow: data: "BaseEventsStreamRow" +T = TypeVar("T", bound="BaseEventsStreamRow") + + class BaseEventsStreamRow: """Base class for rows to be sent in the events stream. @@ -68,7 +75,7 @@ class BaseEventsStreamRow: TypeId: str @classmethod - def from_data(cls, data): + def from_data(cls: Type[T], data: Iterable[Optional[str]]) -> T: """Parse the data from the replication stream into a row. By default we just call the constructor with the data list as arguments @@ -221,7 +228,7 @@ async def _update_function( return updates, upper_limit, limited @classmethod - def parse_row(cls, row): - (typ, data) = row - data = TypeToRow[typ].from_data(data) - return EventsStreamRow(typ, data) + def parse_row(cls, row: StreamRow) -> "EventsStreamRow": + (typ, data) = cast(Tuple[str, Iterable[Optional[str]]], row) + event_stream_row_data = TypeToRow[typ].from_data(data) + return EventsStreamRow(typ, event_stream_row_data) diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py index ea1032b4fcfb..b26546aecdb7 100644 --- a/synapse/util/stringutils.py +++ b/synapse/util/stringutils.py @@ -16,8 +16,7 @@ import re import secrets import string -from collections.abc import Iterable -from typing import Optional, Tuple +from typing import Iterable, Optional, Tuple from netaddr import valid_ipv6 @@ -197,7 +196,7 @@ def shortstr(iterable: Iterable, maxitems: int = 5) -> str: """If iterable has maxitems or fewer, return the stringification of a list containing those items. - Otherwise, return the stringification of a a list with the first maxitems items, + Otherwise, return the stringification of a list with the first maxitems items, followed by "...". 
Args: diff --git a/tests/replication/_base.py b/tests/replication/_base.py index cb02eddf0797..9fc50f885227 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -14,6 +14,7 @@ import logging from typing import Any, Dict, List, Optional, Tuple +from twisted.internet.address import IPv4Address from twisted.internet.protocol import Protocol from twisted.web.resource import Resource @@ -53,7 +54,7 @@ def prepare(self, reactor, clock, hs): server_factory = ReplicationStreamProtocolFactory(hs) self.streamer = hs.get_replication_streamer() self.server: ServerReplicationStreamProtocol = server_factory.buildProtocol( - None + IPv4Address("TCP", "127.0.0.1", 0) ) # Make a new HomeServer object for the worker @@ -345,7 +346,9 @@ def make_worker_hs( self.clock, repl_handler, ) - server = self.server_factory.buildProtocol(None) + server = self.server_factory.buildProtocol( + IPv4Address("TCP", "127.0.0.1", 0) + ) client_transport = FakeTransport(server, self.reactor) client.makeConnection(client_transport) diff --git a/tests/replication/tcp/test_remote_server_up.py b/tests/replication/tcp/test_remote_server_up.py index 262c35cef3c6..545f11acd1bf 100644 --- a/tests/replication/tcp/test_remote_server_up.py +++ b/tests/replication/tcp/test_remote_server_up.py @@ -14,6 +14,7 @@ from typing import Tuple +from twisted.internet.address import IPv4Address from twisted.internet.interfaces import IProtocol from twisted.test.proto_helpers import StringTransport @@ -29,7 +30,7 @@ def prepare(self, reactor, clock, hs): def _make_client(self) -> Tuple[IProtocol, StringTransport]: """Create a new direct TCP replication connection""" - proto = self.factory.buildProtocol(("127.0.0.1", 0)) + proto = self.factory.buildProtocol(IPv4Address("TCP", "127.0.0.1", 0)) transport = StringTransport() proto.makeConnection(transport) From 6f440fd859c411af8c32478b4353f7550619c3bd Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 9 Feb 2022 16:06:51 +0100 Subject: [PATCH 215/474] Recommend upgrading treq alongside twisted (#11943) --- CHANGES.md | 2 +- docs/upgrade.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 958024ff0c84..cd62e5256a19 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -7,7 +7,7 @@ Note that [Twisted 22.1.0](https://github.com/twisted/twisted/releases/tag/twist has recently been released, which fixes a [security issue](https://github.com/twisted/twisted/security/advisories/GHSA-92x2-jw7w-xvvx) within the Twisted library. We do not believe Synapse is affected by this vulnerability, though we advise server administrators who installed Synapse via pip to upgrade Twisted -with `pip install --upgrade Twisted` as a matter of good practice. The Docker image +with `pip install --upgrade Twisted treq` as a matter of good practice. The Docker image `matrixdotorg/synapse` and the Debian packages from `packages.matrix.org` are using the updated library. diff --git a/docs/upgrade.md b/docs/upgrade.md index 0105f87f90b7..df873e5317fa 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -93,7 +93,7 @@ Note that [Twisted 22.1.0](https://github.com/twisted/twisted/releases/tag/twist has recently been released, which fixes a [security issue](https://github.com/twisted/twisted/security/advisories/GHSA-92x2-jw7w-xvvx) within the Twisted library. 
We do not believe Synapse is affected by this vulnerability, though we advise server administrators who installed Synapse via pip to upgrade Twisted -with `pip install --upgrade Twisted` as a matter of good practice. The Docker image +with `pip install --upgrade Twisted treq` as a matter of good practice. The Docker image `matrixdotorg/synapse` and the Debian packages from `packages.matrix.org` are using the updated library. From 0408d694eec2553441fa08168007c49472fbe862 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 9 Feb 2022 11:32:20 -0500 Subject: [PATCH 216/474] Update changelog from #11867 to be a single line. --- changelog.d/11867.feature | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/changelog.d/11867.feature b/changelog.d/11867.feature index dbd9de0e4cf7..601705e0004e 100644 --- a/changelog.d/11867.feature +++ b/changelog.d/11867.feature @@ -1,5 +1 @@ -Stabilize [MSC3231](https://github.com/matrix-org/matrix-doc/pull/3231). - -Client implementations using `m.login.registration_token` should switch to the stable identifiers: -* `org.matrix.msc3231.login.registration_token` in query parameters and request/response bodies becomes `m.login.registration_token`. -* `/_matrix/client/unstable/org.matrix.msc3231/register/org.matrix.msc3231.login.registration_token/validity` becomes `/_matrix/client/v1/register/m.login.registration_token/validity`. \ No newline at end of file +Stabilize support for [MSC3231](https://github.com/matrix-org/matrix-doc/pull/3231). Clients should switch to the stable identifier and endpoint. From 3914576b2b6272a4be790c89d87649d257018dcf Mon Sep 17 00:00:00 2001 From: Brad Jones Date: Wed, 9 Feb 2022 13:56:33 -0700 Subject: [PATCH 217/474] Fix example for structured logging. (#11946) The StreamHandler takes a stream parameter, not location. --- changelog.d/11946.doc | 1 + docs/structured_logging.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11946.doc diff --git a/changelog.d/11946.doc b/changelog.d/11946.doc new file mode 100644 index 000000000000..eedf035a3bf3 --- /dev/null +++ b/changelog.d/11946.doc @@ -0,0 +1 @@ +Correct the structured logging configuration example. Contributed by Brad Jones. diff --git a/docs/structured_logging.md b/docs/structured_logging.md index b1281667e02b..14db85f58728 100644 --- a/docs/structured_logging.md +++ b/docs/structured_logging.md @@ -141,7 +141,7 @@ formatters: handlers: console: class: logging.StreamHandler - location: ext://sys.stdout + stream: ext://sys.stdout file: class: logging.FileHandler formatter: json From e4fdf459e24745be856efb535d539082a00da51b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 10 Feb 2022 08:15:10 -0500 Subject: [PATCH 218/474] Basic documentation for the release cycle. (#11954) --- changelog.d/11954.doc | 1 + docs/SUMMARY.md | 1 + docs/development/releases.md | 37 ++++++++++++++++++++++++++++++++++++ 3 files changed, 39 insertions(+) create mode 100644 changelog.d/11954.doc create mode 100644 docs/development/releases.md diff --git a/changelog.d/11954.doc b/changelog.d/11954.doc new file mode 100644 index 000000000000..6e7b3908909f --- /dev/null +++ b/changelog.d/11954.doc @@ -0,0 +1 @@ +Add information on the Synapse release cycle. 
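Returning briefly to the structured-logging fix above (#11946): in plain Python terms, `logging.StreamHandler` accepts a `stream` keyword argument, which is what the YAML key `stream: ext://sys.stdout` maps onto, while `location` is not a recognised parameter. A minimal equivalent of the corrected configuration:

```python
# Python equivalent of the corrected YAML handler config from #11946.
# logging.StreamHandler takes `stream`; there is no `location` parameter.
import logging
import sys

handler = logging.StreamHandler(stream=sys.stdout)
handler.setFormatter(logging.Formatter("%(message)s"))
logging.getLogger("demo").addHandler(handler)
```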
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 3eeb1a2799d3..ef9cabf55524 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -79,6 +79,7 @@ # Development - [Contributing Guide](development/contributing_guide.md) - [Code Style](code_style.md) + - [Release Cycle](development/releases.md) - [Git Usage](development/git.md) - [Testing]() - [OpenTracing](opentracing.md) diff --git a/docs/development/releases.md b/docs/development/releases.md new file mode 100644 index 000000000000..c9a8c6994597 --- /dev/null +++ b/docs/development/releases.md @@ -0,0 +1,37 @@ +# Synapse Release Cycle + +Releases of Synapse follow a two week release cycle with new releases usually +occurring on Tuesdays: + +* Day 0: Synapse `N - 1` is released. +* Day 7: Synapse `N` release candidate 1 is released. +* Days 7 - 13: Synapse `N` release candidates 2+ are released, if bugs are found. +* Day 14: Synapse `N` is released. + +Note that this schedule might be modified depending on the availability of the +Synapse team, e.g. releases may be skipped to avoid holidays. + +Release announcements can be found in the +[release category of the Matrix blog](https://matrix.org/blog/category/releases). + +## Bugfix releases + +If a bug is found after release that is deemed severe enough (by a combination +of the impacted users and the impact on those users) then a bugfix release may +be issued. This may be at any point in the release cycle. + +## Security releases + +Security will sometimes be backported to the previous version and released +immediately before the next release candidate. An example of this might be: + +* Day 0: Synapse N - 1 is released. +* Day 7: Synapse (N - 1).1 is released as Synapse N - 1 + the security fix. +* Day 7: Synapse N release candidate 1 is released (including the security fix). + +Depending on the impact and complexity of security fixes, multiple fixes might +be held to be released together. + +In some cases, a pre-disclosure of a security release will be issued as a notice +to Synapse operators that there is an upcoming security release. These can be +found in the [security category of the Matrix blog](https://matrix.org/blog/category/security). From 06e5a76322f1341a8a8db79b1ef2dbdfb295b08f Mon Sep 17 00:00:00 2001 From: Alexander Mnich <56564725+a-mnich@users.noreply.github.com> Date: Thu, 10 Feb 2022 15:18:27 +0100 Subject: [PATCH 219/474] Fix broken link in README to admin API. (#11955) From when the documentation was converted from rst -> md. --- README.rst | 2 +- changelog.d/11955.doc | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11955.doc diff --git a/README.rst b/README.rst index 50de3a49b05f..4281c87d1f80 100644 --- a/README.rst +++ b/README.rst @@ -246,7 +246,7 @@ Password reset ============== Users can reset their password through their client. Alternatively, a server admin -can reset a users password using the `admin API `_ +can reset a users password using the `admin API `_ or by directly editing the database as shown below. First calculate the hash of the new password:: diff --git a/changelog.d/11955.doc b/changelog.d/11955.doc new file mode 100644 index 000000000000..3d93115f596e --- /dev/null +++ b/changelog.d/11955.doc @@ -0,0 +1 @@ +Fix broken link in the README to the admin API for password reset. 
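For reference, the admin API password reset mentioned in the README excerpt above is a single authenticated POST to the documented `/_synapse/admin/v1/reset_password/<user_id>` endpoint. A sketch, in which the homeserver URL, access token, user ID and password are all placeholders:

```python
# Hedged sketch of the admin password-reset call referenced in the README.
# All concrete values below are placeholders.
import requests

resp = requests.post(
    "https://synapse.example.com/_synapse/admin/v1/reset_password/@alice:example.com",
    headers={"Authorization": "Bearer <admin_access_token>"},
    json={"new_password": "a-new-strong-password", "logout_devices": True},
)
resp.raise_for_status()  # 200 with an empty JSON object on success
```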
From 337f38cac38bc57bc6a3cc8b319e3079c60c4549 Mon Sep 17 00:00:00 2001 From: Denis Kasak Date: Thu, 10 Feb 2022 15:43:01 +0000 Subject: [PATCH 220/474] Implement a content type allow list for URL previews (#11936) This implements an allow list for content types for which Synapse will attempt URL preview. If a URL resolves to a resource with a content type which isn't in the list, the download will terminate immediately. This makes sense given that Synapse would never successfully generate a URL preview for such files in the first place, and helps prevent issues with streaming media servers, such as #8302. Signed-off-by: Denis Kasak dkasak@termina.org.uk --- changelog.d/11936.bugfix | 1 + synapse/http/client.py | 18 +++++ synapse/rest/media/v1/preview_url_resource.py | 8 +++ tests/rest/media/v1/test_url_preview.py | 72 +++++++++++++++++++ 4 files changed, 99 insertions(+) create mode 100644 changelog.d/11936.bugfix diff --git a/changelog.d/11936.bugfix b/changelog.d/11936.bugfix new file mode 100644 index 000000000000..bc149f280106 --- /dev/null +++ b/changelog.d/11936.bugfix @@ -0,0 +1 @@ +Implement an allow list of content types for which we will attempt to preview a URL. This prevents Synapse from making useless longer-lived connections to streaming media servers. diff --git a/synapse/http/client.py b/synapse/http/client.py index 743a7ffcb159..d617055617a6 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -20,6 +20,7 @@ TYPE_CHECKING, Any, BinaryIO, + Callable, Dict, Iterable, List, @@ -693,12 +694,18 @@ async def get_file( output_stream: BinaryIO, max_size: Optional[int] = None, headers: Optional[RawHeaders] = None, + is_allowed_content_type: Optional[Callable[[str], bool]] = None, ) -> Tuple[int, Dict[bytes, List[bytes]], str, int]: """GETs a file from a given URL Args: url: The URL to GET output_stream: File to write the response body to. headers: A map from header name to a list of values for that header + is_allowed_content_type: A predicate to determine whether the + content type of the file we're downloading is allowed. If set and + it evaluates to False when called with the content type, the + request will be terminated before completing the download by + raising SynapseError. Returns: A tuple of the file length, dict of the response headers, absolute URI of the response and HTTP response code. 
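To illustrate the new `is_allowed_content_type` hook on `get_file`: the caller supplies a plain predicate over the `Content-Type` header value, and if it returns False the download is aborted early with a 502. An illustrative predicate only (the URL previewer's real one, `_is_previewable`, appears later in this patch):

```python
# Sketch of a caller-supplied predicate for get_file's new
# is_allowed_content_type argument. The allowed set here is illustrative.
def is_allowed_content_type(content_type: str) -> bool:
    # Allow HTML pages and images only; anything else aborts the download
    # before the body is streamed to disk.
    ct = content_type.lower()
    return ct.startswith("text/html") or ct.startswith("image/")
```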
@@ -726,6 +733,17 @@ async def get_file( HTTPStatus.BAD_GATEWAY, "Got error %d" % (response.code,), Codes.UNKNOWN ) + if is_allowed_content_type and b"Content-Type" in resp_headers: + content_type = resp_headers[b"Content-Type"][0].decode("ascii") + if not is_allowed_content_type(content_type): + raise SynapseError( + HTTPStatus.BAD_GATEWAY, + ( + "Requested file's content type not allowed for this operation: %s" + % content_type + ), + ) + # TODO: if our Content-Type is HTML or something, just read the first # N bytes into RAM rather than saving it all to disk only to read it # straight back in again diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index efd84ced8f54..8d3d1e54dc9f 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -403,6 +403,7 @@ async def _download_url(self, url: str, output_stream: BinaryIO) -> DownloadResu output_stream=output_stream, max_size=self.max_spider_size, headers={"Accept-Language": self.url_preview_accept_language}, + is_allowed_content_type=_is_previewable, ) except SynapseError: # Pass SynapseErrors through directly, so that the servlet @@ -761,3 +762,10 @@ def _is_html(content_type: str) -> bool: def _is_json(content_type: str) -> bool: return content_type.lower().startswith("application/json") + + +def _is_previewable(content_type: str) -> bool: + """Returns True for content types for which we will perform URL preview and False + otherwise.""" + + return _is_html(content_type) or _is_media(content_type) or _is_json(content_type) diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py index 53f618621305..da2c53326019 100644 --- a/tests/rest/media/v1/test_url_preview.py +++ b/tests/rest/media/v1/test_url_preview.py @@ -243,6 +243,78 @@ def test_non_ascii_preview_httpequiv(self): self.assertEqual(channel.code, 200) self.assertEqual(channel.json_body["og:title"], "\u0434\u043a\u0430") + def test_video_rejected(self): + self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")] + + end_content = b"anything" + + channel = self.make_request( + "GET", + "preview_url?url=http://matrix.org", + shorthand=False, + await_result=False, + ) + self.pump() + + client = self.reactor.tcpClients[0][2].buildProtocol(None) + server = AccumulatingProtocol() + server.makeConnection(FakeTransport(client, self.reactor)) + client.makeConnection(FakeTransport(server, self.reactor)) + client.dataReceived( + ( + b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n" + b"Content-Type: video/mp4\r\n\r\n" + ) + % (len(end_content)) + + end_content + ) + + self.pump() + self.assertEqual(channel.code, 502) + self.assertEqual( + channel.json_body, + { + "errcode": "M_UNKNOWN", + "error": "Requested file's content type not allowed for this operation: video/mp4", + }, + ) + + def test_audio_rejected(self): + self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")] + + end_content = b"anything" + + channel = self.make_request( + "GET", + "preview_url?url=http://matrix.org", + shorthand=False, + await_result=False, + ) + self.pump() + + client = self.reactor.tcpClients[0][2].buildProtocol(None) + server = AccumulatingProtocol() + server.makeConnection(FakeTransport(client, self.reactor)) + client.makeConnection(FakeTransport(server, self.reactor)) + client.dataReceived( + ( + b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n" + b"Content-Type: audio/aac\r\n\r\n" + ) + % (len(end_content)) + + end_content + ) + + self.pump() + 
self.assertEqual(channel.code, 502) + self.assertEqual( + channel.json_body, + { + "errcode": "M_UNKNOWN", + "error": "Requested file's content type not allowed for this operation: audio/aac", + }, + ) + def test_non_ascii_preview_content_type(self): self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")] From df36945ff0e4a293a9dac0da07e2c94256835b32 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 10 Feb 2022 10:52:48 -0500 Subject: [PATCH 221/474] Support pagination tokens from /sync and /messages in the relations API. (#11952) --- changelog.d/11952.bugfix | 1 + synapse/rest/client/relations.py | 57 +++++--- synapse/storage/databases/main/relations.py | 46 ++++-- synapse/storage/relations.py | 15 +- tests/rest/client/test_relations.py | 151 ++++++++++++++++++-- 5 files changed, 217 insertions(+), 53 deletions(-) create mode 100644 changelog.d/11952.bugfix diff --git a/changelog.d/11952.bugfix b/changelog.d/11952.bugfix new file mode 100644 index 000000000000..e38a08f559d5 --- /dev/null +++ b/changelog.d/11952.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where pagination tokens from `/sync` and `/messages` could not be provided to the `/relations` API. diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py index 8cf5ebaa07b7..9ec425888a21 100644 --- a/synapse/rest/client/relations.py +++ b/synapse/rest/client/relations.py @@ -32,14 +32,45 @@ PaginationChunk, RelationPaginationToken, ) -from synapse.types import JsonDict +from synapse.types import JsonDict, RoomStreamToken, StreamToken if TYPE_CHECKING: from synapse.server import HomeServer + from synapse.storage.databases.main import DataStore logger = logging.getLogger(__name__) +async def _parse_token( + store: "DataStore", token: Optional[str] +) -> Optional[StreamToken]: + """ + For backwards compatibility support RelationPaginationToken, but new pagination + tokens are generated as full StreamTokens, to be compatible with /sync and /messages. + """ + if not token: + return None + # Luckily the format for StreamToken and RelationPaginationToken differ enough + # that they can easily be separated. An "_" appears in the serialization of + # RoomStreamToken (as part of StreamToken), but RelationPaginationToken uses + # "-" only for separators. + if "_" in token: + return await StreamToken.from_string(store, token) + else: + relation_token = RelationPaginationToken.from_string(token) + return StreamToken( + room_key=RoomStreamToken(relation_token.topological, relation_token.stream), + presence_key=0, + typing_key=0, + receipt_key=0, + account_data_key=0, + push_rules_key=0, + to_device_key=0, + device_list_key=0, + groups_key=0, + ) + + class RelationPaginationServlet(RestServlet): """API to paginate relations on an event by topological ordering, optionally filtered by relation type and event type. 
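The `_parse_token` heuristic above works because the two serializations are visibly different. With made-up values, the shapes look like this:

```python
# Made-up token values illustrating why "_" distinguishes the formats in
# _parse_token above: legacy RelationPaginationTokens serialize as
# "<topological>-<stream>", while full StreamTokens join their component
# keys with "_".
legacy_token = "12-345"                # RelationPaginationToken
stream_token = "s345_0_0_0_0_0_0_0_0"  # StreamToken (room key "s345")

assert "_" not in legacy_token
assert "_" in stream_token
```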
@@ -88,13 +119,8 @@ async def on_GET( pagination_chunk = PaginationChunk(chunk=[]) else: # Return the relations - from_token = None - if from_token_str: - from_token = RelationPaginationToken.from_string(from_token_str) - - to_token = None - if to_token_str: - to_token = RelationPaginationToken.from_string(to_token_str) + from_token = await _parse_token(self.store, from_token_str) + to_token = await _parse_token(self.store, to_token_str) pagination_chunk = await self.store.get_relations_for_event( event_id=parent_id, @@ -125,7 +151,7 @@ async def on_GET( events, now, bundle_aggregations=aggregations ) - return_value = pagination_chunk.to_dict() + return_value = await pagination_chunk.to_dict(self.store) return_value["chunk"] = serialized_events return_value["original_event"] = original_event @@ -216,7 +242,7 @@ async def on_GET( to_token=to_token, ) - return 200, pagination_chunk.to_dict() + return 200, await pagination_chunk.to_dict(self.store) class RelationAggregationGroupPaginationServlet(RestServlet): @@ -287,13 +313,8 @@ async def on_GET( from_token_str = parse_string(request, "from") to_token_str = parse_string(request, "to") - from_token = None - if from_token_str: - from_token = RelationPaginationToken.from_string(from_token_str) - - to_token = None - if to_token_str: - to_token = RelationPaginationToken.from_string(to_token_str) + from_token = await _parse_token(self.store, from_token_str) + to_token = await _parse_token(self.store, to_token_str) result = await self.store.get_relations_for_event( event_id=parent_id, @@ -313,7 +334,7 @@ async def on_GET( now = self.clock.time_msec() serialized_events = self._event_serializer.serialize_events(events, now) - return_value = result.to_dict() + return_value = await result.to_dict(self.store) return_value["chunk"] = serialized_events return 200, return_value diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 7718acbf1cb8..ad79cc56104a 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -39,16 +39,13 @@ ) from synapse.storage.databases.main.stream import generate_pagination_where_clause from synapse.storage.engines import PostgresEngine -from synapse.storage.relations import ( - AggregationPaginationToken, - PaginationChunk, - RelationPaginationToken, -) -from synapse.types import JsonDict +from synapse.storage.relations import AggregationPaginationToken, PaginationChunk +from synapse.types import JsonDict, RoomStreamToken, StreamToken from synapse.util.caches.descriptors import cached, cachedList if TYPE_CHECKING: from synapse.server import HomeServer + from synapse.storage.databases.main import DataStore logger = logging.getLogger(__name__) @@ -98,8 +95,8 @@ async def get_relations_for_event( aggregation_key: Optional[str] = None, limit: int = 5, direction: str = "b", - from_token: Optional[RelationPaginationToken] = None, - to_token: Optional[RelationPaginationToken] = None, + from_token: Optional[StreamToken] = None, + to_token: Optional[StreamToken] = None, ) -> PaginationChunk: """Get a list of relations for an event, ordered by topological ordering. 
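Putting the servlet and tokens together, a client pages through relations by feeding each response's `next_batch` back in as `from`. An illustrative loop, where `client`, `room_id` and `parent_id` are placeholders for any HTTP helper and identifiers:

```python
# Illustrative pagination loop against the relations endpoint handled above.
# `client` stands in for any HTTP helper that returns parsed JSON bodies.
def fetch_all_relations(client, room_id: str, parent_id: str) -> list:
    event_ids = []
    from_token = None
    while True:
        url = (
            f"/_matrix/client/unstable/rooms/{room_id}"
            f"/relations/{parent_id}?limit=5"
        )
        if from_token:
            url += f"&from={from_token}"
        body = client.get(url)
        event_ids.extend(ev["event_id"] for ev in body["chunk"])
        from_token = body.get("next_batch")
        if not from_token:
            return event_ids
```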
@@ -138,8 +135,10 @@ async def get_relations_for_event( pagination_clause = generate_pagination_where_clause( direction=direction, column_names=("topological_ordering", "stream_ordering"), - from_token=attr.astuple(from_token) if from_token else None, # type: ignore[arg-type] - to_token=attr.astuple(to_token) if to_token else None, # type: ignore[arg-type] + from_token=from_token.room_key.as_historical_tuple() + if from_token + else None, + to_token=to_token.room_key.as_historical_tuple() if to_token else None, engine=self.database_engine, ) @@ -177,12 +176,27 @@ def _get_recent_references_for_event_txn( last_topo_id = row[1] last_stream_id = row[2] - next_batch = None + # If there are more events, generate the next pagination key. + next_token = None if len(events) > limit and last_topo_id and last_stream_id: - next_batch = RelationPaginationToken(last_topo_id, last_stream_id) + next_key = RoomStreamToken(last_topo_id, last_stream_id) + if from_token: + next_token = from_token.copy_and_replace("room_key", next_key) + else: + next_token = StreamToken( + room_key=next_key, + presence_key=0, + typing_key=0, + receipt_key=0, + account_data_key=0, + push_rules_key=0, + to_device_key=0, + device_list_key=0, + groups_key=0, + ) return PaginationChunk( - chunk=list(events[:limit]), next_batch=next_batch, prev_batch=from_token + chunk=list(events[:limit]), next_batch=next_token, prev_batch=from_token ) return await self.db_pool.runInteraction( @@ -676,13 +690,15 @@ async def _get_bundled_aggregation_for_event( annotations = await self.get_aggregation_groups_for_event(event_id, room_id) if annotations.chunk: - aggregations.annotations = annotations.to_dict() + aggregations.annotations = await annotations.to_dict( + cast("DataStore", self) + ) references = await self.get_relations_for_event( event_id, room_id, RelationTypes.REFERENCE, direction="f" ) if references.chunk: - aggregations.references = references.to_dict() + aggregations.references = await references.to_dict(cast("DataStore", self)) # If this event is the start of a thread, include a summary of the replies. if self._msc3440_enabled: diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py index b1536c1ca491..36ca2b827398 100644 --- a/synapse/storage/relations.py +++ b/synapse/storage/relations.py @@ -13,13 +13,16 @@ # limitations under the License. 
import logging -from typing import Any, Dict, List, Optional, Tuple +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import attr from synapse.api.errors import SynapseError from synapse.types import JsonDict +if TYPE_CHECKING: + from synapse.storage.databases.main import DataStore + logger = logging.getLogger(__name__) @@ -39,14 +42,14 @@ class PaginationChunk: next_batch: Optional[Any] = None prev_batch: Optional[Any] = None - def to_dict(self) -> Dict[str, Any]: + async def to_dict(self, store: "DataStore") -> Dict[str, Any]: d = {"chunk": self.chunk} if self.next_batch: - d["next_batch"] = self.next_batch.to_string() + d["next_batch"] = await self.next_batch.to_string(store) if self.prev_batch: - d["prev_batch"] = self.prev_batch.to_string() + d["prev_batch"] = await self.prev_batch.to_string(store) return d @@ -75,7 +78,7 @@ def from_string(string: str) -> "RelationPaginationToken": except ValueError: raise SynapseError(400, "Invalid relation pagination token") - def to_string(self) -> str: + async def to_string(self, store: "DataStore") -> str: return "%d-%d" % (self.topological, self.stream) def as_tuple(self) -> Tuple[Any, ...]: @@ -105,7 +108,7 @@ def from_string(string: str) -> "AggregationPaginationToken": except ValueError: raise SynapseError(400, "Invalid aggregation pagination token") - def to_string(self) -> str: + async def to_string(self, store: "DataStore") -> str: return "%d-%d" % (self.count, self.stream) def as_tuple(self) -> Tuple[Any, ...]: diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index 06721e67c9c0..9768fb29711e 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -21,7 +21,8 @@ from synapse.api.constants import EventTypes, RelationTypes from synapse.rest import admin from synapse.rest.client import login, register, relations, room, sync -from synapse.types import JsonDict +from synapse.storage.relations import RelationPaginationToken +from synapse.types import JsonDict, StreamToken from tests import unittest from tests.server import FakeChannel @@ -200,6 +201,15 @@ def test_basic_paginate_relations(self): channel.json_body.get("next_batch"), str, channel.json_body ) + def _stream_token_to_relation_token(self, token: str) -> str: + """Convert a StreamToken into a legacy token (RelationPaginationToken).""" + room_key = self.get_success(StreamToken.from_string(self.store, token)).room_key + return self.get_success( + RelationPaginationToken( + topological=room_key.topological, stream=room_key.stream + ).to_string(self.store) + ) + def test_repeated_paginate_relations(self): """Test that if we paginate using a limit and tokens then we get the expected events. 
@@ -213,7 +223,7 @@ def test_repeated_paginate_relations(self): self.assertEquals(200, channel.code, channel.json_body) expected_event_ids.append(channel.json_body["event_id"]) - prev_token: Optional[str] = None + prev_token = "" found_event_ids: List[str] = [] for _ in range(20): from_token = "" @@ -222,8 +232,35 @@ def test_repeated_paginate_relations(self): channel = self.make_request( "GET", - "/_matrix/client/unstable/rooms/%s/relations/%s?limit=1%s" - % (self.room, self.parent_id, from_token), + f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1{from_token}", + access_token=self.user_token, + ) + self.assertEquals(200, channel.code, channel.json_body) + + found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"]) + next_batch = channel.json_body.get("next_batch") + + self.assertNotEquals(prev_token, next_batch) + prev_token = next_batch + + if not prev_token: + break + + # We paginated backwards, so reverse + found_event_ids.reverse() + self.assertEquals(found_event_ids, expected_event_ids) + + # Reset and try again, but convert the tokens to the legacy format. + prev_token = "" + found_event_ids = [] + for _ in range(20): + from_token = "" + if prev_token: + from_token = "&from=" + self._stream_token_to_relation_token(prev_token) + + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1{from_token}", access_token=self.user_token, ) self.assertEquals(200, channel.code, channel.json_body) @@ -241,6 +278,65 @@ def test_repeated_paginate_relations(self): found_event_ids.reverse() self.assertEquals(found_event_ids, expected_event_ids) + def test_pagination_from_sync_and_messages(self): + """Pagination tokens from /sync and /messages can be used to paginate /relations.""" + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "A") + self.assertEquals(200, channel.code, channel.json_body) + annotation_id = channel.json_body["event_id"] + # Send an event after the relation events. + self.helper.send(self.room, body="Latest event", tok=self.user_token) + + # Request /sync, limiting it such that only the latest event is returned + # (and not the relation). + filter = urllib.parse.quote_plus( + '{"room": {"timeline": {"limit": 1}}}'.encode() + ) + channel = self.make_request( + "GET", f"/sync?filter={filter}", access_token=self.user_token + ) + self.assertEquals(200, channel.code, channel.json_body) + room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] + sync_prev_batch = room_timeline["prev_batch"] + self.assertIsNotNone(sync_prev_batch) + # Ensure the relation event is not in the batch returned from /sync. + self.assertNotIn( + annotation_id, [ev["event_id"] for ev in room_timeline["events"]] + ) + + # Request /messages, limiting it such that only the latest event is + # returned (and not the relation). + channel = self.make_request( + "GET", + f"/rooms/{self.room}/messages?dir=b&limit=1", + access_token=self.user_token, + ) + self.assertEquals(200, channel.code, channel.json_body) + messages_end = channel.json_body["end"] + self.assertIsNotNone(messages_end) + # Ensure the relation event is not in the chunk returned from /messages. + self.assertNotIn( + annotation_id, [ev["event_id"] for ev in channel.json_body["chunk"]] + ) + + # Request /relations with the pagination tokens received from both the + # /sync and /messages responses above, in turn. 
+ # + # This is a tiny bit silly since the client wouldn't know the parent ID + # from the requests above; consider the parent ID to be known from a + # previous /sync. + for from_token in (sync_prev_batch, messages_end): + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?from={from_token}", + access_token=self.user_token, + ) + self.assertEquals(200, channel.code, channel.json_body) + + # The relation should be in the returned chunk. + self.assertIn( + annotation_id, [ev["event_id"] for ev in channel.json_body["chunk"]] + ) + def test_aggregation_pagination_groups(self): """Test that we can paginate annotation groups correctly.""" @@ -337,7 +433,7 @@ def test_aggregation_pagination_within_group(self): channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a") self.assertEquals(200, channel.code, channel.json_body) - prev_token: Optional[str] = None + prev_token = "" found_event_ids: List[str] = [] encoded_key = urllib.parse.quote_plus("👍".encode()) for _ in range(20): @@ -347,15 +443,42 @@ def test_aggregation_pagination_within_group(self): channel = self.make_request( "GET", - "/_matrix/client/unstable/rooms/%s" - "/aggregations/%s/%s/m.reaction/%s?limit=1%s" - % ( - self.room, - self.parent_id, - RelationTypes.ANNOTATION, - encoded_key, - from_token, - ), + f"/_matrix/client/unstable/rooms/{self.room}" + f"/aggregations/{self.parent_id}/{RelationTypes.ANNOTATION}" + f"/m.reaction/{encoded_key}?limit=1{from_token}", + access_token=self.user_token, + ) + self.assertEquals(200, channel.code, channel.json_body) + + self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) + + found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"]) + + next_batch = channel.json_body.get("next_batch") + + self.assertNotEquals(prev_token, next_batch) + prev_token = next_batch + + if not prev_token: + break + + # We paginated backwards, so reverse + found_event_ids.reverse() + self.assertEquals(found_event_ids, expected_event_ids) + + # Reset and try again, but convert the tokens to the legacy format. + prev_token = "" + found_event_ids = [] + for _ in range(20): + from_token = "" + if prev_token: + from_token = "&from=" + self._stream_token_to_relation_token(prev_token) + + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}" + f"/aggregations/{self.parent_id}/{RelationTypes.ANNOTATION}" + f"/m.reaction/{encoded_key}?limit=1{from_token}", access_token=self.user_token, ) self.assertEquals(200, channel.code, channel.json_body) From 1e12efa1b250f33bfcc8d07c259047ad23236fcc Mon Sep 17 00:00:00 2001 From: Shay Date: Thu, 10 Feb 2022 13:59:42 -0800 Subject: [PATCH 222/474] Drop support for EOL Ubuntu 21.04 (#11961) --- changelog.d/11961.removal | 1 + scripts-dev/build_debian_packages | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 changelog.d/11961.removal diff --git a/changelog.d/11961.removal b/changelog.d/11961.removal new file mode 100644 index 000000000000..67b86ac7c177 --- /dev/null +++ b/changelog.d/11961.removal @@ -0,0 +1 @@ +No longer build `.deb` packages for Ubuntu 21.04 Hirsute Hippo, which has now EOLed. 
\ No newline at end of file diff --git a/scripts-dev/build_debian_packages b/scripts-dev/build_debian_packages index 4d34e9070363..7ff96a1ee6fe 100755 --- a/scripts-dev/build_debian_packages +++ b/scripts-dev/build_debian_packages @@ -25,7 +25,6 @@ DISTS = ( "debian:bookworm", "debian:sid", "ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14) - "ubuntu:hirsute", # 21.04 (EOL 2022-01-05) "ubuntu:impish", # 21.10 (EOL 2022-07) ) From d36943c4df841e789ef85c206c7fc7c4665ba4bd Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Fri, 11 Feb 2022 10:32:11 +0100 Subject: [PATCH 223/474] Support the stable API endpoint for MSC3283: new settings in `/capabilities` endpoint (#11933) --- changelog.d/11933.feature | 1 + docs/upgrade.md | 19 +++++++ synapse/rest/client/capabilities.py | 15 +++++- tests/rest/client/test_capabilities.py | 69 ++++++++------------------ 4 files changed, 55 insertions(+), 49 deletions(-) create mode 100644 changelog.d/11933.feature diff --git a/changelog.d/11933.feature b/changelog.d/11933.feature new file mode 100644 index 000000000000..2b1b0d1786a6 --- /dev/null +++ b/changelog.d/11933.feature @@ -0,0 +1 @@ +Support the stable API endpoint for [MSC3283](https://github.com/matrix-org/matrix-doc/pull/3283): new settings in `/capabilities` endpoint. diff --git a/docs/upgrade.md b/docs/upgrade.md index 581fd7de536b..c5e0697333fd 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -100,6 +100,25 @@ to: Please update any relevant reverse proxy or firewall configurations appropriately. +## Deprecation of `capability` `org.matrix.msc3283.*` + +The `capabilities` of MSC3283 from the REST API `/_matrix/client/r0/capabilities` +becomes stable. + +The old `capabilities` +- `org.matrix.msc3283.set_displayname`, +- `org.matrix.msc3283.set_avatar_url` and +- `org.matrix.msc3283.3pid_changes` + +are deprecated and scheduled to be removed in Synapse v1.(next+1).0. + +The new `capabilities` +- `m.set_displayname`, +- `m.set_avatar_url` and +- `m.3pid_changes` + +are now active by default. + # Upgrading to v1.53.0 ## Dropping support for `webclient` listeners and non-HTTP(S) `web_client_location` diff --git a/synapse/rest/client/capabilities.py b/synapse/rest/client/capabilities.py index 5c0e3a568007..6682da077a3e 100644 --- a/synapse/rest/client/capabilities.py +++ b/synapse/rest/client/capabilities.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +from http import HTTPStatus from typing import TYPE_CHECKING, Tuple from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, MSC3244_CAPABILITIES @@ -54,6 +55,15 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: }, }, "m.change_password": {"enabled": change_password}, + "m.set_displayname": { + "enabled": self.config.registration.enable_set_displayname + }, + "m.set_avatar_url": { + "enabled": self.config.registration.enable_set_avatar_url + }, + "m.3pid_changes": { + "enabled": self.config.registration.enable_3pid_changes + }, } } @@ -62,6 +72,9 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: "org.matrix.msc3244.room_capabilities" ] = MSC3244_CAPABILITIES + # Must be removed in later versions. + # Is only included for migration. + # Also the parts in `synapse/config/experimental.py`. 
if self.config.experimental.msc3283_enabled: response["capabilities"]["org.matrix.msc3283.set_displayname"] = { "enabled": self.config.registration.enable_set_displayname @@ -76,7 +89,7 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: if self.config.experimental.msc3440_enabled: response["capabilities"]["io.element.thread"] = {"enabled": True} - return 200, response + return HTTPStatus.OK, response def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: diff --git a/tests/rest/client/test_capabilities.py b/tests/rest/client/test_capabilities.py index 249808b03149..989e80176820 100644 --- a/tests/rest/client/test_capabilities.py +++ b/tests/rest/client/test_capabilities.py @@ -11,6 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from http import HTTPStatus + import synapse.rest.admin from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.rest.client import capabilities, login @@ -28,7 +30,7 @@ class CapabilitiesTestCase(unittest.HomeserverTestCase): ] def make_homeserver(self, reactor, clock): - self.url = b"/_matrix/client/r0/capabilities" + self.url = b"/capabilities" hs = self.setup_test_homeserver() self.config = hs.config self.auth_handler = hs.get_auth_handler() @@ -96,39 +98,20 @@ def test_get_change_password_capabilities_password_disabled(self): self.assertEqual(channel.code, 200) self.assertFalse(capabilities["m.change_password"]["enabled"]) - def test_get_change_users_attributes_capabilities_when_msc3283_disabled(self): - """Test that per default msc3283 is disabled server returns `m.change_password`.""" + def test_get_change_users_attributes_capabilities(self): + """Test that server returns capabilities by default.""" access_token = self.login(self.localpart, self.password) channel = self.make_request("GET", self.url, access_token=access_token) capabilities = channel.json_body["capabilities"] - self.assertEqual(channel.code, 200) + self.assertEqual(channel.code, HTTPStatus.OK) self.assertTrue(capabilities["m.change_password"]["enabled"]) - self.assertNotIn("org.matrix.msc3283.set_displayname", capabilities) - self.assertNotIn("org.matrix.msc3283.set_avatar_url", capabilities) - self.assertNotIn("org.matrix.msc3283.3pid_changes", capabilities) - - @override_config({"experimental_features": {"msc3283_enabled": True}}) - def test_get_change_users_attributes_capabilities_when_msc3283_enabled(self): - """Test if msc3283 is enabled server returns capabilities.""" - access_token = self.login(self.localpart, self.password) - - channel = self.make_request("GET", self.url, access_token=access_token) - capabilities = channel.json_body["capabilities"] + self.assertTrue(capabilities["m.set_displayname"]["enabled"]) + self.assertTrue(capabilities["m.set_avatar_url"]["enabled"]) + self.assertTrue(capabilities["m.3pid_changes"]["enabled"]) - self.assertEqual(channel.code, 200) - self.assertTrue(capabilities["m.change_password"]["enabled"]) - self.assertTrue(capabilities["org.matrix.msc3283.set_displayname"]["enabled"]) - self.assertTrue(capabilities["org.matrix.msc3283.set_avatar_url"]["enabled"]) - self.assertTrue(capabilities["org.matrix.msc3283.3pid_changes"]["enabled"]) - - @override_config( - { - "enable_set_displayname": False, - "experimental_features": {"msc3283_enabled": True}, - } - ) + @override_config({"enable_set_displayname": False}) def 
test_get_set_displayname_capabilities_displayname_disabled(self): """Test if set displayname is disabled that the server responds it.""" access_token = self.login(self.localpart, self.password) @@ -136,15 +119,10 @@ def test_get_set_displayname_capabilities_displayname_disabled(self): channel = self.make_request("GET", self.url, access_token=access_token) capabilities = channel.json_body["capabilities"] - self.assertEqual(channel.code, 200) - self.assertFalse(capabilities["org.matrix.msc3283.set_displayname"]["enabled"]) - - @override_config( - { - "enable_set_avatar_url": False, - "experimental_features": {"msc3283_enabled": True}, - } - ) + self.assertEqual(channel.code, HTTPStatus.OK) + self.assertFalse(capabilities["m.set_displayname"]["enabled"]) + + @override_config({"enable_set_avatar_url": False}) def test_get_set_avatar_url_capabilities_avatar_url_disabled(self): """Test if set avatar_url is disabled that the server responds it.""" access_token = self.login(self.localpart, self.password) @@ -152,24 +130,19 @@ def test_get_set_avatar_url_capabilities_avatar_url_disabled(self): channel = self.make_request("GET", self.url, access_token=access_token) capabilities = channel.json_body["capabilities"] - self.assertEqual(channel.code, 200) - self.assertFalse(capabilities["org.matrix.msc3283.set_avatar_url"]["enabled"]) - - @override_config( - { - "enable_3pid_changes": False, - "experimental_features": {"msc3283_enabled": True}, - } - ) - def test_change_3pid_capabilities_3pid_disabled(self): + self.assertEqual(channel.code, HTTPStatus.OK) + self.assertFalse(capabilities["m.set_avatar_url"]["enabled"]) + + @override_config({"enable_3pid_changes": False}) + def test_get_change_3pid_capabilities_3pid_disabled(self): """Test if change 3pid is disabled that the server responds it.""" access_token = self.login(self.localpart, self.password) channel = self.make_request("GET", self.url, access_token=access_token) capabilities = channel.json_body["capabilities"] - self.assertEqual(channel.code, 200) - self.assertFalse(capabilities["org.matrix.msc3283.3pid_changes"]["enabled"]) + self.assertEqual(channel.code, HTTPStatus.OK) + self.assertFalse(capabilities["m.3pid_changes"]["enabled"]) @override_config({"experimental_features": {"msc3244_enabled": False}}) def test_get_does_not_include_msc3244_fields_when_disabled(self): From c3db7a0b59d48b8872bc24096f9a2467ef35f703 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 11 Feb 2022 12:06:02 +0000 Subject: [PATCH 224/474] Tests: replace mocked Authenticator with the real thing (#11913) If we prepopulate the test homeserver with a key for a remote homeserver, we can make federation requests to it without having to stub out the authenticator. This has two advantages: * means that what we are testing is closer to reality (ie, we now have complete tests for the incoming-request-authorisation flow) * some tests require that other objects be signed by the remote server (eg, the event in `/send_join`), and doing that would require a whole separate set of mocking out. It's much simpler just to use real keys. 
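The idea sketched below is the crux of the change: register a verify key for a fake remote server with the test homeserver, then sign each outgoing test request with the matching signing key and present it in an `X-Matrix` Authorization header. A rough illustration with `signedjson`; the names and header layout are simplified relative to the helper added in this patch:

```python
# Rough illustration of signing a federation request, as the new
# make_signed_federation_request helper does conceptually. The request dict
# and header construction are simplified for the sketch.
import signedjson.key
import signedjson.sign

signing_key = signedjson.key.generate_signing_key("a")  # ed25519, version "a"

request: dict = {
    "method": "GET",
    "uri": "/_matrix/federation/v1/state/!room:test",
    "origin": "other.example.com",
    "destination": "test",
}
signed = signedjson.sign.sign_json(request, "other.example.com", signing_key)
signature = signed["signatures"]["other.example.com"]["ed25519:a"]
auth_header = (
    'X-Matrix origin=other.example.com,key="ed25519:a",sig="%s"' % signature
)
```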
--- changelog.d/11913.misc | 1 + tests/federation/test_complexity.py | 4 +- tests/federation/test_federation_server.py | 4 +- tests/federation/transport/test_knocking.py | 4 +- tests/federation/transport/test_server.py | 6 +- tests/rest/client/test_third_party_rules.py | 6 +- tests/unittest.py | 136 +++++++++++++++----- 7 files changed, 117 insertions(+), 44 deletions(-) create mode 100644 changelog.d/11913.misc diff --git a/changelog.d/11913.misc b/changelog.d/11913.misc new file mode 100644 index 000000000000..cb705603640a --- /dev/null +++ b/changelog.d/11913.misc @@ -0,0 +1 @@ +Tests: replace mocked `Authenticator` with the real thing. diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py index 7b486aba4a04..e40ef9587417 100644 --- a/tests/federation/test_complexity.py +++ b/tests/federation/test_complexity.py @@ -47,7 +47,7 @@ def test_complexity_simple(self): ) # Get the room complexity - channel = self.make_request( + channel = self.make_signed_federation_request( "GET", "/_matrix/federation/unstable/rooms/%s/complexity" % (room_1,) ) self.assertEquals(200, channel.code) @@ -59,7 +59,7 @@ def test_complexity_simple(self): store.get_current_state_event_counts = lambda x: make_awaitable(500 * 1.23) # Get the room complexity again -- make sure it's our artificial value - channel = self.make_request( + channel = self.make_signed_federation_request( "GET", "/_matrix/federation/unstable/rooms/%s/complexity" % (room_1,) ) self.assertEquals(200, channel.code) diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py index 03e1e11f492f..1af284bd2fb8 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py @@ -113,7 +113,7 @@ def test_without_event_id(self): room_1 = self.helper.create_room_as(u1, tok=u1_token) self.inject_room_member(room_1, "@user:other.example.com", "join") - channel = self.make_request( + channel = self.make_signed_federation_request( "GET", "/_matrix/federation/v1/state/%s" % (room_1,) ) self.assertEquals(200, channel.code, channel.result) @@ -145,7 +145,7 @@ def test_needs_to_be_in_room(self): room_1 = self.helper.create_room_as(u1, tok=u1_token) - channel = self.make_request( + channel = self.make_signed_federation_request( "GET", "/_matrix/federation/v1/state/%s" % (room_1,) ) self.assertEquals(403, channel.code, channel.result) diff --git a/tests/federation/transport/test_knocking.py b/tests/federation/transport/test_knocking.py index bfa156eebbe5..686f42ab48ac 100644 --- a/tests/federation/transport/test_knocking.py +++ b/tests/federation/transport/test_knocking.py @@ -245,7 +245,7 @@ def test_room_state_returned_when_knocking(self): self.hs, room_id, user_id ) - channel = self.make_request( + channel = self.make_signed_federation_request( "GET", "/_matrix/federation/v1/make_knock/%s/%s?ver=%s" % ( @@ -288,7 +288,7 @@ def test_room_state_returned_when_knocking(self): ) # Send the signed knock event into the room - channel = self.make_request( + channel = self.make_signed_federation_request( "PUT", "/_matrix/federation/v1/send_knock/%s/%s" % (room_id, signed_knock_event.event_id), diff --git a/tests/federation/transport/test_server.py b/tests/federation/transport/test_server.py index 84fa72b9ff14..eb62addda8c6 100644 --- a/tests/federation/transport/test_server.py +++ b/tests/federation/transport/test_server.py @@ -22,10 +22,9 @@ def test_blocked_public_room_list_over_federation(self): """Test that unauthenticated requests to the public rooms 
directory 403 when allow_public_rooms_over_federation is False. """ - channel = self.make_request( + channel = self.make_signed_federation_request( "GET", "/_matrix/federation/v1/publicRooms", - federation_auth_origin=b"example.com", ) self.assertEquals(403, channel.code) @@ -34,9 +33,8 @@ def test_open_public_room_list_over_federation(self): """Test that unauthenticated requests to the public rooms directory 200 when allow_public_rooms_over_federation is True. """ - channel = self.make_request( + channel = self.make_signed_federation_request( "GET", "/_matrix/federation/v1/publicRooms", - federation_auth_origin=b"example.com", ) self.assertEquals(200, channel.code) diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index 4e71b6ec12e8..ac6b86ff6b02 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -107,6 +107,7 @@ async def _check_event_auth(origin, event, context, *args, **kwargs): return hs def prepare(self, reactor, clock, homeserver): + super().prepare(reactor, clock, homeserver) # Create some users and a room to play with during the tests self.user_id = self.register_user("kermit", "monkey") self.invitee = self.register_user("invitee", "hackme") @@ -473,8 +474,6 @@ def test_on_new_event(self): def _send_event_over_federation(self) -> None: """Send a dummy event over federation and check that the request succeeds.""" body = { - "origin": self.hs.config.server.server_name, - "origin_server_ts": self.clock.time_msec(), "pdus": [ { "sender": self.user_id, @@ -492,11 +491,10 @@ def _send_event_over_federation(self) -> None: ], } - channel = self.make_request( + channel = self.make_signed_federation_request( method="PUT", path="/_matrix/federation/v1/send/1", content=body, - federation_auth_origin=self.hs.config.server.server_name.encode("utf8"), ) self.assertEqual(channel.code, 200, channel.result) diff --git a/tests/unittest.py b/tests/unittest.py index 6fc617601a40..a71892cb9dbe 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -17,6 +17,7 @@ import hashlib import hmac import inspect +import json import logging import secrets import time @@ -36,9 +37,11 @@ ) from unittest.mock import Mock, patch -from canonicaljson import json +import canonicaljson +import signedjson.key +import unpaddedbase64 -from twisted.internet.defer import Deferred, ensureDeferred, succeed +from twisted.internet.defer import Deferred, ensureDeferred from twisted.python.failure import Failure from twisted.python.threadpool import ThreadPool from twisted.test.proto_helpers import MemoryReactor @@ -49,8 +52,7 @@ from synapse import events from synapse.api.constants import EventTypes, Membership from synapse.config.homeserver import HomeServerConfig -from synapse.config.ratelimiting import FederationRateLimitConfig -from synapse.federation.transport import server as federation_server +from synapse.federation.transport.server import TransportLayerServer from synapse.http.server import JsonResource from synapse.http.site import SynapseRequest, SynapseSite from synapse.logging.context import ( @@ -61,10 +63,10 @@ ) from synapse.rest import RegisterServletsFunc from synapse.server import HomeServer +from synapse.storage.keys import FetchKeyResult from synapse.types import JsonDict, UserID, create_requester from synapse.util import Clock from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.ratelimitutils import FederationRateLimiter from tests.server import FakeChannel, 
get_clock, make_request, setup_test_homeserver from tests.test_utils import event_injection, setup_awaitable_errors @@ -755,42 +757,116 @@ def inject_room_member(self, room: str, user: str, membership: Membership) -> No class FederatingHomeserverTestCase(HomeserverTestCase): """ - A federating homeserver that authenticates incoming requests as `other.example.com`. + A federating homeserver, set up to validate incoming federation requests """ - def create_resource_dict(self) -> Dict[str, Resource]: - d = super().create_resource_dict() - d["/_matrix/federation"] = TestTransportLayerServer(self.hs) - return d + OTHER_SERVER_NAME = "other.example.com" + OTHER_SERVER_SIGNATURE_KEY = signedjson.key.generate_signing_key("test") + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer): + super().prepare(reactor, clock, hs) -class TestTransportLayerServer(JsonResource): - """A test implementation of TransportLayerServer + # poke the other server's signing key into the key store, so that we don't + # make requests for it + verify_key = signedjson.key.get_verify_key(self.OTHER_SERVER_SIGNATURE_KEY) + verify_key_id = "%s:%s" % (verify_key.alg, verify_key.version) - authenticates incoming requests as `other.example.com`. - """ + self.get_success( + hs.get_datastore().store_server_verify_keys( + from_server=self.OTHER_SERVER_NAME, + ts_added_ms=clock.time_msec(), + verify_keys=[ + ( + self.OTHER_SERVER_NAME, + verify_key_id, + FetchKeyResult( + verify_key=verify_key, + valid_until_ts=clock.time_msec() + 1000, + ), + ) + ], + ) + ) + + def create_resource_dict(self) -> Dict[str, Resource]: + d = super().create_resource_dict() + d["/_matrix/federation"] = TransportLayerServer(self.hs) + return d - def __init__(self, hs): - super().__init__(hs) + def make_signed_federation_request( + self, + method: str, + path: str, + content: Optional[JsonDict] = None, + await_result: bool = True, + custom_headers: Optional[Iterable[Tuple[AnyStr, AnyStr]]] = None, + client_ip: str = "127.0.0.1", + ) -> FakeChannel: + """Make an inbound signed federation request to this server - class Authenticator: - def authenticate_request(self, request, content): - return succeed("other.example.com") + The request is signed as if it came from "other.example.com", which our HS + already has the keys for. 
+ """ - authenticator = Authenticator() + if custom_headers is None: + custom_headers = [] + else: + custom_headers = list(custom_headers) + + custom_headers.append( + ( + "Authorization", + _auth_header_for_request( + origin=self.OTHER_SERVER_NAME, + destination=self.hs.hostname, + signing_key=self.OTHER_SERVER_SIGNATURE_KEY, + method=method, + path=path, + content=content, + ), + ) + ) - ratelimiter = FederationRateLimiter( - hs.get_clock(), - FederationRateLimitConfig( - window_size=1, - sleep_limit=1, - sleep_delay=1, - reject_limit=1000, - concurrent=1000, - ), + return make_request( + self.reactor, + self.site, + method=method, + path=path, + content=content, + shorthand=False, + await_result=await_result, + custom_headers=custom_headers, + client_ip=client_ip, ) - federation_server.register_servlets(hs, self, authenticator, ratelimiter) + +def _auth_header_for_request( + origin: str, + destination: str, + signing_key: signedjson.key.SigningKey, + method: str, + path: str, + content: Optional[JsonDict], +) -> str: + """Build a suitable Authorization header for an outgoing federation request""" + request_description: JsonDict = { + "method": method, + "uri": path, + "destination": destination, + "origin": origin, + } + if content is not None: + request_description["content"] = content + signature_base64 = unpaddedbase64.encode_base64( + signing_key.sign( + canonicaljson.encode_canonical_json(request_description) + ).signature + ) + return ( + f"X-Matrix origin={origin}," + f"key={signing_key.alg}:{signing_key.version}," + f"sig={signature_base64}" + ) def override_config(extra_config): From a121507cfec0ffce45a89f5a1019034eda5b0c70 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 11 Feb 2022 07:20:16 -0500 Subject: [PATCH 225/474] Adds misc missing type hints (#11953) --- changelog.d/11953.misc | 1 + mypy.ini | 6 ++++ synapse/event_auth.py | 4 ++- synapse/handlers/oidc.py | 4 +-- synapse/http/client.py | 11 +++---- synapse/http/matrixfederationclient.py | 3 +- synapse/notifier.py | 43 ++++++++++++++------------ synapse/server.py | 8 ++--- tests/handlers/test_oidc.py | 9 ++---- 9 files changed, 48 insertions(+), 41 deletions(-) create mode 100644 changelog.d/11953.misc diff --git a/changelog.d/11953.misc b/changelog.d/11953.misc new file mode 100644 index 000000000000..d44571b73149 --- /dev/null +++ b/changelog.d/11953.misc @@ -0,0 +1 @@ +Add missing type hints. diff --git a/mypy.ini b/mypy.ini index cd28ac0dd2fe..63848d664c57 100644 --- a/mypy.ini +++ b/mypy.ini @@ -142,6 +142,9 @@ disallow_untyped_defs = True [mypy-synapse.crypto.*] disallow_untyped_defs = True +[mypy-synapse.event_auth] +disallow_untyped_defs = True + [mypy-synapse.events.*] disallow_untyped_defs = True @@ -166,6 +169,9 @@ disallow_untyped_defs = True [mypy-synapse.module_api.*] disallow_untyped_defs = True +[mypy-synapse.notifier] +disallow_untyped_defs = True + [mypy-synapse.push.*] disallow_untyped_defs = True diff --git a/synapse/event_auth.py b/synapse/event_auth.py index e88596169862..19b55a955942 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -763,7 +763,9 @@ def get_named_level(auth_events: StateMap[EventBase], name: str, default: int) - return default -def _verify_third_party_invite(event: EventBase, auth_events: StateMap[EventBase]): +def _verify_third_party_invite( + event: EventBase, auth_events: StateMap[EventBase] +) -> bool: """ Validates that the invite event is authorized by a previous third-party invite. 
diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py index deb353975143..8f71d975e9a4 100644 --- a/synapse/handlers/oidc.py +++ b/synapse/handlers/oidc.py @@ -544,9 +544,9 @@ async def _exchange_code(self, code: str) -> Token: """ metadata = await self.load_metadata() token_endpoint = metadata.get("token_endpoint") - raw_headers = { + raw_headers: Dict[str, str] = { "Content-Type": "application/x-www-form-urlencoded", - "User-Agent": self._http_client.user_agent, + "User-Agent": self._http_client.user_agent.decode("ascii"), "Accept": "application/json", } diff --git a/synapse/http/client.py b/synapse/http/client.py index d617055617a6..c01d2326cf33 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -322,21 +322,20 @@ def __init__( self._ip_whitelist = ip_whitelist self._ip_blacklist = ip_blacklist self._extra_treq_args = treq_args or {} - - self.user_agent = hs.version_string self.clock = hs.get_clock() + + user_agent = hs.version_string if hs.config.server.user_agent_suffix: - self.user_agent = "%s %s" % ( - self.user_agent, + user_agent = "%s %s" % ( + user_agent, hs.config.server.user_agent_suffix, ) + self.user_agent = user_agent.encode("ascii") # We use this for our body producers to ensure that they use the correct # reactor. self._cooperator = Cooperator(scheduler=_make_scheduler(hs.get_reactor())) - self.user_agent = self.user_agent.encode("ascii") - if self._ip_blacklist: # If we have an IP blacklist, we need to use a DNS resolver which # filters out blacklisted IP addresses, to prevent DNS rebinding. diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 2e668363b2f5..c5f8fcbb2a7d 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -334,12 +334,11 @@ def __init__(self, hs: "HomeServer", tls_client_options_factory): user_agent = hs.version_string if hs.config.server.user_agent_suffix: user_agent = "%s %s" % (user_agent, hs.config.server.user_agent_suffix) - user_agent = user_agent.encode("ascii") federation_agent = MatrixFederationAgent( self.reactor, tls_client_options_factory, - user_agent, + user_agent.encode("ascii"), hs.config.server.federation_ip_range_whitelist, hs.config.server.federation_ip_range_blacklist, ) diff --git a/synapse/notifier.py b/synapse/notifier.py index 5988c67d9076..e0fad2da66b7 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -14,6 +14,7 @@ import logging from typing import ( + TYPE_CHECKING, Awaitable, Callable, Collection, @@ -32,7 +33,6 @@ from twisted.internet import defer -import synapse.server from synapse.api.constants import EventTypes, HistoryVisibility, Membership from synapse.api.errors import AuthError from synapse.events import EventBase @@ -53,6 +53,9 @@ from synapse.util.metrics import Measure from synapse.visibility import filter_events_for_client +if TYPE_CHECKING: + from synapse.server import HomeServer + logger = logging.getLogger(__name__) notified_events_counter = Counter("synapse_notifier_notified_events", "") @@ -82,7 +85,7 @@ class _NotificationListener: __slots__ = ["deferred"] - def __init__(self, deferred): + def __init__(self, deferred: "defer.Deferred"): self.deferred = deferred @@ -124,7 +127,7 @@ def notify( stream_key: str, stream_id: Union[int, RoomStreamToken], time_now_ms: int, - ): + ) -> None: """Notify any listeners for this user of a new event from an event source. 
Args: @@ -152,7 +155,7 @@ def notify( self.notify_deferred = ObservableDeferred(defer.Deferred()) noify_deferred.callback(self.current_token) - def remove(self, notifier: "Notifier"): + def remove(self, notifier: "Notifier") -> None: """Remove this listener from all the indexes in the Notifier it knows about. """ @@ -188,7 +191,7 @@ class EventStreamResult: start_token: StreamToken end_token: StreamToken - def __bool__(self): + def __bool__(self) -> bool: return bool(self.events) @@ -212,7 +215,7 @@ class Notifier: UNUSED_STREAM_EXPIRY_MS = 10 * 60 * 1000 - def __init__(self, hs: "synapse.server.HomeServer"): + def __init__(self, hs: "HomeServer"): self.user_to_user_stream: Dict[str, _NotifierUserStream] = {} self.room_to_user_streams: Dict[str, Set[_NotifierUserStream]] = {} @@ -248,7 +251,7 @@ def __init__(self, hs: "synapse.server.HomeServer"): # This is not a very cheap test to perform, but it's only executed # when rendering the metrics page, which is likely once per minute at # most when scraping it. - def count_listeners(): + def count_listeners() -> int: all_user_streams: Set[_NotifierUserStream] = set() for streams in list(self.room_to_user_streams.values()): @@ -270,7 +273,7 @@ def count_listeners(): "synapse_notifier_users", "", [], lambda: len(self.user_to_user_stream) ) - def add_replication_callback(self, cb: Callable[[], None]): + def add_replication_callback(self, cb: Callable[[], None]) -> None: """Add a callback that will be called when some new data is available. Callback is not given any arguments. It should *not* return a Deferred - if it needs to do any asynchronous work, a background thread should be started and @@ -284,7 +287,7 @@ async def on_new_room_event( event_pos: PersistedEventPosition, max_room_stream_token: RoomStreamToken, extra_users: Optional[Collection[UserID]] = None, - ): + ) -> None: """Unwraps event and calls `on_new_room_event_args`.""" await self.on_new_room_event_args( event_pos=event_pos, @@ -307,7 +310,7 @@ async def on_new_room_event_args( event_pos: PersistedEventPosition, max_room_stream_token: RoomStreamToken, extra_users: Optional[Collection[UserID]] = None, - ): + ) -> None: """Used by handlers to inform the notifier something has happened in the room, room event wise. @@ -338,7 +341,9 @@ async def on_new_room_event_args( self.notify_replication() - def _notify_pending_new_room_events(self, max_room_stream_token: RoomStreamToken): + def _notify_pending_new_room_events( + self, max_room_stream_token: RoomStreamToken + ) -> None: """Notify for the room events that were queued waiting for a previous event to be persisted. Args: @@ -374,7 +379,7 @@ def _notify_pending_new_room_events(self, max_room_stream_token: RoomStreamToken ) self._on_updated_room_token(max_room_stream_token) - def _on_updated_room_token(self, max_room_stream_token: RoomStreamToken): + def _on_updated_room_token(self, max_room_stream_token: RoomStreamToken) -> None: """Poke services that might care that the room position has been updated. 
""" @@ -386,13 +391,13 @@ def _on_updated_room_token(self, max_room_stream_token: RoomStreamToken): if self.federation_sender: self.federation_sender.notify_new_events(max_room_stream_token) - def _notify_app_services(self, max_room_stream_token: RoomStreamToken): + def _notify_app_services(self, max_room_stream_token: RoomStreamToken) -> None: try: self.appservice_handler.notify_interested_services(max_room_stream_token) except Exception: logger.exception("Error notifying application services of event") - def _notify_pusher_pool(self, max_room_stream_token: RoomStreamToken): + def _notify_pusher_pool(self, max_room_stream_token: RoomStreamToken) -> None: try: self._pusher_pool.on_new_notifications(max_room_stream_token) except Exception: @@ -475,8 +480,8 @@ async def wait_for_events( user_id: str, timeout: int, callback: Callable[[StreamToken, StreamToken], Awaitable[T]], - room_ids=None, - from_token=StreamToken.START, + room_ids: Optional[Collection[str]] = None, + from_token: StreamToken = StreamToken.START, ) -> T: """Wait until the callback returns a non empty response or the timeout fires. @@ -700,14 +705,14 @@ def remove_expired_streams(self) -> None: for expired_stream in expired_streams: expired_stream.remove(self) - def _register_with_keys(self, user_stream: _NotifierUserStream): + def _register_with_keys(self, user_stream: _NotifierUserStream) -> None: self.user_to_user_stream[user_stream.user_id] = user_stream for room in user_stream.rooms: s = self.room_to_user_streams.setdefault(room, set()) s.add(user_stream) - def _user_joined_room(self, user_id: str, room_id: str): + def _user_joined_room(self, user_id: str, room_id: str) -> None: new_user_stream = self.user_to_user_stream.get(user_id) if new_user_stream is not None: room_streams = self.room_to_user_streams.setdefault(room_id, set()) @@ -719,7 +724,7 @@ def notify_replication(self) -> None: for cb in self.replication_callbacks: cb() - def notify_remote_server_up(self, server: str): + def notify_remote_server_up(self, server: str) -> None: """Notify any replication that a remote server has come back up""" # We call federation_sender directly rather than registering as a # callback as a) we already have a reference to it and b) it introduces diff --git a/synapse/server.py b/synapse/server.py index 3032f0b738a8..564afdcb9697 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -233,8 +233,8 @@ def __init__( self, hostname: str, config: HomeServerConfig, - reactor=None, - version_string="Synapse", + reactor: Optional[ISynapseReactor] = None, + version_string: str = "Synapse", ): """ Args: @@ -244,7 +244,7 @@ def __init__( if not reactor: from twisted.internet import reactor as _reactor - reactor = _reactor + reactor = cast(ISynapseReactor, _reactor) self._reactor = reactor self.hostname = hostname @@ -264,7 +264,7 @@ def __init__( self._module_web_resources: Dict[str, Resource] = {} self._module_web_resources_consumed = False - def register_module_web_resource(self, path: str, resource: Resource): + def register_module_web_resource(self, path: str, resource: Resource) -> None: """Allows a module to register a web resource to be served at the given path. 
If multiple modules register a resource for the same path, the module that diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index cfe3de526682..a552d8182e09 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -155,7 +155,7 @@ def default_config(self): def make_homeserver(self, reactor, clock): self.http_client = Mock(spec=["get_json"]) self.http_client.get_json.side_effect = get_json - self.http_client.user_agent = "Synapse Test" + self.http_client.user_agent = b"Synapse Test" hs = self.setup_test_homeserver(proxied_http_client=self.http_client) @@ -438,12 +438,9 @@ def test_callback(self): state = "state" nonce = "nonce" client_redirect_url = "http://client/redirect" - user_agent = "Browser" ip_address = "10.0.0.1" session = self._generate_oidc_session_token(state, nonce, client_redirect_url) - request = _build_callback_request( - code, state, session, user_agent=user_agent, ip_address=ip_address - ) + request = _build_callback_request(code, state, session, ip_address=ip_address) self.get_success(self.handler.handle_oidc_callback(request)) @@ -1274,7 +1271,6 @@ def _build_callback_request( code: str, state: str, session: str, - user_agent: str = "Browser", ip_address: str = "10.0.0.1", ): """Builds a fake SynapseRequest to mock the browser callback @@ -1289,7 +1285,6 @@ def _build_callback_request( query param. Should be the same as was embedded in the session in _build_oidc_session. session: the "session" which would have been passed around in the cookie. - user_agent: the user-agent to present ip_address: the IP address to pretend the request came from """ request = Mock( From 4ef39f3353ae2b2c2ccae78b93bb3a05f1911fe6 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 11 Feb 2022 13:07:55 +0000 Subject: [PATCH 226/474] fix import cycle (#11965) --- changelog.d/11965.misc | 1 + synapse/event_auth.py | 54 ++++++++++++++++++++++++------------------ 2 files changed, 32 insertions(+), 23 deletions(-) create mode 100644 changelog.d/11965.misc diff --git a/changelog.d/11965.misc b/changelog.d/11965.misc new file mode 100644 index 000000000000..e0265e103ff2 --- /dev/null +++ b/changelog.d/11965.misc @@ -0,0 +1 @@ +Fix an import cycle in `synapse.event_auth`. diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 19b55a955942..eca00bc97506 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -14,6 +14,7 @@ # limitations under the License. 
import logging +import typing from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union from canonicaljson import encode_canonical_json @@ -34,15 +35,18 @@ EventFormatVersions, RoomVersion, ) -from synapse.events import EventBase -from synapse.events.builder import EventBuilder from synapse.types import StateMap, UserID, get_domain_from_id +if typing.TYPE_CHECKING: + # conditional imports to avoid import cycle + from synapse.events import EventBase + from synapse.events.builder import EventBuilder + logger = logging.getLogger(__name__) def validate_event_for_room_version( - room_version_obj: RoomVersion, event: EventBase + room_version_obj: RoomVersion, event: "EventBase" ) -> None: """Ensure that the event complies with the limits, and has the right signatures @@ -113,7 +117,9 @@ def validate_event_for_room_version( def check_auth_rules_for_event( - room_version_obj: RoomVersion, event: EventBase, auth_events: Iterable[EventBase] + room_version_obj: RoomVersion, + event: "EventBase", + auth_events: Iterable["EventBase"], ) -> None: """Check that an event complies with the auth rules @@ -256,7 +262,7 @@ def check_auth_rules_for_event( logger.debug("Allowing! %s", event) -def _check_size_limits(event: EventBase) -> None: +def _check_size_limits(event: "EventBase") -> None: if len(event.user_id) > 255: raise EventSizeError("'user_id' too large") if len(event.room_id) > 255: @@ -271,7 +277,7 @@ def _check_size_limits(event: EventBase) -> None: raise EventSizeError("event too large") -def _can_federate(event: EventBase, auth_events: StateMap[EventBase]) -> bool: +def _can_federate(event: "EventBase", auth_events: StateMap["EventBase"]) -> bool: creation_event = auth_events.get((EventTypes.Create, "")) # There should always be a creation event, but if not don't federate. if not creation_event: @@ -281,7 +287,7 @@ def _can_federate(event: EventBase, auth_events: StateMap[EventBase]) -> bool: def _is_membership_change_allowed( - room_version: RoomVersion, event: EventBase, auth_events: StateMap[EventBase] + room_version: RoomVersion, event: "EventBase", auth_events: StateMap["EventBase"] ) -> None: """ Confirms that the event which changes membership is an allowed change. 
@@ -471,7 +477,7 @@ def _is_membership_change_allowed( def _check_event_sender_in_room( - event: EventBase, auth_events: StateMap[EventBase] + event: "EventBase", auth_events: StateMap["EventBase"] ) -> None: key = (EventTypes.Member, event.user_id) member_event = auth_events.get(key) @@ -479,7 +485,9 @@ def _check_event_sender_in_room( _check_joined_room(member_event, event.user_id, event.room_id) -def _check_joined_room(member: Optional[EventBase], user_id: str, room_id: str) -> None: +def _check_joined_room( + member: Optional["EventBase"], user_id: str, room_id: str +) -> None: if not member or member.membership != Membership.JOIN: raise AuthError( 403, "User %s not in room %s (%s)" % (user_id, room_id, repr(member)) @@ -487,7 +495,7 @@ def _check_joined_room(member: Optional[EventBase], user_id: str, room_id: str) def get_send_level( - etype: str, state_key: Optional[str], power_levels_event: Optional[EventBase] + etype: str, state_key: Optional[str], power_levels_event: Optional["EventBase"] ) -> int: """Get the power level required to send an event of a given type @@ -523,7 +531,7 @@ def get_send_level( return int(send_level) -def _can_send_event(event: EventBase, auth_events: StateMap[EventBase]) -> bool: +def _can_send_event(event: "EventBase", auth_events: StateMap["EventBase"]) -> bool: power_levels_event = get_power_level_event(auth_events) send_level = get_send_level(event.type, event.get("state_key"), power_levels_event) @@ -547,8 +555,8 @@ def _can_send_event(event: EventBase, auth_events: StateMap[EventBase]) -> bool: def check_redaction( room_version_obj: RoomVersion, - event: EventBase, - auth_events: StateMap[EventBase], + event: "EventBase", + auth_events: StateMap["EventBase"], ) -> bool: """Check whether the event sender is allowed to redact the target event. @@ -585,8 +593,8 @@ def check_redaction( def check_historical( room_version_obj: RoomVersion, - event: EventBase, - auth_events: StateMap[EventBase], + event: "EventBase", + auth_events: StateMap["EventBase"], ) -> None: """Check whether the event sender is allowed to send historical related events like "insertion", "batch", and "marker". 
@@ -616,8 +624,8 @@ def check_historical(
 def _check_power_levels(
     room_version_obj: RoomVersion,
-    event: EventBase,
-    auth_events: StateMap[EventBase],
+    event: "EventBase",
+    auth_events: StateMap["EventBase"],
 ) -> None:
     user_list = event.content.get("users", {})
     # Validate users
@@ -710,11 +718,11 @@ def _check_power_levels(
         )
 
 
-def get_power_level_event(auth_events: StateMap[EventBase]) -> Optional[EventBase]:
+def get_power_level_event(auth_events: StateMap["EventBase"]) -> Optional["EventBase"]:
     return auth_events.get((EventTypes.PowerLevels, ""))
 
 
-def get_user_power_level(user_id: str, auth_events: StateMap[EventBase]) -> int:
+def get_user_power_level(user_id: str, auth_events: StateMap["EventBase"]) -> int:
     """Get a user's power level
 
     Args:
@@ -750,7 +758,7 @@ def get_user_power_level(user_id: str, auth_events: StateMap[EventBase]) -> int:
         return 0
 
 
-def get_named_level(auth_events: StateMap[EventBase], name: str, default: int) -> int:
+def get_named_level(auth_events: StateMap["EventBase"], name: str, default: int) -> int:
     power_level_event = get_power_level_event(auth_events)
 
     if not power_level_event:
@@ -764,7 +772,7 @@ def get_named_level(auth_events: StateMap[EventBase], name: str, default: int) -
 
 
 def _verify_third_party_invite(
-    event: EventBase, auth_events: StateMap[EventBase]
+    event: "EventBase", auth_events: StateMap["EventBase"]
 ) -> bool:
     """
     Validates that the invite event is authorized by a previous third-party invite.
@@ -829,7 +837,7 @@ def _verify_third_party_invite(
     return False
 
 
-def get_public_keys(invite_event: EventBase) -> List[Dict[str, Any]]:
+def get_public_keys(invite_event: "EventBase") -> List[Dict[str, Any]]:
     public_keys = []
     if "public_key" in invite_event.content:
         o = {"public_key": invite_event.content["public_key"]}
@@ -841,7 +849,7 @@ def get_public_keys(invite_event: EventBase) -> List[Dict[str, Any]]:
 
 
 def auth_types_for_event(
-    room_version: RoomVersion, event: Union[EventBase, EventBuilder]
+    room_version: RoomVersion, event: Union["EventBase", "EventBuilder"]
 ) -> Set[Tuple[str, str]]:
     """Given an event, return a list of (EventType, StateKey) that may be
     needed to auth the event. The returned list may be a superset of what

From 79fb64e417686c91eeb64016e91dffeac9115a80 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 11 Feb 2022 13:38:05 +0000
Subject: [PATCH 227/474] Fix to-device being dropped in limited sync in
 SQLite. (#11966)

If there are more than 100 to-device messages pending for a device,
`/sync` will only return the first 100; however, the next batch token was
incorrectly calculated and so all other pending messages would be dropped.

This is due to `txn.rowcount` only returning the number of rows that
*changed*, rather than the number *selected* in SQLite.
---
 changelog.d/11966.bugfix                      |  1 +
 synapse/storage/databases/main/deviceinbox.py |  5 ++-
 tests/rest/client/test_sendtodevice.py        | 40 +++++++++++++++++++
 3 files changed, 45 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/11966.bugfix

diff --git a/changelog.d/11966.bugfix b/changelog.d/11966.bugfix
new file mode 100644
index 000000000000..af8e096667f2
--- /dev/null
+++ b/changelog.d/11966.bugfix
@@ -0,0 +1 @@
+Fix to-device messages being dropped during limited sync when using SQLite.
diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py
index 8801b7b2dd83..1392363de15a 100644
--- a/synapse/storage/databases/main/deviceinbox.py
+++ b/synapse/storage/databases/main/deviceinbox.py
@@ -362,7 +362,10 @@ def get_device_messages_txn(txn: LoggingTransaction):
             # intended for each device.
             last_processed_stream_pos = to_stream_id
             recipient_device_to_messages: Dict[Tuple[str, str], List[JsonDict]] = {}
+            rowcount = 0
             for row in txn:
+                rowcount += 1
+
                 last_processed_stream_pos = row[0]
                 recipient_user_id = row[1]
                 recipient_device_id = row[2]
@@ -373,7 +376,7 @@ def get_device_messages_txn(txn: LoggingTransaction):
                     (recipient_user_id, recipient_device_id), []
                 ).append(message_dict)
 
-            if limit is not None and txn.rowcount == limit:
+            if limit is not None and rowcount == limit:
                 # We ended up bumping up against the message limit. There may be more messages
                 # to retrieve. Return what we have, as well as the last stream position that
                 # was processed.
diff --git a/tests/rest/client/test_sendtodevice.py b/tests/rest/client/test_sendtodevice.py
index 6db7062a8e1f..e2ed14457fc0 100644
--- a/tests/rest/client/test_sendtodevice.py
+++ b/tests/rest/client/test_sendtodevice.py
@@ -198,3 +198,43 @@ def test_remote_room_key_request(self):
                 "content": {"idx": 3},
             },
         )
+
+    def test_limited_sync(self):
+        """If a limited sync for to-device messages happens, the next /sync should respond immediately."""
+
+        self.register_user("u1", "pass")
+        user1_tok = self.login("u1", "pass", "d1")
+
+        user2 = self.register_user("u2", "pass")
+        user2_tok = self.login("u2", "pass", "d2")
+
+        # Do an initial sync
+        channel = self.make_request("GET", "/sync", access_token=user2_tok)
+        self.assertEqual(channel.code, 200, channel.result)
+        sync_token = channel.json_body["next_batch"]
+
+        # Send 150 to-device messages. We limit to 100 in `/sync`
+        for i in range(150):
+            test_msg = {"foo": "bar"}
+            chan = self.make_request(
+                "PUT",
+                f"/_matrix/client/r0/sendToDevice/m.test/1234-{i}",
+                content={"messages": {user2: {"d2": test_msg}}},
+                access_token=user1_tok,
+            )
+            self.assertEqual(chan.code, 200, chan.result)
+
+        channel = self.make_request(
+            "GET", f"/sync?since={sync_token}&timeout=300000", access_token=user2_tok
+        )
+        self.assertEqual(channel.code, 200, channel.result)
+        messages = channel.json_body.get("to_device", {}).get("events", [])
+        self.assertEqual(len(messages), 100)
+        sync_token = channel.json_body["next_batch"]
+
+        channel = self.make_request(
+            "GET", f"/sync?since={sync_token}&timeout=300000", access_token=user2_tok
+        )
+        self.assertEqual(channel.code, 200, channel.result)
+        messages = channel.json_body.get("to_device", {}).get("events", [])
+        self.assertEqual(len(messages), 50)

From 705a439972a00fe38fade21ec3a73815bd2b7d17 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 11 Feb 2022 13:49:40 +0000
Subject: [PATCH 228/474] Fix Newsfile

\#11966 fixes a bug introduced by #11215, which isn't in a released version.
---
 changelog.d/11966.bugfix | 1 -
 1 file changed, 1 deletion(-)
 delete mode 100644 changelog.d/11966.bugfix

diff --git a/changelog.d/11966.bugfix b/changelog.d/11966.bugfix
deleted file mode 100644
index af8e096667f2..000000000000
--- a/changelog.d/11966.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix to-device messages being dropped during limited sync when using SQLite.
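As an aside, the SQLite `rowcount` behaviour that #11966 works around can be demonstrated in isolation. A minimal sketch using Python's standard-library `sqlite3` module (illustrative only; Synapse wraps its database cursors, but the underlying driver semantics are the same):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE messages (id INTEGER)")
cur.executemany("INSERT INTO messages VALUES (?)", [(i,) for i in range(150)])
print(cur.rowcount)  # 150: rowcount reflects rows *changed* by the INSERT

cur.execute("SELECT id FROM messages LIMIT 100")
rows = cur.fetchall()
print(len(rows))     # 100 rows were actually selected...
print(cur.rowcount)  # ...but rowcount is -1: sqlite3 cannot report rows
                     # *selected*, so a check like `txn.rowcount == limit`
                     # never fires and the "more pending" branch is skipped
```

Counting rows by hand while iterating, as the fix does, avoids relying on this driver-specific behaviour.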
From 086d1d6d0bcff986199d4c00aea0c079854da861 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 11 Feb 2022 13:51:20 +0000
Subject: [PATCH 229/474] Fix Newsfile

\#11966 fixes a bug introduced by #11215, which isn't in a released version.
---
 changelog.d/11966.feature | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/11966.feature

diff --git a/changelog.d/11966.feature b/changelog.d/11966.feature
new file mode 100644
index 000000000000..468020834b3d
--- /dev/null
+++ b/changelog.d/11966.feature
@@ -0,0 +1 @@
+Add experimental support for sending to-device messages to application services, as specified by [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409). Disabled by default.

From 0171fa5226a6aa808d9965dab20f22f9794810d9 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Fri, 11 Feb 2022 14:58:11 +0100
Subject: [PATCH 230/474] Remove deprecated user_may_create_room_with_invites
 callback (#11950)

Co-authored-by: Patrick Cloke
---
 changelog.d/11950.removal       |   1 +
 docs/upgrade.md                 |  29 +++++---
 synapse/events/spamcheck.py     |  42 -----------
 synapse/handlers/room.py        |   5 --
 synapse/module_api/__init__.py  |   5 --
 tests/rest/client/test_rooms.py | 119 +-------------------------------
 6 files changed, 22 insertions(+), 179 deletions(-)
 create mode 100644 changelog.d/11950.removal

diff --git a/changelog.d/11950.removal b/changelog.d/11950.removal
new file mode 100644
index 000000000000..f75de40f2fa5
--- /dev/null
+++ b/changelog.d/11950.removal
@@ -0,0 +1 @@
+Remove deprecated `user_may_create_room_with_invites` spam checker callback. See the [upgrade notes](https://matrix-org.github.io/synapse/latest/upgrade.html#removal-of-user_may_create_room_with_invites) for more information.
diff --git a/docs/upgrade.md b/docs/upgrade.md
index c5e0697333fd..6f2000029575 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -84,7 +84,18 @@ process, for example:
     wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
     dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 ```
 
-# Upgrading to v1.(next)
+
+# Upgrading to v1.53.0
+
+## Dropping support for `webclient` listeners and non-HTTP(S) `web_client_location`
+
+Per the deprecation notice in Synapse v1.51.0, listeners of type `webclient`
+are no longer supported and configuring them is now a configuration error.
+
+Configuring a non-HTTP(S) `web_client_location` is now a
+configuration error. Since the `webclient` listener is no longer supported, this
+setting only applies to the root path `/` of Synapse's web server and no longer
+the `/_matrix/client/` path.
 
 ## Stablisation of MSC3231
 
@@ -119,17 +130,15 @@ The new `capabilities` are now active by default.
 
-# Upgrading to v1.53.0
-
-## Dropping support for `webclient` listeners and non-HTTP(S) `web_client_location`
+## Removal of `user_may_create_room_with_invites`
 
-Per the deprecation notice in Synapse v1.51.0, listeners of type `webclient`
-are no longer supported and configuring them is a now a configuration error.
+As announced with the release of [Synapse 1.47.0](#deprecation-of-the-user_may_create_room_with_invites-module-callback),
+the deprecated `user_may_create_room_with_invites` module callback has been removed.
 
-Configuring a non-HTTP(S) `web_client_location` configuration is is now a
-configuration error. Since the `webclient` listener is no longer supported, this
-setting only applies to the root path `/` of Synapse's web server and no longer
-the `/_matrix/client/` path.
+Modules relying on it can instead implement [`user_may_invite`](https://matrix-org.github.io/synapse/latest/modules/spam_checker_callbacks.html#user_may_invite) +and use the [`get_room_state`](https://github.com/matrix-org/synapse/blob/872f23b95fa980a61b0866c1475e84491991fa20/synapse/module_api/__init__.py#L869-L876) +module API to infer whether the invite is happening while creating a room (see [this function](https://github.com/matrix-org/synapse-domain-rule-checker/blob/e7d092dd9f2a7f844928771dbfd9fd24c2332e48/synapse_domain_rule_checker/__init__.py#L56-L89) +as an example). Alternately, modules can also implement [`on_create_room`](https://matrix-org.github.io/synapse/latest/modules/third_party_rules_callbacks.html#on_create_room). # Upgrading to v1.52.0 diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index 3134beb8d3c6..04afd48274e1 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -48,9 +48,6 @@ USER_MAY_INVITE_CALLBACK = Callable[[str, str, str], Awaitable[bool]] USER_MAY_SEND_3PID_INVITE_CALLBACK = Callable[[str, str, str, str], Awaitable[bool]] USER_MAY_CREATE_ROOM_CALLBACK = Callable[[str], Awaitable[bool]] -USER_MAY_CREATE_ROOM_WITH_INVITES_CALLBACK = Callable[ - [str, List[str], List[Dict[str, str]]], Awaitable[bool] -] USER_MAY_CREATE_ROOM_ALIAS_CALLBACK = Callable[[str, RoomAlias], Awaitable[bool]] USER_MAY_PUBLISH_ROOM_CALLBACK = Callable[[str, str], Awaitable[bool]] CHECK_USERNAME_FOR_SPAM_CALLBACK = Callable[[Dict[str, str]], Awaitable[bool]] @@ -174,9 +171,6 @@ def __init__(self) -> None: USER_MAY_SEND_3PID_INVITE_CALLBACK ] = [] self._user_may_create_room_callbacks: List[USER_MAY_CREATE_ROOM_CALLBACK] = [] - self._user_may_create_room_with_invites_callbacks: List[ - USER_MAY_CREATE_ROOM_WITH_INVITES_CALLBACK - ] = [] self._user_may_create_room_alias_callbacks: List[ USER_MAY_CREATE_ROOM_ALIAS_CALLBACK ] = [] @@ -198,9 +192,6 @@ def register_callbacks( user_may_invite: Optional[USER_MAY_INVITE_CALLBACK] = None, user_may_send_3pid_invite: Optional[USER_MAY_SEND_3PID_INVITE_CALLBACK] = None, user_may_create_room: Optional[USER_MAY_CREATE_ROOM_CALLBACK] = None, - user_may_create_room_with_invites: Optional[ - USER_MAY_CREATE_ROOM_WITH_INVITES_CALLBACK - ] = None, user_may_create_room_alias: Optional[ USER_MAY_CREATE_ROOM_ALIAS_CALLBACK ] = None, @@ -229,11 +220,6 @@ def register_callbacks( if user_may_create_room is not None: self._user_may_create_room_callbacks.append(user_may_create_room) - if user_may_create_room_with_invites is not None: - self._user_may_create_room_with_invites_callbacks.append( - user_may_create_room_with_invites, - ) - if user_may_create_room_alias is not None: self._user_may_create_room_alias_callbacks.append( user_may_create_room_alias, @@ -359,34 +345,6 @@ async def user_may_create_room(self, userid: str) -> bool: return True - async def user_may_create_room_with_invites( - self, - userid: str, - invites: List[str], - threepid_invites: List[Dict[str, str]], - ) -> bool: - """Checks if a given user may create a room with invites - - If this method returns false, the creation request will be rejected. - - Args: - userid: The ID of the user attempting to create a room - invites: The IDs of the Matrix users to be invited if the room creation is - allowed. - threepid_invites: The threepids to be invited if the room creation is allowed, - as a dict including a "medium" key indicating the threepid's medium (e.g. - "email") and an "address" key indicating the threepid's address (e.g. 
- "alice@example.com") - - Returns: - True if the user may create the room, otherwise False - """ - for callback in self._user_may_create_room_with_invites_callbacks: - if await callback(userid, invites, threepid_invites) is False: - return False - - return True - async def user_may_create_room_alias( self, userid: str, room_alias: RoomAlias ) -> bool: diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 1420d6772955..a990727fc580 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -694,11 +694,6 @@ async def create_room( if not is_requester_admin and not ( await self.spam_checker.user_may_create_room(user_id) - and await self.spam_checker.user_may_create_room_with_invites( - user_id, - invite_list, - invite_3pid_list, - ) ): raise SynapseError( 403, "You are not permitted to create rooms", Codes.FORBIDDEN diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index a91a7fa3ceb0..c6367793081b 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -48,7 +48,6 @@ CHECK_USERNAME_FOR_SPAM_CALLBACK, USER_MAY_CREATE_ROOM_ALIAS_CALLBACK, USER_MAY_CREATE_ROOM_CALLBACK, - USER_MAY_CREATE_ROOM_WITH_INVITES_CALLBACK, USER_MAY_INVITE_CALLBACK, USER_MAY_JOIN_ROOM_CALLBACK, USER_MAY_PUBLISH_ROOM_CALLBACK, @@ -217,9 +216,6 @@ def register_spam_checker_callbacks( user_may_invite: Optional[USER_MAY_INVITE_CALLBACK] = None, user_may_send_3pid_invite: Optional[USER_MAY_SEND_3PID_INVITE_CALLBACK] = None, user_may_create_room: Optional[USER_MAY_CREATE_ROOM_CALLBACK] = None, - user_may_create_room_with_invites: Optional[ - USER_MAY_CREATE_ROOM_WITH_INVITES_CALLBACK - ] = None, user_may_create_room_alias: Optional[ USER_MAY_CREATE_ROOM_ALIAS_CALLBACK ] = None, @@ -240,7 +236,6 @@ def register_spam_checker_callbacks( user_may_invite=user_may_invite, user_may_send_3pid_invite=user_may_send_3pid_invite, user_may_create_room=user_may_create_room, - user_may_create_room_with_invites=user_may_create_room_with_invites, user_may_create_room_alias=user_may_create_room_alias, user_may_publish_room=user_may_publish_room, check_username_for_spam=check_username_for_spam, diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index 10a4a4dc5ecc..b7f086927bcf 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -18,7 +18,7 @@ """Tests REST events for /rooms paths.""" import json -from typing import Dict, Iterable, List, Optional +from typing import Iterable, List from unittest.mock import Mock, call from urllib import parse as urlparse @@ -35,7 +35,7 @@ from synapse.handlers.pagination import PurgeStatus from synapse.rest import admin from synapse.rest.client import account, directory, login, profile, room, sync -from synapse.types import JsonDict, Requester, RoomAlias, UserID, create_requester +from synapse.types import JsonDict, RoomAlias, UserID, create_requester from synapse.util.stringutils import random_string from tests import unittest @@ -674,121 +674,6 @@ def test_post_room_invitees_ratelimit(self): channel = self.make_request("POST", "/createRoom", content) self.assertEqual(200, channel.code) - def test_spamchecker_invites(self): - """Tests the user_may_create_room_with_invites spam checker callback.""" - - # Mock do_3pid_invite, so we don't fail from failing to send a 3PID invite to an - # IS. 
- async def do_3pid_invite( - room_id: str, - inviter: UserID, - medium: str, - address: str, - id_server: str, - requester: Requester, - txn_id: Optional[str], - id_access_token: Optional[str] = None, - ) -> int: - return 0 - - do_3pid_invite_mock = Mock(side_effect=do_3pid_invite) - self.hs.get_room_member_handler().do_3pid_invite = do_3pid_invite_mock - - # Add a mock callback for user_may_create_room_with_invites. Make it allow any - # room creation request for now. - return_value = True - - async def user_may_create_room_with_invites( - user: str, - invites: List[str], - threepid_invites: List[Dict[str, str]], - ) -> bool: - return return_value - - callback_mock = Mock(side_effect=user_may_create_room_with_invites) - self.hs.get_spam_checker()._user_may_create_room_with_invites_callbacks.append( - callback_mock, - ) - - # The MXIDs we'll try to invite. - invited_mxids = [ - "@alice1:red", - "@alice2:red", - "@alice3:red", - "@alice4:red", - ] - - # The 3PIDs we'll try to invite. - invited_3pids = [ - { - "id_server": "example.com", - "id_access_token": "sometoken", - "medium": "email", - "address": "alice1@example.com", - }, - { - "id_server": "example.com", - "id_access_token": "sometoken", - "medium": "email", - "address": "alice2@example.com", - }, - { - "id_server": "example.com", - "id_access_token": "sometoken", - "medium": "email", - "address": "alice3@example.com", - }, - ] - - # Create a room and invite the Matrix users, and check that it succeeded. - channel = self.make_request( - "POST", - "/createRoom", - json.dumps({"invite": invited_mxids}).encode("utf8"), - ) - self.assertEqual(200, channel.code) - - # Check that the callback was called with the right arguments. - expected_call_args = ((self.user_id, invited_mxids, []),) - self.assertEquals( - callback_mock.call_args, - expected_call_args, - callback_mock.call_args, - ) - - # Create a room and invite the 3PIDs, and check that it succeeded. - channel = self.make_request( - "POST", - "/createRoom", - json.dumps({"invite_3pid": invited_3pids}).encode("utf8"), - ) - self.assertEqual(200, channel.code) - - # Check that do_3pid_invite was called the right amount of time - self.assertEquals(do_3pid_invite_mock.call_count, len(invited_3pids)) - - # Check that the callback was called with the right arguments. - expected_call_args = ((self.user_id, [], invited_3pids),) - self.assertEquals( - callback_mock.call_args, - expected_call_args, - callback_mock.call_args, - ) - - # Now deny any room creation. - return_value = False - - # Create a room and invite the 3PIDs, and check that it failed. - channel = self.make_request( - "POST", - "/createRoom", - json.dumps({"invite_3pid": invited_3pids}).encode("utf8"), - ) - self.assertEqual(403, channel.code) - - # Check that do_3pid_invite wasn't called this time. - self.assertEquals(do_3pid_invite_mock.call_count, len(invited_3pids)) - def test_spam_checker_may_join_room(self): """Tests that the user_may_join_room spam checker callback is correctly bypassed when creating a new room. 
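To complement the upgrade notes above, here is a minimal sketch of a module migrating from the removed callback to `user_may_invite`. It is illustrative only: the class name is hypothetical and the actual policy is left as a stub; only `register_spam_checker_callbacks` and `get_room_state` are taken from the module API referenced in the upgrade notes.

```python
from synapse.module_api import ModuleApi


class ExampleInviteChecker:
    """A hypothetical replacement for `user_may_create_room_with_invites`."""

    def __init__(self, config: dict, api: ModuleApi):
        self._api = api
        # `user_may_invite` fires for every invite, including invites sent as
        # part of a /createRoom request.
        api.register_spam_checker_callbacks(user_may_invite=self.user_may_invite)

    async def user_may_invite(
        self, inviter: str, invitee: str, room_id: str
    ) -> bool:
        # Inspect the room's current state to infer whether the invite is
        # happening during room creation, then apply the old policy here.
        state = await self._api.get_room_state(room_id)
        # e.g. treat a room with very little state yet as "being created".
        return True  # stub: allow everything
```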
From bb98c593a5df890341cddfda528dc9952df728a4 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 11 Feb 2022 14:43:34 +0000
Subject: [PATCH 231/474] Prepare for rename of default complement branch
 (#11971)

use `HEAD` rather than hardcoding `master`
---
 .github/workflows/tests.yml | 6 +++---
 changelog.d/11971.misc      | 1 +
 2 files changed, 4 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/11971.misc

diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index c395f3e1c2fb..75ac1304bfbc 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -345,7 +345,7 @@ jobs:
           path: synapse
 
       # Attempt to check out the same branch of Complement as the PR. If it
-      # doesn't exist, fallback to master.
+      # doesn't exist, fall back to HEAD.
       - name: Checkout complement
         shell: bash
         run: |
@@ -358,8 +358,8 @@ jobs:
           #    for pull requests, otherwise GITHUB_REF).
           # 2. Attempt to use the base branch, e.g. when merging into release-vX.Y
           #    (GITHUB_BASE_REF for pull requests).
-          # 3. Use the default complement branch ("master").
-          for BRANCH_NAME in "$GITHUB_HEAD_REF" "$GITHUB_BASE_REF" "${GITHUB_REF#refs/heads/}" "master"; do
+          # 3. Use the default complement branch ("HEAD").
+          for BRANCH_NAME in "$GITHUB_HEAD_REF" "$GITHUB_BASE_REF" "${GITHUB_REF#refs/heads/}" "HEAD"; do
             # Skip empty branch names and merge commits.
             if [[ -z "$BRANCH_NAME" || $BRANCH_NAME =~ ^refs/pull/.* ]]; then
               continue
diff --git a/changelog.d/11971.misc b/changelog.d/11971.misc
new file mode 100644
index 000000000000..4e5bd8a393e1
--- /dev/null
+++ b/changelog.d/11971.misc
@@ -0,0 +1 @@
+Prepare for rename of default complement branch.

From b65acead428653b988351ae8d7b22127a22039cd Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Fri, 11 Feb 2022 09:50:14 -0500
Subject: [PATCH 232/474] Fetch thread summaries for multiple events in a
 single query (#11752)

This should reduce database usage when fetching bundled aggregations,
as the number of individual queries (and round trips to the database) is
reduced.
---
 changelog.d/11752.misc                      |   1 +
 synapse/storage/databases/main/events.py    |   2 +-
 synapse/storage/databases/main/relations.py | 222 +++++++++++++-------
 3 files changed, 151 insertions(+), 74 deletions(-)
 create mode 100644 changelog.d/11752.misc

diff --git a/changelog.d/11752.misc b/changelog.d/11752.misc
new file mode 100644
index 000000000000..47e085e4d931
--- /dev/null
+++ b/changelog.d/11752.misc
@@ -0,0 +1 @@
+Improve performance when fetching bundled aggregations for multiple events.
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index 2e44c7771591..5246fccad57a 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -1812,7 +1812,7 @@ def _handle_event_relations(
             # potentially error-prone) so it is always invalidated.
            txn.call_after(
                self.store.get_thread_participated.invalidate,
-                (parent_id, event.room_id, event.sender),
+                (parent_id, event.sender),
            )
 
     def _handle_insertion_event(self, txn: LoggingTransaction, event: EventBase):
diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py
index ad79cc56104a..e2c27e594b24 100644
--- a/synapse/storage/databases/main/relations.py
+++ b/synapse/storage/databases/main/relations.py
@@ -20,6 +20,7 @@
     Iterable,
     List,
     Optional,
+    Set,
     Tuple,
     Union,
     cast,
@@ -454,106 +455,175 @@ def _get_applicable_edits_txn(txn: LoggingTransaction) -> Dict[str, str]:
         }
 
     @cached()
-    async def get_thread_summary(
-        self, event_id: str, room_id: str
-    ) -> Tuple[int, Optional[EventBase]]:
+    def get_thread_summary(self, event_id: str) -> Optional[Tuple[int, EventBase]]:
+        raise NotImplementedError()
+
+    @cachedList(cached_method_name="get_thread_summary", list_name="event_ids")
+    async def _get_thread_summaries(
+        self, event_ids: Collection[str]
+    ) -> Dict[str, Optional[Tuple[int, EventBase]]]:
         """Get the number of threaded replies and the latest reply (if any) for
         the given event.
 
         Args:
-            event_id: Summarize the thread related to this event ID.
-            room_id: The room the event belongs to.
+            event_ids: Summarize the threads related to these event IDs.
 
         Returns:
-            The number of items in the thread and the most recent response, if any.
+            A map of the thread summary for each event. A missing event implies there
+            are no threaded replies.
+
+            Each summary includes the number of items in the thread and the most
+            recent response.
         """
 
-        def _get_thread_summary_txn(
+        def _get_thread_summaries_txn(
             txn: LoggingTransaction,
-        ) -> Tuple[int, Optional[str]]:
-            # Fetch the latest event ID in the thread.
+        ) -> Tuple[Dict[str, int], Dict[str, str]]:
+            # Fetch the count of threaded events and the latest event ID.
             # TODO Should this only allow m.room.message events.
-            sql = """
-                SELECT event_id
-                FROM event_relations
-                INNER JOIN events USING (event_id)
-                WHERE
-                    relates_to_id = ?
-                    AND room_id = ?
-                    AND relation_type = ?
-                ORDER BY topological_ordering DESC, stream_ordering DESC
-                LIMIT 1
-            """
+            if isinstance(self.database_engine, PostgresEngine):
+                # The `DISTINCT ON` clause will pick the *first* row it encounters,
+                # so ordering by topological ordering + stream ordering desc will
+                # ensure we get the latest event in the thread.
+                sql = """
+                    SELECT DISTINCT ON (parent.event_id) parent.event_id, child.event_id FROM events AS child
+                    INNER JOIN event_relations USING (event_id)
+                    INNER JOIN events AS parent ON
+                        parent.event_id = relates_to_id
+                        AND parent.room_id = child.room_id
+                    WHERE
+                        %s
+                        AND relation_type = ?
+                    ORDER BY parent.event_id, child.topological_ordering DESC, child.stream_ordering DESC
+                """
+            else:
+                # SQLite uses a simplified query which returns all entries for a
+                # thread. The first result for each thread is chosen, and subsequent
+                # results for a thread are ignored.
+                sql = """
+                    SELECT parent.event_id, child.event_id FROM events AS child
+                    INNER JOIN event_relations USING (event_id)
+                    INNER JOIN events AS parent ON
+                        parent.event_id = relates_to_id
+                        AND parent.room_id = child.room_id
+                    WHERE
+                        %s
+                        AND relation_type = ?
+                    ORDER BY child.topological_ordering DESC, child.stream_ordering DESC
+                """
+
+            clause, args = make_in_list_sql_clause(
+                txn.database_engine, "relates_to_id", event_ids
+            )
+            args.append(RelationTypes.THREAD)
 
-            txn.execute(sql, (event_id, room_id, RelationTypes.THREAD))
-            row = txn.fetchone()
-            if row is None:
-                return 0, None
+            txn.execute(sql % (clause,), args)
+            latest_event_ids = {}
+            for parent_event_id, child_event_id in txn:
+                # Only consider the latest threaded reply (by topological ordering).
+                if parent_event_id not in latest_event_ids:
+                    latest_event_ids[parent_event_id] = child_event_id
 
-            latest_event_id = row[0]
+            # If no threads were found, bail.
+            if not latest_event_ids:
+                return {}, latest_event_ids
 
             # Fetch the number of threaded replies.
             sql = """
-                SELECT COUNT(event_id)
-                FROM event_relations
-                INNER JOIN events USING (event_id)
+                SELECT parent.event_id, COUNT(child.event_id) FROM events AS child
+                INNER JOIN event_relations USING (event_id)
+                INNER JOIN events AS parent ON
+                    parent.event_id = relates_to_id
+                    AND parent.room_id = child.room_id
                 WHERE
-                    relates_to_id = ?
-                    AND room_id = ?
+                    %s
                     AND relation_type = ?
+                GROUP BY parent.event_id
             """
 
-            txn.execute(sql, (event_id, room_id, RelationTypes.THREAD))
-            count = cast(Tuple[int], txn.fetchone())[0]
+            # Regenerate the arguments since only threads found above could
+            # possibly have any replies.
+            clause, args = make_in_list_sql_clause(
+                txn.database_engine, "relates_to_id", latest_event_ids.keys()
+            )
+            args.append(RelationTypes.THREAD)
+
+            txn.execute(sql % (clause,), args)
+            counts = dict(cast(List[Tuple[str, int]], txn.fetchall()))
 
-            return count, latest_event_id
+            return counts, latest_event_ids
 
-        count, latest_event_id = await self.db_pool.runInteraction(
-            "get_thread_summary", _get_thread_summary_txn
+        counts, latest_event_ids = await self.db_pool.runInteraction(
+            "get_thread_summaries", _get_thread_summaries_txn
         )
 
-        latest_event = None
-        if latest_event_id:
-            latest_event = await self.get_event(latest_event_id, allow_none=True)  # type: ignore[attr-defined]
+        latest_events = await self.get_events(latest_event_ids.values())  # type: ignore[attr-defined]
+
+        # Map the event IDs to the thread summary.
+        #
+        # There might not be a summary due to there not being a thread or
+        # due to the latest event not being known; either case is treated the same.
+        summaries = {}
+        for parent_event_id, latest_event_id in latest_event_ids.items():
+            latest_event = latest_events.get(latest_event_id)
+
+            summary = None
+            if latest_event:
+                summary = (counts[parent_event_id], latest_event)
+            summaries[parent_event_id] = summary
 
-        return count, latest_event
+        return summaries
 
     @cached()
-    async def get_thread_participated(
-        self, event_id: str, room_id: str, user_id: str
-    ) -> bool:
-        """Get whether the requesting user participated in a thread.
+    def get_thread_participated(self, event_id: str, user_id: str) -> bool:
+        raise NotImplementedError()
 
-        This is separate from get_thread_summary since that can be cached across
-        all users while this value is specific to the requeser.
+    @cachedList(cached_method_name="get_thread_participated", list_name="event_ids")
+    async def _get_threads_participated(
+        self, event_ids: Collection[str], user_id: str
+    ) -> Dict[str, bool]:
+        """Get whether the requesting user participated in the given threads.
+
+        This is separate from get_thread_summaries since that can be cached across
+        all users while this value is specific to the requester.

        Args:
-            event_id: The thread related to this event ID.
-            room_id: The room the event belongs to.
+            event_ids: The threads related to these event IDs.
            user_id: The user requesting the summary.

        Returns:
-            True if the requesting user participated in the thread, otherwise false.
+            A map of event ID to a boolean indicating whether the requesting
+            user participated in that event's thread.
        """

-        def _get_thread_summary_txn(txn: LoggingTransaction) -> bool:
+        def _get_thread_summary_txn(txn: LoggingTransaction) -> Set[str]:
            # Fetch whether the requester has participated or not.
            sql = """
-                SELECT 1
-                FROM event_relations
-                INNER JOIN events USING (event_id)
+                SELECT DISTINCT relates_to_id
+                FROM events AS child
+                INNER JOIN event_relations USING (event_id)
+                INNER JOIN events AS parent ON
+                    parent.event_id = relates_to_id
+                    AND parent.room_id = child.room_id
                WHERE
-                    relates_to_id = ?
-                    AND room_id = ?
+                    %s
                    AND relation_type = ?
-                    AND sender = ?
+                    AND child.sender = ?
            """

-            txn.execute(sql, (event_id, room_id, RelationTypes.THREAD, user_id))
-            return bool(txn.fetchone())
+            clause, args = make_in_list_sql_clause(
+                txn.database_engine, "relates_to_id", event_ids
+            )
+            args.extend((RelationTypes.THREAD, user_id))

-        return await self.db_pool.runInteraction(
+            txn.execute(sql % (clause,), args)
+            return {row[0] for row in txn.fetchall()}
+
+        participated_threads = await self.db_pool.runInteraction(
            "get_thread_summary", _get_thread_summary_txn
        )

+        return {event_id: event_id in participated_threads for event_id in event_ids}
+
    async def events_have_relations(
        self,
        parent_ids: List[str],
@@ -700,21 +770,6 @@ async def _get_bundled_aggregation_for_event(
        if references.chunk:
            aggregations.references = await references.to_dict(cast("DataStore", self))

-        # If this event is the start of a thread, include a summary of the replies.
-        if self._msc3440_enabled:
-            thread_count, latest_thread_event = await self.get_thread_summary(
-                event_id, room_id
-            )
-            participated = await self.get_thread_participated(
-                event_id, room_id, user_id
-            )
-            if latest_thread_event:
-                aggregations.thread = _ThreadAggregation(
-                    latest_event=latest_thread_event,
-                    count=thread_count,
-                    current_user_participated=participated,
-                )
-
        # Store the bundled aggregations in the event metadata for later use.
        return aggregations

@@ -763,6 +818,27 @@ async def get_bundled_aggregations(
        for event_id, edit in edits.items():
            results.setdefault(event_id, BundledAggregations()).replace = edit

+        # Fetch thread summaries.
+        if self._msc3440_enabled:
+            summaries = await self._get_thread_summaries(seen_event_ids)
+            # Only fetch participated for a limited selection based on what had
+            # summaries.
+            participated = await self._get_threads_participated(
+                summaries.keys(), user_id
+            )
+            for event_id, summary in summaries.items():
+                if summary:
+                    thread_count, latest_thread_event = summary
+                    results.setdefault(
+                        event_id, BundledAggregations()
+                    ).thread = _ThreadAggregation(
+                        latest_event=latest_thread_event,
+                        count=thread_count,
+                        # If there's a thread summary it must also exist in the
+                        # participated dictionary.
+                        current_user_participated=participated[event_id],
+                    )
+
        return results

From 4d7e74b2e503373da66ec929afd1aa7010676878 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Fri, 11 Feb 2022 11:20:27 -0500
Subject: [PATCH 233/474] Support MSC3715 for `/relations`. (#11941)

This adds an unstable org.matrix.msc3715.dir parameter which acts like `dir`
on `/messages`.
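
As an illustrative sketch only (not part of this patch): a client could page
through a thread oldest-first with the new parameter roughly as follows. The
homeserver URL, access token, room ID and event ID below are placeholders, and
the `requests` library is assumed.

    import requests

    BASE = "https://homeserver.example.org"  # placeholder homeserver
    room_id = "!abc:example.org"  # placeholder room
    event_id = "$thread_root"  # placeholder thread root event

    resp = requests.get(
        f"{BASE}/_matrix/client/unstable/rooms/{room_id}/relations/{event_id}",
        params={"limit": 1, "org.matrix.msc3715.dir": "f"},  # "f" = forwards
        headers={"Authorization": "Bearer <access_token>"},  # placeholder token
    )
    resp.raise_for_status()
    print(resp.json()["chunk"])  # with dir=f, the earliest relation comes first

Omitting the parameter keeps the previous behaviour ("b", i.e. backwards from
the most recent relation).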
--- changelog.d/11941.feature | 1 + synapse/rest/client/relations.py | 4 ++++ tests/rest/client/test_relations.py | 37 ++++++++++++++++++++++++----- 3 files changed, 36 insertions(+), 6 deletions(-) create mode 100644 changelog.d/11941.feature diff --git a/changelog.d/11941.feature b/changelog.d/11941.feature new file mode 100644 index 000000000000..2b5b11cb9f89 --- /dev/null +++ b/changelog.d/11941.feature @@ -0,0 +1 @@ +Support the `dir` parameter on the `/relations` endpoint, per [MSC3715](https://github.com/matrix-org/matrix-doc/pull/3715). diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py index 9ec425888a21..2cab83c4e6e7 100644 --- a/synapse/rest/client/relations.py +++ b/synapse/rest/client/relations.py @@ -111,6 +111,9 @@ async def on_GET( raise SynapseError(404, "Unknown parent event.") limit = parse_integer(request, "limit", default=5) + direction = parse_string( + request, "org.matrix.msc3715.dir", default="b", allowed_values=["f", "b"] + ) from_token_str = parse_string(request, "from") to_token_str = parse_string(request, "to") @@ -128,6 +131,7 @@ async def on_GET( relation_type=relation_type, event_type=event_type, limit=limit, + direction=direction, from_token=from_token, to_token=to_token, ) diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index 9768fb29711e..de80aca03705 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -169,24 +169,28 @@ def test_basic_paginate_relations(self): """Tests that calling pagination API correctly the latest relations.""" channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") self.assertEquals(200, channel.code, channel.json_body) + first_annotation_id = channel.json_body["event_id"] channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b") self.assertEquals(200, channel.code, channel.json_body) - annotation_id = channel.json_body["event_id"] + second_annotation_id = channel.json_body["event_id"] channel = self.make_request( "GET", - "/_matrix/client/unstable/rooms/%s/relations/%s?limit=1" - % (self.room, self.parent_id), + f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1", access_token=self.user_token, ) self.assertEquals(200, channel.code, channel.json_body) - # We expect to get back a single pagination result, which is the full - # relation event we sent above. + # We expect to get back a single pagination result, which is the latest + # full relation event we sent above. self.assertEquals(len(channel.json_body["chunk"]), 1, channel.json_body) self.assert_dict( - {"event_id": annotation_id, "sender": self.user_id, "type": "m.reaction"}, + { + "event_id": second_annotation_id, + "sender": self.user_id, + "type": "m.reaction", + }, channel.json_body["chunk"][0], ) @@ -201,6 +205,27 @@ def test_basic_paginate_relations(self): channel.json_body.get("next_batch"), str, channel.json_body ) + # Request the relations again, but with a different direction. + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/relations" + f"/{self.parent_id}?limit=1&org.matrix.msc3715.dir=f", + access_token=self.user_token, + ) + self.assertEquals(200, channel.code, channel.json_body) + + # We expect to get back a single pagination result, which is the earliest + # full relation event we sent above. 
+ self.assertEquals(len(channel.json_body["chunk"]), 1, channel.json_body) + self.assert_dict( + { + "event_id": first_annotation_id, + "sender": self.user_id, + "type": "m.reaction", + }, + channel.json_body["chunk"][0], + ) + def _stream_token_to_relation_token(self, token: str) -> str: """Convert a StreamToken into a legacy token (RelationPaginationToken).""" room_key = self.get_success(StreamToken.from_string(self.store, token)).room_key From b2b971f28a19c7fb31df79db29060ae72ba06e6b Mon Sep 17 00:00:00 2001 From: Shay Date: Fri, 11 Feb 2022 11:05:20 -0800 Subject: [PATCH 234/474] Enable cache time-based expiry by default (#11849) --- changelog.d/11849.misc | 1 + docs/sample_config.yaml | 13 ++++++--- docs/upgrade.md | 7 +++++ synapse/config/background_updates.py | 0 synapse/config/cache.py | 40 +++++++++++++++++++++++----- 5 files changed, 50 insertions(+), 11 deletions(-) create mode 100644 changelog.d/11849.misc create mode 100644 synapse/config/background_updates.py diff --git a/changelog.d/11849.misc b/changelog.d/11849.misc new file mode 100644 index 000000000000..9561eab1927c --- /dev/null +++ b/changelog.d/11849.misc @@ -0,0 +1 @@ +Enable cache time-based expiry by default. The `expiry_time` config flag will be superseded by `expire_caches` and `cache_entry_ttl`. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 946cd281d265..d2bb3d42080a 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -751,11 +751,16 @@ caches: per_cache_factors: #get_users_who_share_room_with_user: 2.0 - # Controls how long an entry can be in a cache without having been - # accessed before being evicted. Defaults to None, which means - # entries are never evicted based on time. + # Controls whether cache entries are evicted after a specified time + # period. Defaults to true. Uncomment to disable this feature. # - #expiry_time: 30m + #expire_caches: false + + # If expire_caches is enabled, this flag controls how long an entry can + # be in a cache without having been accessed before being evicted. + # Defaults to 30m. Uncomment to set a different time to live for cache entries. + # + #cache_entry_ttl: 30m # Controls how long the results of a /sync request are cached for after # a successful response is returned. A higher duration can help clients with diff --git a/docs/upgrade.md b/docs/upgrade.md index 6f2000029575..25a86c08e603 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -111,6 +111,13 @@ to: Please update any relevant reverse proxy or firewall configurations appropriately. +## Time-based cache expiry is now enabled by default + +Formerly, entries in the cache were not evicted regardless of whether they were accessed after storing. +This behavior has now changed. By default entries in the cache are now evicted after 30m of not being accessed. +To change the default behavior, go to the `caches` section of the config and change the `expire_caches` and +`cache_entry_ttl` flags as necessary. Please note that these flags replace the `expiry_time` flag in the config. 
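+
+For example, to keep time-based expiry but evict entries after an hour instead
+of the default 30m (the values here are purely illustrative):
+
+```yaml
+caches:
+  expire_caches: true
+  cache_entry_ttl: 1h
+```
+
+Setting `expire_caches: false` restores the previous behaviour.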
+ ## Deprecation of `capability` `org.matrix.msc3283.*` The `capabilities` of MSC3283 from the REST API `/_matrix/client/r0/capabilities` diff --git a/synapse/config/background_updates.py b/synapse/config/background_updates.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/synapse/config/cache.py b/synapse/config/cache.py index d9d85f98e155..387ac6d115e3 100644 --- a/synapse/config/cache.py +++ b/synapse/config/cache.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import logging import os import re import threading @@ -23,6 +24,8 @@ from ._base import Config, ConfigError +logger = logging.getLogger(__name__) + # The prefix for all cache factor-related environment variables _CACHE_PREFIX = "SYNAPSE_CACHE_FACTOR" @@ -148,11 +151,16 @@ def generate_config_section(self, **kwargs) -> str: per_cache_factors: #get_users_who_share_room_with_user: 2.0 - # Controls how long an entry can be in a cache without having been - # accessed before being evicted. Defaults to None, which means - # entries are never evicted based on time. + # Controls whether cache entries are evicted after a specified time + # period. Defaults to true. Uncomment to disable this feature. + # + #expire_caches: false + + # If expire_caches is enabled, this flag controls how long an entry can + # be in a cache without having been accessed before being evicted. + # Defaults to 30m. Uncomment to set a different time to live for cache entries. # - #expiry_time: 30m + #cache_entry_ttl: 30m # Controls how long the results of a /sync request are cached for after # a successful response is returned. A higher duration can help clients with @@ -217,12 +225,30 @@ def read_config(self, config, **kwargs) -> None: e.message # noqa: B306, DependencyException.message is a property ) - expiry_time = cache_config.get("expiry_time") - if expiry_time: - self.expiry_time_msec: Optional[int] = self.parse_duration(expiry_time) + expire_caches = cache_config.get("expire_caches", True) + cache_entry_ttl = cache_config.get("cache_entry_ttl", "30m") + + if expire_caches: + self.expiry_time_msec: Optional[int] = self.parse_duration(cache_entry_ttl) else: self.expiry_time_msec = None + # Backwards compatibility support for the now-removed "expiry_time" config flag. + expiry_time = cache_config.get("expiry_time") + + if expiry_time and expire_caches: + logger.warning( + "You have set two incompatible options, expiry_time and expire_caches. Please only use the " + "expire_caches and cache_entry_ttl options and delete the expiry_time option as it is " + "deprecated." + ) + if expiry_time: + logger.warning( + "Expiry_time is a deprecated option, please use the expire_caches and cache_entry_ttl options " + "instead." + ) + self.expiry_time_msec = self.parse_duration(expiry_time) + self.sync_response_cache_duration = self.parse_duration( cache_config.get("sync_response_cache_duration", 0) ) From 63c46349c41aa967e64a5a4042ef5177f934be47 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Sat, 12 Feb 2022 10:44:16 +0000 Subject: [PATCH 235/474] Implement MSC3706: partial state in `/send_join` response (#11967) * Make `get_auth_chain_ids` return a Set It has a set internally, and a set is often useful where it gets used, so let's avoid converting to an intermediate list. 
* Minor refactors in `on_send_join_request` A little bit of non-functional groundwork * Implement MSC3706: partial state in /send_join response --- changelog.d/11967.feature | 1 + synapse/config/experimental.py | 3 + synapse/federation/federation_server.py | 91 +++++++++-- .../federation/transport/server/federation.py | 20 ++- .../databases/main/event_federation.py | 12 +- tests/federation/test_federation_server.py | 148 ++++++++++++++++++ tests/storage/test_event_federation.py | 8 +- 7 files changed, 262 insertions(+), 21 deletions(-) create mode 100644 changelog.d/11967.feature diff --git a/changelog.d/11967.feature b/changelog.d/11967.feature new file mode 100644 index 000000000000..d09320a2906c --- /dev/null +++ b/changelog.d/11967.feature @@ -0,0 +1 @@ +Experimental implementation of [MSC3706](https://github.com/matrix-org/matrix-doc/pull/3706): extensions to `/send_join` to support reduced response size. diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index f05a803a7104..09d692d9a1ed 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -61,3 +61,6 @@ def read_config(self, config: JsonDict, **kwargs): self.msc2409_to_device_messages_enabled: bool = experimental.get( "msc2409_to_device_messages_enabled", False ) + + # MSC3706 (server-side support for partial state in /send_join responses) + self.msc3706_enabled: bool = experimental.get("msc3706_enabled", False) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index af9cb98f67b1..482bbdd86744 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -20,6 +20,7 @@ Any, Awaitable, Callable, + Collection, Dict, Iterable, List, @@ -64,7 +65,7 @@ ReplicationGetQueryRestServlet, ) from synapse.storage.databases.main.lock import Lock -from synapse.types import JsonDict, get_domain_from_id +from synapse.types import JsonDict, StateMap, get_domain_from_id from synapse.util import json_decoder, unwrapFirstError from synapse.util.async_helpers import Linearizer, concurrently_execute, gather_results from synapse.util.caches.response_cache import ResponseCache @@ -571,7 +572,7 @@ async def _on_state_ids_request_compute( ) -> JsonDict: state_ids = await self.handler.get_state_ids_for_pdu(room_id, event_id) auth_chain_ids = await self.store.get_auth_chain_ids(room_id, state_ids) - return {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids} + return {"pdu_ids": state_ids, "auth_chain_ids": list(auth_chain_ids)} async def _on_context_state_request_compute( self, room_id: str, event_id: Optional[str] @@ -645,27 +646,61 @@ async def on_invite_request( return {"event": ret_pdu.get_pdu_json(time_now)} async def on_send_join_request( - self, origin: str, content: JsonDict, room_id: str + self, + origin: str, + content: JsonDict, + room_id: str, + caller_supports_partial_state: bool = False, ) -> Dict[str, Any]: event, context = await self._on_send_membership_event( origin, content, Membership.JOIN, room_id ) prev_state_ids = await context.get_prev_state_ids() - state_ids = list(prev_state_ids.values()) - auth_chain = await self.store.get_auth_chain(room_id, state_ids) - state = await self.store.get_events(state_ids) + state_event_ids: Collection[str] + servers_in_room: Optional[Collection[str]] + if caller_supports_partial_state: + state_event_ids = _get_event_ids_for_partial_state_join( + event, prev_state_ids + ) + servers_in_room = await self.state.get_hosts_in_room_at_events( + room_id, 
event_ids=event.prev_event_ids()
+            )
+        else:
+            state_event_ids = prev_state_ids.values()
+            servers_in_room = None
+
+        auth_chain_event_ids = await self.store.get_auth_chain_ids(
+            room_id, state_event_ids
+        )
+
+        # if the caller has opted in, we can omit any auth_chain events which are
+        # already in state_event_ids
+        if caller_supports_partial_state:
+            auth_chain_event_ids.difference_update(state_event_ids)
+
+        auth_chain_events = await self.store.get_events_as_list(auth_chain_event_ids)
+        state_events = await self.store.get_events_as_list(state_event_ids)
+
+        # we try to do all the async stuff before this point, so that time_now is as
+        # accurate as possible.
        time_now = self._clock.time_msec()
-        event_json = event.get_pdu_json()
-        return {
+        event_json = event.get_pdu_json(time_now)
+        resp = {
            # TODO Remove the unstable prefix when servers have updated.
            "org.matrix.msc3083.v2.event": event_json,
            "event": event_json,
-            "state": [p.get_pdu_json(time_now) for p in state.values()],
-            "auth_chain": [p.get_pdu_json(time_now) for p in auth_chain],
+            "state": [p.get_pdu_json(time_now) for p in state_events],
+            "auth_chain": [p.get_pdu_json(time_now) for p in auth_chain_events],
+            "org.matrix.msc3706.partial_state": caller_supports_partial_state,
        }

+        if servers_in_room is not None:
+            resp["org.matrix.msc3706.servers_in_room"] = list(servers_in_room)
+
+        return resp
+
    async def on_make_leave_request(
        self, origin: str, room_id: str, user_id: str
    ) -> Dict[str, Any]:
@@ -1339,3 +1374,39 @@ async def on_query(self, query_type: str, args: dict) -> JsonDict:
        # error.
        logger.warning("No handler registered for query type %s", query_type)
        raise NotFoundError("No handler for Query type '%s'" % (query_type,))
+
+
+def _get_event_ids_for_partial_state_join(
+    join_event: EventBase,
+    prev_state_ids: StateMap[str],
+) -> Collection[str]:
+    """Calculate state to be returned in a partial_state send_join
+
+    Args:
+        join_event: the join event being send_joined
+        prev_state_ids: the event ids of the state before the join
+
+    Returns:
+        the event ids to be returned
+    """
+
+    # return all non-member events
+    state_event_ids = {
+        event_id
+        for (event_type, state_key), event_id in prev_state_ids.items()
+        if event_type != EventTypes.Member
+    }
+
+    # we also need the current state of the current user (it's going to
+    # be an auth event for the new join, so we may as well return it)
+    current_membership_event_id = prev_state_ids.get(
+        (EventTypes.Member, join_event.state_key)
+    )
+    if current_membership_event_id is not None:
+        state_event_ids.add(current_membership_event_id)
+
+    # TODO: return a few more members:
+    #   - those with invites
+    #   - those that are kicked?
/ banned + + return state_event_ids diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py index d86dfede4e89..310733097da5 100644 --- a/synapse/federation/transport/server/federation.py +++ b/synapse/federation/transport/server/federation.py @@ -412,6 +412,16 @@ class FederationV2SendJoinServlet(BaseFederationServerServlet): PREFIX = FEDERATION_V2_PREFIX + def __init__( + self, + hs: "HomeServer", + authenticator: Authenticator, + ratelimiter: FederationRateLimiter, + server_name: str, + ): + super().__init__(hs, authenticator, ratelimiter, server_name) + self._msc3706_enabled = hs.config.experimental.msc3706_enabled + async def on_PUT( self, origin: str, @@ -422,7 +432,15 @@ async def on_PUT( ) -> Tuple[int, JsonDict]: # TODO(paul): assert that event_id parsed from path actually # match those given in content - result = await self.handler.on_send_join_request(origin, content, room_id) + + partial_state = False + if self._msc3706_enabled: + partial_state = parse_boolean_from_args( + query, "org.matrix.msc3706.partial_state", default=False + ) + result = await self.handler.on_send_join_request( + origin, content, room_id, caller_supports_partial_state=partial_state + ) return 200, result diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index 22f64741277a..277e6422ebd6 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -121,7 +121,7 @@ async def get_auth_chain_ids( room_id: str, event_ids: Collection[str], include_given: bool = False, - ) -> List[str]: + ) -> Set[str]: """Get auth events for given event_ids. The events *must* be state events. Args: @@ -130,7 +130,7 @@ async def get_auth_chain_ids( include_given: include the given events in result Returns: - list of event_ids + set of event_ids """ # Check if we have indexed the room so we can use the chain cover @@ -159,7 +159,7 @@ async def get_auth_chain_ids( def _get_auth_chain_ids_using_cover_index_txn( self, txn: Cursor, room_id: str, event_ids: Collection[str], include_given: bool - ) -> List[str]: + ) -> Set[str]: """Calculates the auth chain IDs using the chain index.""" # First we look up the chain ID/sequence numbers for the given events. @@ -272,11 +272,11 @@ def _get_auth_chain_ids_using_cover_index_txn( txn.execute(sql, (chain_id, max_no)) results.update(r for r, in txn) - return list(results) + return results def _get_auth_chain_ids_txn( self, txn: LoggingTransaction, event_ids: Collection[str], include_given: bool - ) -> List[str]: + ) -> Set[str]: """Calculates the auth chain IDs. This is used when we don't have a cover index for the room. 
@@ -331,7 +331,7 @@ def _get_auth_chain_ids_txn(
            front = new_front
            results.update(front)

-        return list(results)
+        return results

    async def get_auth_chain_difference(
        self, room_id: str, state_sets: List[Set[str]]
diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py
index 1af284bd2fb8..d084919ef7d7 100644
--- a/tests/federation/test_federation_server.py
+++ b/tests/federation/test_federation_server.py
@@ -16,12 +16,21 @@

 from parameterized import parameterized

+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
+from synapse.config.server import DEFAULT_ROOM_VERSION
+from synapse.crypto.event_signing import add_hashes_and_signatures
 from synapse.events import make_event_from_dict
 from synapse.federation.federation_server import server_matches_acl_event
 from synapse.rest import admin
 from synapse.rest.client import login, room
+from synapse.server import HomeServer
+from synapse.types import JsonDict
+from synapse.util import Clock

 from tests import unittest
+from tests.unittest import override_config


 class FederationServerTests(unittest.FederatingHomeserverTestCase):
@@ -152,6 +161,145 @@ def test_needs_to_be_in_room(self):
        self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")


+class SendJoinFederationTests(unittest.FederatingHomeserverTestCase):
+    servlets = [
+        admin.register_servlets,
+        room.register_servlets,
+        login.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer):
+        super().prepare(reactor, clock, hs)
+
+        # create the room
+        creator_user_id = self.register_user("kermit", "test")
+        tok = self.login("kermit", "test")
+        self._room_id = self.helper.create_room_as(
+            room_creator=creator_user_id, tok=tok
+        )
+
+        # a second member on the origin HS
+        second_member_user_id = self.register_user("fozzie", "bear")
+        tok2 = self.login("fozzie", "bear")
+        self.helper.join(self._room_id, second_member_user_id, tok=tok2)
+
+    def _make_join(self, user_id) -> JsonDict:
+        channel = self.make_signed_federation_request(
+            "GET",
+            f"/_matrix/federation/v1/make_join/{self._room_id}/{user_id}"
+            f"?ver={DEFAULT_ROOM_VERSION}",
+        )
+        self.assertEquals(channel.code, 200, channel.json_body)
+        return channel.json_body
+
+    def test_send_join(self):
+        """happy-path test of send_join"""
+        joining_user = "@misspiggy:" + self.OTHER_SERVER_NAME
+        join_result = self._make_join(joining_user)
+
+        join_event_dict = join_result["event"]
+        add_hashes_and_signatures(
+            KNOWN_ROOM_VERSIONS[DEFAULT_ROOM_VERSION],
+            join_event_dict,
+            signature_name=self.OTHER_SERVER_NAME,
+            signing_key=self.OTHER_SERVER_SIGNATURE_KEY,
+        )
+        channel = self.make_signed_federation_request(
+            "PUT",
+            f"/_matrix/federation/v2/send_join/{self._room_id}/x",
+            content=join_event_dict,
+        )
+        self.assertEquals(channel.code, 200, channel.json_body)
+
+        # we should get complete room state back
+        returned_state = [
+            (ev["type"], ev["state_key"]) for ev in channel.json_body["state"]
+        ]
+        self.assertCountEqual(
+            returned_state,
+            [
+                ("m.room.create", ""),
+                ("m.room.power_levels", ""),
+                ("m.room.join_rules", ""),
+                ("m.room.history_visibility", ""),
+                ("m.room.member", "@kermit:test"),
+                ("m.room.member", "@fozzie:test"),
+                # nb: *not* the joining user
+            ],
+        )
+
+        # also check the auth chain
+        returned_auth_chain_events = [
+            (ev["type"], ev["state_key"]) for ev in channel.json_body["auth_chain"]
+        ]
+        self.assertCountEqual(
+            returned_auth_chain_events,
+            [
+                ("m.room.create", ""),
("m.room.member", "@kermit:test"), + ("m.room.power_levels", ""), + ("m.room.join_rules", ""), + ], + ) + + # the room should show that the new user is a member + r = self.get_success( + self.hs.get_state_handler().get_current_state(self._room_id) + ) + self.assertEqual(r[("m.room.member", joining_user)].membership, "join") + + @override_config({"experimental_features": {"msc3706_enabled": True}}) + def test_send_join_partial_state(self): + """When MSC3706 support is enabled, /send_join should return partial state""" + joining_user = "@misspiggy:" + self.OTHER_SERVER_NAME + join_result = self._make_join(joining_user) + + join_event_dict = join_result["event"] + add_hashes_and_signatures( + KNOWN_ROOM_VERSIONS[DEFAULT_ROOM_VERSION], + join_event_dict, + signature_name=self.OTHER_SERVER_NAME, + signing_key=self.OTHER_SERVER_SIGNATURE_KEY, + ) + channel = self.make_signed_federation_request( + "PUT", + f"/_matrix/federation/v2/send_join/{self._room_id}/x?org.matrix.msc3706.partial_state=true", + content=join_event_dict, + ) + self.assertEquals(channel.code, 200, channel.json_body) + + # expect a reduced room state + returned_state = [ + (ev["type"], ev["state_key"]) for ev in channel.json_body["state"] + ] + self.assertCountEqual( + returned_state, + [ + ("m.room.create", ""), + ("m.room.power_levels", ""), + ("m.room.join_rules", ""), + ("m.room.history_visibility", ""), + ], + ) + + # the auth chain should not include anything already in "state" + returned_auth_chain_events = [ + (ev["type"], ev["state_key"]) for ev in channel.json_body["auth_chain"] + ] + self.assertCountEqual( + returned_auth_chain_events, + [ + ("m.room.member", "@kermit:test"), + ], + ) + + # the room should show that the new user is a member + r = self.get_success( + self.hs.get_state_handler().get_current_state(self._room_id) + ) + self.assertEqual(r[("m.room.member", joining_user)].membership, "join") + + def _create_acl_event(content): return make_event_from_dict( { diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py index 2bc89512f8ae..667ca90a4d3e 100644 --- a/tests/storage/test_event_federation.py +++ b/tests/storage/test_event_federation.py @@ -260,16 +260,16 @@ def test_auth_chain_ids(self, use_chain_cover_index: bool): self.assertCountEqual(auth_chain_ids, ["h", "i", "j", "k"]) auth_chain_ids = self.get_success(self.store.get_auth_chain_ids(room_id, ["h"])) - self.assertEqual(auth_chain_ids, ["k"]) + self.assertEqual(auth_chain_ids, {"k"}) auth_chain_ids = self.get_success(self.store.get_auth_chain_ids(room_id, ["i"])) - self.assertEqual(auth_chain_ids, ["j"]) + self.assertEqual(auth_chain_ids, {"j"}) # j and k have no parents. auth_chain_ids = self.get_success(self.store.get_auth_chain_ids(room_id, ["j"])) - self.assertEqual(auth_chain_ids, []) + self.assertEqual(auth_chain_ids, set()) auth_chain_ids = self.get_success(self.store.get_auth_chain_ids(room_id, ["k"])) - self.assertEqual(auth_chain_ids, []) + self.assertEqual(auth_chain_ids, set()) # More complex input sequences. 
auth_chain_ids = self.get_success( From 55113dd5e880815b3d7881f72147f25f37b00045 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Sat, 12 Feb 2022 14:33:49 +0000 Subject: [PATCH 236/474] Notify users, rather than rooms, of device list updates (#11905) Co-authored-by: Patrick Cloke --- changelog.d/11905.misc | 1 + synapse/handlers/device.py | 8 +++----- 2 files changed, 4 insertions(+), 5 deletions(-) create mode 100644 changelog.d/11905.misc diff --git a/changelog.d/11905.misc b/changelog.d/11905.misc new file mode 100644 index 000000000000..4f170cf01aa0 --- /dev/null +++ b/changelog.d/11905.misc @@ -0,0 +1 @@ +Preparation to support sending device list updates to application services. \ No newline at end of file diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index b184a48cb16c..36c05f8363b9 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -495,13 +495,11 @@ async def notify_device_update( "Notifying about update %r/%r, ID: %r", user_id, device_id, position ) - room_ids = await self.store.get_rooms_for_user(user_id) - # specify the user ID too since the user should always get their own device list # updates, even if they aren't in any rooms. - self.notifier.on_new_event( - "device_list_key", position, users=[user_id], rooms=room_ids - ) + users_to_notify = users_who_share_room.union({user_id}) + + self.notifier.on_new_event("device_list_key", position, users=users_to_notify) if hosts: logger.info( From 4ae956c8bb3d0f9a352934238b4d2a9c48307efb Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 14 Feb 2022 13:12:22 +0000 Subject: [PATCH 237/474] Use version string helper from matrix-common (#11979) * Require latest matrix-common * Use the common function --- changelog.d/11979.misc | 1 + scripts/synapse_port_db | 7 +- scripts/update_synapse_database | 7 +- synapse/app/_base.py | 5 +- synapse/app/admin_cmd.py | 5 +- synapse/app/generic_worker.py | 5 +- synapse/app/homeserver.py | 5 +- synapse/config/logger.py | 9 +- .../federation/transport/server/federation.py | 10 ++- synapse/metrics/__init__.py | 7 +- synapse/python_dependencies.py | 2 +- synapse/rest/admin/__init__.py | 6 +- synapse/util/versionstring.py | 85 ------------------- 13 files changed, 42 insertions(+), 112 deletions(-) create mode 100644 changelog.d/11979.misc delete mode 100644 synapse/util/versionstring.py diff --git a/changelog.d/11979.misc b/changelog.d/11979.misc new file mode 100644 index 000000000000..6edf3e029bd3 --- /dev/null +++ b/changelog.d/11979.misc @@ -0,0 +1 @@ +Fetch Synapse's version using a helper from `matrix-common`. 
\ No newline at end of file diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index 70ee4e5c7f8e..db354b3c8c5c 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -24,10 +24,10 @@ import traceback from typing import Dict, Iterable, Optional, Set import yaml +from matrix_common.versionstring import get_distribution_version_string from twisted.internet import defer, reactor -import synapse from synapse.config.database import DatabaseConnectionConfig from synapse.config.homeserver import HomeServerConfig from synapse.logging.context import ( @@ -67,7 +67,6 @@ from synapse.storage.databases.state.bg_updates import StateBackgroundUpdateStor from synapse.storage.engines import create_engine from synapse.storage.prepare_database import prepare_database from synapse.util import Clock -from synapse.util.versionstring import get_version_string logger = logging.getLogger("synapse_port_db") @@ -222,7 +221,9 @@ class MockHomeserver: self.clock = Clock(reactor) self.config = config self.hostname = config.server.server_name - self.version_string = "Synapse/" + get_version_string(synapse) + self.version_string = "Synapse/" + get_distribution_version_string( + "matrix-synapse" + ) def get_clock(self): return self.clock diff --git a/scripts/update_synapse_database b/scripts/update_synapse_database index 6c088bad9366..5c6453d77ffb 100755 --- a/scripts/update_synapse_database +++ b/scripts/update_synapse_database @@ -18,15 +18,14 @@ import logging import sys import yaml +from matrix_common.versionstring import get_distribution_version_string from twisted.internet import defer, reactor -import synapse from synapse.config.homeserver import HomeServerConfig from synapse.metrics.background_process_metrics import run_as_background_process from synapse.server import HomeServer from synapse.storage import DataStore -from synapse.util.versionstring import get_version_string logger = logging.getLogger("update_database") @@ -39,7 +38,9 @@ class MockHomeserver(HomeServer): config.server.server_name, reactor=reactor, config=config, **kwargs ) - self.version_string = "Synapse/" + get_version_string(synapse) + self.version_string = "Synapse/" + get_distribution_version_string( + "matrix-synapse" + ) def run_background_updates(hs): diff --git a/synapse/app/_base.py b/synapse/app/_base.py index bbab8a052a93..452c0c09d526 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -37,6 +37,7 @@ ) from cryptography.utils import CryptographyDeprecationWarning +from matrix_common.versionstring import get_distribution_version_string import twisted from twisted.internet import defer, error, reactor as _reactor @@ -67,7 +68,6 @@ from synapse.util.daemonize import daemonize_process from synapse.util.gai_resolver import GAIResolver from synapse.util.rlimit import change_resource_limit -from synapse.util.versionstring import get_version_string if TYPE_CHECKING: from synapse.server import HomeServer @@ -487,7 +487,8 @@ def setup_sentry(hs: "HomeServer") -> None: import sentry_sdk sentry_sdk.init( - dsn=hs.config.metrics.sentry_dsn, release=get_version_string(synapse) + dsn=hs.config.metrics.sentry_dsn, + release=get_distribution_version_string("matrix-synapse"), ) # We set some default tags that give some context to this instance diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index 42238f7f280b..6f8e33a15662 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -19,6 +19,8 @@ import tempfile from typing import List, Optional +from matrix_common.versionstring import 
get_distribution_version_string + from twisted.internet import defer, task import synapse @@ -44,7 +46,6 @@ from synapse.storage.databases.main.room import RoomWorkerStore from synapse.types import StateMap from synapse.util.logcontext import LoggingContext -from synapse.util.versionstring import get_version_string logger = logging.getLogger("synapse.app.admin_cmd") @@ -223,7 +224,7 @@ def start(config_options: List[str]) -> None: ss = AdminCmdServer( config.server.server_name, config=config, - version_string="Synapse/" + get_version_string(synapse), + version_string="Synapse/" + get_distribution_version_string("matrix-synapse"), ) setup_logging(ss, config, use_worker_options=True) diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index e256de200355..aadc882bf898 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -16,6 +16,8 @@ import sys from typing import Dict, List, Optional, Tuple +from matrix_common.versionstring import get_distribution_version_string + from twisted.internet import address from twisted.web.resource import Resource @@ -122,7 +124,6 @@ from synapse.storage.databases.main.user_directory import UserDirectoryStore from synapse.types import JsonDict from synapse.util.httpresourcetree import create_resource_tree -from synapse.util.versionstring import get_version_string logger = logging.getLogger("synapse.app.generic_worker") @@ -482,7 +483,7 @@ def start(config_options: List[str]) -> None: hs = GenericWorkerServer( config.server.server_name, config=config, - version_string="Synapse/" + get_version_string(synapse), + version_string="Synapse/" + get_distribution_version_string("matrix-synapse"), ) setup_logging(hs, config, use_worker_options=True) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 66e1a213319c..bfb30003c2d3 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -18,6 +18,8 @@ import sys from typing import Dict, Iterable, Iterator, List +from matrix_common.versionstring import get_distribution_version_string + from twisted.internet.tcp import Port from twisted.web.resource import EncodingResourceWrapper, Resource from twisted.web.server import GzipEncoderFactory @@ -70,7 +72,6 @@ from synapse.storage import DataStore from synapse.util.httpresourcetree import create_resource_tree from synapse.util.module_loader import load_module -from synapse.util.versionstring import get_version_string logger = logging.getLogger("synapse.app.homeserver") @@ -350,7 +351,7 @@ def setup(config_options: List[str]) -> SynapseHomeServer: hs = SynapseHomeServer( config.server.server_name, config=config, - version_string="Synapse/" + get_version_string(synapse), + version_string="Synapse/" + get_distribution_version_string("matrix-synapse"), ) synapse.config.logger.setup_logging(hs, config, use_worker_options=False) diff --git a/synapse/config/logger.py b/synapse/config/logger.py index ea69b9bd9b50..b7145a44ae1d 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -22,6 +22,7 @@ from typing import TYPE_CHECKING, Any, Dict, Optional import yaml +from matrix_common.versionstring import get_distribution_version_string from zope.interface import implementer from twisted.logger import ( @@ -32,11 +33,9 @@ globalLogBeginner, ) -import synapse from synapse.logging._structured import setup_structured_logging from synapse.logging.context import LoggingContextFilter from synapse.logging.filter import MetadataFilter -from synapse.util.versionstring import get_version_string from 
._base import Config, ConfigError @@ -347,6 +346,10 @@ def setup_logging( # Log immediately so we can grep backwards. logging.warning("***** STARTING SERVER *****") - logging.warning("Server %s version %s", sys.argv[0], get_version_string(synapse)) + logging.warning( + "Server %s version %s", + sys.argv[0], + get_distribution_version_string("matrix-synapse"), + ) logging.info("Server hostname: %s", config.server.server_name) logging.info("Instance name: %s", hs.get_instance_name()) diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py index 310733097da5..e85a8eda5b87 100644 --- a/synapse/federation/transport/server/federation.py +++ b/synapse/federation/transport/server/federation.py @@ -24,9 +24,9 @@ Union, ) +from matrix_common.versionstring import get_distribution_version_string from typing_extensions import Literal -import synapse from synapse.api.errors import Codes, SynapseError from synapse.api.room_versions import RoomVersions from synapse.api.urls import FEDERATION_UNSTABLE_PREFIX, FEDERATION_V2_PREFIX @@ -42,7 +42,6 @@ ) from synapse.types import JsonDict from synapse.util.ratelimitutils import FederationRateLimiter -from synapse.util.versionstring import get_version_string if TYPE_CHECKING: from synapse.server import HomeServer @@ -616,7 +615,12 @@ async def on_GET( ) -> Tuple[int, JsonDict]: return ( 200, - {"server": {"name": "Synapse", "version": get_version_string(synapse)}}, + { + "server": { + "name": "Synapse", + "version": get_distribution_version_string("matrix-synapse"), + } + }, ) diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index cca084c18c21..d321946aa23d 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -34,6 +34,7 @@ ) import attr +from matrix_common.versionstring import get_distribution_version_string from prometheus_client import CollectorRegistry, Counter, Gauge, Histogram, Metric from prometheus_client.core import ( REGISTRY, @@ -43,14 +44,14 @@ from twisted.python.threadpool import ThreadPool -import synapse.metrics._reactor_metrics +# This module is imported for its side effects; flake8 needn't warn that it's unused. +import synapse.metrics._reactor_metrics # noqa: F401 from synapse.metrics._exposition import ( MetricsResource, generate_latest, start_http_server, ) from synapse.metrics._gc import MIN_TIME_BETWEEN_GCS, install_gc_manager -from synapse.util.versionstring import get_version_string logger = logging.getLogger(__name__) @@ -417,7 +418,7 @@ def collect(self) -> Iterable[Metric]: ) build_info.labels( " ".join([platform.python_implementation(), platform.python_version()]), - get_version_string(synapse), + get_distribution_version_string("matrix-synapse"), " ".join([platform.system(), platform.release()]), ).set(1) diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 22b4606ae0ea..4f99096df395 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -88,7 +88,7 @@ # with the latest security patches. 
"cryptography>=3.4.7", "ijson>=3.1", - "matrix-common==1.0.0", + "matrix-common~=1.1.0", ] CONDITIONAL_REQUIREMENTS = { diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 9be9e33c8eff..ba0d989d81c3 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -20,7 +20,8 @@ from http import HTTPStatus from typing import TYPE_CHECKING, Optional, Tuple -import synapse +from matrix_common.versionstring import get_distribution_version_string + from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.http.server import HttpServer, JsonResource from synapse.http.servlet import RestServlet, parse_json_object_from_request @@ -88,7 +89,6 @@ WhoisRestServlet, ) from synapse.types import JsonDict, RoomStreamToken -from synapse.util.versionstring import get_version_string if TYPE_CHECKING: from synapse.server import HomeServer @@ -101,7 +101,7 @@ class VersionServlet(RestServlet): def __init__(self, hs: "HomeServer"): self.res = { - "server_version": get_version_string(synapse), + "server_version": get_distribution_version_string("matrix-synapse"), "python_version": platform.python_version(), } diff --git a/synapse/util/versionstring.py b/synapse/util/versionstring.py deleted file mode 100644 index c144ff62c1fa..000000000000 --- a/synapse/util/versionstring.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# Copyright 2021 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -import os -import subprocess -from types import ModuleType -from typing import Dict - -logger = logging.getLogger(__name__) - -version_cache: Dict[ModuleType, str] = {} - - -def get_version_string(module: ModuleType) -> str: - """Given a module calculate a git-aware version string for it. - - If called on a module not in a git checkout will return `__version__`. - - Args: - module: The module to check the version of. Must declare a __version__ - attribute. - - Returns: - The module version (as a string). - """ - - cached_version = version_cache.get(module) - if cached_version is not None: - return cached_version - - # We want this to fail loudly with an AttributeError. Type-ignore this so - # mypy only considers the happy path. 
- version_string = module.__version__ # type: ignore[attr-defined] - - try: - cwd = os.path.dirname(os.path.abspath(module.__file__)) - - def _run_git_command(prefix: str, *params: str) -> str: - try: - result = ( - subprocess.check_output( - ["git", *params], stderr=subprocess.DEVNULL, cwd=cwd - ) - .strip() - .decode("ascii") - ) - return prefix + result - except (subprocess.CalledProcessError, FileNotFoundError): - return "" - - git_branch = _run_git_command("b=", "rev-parse", "--abbrev-ref", "HEAD") - git_tag = _run_git_command("t=", "describe", "--exact-match") - git_commit = _run_git_command("", "rev-parse", "--short", "HEAD") - - dirty_string = "-this_is_a_dirty_checkout" - is_dirty = _run_git_command("", "describe", "--dirty=" + dirty_string).endswith( - dirty_string - ) - git_dirty = "dirty" if is_dirty else "" - - if git_branch or git_tag or git_commit or git_dirty: - git_version = ",".join( - s for s in (git_branch, git_tag, git_commit, git_dirty) if s - ) - - version_string = f"{version_string} ({git_version})" - except Exception as e: - logger.info("Failed to check for git repository: %s", e) - - version_cache[module] = version_string - - return version_string From 738e569ed22d662f62f82f54347b8faa87e1658b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 14 Feb 2022 09:18:44 -0500 Subject: [PATCH 238/474] Require that modules register their callbacks using keyword arguments. (#11975) To allow for easier backwards/forwards compatibility by making it easier to add/remove callbacks. --- changelog.d/11975.bugfix | 1 + synapse/module_api/__init__.py | 6 ++++++ 2 files changed, 7 insertions(+) create mode 100644 changelog.d/11975.bugfix diff --git a/changelog.d/11975.bugfix b/changelog.d/11975.bugfix new file mode 100644 index 000000000000..076cb2b1e152 --- /dev/null +++ b/changelog.d/11975.bugfix @@ -0,0 +1 @@ +Require that modules register their callbacks using keyword arguments. 
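
As an illustrative sketch only (not part of the diff that follows): after this
change a module must pass each callback by name. Here `module_api` stands in
for the `ModuleApi` instance handed to a module's constructor, and the callback
body is a toy.

    async def check_event_for_spam(event):
        return False  # toy example: never treat anything as spam

    # Positional registration now raises TypeError:
    #     module_api.register_spam_checker_callbacks(check_event_for_spam)
    # Callbacks must be registered by keyword:
    module_api.register_spam_checker_callbacks(
        check_event_for_spam=check_event_for_spam
    )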
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index c6367793081b..d4fca369231a 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -211,6 +211,7 @@ def __init__(self, hs: "HomeServer", auth_handler: AuthHandler) -> None: def register_spam_checker_callbacks( self, + *, check_event_for_spam: Optional[CHECK_EVENT_FOR_SPAM_CALLBACK] = None, user_may_join_room: Optional[USER_MAY_JOIN_ROOM_CALLBACK] = None, user_may_invite: Optional[USER_MAY_INVITE_CALLBACK] = None, @@ -245,6 +246,7 @@ def register_spam_checker_callbacks( def register_account_validity_callbacks( self, + *, is_user_expired: Optional[IS_USER_EXPIRED_CALLBACK] = None, on_user_registration: Optional[ON_USER_REGISTRATION_CALLBACK] = None, on_legacy_send_mail: Optional[ON_LEGACY_SEND_MAIL_CALLBACK] = None, @@ -265,6 +267,7 @@ def register_account_validity_callbacks( def register_third_party_rules_callbacks( self, + *, check_event_allowed: Optional[CHECK_EVENT_ALLOWED_CALLBACK] = None, on_create_room: Optional[ON_CREATE_ROOM_CALLBACK] = None, check_threepid_can_be_invited: Optional[ @@ -289,6 +292,7 @@ def register_third_party_rules_callbacks( def register_presence_router_callbacks( self, + *, get_users_for_states: Optional[GET_USERS_FOR_STATES_CALLBACK] = None, get_interested_users: Optional[GET_INTERESTED_USERS_CALLBACK] = None, ) -> None: @@ -303,6 +307,7 @@ def register_presence_router_callbacks( def register_password_auth_provider_callbacks( self, + *, check_3pid_auth: Optional[CHECK_3PID_AUTH_CALLBACK] = None, on_logged_out: Optional[ON_LOGGED_OUT_CALLBACK] = None, auth_checkers: Optional[ @@ -327,6 +332,7 @@ def register_password_auth_provider_callbacks( def register_background_update_controller_callbacks( self, + *, on_update: ON_UPDATE_CALLBACK, default_batch_size: Optional[DEFAULT_BATCH_SIZE_CALLBACK] = None, min_batch_size: Optional[MIN_BATCH_SIZE_CALLBACK] = None, From 9c4563c5cd97dbcfe3eb43e05b30ca3c830d6937 Mon Sep 17 00:00:00 2001 From: "H. Shay" Date: Mon, 14 Feb 2022 10:21:00 -0800 Subject: [PATCH 239/474] remove empty file, reword/rename newsfragment, and add line to upgrade doc --- changelog.d/11849.feature | 1 + changelog.d/11849.misc | 1 - docs/upgrade.md | 1 + synapse/config/background_updates.py | 0 4 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11849.feature delete mode 100644 changelog.d/11849.misc delete mode 100644 synapse/config/background_updates.py diff --git a/changelog.d/11849.feature b/changelog.d/11849.feature new file mode 100644 index 000000000000..6c6b57a774c0 --- /dev/null +++ b/changelog.d/11849.feature @@ -0,0 +1 @@ +Enable cache time-based expiry by default. The `expiry_time` config flag has been superseded by `expire_caches` and `cache_entry_ttl`. diff --git a/changelog.d/11849.misc b/changelog.d/11849.misc deleted file mode 100644 index 9561eab1927c..000000000000 --- a/changelog.d/11849.misc +++ /dev/null @@ -1 +0,0 @@ -Enable cache time-based expiry by default. The `expiry_time` config flag will be superseded by `expire_caches` and `cache_entry_ttl`. diff --git a/docs/upgrade.md b/docs/upgrade.md index 25a86c08e603..a5da0076c656 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -117,6 +117,7 @@ Formerly, entries in the cache were not evicted regardless of whether they were This behavior has now changed. By default entries in the cache are now evicted after 30m of not being accessed. 
To change the default behavior, go to the `caches` section of the config and change the `expire_caches` and `cache_entry_ttl` flags as necessary. Please note that these flags replace the `expiry_time` flag in the config. +The `expiry_time` flag will still continue to work, but it has been deprecated and will be removed in the future. ## Deprecation of `capability` `org.matrix.msc3283.*` diff --git a/synapse/config/background_updates.py b/synapse/config/background_updates.py deleted file mode 100644 index e69de29bb2d1..000000000000 From 7812fe9eddd83c956d4deb5526c2f48b47f9ee82 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 14 Feb 2022 19:07:59 +0000 Subject: [PATCH 240/474] Note when unstable MSC3283 prefixes will be removed (#11989) --- changelog.d/11989.feature | 1 + docs/upgrade.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11989.feature diff --git a/changelog.d/11989.feature b/changelog.d/11989.feature new file mode 100644 index 000000000000..5975281a168e --- /dev/null +++ b/changelog.d/11989.feature @@ -0,0 +1 @@ +Support the stable API endpoint for [MSC3283](https://github.com/matrix-org/matrix-doc/pull/3283): new settings in `/capabilities` endpoint. \ No newline at end of file diff --git a/docs/upgrade.md b/docs/upgrade.md index a5da0076c656..477d7d0e81c8 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -129,7 +129,7 @@ The old `capabilities` - `org.matrix.msc3283.set_avatar_url` and - `org.matrix.msc3283.3pid_changes` -are deprecated and scheduled to be removed in Synapse v1.(next+1).0. +are deprecated and scheduled to be removed in Synapse v1.54.0. The new `capabilities` - `m.set_displayname`, From 54e74f8bde4ff02d6cb9acb3de11a0186af96d43 Mon Sep 17 00:00:00 2001 From: Michael Telatynski <7t3chguy@gmail.com> Date: Mon, 14 Feb 2022 19:28:00 +0000 Subject: [PATCH 241/474] Fix M_WRONG_ROOM_KEYS_VERSION error not including `current_version` field (#11988) Signed-off-by: Michael Telatynski <7t3chguy@gmail.com> --- changelog.d/11988.bugfix | 1 + synapse/api/errors.py | 3 +++ 2 files changed, 4 insertions(+) create mode 100644 changelog.d/11988.bugfix diff --git a/changelog.d/11988.bugfix b/changelog.d/11988.bugfix new file mode 100644 index 000000000000..ced10d0c8156 --- /dev/null +++ b/changelog.d/11988.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where `M_WRONG_ROOM_KEYS_VERSION` errors would not include the specced `current_version` field. diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 85302163dad9..e92db29f6dc6 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -406,6 +406,9 @@ def __init__(self, current_version: str): super().__init__(403, "Wrong room_keys version", Codes.WRONG_ROOM_KEYS_VERSION) self.current_version = current_version + def error_dict(self) -> "JsonDict": + return cs_error(self.msg, self.errcode, current_version=self.current_version) + class UnsupportedRoomVersionError(SynapseError): """The client's request to create a room used a room version that the server does From c4c98c75188015b7ee921ee0adc2c428fbb9b6b4 Mon Sep 17 00:00:00 2001 From: Pascal Bach Date: Mon, 14 Feb 2022 22:18:01 +0100 Subject: [PATCH 242/474] Revert "Pin to frozendict<2.1.2 (#11625)" and allow frozendict>2.1.2 (#11969) This reverts commit 2bf31f7807c7a0c229170803c97090d612dc16f9. 
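
An illustrative check (not part of this change) that the new requirement
string excludes only the known-bad release; it assumes the `packaging`
library is available:

    from packaging.specifiers import SpecifierSet

    spec = SpecifierSet(">=1,!=2.1.2")
    print("2.1.2" in spec)  # False: the release broken on Debian 10 stays excluded
    print("2.1.3" in spec)  # True: newer frozendict releases are allowed again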
Signed-off-by: Pascal Bach --- changelog.d/11969.misc | 1 + synapse/python_dependencies.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11969.misc diff --git a/changelog.d/11969.misc b/changelog.d/11969.misc new file mode 100644 index 000000000000..60a12d4032ec --- /dev/null +++ b/changelog.d/11969.misc @@ -0,0 +1 @@ +Unpin frozendict but exclude the known bad version 2.1.2. diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 4f99096df395..86162e0f2c65 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -51,7 +51,7 @@ # we use the TYPE_CHECKER.redefine method added in jsonschema 3.0.0 "jsonschema>=3.0.0", # frozendict 2.1.2 is broken on Debian 10: https://github.com/Marco-Sulla/python-frozendict/issues/41 - "frozendict>=1,<2.1.2", + "frozendict>=1,!=2.1.2", "unpaddedbase64>=1.1.0", "canonicaljson>=1.4.0", # we use the type definitions added in signedjson 1.1. From 7c055990412d99a211d36ea108e17bf9d295ff67 Mon Sep 17 00:00:00 2001 From: Sean Quah Date: Tue, 15 Feb 2022 10:44:24 +0000 Subject: [PATCH 243/474] 1.53.0rc1 --- CHANGES.md | 79 +++++++++++++++++++++++++++++++++++++++ changelog.d/11114.bugfix | 1 - changelog.d/11215.feature | 1 - changelog.d/11615.misc | 1 - changelog.d/11616.misc | 1 - changelog.d/11655.feature | 1 - changelog.d/11660.misc | 1 - changelog.d/11752.misc | 1 - changelog.d/11832.misc | 1 - changelog.d/11837.feature | 1 - changelog.d/11849.feature | 1 - changelog.d/11854.feature | 1 - changelog.d/11856.misc | 1 - changelog.d/11859.doc | 1 - changelog.d/11862.doc | 1 - changelog.d/11867.feature | 1 - changelog.d/11868.feature | 1 - changelog.d/11869.misc | 1 - changelog.d/11870.misc | 1 - changelog.d/11876.misc | 1 - changelog.d/11878.misc | 1 - changelog.d/11880.misc | 1 - changelog.d/11884.misc | 1 - changelog.d/11888.misc | 1 - changelog.d/11890.bugfix | 1 - changelog.d/11892.feature | 1 - changelog.d/11895.removal | 1 - changelog.d/11905.misc | 1 - changelog.d/11907.doc | 1 - changelog.d/11909.misc | 1 - changelog.d/11910.misc | 1 - changelog.d/11911.misc | 1 - changelog.d/11912.misc | 1 - changelog.d/11913.misc | 1 - changelog.d/11914.misc | 1 - changelog.d/11927.misc | 1 - changelog.d/11930.bugfix | 1 - changelog.d/11933.feature | 1 - changelog.d/11936.bugfix | 1 - changelog.d/11938.misc | 1 - changelog.d/11939.misc | 1 - changelog.d/11941.feature | 1 - changelog.d/11946.doc | 1 - changelog.d/11950.removal | 1 - changelog.d/11952.bugfix | 1 - changelog.d/11953.misc | 1 - changelog.d/11954.doc | 1 - changelog.d/11955.doc | 1 - changelog.d/11961.removal | 1 - changelog.d/11965.misc | 1 - changelog.d/11966.feature | 1 - changelog.d/11967.feature | 1 - changelog.d/11969.misc | 1 - changelog.d/11971.misc | 1 - changelog.d/11975.bugfix | 1 - changelog.d/11979.misc | 1 - changelog.d/11988.bugfix | 1 - changelog.d/11989.feature | 1 - debian/changelog | 6 +++ synapse/__init__.py | 2 +- 60 files changed, 86 insertions(+), 58 deletions(-) delete mode 100644 changelog.d/11114.bugfix delete mode 100644 changelog.d/11215.feature delete mode 100644 changelog.d/11615.misc delete mode 100644 changelog.d/11616.misc delete mode 100644 changelog.d/11655.feature delete mode 100644 changelog.d/11660.misc delete mode 100644 changelog.d/11752.misc delete mode 100644 changelog.d/11832.misc delete mode 100644 changelog.d/11837.feature delete mode 100644 changelog.d/11849.feature delete mode 100644 changelog.d/11854.feature delete mode 100644 changelog.d/11856.misc delete mode 100644 
changelog.d/11859.doc delete mode 100644 changelog.d/11862.doc delete mode 100644 changelog.d/11867.feature delete mode 100644 changelog.d/11868.feature delete mode 100644 changelog.d/11869.misc delete mode 100644 changelog.d/11870.misc delete mode 100644 changelog.d/11876.misc delete mode 100644 changelog.d/11878.misc delete mode 100644 changelog.d/11880.misc delete mode 100644 changelog.d/11884.misc delete mode 100644 changelog.d/11888.misc delete mode 100644 changelog.d/11890.bugfix delete mode 100644 changelog.d/11892.feature delete mode 100644 changelog.d/11895.removal delete mode 100644 changelog.d/11905.misc delete mode 100644 changelog.d/11907.doc delete mode 100644 changelog.d/11909.misc delete mode 100644 changelog.d/11910.misc delete mode 100644 changelog.d/11911.misc delete mode 100644 changelog.d/11912.misc delete mode 100644 changelog.d/11913.misc delete mode 100644 changelog.d/11914.misc delete mode 100644 changelog.d/11927.misc delete mode 100644 changelog.d/11930.bugfix delete mode 100644 changelog.d/11933.feature delete mode 100644 changelog.d/11936.bugfix delete mode 100644 changelog.d/11938.misc delete mode 100644 changelog.d/11939.misc delete mode 100644 changelog.d/11941.feature delete mode 100644 changelog.d/11946.doc delete mode 100644 changelog.d/11950.removal delete mode 100644 changelog.d/11952.bugfix delete mode 100644 changelog.d/11953.misc delete mode 100644 changelog.d/11954.doc delete mode 100644 changelog.d/11955.doc delete mode 100644 changelog.d/11961.removal delete mode 100644 changelog.d/11965.misc delete mode 100644 changelog.d/11966.feature delete mode 100644 changelog.d/11967.feature delete mode 100644 changelog.d/11969.misc delete mode 100644 changelog.d/11971.misc delete mode 100644 changelog.d/11975.bugfix delete mode 100644 changelog.d/11979.misc delete mode 100644 changelog.d/11988.bugfix delete mode 100644 changelog.d/11989.feature diff --git a/CHANGES.md b/CHANGES.md index 958024ff0c84..0398aa3853d5 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,82 @@ +Synapse 1.53.0rc1 (2022-02-15) +============================== + +Features +-------- + +- Add experimental support for sending to-device messages to application services, as specified by [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409). Disabled by default. ([\#11215](https://github.com/matrix-org/synapse/issues/11215), [\#11966](https://github.com/matrix-org/synapse/issues/11966)) +- Remove account data (including client config, push rules and ignored users) upon user deactivation. ([\#11655](https://github.com/matrix-org/synapse/issues/11655)) +- Experimental support for [MSC3666](https://github.com/matrix-org/matrix-doc/pull/3666): including bundled aggregations in server side search results. ([\#11837](https://github.com/matrix-org/synapse/issues/11837)) +- Enable cache time-based expiry by default. The `expiry_time` config flag has been superseded by `expire_caches` and `cache_entry_ttl`. ([\#11849](https://github.com/matrix-org/synapse/issues/11849)) +- Add a callback to allow modules to allow or forbid a 3PID (email address, phone number) from being associated to a local account. ([\#11854](https://github.com/matrix-org/synapse/issues/11854)) +- Stabilize support for [MSC3231](https://github.com/matrix-org/matrix-doc/pull/3231). Clients should switch to the stable identifier and endpoint. ([\#11867](https://github.com/matrix-org/synapse/issues/11867)) +- Allow modules to retrieve the current instance's server name and worker name. 
([\#11868](https://github.com/matrix-org/synapse/issues/11868)) +- Use a dedicated configurable rate limiter for 3PID invites. ([\#11892](https://github.com/matrix-org/synapse/issues/11892)) +- Support the stable API endpoint for [MSC3283](https://github.com/matrix-org/matrix-doc/pull/3283): new settings in `/capabilities` endpoint. ([\#11933](https://github.com/matrix-org/synapse/issues/11933), [\#11989](https://github.com/matrix-org/synapse/issues/11989)) +- Support the `dir` parameter on the `/relations` endpoint, per [MSC3715](https://github.com/matrix-org/matrix-doc/pull/3715). ([\#11941](https://github.com/matrix-org/synapse/issues/11941)) +- Experimental implementation of [MSC3706](https://github.com/matrix-org/matrix-doc/pull/3706): extensions to `/send_join` to support reduced response size. ([\#11967](https://github.com/matrix-org/synapse/issues/11967)) + + +Bugfixes +-------- + +- Fix [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) historical messages backfilling in random order on remote homeservers. ([\#11114](https://github.com/matrix-org/synapse/issues/11114)) +- Fix a bug introduced in Synapse 1.51.0rc1 where incoming federation transactions containing at least one EDU would be dropped if debug logging was enabled for `synapse.8631_debug`. ([\#11890](https://github.com/matrix-org/synapse/issues/11890)) +- Fix a long-standing bug where some unknown endpoints would return HTML error pages instead of JSON `M_UNRECOGNIZED` errors. ([\#11930](https://github.com/matrix-org/synapse/issues/11930)) +- Implement an allow list of content types for which we will attempt to preview a URL. This prevents Synapse from making useless longer-lived connections to streaming media servers. ([\#11936](https://github.com/matrix-org/synapse/issues/11936)) +- Fix a long-standing bug where pagination tokens from `/sync` and `/messages` could not be provided to the `/relations` API. ([\#11952](https://github.com/matrix-org/synapse/issues/11952)) +- Require that modules register their callbacks using keyword arguments. ([\#11975](https://github.com/matrix-org/synapse/issues/11975)) +- Fix a long-standing bug where `M_WRONG_ROOM_KEYS_VERSION` errors would not include the specced `current_version` field. ([\#11988](https://github.com/matrix-org/synapse/issues/11988)) + + +Improved Documentation +---------------------- + +- Fix typo in User Admin API: unpind -> unbind. ([\#11859](https://github.com/matrix-org/synapse/issues/11859)) +- Document images returned by the User List Media Admin API can include those generated by URL previews. ([\#11862](https://github.com/matrix-org/synapse/issues/11862)) +- Remove outdated MSC1711 FAQ document. ([\#11907](https://github.com/matrix-org/synapse/issues/11907)) +- Correct the structured logging configuration example. Contributed by Brad Jones. ([\#11946](https://github.com/matrix-org/synapse/issues/11946)) +- Add information on the Synapse release cycle. ([\#11954](https://github.com/matrix-org/synapse/issues/11954)) +- Fix broken link in the README to the admin API for password reset. ([\#11955](https://github.com/matrix-org/synapse/issues/11955)) + + +Deprecations and Removals +------------------------- + +- Drop support for `webclient` listeners and configuring `web_client_location` to a non-HTTP(S) URL. Deprecated configurations are a configuration error. ([\#11895](https://github.com/matrix-org/synapse/issues/11895)) +- Remove deprecated `user_may_create_room_with_invites` spam checker callback. 
See the [upgrade notes](https://matrix-org.github.io/synapse/latest/upgrade.html#removal-of-user_may_create_room_with_invites) for more information. ([\#11950](https://github.com/matrix-org/synapse/issues/11950)) +- No longer build `.deb` packages for Ubuntu 21.04 Hirsute Hippo, which has now EOLed. ([\#11961](https://github.com/matrix-org/synapse/issues/11961)) + + +Internal Changes +---------------- + +- Enhance user registration test helpers to make them more useful for tests involving Application Services and devices. ([\#11615](https://github.com/matrix-org/synapse/issues/11615), [\#11616](https://github.com/matrix-org/synapse/issues/11616)) +- Improve performance when fetching bundled aggregations for multiple events. ([\#11660](https://github.com/matrix-org/synapse/issues/11660), [\#11752](https://github.com/matrix-org/synapse/issues/11752)) +- Fix type errors introduced by new annotations in the Prometheus Client library. ([\#11832](https://github.com/matrix-org/synapse/issues/11832)) +- Add missing type hints to replication code. ([\#11856](https://github.com/matrix-org/synapse/issues/11856), [\#11938](https://github.com/matrix-org/synapse/issues/11938)) +- Ensure that `opentracing` scopes are activated and closed at the right time. ([\#11869](https://github.com/matrix-org/synapse/issues/11869)) +- Improve opentracing for incoming federation requests. ([\#11870](https://github.com/matrix-org/synapse/issues/11870)) +- Improve internal docstrings in `synapse.util.caches`. ([\#11876](https://github.com/matrix-org/synapse/issues/11876)) +- Do not needlessly clear the `get_users_in_room` and `get_users_in_room_with_profiles` caches when any room state changes. ([\#11878](https://github.com/matrix-org/synapse/issues/11878)) +- Convert `ApplicationServiceTestCase` to use `simple_async_mock`. ([\#11880](https://github.com/matrix-org/synapse/issues/11880)) +- Remove experimental changes to the default push rules which were introduced in Synapse 1.19.0 but never enabled. ([\#11884](https://github.com/matrix-org/synapse/issues/11884)) +- Disable coverage calculation for olddeps build. ([\#11888](https://github.com/matrix-org/synapse/issues/11888)) +- Preparation to support sending device list updates to application services. ([\#11905](https://github.com/matrix-org/synapse/issues/11905)) +- Add a test that checks users receive their own device list updates down `/sync`. ([\#11909](https://github.com/matrix-org/synapse/issues/11909)) +- Run Complement tests sequentially. ([\#11910](https://github.com/matrix-org/synapse/issues/11910)) +- Various refactors to the application service notifier code. ([\#11911](https://github.com/matrix-org/synapse/issues/11911), [\#11912](https://github.com/matrix-org/synapse/issues/11912)) +- Tests: replace mocked `Authenticator` with the real thing. ([\#11913](https://github.com/matrix-org/synapse/issues/11913)) +- Various refactors to the typing notifications code. ([\#11914](https://github.com/matrix-org/synapse/issues/11914)) +- Use the proper type for the Content-Length header in the `UploadResource`. ([\#11927](https://github.com/matrix-org/synapse/issues/11927)) +- Remove an unnecessary ignoring of type hints due to fixes in upstream packages. ([\#11939](https://github.com/matrix-org/synapse/issues/11939)) +- Add missing type hints. ([\#11953](https://github.com/matrix-org/synapse/issues/11953)) +- Fix an import cycle in `synapse.event_auth`. 
([\#11965](https://github.com/matrix-org/synapse/issues/11965)) +- Unpin frozendict but exclude the known bad version 2.1.2. ([\#11969](https://github.com/matrix-org/synapse/issues/11969)) +- Prepare for rename of default complement branch. ([\#11971](https://github.com/matrix-org/synapse/issues/11971)) +- Fetch Synapse's version using a helper from `matrix-common`. ([\#11979](https://github.com/matrix-org/synapse/issues/11979)) + + Synapse 1.52.0 (2022-02-08) =========================== diff --git a/changelog.d/11114.bugfix b/changelog.d/11114.bugfix deleted file mode 100644 index c6e65df97f90..000000000000 --- a/changelog.d/11114.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) historical messages backfilling in random order on remote homeservers. diff --git a/changelog.d/11215.feature b/changelog.d/11215.feature deleted file mode 100644 index 468020834b3d..000000000000 --- a/changelog.d/11215.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental support for sending to-device messages to application services, as specified by [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409). Disabled by default. diff --git a/changelog.d/11615.misc b/changelog.d/11615.misc deleted file mode 100644 index bbc551698d45..000000000000 --- a/changelog.d/11615.misc +++ /dev/null @@ -1 +0,0 @@ -Enhance user registration test helpers to make them more useful for tests involving Application Services and devices. diff --git a/changelog.d/11616.misc b/changelog.d/11616.misc deleted file mode 100644 index bbc551698d45..000000000000 --- a/changelog.d/11616.misc +++ /dev/null @@ -1 +0,0 @@ -Enhance user registration test helpers to make them more useful for tests involving Application Services and devices. diff --git a/changelog.d/11655.feature b/changelog.d/11655.feature deleted file mode 100644 index dc426fb658ac..000000000000 --- a/changelog.d/11655.feature +++ /dev/null @@ -1 +0,0 @@ -Remove account data (including client config, push rules and ignored users) upon user deactivation. \ No newline at end of file diff --git a/changelog.d/11660.misc b/changelog.d/11660.misc deleted file mode 100644 index 47e085e4d931..000000000000 --- a/changelog.d/11660.misc +++ /dev/null @@ -1 +0,0 @@ -Improve performance when fetching bundled aggregations for multiple events. diff --git a/changelog.d/11752.misc b/changelog.d/11752.misc deleted file mode 100644 index 47e085e4d931..000000000000 --- a/changelog.d/11752.misc +++ /dev/null @@ -1 +0,0 @@ -Improve performance when fetching bundled aggregations for multiple events. diff --git a/changelog.d/11832.misc b/changelog.d/11832.misc deleted file mode 100644 index 5ff117d93326..000000000000 --- a/changelog.d/11832.misc +++ /dev/null @@ -1 +0,0 @@ -Fix type errors introduced by new annotations in the Prometheus Client library. \ No newline at end of file diff --git a/changelog.d/11837.feature b/changelog.d/11837.feature deleted file mode 100644 index 62ef707123db..000000000000 --- a/changelog.d/11837.feature +++ /dev/null @@ -1 +0,0 @@ -Experimental support for [MSC3666](https://github.com/matrix-org/matrix-doc/pull/3666): including bundled aggregations in server side search results. diff --git a/changelog.d/11849.feature b/changelog.d/11849.feature deleted file mode 100644 index 6c6b57a774c0..000000000000 --- a/changelog.d/11849.feature +++ /dev/null @@ -1 +0,0 @@ -Enable cache time-based expiry by default. The `expiry_time` config flag has been superseded by `expire_caches` and `cache_entry_ttl`. 
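As an aside on the cache entry above: the time-based expiry that `expire_caches` and `cache_entry_ttl` control behaves, in outline, like the following hand-written sketch (illustrative only, not Synapse's actual cache class) — entries remember when they were inserted and are evicted on access once they outlive the TTL.

import time
from typing import Any, Dict, Optional, Tuple

class TTLCache:
    """Minimal sketch of time-based cache expiry."""

    def __init__(self, ttl_seconds: float) -> None:
        self._ttl = ttl_seconds
        # key -> (monotonic insertion time, value)
        self._entries: Dict[Any, Tuple[float, Any]] = {}

    def set(self, key: Any, value: Any) -> None:
        self._entries[key] = (time.monotonic(), value)

    def get(self, key: Any) -> Optional[Any]:
        entry = self._entries.get(key)
        if entry is None:
            return None
        inserted_at, value = entry
        if time.monotonic() - inserted_at > self._ttl:
            # The entry has outlived its TTL: evict it and report a miss.
            del self._entries[key]
            return None
        return value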
diff --git a/changelog.d/11854.feature b/changelog.d/11854.feature deleted file mode 100644 index 975e95bc52e4..000000000000 --- a/changelog.d/11854.feature +++ /dev/null @@ -1 +0,0 @@ -Add a callback to allow modules to allow or forbid a 3PID (email address, phone number) from being associated to a local account. diff --git a/changelog.d/11856.misc b/changelog.d/11856.misc deleted file mode 100644 index 1d3a0030f77f..000000000000 --- a/changelog.d/11856.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints to replication code. diff --git a/changelog.d/11859.doc b/changelog.d/11859.doc deleted file mode 100644 index d903c8ddafa9..000000000000 --- a/changelog.d/11859.doc +++ /dev/null @@ -1 +0,0 @@ -Fix typo in User Admin API: unpind -> unbind. diff --git a/changelog.d/11862.doc b/changelog.d/11862.doc deleted file mode 100644 index 98e32e7325bd..000000000000 --- a/changelog.d/11862.doc +++ /dev/null @@ -1 +0,0 @@ -Document images returned by the User List Media Admin API can include those generated by URL previews. diff --git a/changelog.d/11867.feature b/changelog.d/11867.feature deleted file mode 100644 index 601705e0004e..000000000000 --- a/changelog.d/11867.feature +++ /dev/null @@ -1 +0,0 @@ -Stabilize support for [MSC3231](https://github.com/matrix-org/matrix-doc/pull/3231). Clients should switch to the stable identifier and endpoint. diff --git a/changelog.d/11868.feature b/changelog.d/11868.feature deleted file mode 100644 index 3723dac4ea10..000000000000 --- a/changelog.d/11868.feature +++ /dev/null @@ -1 +0,0 @@ -Allow modules to retrieve the current instance's server name and worker name. diff --git a/changelog.d/11869.misc b/changelog.d/11869.misc deleted file mode 100644 index 054fbf610140..000000000000 --- a/changelog.d/11869.misc +++ /dev/null @@ -1 +0,0 @@ -Ensure that `opentracing` scopes are activated and closed at the right time. diff --git a/changelog.d/11870.misc b/changelog.d/11870.misc deleted file mode 100644 index 2cb0efdb456c..000000000000 --- a/changelog.d/11870.misc +++ /dev/null @@ -1 +0,0 @@ -Improve opentracing for incoming federation requests. diff --git a/changelog.d/11876.misc b/changelog.d/11876.misc deleted file mode 100644 index 09f2d0b67fd5..000000000000 --- a/changelog.d/11876.misc +++ /dev/null @@ -1 +0,0 @@ -Improve internal docstrings in `synapse.util.caches`. diff --git a/changelog.d/11878.misc b/changelog.d/11878.misc deleted file mode 100644 index 74915a47dd25..000000000000 --- a/changelog.d/11878.misc +++ /dev/null @@ -1 +0,0 @@ -Do not needlessly clear the `get_users_in_room` and `get_users_in_room_with_profiles` caches when any room state changes. diff --git a/changelog.d/11880.misc b/changelog.d/11880.misc deleted file mode 100644 index 8125947b2a17..000000000000 --- a/changelog.d/11880.misc +++ /dev/null @@ -1 +0,0 @@ -Convert `ApplicationServiceTestCase` to use `simple_async_mock`. \ No newline at end of file diff --git a/changelog.d/11884.misc b/changelog.d/11884.misc deleted file mode 100644 index d679d6038fe9..000000000000 --- a/changelog.d/11884.misc +++ /dev/null @@ -1 +0,0 @@ -Remove experimental changes to the default push rules which were introduced in Synapse 1.19.0 but never enabled. diff --git a/changelog.d/11888.misc b/changelog.d/11888.misc deleted file mode 100644 index db1c9b8bbd0b..000000000000 --- a/changelog.d/11888.misc +++ /dev/null @@ -1 +0,0 @@ -Disable coverage calculation for olddeps build. 
diff --git a/changelog.d/11890.bugfix b/changelog.d/11890.bugfix deleted file mode 100644 index 6b696692e332..000000000000 --- a/changelog.d/11890.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.51.0rc1 where incoming federation transactions containing at least one EDU would be dropped if debug logging was enabled for `synapse.8631_debug`. \ No newline at end of file diff --git a/changelog.d/11892.feature b/changelog.d/11892.feature deleted file mode 100644 index 86e21a7f8454..000000000000 --- a/changelog.d/11892.feature +++ /dev/null @@ -1 +0,0 @@ -Use a dedicated configurable rate limiter for 3PID invites. diff --git a/changelog.d/11895.removal b/changelog.d/11895.removal deleted file mode 100644 index 5973d96a3340..000000000000 --- a/changelog.d/11895.removal +++ /dev/null @@ -1 +0,0 @@ -Drop support for `webclient` listeners and configuring `web_client_location` to a non-HTTP(S) URL. Deprecated configurations are a configuration error. diff --git a/changelog.d/11905.misc b/changelog.d/11905.misc deleted file mode 100644 index 4f170cf01aa0..000000000000 --- a/changelog.d/11905.misc +++ /dev/null @@ -1 +0,0 @@ -Preparation to support sending device list updates to application services. \ No newline at end of file diff --git a/changelog.d/11907.doc b/changelog.d/11907.doc deleted file mode 100644 index 345cb900548d..000000000000 --- a/changelog.d/11907.doc +++ /dev/null @@ -1 +0,0 @@ -Remove outdated MSC1711 FAQ document. diff --git a/changelog.d/11909.misc b/changelog.d/11909.misc deleted file mode 100644 index ffd3e5c6397c..000000000000 --- a/changelog.d/11909.misc +++ /dev/null @@ -1 +0,0 @@ -Add a test that checks users receive their own device list updates down `/sync`. \ No newline at end of file diff --git a/changelog.d/11910.misc b/changelog.d/11910.misc deleted file mode 100644 index d05130969f5b..000000000000 --- a/changelog.d/11910.misc +++ /dev/null @@ -1 +0,0 @@ -Run Complement tests sequentially. diff --git a/changelog.d/11911.misc b/changelog.d/11911.misc deleted file mode 100644 index 805588c2e963..000000000000 --- a/changelog.d/11911.misc +++ /dev/null @@ -1 +0,0 @@ -Various refactors to the application service notifier code. \ No newline at end of file diff --git a/changelog.d/11912.misc b/changelog.d/11912.misc deleted file mode 100644 index 805588c2e963..000000000000 --- a/changelog.d/11912.misc +++ /dev/null @@ -1 +0,0 @@ -Various refactors to the application service notifier code. \ No newline at end of file diff --git a/changelog.d/11913.misc b/changelog.d/11913.misc deleted file mode 100644 index cb705603640a..000000000000 --- a/changelog.d/11913.misc +++ /dev/null @@ -1 +0,0 @@ -Tests: replace mocked `Authenticator` with the real thing. diff --git a/changelog.d/11914.misc b/changelog.d/11914.misc deleted file mode 100644 index c288d43455f8..000000000000 --- a/changelog.d/11914.misc +++ /dev/null @@ -1 +0,0 @@ -Various refactors to the typing notifications code. \ No newline at end of file diff --git a/changelog.d/11927.misc b/changelog.d/11927.misc deleted file mode 100644 index 22c58521c92c..000000000000 --- a/changelog.d/11927.misc +++ /dev/null @@ -1 +0,0 @@ -Use the proper type for the Content-Length header in the `UploadResource`. diff --git a/changelog.d/11930.bugfix b/changelog.d/11930.bugfix deleted file mode 100644 index e0dfbf1a1520..000000000000 --- a/changelog.d/11930.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug that some unknown endpoints would return HTML error pages instead of JSON `M_UNRECOGNIZED` errors. 
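For reference, the JSON error body that unknown endpoints now return (per the #11930 entry above) follows the standard Matrix error shape. A minimal sketch: `M_UNRECOGNIZED` is the spec'd error code, while the human-readable message below is illustrative rather than Synapse's exact wording.

import json

error_body = {"errcode": "M_UNRECOGNIZED", "error": "Unrecognized request"}
print(json.dumps(error_body))  # served as application/json rather than an HTML page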
diff --git a/changelog.d/11933.feature b/changelog.d/11933.feature deleted file mode 100644 index 2b1b0d1786a6..000000000000 --- a/changelog.d/11933.feature +++ /dev/null @@ -1 +0,0 @@ -Support the stable API endpoint for [MSC3283](https://github.com/matrix-org/matrix-doc/pull/3283): new settings in `/capabilities` endpoint. diff --git a/changelog.d/11936.bugfix b/changelog.d/11936.bugfix deleted file mode 100644 index bc149f280106..000000000000 --- a/changelog.d/11936.bugfix +++ /dev/null @@ -1 +0,0 @@ -Implement an allow list of content types for which we will attempt to preview a URL. This prevents Synapse from making useless longer-lived connections to streaming media servers. diff --git a/changelog.d/11938.misc b/changelog.d/11938.misc deleted file mode 100644 index 1d3a0030f77f..000000000000 --- a/changelog.d/11938.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints to replication code. diff --git a/changelog.d/11939.misc b/changelog.d/11939.misc deleted file mode 100644 index 317526f9efb4..000000000000 --- a/changelog.d/11939.misc +++ /dev/null @@ -1 +0,0 @@ -Remove an unnecessary ignoring of type hints due to fixes in upstream packages. diff --git a/changelog.d/11941.feature b/changelog.d/11941.feature deleted file mode 100644 index 2b5b11cb9f89..000000000000 --- a/changelog.d/11941.feature +++ /dev/null @@ -1 +0,0 @@ -Support the `dir` parameter on the `/relations` endpoint, per [MSC3715](https://github.com/matrix-org/matrix-doc/pull/3715). diff --git a/changelog.d/11946.doc b/changelog.d/11946.doc deleted file mode 100644 index eedf035a3bf3..000000000000 --- a/changelog.d/11946.doc +++ /dev/null @@ -1 +0,0 @@ -Correct the structured logging configuration example. Contributed by Brad Jones. diff --git a/changelog.d/11950.removal b/changelog.d/11950.removal deleted file mode 100644 index f75de40f2fa5..000000000000 --- a/changelog.d/11950.removal +++ /dev/null @@ -1 +0,0 @@ -Remove deprecated `user_may_create_room_with_invites` spam checker callback. See the [upgrade notes](https://matrix-org.github.io/synapse/latest/upgrade.html#removal-of-user_may_create_room_with_invites) for more information. diff --git a/changelog.d/11952.bugfix b/changelog.d/11952.bugfix deleted file mode 100644 index e38a08f559d5..000000000000 --- a/changelog.d/11952.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where pagination tokens from `/sync` and `/messages` could not be provided to the `/relations` API. diff --git a/changelog.d/11953.misc b/changelog.d/11953.misc deleted file mode 100644 index d44571b73149..000000000000 --- a/changelog.d/11953.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints. diff --git a/changelog.d/11954.doc b/changelog.d/11954.doc deleted file mode 100644 index 6e7b3908909f..000000000000 --- a/changelog.d/11954.doc +++ /dev/null @@ -1 +0,0 @@ -Add information on the Synapse release cycle. diff --git a/changelog.d/11955.doc b/changelog.d/11955.doc deleted file mode 100644 index 3d93115f596e..000000000000 --- a/changelog.d/11955.doc +++ /dev/null @@ -1 +0,0 @@ -Fix broken link in the README to the admin API for password reset. diff --git a/changelog.d/11961.removal b/changelog.d/11961.removal deleted file mode 100644 index 67b86ac7c177..000000000000 --- a/changelog.d/11961.removal +++ /dev/null @@ -1 +0,0 @@ -No longer build `.deb` packages for Ubuntu 21.04 Hirsute Hippo, which has now EOLed. 
\ No newline at end of file diff --git a/changelog.d/11965.misc b/changelog.d/11965.misc deleted file mode 100644 index e0265e103ff2..000000000000 --- a/changelog.d/11965.misc +++ /dev/null @@ -1 +0,0 @@ -Fix an import cycle in `synapse.event_auth`. diff --git a/changelog.d/11966.feature b/changelog.d/11966.feature deleted file mode 100644 index 468020834b3d..000000000000 --- a/changelog.d/11966.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental support for sending to-device messages to application services, as specified by [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409). Disabled by default. diff --git a/changelog.d/11967.feature b/changelog.d/11967.feature deleted file mode 100644 index d09320a2906c..000000000000 --- a/changelog.d/11967.feature +++ /dev/null @@ -1 +0,0 @@ -Experimental implementation of [MSC3706](https://github.com/matrix-org/matrix-doc/pull/3706): extensions to `/send_join` to support reduced response size. diff --git a/changelog.d/11969.misc b/changelog.d/11969.misc deleted file mode 100644 index 60a12d4032ec..000000000000 --- a/changelog.d/11969.misc +++ /dev/null @@ -1 +0,0 @@ -Unpin frozendict but exclude the known bad version 2.1.2. diff --git a/changelog.d/11971.misc b/changelog.d/11971.misc deleted file mode 100644 index 4e5bd8a393e1..000000000000 --- a/changelog.d/11971.misc +++ /dev/null @@ -1 +0,0 @@ -Prepare for rename of default complement branch. diff --git a/changelog.d/11975.bugfix b/changelog.d/11975.bugfix deleted file mode 100644 index 076cb2b1e152..000000000000 --- a/changelog.d/11975.bugfix +++ /dev/null @@ -1 +0,0 @@ -Require that modules register their callbacks using keyword arguments. diff --git a/changelog.d/11979.misc b/changelog.d/11979.misc deleted file mode 100644 index 6edf3e029bd3..000000000000 --- a/changelog.d/11979.misc +++ /dev/null @@ -1 +0,0 @@ -Fetch Synapse's version using a helper from `matrix-common`. \ No newline at end of file diff --git a/changelog.d/11988.bugfix b/changelog.d/11988.bugfix deleted file mode 100644 index ced10d0c8156..000000000000 --- a/changelog.d/11988.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where `M_WRONG_ROOM_KEYS_VERSION` errors would not include the specced `current_version` field. diff --git a/changelog.d/11989.feature b/changelog.d/11989.feature deleted file mode 100644 index 5975281a168e..000000000000 --- a/changelog.d/11989.feature +++ /dev/null @@ -1 +0,0 @@ -Support the stable API endpoint for [MSC3283](https://github.com/matrix-org/matrix-doc/pull/3283): new settings in `/capabilities` endpoint. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index 64ea103f3e8f..fe79d7ed57b9 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.53.0~rc1) stable; urgency=medium + + * New synapse release 1.53.0~rc1. + + -- Synapse Packaging team Tue, 15 Feb 2022 10:40:50 +0000 + matrix-synapse-py3 (1.52.0) stable; urgency=medium * New synapse release 1.52.0. 
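On the dependency side, the frozendict change earlier in this series (see the changelog.d/11969.misc entry above) replaces an upper bound with a single-version exclusion. A quick sketch using the `packaging` library shows the practical difference between the two specifiers:

from packaging.specifiers import SpecifierSet

old = SpecifierSet(">=1,<2.1.2")   # previous pin: rejects 2.1.2 and everything after it
new = SpecifierSet(">=1,!=2.1.2")  # new pin: rejects only the known-bad release

for version in ("2.1.1", "2.1.2", "2.1.3"):
    print(version, version in old, version in new)
# 2.1.1 True True
# 2.1.2 False False
# 2.1.3 False True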
diff --git a/synapse/__init__.py b/synapse/__init__.py index a23563937ad4..2bf8eb2a11e4 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.52.0" +__version__ = "1.53.0rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From f66997f2917c04632c8b29fc7a693c233ba91f45 Mon Sep 17 00:00:00 2001 From: Sean Quah Date: Tue, 15 Feb 2022 11:12:42 +0000 Subject: [PATCH 244/474] Update CHANGES.md --- CHANGES.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 0398aa3853d5..4210d09d2d48 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,12 +4,12 @@ Synapse 1.53.0rc1 (2022-02-15) Features -------- -- Add experimental support for sending to-device messages to application services, as specified by [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409). Disabled by default. ([\#11215](https://github.com/matrix-org/synapse/issues/11215), [\#11966](https://github.com/matrix-org/synapse/issues/11966)) +- Add experimental support for sending to-device messages to application services, as specified by [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409). ([\#11215](https://github.com/matrix-org/synapse/issues/11215), [\#11966](https://github.com/matrix-org/synapse/issues/11966)) - Remove account data (including client config, push rules and ignored users) upon user deactivation. ([\#11655](https://github.com/matrix-org/synapse/issues/11655)) - Experimental support for [MSC3666](https://github.com/matrix-org/matrix-doc/pull/3666): including bundled aggregations in server side search results. ([\#11837](https://github.com/matrix-org/synapse/issues/11837)) - Enable cache time-based expiry by default. The `expiry_time` config flag has been superseded by `expire_caches` and `cache_entry_ttl`. ([\#11849](https://github.com/matrix-org/synapse/issues/11849)) - Add a callback to allow modules to allow or forbid a 3PID (email address, phone number) from being associated to a local account. ([\#11854](https://github.com/matrix-org/synapse/issues/11854)) -- Stabilize support for [MSC3231](https://github.com/matrix-org/matrix-doc/pull/3231). Clients should switch to the stable identifier and endpoint. ([\#11867](https://github.com/matrix-org/synapse/issues/11867)) +- Stabilize support and remove unstable endpoints for [MSC3231](https://github.com/matrix-org/matrix-doc/pull/3231). Clients must switch to the stable identifier and endpoint. See the [upgrade notes](https://matrix-org.github.io/synapse/develop/upgrade#stablisation-of-msc3231) for more information. ([\#11867](https://github.com/matrix-org/synapse/issues/11867)) - Allow modules to retrieve the current instance's server name and worker name. ([\#11868](https://github.com/matrix-org/synapse/issues/11868)) - Use a dedicated configurable rate limiter for 3PID invites. ([\#11892](https://github.com/matrix-org/synapse/issues/11892)) - Support the stable API endpoint for [MSC3283](https://github.com/matrix-org/matrix-doc/pull/3283): new settings in `/capabilities` endpoint. ([\#11933](https://github.com/matrix-org/synapse/issues/11933), [\#11989](https://github.com/matrix-org/synapse/issues/11989)) @@ -21,7 +21,7 @@ Bugfixes -------- - Fix [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) historical messages backfilling in random order on remote homeservers. 
([\#11114](https://github.com/matrix-org/synapse/issues/11114)) -- Fix a bug introduced in Synapse 1.51.0rc1 where incoming federation transactions containing at least one EDU would be dropped if debug logging was enabled for `synapse.8631_debug`. ([\#11890](https://github.com/matrix-org/synapse/issues/11890)) +- Fix a bug introduced in Synapse 1.51.0 where incoming federation transactions containing at least one EDU would be dropped if debug logging was enabled for `synapse.8631_debug`. ([\#11890](https://github.com/matrix-org/synapse/issues/11890)) - Fix a long-standing bug where some unknown endpoints would return HTML error pages instead of JSON `M_UNRECOGNIZED` errors. ([\#11930](https://github.com/matrix-org/synapse/issues/11930)) - Implement an allow list of content types for which we will attempt to preview a URL. This prevents Synapse from making useless longer-lived connections to streaming media servers. ([\#11936](https://github.com/matrix-org/synapse/issues/11936)) - Fix a long-standing bug where pagination tokens from `/sync` and `/messages` could not be provided to the `/relations` API. ([\#11952](https://github.com/matrix-org/synapse/issues/11952)) @@ -51,7 +51,7 @@ Deprecations and Removals Internal Changes ---------------- -- Enhance user registration test helpers to make them more useful for tests involving Application Services and devices. ([\#11615](https://github.com/matrix-org/synapse/issues/11615), [\#11616](https://github.com/matrix-org/synapse/issues/11616)) +- Enhance user registration test helpers to make them more useful for tests involving application services and devices. ([\#11615](https://github.com/matrix-org/synapse/issues/11615), [\#11616](https://github.com/matrix-org/synapse/issues/11616)) - Improve performance when fetching bundled aggregations for multiple events. ([\#11660](https://github.com/matrix-org/synapse/issues/11660), [\#11752](https://github.com/matrix-org/synapse/issues/11752)) - Fix type errors introduced by new annotations in the Prometheus Client library. ([\#11832](https://github.com/matrix-org/synapse/issues/11832)) - Add missing type hints to replication code. ([\#11856](https://github.com/matrix-org/synapse/issues/11856), [\#11938](https://github.com/matrix-org/synapse/issues/11938)) @@ -68,12 +68,12 @@ Internal Changes - Various refactors to the application service notifier code. ([\#11911](https://github.com/matrix-org/synapse/issues/11911), [\#11912](https://github.com/matrix-org/synapse/issues/11912)) - Tests: replace mocked `Authenticator` with the real thing. ([\#11913](https://github.com/matrix-org/synapse/issues/11913)) - Various refactors to the typing notifications code. ([\#11914](https://github.com/matrix-org/synapse/issues/11914)) -- Use the proper type for the Content-Length header in the `UploadResource`. ([\#11927](https://github.com/matrix-org/synapse/issues/11927)) +- Use the proper type for the `Content-Length` header in the `UploadResource`. ([\#11927](https://github.com/matrix-org/synapse/issues/11927)) - Remove an unnecessary ignoring of type hints due to fixes in upstream packages. ([\#11939](https://github.com/matrix-org/synapse/issues/11939)) - Add missing type hints. ([\#11953](https://github.com/matrix-org/synapse/issues/11953)) - Fix an import cycle in `synapse.event_auth`. ([\#11965](https://github.com/matrix-org/synapse/issues/11965)) -- Unpin frozendict but exclude the known bad version 2.1.2. 
([\#11969](https://github.com/matrix-org/synapse/issues/11969)) -- Prepare for rename of default complement branch. ([\#11971](https://github.com/matrix-org/synapse/issues/11971)) +- Unpin `frozendict` but exclude the known bad version 2.1.2. ([\#11969](https://github.com/matrix-org/synapse/issues/11969)) +- Prepare for rename of default Complement branch. ([\#11971](https://github.com/matrix-org/synapse/issues/11971)) - Fetch Synapse's version using a helper from `matrix-common`. ([\#11979](https://github.com/matrix-org/synapse/issues/11979)) From 87f200571386b131f841f8abc8bb08efb6c3be52 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 15 Feb 2022 11:27:56 +0000 Subject: [PATCH 245/474] Add some tests for propagation of device list changes between local users (#11972) --- changelog.d/11972.misc | 1 + synapse/notifier.py | 4 +- tests/rest/client/test_device_lists.py | 155 +++++++++++++++++++++++++ tests/rest/client/utils.py | 6 +- 4 files changed, 163 insertions(+), 3 deletions(-) create mode 100644 changelog.d/11972.misc create mode 100644 tests/rest/client/test_device_lists.py diff --git a/changelog.d/11972.misc b/changelog.d/11972.misc new file mode 100644 index 000000000000..29c38bfd8255 --- /dev/null +++ b/changelog.d/11972.misc @@ -0,0 +1 @@ +Add tests for device list changes between local users. \ No newline at end of file diff --git a/synapse/notifier.py b/synapse/notifier.py index e0fad2da66b7..753dd6b6a5f1 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -138,7 +138,7 @@ def notify( self.current_token = self.current_token.copy_and_advance(stream_key, stream_id) self.last_notified_token = self.current_token self.last_notified_ms = time_now_ms - noify_deferred = self.notify_deferred + notify_deferred = self.notify_deferred log_kv( { @@ -153,7 +153,7 @@ def notify( with PreserveLoggingContext(): self.notify_deferred = ObservableDeferred(defer.Deferred()) - noify_deferred.callback(self.current_token) + notify_deferred.callback(self.current_token) def remove(self, notifier: "Notifier") -> None: """Remove this listener from all the indexes in the Notifier diff --git a/tests/rest/client/test_device_lists.py b/tests/rest/client/test_device_lists.py new file mode 100644 index 000000000000..16070cf0270f --- /dev/null +++ b/tests/rest/client/test_device_lists.py @@ -0,0 +1,155 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from synapse.rest import admin, devices, room, sync +from synapse.rest.client import account, login, register + +from tests import unittest + + +class DeviceListsTestCase(unittest.HomeserverTestCase): + """Tests regarding device list changes.""" + + servlets = [ + admin.register_servlets_for_client_rest_resource, + login.register_servlets, + register.register_servlets, + account.register_servlets, + room.register_servlets, + sync.register_servlets, + devices.register_servlets, + ] + + def test_receiving_local_device_list_changes(self): + """Tests that local users who share a room receive each other's device list + changes. + """ + # Register two users + test_device_id = "TESTDEVICE" + alice_user_id = self.register_user("alice", "correcthorse") + alice_access_token = self.login( + alice_user_id, "correcthorse", device_id=test_device_id + ) + + bob_user_id = self.register_user("bob", "ponyponypony") + bob_access_token = self.login(bob_user_id, "ponyponypony") + + # Create a room for them to coexist peacefully in + new_room_id = self.helper.create_room_as( + alice_user_id, is_public=True, tok=alice_access_token + ) + self.assertIsNotNone(new_room_id) + + # Have Bob join the room + self.helper.invite( + new_room_id, alice_user_id, bob_user_id, tok=alice_access_token + ) + self.helper.join(new_room_id, bob_user_id, tok=bob_access_token) + + # Now have Bob initiate an initial sync (in order to get a since token) + channel = self.make_request( + "GET", + "/sync", + access_token=bob_access_token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + next_batch_token = channel.json_body["next_batch"] + + # ...and then an incremental sync. This should block until the sync stream is woken up, + # which we hope will happen as a result of Alice updating their device list. + bob_sync_channel = self.make_request( + "GET", + f"/sync?since={next_batch_token}&timeout=30000", + access_token=bob_access_token, + # Start the request, then continue on. + await_result=False, + ) + + # Have Alice update their device list + channel = self.make_request( + "PUT", + f"/devices/{test_device_id}", + { + "display_name": "New Device Name", + }, + access_token=alice_access_token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Check that Bob's incremental sync contains the updated device list. + # If not, the client would only receive the device list update on the + # *next* sync. + bob_sync_channel.await_result() + self.assertEqual(bob_sync_channel.code, 200, bob_sync_channel.json_body) + + changed_device_lists = bob_sync_channel.json_body.get("device_lists", {}).get( + "changed", [] + ) + self.assertIn(alice_user_id, changed_device_lists, bob_sync_channel.json_body) + + def test_not_receiving_local_device_list_changes(self): + """Tests that local users DO NOT receive device updates from each other if they do not + share a room. + """ + # Register two users + test_device_id = "TESTDEVICE" + alice_user_id = self.register_user("alice", "correcthorse") + alice_access_token = self.login( + alice_user_id, "correcthorse", device_id=test_device_id + ) + + bob_user_id = self.register_user("bob", "ponyponypony") + bob_access_token = self.login(bob_user_id, "ponyponypony") + + # These users do not share a room. They are lonely. 
+ + # Have Bob initiate an initial sync (in order to get a since token) + channel = self.make_request( + "GET", + "/sync", + access_token=bob_access_token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + next_batch_token = channel.json_body["next_batch"] + + # ...and then an incremental sync. This should block until the sync stream is woken up, + # which we hope will happen as a result of Alice updating their device list. + bob_sync_channel = self.make_request( + "GET", + f"/sync?since={next_batch_token}&timeout=1000", + access_token=bob_access_token, + # Start the request, then continue on. + await_result=False, + ) + + # Have Alice update their device list + channel = self.make_request( + "PUT", + f"/devices/{test_device_id}", + { + "display_name": "New Device Name", + }, + access_token=alice_access_token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Check that Bob's incremental sync does not contain the updated device list. + bob_sync_channel.await_result() + self.assertEqual(bob_sync_channel.code, 200, bob_sync_channel.json_body) + + changed_device_lists = bob_sync_channel.json_body.get("device_lists", {}).get( + "changed", [] + ) + self.assertNotIn( + alice_user_id, changed_device_lists, bob_sync_channel.json_body + ) diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py index 1c0cb0cf4f34..2b3fdadffa41 100644 --- a/tests/rest/client/utils.py +++ b/tests/rest/client/utils.py @@ -106,9 +106,13 @@ def create_room_as( default room version. tok: The access token to use in the request. expect_code: The expected HTTP response code. + extra_content: Extra keys to include in the body of the /createRoom request. + Note that if is_public is set, the "visibility" key will be overridden. + If room_version is set, the "room_version" key will be overridden. + custom_headers: HTTP headers to include in the request. Returns: - The ID of the newly created room. + The ID of the newly created room, or None if the request failed. """ temp_id = self.auth_user_id self.auth_user_id = room_creator From 45f45404de2d0c4d68954eddc2dc905e50dfafe9 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 15 Feb 2022 08:26:57 -0500 Subject: [PATCH 246/474] Fix incorrect thread summaries when the latest event is edited. (#11992) If the latest event in a thread was edited, then the original event content was included in bundled aggregation for threads instead of the edited event content. --- changelog.d/11992.bugfix | 1 + synapse/events/utils.py | 69 ++++++++++++------- .../storage/databases/main/events_worker.py | 2 +- synapse/storage/databases/main/relations.py | 24 +++++-- tests/rest/client/test_relations.py | 42 +++++++++++ 5 files changed, 107 insertions(+), 31 deletions(-) create mode 100644 changelog.d/11992.bugfix diff --git a/changelog.d/11992.bugfix b/changelog.d/11992.bugfix new file mode 100644 index 000000000000..f73c86bb2598 --- /dev/null +++ b/changelog.d/11992.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse v1.48.0 where an edit of the latest event in a thread would not be properly applied to the thread summary. diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 243696b35724..9386fa29ddd3 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -425,6 +425,33 @@ def serialize_event( return serialized_event + def _apply_edit( + self, orig_event: EventBase, serialized_event: JsonDict, edit: EventBase + ) -> None: + """Replace the content, preserving existing relations of the serialized event. 
+ + Args: + orig_event: The original event. + serialized_event: The original event, serialized. This is modified. + edit: The event which edits the above. + """ + + # Ensure we take copies of the edit content, otherwise we risk modifying + # the original event. + edit_content = edit.content.copy() + + # Unfreeze the event content if necessary, so that we may modify it below + edit_content = unfreeze(edit_content) + serialized_event["content"] = edit_content.get("m.new_content", {}) + + # Check for existing relations + relates_to = orig_event.content.get("m.relates_to") + if relates_to: + # Keep the relations, ensuring we use a dict copy of the original + serialized_event["content"]["m.relates_to"] = relates_to.copy() + else: + serialized_event["content"].pop("m.relates_to", None) + def _inject_bundled_aggregations( self, event: EventBase, @@ -450,26 +477,11 @@ def _inject_bundled_aggregations( serialized_aggregations[RelationTypes.REFERENCE] = aggregations.references if aggregations.replace: - # If there is an edit replace the content, preserving existing - # relations. + # If there is an edit, apply it to the event. edit = aggregations.replace + self._apply_edit(event, serialized_event, edit) - # Ensure we take copies of the edit content, otherwise we risk modifying - # the original event. - edit_content = edit.content.copy() - - # Unfreeze the event content if necessary, so that we may modify it below - edit_content = unfreeze(edit_content) - serialized_event["content"] = edit_content.get("m.new_content", {}) - - # Check for existing relations - relates_to = event.content.get("m.relates_to") - if relates_to: - # Keep the relations, ensuring we use a dict copy of the original - serialized_event["content"]["m.relates_to"] = relates_to.copy() - else: - serialized_event["content"].pop("m.relates_to", None) - + # Include information about it in the relations dict. serialized_aggregations[RelationTypes.REPLACE] = { "event_id": edit.event_id, "origin_server_ts": edit.origin_server_ts, @@ -478,13 +490,22 @@ def _inject_bundled_aggregations( # If this event is the start of a thread, include a summary of the replies. if aggregations.thread: + thread = aggregations.thread + + # Don't bundle aggregations as this could recurse forever. + serialized_latest_event = self.serialize_event( + thread.latest_event, time_now, bundle_aggregations=None + ) + # Manually apply an edit, if one exists. + if thread.latest_edit: + self._apply_edit( + thread.latest_event, serialized_latest_event, thread.latest_edit + ) + serialized_aggregations[RelationTypes.THREAD] = { - # Don't bundle aggregations as this could recurse forever. - "latest_event": self.serialize_event( - aggregations.thread.latest_event, time_now, bundle_aggregations=None - ), - "count": aggregations.thread.count, - "current_user_participated": aggregations.thread.current_user_participated, + "latest_event": serialized_latest_event, + "count": thread.count, + "current_user_participated": thread.current_user_participated, } # Include the bundled aggregations in the event. diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 8d4287045a8b..712b8ce204fe 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -408,7 +408,7 @@ async def get_events( include the previous states content in the unsigned field. allow_rejected: If True, return rejected events. Otherwise, - omits rejeted events from the response. 
+ omits rejected events from the response. Returns: A mapping from event_id to event. diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index e2c27e594b24..5582029f9f50 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -53,8 +53,13 @@ @attr.s(slots=True, frozen=True, auto_attribs=True) class _ThreadAggregation: + # The latest event in the thread. latest_event: EventBase + # The latest edit to the latest event in the thread. + latest_edit: Optional[EventBase] + # The total number of events in the thread. count: int + # True if the current user has sent an event to the thread. current_user_participated: bool @@ -461,8 +466,8 @@ def get_thread_summary(self, event_id: str) -> Optional[Tuple[int, EventBase]]: @cachedList(cached_method_name="get_thread_summary", list_name="event_ids") async def _get_thread_summaries( self, event_ids: Collection[str] - ) -> Dict[str, Optional[Tuple[int, EventBase]]]: - """Get the number of threaded replies and the latest reply (if any) for the given event. + ) -> Dict[str, Optional[Tuple[int, EventBase, Optional[EventBase]]]]: + """Get the number of threaded replies, the latest reply (if any), and the latest edit for that reply for the given event. Args: event_ids: Summarize the thread related to this event ID. @@ -471,8 +476,10 @@ async def _get_thread_summaries( A map of the thread summary each event. A missing event implies there are no threaded replies. - Each summary includes the number of items in the thread and the most - recent response. + Each summary is a tuple of: + The number of events in the thread. + The most recent event in the thread. + The most recent edit to the most recent event in the thread, if applicable. """ def _get_thread_summaries_txn( @@ -558,6 +565,9 @@ def _get_thread_summaries_txn( latest_events = await self.get_events(latest_event_ids.values()) # type: ignore[attr-defined] + # Check to see if any of those events are edited. + latest_edits = await self._get_applicable_edits(latest_event_ids.values()) + # Map to the event IDs to the thread summary. # # There might not be a summary due to there not being a thread or @@ -568,7 +578,8 @@ def _get_thread_summaries_txn( summary = None if latest_event: - summary = (counts[parent_event_id], latest_event) + latest_edit = latest_edits.get(latest_event_id) + summary = (counts[parent_event_id], latest_event, latest_edit) summaries[parent_event_id] = summary return summaries @@ -828,11 +839,12 @@ async def get_bundled_aggregations( ) for event_id, summary in summaries.items(): if summary: - thread_count, latest_thread_event = summary + thread_count, latest_thread_event, edit = summary results.setdefault( event_id, BundledAggregations() ).thread = _ThreadAggregation( latest_event=latest_thread_event, + latest_edit=edit, count=thread_count, # If there's a thread summary it must also exist in the # participated dictionary. diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index de80aca03705..dfd9ffcb9380 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -1123,6 +1123,48 @@ def test_edit_reply(self): {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict ) + @unittest.override_config({"experimental_features": {"msc3440_enabled": True}}) + def test_edit_thread(self): + """Test that editing a thread works.""" + + # Create a thread and edit the last event. 
+ channel = self._send_relation( + RelationTypes.THREAD, + "m.room.message", + content={"msgtype": "m.text", "body": "A threaded reply!"}, + ) + self.assertEquals(200, channel.code, channel.json_body) + threaded_event_id = channel.json_body["event_id"] + + new_body = {"msgtype": "m.text", "body": "I've been edited!"} + channel = self._send_relation( + RelationTypes.REPLACE, + "m.room.message", + content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body}, + parent_id=threaded_event_id, + ) + self.assertEquals(200, channel.code, channel.json_body) + + # Fetch the thread root, to get the bundled aggregation for the thread. + channel = self.make_request( + "GET", + f"/rooms/{self.room}/event/{self.parent_id}", + access_token=self.user_token, + ) + self.assertEquals(200, channel.code, channel.json_body) + + # We expect that the edit message appears in the thread summary in the + # unsigned relations section. + relations_dict = channel.json_body["unsigned"].get("m.relations") + self.assertIn(RelationTypes.THREAD, relations_dict) + + thread_summary = relations_dict[RelationTypes.THREAD] + self.assertIn("latest_event", thread_summary) + latest_event_in_thread = thread_summary["latest_event"] + self.assertEquals( + latest_event_in_thread["content"]["body"], "I've been edited!" + ) + def test_edit_edit(self): """Test that an edit cannot be edited.""" new_body = {"msgtype": "m.text", "body": "Initial edit"} From e44f91d678e22936b7e2f0d8bf4890159507533b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 15 Feb 2022 08:47:05 -0500 Subject: [PATCH 247/474] Refactor search code to reduce function size. (#11991) Splits the search code into a few logical functions instead of a single unreadable function. There are also a few additional changes for readability. After refactoring it was clear to see there were some unused and unnecessary variables, which were simplified. --- changelog.d/11991.misc | 1 + synapse/handlers/search.py | 643 +++++++++++++++-------- synapse/storage/databases/main/search.py | 17 +- 3 files changed, 435 insertions(+), 226 deletions(-) create mode 100644 changelog.d/11991.misc diff --git a/changelog.d/11991.misc b/changelog.d/11991.misc new file mode 100644 index 000000000000..34a3b3a6b9df --- /dev/null +++ b/changelog.d/11991.misc @@ -0,0 +1 @@ +Refactor the search code for improved readability. diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index 41cb80907893..afd14da11222 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -14,8 +14,9 @@ import itertools import logging -from typing import TYPE_CHECKING, Dict, Iterable, List, Optional +from typing import TYPE_CHECKING, Collection, Dict, Iterable, List, Optional, Set, Tuple +import attr from unpaddedbase64 import decode_base64, encode_base64 from synapse.api.constants import EventTypes, Membership @@ -32,6 +33,20 @@ logger = logging.getLogger(__name__) +@attr.s(slots=True, frozen=True, auto_attribs=True) +class _SearchResult: + # The count of results. + count: int + # A mapping of event ID to the rank of that event. + rank_map: Dict[str, int] + # A list of the resulting events. + allowed_events: List[EventBase] + # A map of room ID to results. + room_groups: Dict[str, JsonDict] + # A set of event IDs to highlight. + highlights: Set[str] + + class SearchHandler: def __init__(self, hs: "HomeServer"): self.store = hs.get_datastore() @@ -100,7 +115,7 @@ async def search( """Performs a full text search for a user. Args: - user + user: The user performing the search. 
content: Search parameters batch: The next_batch parameter. Used for pagination. @@ -156,6 +171,8 @@ async def search( # Include context around each event? event_context = room_cat.get("event_context", None) + before_limit = after_limit = None + include_profile = False # Group results together? May allow clients to paginate within a # group @@ -182,6 +199,73 @@ async def search( % (set(group_keys) - {"room_id", "sender"},), ) + return await self._search( + user, + batch_group, + batch_group_key, + batch_token, + search_term, + keys, + filter_dict, + order_by, + include_state, + group_keys, + event_context, + before_limit, + after_limit, + include_profile, + ) + + async def _search( + self, + user: UserID, + batch_group: Optional[str], + batch_group_key: Optional[str], + batch_token: Optional[str], + search_term: str, + keys: List[str], + filter_dict: JsonDict, + order_by: str, + include_state: bool, + group_keys: List[str], + event_context: Optional[bool], + before_limit: Optional[int], + after_limit: Optional[int], + include_profile: bool, + ) -> JsonDict: + """Performs a full text search for a user. + + Args: + user: The user performing the search. + batch_group: Pagination information. + batch_group_key: Pagination information. + batch_token: Pagination information. + search_term: Search term to search for + keys: List of keys to search in, currently supports + "content.body", "content.name", "content.topic" + filter_dict: The JSON to build a filter out of. + order_by: How to order the results. Valid values are "rank" and "recent". + include_state: True if the state of the room at each result should + be included. + group_keys: A list of ways to group the results. Valid values are + "room_id" and "sender". + event_context: True to include contextual events around results. + before_limit: + The number of events before a result to include as context. + + Only used if event_context is True. + after_limit: + The number of events after a result to include as context. + + Only used if event_context is True. + include_profile: True if historical profile information should be + included in the event context. + + Only used if event_context is True. + + Returns: + dict to be returned to the client with results of search + """ search_filter = Filter(self.hs, filter_dict) # TODO: Search through left rooms too @@ -216,278 +300,399 @@ async def search( } } - rank_map = {} # event_id -> rank of event - allowed_events = [] - # Holds result of grouping by room, if applicable - room_groups: Dict[str, JsonDict] = {} - # Holds result of grouping by sender, if applicable - sender_group: Dict[str, JsonDict] = {} + sender_group: Optional[Dict[str, JsonDict]] - # Holds the next_batch for the entire result set if one of those exists - global_next_batch = None - - highlights = set() + if order_by == "rank": + search_result, sender_group = await self._search_by_rank( + user, room_ids, search_term, keys, search_filter + ) + # Unused return values for rank search. + global_next_batch = None + elif order_by == "recent": + search_result, global_next_batch = await self._search_by_recent( + user, + room_ids, + search_term, + keys, + search_filter, + batch_group, + batch_group_key, + batch_token, + ) + # Unused return values for recent search. + sender_group = None + else: + # We should never get here due to the guard earlier. 
+ raise NotImplementedError() - count = None + logger.info("Found %d events to return", len(search_result.allowed_events)) - if order_by == "rank": - search_result = await self.store.search_msgs(room_ids, search_term, keys) + # If client has asked for "context" for each event (i.e. some surrounding + # events and state), fetch that + if event_context is not None: + # Note that before and after limit must be set in this case. + assert before_limit is not None + assert after_limit is not None + + contexts = await self._calculate_event_contexts( + user, + search_result.allowed_events, + before_limit, + after_limit, + include_profile, + ) + else: + contexts = {} - count = search_result["count"] + # TODO: Add a limit - if search_result["highlights"]: - highlights.update(search_result["highlights"]) + state_results = {} + if include_state: + for room_id in {e.room_id for e in search_result.allowed_events}: + state = await self.state_handler.get_current_state(room_id) + state_results[room_id] = list(state.values()) - results = search_result["results"] + aggregations = None + if self._msc3666_enabled: + aggregations = await self.store.get_bundled_aggregations( + # Generate an iterable of EventBase for all the events that will be + # returned, including contextual events. + itertools.chain( + # The events_before and events_after for each context. + itertools.chain.from_iterable( + itertools.chain(context["events_before"], context["events_after"]) # type: ignore[arg-type] + for context in contexts.values() + ), + # The returned events. + search_result.allowed_events, + ), + user.to_string(), + ) - rank_map.update({r["event"].event_id: r["rank"] for r in results}) + # We're now about to serialize the events. We should not make any + # blocking calls after this. Otherwise, the 'age' will be wrong. 
- filtered_events = await search_filter.filter([r["event"] for r in results]) + time_now = self.clock.time_msec() - events = await filter_events_for_client( - self.storage, user.to_string(), filtered_events + for context in contexts.values(): + context["events_before"] = self._event_serializer.serialize_events( + context["events_before"], time_now, bundle_aggregations=aggregations # type: ignore[arg-type] + ) + context["events_after"] = self._event_serializer.serialize_events( + context["events_after"], time_now, bundle_aggregations=aggregations # type: ignore[arg-type] ) - events.sort(key=lambda e: -rank_map[e.event_id]) - allowed_events = events[: search_filter.limit] + results = [ + { + "rank": search_result.rank_map[e.event_id], + "result": self._event_serializer.serialize_event( + e, time_now, bundle_aggregations=aggregations + ), + "context": contexts.get(e.event_id, {}), + } + for e in search_result.allowed_events + ] - for e in allowed_events: - rm = room_groups.setdefault( - e.room_id, {"results": [], "order": rank_map[e.event_id]} - ) - rm["results"].append(e.event_id) + rooms_cat_res: JsonDict = { + "results": results, + "count": search_result.count, + "highlights": list(search_result.highlights), + } - s = sender_group.setdefault( - e.sender, {"results": [], "order": rank_map[e.event_id]} - ) - s["results"].append(e.event_id) + if state_results: + rooms_cat_res["state"] = { + room_id: self._event_serializer.serialize_events(state_events, time_now) + for room_id, state_events in state_results.items() + } - elif order_by == "recent": - room_events: List[EventBase] = [] - i = 0 - - pagination_token = batch_token - - # We keep looping and we keep filtering until we reach the limit - # or we run out of things. - # But only go around 5 times since otherwise synapse will be sad. - while len(room_events) < search_filter.limit and i < 5: - i += 1 - search_result = await self.store.search_rooms( - room_ids, - search_term, - keys, - search_filter.limit * 2, - pagination_token=pagination_token, - ) + if search_result.room_groups and "room_id" in group_keys: + rooms_cat_res.setdefault("groups", {})[ + "room_id" + ] = search_result.room_groups - if search_result["highlights"]: - highlights.update(search_result["highlights"]) + if sender_group and "sender" in group_keys: + rooms_cat_res.setdefault("groups", {})["sender"] = sender_group - count = search_result["count"] + if global_next_batch: + rooms_cat_res["next_batch"] = global_next_batch - results = search_result["results"] + return {"search_categories": {"room_events": rooms_cat_res}} - results_map = {r["event"].event_id: r for r in results} + async def _search_by_rank( + self, + user: UserID, + room_ids: Collection[str], + search_term: str, + keys: Iterable[str], + search_filter: Filter, + ) -> Tuple[_SearchResult, Dict[str, JsonDict]]: + """ + Performs a full text search for a user ordering by rank. - rank_map.update({r["event"].event_id: r["rank"] for r in results}) + Args: + user: The user performing the search. + room_ids: List of room ids to search in + search_term: Search term to search for + keys: List of keys to search in, currently supports + "content.body", "content.name", "content.topic" + search_filter: The event filter to use. - filtered_events = await search_filter.filter( - [r["event"] for r in results] - ) + Returns: + A tuple of: + The search results. + A map of sender ID to results. 
+ """ + rank_map = {} # event_id -> rank of event + # Holds result of grouping by room, if applicable + room_groups: Dict[str, JsonDict] = {} + # Holds result of grouping by sender, if applicable + sender_group: Dict[str, JsonDict] = {} - events = await filter_events_for_client( - self.storage, user.to_string(), filtered_events - ) + search_result = await self.store.search_msgs(room_ids, search_term, keys) - room_events.extend(events) - room_events = room_events[: search_filter.limit] + if search_result["highlights"]: + highlights = search_result["highlights"] + else: + highlights = set() - if len(results) < search_filter.limit * 2: - pagination_token = None - break - else: - pagination_token = results[-1]["pagination_token"] - - for event in room_events: - group = room_groups.setdefault(event.room_id, {"results": []}) - group["results"].append(event.event_id) - - if room_events and len(room_events) >= search_filter.limit: - last_event_id = room_events[-1].event_id - pagination_token = results_map[last_event_id]["pagination_token"] - - # We want to respect the given batch group and group keys so - # that if people blindly use the top level `next_batch` token - # it returns more from the same group (if applicable) rather - # than reverting to searching all results again. - if batch_group and batch_group_key: - global_next_batch = encode_base64( - ( - "%s\n%s\n%s" - % (batch_group, batch_group_key, pagination_token) - ).encode("ascii") - ) - else: - global_next_batch = encode_base64( - ("%s\n%s\n%s" % ("all", "", pagination_token)).encode("ascii") - ) + results = search_result["results"] - for room_id, group in room_groups.items(): - group["next_batch"] = encode_base64( - ("%s\n%s\n%s" % ("room_id", room_id, pagination_token)).encode( - "ascii" - ) - ) + # event_id -> rank of event + rank_map = {r["event"].event_id: r["rank"] for r in results} - allowed_events.extend(room_events) + filtered_events = await search_filter.filter([r["event"] for r in results]) - else: - # We should never get here due to the guard earlier. - raise NotImplementedError() + events = await filter_events_for_client( + self.storage, user.to_string(), filtered_events + ) - logger.info("Found %d events to return", len(allowed_events)) + events.sort(key=lambda e: -rank_map[e.event_id]) + allowed_events = events[: search_filter.limit] - # If client has asked for "context" for each event (i.e. 
some surrounding - # events and state), fetch that - if event_context is not None: - now_token = self.hs.get_event_sources().get_current_token() + for e in allowed_events: + rm = room_groups.setdefault( + e.room_id, {"results": [], "order": rank_map[e.event_id]} + ) + rm["results"].append(e.event_id) - contexts = {} - for event in allowed_events: - res = await self.store.get_events_around( - event.room_id, event.event_id, before_limit, after_limit - ) + s = sender_group.setdefault( + e.sender, {"results": [], "order": rank_map[e.event_id]} + ) + s["results"].append(e.event_id) + + return ( + _SearchResult( + search_result["count"], + rank_map, + allowed_events, + room_groups, + highlights, + ), + sender_group, + ) - logger.info( - "Context for search returned %d and %d events", - len(res.events_before), - len(res.events_after), - ) + async def _search_by_recent( + self, + user: UserID, + room_ids: Collection[str], + search_term: str, + keys: Iterable[str], + search_filter: Filter, + batch_group: Optional[str], + batch_group_key: Optional[str], + batch_token: Optional[str], + ) -> Tuple[_SearchResult, Optional[str]]: + """ + Performs a full text search for a user ordering by recent. - events_before = await filter_events_for_client( - self.storage, user.to_string(), res.events_before - ) + Args: + user: The user performing the search. + room_ids: List of room ids to search in + search_term: Search term to search for + keys: List of keys to search in, currently supports + "content.body", "content.name", "content.topic" + search_filter: The event filter to use. + batch_group: Pagination information. + batch_group_key: Pagination information. + batch_token: Pagination information. - events_after = await filter_events_for_client( - self.storage, user.to_string(), res.events_after - ) + Returns: + A tuple of: + The search results. + Optionally, a pagination token. + """ + rank_map = {} # event_id -> rank of event + # Holds result of grouping by room, if applicable + room_groups: Dict[str, JsonDict] = {} - context = { - "events_before": events_before, - "events_after": events_after, - "start": await now_token.copy_and_replace( - "room_key", res.start - ).to_string(self.store), - "end": await now_token.copy_and_replace( - "room_key", res.end - ).to_string(self.store), - } + # Holds the next_batch for the entire result set if one of those exists + global_next_batch = None - if include_profile: - senders = { - ev.sender - for ev in itertools.chain(events_before, [event], events_after) - } + highlights = set() - if events_after: - last_event_id = events_after[-1].event_id - else: - last_event_id = event.event_id + room_events: List[EventBase] = [] + i = 0 + + pagination_token = batch_token + + # We keep looping and we keep filtering until we reach the limit + # or we run out of things. + # But only go around 5 times since otherwise synapse will be sad. 
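+            # Each pass asks the store for `search_filter.limit * 2` results
+            # so that, even after visibility filtering drops some events, we
+            # have a good chance of filling the page without another pass.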
+ while len(room_events) < search_filter.limit and i < 5: + i += 1 + search_result = await self.store.search_rooms( + room_ids, + search_term, + keys, + search_filter.limit * 2, + pagination_token=pagination_token, + ) - state_filter = StateFilter.from_types( - [(EventTypes.Member, sender) for sender in senders] - ) + if search_result["highlights"]: + highlights.update(search_result["highlights"]) + + count = search_result["count"] + + results = search_result["results"] + + results_map = {r["event"].event_id: r for r in results} + + rank_map.update({r["event"].event_id: r["rank"] for r in results}) + + filtered_events = await search_filter.filter([r["event"] for r in results]) - state = await self.state_store.get_state_for_event( - last_event_id, state_filter + events = await filter_events_for_client( + self.storage, user.to_string(), filtered_events + ) + + room_events.extend(events) + room_events = room_events[: search_filter.limit] + + if len(results) < search_filter.limit * 2: + break + else: + pagination_token = results[-1]["pagination_token"] + + for event in room_events: + group = room_groups.setdefault(event.room_id, {"results": []}) + group["results"].append(event.event_id) + + if room_events and len(room_events) >= search_filter.limit: + last_event_id = room_events[-1].event_id + pagination_token = results_map[last_event_id]["pagination_token"] + + # We want to respect the given batch group and group keys so + # that if people blindly use the top level `next_batch` token + # it returns more from the same group (if applicable) rather + # than reverting to searching all results again. + if batch_group and batch_group_key: + global_next_batch = encode_base64( + ( + "%s\n%s\n%s" % (batch_group, batch_group_key, pagination_token) + ).encode("ascii") + ) + else: + global_next_batch = encode_base64( + ("%s\n%s\n%s" % ("all", "", pagination_token)).encode("ascii") + ) + + for room_id, group in room_groups.items(): + group["next_batch"] = encode_base64( + ("%s\n%s\n%s" % ("room_id", room_id, pagination_token)).encode( + "ascii" ) + ) - context["profile_info"] = { - s.state_key: { - "displayname": s.content.get("displayname", None), - "avatar_url": s.content.get("avatar_url", None), - } - for s in state.values() - if s.type == EventTypes.Member and s.state_key in senders - } + return ( + _SearchResult(count, rank_map, room_events, room_groups, highlights), + global_next_batch, + ) - contexts[event.event_id] = context - else: - contexts = {} + async def _calculate_event_contexts( + self, + user: UserID, + allowed_events: List[EventBase], + before_limit: int, + after_limit: int, + include_profile: bool, + ) -> Dict[str, JsonDict]: + """ + Calculates the contextual events for any search results. - # TODO: Add a limit + Args: + user: The user performing the search. + allowed_events: The search results. + before_limit: + The number of events before a result to include as context. + after_limit: + The number of events after a result to include as context. + include_profile: True if historical profile information should be + included in the event context. - time_now = self.clock.time_msec() + Returns: + A map of event ID to contextual information. + """ + now_token = self.hs.get_event_sources().get_current_token() - aggregations = None - if self._msc3666_enabled: - aggregations = await self.store.get_bundled_aggregations( - # Generate an iterable of EventBase for all the events that will be - # returned, including contextual events. 
- itertools.chain( - # The events_before and events_after for each context. - itertools.chain.from_iterable( - itertools.chain(context["events_before"], context["events_after"]) # type: ignore[arg-type] - for context in contexts.values() - ), - # The returned events. - allowed_events, - ), - user.to_string(), + contexts = {} + for event in allowed_events: + res = await self.store.get_events_around( + event.room_id, event.event_id, before_limit, after_limit ) - for context in contexts.values(): - context["events_before"] = self._event_serializer.serialize_events( - context["events_before"], time_now, bundle_aggregations=aggregations # type: ignore[arg-type] + logger.info( + "Context for search returned %d and %d events", + len(res.events_before), + len(res.events_after), ) - context["events_after"] = self._event_serializer.serialize_events( - context["events_after"], time_now, bundle_aggregations=aggregations # type: ignore[arg-type] + + events_before = await filter_events_for_client( + self.storage, user.to_string(), res.events_before ) - state_results = {} - if include_state: - for room_id in {e.room_id for e in allowed_events}: - state = await self.state_handler.get_current_state(room_id) - state_results[room_id] = list(state.values()) + events_after = await filter_events_for_client( + self.storage, user.to_string(), res.events_after + ) - # We're now about to serialize the events. We should not make any - # blocking calls after this. Otherwise the 'age' will be wrong + context = { + "events_before": events_before, + "events_after": events_after, + "start": await now_token.copy_and_replace( + "room_key", res.start + ).to_string(self.store), + "end": await now_token.copy_and_replace("room_key", res.end).to_string( + self.store + ), + } - results = [] - for e in allowed_events: - results.append( - { - "rank": rank_map[e.event_id], - "result": self._event_serializer.serialize_event( - e, time_now, bundle_aggregations=aggregations - ), - "context": contexts.get(e.event_id, {}), + if include_profile: + senders = { + ev.sender + for ev in itertools.chain(events_before, [event], events_after) } - ) - rooms_cat_res = { - "results": results, - "count": count, - "highlights": list(highlights), - } + if events_after: + last_event_id = events_after[-1].event_id + else: + last_event_id = event.event_id - if state_results: - s = {} - for room_id, state_events in state_results.items(): - s[room_id] = self._event_serializer.serialize_events( - state_events, time_now + state_filter = StateFilter.from_types( + [(EventTypes.Member, sender) for sender in senders] ) - rooms_cat_res["state"] = s - - if room_groups and "room_id" in group_keys: - rooms_cat_res.setdefault("groups", {})["room_id"] = room_groups + state = await self.state_store.get_state_for_event( + last_event_id, state_filter + ) - if sender_group and "sender" in group_keys: - rooms_cat_res.setdefault("groups", {})["sender"] = sender_group + context["profile_info"] = { + s.state_key: { + "displayname": s.content.get("displayname", None), + "avatar_url": s.content.get("avatar_url", None), + } + for s in state.values() + if s.type == EventTypes.Member and s.state_key in senders + } - if global_next_batch: - rooms_cat_res["next_batch"] = global_next_batch + contexts[event.event_id] = context - return {"search_categories": {"room_events": rooms_cat_res}} + return contexts diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index 2d085a5764a7..acea300ed343 100644 --- 
a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -28,6 +28,7 @@ ) from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.storage.engines import PostgresEngine, Sqlite3Engine +from synapse.types import JsonDict if TYPE_CHECKING: from synapse.server import HomeServer @@ -381,17 +382,19 @@ def __init__( ): super().__init__(database, db_conn, hs) - async def search_msgs(self, room_ids, search_term, keys): + async def search_msgs( + self, room_ids: Collection[str], search_term: str, keys: Iterable[str] + ) -> JsonDict: """Performs a full text search over events with given keys. Args: - room_ids (list): List of room ids to search in - search_term (str): Search term to search for - keys (list): List of keys to search in, currently supports + room_ids: List of room ids to search in + search_term: Search term to search for + keys: List of keys to search in, currently supports "content.body", "content.name", "content.topic" Returns: - list of dicts + Dictionary of results """ clauses = [] @@ -499,10 +502,10 @@ async def search_rooms( self, room_ids: Collection[str], search_term: str, - keys: List[str], + keys: Iterable[str], limit, pagination_token: Optional[str] = None, - ) -> List[dict]: + ) -> JsonDict: """Performs a full text search over events with given keys. Args: From 5598556b776e3c8cc93b8ede4af9f2f5b21ff935 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 15 Feb 2022 13:59:15 +0000 Subject: [PATCH 248/474] Docker: remove `VOLUME` directive (#11997) The driver for this is to stop Complement complaining about it, but as far as I can tell it was pointless and needed to go away anyway. I'm a bit unclear about what exactly VOLUME does, but I think what it means is that, if you don't override it with an explicit -v argument, then docker run will create a temporary volume, and copy things into it. The temporary volume is then deleted when the container finishes. That only sounds useful if your image has something to copy into it (otherwise you may as well just use the default root filesystem), and our image notably doesn't copy anything into /data. So... this wasn't doing anything, except annoying Complement? --- changelog.d/11997.docker | 1 + docker/Dockerfile | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-) create mode 100644 changelog.d/11997.docker diff --git a/changelog.d/11997.docker b/changelog.d/11997.docker new file mode 100644 index 000000000000..1b3271457eee --- /dev/null +++ b/changelog.d/11997.docker @@ -0,0 +1 @@ +The docker image no longer automatically creates a temporary volume at `/data`. This is not expected to affect normal usage. diff --git a/docker/Dockerfile b/docker/Dockerfile index 306f75ae56cc..e4c1c19b86a7 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -98,8 +98,6 @@ COPY --from=builder /install /usr/local COPY ./docker/start.py /start.py COPY ./docker/conf /conf -VOLUME ["/data"] - EXPOSE 8008/tcp 8009/tcp 8448/tcp ENTRYPOINT ["/start.py"] From dc9fe61050deb5eda59b161e4c0404ff36e3ac59 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 15 Feb 2022 14:26:28 +0000 Subject: [PATCH 249/474] Fix incorrect `get_rooms_for_user` for remote user (#11999) When the server leaves a room the `get_rooms_for_user` cache is not correctly invalidated for the remote users in the room. 
This means that subsequent calls to `get_rooms_for_user` for the remote users would incorrectly include the room (it shouldn't be included because the server no longer knows anything about the room). --- changelog.d/11999.bugfix | 1 + synapse/storage/databases/main/events.py | 27 +++--- tests/storage/test_events.py | 107 +++++++++++++++++++++++ 3 files changed, 124 insertions(+), 11 deletions(-) create mode 100644 changelog.d/11999.bugfix diff --git a/changelog.d/11999.bugfix b/changelog.d/11999.bugfix new file mode 100644 index 000000000000..fd8409590002 --- /dev/null +++ b/changelog.d/11999.bugfix @@ -0,0 +1 @@ +Fix long standing bug where `get_rooms_for_user` was not correctly invalidated for remote users when the server left a room. diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 5246fccad57a..a1d7a9b41300 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -975,6 +975,17 @@ def _update_current_state_txn( to_delete = delta_state.to_delete to_insert = delta_state.to_insert + # Figure out the changes of membership to invalidate the + # `get_rooms_for_user` cache. + # We find out which membership events we may have deleted + # and which we have added, then we invalidate the caches for all + # those users. + members_changed = { + state_key + for ev_type, state_key in itertools.chain(to_delete, to_insert) + if ev_type == EventTypes.Member + } + if delta_state.no_longer_in_room: # Server is no longer in the room so we delete the room from # current_state_events, being careful we've already updated the @@ -993,6 +1004,11 @@ def _update_current_state_txn( """ txn.execute(sql, (stream_id, self._instance_name, room_id)) + # We also want to invalidate the membership caches for users + # that were in the room. + users_in_room = self.store.get_users_in_room_txn(txn, room_id) + members_changed.update(users_in_room) + self.db_pool.simple_delete_txn( txn, table="current_state_events", @@ -1102,17 +1118,6 @@ def _update_current_state_txn( # Invalidate the various caches - # Figure out the changes of membership to invalidate the - # `get_rooms_for_user` cache. - # We find out which membership events we may have deleted - # and which we have added, then we invalidate the caches for all - # those users. - members_changed = { - state_key - for ev_type, state_key in itertools.chain(to_delete, to_insert) - if ev_type == EventTypes.Member - } - for member in members_changed: txn.call_after( self.store.get_rooms_for_user_with_stream_ordering.invalidate, diff --git a/tests/storage/test_events.py b/tests/storage/test_events.py index f462a8b1c721..a8639d8f8216 100644 --- a/tests/storage/test_events.py +++ b/tests/storage/test_events.py @@ -329,3 +329,110 @@ def test_do_not_prune_gap_if_not_dummy(self): # Check the new extremity is just the new remote event. self.assert_extremities([local_message_event_id, remote_event_2.event_id]) + + +class InvalideUsersInRoomCacheTestCase(HomeserverTestCase): + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + ] + + def prepare(self, reactor, clock, homeserver): + self.state = self.hs.get_state_handler() + self.persistence = self.hs.get_storage().persistence + self.store = self.hs.get_datastore() + + def test_remote_user_rooms_cache_invalidated(self): + """Test that if the server leaves a room the `get_rooms_for_user` cache + is invalidated for remote users. + """ + + # Set up a room with a local and remote user in it. 
+        user_id = self.register_user("user", "pass")
+        token = self.login("user", "pass")
+
+        room_id = self.helper.create_room_as(
+            "user", room_version=RoomVersions.V6.identifier, tok=token
+        )
+
+        body = self.helper.send(room_id, body="Test", tok=token)
+        local_message_event_id = body["event_id"]
+
+        # Fudge a join event for a remote user.
+        remote_user = "@user:other"
+        remote_event_1 = event_from_pdu_json(
+            {
+                "type": EventTypes.Member,
+                "state_key": remote_user,
+                "content": {"membership": Membership.JOIN},
+                "room_id": room_id,
+                "sender": remote_user,
+                "depth": 5,
+                "prev_events": [local_message_event_id],
+                "auth_events": [],
+                "origin_server_ts": self.clock.time_msec(),
+            },
+            RoomVersions.V6,
+        )
+
+        context = self.get_success(self.state.compute_event_context(remote_event_1))
+        self.get_success(self.persistence.persist_event(remote_event_1, context))
+
+        # Call `get_rooms_for_user` to add the remote user to the cache
+        rooms = self.get_success(self.store.get_rooms_for_user(remote_user))
+        self.assertEqual(set(rooms), {room_id})
+
+        # Now we have the local server leave the room, and check that calling
+        # `get_rooms_for_user` for the remote user no longer includes the room.
+        self.helper.leave(room_id, user_id, tok=token)
+
+        rooms = self.get_success(self.store.get_rooms_for_user(remote_user))
+        self.assertEqual(set(rooms), set())
+
+    def test_room_remote_user_cache_invalidated(self):
+        """Test that if the server leaves a room the `get_users_in_room` cache
+        is invalidated for remote users.
+        """
+
+        # Set up a room with a local and remote user in it.
+        user_id = self.register_user("user", "pass")
+        token = self.login("user", "pass")
+
+        room_id = self.helper.create_room_as(
+            "user", room_version=RoomVersions.V6.identifier, tok=token
+        )
+
+        body = self.helper.send(room_id, body="Test", tok=token)
+        local_message_event_id = body["event_id"]
+
+        # Fudge a join event for a remote user.
+        remote_user = "@user:other"
+        remote_event_1 = event_from_pdu_json(
+            {
+                "type": EventTypes.Member,
+                "state_key": remote_user,
+                "content": {"membership": Membership.JOIN},
+                "room_id": room_id,
+                "sender": remote_user,
+                "depth": 5,
+                "prev_events": [local_message_event_id],
+                "auth_events": [],
+                "origin_server_ts": self.clock.time_msec(),
+            },
+            RoomVersions.V6,
+        )
+
+        context = self.get_success(self.state.compute_event_context(remote_event_1))
+        self.get_success(self.persistence.persist_event(remote_event_1, context))
+
+        # Call `get_users_in_room` to add the remote user to the cache
+        users = self.get_success(self.store.get_users_in_room(room_id))
+        self.assertEqual(set(users), {user_id, remote_user})
+
+        # Now we have the local server leave the room, and check that calling
+        # `get_users_in_room` for the room no longer includes the remote user.
+        self.helper.leave(room_id, user_id, tok=token)
+
+        users = self.get_success(self.store.get_users_in_room(room_id))
+        self.assertEqual(users, [])

From 0dbbe33a65c17cdb1ad41d6109b5629029dce886 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 15 Feb 2022 14:31:04 +0000
Subject: [PATCH 250/474] Track cache invalidations (#12000)

Currently we only track evictions due to size or time constraints.
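To illustrate the intended accounting, here is a minimal, self-contained
sketch of a cache that also counts explicit invalidations as evictions. The
class and attribute names are invented for the example; Synapse's real
implementation lives in `synapse.util.caches`:

```python
from collections import Counter, OrderedDict
from enum import Enum, auto


class EvictionReason(Enum):
    SIZE = auto()
    TIME = auto()
    INVALIDATION = auto()


class TrackedCache:
    """A toy LRU cache that counts evictions by reason."""

    def __init__(self, max_size: int) -> None:
        self._data: OrderedDict = OrderedDict()
        self._max_size = max_size
        self.evictions: Counter = Counter()

    def set(self, key, value) -> None:
        self._data[key] = value
        self._data.move_to_end(key)
        while len(self._data) > self._max_size:
            # Evicted to stay under the size limit.
            self._data.popitem(last=False)
            self.evictions[EvictionReason.SIZE] += 1

    def invalidate(self, key) -> None:
        # An explicit removal is an eviction too, and is now tracked.
        if self._data.pop(key, None) is not None:
            self.evictions[EvictionReason.INVALIDATION] += 1


cache = TrackedCache(max_size=2)
cache.set("a", 1)
cache.set("b", 2)
cache.invalidate("a")
cache.set("c", 3)
cache.set("d", 4)  # pushes "b" out on size grounds
assert cache.evictions[EvictionReason.INVALIDATION] == 1
assert cache.evictions[EvictionReason.SIZE] == 1
```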
--- changelog.d/12000.feature | 1 + synapse/util/caches/__init__.py | 1 + synapse/util/caches/expiringcache.py | 5 +++++ synapse/util/caches/lrucache.py | 4 +++- 4 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12000.feature diff --git a/changelog.d/12000.feature b/changelog.d/12000.feature new file mode 100644 index 000000000000..246cc87f0bde --- /dev/null +++ b/changelog.d/12000.feature @@ -0,0 +1 @@ +Track cache invalidations in Prometheus metrics, as already happens for cache eviction based on size or time. diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 15debd6c460f..1cbc180eda72 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -56,6 +56,7 @@ class EvictionReason(Enum): size = auto() time = auto() + invalidation = auto() @attr.s(slots=True, auto_attribs=True) diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 67ee4c693bc5..c6a5d0dfc0a9 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -133,6 +133,11 @@ def pop(self, key: KT, default: T = SENTINEL) -> Union[VT, T]: raise KeyError(key) return default + if self.iterable: + self.metrics.inc_evictions(EvictionReason.invalidation, len(value.value)) + else: + self.metrics.inc_evictions(EvictionReason.invalidation) + return value.value def __contains__(self, key: KT) -> bool: diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 7548b38548bd..45ff0de638a4 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -560,8 +560,10 @@ def cache_pop(key: KT, default: T) -> Union[T, VT]: def cache_pop(key: KT, default: Optional[T] = None) -> Union[None, T, VT]: node = cache.get(key, None) if node: - delete_node(node) + evicted_len = delete_node(node) cache.pop(node.key, None) + if metrics: + metrics.inc_evictions(EvictionReason.invalidation, evicted_len) return node.value else: return default From bab2394aa9b514f51feb2c378a39d92143e9cec8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 15 Feb 2022 14:33:28 +0000 Subject: [PATCH 251/474] `_auth_and_persist_outliers`: drop events we have already seen (#11994) We already have two copies of this code, in 2/3 of the callers of `_auth_and_persist_outliers`. Before I add a third, let's push it down. --- changelog.d/11994.misc | 1 + synapse/handlers/federation_event.py | 44 +++++++++++++--------------- 2 files changed, 21 insertions(+), 24 deletions(-) create mode 100644 changelog.d/11994.misc diff --git a/changelog.d/11994.misc b/changelog.d/11994.misc new file mode 100644 index 000000000000..d64297dd78f0 --- /dev/null +++ b/changelog.d/11994.misc @@ -0,0 +1 @@ +Move common deduplication code down into `_auth_and_persist_outliers`. diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 9edc7369d6fe..6dc27a38f384 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -419,8 +419,6 @@ async def process_remote_join( Raises: SynapseError if the response is in some way invalid. 
""" - event_map = {e.event_id: e for e in itertools.chain(auth_events, state)} - create_event = None for e in auth_events: if (e.type, e.state_key) == (EventTypes.Create, ""): @@ -439,11 +437,6 @@ async def process_remote_join( if room_version.identifier != room_version_id: raise SynapseError(400, "Room version mismatch") - # filter out any events we have already seen - seen_remotes = await self._store.have_seen_events(room_id, event_map.keys()) - for s in seen_remotes: - event_map.pop(s, None) - # persist the auth chain and state events. # # any invalid events here will be marked as rejected, and we'll carry on. @@ -455,7 +448,9 @@ async def process_remote_join( # signatures right now doesn't mean that we will *never* be able to, so it # is premature to reject them. # - await self._auth_and_persist_outliers(room_id, event_map.values()) + await self._auth_and_persist_outliers( + room_id, itertools.chain(auth_events, state) + ) # and now persist the join event itself. logger.info("Peristing join-via-remote %s", event) @@ -1245,6 +1240,16 @@ async def _auth_and_persist_outliers( """ event_map = {event.event_id: event for event in events} + # filter out any events we have already seen. This might happen because + # the events were eagerly pushed to us (eg, during a room join), or because + # another thread has raced against us since we decided to request the event. + # + # This is just an optimisation, so it doesn't need to be watertight - the event + # persister does another round of deduplication. + seen_remotes = await self._store.have_seen_events(room_id, event_map.keys()) + for s in seen_remotes: + event_map.pop(s, None) + # XXX: it might be possible to kick this process off in parallel with fetching # the events. while event_map: @@ -1717,31 +1722,22 @@ async def _get_remote_auth_chain_for_event( event_id: the event for which we are lacking auth events """ try: - remote_event_map = { - e.event_id: e - for e in await self._federation_client.get_event_auth( - destination, room_id, event_id - ) - } + remote_events = await self._federation_client.get_event_auth( + destination, room_id, event_id + ) + except RequestSendFailed as e1: # The other side isn't around or doesn't implement the # endpoint, so lets just bail out. logger.info("Failed to get event auth from remote: %s", e1) return - logger.info("/event_auth returned %i events", len(remote_event_map)) + logger.info("/event_auth returned %i events", len(remote_events)) # `event` may be returned, but we should not yet process it. - remote_event_map.pop(event_id, None) - - # nor should we reprocess any events we have already seen. - seen_remotes = await self._store.have_seen_events( - room_id, remote_event_map.keys() - ) - for s in seen_remotes: - remote_event_map.pop(s, None) + remote_auth_events = (e for e in remote_events if e.event_id != event_id) - await self._auth_and_persist_outliers(room_id, remote_event_map.values()) + await self._auth_and_persist_outliers(room_id, remote_auth_events) async def _update_context_for_auth_events( self, event: EventBase, context: EventContext, auth_events: StateMap[EventBase] From 2b5643b3afa4cddc7809c8b51fe813d2f0987235 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 15 Feb 2022 15:01:00 +0000 Subject: [PATCH 252/474] Optimise calculating device_list changes in `/sync`. (#11974) For users with large accounts it is inefficient to calculate the set of users they share a room with (and takes a lot of space in the cache). 
Instead we can look at users whose devices have changed since the last sync and check if they share a room with the syncing user. --- changelog.d/11974.misc | 1 + synapse/handlers/sync.py | 68 +++++++++++++++----- synapse/storage/databases/main/devices.py | 10 +++ synapse/storage/databases/main/roommember.py | 62 ++++++++++++++++++ 4 files changed, 126 insertions(+), 15 deletions(-) create mode 100644 changelog.d/11974.misc diff --git a/changelog.d/11974.misc b/changelog.d/11974.misc new file mode 100644 index 000000000000..1debad2361ee --- /dev/null +++ b/changelog.d/11974.misc @@ -0,0 +1 @@ +Optimise calculating device_list changes in `/sync`. diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index aa9a76f8a968..e6050cbce6c8 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1289,23 +1289,54 @@ async def _generate_sync_entry_for_device_list( # room with by looking at all users that have left a room plus users # that were in a room we've left. - users_who_share_room = await self.store.get_users_who_share_room_with_user( - user_id - ) - - # Always tell the user about their own devices. We check as the user - # ID is almost certainly already included (unless they're not in any - # rooms) and taking a copy of the set is relatively expensive. - if user_id not in users_who_share_room: - users_who_share_room = set(users_who_share_room) - users_who_share_room.add(user_id) + users_that_have_changed = set() - tracked_users = users_who_share_room + joined_rooms = sync_result_builder.joined_room_ids - # Step 1a, check for changes in devices of users we share a room with - users_that_have_changed = await self.store.get_users_whose_devices_changed( - since_token.device_list_key, tracked_users + # Step 1a, check for changes in devices of users we share a room + # with + # + # We do this in two different ways depending on what we have cached. + # If we already have a list of all the user that have changed since + # the last sync then it's likely more efficient to compare the rooms + # they're in with the rooms the syncing user is in. + # + # If we don't have that info cached then we get all the users that + # share a room with our user and check if those users have changed. + changed_users = self.store.get_cached_device_list_changes( + since_token.device_list_key ) + if changed_users is not None: + result = await self.store.get_rooms_for_users_with_stream_ordering( + changed_users + ) + + for changed_user_id, entries in result.items(): + # Check if the changed user shares any rooms with the user, + # or if the changed user is the syncing user (as we always + # want to include device list updates of their own devices). + if user_id == changed_user_id or any( + e.room_id in joined_rooms for e in entries + ): + users_that_have_changed.add(changed_user_id) + else: + users_who_share_room = ( + await self.store.get_users_who_share_room_with_user(user_id) + ) + + # Always tell the user about their own devices. We check as the user + # ID is almost certainly already included (unless they're not in any + # rooms) and taking a copy of the set is relatively expensive. 
+ if user_id not in users_who_share_room: + users_who_share_room = set(users_who_share_room) + users_who_share_room.add(user_id) + + tracked_users = users_who_share_room + users_that_have_changed = ( + await self.store.get_users_whose_devices_changed( + since_token.device_list_key, tracked_users + ) + ) # Step 1b, check for newly joined rooms for room_id in newly_joined_rooms: @@ -1329,7 +1360,14 @@ async def _generate_sync_entry_for_device_list( newly_left_users.update(left_users) # Remove any users that we still share a room with. - newly_left_users -= users_who_share_room + left_users_rooms = ( + await self.store.get_rooms_for_users_with_stream_ordering( + newly_left_users + ) + ) + for user_id, entries in left_users_rooms.items(): + if any(e.room_id in joined_rooms for e in entries): + newly_left_users.discard(user_id) return DeviceLists(changed=users_that_have_changed, left=newly_left_users) else: diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 8d845fe9516f..3b3a089b7627 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -670,6 +670,16 @@ async def get_cached_devices_for_user(self, user_id: str) -> Dict[str, JsonDict] device["device_id"]: db_to_json(device["content"]) for device in devices } + def get_cached_device_list_changes( + self, + from_key: int, + ) -> Optional[Set[str]]: + """Get set of users whose devices have changed since `from_key`, or None + if that information is not in our cache. + """ + + return self._device_list_stream_cache.get_all_entities_changed(from_key) + async def get_users_whose_devices_changed( self, from_key: int, user_ids: Iterable[str] ) -> Set[str]: diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 4489732fdac4..e48ec5f495af 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -504,6 +504,68 @@ def _get_rooms_for_user_with_stream_ordering_txn( for room_id, instance, stream_id in txn ) + @cachedList( + cached_method_name="get_rooms_for_user_with_stream_ordering", + list_name="user_ids", + ) + async def get_rooms_for_users_with_stream_ordering( + self, user_ids: Collection[str] + ) -> Dict[str, FrozenSet[GetRoomsForUserWithStreamOrdering]]: + """A batched version of `get_rooms_for_user_with_stream_ordering`. + + Returns: + Map from user_id to set of rooms that is currently in. + """ + return await self.db_pool.runInteraction( + "get_rooms_for_users_with_stream_ordering", + self._get_rooms_for_users_with_stream_ordering_txn, + user_ids, + ) + + def _get_rooms_for_users_with_stream_ordering_txn( + self, txn, user_ids: Collection[str] + ) -> Dict[str, FrozenSet[GetRoomsForUserWithStreamOrdering]]: + + clause, args = make_in_list_sql_clause( + self.database_engine, + "c.state_key", + user_ids, + ) + + if self._current_state_events_membership_up_to_date: + sql = f""" + SELECT c.state_key, room_id, e.instance_name, e.stream_ordering + FROM current_state_events AS c + INNER JOIN events AS e USING (room_id, event_id) + WHERE + c.type = 'm.room.member' + AND c.membership = ? + AND {clause} + """ + else: + sql = f""" + SELECT c.state_key, room_id, e.instance_name, e.stream_ordering + FROM current_state_events AS c + INNER JOIN room_memberships AS m USING (room_id, event_id) + INNER JOIN events AS e USING (room_id, event_id) + WHERE + c.type = 'm.room.member' + AND m.membership = ? 
+                    AND {clause}
+            """
+
+        txn.execute(sql, [Membership.JOIN] + args)
+
+        result = {user_id: set() for user_id in user_ids}
+        for user_id, room_id, instance, stream_id in txn:
+            result[user_id].add(
+                GetRoomsForUserWithStreamOrdering(
+                    room_id, PersistedEventPosition(instance, stream_id)
+                )
+            )
+
+        return {user_id: frozenset(v) for user_id, v in result.items()}
+
     async def get_users_server_still_shares_room_with(
         self, user_ids: Collection[str]
     ) -> Set[str]:

From 130fd45393d39425a4aa35a388b1e5a74ba6f6f4 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Wed, 16 Feb 2022 12:16:48 +0100
Subject: [PATCH 253/474] Limit concurrent AS joins (#11996)

Initially introduced in matrix-org-hotfixes by e5537cf (and tweaked by
later commits).

Fixes #11995

See also #4826
---
 changelog.d/11996.misc          |  1 +
 synapse/handlers/room_member.py | 46 +++++++++++++++++++--------------
 2 files changed, 28 insertions(+), 19 deletions(-)
 create mode 100644 changelog.d/11996.misc

diff --git a/changelog.d/11996.misc b/changelog.d/11996.misc
new file mode 100644
index 000000000000..6c675fd1931c
--- /dev/null
+++ b/changelog.d/11996.misc
@@ -0,0 +1 @@
+Limit concurrent joins from application services.
\ No newline at end of file
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index bf1a47efb020..b2adc0f48be9 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -82,6 +82,7 @@ def __init__(self, hs: "HomeServer"):
         self.event_auth_handler = hs.get_event_auth_handler()
 
         self.member_linearizer: Linearizer = Linearizer(name="member")
+        self.member_as_limiter = Linearizer(max_count=10, name="member_as_limiter")
 
         self.clock = hs.get_clock()
         self.spam_checker = hs.get_spam_checker()
@@ -500,25 +501,32 @@ async def update_membership(
 
         key = (room_id,)
 
-        with (await self.member_linearizer.queue(key)):
-            result = await self.update_membership_locked(
-                requester,
-                target,
-                room_id,
-                action,
-                txn_id=txn_id,
-                remote_room_hosts=remote_room_hosts,
-                third_party_signed=third_party_signed,
-                ratelimit=ratelimit,
-                content=content,
-                new_room=new_room,
-                require_consent=require_consent,
-                outlier=outlier,
-                historical=historical,
-                allow_no_prev_events=allow_no_prev_events,
-                prev_event_ids=prev_event_ids,
-                auth_event_ids=auth_event_ids,
-            )
+        as_id = object()
+        if requester.app_service:
+            as_id = requester.app_service.id
+
+        # We first linearise by the application service (to try to limit concurrent joins
+        # by application services), and then by room ID.
+        with (await self.member_as_limiter.queue(as_id)):
+            with (await self.member_linearizer.queue(key)):
+                result = await self.update_membership_locked(
+                    requester,
+                    target,
+                    room_id,
+                    action,
+                    txn_id=txn_id,
+                    remote_room_hosts=remote_room_hosts,
+                    third_party_signed=third_party_signed,
+                    ratelimit=ratelimit,
+                    content=content,
+                    new_room=new_room,
+                    require_consent=require_consent,
+                    outlier=outlier,
+                    historical=historical,
+                    allow_no_prev_events=allow_no_prev_events,
+                    prev_event_ids=prev_event_ids,
+                    auth_event_ids=auth_event_ids,
+                )

         return result

From 7a92d68441f8182c6eac467ff2c23e71112659f0 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Wed, 16 Feb 2022 06:53:21 -0500
Subject: [PATCH 254/474] Fix a typo in a comment.
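As an aside, the two-level limiting introduced by #11996 above can be
sketched with plain `asyncio` primitives. The helper names here are invented
for illustration; in Synapse a single `Linearizer` class plays both roles via
its `max_count` argument:

```python
import asyncio
from collections import defaultdict
from typing import DefaultDict

# Per-application-service semaphores cap concurrency at 10, mirroring
# Linearizer(max_count=10); per-room locks fully serialise membership
# changes within a room.
as_limiters: DefaultDict[object, asyncio.Semaphore] = defaultdict(
    lambda: asyncio.Semaphore(10)
)
room_locks: DefaultDict[str, asyncio.Lock] = defaultdict(asyncio.Lock)


async def update_membership(as_id: object, room_id: str) -> None:
    # Non-appservice requesters pass a fresh `object()` as `as_id`, so they
    # never contend with each other on the first level.
    async with as_limiters[as_id]:
        async with room_locks[room_id]:
            await asyncio.sleep(0)  # stand-in for the real membership update
```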
---
 synapse/storage/databases/main/relations.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py
index 5582029f9f50..36aa1092f602 100644
--- a/synapse/storage/databases/main/relations.py
+++ b/synapse/storage/databases/main/relations.py
@@ -489,7 +489,7 @@ def _get_thread_summaries_txn(
         # TODO Should this only allow m.room.message events.
         if isinstance(self.database_engine, PostgresEngine):
             # The `DISTINCT ON` clause will pick the *first* row it encounters,
-            # so ordering by topologica ordering + stream ordering desc will
+            # so ordering by topological ordering + stream ordering desc will
             # ensure we get the latest event in the thread.
             sql = """
                 SELECT DISTINCT ON (parent.event_id) parent.event_id, child.event_id FROM events AS child

From 73fc4887834b33afaf23ff48a68772f8ef9b924c Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Wed, 16 Feb 2022 12:25:43 +0000
Subject: [PATCH 255/474] Explain the meaning of spam checker callbacks' return values (#12003)

Co-authored-by: Patrick Cloke
---
 changelog.d/12003.doc                  |  1 +
 docs/modules/spam_checker_callbacks.md | 40 +++++++++++++++++---------
 2 files changed, 28 insertions(+), 13 deletions(-)
 create mode 100644 changelog.d/12003.doc

diff --git a/changelog.d/12003.doc b/changelog.d/12003.doc
new file mode 100644
index 000000000000..1ac8163559f2
--- /dev/null
+++ b/changelog.d/12003.doc
@@ -0,0 +1 @@
+Explain the meaning of spam checker callbacks' return values.
diff --git a/docs/modules/spam_checker_callbacks.md b/docs/modules/spam_checker_callbacks.md
index 2eb9032f4136..2b672b78f9ae 100644
--- a/docs/modules/spam_checker_callbacks.md
+++ b/docs/modules/spam_checker_callbacks.md
@@ -16,10 +16,12 @@ _First introduced in Synapse v1.37.0_
 async def check_event_for_spam(event: "synapse.events.EventBase") -> Union[bool, str]
 ```
 
-Called when receiving an event from a client or via federation. The module can return
-either a `bool` to indicate whether the event must be rejected because of spam, or a `str`
-to indicate the event must be rejected because of spam and to give a rejection reason to
-forward to clients.
+Called when receiving an event from a client or via federation. The callback must return
+either:
+- an error message string, to indicate the event must be rejected because of spam and
+  give a rejection reason to forward to clients;
+- the boolean `True`, to indicate that the event is spammy, but not provide further details; or
+- the boolean `False`, to indicate that the event is not considered spammy.
 
 If multiple modules implement this callback, they will be considered in order. If a
 callback returns `False`, Synapse falls through to the next one. The value of the first
@@ -35,7 +37,10 @@
 async def user_may_join_room(user: str, room: str, is_invited: bool) -> bool
 ```
 
 Called when a user is trying to join a room. The module must return a `bool` to indicate
-whether the user can join the room. The user is represented by their Matrix user ID (e.g.
+whether the user can join the room. Return `False` to prevent the user from joining the
+room; otherwise return `True` to permit the joining.
+
+The user is represented by their Matrix user ID (e.g.
 `@alice:example.com`) and the room is represented by its Matrix ID (e.g.
 `!room:example.com`). The module is also given a boolean to indicate whether the user
 currently has a pending invite in the room.
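To make these return-value semantics concrete, a module might register the
`user_may_join_room` callback as below. This is a hypothetical sketch: the
module class and its `blocked_rooms` option are invented, though
`register_spam_checker_callbacks` is the real registration hook on the
module API:

```python
class JoinPolicyModule:
    """A hypothetical module that blocks joins to a configured set of rooms."""

    def __init__(self, config: dict, api):
        self._blocked_rooms = set(config.get("blocked_rooms", []))
        api.register_spam_checker_callbacks(
            user_may_join_room=self.user_may_join_room,
        )

    async def user_may_join_room(
        self, user: str, room: str, is_invited: bool
    ) -> bool:
        # `False` blocks the join; `True` lets Synapse fall through to any
        # other registered modules (and ultimately permits the join).
        return room not in self._blocked_rooms
```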
@@ -58,7 +63,8 @@ async def user_may_invite(inviter: str, invitee: str, room_id: str) -> bool Called when processing an invitation. The module must return a `bool` indicating whether the inviter can invite the invitee to the given room. Both inviter and invitee are -represented by their Matrix user ID (e.g. `@alice:example.com`). +represented by their Matrix user ID (e.g. `@alice:example.com`). Return `False` to prevent +the invitation; otherwise return `True` to permit it. If multiple modules implement this callback, they will be considered in order. If a callback returns `True`, Synapse falls through to the next one. The value of the first @@ -80,7 +86,8 @@ async def user_may_send_3pid_invite( Called when processing an invitation using a third-party identifier (also called a 3PID, e.g. an email address or a phone number). The module must return a `bool` indicating -whether the inviter can invite the invitee to the given room. +whether the inviter can invite the invitee to the given room. Return `False` to prevent +the invitation; otherwise return `True` to permit it. The inviter is represented by their Matrix user ID (e.g. `@alice:example.com`), and the invitee is represented by its medium (e.g. "email") and its address @@ -117,6 +124,7 @@ async def user_may_create_room(user: str) -> bool Called when processing a room creation request. The module must return a `bool` indicating whether the given user (represented by their Matrix user ID) is allowed to create a room. +Return `False` to prevent room creation; otherwise return `True` to permit it. If multiple modules implement this callback, they will be considered in order. If a callback returns `True`, Synapse falls through to the next one. The value of the first @@ -133,7 +141,8 @@ async def user_may_create_room_alias(user: str, room_alias: "synapse.types.RoomA Called when trying to associate an alias with an existing room. The module must return a `bool` indicating whether the given user (represented by their Matrix user ID) is allowed -to set the given alias. +to set the given alias. Return `False` to prevent the alias creation; otherwise return +`True` to permit it. If multiple modules implement this callback, they will be considered in order. If a callback returns `True`, Synapse falls through to the next one. The value of the first @@ -150,7 +159,8 @@ async def user_may_publish_room(user: str, room_id: str) -> bool Called when trying to publish a room to the homeserver's public rooms directory. The module must return a `bool` indicating whether the given user (represented by their -Matrix user ID) is allowed to publish the given room. +Matrix user ID) is allowed to publish the given room. Return `False` to prevent the +room from being published; otherwise return `True` to permit its publication. If multiple modules implement this callback, they will be considered in order. If a callback returns `True`, Synapse falls through to the next one. The value of the first @@ -166,8 +176,11 @@ async def check_username_for_spam(user_profile: Dict[str, str]) -> bool ``` Called when computing search results in the user directory. The module must return a -`bool` indicating whether the given user profile can appear in search results. The profile -is represented as a dictionary with the following keys: +`bool` indicating whether the given user should be excluded from user directory +searches. Return `True` to indicate that the user is spammy and exclude them from +search results; otherwise return `False`. 
+
+The profile is represented as a dictionary with the following keys:
 
 * `user_id`: The Matrix ID for this user.
 * `display_name`: The user's display name.
@@ -225,8 +238,9 @@ async def check_media_file_for_spam(
 ) -> bool
 ```
 
-Called when storing a local or remote file. The module must return a boolean indicating
-whether the given file can be stored in the homeserver's media store.
+Called when storing a local or remote file. The module must return a `bool` indicating
+whether the given file should be excluded from the homeserver's media store. Return
+`True` to prevent this file from being stored; otherwise return `False`.
 
 If multiple modules implement this callback, they will be considered in order. If a
 callback returns `False`, Synapse falls through to the next one. The value of the first

From 40771773909cb03d9296e3f0505e4e32372f10aa Mon Sep 17 00:00:00 2001
From: lukasdenk <63459921+lukasdenk@users.noreply.github.com>
Date: Thu, 17 Feb 2022 11:23:54 +0100
Subject: [PATCH 256/474] Prevent duplicate push notifications for room reads (#11835)

---
 changelog.d/11835.feature  |   1 +
 synapse/push/httppusher.py |   7 +-
 tests/push/test_http.py    | 129 ++++++++++++++++++-------------------
 3 files changed, 71 insertions(+), 66 deletions(-)
 create mode 100644 changelog.d/11835.feature

diff --git a/changelog.d/11835.feature b/changelog.d/11835.feature
new file mode 100644
index 000000000000..7cee39b08c71
--- /dev/null
+++ b/changelog.d/11835.feature
@@ -0,0 +1 @@
+Make a `POST` to `/rooms/<room_id>/receipt/m.read/<event_id>` only trigger a push notification if the count of unread messages is different to the one in the last successfully sent push.
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index 96559081d0bf..49bcc06e0b5d 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -109,6 +109,7 @@ def __init__(self, hs: "HomeServer", pusher_config: PusherConfig):
         self.data_minus_url = {}
         self.data_minus_url.update(self.data)
         del self.data_minus_url["url"]
+        self.badge_count_last_call: Optional[int] = None
 
     def on_started(self, should_check_for_notifs: bool) -> None:
         """Called when this pusher has been started.
@@ -136,7 +137,9 @@ async def _update_badge(self) -> None: self.user_id, group_by_room=self._group_unread_count_by_room, ) - await self._send_badge(badge) + if self.badge_count_last_call is None or self.badge_count_last_call != badge: + self.badge_count_last_call = badge + await self._send_badge(badge) def on_timer(self) -> None: self._start_processing() @@ -402,6 +405,8 @@ async def dispatch_push( rejected = [] if "rejected" in resp: rejected = resp["rejected"] + else: + self.badge_count_last_call = badge return rejected async def _send_badge(self, badge: int) -> None: diff --git a/tests/push/test_http.py b/tests/push/test_http.py index c068d329a98b..e1e3fb97c54d 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py @@ -571,9 +571,7 @@ def test_push_unread_count_group_by_room(self): # Carry out our option-value specific test # # This push should still only contain an unread count of 1 (for 1 unread room) - self.assertEqual( - self.push_attempts[5][2]["notification"]["counts"]["unread"], 1 - ) + self._check_push_attempt(6, 1) @override_config({"push": {"group_unread_count_by_room": False}}) def test_push_unread_count_message_count(self): @@ -585,11 +583,9 @@ def test_push_unread_count_message_count(self): # Carry out our option-value specific test # - # We're counting every unread message, so there should now be 4 since the + # We're counting every unread message, so there should now be 3 since the # last read receipt - self.assertEqual( - self.push_attempts[5][2]["notification"]["counts"]["unread"], 4 - ) + self._check_push_attempt(6, 3) def _test_push_unread_count(self): """ @@ -597,8 +593,9 @@ def _test_push_unread_count(self): Note that: * Sending messages will cause push notifications to go out to relevant users - * Sending a read receipt will cause a "badge update" notification to go out to - the user that sent the receipt + * Sending a read receipt will cause the HTTP pusher to check whether the unread + count has changed since the last push notification. If so, a "badge update" + notification goes out to the user that sent the receipt """ # Register the user who gets notified user_id = self.register_user("user", "pass") @@ -642,24 +639,74 @@ def _test_push_unread_count(self): # position in the room. We'll set the read position to this event in a moment first_message_event_id = response["event_id"] - # Advance time a bit (so the pusher will register something has happened) and - # make the push succeed - self.push_attempts[0][0].callback({}) + expected_push_attempts = 1 + self._check_push_attempt(expected_push_attempts, 0) + + self._send_read_request(access_token, first_message_event_id, room_id) + + # Unread count has not changed. Therefore, ensure that read request does not + # trigger a push notification. + self.assertEqual(len(self.push_attempts), 1) + + # Send another message + response2 = self.helper.send( + room_id, body="How's the weather today?", tok=other_access_token + ) + second_message_event_id = response2["event_id"] + + expected_push_attempts += 1 + + self._check_push_attempt(expected_push_attempts, 1) + + self._send_read_request(access_token, second_message_event_id, room_id) + expected_push_attempts += 1 + + self._check_push_attempt(expected_push_attempts, 0) + + # If we're grouping by room, sending more messages shouldn't increase the + # unread count, as they're all being sent in the same room. Otherwise, it + # should. Therefore, the last call to _check_push_attempt is done in the + # caller method. 
+ self.helper.send(room_id, body="Hello?", tok=other_access_token) + expected_push_attempts += 1 + + self._advance_time_and_make_push_succeed(expected_push_attempts) + + self.helper.send(room_id, body="Hello??", tok=other_access_token) + expected_push_attempts += 1 + + self._advance_time_and_make_push_succeed(expected_push_attempts) + + self.helper.send(room_id, body="HELLO???", tok=other_access_token) + + def _advance_time_and_make_push_succeed(self, expected_push_attempts): self.pump() + self.push_attempts[expected_push_attempts - 1][0].callback({}) + def _check_push_attempt( + self, expected_push_attempts: int, expected_unread_count_last_push: int + ) -> None: + """ + Makes sure that the last expected push attempt succeeds and checks whether + it contains the expected unread count. + """ + self._advance_time_and_make_push_succeed(expected_push_attempts) # Check our push made it - self.assertEqual(len(self.push_attempts), 1) + self.assertEqual(len(self.push_attempts), expected_push_attempts) + _, push_url, push_body = self.push_attempts[expected_push_attempts - 1] self.assertEqual( - self.push_attempts[0][1], "http://example.com/_matrix/push/v1/notify" + push_url, + "http://example.com/_matrix/push/v1/notify", ) - # Check that the unread count for the room is 0 # # The unread count is zero as the user has no read receipt in the room yet self.assertEqual( - self.push_attempts[0][2]["notification"]["counts"]["unread"], 0 + push_body["notification"]["counts"]["unread"], + expected_unread_count_last_push, ) + def _send_read_request(self, access_token, message_event_id, room_id): # Now set the user's read receipt position to the first event # # This will actually trigger a new notification to be sent out so that @@ -667,56 +714,8 @@ def _test_push_unread_count(self): # count goes down channel = self.make_request( "POST", - "/rooms/%s/receipt/m.read/%s" % (room_id, first_message_event_id), + "/rooms/%s/receipt/m.read/%s" % (room_id, message_event_id), {}, access_token=access_token, ) self.assertEqual(channel.code, 200, channel.json_body) - - # Advance time and make the push succeed - self.push_attempts[1][0].callback({}) - self.pump() - - # Unread count is still zero as we've read the only message in the room - self.assertEqual(len(self.push_attempts), 2) - self.assertEqual( - self.push_attempts[1][2]["notification"]["counts"]["unread"], 0 - ) - - # Send another message - self.helper.send( - room_id, body="How's the weather today?", tok=other_access_token - ) - - # Advance time and make the push succeed - self.push_attempts[2][0].callback({}) - self.pump() - - # This push should contain an unread count of 1 as there's now been one - # message since our last read receipt - self.assertEqual(len(self.push_attempts), 3) - self.assertEqual( - self.push_attempts[2][2]["notification"]["counts"]["unread"], 1 - ) - - # Since we're grouping by room, sending more messages shouldn't increase the - # unread count, as they're all being sent in the same room - self.helper.send(room_id, body="Hello?", tok=other_access_token) - - # Advance time and make the push succeed - self.pump() - self.push_attempts[3][0].callback({}) - - self.helper.send(room_id, body="Hello??", tok=other_access_token) - - # Advance time and make the push succeed - self.pump() - self.push_attempts[4][0].callback({}) - - self.helper.send(room_id, body="HELLO???", tok=other_access_token) - - # Advance time and make the push succeed - self.pump() - self.push_attempts[5][0].callback({}) - - self.assertEqual(len(self.push_attempts), 6) From 
696acd35151ff32bb69e555baee4e584c504d4d6 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 17 Feb 2022 11:59:26 +0000
Subject: [PATCH 257/474] `send_join` response: get create event from `state`, not `auth_chain` (#12005)

msc3706 proposes changing the `/send_join` response:

> Any events returned within `state` can be omitted from `auth_chain`.

Currently, we rely on `m.room.create` being returned in `auth_chain`, but
since the `m.room.create` event must necessarily be part of the state, the
above change will break this.

In short, let's look for `m.room.create` in `state` rather than `auth_chain`.
---
 changelog.d/12005.misc               | 1 +
 synapse/handlers/federation_event.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/12005.misc

diff --git a/changelog.d/12005.misc b/changelog.d/12005.misc
new file mode 100644
index 000000000000..45e21dbe5953
--- /dev/null
+++ b/changelog.d/12005.misc
@@ -0,0 +1 @@
+Preparation for faster-room-join work: when parsing the `send_join` response, get the `m.room.create` event from `state`, not `auth_chain`.
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index 6dc27a38f384..7683246bef90 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -420,7 +420,7 @@ async def process_remote_join(
             SynapseError if the response is in some way invalid.
         """
         create_event = None
-        for e in auth_events:
+        for e in state:
             if (e.type, e.state_key) == (EventTypes.Create, ""):
                 create_event = e
                 break

From e69f8f0a8e3b3f3b647efc42fe67b6b9270d26e0 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Thu, 17 Feb 2022 08:32:18 -0500
Subject: [PATCH 258/474] Remove support for the legacy structured logging configuration. (#12008)

---
 changelog.d/12008.removal      |   1 +
 docs/structured_logging.md     |  14 ++-
 docs/upgrade.md                |   9 ++
 synapse/config/logger.py       |  12 ++-
 synapse/logging/_structured.py | 163 ---------------------------------
 5 files changed, 24 insertions(+), 175 deletions(-)
 create mode 100644 changelog.d/12008.removal
 delete mode 100644 synapse/logging/_structured.py

diff --git a/changelog.d/12008.removal b/changelog.d/12008.removal
new file mode 100644
index 000000000000..57599d9ee955
--- /dev/null
+++ b/changelog.d/12008.removal
@@ -0,0 +1 @@
+Remove support for the legacy structured logging configuration (please see the [upgrade notes](https://matrix-org.github.io/synapse/develop/upgrade#legacy-structured-logging-configuration-removal) if you are using `structured: true` in the Synapse configuration).
diff --git a/docs/structured_logging.md b/docs/structured_logging.md
index 14db85f58728..805c86765340 100644
--- a/docs/structured_logging.md
+++ b/docs/structured_logging.md
@@ -81,14 +81,12 @@ remote endpoint at 10.1.2.3:9999.
 
 ## Upgrading from legacy structured logging configuration
 
-Versions of Synapse prior to v1.23.0 included a custom structured logging
-configuration which is deprecated. It used a `structured: true` flag and
-configured `drains` instead of ``handlers`` and `formatters`.
-
-Synapse currently automatically converts the old configuration to the new
-configuration, but this will be removed in a future version of Synapse. The
-following reference can be used to update your configuration. Based on the drain
-`
Based on the drain -`type`, we can pick a new handler: +Versions of Synapse prior to v1.54.0 automatically converted the legacy +structured logging configuration, which was deprecated in v1.23.0, to the standard +library logging configuration. + +The following reference can be used to update your configuration. Based on the +drain `type`, we can pick a new handler: 1. For a type of `console`, `console_json`, or `console_json_terse`: a handler with a class of `logging.StreamHandler` and a `stream` of `ext://sys.stdout` diff --git a/docs/upgrade.md b/docs/upgrade.md index 477d7d0e81c8..9860ae97b997 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -85,6 +85,15 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.54.0 + +## Legacy structured logging configuration removal + +This release removes support for the `structured: true` logging configuration +which was deprecated in Synapse v1.23.0. If your logging configuration contains +`structured: true` then it should be modified based on the +[structured logging documentation](structured_logging.md). + # Upgrading to v1.53.0 ## Dropping support for `webclient` listeners and non-HTTP(S) `web_client_location` diff --git a/synapse/config/logger.py b/synapse/config/logger.py index b7145a44ae1d..cbbe22196528 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -33,7 +33,6 @@ globalLogBeginner, ) -from synapse.logging._structured import setup_structured_logging from synapse.logging.context import LoggingContextFilter from synapse.logging.filter import MetadataFilter @@ -138,6 +137,12 @@ removed in Synapse 1.3.0. You should instead set up a separate log configuration file. """ +STRUCTURED_ERROR = """\ +Support for the structured configuration option was removed in Synapse 1.54.0. +You should instead use the standard logging configuration. See +https://matrix-org.github.io/synapse/v1.54/structured_logging.html +""" + class LoggingConfig(Config): section = "logging" @@ -292,10 +297,9 @@ def _load_logging_config(log_config_path: str) -> None: if not log_config: logging.warning("Loaded a blank logging config?") - # If the old structured logging configuration is being used, convert it to - # the new style configuration. + # If the old structured logging configuration is being used, raise an error. if "structured" in log_config and log_config.get("structured"): - log_config = setup_structured_logging(log_config) + raise ConfigError(STRUCTURED_ERROR) logging.config.dictConfig(log_config) diff --git a/synapse/logging/_structured.py b/synapse/logging/_structured.py deleted file mode 100644 index b9933a152847..000000000000 --- a/synapse/logging/_structured.py +++ /dev/null @@ -1,163 +0,0 @@ -# Copyright 2019 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os.path -from typing import Any, Dict, Generator, Optional, Tuple - -from constantly import NamedConstant, Names - -from synapse.config._base import ConfigError - - -class DrainType(Names): - CONSOLE = NamedConstant() - CONSOLE_JSON = NamedConstant() - CONSOLE_JSON_TERSE = NamedConstant() - FILE = NamedConstant() - FILE_JSON = NamedConstant() - NETWORK_JSON_TERSE = NamedConstant() - - -DEFAULT_LOGGERS = {"synapse": {"level": "info"}} - - -def parse_drain_configs( - drains: dict, -) -> Generator[Tuple[str, Dict[str, Any]], None, None]: - """ - Parse the drain configurations. - - Args: - drains (dict): A list of drain configurations. - - Yields: - dict instances representing a logging handler. - - Raises: - ConfigError: If any of the drain configuration items are invalid. - """ - - for name, config in drains.items(): - if "type" not in config: - raise ConfigError("Logging drains require a 'type' key.") - - try: - logging_type = DrainType.lookupByName(config["type"].upper()) - except ValueError: - raise ConfigError( - "%s is not a known logging drain type." % (config["type"],) - ) - - # Either use the default formatter or the tersejson one. - if logging_type in ( - DrainType.CONSOLE_JSON, - DrainType.FILE_JSON, - ): - formatter: Optional[str] = "json" - elif logging_type in ( - DrainType.CONSOLE_JSON_TERSE, - DrainType.NETWORK_JSON_TERSE, - ): - formatter = "tersejson" - else: - # A formatter of None implies using the default formatter. - formatter = None - - if logging_type in [ - DrainType.CONSOLE, - DrainType.CONSOLE_JSON, - DrainType.CONSOLE_JSON_TERSE, - ]: - location = config.get("location") - if location is None or location not in ["stdout", "stderr"]: - raise ConfigError( - ( - "The %s drain needs the 'location' key set to " - "either 'stdout' or 'stderr'." - ) - % (logging_type,) - ) - - yield name, { - "class": "logging.StreamHandler", - "formatter": formatter, - "stream": "ext://sys." + location, - } - - elif logging_type in [DrainType.FILE, DrainType.FILE_JSON]: - if "location" not in config: - raise ConfigError( - "The %s drain needs the 'location' key set." % (logging_type,) - ) - - location = config.get("location") - if os.path.abspath(location) != location: - raise ConfigError( - "File paths need to be absolute, '%s' is a relative path" - % (location,) - ) - - yield name, { - "class": "logging.FileHandler", - "formatter": formatter, - "filename": location, - } - - elif logging_type in [DrainType.NETWORK_JSON_TERSE]: - host = config.get("host") - port = config.get("port") - maximum_buffer = config.get("maximum_buffer", 1000) - - yield name, { - "class": "synapse.logging.RemoteHandler", - "formatter": formatter, - "host": host, - "port": port, - "maximum_buffer": maximum_buffer, - } - - else: - raise ConfigError( - "The %s drain type is currently not implemented." - % (config["type"].upper(),) - ) - - -def setup_structured_logging( - log_config: dict, -) -> dict: - """ - Convert a legacy structured logging configuration (from Synapse < v1.23.0) - to one compatible with the new standard library handlers. 
- """ - if "drains" not in log_config: - raise ConfigError("The logging configuration requires a list of drains.") - - new_config = { - "version": 1, - "formatters": { - "json": {"class": "synapse.logging.JsonFormatter"}, - "tersejson": {"class": "synapse.logging.TerseJsonFormatter"}, - }, - "handlers": {}, - "loggers": log_config.get("loggers", DEFAULT_LOGGERS), - "root": {"handlers": []}, - } - - for handler_name, handler in parse_drain_configs(log_config["drains"]): - new_config["handlers"][handler_name] = handler - - # Add each handler to the root logger. - new_config["root"]["handlers"].append(handler_name) - - return new_config From 6127c4b9f19702909938ddc95b7e4219d20ac349 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 17 Feb 2022 15:55:14 +0000 Subject: [PATCH 259/474] Configure `tox` to use `venv` (#12015) As the comment says, virtualenv is a pile of fail. --- .ci/scripts/test_old_deps.sh | 4 +++- changelog.d/12015.misc | 1 + tox.ini | 5 +++++ 3 files changed, 9 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12015.misc diff --git a/.ci/scripts/test_old_deps.sh b/.ci/scripts/test_old_deps.sh index 54ec3c8b0dce..b2859f752261 100755 --- a/.ci/scripts/test_old_deps.sh +++ b/.ci/scripts/test_old_deps.sh @@ -8,7 +8,9 @@ export DEBIAN_FRONTEND=noninteractive set -ex apt-get update -apt-get install -y python3 python3-dev python3-pip libxml2-dev libxslt-dev xmlsec1 zlib1g-dev tox libjpeg-dev libwebp-dev +apt-get install -y \ + python3 python3-dev python3-pip python3-venv \ + libxml2-dev libxslt-dev xmlsec1 zlib1g-dev tox libjpeg-dev libwebp-dev export LANG="C.UTF-8" diff --git a/changelog.d/12015.misc b/changelog.d/12015.misc new file mode 100644 index 000000000000..3aa32ab4cf78 --- /dev/null +++ b/changelog.d/12015.misc @@ -0,0 +1 @@ +Configure `tox` to use `venv` rather than `virtualenv`. diff --git a/tox.ini b/tox.ini index 32679e9106c8..2b3d39e0340f 100644 --- a/tox.ini +++ b/tox.ini @@ -4,6 +4,11 @@ envlist = packaging, py37, py38, py39, py310, check_codestyle, check_isort # we require tox>=2.3.2 for the fix to https://github.com/tox-dev/tox/issues/208 minversion = 2.3.2 +# the tox-venv plugin makes tox use python's built-in `venv` module rather than +# the legacy `virtualenv` tool. `virtualenv` embeds its own `pip`, `setuptools`, +# etc, and ends up being rather unreliable. +requires = tox-venv + [base] deps = python-subunit From da0e9f8efdac1571eab35ad2cc842073ca54769c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 17 Feb 2022 16:11:59 +0000 Subject: [PATCH 260/474] Faster joins: parse msc3706 fields in send_join response (#12011) Part of my work on #11249: add code to handle the new fields added in MSC3706. --- changelog.d/12011.misc | 1 + synapse/config/experimental.py | 4 + synapse/federation/federation_client.py | 15 ++- synapse/federation/transport/client.py | 118 ++++++++++++++++------ synapse/python_dependencies.py | 3 +- tests/federation/transport/test_client.py | 32 ++++++ 6 files changed, 140 insertions(+), 33 deletions(-) create mode 100644 changelog.d/12011.misc diff --git a/changelog.d/12011.misc b/changelog.d/12011.misc new file mode 100644 index 000000000000..258b0e389f6c --- /dev/null +++ b/changelog.d/12011.misc @@ -0,0 +1 @@ +Preparation for faster-room-join work: parse msc3706 fields in send_join response. 
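The diffs below feed the two new MSC3706 fields into `SendJoinParser` via ijson's push-based coroutine API. As a standalone sketch of that API (not part of the patch; the field name is the unstable MSC3706 identifier used here, and ijson >= 3.1.4 is assumed, since older releases mishandle `.` inside key names):

```python
import ijson

results = []


@ijson.coroutine
def collect():
    # Receives each complete value matched at the prefix below.
    while True:
        results.append((yield))


coro = ijson.items_coro(collect(), "org.matrix.msc3706.servers_in_room")

# Bytes can be fed incrementally, e.g. as they arrive from the network.
coro.send(b'{"org.matrix.msc3706.servers_in_room": ')
coro.send(b'["hs1", "hs2"]}')
coro.close()

assert results == [["hs1", "hs2"]]
```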
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 09d692d9a1ed..12b5638cf8b7 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -64,3 +64,7 @@ def read_config(self, config: JsonDict, **kwargs): # MSC3706 (server-side support for partial state in /send_join responses) self.msc3706_enabled: bool = experimental.get("msc3706_enabled", False) + + # experimental support for faster joins over federation (msc2775, msc3706) + # requires a target server with msc3706_enabled enabled. + self.faster_joins_enabled: bool = experimental.get("faster_joins", False) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 74f17aa4daa3..9f56f97d9965 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -1,4 +1,4 @@ -# Copyright 2015-2021 The Matrix.org Foundation C.I.C. +# Copyright 2015-2022 The Matrix.org Foundation C.I.C. # Copyright 2020 Sorunome # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -89,6 +89,12 @@ class SendJoinResult: state: List[EventBase] auth_chain: List[EventBase] + # True if 'state' elides non-critical membership events + partial_state: bool + + # if 'partial_state' is set, a list of the servers in the room (otherwise empty) + servers_in_room: List[str] + class FederationClient(FederationBase): def __init__(self, hs: "HomeServer"): @@ -876,11 +882,18 @@ async def _execute(pdu: EventBase) -> None: % (auth_chain_create_events,) ) + if response.partial_state and not response.servers_in_room: + raise InvalidResponseError( + "partial_state was set, but no servers were listed in the room" + ) + return SendJoinResult( event=event, state=signed_state, auth_chain=signed_auth, origin=destination, + partial_state=response.partial_state, + servers_in_room=response.servers_in_room or [], ) # MSC3083 defines additional error codes for room joins. diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 8782586cd6b4..dca6e5c45d07 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -1,4 +1,4 @@ -# Copyright 2014-2021 The Matrix.org Foundation C.I.C. +# Copyright 2014-2022 The Matrix.org Foundation C.I.C. # Copyright 2020 Sorunome # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -60,6 +60,7 @@ class TransportLayerClient: def __init__(self, hs): self.server_name = hs.hostname self.client = hs.get_federation_http_client() + self._faster_joins_enabled = hs.config.experimental.faster_joins_enabled async def get_room_state_ids( self, destination: str, room_id: str, event_id: str @@ -336,10 +337,15 @@ async def send_join_v2( content: JsonDict, ) -> "SendJoinResponse": path = _create_v2_path("/send_join/%s/%s", room_id, event_id) + query_params: Dict[str, str] = {} + if self._faster_joins_enabled: + # lazy-load state on join + query_params["org.matrix.msc3706.partial_state"] = "true" return await self.client.put_json( destination=destination, path=path, + args=query_params, data=content, parser=SendJoinParser(room_version, v1_api=False), max_response_size=MAX_RESPONSE_SIZE_SEND_JOIN, @@ -1271,6 +1277,12 @@ class SendJoinResponse: # "event" is not included in the response. 
event: Optional[EventBase] = None + # The room state is incomplete + partial_state: bool = False + + # List of servers in the room + servers_in_room: Optional[List[str]] = None + @ijson.coroutine def _event_parser(event_dict: JsonDict) -> Generator[None, Tuple[str, Any], None]: @@ -1297,6 +1309,32 @@ def _event_list_parser( events.append(event) +@ijson.coroutine +def _partial_state_parser(response: SendJoinResponse) -> Generator[None, Any, None]: + """Helper function for use with `ijson.items_coro` + + Parses the partial_state field in send_join responses + """ + while True: + val = yield + if not isinstance(val, bool): + raise TypeError("partial_state must be a boolean") + response.partial_state = val + + +@ijson.coroutine +def _servers_in_room_parser(response: SendJoinResponse) -> Generator[None, Any, None]: + """Helper function for use with `ijson.items_coro` + + Parses the servers_in_room field in send_join responses + """ + while True: + val = yield + if not isinstance(val, list) or any(not isinstance(x, str) for x in val): + raise TypeError("servers_in_room must be a list of strings") + response.servers_in_room = val + + class SendJoinParser(ByteParser[SendJoinResponse]): """A parser for the response to `/send_join` requests. @@ -1308,44 +1346,62 @@ class SendJoinParser(ByteParser[SendJoinResponse]): CONTENT_TYPE = "application/json" def __init__(self, room_version: RoomVersion, v1_api: bool): - self._response = SendJoinResponse([], [], {}) + self._response = SendJoinResponse([], [], event_dict={}) self._room_version = room_version + self._coros = [] # The V1 API has the shape of `[200, {...}]`, which we handle by # prefixing with `item.*`. prefix = "item." if v1_api else "" - self._coro_state = ijson.items_coro( - _event_list_parser(room_version, self._response.state), - prefix + "state.item", - use_float=True, - ) - self._coro_auth = ijson.items_coro( - _event_list_parser(room_version, self._response.auth_events), - prefix + "auth_chain.item", - use_float=True, - ) - # TODO Remove the unstable prefix when servers have updated. - # - # By re-using the same event dictionary this will cause the parsing of - # org.matrix.msc3083.v2.event and event to stomp over each other. - # Generally this should be fine. - self._coro_unstable_event = ijson.kvitems_coro( - _event_parser(self._response.event_dict), - prefix + "org.matrix.msc3083.v2.event", - use_float=True, - ) - self._coro_event = ijson.kvitems_coro( - _event_parser(self._response.event_dict), - prefix + "event", - use_float=True, - ) + self._coros = [ + ijson.items_coro( + _event_list_parser(room_version, self._response.state), + prefix + "state.item", + use_float=True, + ), + ijson.items_coro( + _event_list_parser(room_version, self._response.auth_events), + prefix + "auth_chain.item", + use_float=True, + ), + # TODO Remove the unstable prefix when servers have updated. + # + # By re-using the same event dictionary this will cause the parsing of + # org.matrix.msc3083.v2.event and event to stomp over each other. + # Generally this should be fine. 
+ ijson.kvitems_coro( + _event_parser(self._response.event_dict), + prefix + "org.matrix.msc3083.v2.event", + use_float=True, + ), + ijson.kvitems_coro( + _event_parser(self._response.event_dict), + prefix + "event", + use_float=True, + ), + ] + + if not v1_api: + self._coros.append( + ijson.items_coro( + _partial_state_parser(self._response), + "org.matrix.msc3706.partial_state", + use_float="True", + ) + ) + + self._coros.append( + ijson.items_coro( + _servers_in_room_parser(self._response), + "org.matrix.msc3706.servers_in_room", + use_float="True", + ) + ) def write(self, data: bytes) -> int: - self._coro_state.send(data) - self._coro_auth.send(data) - self._coro_unstable_event.send(data) - self._coro_event.send(data) + for c in self._coros: + c.send(data) return len(data) diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 86162e0f2c65..f43fbb5842fa 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -87,7 +87,8 @@ # We enforce that we have a `cryptography` version that bundles an `openssl` # with the latest security patches. "cryptography>=3.4.7", - "ijson>=3.1", + # ijson 3.1.4 fixes a bug with "." in property names + "ijson>=3.1.4", "matrix-common~=1.1.0", ] diff --git a/tests/federation/transport/test_client.py b/tests/federation/transport/test_client.py index a7031a55f28c..c2320ce133aa 100644 --- a/tests/federation/transport/test_client.py +++ b/tests/federation/transport/test_client.py @@ -62,3 +62,35 @@ def test_two_writes(self) -> None: self.assertEqual(len(parsed_response.state), 1, parsed_response) self.assertEqual(parsed_response.event_dict, {}, parsed_response) self.assertIsNone(parsed_response.event, parsed_response) + self.assertFalse(parsed_response.partial_state, parsed_response) + self.assertEqual(parsed_response.servers_in_room, None, parsed_response) + + def test_partial_state(self) -> None: + """Check that the partial_state flag is correctly parsed""" + parser = SendJoinParser(RoomVersions.V1, False) + response = { + "org.matrix.msc3706.partial_state": True, + } + + serialised_response = json.dumps(response).encode() + + # Send data to the parser + parser.write(serialised_response) + + # Retrieve and check the parsed SendJoinResponse + parsed_response = parser.finish() + self.assertTrue(parsed_response.partial_state) + + def test_servers_in_room(self) -> None: + """Check that the servers_in_room field is correctly parsed""" + parser = SendJoinParser(RoomVersions.V1, False) + response = {"org.matrix.msc3706.servers_in_room": ["hs1", "hs2"]} + + serialised_response = json.dumps(response).encode() + + # Send data to the parser + parser.write(serialised_response) + + # Retrieve and check the parsed SendJoinResponse + parsed_response = parser.finish() + self.assertEqual(parsed_response.servers_in_room, ["hs1", "hs2"]) From 707049c6ff61193ffdfba909b4f17e9158c1d3e1 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 17 Feb 2022 17:54:16 +0100 Subject: [PATCH 261/474] Allow modules to set a display name on registration (#12009) Co-authored-by: Patrick Cloke --- changelog.d/12009.feature | 1 + .../password_auth_provider_callbacks.md | 35 ++++- synapse/handlers/auth.py | 58 +++++++++ synapse/module_api/__init__.py | 5 + synapse/rest/client/register.py | 7 + tests/handlers/test_password_providers.py | 123 +++++++++++++----- 6 files changed, 195 insertions(+), 34 deletions(-) create mode 100644 changelog.d/12009.feature diff --git a/changelog.d/12009.feature b/changelog.d/12009.feature new file mode 100644 
index 000000000000..c8a531481ec3 --- /dev/null +++ b/changelog.d/12009.feature @@ -0,0 +1 @@ +Enable modules to set a custom display name when registering a user. diff --git a/docs/modules/password_auth_provider_callbacks.md b/docs/modules/password_auth_provider_callbacks.md index 88b59bb09e61..ec810fd292e5 100644 --- a/docs/modules/password_auth_provider_callbacks.md +++ b/docs/modules/password_auth_provider_callbacks.md @@ -85,7 +85,7 @@ If the authentication is unsuccessful, the module must return `None`. If multiple modules implement this callback, they will be considered in order. If a callback returns `None`, Synapse falls through to the next one. The value of the first callback that does not return `None` will be used. If this happens, Synapse will not call -any of the subsequent implementations of this callback. If every callback return `None`, +any of the subsequent implementations of this callback. If every callback returns `None`, the authentication is denied. ### `on_logged_out` @@ -162,10 +162,38 @@ return `None`. If multiple modules implement this callback, they will be considered in order. If a callback returns `None`, Synapse falls through to the next one. The value of the first callback that does not return `None` will be used. If this happens, Synapse will not call -any of the subsequent implementations of this callback. If every callback return `None`, +any of the subsequent implementations of this callback. If every callback returns `None`, the username provided by the user is used, if any (otherwise one is automatically generated). +### `get_displayname_for_registration` + +_First introduced in Synapse v1.54.0_ + +```python +async def get_displayname_for_registration( + uia_results: Dict[str, Any], + params: Dict[str, Any], +) -> Optional[str] +``` + +Called when registering a new user. The module can return a display name to set for the +user being registered by returning it as a string, or `None` if it doesn't wish to force a +display name for this user. + +This callback is called once [User-Interactive Authentication](https://spec.matrix.org/latest/client-server-api/#user-interactive-authentication-api) +has been completed by the user. It is not called when registering a user via SSO. It is +passed two dictionaries, which include the information that the user has provided during +the registration process. These dictionaries are identical to the ones passed to +[`get_username_for_registration`](#get_username_for_registration), so refer to the +documentation of this callback for more information about them. + +If multiple modules implement this callback, they will be considered in order. If a +callback returns `None`, Synapse falls through to the next one. The value of the first +callback that does not return `None` will be used. If this happens, Synapse will not call +any of the subsequent implementations of this callback. If every callback returns `None`, +the username will be used (e.g. `alice` if the user being registered is `@alice:example.com`). 
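A minimal module using the new callback might look like the following sketch (the module name and the display-name scheme are illustrative only; the hook is the `get_displayname_for_registration` argument this patch adds to `register_password_auth_provider_callbacks`):

```python
from typing import Any, Dict, Optional

from synapse.module_api import ModuleApi


class ExampleDisplaynameModule:
    def __init__(self, config: dict, api: ModuleApi):
        api.register_password_auth_provider_callbacks(
            get_displayname_for_registration=self.get_displayname_for_registration,
        )

    async def get_displayname_for_registration(
        self, uia_results: Dict[str, Any], params: Dict[str, Any]
    ) -> Optional[str]:
        # Derive a display name from the requested username; returning None
        # lets Synapse fall through to the next module, or to the default of
        # using the localpart.
        username = params.get("username")
        return f"{username} (unverified)" if username else None
```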
+ ## `is_3pid_allowed` _First introduced in Synapse v1.53.0_ @@ -194,8 +222,7 @@ The example module below implements authentication checkers for two different lo - Is checked by the method: `self.check_my_login` - `m.login.password` (defined in [the spec](https://matrix.org/docs/spec/client_server/latest#password-based)) - Expects a `password` field to be sent to `/login` - - Is checked by the method: `self.check_pass` - + - Is checked by the method: `self.check_pass` ```python from typing import Awaitable, Callable, Optional, Tuple diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 6959d1aa7e47..572f54b1e353 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -2064,6 +2064,10 @@ def run(*args: Tuple, **kwargs: Dict) -> Awaitable: [JsonDict, JsonDict], Awaitable[Optional[str]], ] +GET_DISPLAYNAME_FOR_REGISTRATION_CALLBACK = Callable[ + [JsonDict, JsonDict], + Awaitable[Optional[str]], +] IS_3PID_ALLOWED_CALLBACK = Callable[[str, str, bool], Awaitable[bool]] @@ -2080,6 +2084,9 @@ def __init__(self) -> None: self.get_username_for_registration_callbacks: List[ GET_USERNAME_FOR_REGISTRATION_CALLBACK ] = [] + self.get_displayname_for_registration_callbacks: List[ + GET_DISPLAYNAME_FOR_REGISTRATION_CALLBACK + ] = [] self.is_3pid_allowed_callbacks: List[IS_3PID_ALLOWED_CALLBACK] = [] # Mapping from login type to login parameters @@ -2099,6 +2106,9 @@ def register_password_auth_provider_callbacks( get_username_for_registration: Optional[ GET_USERNAME_FOR_REGISTRATION_CALLBACK ] = None, + get_displayname_for_registration: Optional[ + GET_DISPLAYNAME_FOR_REGISTRATION_CALLBACK + ] = None, ) -> None: # Register check_3pid_auth callback if check_3pid_auth is not None: @@ -2148,6 +2158,11 @@ def register_password_auth_provider_callbacks( get_username_for_registration, ) + if get_displayname_for_registration is not None: + self.get_displayname_for_registration_callbacks.append( + get_displayname_for_registration, + ) + if is_3pid_allowed is not None: self.is_3pid_allowed_callbacks.append(is_3pid_allowed) @@ -2350,6 +2365,49 @@ async def get_username_for_registration( return None + async def get_displayname_for_registration( + self, + uia_results: JsonDict, + params: JsonDict, + ) -> Optional[str]: + """Defines the display name to use when registering the user, using the + credentials and parameters provided during the UIA flow. + + Stops at the first callback that returns a tuple containing at least one string. + + Args: + uia_results: The credentials provided during the UIA flow. + params: The parameters provided by the registration request. + + Returns: + A tuple which first element is the display name, and the second is an MXC URL + to the user's avatar. + """ + for callback in self.get_displayname_for_registration_callbacks: + try: + res = await callback(uia_results, params) + + if isinstance(res, str): + return res + elif res is not None: + # mypy complains that this line is unreachable because it assumes the + # data returned by the module fits the expected type. We just want + # to make sure this is the case. 
+ logger.warning( # type: ignore[unreachable] + "Ignoring non-string value returned by" + " get_displayname_for_registration callback %s: %s", + callback, + res, + ) + except Exception as e: + logger.error( + "Module raised an exception in get_displayname_for_registration: %s", + e, + ) + raise SynapseError(code=500, msg="Internal Server Error") + + return None + async def is_3pid_allowed( self, medium: str, diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index d4fca369231a..8a17b912d36d 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -70,6 +70,7 @@ from synapse.handlers.auth import ( CHECK_3PID_AUTH_CALLBACK, CHECK_AUTH_CALLBACK, + GET_DISPLAYNAME_FOR_REGISTRATION_CALLBACK, GET_USERNAME_FOR_REGISTRATION_CALLBACK, IS_3PID_ALLOWED_CALLBACK, ON_LOGGED_OUT_CALLBACK, @@ -317,6 +318,9 @@ def register_password_auth_provider_callbacks( get_username_for_registration: Optional[ GET_USERNAME_FOR_REGISTRATION_CALLBACK ] = None, + get_displayname_for_registration: Optional[ + GET_DISPLAYNAME_FOR_REGISTRATION_CALLBACK + ] = None, ) -> None: """Registers callbacks for password auth provider capabilities. @@ -328,6 +332,7 @@ def register_password_auth_provider_callbacks( is_3pid_allowed=is_3pid_allowed, auth_checkers=auth_checkers, get_username_for_registration=get_username_for_registration, + get_displayname_for_registration=get_displayname_for_registration, ) def register_background_update_controller_callbacks( diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index c965e2bda2f9..b8a5135e02e9 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -694,11 +694,18 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: session_id ) + display_name = await ( + self.password_auth_provider.get_displayname_for_registration( + auth_result, params + ) + ) + registered_user_id = await self.registration_handler.register_user( localpart=desired_username, password_hash=password_hash, guest_access_token=guest_access_token, threepid=threepid, + default_display_name=display_name, address=client_addr, user_agent_ips=entries, ) diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py index 4740dd0a65e3..49d832de814d 100644 --- a/tests/handlers/test_password_providers.py +++ b/tests/handlers/test_password_providers.py @@ -84,7 +84,7 @@ def parse_config(self): def __init__(self, config, api: ModuleApi): api.register_password_auth_provider_callbacks( - auth_checkers={("test.login_type", ("test_field",)): self.check_auth}, + auth_checkers={("test.login_type", ("test_field",)): self.check_auth} ) def check_auth(self, *args): @@ -122,7 +122,7 @@ def __init__(self, config, api: ModuleApi): auth_checkers={ ("test.login_type", ("test_field",)): self.check_auth, ("m.login.password", ("password",)): self.check_auth, - }, + } ) pass @@ -163,6 +163,9 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): account.register_servlets, ] + CALLBACK_USERNAME = "get_username_for_registration" + CALLBACK_DISPLAYNAME = "get_displayname_for_registration" + def setUp(self): # we use a global mock device, so make sure we are starting with a clean slate mock_password_provider.reset_mock() @@ -754,7 +757,9 @@ def test_username(self): """Tests that the get_username_for_registration callback can define the username of a user when registering. 
""" - self._setup_get_username_for_registration() + self._setup_get_name_for_registration( + callback_name=self.CALLBACK_USERNAME, + ) username = "rin" channel = self.make_request( @@ -777,30 +782,14 @@ def test_username_uia(self): """Tests that the get_username_for_registration callback is only called at the end of the UIA flow. """ - m = self._setup_get_username_for_registration() - - # Initiate the UIA flow. - username = "rin" - channel = self.make_request( - "POST", - "register", - {"username": username, "type": "m.login.password", "password": "bar"}, + m = self._setup_get_name_for_registration( + callback_name=self.CALLBACK_USERNAME, ) - self.assertEqual(channel.code, 401) - self.assertIn("session", channel.json_body) - # Check that the callback hasn't been called yet. - m.assert_not_called() + username = "rin" + res = self._do_uia_assert_mock_not_called(username, m) - # Finish the UIA flow. - session = channel.json_body["session"] - channel = self.make_request( - "POST", - "register", - {"auth": {"session": session, "type": LoginType.DUMMY}}, - ) - self.assertEqual(channel.code, 200, channel.json_body) - mxid = channel.json_body["user_id"] + mxid = res["user_id"] self.assertEqual(UserID.from_string(mxid).localpart, username + "-foo") # Check that the callback has been called. @@ -817,6 +806,56 @@ def test_3pid_allowed(self): self._test_3pid_allowed("rin", False) self._test_3pid_allowed("kitay", True) + def test_displayname(self): + """Tests that the get_displayname_for_registration callback can define the + display name of a user when registering. + """ + self._setup_get_name_for_registration( + callback_name=self.CALLBACK_DISPLAYNAME, + ) + + username = "rin" + channel = self.make_request( + "POST", + "/register", + { + "username": username, + "password": "bar", + "auth": {"type": LoginType.DUMMY}, + }, + ) + self.assertEqual(channel.code, 200) + + # Our callback takes the username and appends "-foo" to it, check that's what we + # have. + user_id = UserID.from_string(channel.json_body["user_id"]) + display_name = self.get_success( + self.hs.get_profile_handler().get_displayname(user_id) + ) + + self.assertEqual(display_name, username + "-foo") + + def test_displayname_uia(self): + """Tests that the get_displayname_for_registration callback is only called at the + end of the UIA flow. + """ + m = self._setup_get_name_for_registration( + callback_name=self.CALLBACK_DISPLAYNAME, + ) + + username = "rin" + res = self._do_uia_assert_mock_not_called(username, m) + + user_id = UserID.from_string(res["user_id"]) + display_name = self.get_success( + self.hs.get_profile_handler().get_displayname(user_id) + ) + + self.assertEqual(display_name, username + "-foo") + + # Check that the callback has been called. + m.assert_called_once() + def _test_3pid_allowed(self, username: str, registration: bool): """Tests that the "is_3pid_allowed" module callback is called correctly, using either /register or /account URLs depending on the arguments. @@ -877,23 +916,47 @@ def _test_3pid_allowed(self, username: str, registration: bool): m.assert_called_once_with("email", "bar@test.com", registration) - def _setup_get_username_for_registration(self) -> Mock: - """Registers a get_username_for_registration callback that appends "-foo" to the - username the client is trying to register. 
+ def _setup_get_name_for_registration(self, callback_name: str) -> Mock: + """Registers either a get_username_for_registration callback or a + get_displayname_for_registration callback that appends "-foo" to the username the + client is trying to register. """ - async def get_username_for_registration(uia_results, params): + async def callback(uia_results, params): self.assertIn(LoginType.DUMMY, uia_results) username = params["username"] return username + "-foo" - m = Mock(side_effect=get_username_for_registration) + m = Mock(side_effect=callback) password_auth_provider = self.hs.get_password_auth_provider() - password_auth_provider.get_username_for_registration_callbacks.append(m) + getattr(password_auth_provider, callback_name + "_callbacks").append(m) return m + def _do_uia_assert_mock_not_called(self, username: str, m: Mock) -> JsonDict: + # Initiate the UIA flow. + channel = self.make_request( + "POST", + "register", + {"username": username, "type": "m.login.password", "password": "bar"}, + ) + self.assertEqual(channel.code, 401) + self.assertIn("session", channel.json_body) + + # Check that the callback hasn't been called yet. + m.assert_not_called() + + # Finish the UIA flow. + session = channel.json_body["session"] + channel = self.make_request( + "POST", + "register", + {"auth": {"session": session, "type": LoginType.DUMMY}}, + ) + self.assertEqual(channel.code, 200, channel.json_body) + return channel.json_body + def _get_login_flows(self) -> JsonDict: channel = self.make_request("GET", "/_matrix/client/r0/login") self.assertEqual(channel.code, 200, channel.result) From 3f4d25a48ba17a1e2bcc6510f6cf6de7dd496a11 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 17 Feb 2022 17:22:55 +0000 Subject: [PATCH 262/474] Remove unstable MSC3283 flags (#12018) Fixes #11962 --- changelog.d/12018.removal | 1 + synapse/config/experimental.py | 3 --- synapse/rest/client/capabilities.py | 14 -------------- 3 files changed, 1 insertion(+), 17 deletions(-) create mode 100644 changelog.d/12018.removal diff --git a/changelog.d/12018.removal b/changelog.d/12018.removal new file mode 100644 index 000000000000..e940b62228ac --- /dev/null +++ b/changelog.d/12018.removal @@ -0,0 +1 @@ +Drop support for [MSC3283](https://github.com/matrix-org/matrix-doc/pull/3283) unstable flags now that the stable flags are supported. diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 12b5638cf8b7..bcdeb9ee232d 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -41,9 +41,6 @@ def read_config(self, config: JsonDict, **kwargs): # MSC3244 (room version capabilities) self.msc3244_enabled: bool = experimental.get("msc3244_enabled", True) - # MSC3283 (set displayname, avatar_url and change 3pid capabilities) - self.msc3283_enabled: bool = experimental.get("msc3283_enabled", False) - # MSC3266 (room summary api) self.msc3266_enabled: bool = experimental.get("msc3266_enabled", False) diff --git a/synapse/rest/client/capabilities.py b/synapse/rest/client/capabilities.py index 6682da077a3e..e05c926b6f06 100644 --- a/synapse/rest/client/capabilities.py +++ b/synapse/rest/client/capabilities.py @@ -72,20 +72,6 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: "org.matrix.msc3244.room_capabilities" ] = MSC3244_CAPABILITIES - # Must be removed in later versions. - # Is only included for migration. - # Also the parts in `synapse/config/experimental.py`. 
- if self.config.experimental.msc3283_enabled: - response["capabilities"]["org.matrix.msc3283.set_displayname"] = { - "enabled": self.config.registration.enable_set_displayname - } - response["capabilities"]["org.matrix.msc3283.set_avatar_url"] = { - "enabled": self.config.registration.enable_set_avatar_url - } - response["capabilities"]["org.matrix.msc3283.3pid_changes"] = { - "enabled": self.config.registration.enable_3pid_changes - } - if self.config.experimental.msc3440_enabled: response["capabilities"]["io.element.thread"] = {"enabled": True} From 40e256e7aa31a6b90e665b340858abbd3a2999c9 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 18 Feb 2022 12:38:48 +0100 Subject: [PATCH 263/474] Update the olddeps CI check to use an old version of markupsafe (#12025) --- changelog.d/12025.misc | 1 + tox.ini | 3 +++ 2 files changed, 4 insertions(+) create mode 100644 changelog.d/12025.misc diff --git a/changelog.d/12025.misc b/changelog.d/12025.misc new file mode 100644 index 000000000000..d9475a771881 --- /dev/null +++ b/changelog.d/12025.misc @@ -0,0 +1 @@ +Update the `olddeps` CI job to use an old version of `markupsafe`. diff --git a/tox.ini b/tox.ini index 2b3d39e0340f..41678aa38bee 100644 --- a/tox.ini +++ b/tox.ini @@ -124,6 +124,9 @@ usedevelop = false deps = Automat == 0.8.0 lxml + # markupsafe 2.1 introduced a change that breaks Jinja 2.x. Since we depend on + # Jinja >= 2.9, it means this test suite will fail if markupsafe >= 2.1 is installed. + markupsafe < 2.1 {[base]deps} commands = From 5a6911598ad2d3dea96b9f8c1cffccd4f4840bf7 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 18 Feb 2022 06:11:18 -0600 Subject: [PATCH 264/474] Fix 500 error with Postgres when looking backwards with the MSC3030 `/timestamp_to_event` endpoint (#12024) --- changelog.d/12024.bugfix | 1 + synapse/storage/databases/main/events_worker.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12024.bugfix diff --git a/changelog.d/12024.bugfix b/changelog.d/12024.bugfix new file mode 100644 index 000000000000..59bcdb93a57f --- /dev/null +++ b/changelog.d/12024.bugfix @@ -0,0 +1 @@ +Fix 500 error with Postgres when looking backwards with the [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) `/timestamp_to_event?dir=b` endpoint. diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 712b8ce204fe..2a255d103101 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -1854,7 +1854,7 @@ def is_event_next_to_gap_txn(txn: LoggingTransaction) -> bool: forward_edge_query = """ SELECT 1 FROM event_edges /* Check to make sure the event referencing our event in question is not rejected */ - LEFT JOIN rejections ON event_edges.event_id == rejections.event_id + LEFT JOIN rejections ON event_edges.event_id = rejections.event_id WHERE event_edges.room_id = ? AND event_edges.prev_event_id = ? From 19bd9cff1afba2149ae58cd5b5648470c48d51a5 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Fri, 18 Feb 2022 05:48:23 -0700 Subject: [PATCH 265/474] Use stable MSC3069 `is_guest` flag on `/whoami`. (#12021) Keeping backwards compatibility with the unstable flag for now. 
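Concretely, the tests updated below expect `GET /_matrix/client/v3/account/whoami` to return both flags (values illustrative):

```python
# Shape of a /whoami response after this change:
whoami_response = {
    "user_id": "@alice:example.com",
    "device_id": "ABCDEFGHIJ",
    "org.matrix.msc3069.is_guest": False,  # unstable identifier, kept for compatibility
    "is_guest": False,  # stable identifier from Matrix 1.2
}
```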
--- changelog.d/12021.feature | 1 + synapse/rest/client/account.py | 2 ++ tests/rest/client/test_account.py | 9 ++++++--- 3 files changed, 9 insertions(+), 3 deletions(-) create mode 100644 changelog.d/12021.feature diff --git a/changelog.d/12021.feature b/changelog.d/12021.feature new file mode 100644 index 000000000000..01378df8ca62 --- /dev/null +++ b/changelog.d/12021.feature @@ -0,0 +1 @@ +Support only the stable identifier for [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069)'s `is_guest` on `/_matrix/client/v3/account/whoami`. \ No newline at end of file diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index cfa2aee76d49..efe299e69828 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -883,7 +883,9 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: response = { "user_id": requester.user.to_string(), # MSC: https://github.com/matrix-org/matrix-doc/pull/3069 + # Entered spec in Matrix 1.2 "org.matrix.msc3069.is_guest": bool(requester.is_guest), + "is_guest": bool(requester.is_guest), } # Appservices and similar accounts do not have device IDs diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py index 89d85b0a1701..51146c471d94 100644 --- a/tests/rest/client/test_account.py +++ b/tests/rest/client/test_account.py @@ -486,8 +486,9 @@ def test_GET_whoami(self): { "user_id": user_id, "device_id": device_id, - # Unstable until MSC3069 enters spec + # MSC3069 entered spec in Matrix 1.2 but maintained compatibility "org.matrix.msc3069.is_guest": False, + "is_guest": False, }, ) @@ -505,8 +506,9 @@ def test_GET_whoami_guests(self): { "user_id": user_id, "device_id": device_id, - # Unstable until MSC3069 enters spec + # MSC3069 entered spec in Matrix 1.2 but maintained compatibility "org.matrix.msc3069.is_guest": True, + "is_guest": True, }, ) @@ -528,8 +530,9 @@ def test_GET_whoami_appservices(self): whoami, { "user_id": user_id, - # Unstable until MSC3069 enters spec + # MSC3069 entered spec in Matrix 1.2 but maintained compatibility "org.matrix.msc3069.is_guest": False, + "is_guest": False, }, ) self.assertFalse(hasattr(whoami, "device_id")) From 31a298fec792ec1d1efb5f47763e4b0a16f24e6d Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Fri, 18 Feb 2022 05:49:53 -0700 Subject: [PATCH 266/474] Advertise Matrix 1.1 in `/_matrix/client/versions` (#12020) --- changelog.d/12020.feature | 1 + synapse/rest/client/versions.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/12020.feature diff --git a/changelog.d/12020.feature b/changelog.d/12020.feature new file mode 100644 index 000000000000..1ac9d2060e2a --- /dev/null +++ b/changelog.d/12020.feature @@ -0,0 +1 @@ +Advertise Matrix 1.1 support on `/_matrix/client/versions`. \ No newline at end of file diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 2290c57c126e..35b88e9bbf14 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -73,6 +73,7 @@ def on_GET(self, request: Request) -> Tuple[int, JsonDict]: "r0.5.0", "r0.6.0", "r0.6.1", + "v1.1", ], # as per MSC1497: "unstable_features": { From eb609c65d0794dd49efcd924bdc8743fd4253a93 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Fri, 18 Feb 2022 14:54:31 +0000 Subject: [PATCH 267/474] Fix bug in `StateFilter.return_expanded()` and add some tests. 
(#12016) --- changelog.d/12016.misc | 1 + synapse/storage/state.py | 8 ++- tests/storage/test_state.py | 109 ++++++++++++++++++++++++++++++++++++ 3 files changed, 117 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12016.misc diff --git a/changelog.d/12016.misc b/changelog.d/12016.misc new file mode 100644 index 000000000000..8856ef46a913 --- /dev/null +++ b/changelog.d/12016.misc @@ -0,0 +1 @@ +Fix bug in `StateFilter.return_expanded()` and add some tests. \ No newline at end of file diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 913448f0f95e..e79ecf64a0ec 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -204,13 +204,16 @@ def return_expanded(self) -> "StateFilter": if get_all_members: # We want to return everything. return StateFilter.all() - else: + elif EventTypes.Member in self.types: # We want to return all non-members, but only particular # memberships return StateFilter( types=frozendict({EventTypes.Member: self.types[EventTypes.Member]}), include_others=True, ) + else: + # We want to return all non-members + return _ALL_NON_MEMBER_STATE_FILTER def make_sql_filter_clause(self) -> Tuple[str, List[str]]: """Converts the filter to an SQL clause. @@ -528,6 +531,9 @@ def approx_difference(self, other: "StateFilter") -> "StateFilter": _ALL_STATE_FILTER = StateFilter(types=frozendict(), include_others=True) +_ALL_NON_MEMBER_STATE_FILTER = StateFilter( + types=frozendict({EventTypes.Member: frozenset()}), include_others=True +) _NONE_STATE_FILTER = StateFilter(types=frozendict(), include_others=False) diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index 70d52b088c81..28c767ecfdfe 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -992,3 +992,112 @@ def test_state_filter_difference_simple_cases(self): StateFilter.none(), StateFilter.all(), ) + + +class StateFilterTestCase(TestCase): + def test_return_expanded(self): + """ + Tests the behaviour of the return_expanded() function that expands + StateFilters to include more state types (for the sake of cache hit rate). 
+ """ + + self.assertEqual(StateFilter.all().return_expanded(), StateFilter.all()) + + self.assertEqual(StateFilter.none().return_expanded(), StateFilter.none()) + + # Concrete-only state filters stay the same + # (Case: mixed filter) + self.assertEqual( + StateFilter.freeze( + { + EventTypes.Member: {"@wombat:test", "@alicia:test"}, + "some.other.state.type": {""}, + }, + include_others=False, + ).return_expanded(), + StateFilter.freeze( + { + EventTypes.Member: {"@wombat:test", "@alicia:test"}, + "some.other.state.type": {""}, + }, + include_others=False, + ), + ) + + # Concrete-only state filters stay the same + # (Case: non-member-only filter) + self.assertEqual( + StateFilter.freeze( + {"some.other.state.type": {""}}, include_others=False + ).return_expanded(), + StateFilter.freeze({"some.other.state.type": {""}}, include_others=False), + ) + + # Concrete-only state filters stay the same + # (Case: member-only filter) + self.assertEqual( + StateFilter.freeze( + { + EventTypes.Member: {"@wombat:test", "@alicia:test"}, + }, + include_others=False, + ).return_expanded(), + StateFilter.freeze( + { + EventTypes.Member: {"@wombat:test", "@alicia:test"}, + }, + include_others=False, + ), + ) + + # Wildcard member-only state filters stay the same + self.assertEqual( + StateFilter.freeze( + {EventTypes.Member: None}, + include_others=False, + ).return_expanded(), + StateFilter.freeze( + {EventTypes.Member: None}, + include_others=False, + ), + ) + + # If there is a wildcard in the non-member portion of the filter, + # it's expanded to include ALL non-member events. + # (Case: mixed filter) + self.assertEqual( + StateFilter.freeze( + { + EventTypes.Member: {"@wombat:test", "@alicia:test"}, + "some.other.state.type": None, + }, + include_others=False, + ).return_expanded(), + StateFilter.freeze( + {EventTypes.Member: {"@wombat:test", "@alicia:test"}}, + include_others=True, + ), + ) + + # If there is a wildcard in the non-member portion of the filter, + # it's expanded to include ALL non-member events. + # (Case: non-member-only filter) + self.assertEqual( + StateFilter.freeze( + { + "some.other.state.type": None, + }, + include_others=False, + ).return_expanded(), + StateFilter.freeze({EventTypes.Member: set()}, include_others=True), + ) + self.assertEqual( + StateFilter.freeze( + { + "some.other.state.type": None, + "yet.another.state.type": {"wombat"}, + }, + include_others=False, + ).return_expanded(), + StateFilter.freeze({EventTypes.Member: set()}, include_others=True), + ) From e6acd3cf4fe6910e99683c2ebe3dd917f0a3ae14 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Fri, 18 Feb 2022 15:57:26 +0000 Subject: [PATCH 268/474] Upgrade mypy to version 0.931 (#12030) Upgrade mypy to 0.931, mypy-zope to 0.3.5 and fix new complaints. --- changelog.d/12030.misc | 1 + setup.py | 4 ++-- stubs/sortedcontainers/sorteddict.pyi | 13 +++++++++---- synapse/handlers/search.py | 2 +- synapse/push/baserules.py | 10 ++++++---- synapse/push/httppusher.py | 2 +- synapse/streams/events.py | 6 ++++-- synapse/util/daemonize.py | 8 +++++--- synapse/util/patch_inline_callbacks.py | 6 ++++-- 9 files changed, 33 insertions(+), 19 deletions(-) create mode 100644 changelog.d/12030.misc diff --git a/changelog.d/12030.misc b/changelog.d/12030.misc new file mode 100644 index 000000000000..607ee97ce60e --- /dev/null +++ b/changelog.d/12030.misc @@ -0,0 +1 @@ +Upgrade mypy to version 0.931. 
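One of the changes in this patch replaces introspection via the private `__attrs_attrs__` attribute with the public `attr.fields()` API, which mypy understands. A minimal sketch of that pattern (the class and values are illustrative):

```python
import attr


@attr.s(auto_attribs=True, frozen=True)
class EventSourcesExample:
    typing: str
    receipt: str


sources = EventSourcesExample(typing="typing", receipt="receipt")

# attr.fields() returns the attrs-defined attributes of the class, each with
# a .name and .type, without touching the private __attrs_attrs__ attribute.
for attribute in attr.fields(EventSourcesExample):
    print(attribute.name, attribute.type, getattr(sources, attribute.name))
```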
diff --git a/setup.py b/setup.py index d0511c767f98..c80cb6f20767 100755 --- a/setup.py +++ b/setup.py @@ -103,8 +103,8 @@ def exec_file(path_segments): ] CONDITIONAL_REQUIREMENTS["mypy"] = [ - "mypy==0.910", - "mypy-zope==0.3.2", + "mypy==0.931", + "mypy-zope==0.3.5", "types-bleach>=4.1.0", "types-jsonschema>=3.2.0", "types-opentracing>=2.4.2", diff --git a/stubs/sortedcontainers/sorteddict.pyi b/stubs/sortedcontainers/sorteddict.pyi index 0eaef0049860..344d55cce118 100644 --- a/stubs/sortedcontainers/sorteddict.pyi +++ b/stubs/sortedcontainers/sorteddict.pyi @@ -66,13 +66,18 @@ class SortedDict(Dict[_KT, _VT]): def __copy__(self: _SD) -> _SD: ... @classmethod @overload - def fromkeys(cls, seq: Iterable[_T_h]) -> SortedDict[_T_h, None]: ... + def fromkeys( + cls, seq: Iterable[_T_h], value: None = ... + ) -> SortedDict[_T_h, None]: ... @classmethod @overload def fromkeys(cls, seq: Iterable[_T_h], value: _S) -> SortedDict[_T_h, _S]: ... - def keys(self) -> SortedKeysView[_KT]: ... - def items(self) -> SortedItemsView[_KT, _VT]: ... - def values(self) -> SortedValuesView[_VT]: ... + # As of Python 3.10, `dict_{keys,items,values}` have an extra `mapping` attribute and so + # `Sorted{Keys,Items,Values}View` are no longer compatible with them. + # See https://github.com/python/typeshed/issues/6837 + def keys(self) -> SortedKeysView[_KT]: ... # type: ignore[override] + def items(self) -> SortedItemsView[_KT, _VT]: ... # type: ignore[override] + def values(self) -> SortedValuesView[_VT]: ... # type: ignore[override] @overload def pop(self, key: _KT) -> _VT: ... @overload diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index afd14da11222..0e0e58de02f0 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -654,7 +654,7 @@ async def _calculate_event_contexts( self.storage, user.to_string(), res.events_after ) - context = { + context: JsonDict = { "events_before": events_before, "events_after": events_after, "start": await now_token.copy_and_replace( diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py index 910b05c0da32..832eaa34e9ef 100644 --- a/synapse/push/baserules.py +++ b/synapse/push/baserules.py @@ -130,7 +130,9 @@ def make_base_prepend_rules( return rules -BASE_APPEND_CONTENT_RULES = [ +# We have to annotate these types, otherwise mypy infers them as +# `List[Dict[str, Sequence[Collection[str]]]]`. +BASE_APPEND_CONTENT_RULES: List[Dict[str, Any]] = [ { "rule_id": "global/content/.m.rule.contains_user_name", "conditions": [ @@ -149,7 +151,7 @@ def make_base_prepend_rules( ] -BASE_PREPEND_OVERRIDE_RULES = [ +BASE_PREPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [ { "rule_id": "global/override/.m.rule.master", "enabled": False, @@ -159,7 +161,7 @@ def make_base_prepend_rules( ] -BASE_APPEND_OVERRIDE_RULES = [ +BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [ { "rule_id": "global/override/.m.rule.suppress_notices", "conditions": [ @@ -278,7 +280,7 @@ def make_base_prepend_rules( ] -BASE_APPEND_UNDERRIDE_RULES = [ +BASE_APPEND_UNDERRIDE_RULES: List[Dict[str, Any]] = [ { "rule_id": "global/underride/.m.rule.call", "conditions": [ diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 49bcc06e0b5d..52c7ff3572e7 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -325,7 +325,7 @@ async def _build_notification_dict( # This was checked in the __init__, but mypy doesn't seem to know that. 
assert self.data is not None if self.data.get("format") == "event_id_only": - d = { + d: Dict[str, Any] = { "notification": { "event_id": event.event_id, "room_id": event.room_id, diff --git a/synapse/streams/events.py b/synapse/streams/events.py index 21591d0bfd2f..4ec2a713cf55 100644 --- a/synapse/streams/events.py +++ b/synapse/streams/events.py @@ -37,14 +37,16 @@ class _EventSourcesInner: account_data: AccountDataEventSource def get_sources(self) -> Iterator[Tuple[str, EventSource]]: - for attribute in _EventSourcesInner.__attrs_attrs__: # type: ignore[attr-defined] + for attribute in attr.fields(_EventSourcesInner): yield attribute.name, getattr(self, attribute.name) class EventSources: def __init__(self, hs: "HomeServer"): self.sources = _EventSourcesInner( - *(attribute.type(hs) for attribute in _EventSourcesInner.__attrs_attrs__) # type: ignore[attr-defined] + # mypy thinks attribute.type is `Optional`, but we know it's never `None` here since + # all the attributes of `_EventSourcesInner` are annotated. + *(attribute.type(hs) for attribute in attr.fields(_EventSourcesInner)) # type: ignore[misc] ) self.store = hs.get_datastore() diff --git a/synapse/util/daemonize.py b/synapse/util/daemonize.py index de04f34e4e5c..031880ec39e1 100644 --- a/synapse/util/daemonize.py +++ b/synapse/util/daemonize.py @@ -20,7 +20,7 @@ import signal import sys from types import FrameType, TracebackType -from typing import NoReturn, Type +from typing import NoReturn, Optional, Type def daemonize_process(pid_file: str, logger: logging.Logger, chdir: str = "/") -> None: @@ -100,7 +100,9 @@ def daemonize_process(pid_file: str, logger: logging.Logger, chdir: str = "/") - # also catch any other uncaught exceptions before we get that far.) def excepthook( - type_: Type[BaseException], value: BaseException, traceback: TracebackType + type_: Type[BaseException], + value: BaseException, + traceback: Optional[TracebackType], ) -> None: logger.critical("Unhanded exception", exc_info=(type_, value, traceback)) @@ -123,7 +125,7 @@ def excepthook( sys.exit(1) # write a log line on SIGTERM. - def sigterm(signum: signal.Signals, frame: FrameType) -> NoReturn: + def sigterm(signum: int, frame: Optional[FrameType]) -> NoReturn: logger.warning("Caught signal %s. Stopping daemon." % signum) sys.exit(0) diff --git a/synapse/util/patch_inline_callbacks.py b/synapse/util/patch_inline_callbacks.py index 1f18654d47e7..6d4b0b7c5adb 100644 --- a/synapse/util/patch_inline_callbacks.py +++ b/synapse/util/patch_inline_callbacks.py @@ -14,7 +14,7 @@ import functools import sys -from typing import Any, Callable, Generator, List, TypeVar +from typing import Any, Callable, Generator, List, TypeVar, cast from twisted.internet import defer from twisted.internet.defer import Deferred @@ -174,7 +174,9 @@ def check_yield_points_inner( ) ) changes.append(err) - return getattr(e, "value", None) + # The `StopIteration` or `_DefGen_Return` contains the return value from the + # generator. + return cast(T, e.value) frame = gen.gi_frame From 284ea2025a27e2ceb6dacdd1f6f0b0fff3814dde Mon Sep 17 00:00:00 2001 From: reivilibre Date: Fri, 18 Feb 2022 17:23:31 +0000 Subject: [PATCH 269/474] Track and deduplicate in-flight requests to `_get_state_for_groups`. 
(#10870) Co-authored-by: Patrick Cloke --- changelog.d/10870.misc | 1 + synapse/storage/databases/state/store.py | 203 +++++++++++++++++--- tests/storage/databases/test_state_store.py | 133 +++++++++++++ 3 files changed, 312 insertions(+), 25 deletions(-) create mode 100644 changelog.d/10870.misc create mode 100644 tests/storage/databases/test_state_store.py diff --git a/changelog.d/10870.misc b/changelog.d/10870.misc new file mode 100644 index 000000000000..3af049b96961 --- /dev/null +++ b/changelog.d/10870.misc @@ -0,0 +1 @@ +Deduplicate in-flight requests in `_get_state_for_groups`. diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index 7614d76ac646..3af69a207690 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -13,11 +13,23 @@ # limitations under the License. import logging -from typing import TYPE_CHECKING, Collection, Dict, Iterable, List, Optional, Set, Tuple +from typing import ( + TYPE_CHECKING, + Collection, + Dict, + Iterable, + Optional, + Sequence, + Set, + Tuple, +) import attr +from twisted.internet import defer + from synapse.api.constants import EventTypes +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( DatabasePool, @@ -29,6 +41,12 @@ from synapse.storage.types import Cursor from synapse.storage.util.sequence import build_sequence_generator from synapse.types import MutableStateMap, StateKey, StateMap +from synapse.util import unwrapFirstError +from synapse.util.async_helpers import ( + AbstractObservableDeferred, + ObservableDeferred, + yieldable_gather_results, +) from synapse.util.caches.descriptors import cached from synapse.util.caches.dictionary_cache import DictionaryCache @@ -37,7 +55,6 @@ logger = logging.getLogger(__name__) - MAX_STATE_DELTA_HOPS = 100 @@ -106,6 +123,12 @@ def __init__( 500000, ) + # Current ongoing get_state_for_groups in-flight requests + # {group ID -> {StateFilter -> ObservableDeferred}} + self._state_group_inflight_requests: Dict[ + int, Dict[StateFilter, AbstractObservableDeferred[StateMap[str]]] + ] = {} + def get_max_state_group_txn(txn: Cursor) -> int: txn.execute("SELECT COALESCE(max(id), 0) FROM state_groups") return txn.fetchone()[0] # type: ignore @@ -157,7 +180,7 @@ def _get_state_group_delta_txn(txn: LoggingTransaction) -> _GetStateGroupDelta: ) async def _get_state_groups_from_groups( - self, groups: List[int], state_filter: StateFilter + self, groups: Sequence[int], state_filter: StateFilter ) -> Dict[int, StateMap[str]]: """Returns the state groups for a given set of groups from the database, filtering on types of state events. @@ -228,6 +251,150 @@ def _get_state_for_group_using_cache( return state_filter.filter_state(state_dict_ids), not missing_types + def _get_state_for_group_gather_inflight_requests( + self, group: int, state_filter_left_over: StateFilter + ) -> Tuple[Sequence[AbstractObservableDeferred[StateMap[str]]], StateFilter]: + """ + Attempts to gather in-flight requests and re-use them to retrieve state + for the given state group, filtered with the given state filter. + + Used as part of _get_state_for_group_using_inflight_cache. 
+ + Returns: + Tuple of two values: + A sequence of ObservableDeferreds to observe + A StateFilter representing what else needs to be requested to fulfill the request + """ + + inflight_requests = self._state_group_inflight_requests.get(group) + if inflight_requests is None: + # no requests for this group, need to retrieve it all ourselves + return (), state_filter_left_over + + # The list of ongoing requests which will help narrow the current request. + reusable_requests = [] + for (request_state_filter, request_deferred) in inflight_requests.items(): + new_state_filter_left_over = state_filter_left_over.approx_difference( + request_state_filter + ) + if new_state_filter_left_over == state_filter_left_over: + # Reusing this request would not gain us anything, so don't bother. + continue + + reusable_requests.append(request_deferred) + state_filter_left_over = new_state_filter_left_over + if state_filter_left_over == StateFilter.none(): + # we have managed to collect enough of the in-flight requests + # to cover our StateFilter and give us the state we need. + break + + return reusable_requests, state_filter_left_over + + async def _get_state_for_group_fire_request( + self, group: int, state_filter: StateFilter + ) -> StateMap[str]: + """ + Fires off a request to get the state at a state group, + potentially filtering by type and/or state key. + + This request will be tracked in the in-flight request cache and automatically + removed when it is finished. + + Used as part of _get_state_for_group_using_inflight_cache. + + Args: + group: ID of the state group for which we want to get state + state_filter: the state filter used to fetch state from the database + """ + cache_sequence_nm = self._state_group_cache.sequence + cache_sequence_m = self._state_group_members_cache.sequence + + # Help the cache hit ratio by expanding the filter a bit + db_state_filter = state_filter.return_expanded() + + async def _the_request() -> StateMap[str]: + group_to_state_dict = await self._get_state_groups_from_groups( + (group,), state_filter=db_state_filter + ) + + # Now let's update the caches + self._insert_into_cache( + group_to_state_dict, + db_state_filter, + cache_seq_num_members=cache_sequence_m, + cache_seq_num_non_members=cache_sequence_nm, + ) + + # Remove ourselves from the in-flight cache + group_request_dict = self._state_group_inflight_requests[group] + del group_request_dict[db_state_filter] + if not group_request_dict: + # If there are no more requests in-flight for this group, + # clean up the cache by removing the empty dictionary + del self._state_group_inflight_requests[group] + + return group_to_state_dict[group] + + # We don't immediately await the result, so must use run_in_background + # But we DO await the result before the current log context (request) + # finishes, so don't need to run it as a background process. + request_deferred = run_in_background(_the_request) + observable_deferred = ObservableDeferred(request_deferred, consumeErrors=True) + + # Insert the ObservableDeferred into the cache + group_request_dict = self._state_group_inflight_requests.setdefault(group, {}) + group_request_dict[db_state_filter] = observable_deferred + + return await make_deferred_yieldable(observable_deferred.observe()) + + async def _get_state_for_group_using_inflight_cache( + self, group: int, state_filter: StateFilter + ) -> MutableStateMap[str]: + """ + Gets the state at a state group, potentially filtering by type and/or + state key. + + 1. 
Calls _get_state_for_group_gather_inflight_requests to gather any + ongoing requests which might overlap with the current request. + 2. Fires a new request, using _get_state_for_group_fire_request, + for any state which cannot be gathered from ongoing requests. + + Args: + group: ID of the state group for which we want to get state + state_filter: the state filter used to fetch state from the database + Returns: + state map + """ + + # first, figure out whether we can re-use any in-flight requests + # (and if so, what would be left over) + ( + reusable_requests, + state_filter_left_over, + ) = self._get_state_for_group_gather_inflight_requests(group, state_filter) + + if state_filter_left_over != StateFilter.none(): + # Fetch remaining state + remaining = await self._get_state_for_group_fire_request( + group, state_filter_left_over + ) + assembled_state: MutableStateMap[str] = dict(remaining) + else: + assembled_state = {} + + gathered = await make_deferred_yieldable( + defer.gatherResults( + (r.observe() for r in reusable_requests), consumeErrors=True + ) + ).addErrback(unwrapFirstError) + + # assemble our result. + for result_piece in gathered: + assembled_state.update(result_piece) + + # Filter out any state that may be more than what we asked for. + return state_filter.filter_state(assembled_state) + async def _get_state_for_groups( self, groups: Iterable[int], state_filter: Optional[StateFilter] = None ) -> Dict[int, MutableStateMap[str]]: @@ -269,31 +436,17 @@ async def _get_state_for_groups( if not incomplete_groups: return state - cache_sequence_nm = self._state_group_cache.sequence - cache_sequence_m = self._state_group_members_cache.sequence - - # Help the cache hit ratio by expanding the filter a bit - db_state_filter = state_filter.return_expanded() - - group_to_state_dict = await self._get_state_groups_from_groups( - list(incomplete_groups), state_filter=db_state_filter - ) + async def get_from_cache(group: int, state_filter: StateFilter) -> None: + state[group] = await self._get_state_for_group_using_inflight_cache( + group, state_filter + ) - # Now lets update the caches - self._insert_into_cache( - group_to_state_dict, - db_state_filter, - cache_seq_num_members=cache_sequence_m, - cache_seq_num_non_members=cache_sequence_nm, + await yieldable_gather_results( + get_from_cache, + incomplete_groups, + state_filter, ) - # And finally update the result dict, by filtering out any extra - # stuff we pulled out of the database. - for group, group_state_dict in group_to_state_dict.items(): - # We just replace any existing entries, as we will have loaded - # everything we need from the database anyway. - state[group] = state_filter.filter_state(group_state_dict) - return state def _get_state_for_groups_using_cache( diff --git a/tests/storage/databases/test_state_store.py b/tests/storage/databases/test_state_store.py new file mode 100644 index 000000000000..cf126ee62d39 --- /dev/null +++ b/tests/storage/databases/test_state_store.py @@ -0,0 +1,133 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +import typing +from typing import Dict, List, Sequence, Tuple +from unittest.mock import patch + +from twisted.internet.defer import Deferred, ensureDeferred +from twisted.test.proto_helpers import MemoryReactor + +from synapse.storage.state import StateFilter +from synapse.types import MutableStateMap, StateMap +from synapse.util import Clock + +from tests.unittest import HomeserverTestCase + +if typing.TYPE_CHECKING: + from synapse.server import HomeServer + + +class StateGroupInflightCachingTestCase(HomeserverTestCase): + def prepare( + self, reactor: MemoryReactor, clock: Clock, homeserver: "HomeServer" + ) -> None: + self.state_storage = homeserver.get_storage().state + self.state_datastore = homeserver.get_datastores().state + # Patch out the `_get_state_groups_from_groups`. + # This is useful because it lets us pretend we have a slow database. + get_state_groups_patch = patch.object( + self.state_datastore, + "_get_state_groups_from_groups", + self._fake_get_state_groups_from_groups, + ) + get_state_groups_patch.start() + + self.addCleanup(get_state_groups_patch.stop) + self.get_state_group_calls: List[ + Tuple[Tuple[int, ...], StateFilter, Deferred[Dict[int, StateMap[str]]]] + ] = [] + + def _fake_get_state_groups_from_groups( + self, groups: Sequence[int], state_filter: StateFilter + ) -> "Deferred[Dict[int, StateMap[str]]]": + d: Deferred[Dict[int, StateMap[str]]] = Deferred() + self.get_state_group_calls.append((tuple(groups), state_filter, d)) + return d + + def _complete_request_fake( + self, + groups: Tuple[int, ...], + state_filter: StateFilter, + d: "Deferred[Dict[int, StateMap[str]]]", + ) -> None: + """ + Assemble a fake database response and complete the database request. + """ + + result: Dict[int, StateMap[str]] = {} + + for group in groups: + group_result: MutableStateMap[str] = {} + result[group] = group_result + + for state_type, state_keys in state_filter.types.items(): + if state_keys is None: + group_result[(state_type, "a")] = "xyz" + group_result[(state_type, "b")] = "xyz" + else: + for state_key in state_keys: + group_result[(state_type, state_key)] = "abc" + + if state_filter.include_others: + group_result[("other.event.type", "state.key")] = "123" + + d.callback(result) + + def test_duplicate_requests_deduplicated(self) -> None: + """ + Tests that duplicate requests for state are deduplicated. 
+ + This test: + - requests some state (state group 42, 'all' state filter) + - requests it again, before the first request finishes + - checks to see that only one database query was made + - completes the database query + - checks that both requests see the same retrieved state + """ + req1 = ensureDeferred( + self.state_datastore._get_state_for_group_using_inflight_cache( + 42, StateFilter.all() + ) + ) + self.pump(by=0.1) + + # This should have gone to the database + self.assertEqual(len(self.get_state_group_calls), 1) + self.assertFalse(req1.called) + + req2 = ensureDeferred( + self.state_datastore._get_state_for_group_using_inflight_cache( + 42, StateFilter.all() + ) + ) + self.pump(by=0.1) + + # No more calls should have gone to the database + self.assertEqual(len(self.get_state_group_calls), 1) + self.assertFalse(req1.called) + self.assertFalse(req2.called) + + groups, sf, d = self.get_state_group_calls[0] + self.assertEqual(groups, (42,)) + self.assertEqual(sf, StateFilter.all()) + + # Now we can complete the request + self._complete_request_fake(groups, sf, d) + + self.assertEqual( + self.get_success(req1), {("other.event.type", "state.key"): "123"} + ) + self.assertEqual( + self.get_success(req2), {("other.event.type", "state.key"): "123"} + ) From 444b04058b497da15812d7f14858e6270d54abb5 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 18 Feb 2022 12:24:25 -0500 Subject: [PATCH 270/474] Document why auth providers aren't validated in the admin API. (#12004) Since it is reasonable to give a future or past auth provider, which might not be in the current configuration. --- changelog.d/12004.doc | 1 + docs/admin_api/user_admin_api.md | 3 ++- synapse/module_api/__init__.py | 6 +++++- .../storage/databases/main/registration.py | 21 +++++++++++++++++++ 4 files changed, 29 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12004.doc diff --git a/changelog.d/12004.doc b/changelog.d/12004.doc new file mode 100644 index 000000000000..0b4baef21035 --- /dev/null +++ b/changelog.d/12004.doc @@ -0,0 +1 @@ +Clarify information about external Identity Provider IDs. diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md index 1bbe23708055..4076fcab65f1 100644 --- a/docs/admin_api/user_admin_api.md +++ b/docs/admin_api/user_admin_api.md @@ -126,7 +126,8 @@ Body parameters: [Sample Configuration File](../usage/configuration/homeserver_sample_config.html) section `sso` and `oidc_providers`. - `auth_provider` - string. ID of the external identity provider. Value of `idp_id` - in homeserver configuration. + in the homeserver configuration. Note that no error is raised if the provided + value is not in the homeserver configuration. - `external_id` - string, user ID in the external identity provider. - `avatar_url` - string, optional, must be a [MXC URI](https://matrix.org/docs/spec/client_server/r0.6.0#matrix-content-mxc-uris). diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 8a17b912d36d..07020bfb8d5a 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -653,7 +653,11 @@ def record_user_external_id( Added in Synapse v1.9.0. Args: - auth_provider: identifier for the remote auth provider + auth_provider: identifier for the remote auth provider, see `sso` and + `oidc_providers` in the homeserver configuration. + + Note that no error is raised if the provided value is not in the + homeserver configuration. 
external_id: id on that system user_id: complete mxid that it is mapped to """ diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index aac94fa46444..17110bb03314 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -622,10 +622,13 @@ async def record_user_external_id( ) -> None: """Record a mapping from an external user id to a mxid + See notes in _record_user_external_id_txn about what constitutes valid data. + Args: auth_provider: identifier for the remote auth provider external_id: id on that system user_id: complete mxid that it is mapped to + Raises: ExternalIDReuseException if the new external_id could not be mapped. """ @@ -648,6 +651,21 @@ def _record_user_external_id_txn( external_id: str, user_id: str, ) -> None: + """ + Record a mapping from an external user id to a mxid. + + Note that the auth provider IDs (and the external IDs) are not validated + against configured IdPs as Synapse does not know its relationship to + external systems. For example, it might be useful to pre-configure users + before enabling a new IdP or an IdP might be temporarily offline, but + still valid. + + Args: + txn: The database transaction. + auth_provider: identifier for the remote auth provider + external_id: id on that system + user_id: complete mxid that it is mapped to + """ self.db_pool.simple_insert_txn( txn, @@ -687,10 +705,13 @@ async def replace_user_external_id( """Replace mappings from external user ids to a mxid in a single transaction. All mappings are deleted and the new ones are created. + See notes in _record_user_external_id_txn about what constitutes valid data. + Args: record_external_ids: List with tuple of auth_provider and external_id to record user_id: complete mxid that it is mapped to + Raises: ExternalIDReuseException if the new external_id could not be mapped. """ From 99f6d79fe17b2f96c6c3cf85c4f0fee255758300 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Mon, 21 Feb 2022 08:59:29 -0700 Subject: [PATCH 271/474] Advertise Matrix 1.2 in `/_matrix/client/versions` (#12022) Co-authored-by: Patrick Cloke --- changelog.d/12022.feature | 1 + synapse/rest/client/versions.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/12022.feature diff --git a/changelog.d/12022.feature b/changelog.d/12022.feature new file mode 100644 index 000000000000..188fb125705a --- /dev/null +++ b/changelog.d/12022.feature @@ -0,0 +1 @@ +Advertise Matrix 1.2 support on `/_matrix/client/versions`. 
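As a hedged illustration (not part of the patch; assumes the third-party `requests` package and an invented homeserver URL), the change is visible in the endpoint's response:

    import requests

    resp = requests.get("https://matrix.example.org/_matrix/client/versions")
    # "v1.2" now appears alongside "v1.1", "r0.6.1", etc.
    assert "v1.2" in resp.json()["versions"]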
\ No newline at end of file diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 35b88e9bbf14..00f29344a8a2 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -74,6 +74,7 @@ def on_GET(self, request: Request) -> Tuple[int, JsonDict]: "r0.6.0", "r0.6.1", "v1.1", + "v1.2", ], # as per MSC1497: "unstable_features": { From 7c82da27aa6acff3ac40343719b440133955c207 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Mon, 21 Feb 2022 17:03:06 +0100 Subject: [PATCH 272/474] Add type hints to `synapse/storage/databases/main` (#11984) --- changelog.d/11984.misc | 1 + mypy.ini | 3 - synapse/handlers/presence.py | 26 ++++---- synapse/storage/databases/main/presence.py | 61 +++++++++++++------ .../storage/databases/main/purge_events.py | 13 ++-- .../storage/databases/main/user_directory.py | 22 +++---- synapse/types.py | 6 +- 7 files changed, 79 insertions(+), 53 deletions(-) create mode 100644 changelog.d/11984.misc diff --git a/changelog.d/11984.misc b/changelog.d/11984.misc new file mode 100644 index 000000000000..8e405b922674 --- /dev/null +++ b/changelog.d/11984.misc @@ -0,0 +1 @@ +Add missing type hints to storage classes. \ No newline at end of file diff --git a/mypy.ini b/mypy.ini index 63848d664c57..610660b9b721 100644 --- a/mypy.ini +++ b/mypy.ini @@ -31,14 +31,11 @@ exclude = (?x) |synapse/storage/databases/main/group_server.py |synapse/storage/databases/main/metrics.py |synapse/storage/databases/main/monthly_active_users.py - |synapse/storage/databases/main/presence.py - |synapse/storage/databases/main/purge_events.py |synapse/storage/databases/main/push_rule.py |synapse/storage/databases/main/receipts.py |synapse/storage/databases/main/roommember.py |synapse/storage/databases/main/search.py |synapse/storage/databases/main/state.py - |synapse/storage/databases/main/user_directory.py |synapse/storage/schema/ |tests/api/test_auth.py diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 067c43ae4714..b223b72623e8 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -204,25 +204,27 @@ async def current_state_for_users( Returns: dict: `user_id` -> `UserPresenceState` """ - states = { - user_id: self.user_to_current_state.get(user_id, None) - for user_id in user_ids - } + states = {} + missing = [] + for user_id in user_ids: + state = self.user_to_current_state.get(user_id, None) + if state: + states[user_id] = state + else: + missing.append(user_id) - missing = [user_id for user_id, state in states.items() if not state] if missing: # There are things not in our in memory cache. Lets pull them out of # the database. 
res = await self.store.get_presence_for_users(missing) states.update(res) - missing = [user_id for user_id, state in states.items() if not state] - if missing: - new = { - user_id: UserPresenceState.default(user_id) for user_id in missing - } - states.update(new) - self.user_to_current_state.update(new) + for user_id in missing: + # if user has no state in database, create the state + if not res.get(user_id, None): + new_state = UserPresenceState.default(user_id) + states[user_id] = new_state + self.user_to_current_state[user_id] = new_state return states diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py index 4f05811a77eb..d3c4611686f8 100644 --- a/synapse/storage/databases/main/presence.py +++ b/synapse/storage/databases/main/presence.py @@ -12,15 +12,23 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import TYPE_CHECKING, Dict, Iterable, List, Tuple +from typing import TYPE_CHECKING, Dict, Iterable, List, Tuple, cast from synapse.api.presence import PresenceState, UserPresenceState from synapse.replication.tcp.streams import PresenceStream from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause -from synapse.storage.database import DatabasePool, LoggingDatabaseConnection +from synapse.storage.database import ( + DatabasePool, + LoggingDatabaseConnection, + LoggingTransaction, +) from synapse.storage.engines import PostgresEngine from synapse.storage.types import Connection -from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator +from synapse.storage.util.id_generators import ( + AbstractStreamIdGenerator, + MultiWriterIdGenerator, + StreamIdGenerator, +) from synapse.util.caches.descriptors import cached, cachedList from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.iterutils import batch_iter @@ -35,7 +43,7 @@ def __init__( database: DatabasePool, db_conn: LoggingDatabaseConnection, hs: "HomeServer", - ): + ) -> None: super().__init__(database, db_conn, hs) # Used by `PresenceStore._get_active_presence()` @@ -54,11 +62,14 @@ def __init__( database: DatabasePool, db_conn: LoggingDatabaseConnection, hs: "HomeServer", - ): + ) -> None: super().__init__(database, db_conn, hs) + self._instance_name = hs.get_instance_name() + self._presence_id_gen: AbstractStreamIdGenerator + self._can_persist_presence = ( - hs.get_instance_name() in hs.config.worker.writers.presence + self._instance_name in hs.config.worker.writers.presence ) if isinstance(database.engine, PostgresEngine): @@ -109,7 +120,9 @@ async def update_presence(self, presence_states) -> Tuple[int, int]: return stream_orderings[-1], self._presence_id_gen.get_current_token() - def _update_presence_txn(self, txn, stream_orderings, presence_states): + def _update_presence_txn( + self, txn: LoggingTransaction, stream_orderings, presence_states + ) -> None: for stream_id, state in zip(stream_orderings, presence_states): txn.call_after( self.presence_stream_cache.entity_has_changed, state.user_id, stream_id @@ -183,19 +196,23 @@ async def get_all_presence_updates( if last_id == current_id: return [], current_id, False - def get_all_presence_updates_txn(txn): + def get_all_presence_updates_txn( + txn: LoggingTransaction, + ) -> Tuple[List[Tuple[int, list]], int, bool]: sql = """ SELECT stream_id, user_id, state, last_active_ts, last_federation_update_ts, last_user_sync_ts, - status_msg, - currently_active + status_msg, currently_active 
FROM presence_stream WHERE ? < stream_id AND stream_id <= ? ORDER BY stream_id ASC LIMIT ? """ txn.execute(sql, (last_id, current_id, limit)) - updates = [(row[0], row[1:]) for row in txn] + updates = cast( + List[Tuple[int, list]], + [(row[0], row[1:]) for row in txn], + ) upper_bound = current_id limited = False @@ -210,7 +227,7 @@ def get_all_presence_updates_txn(txn): ) @cached() - def _get_presence_for_user(self, user_id): + def _get_presence_for_user(self, user_id: str) -> None: raise NotImplementedError() @cachedList( @@ -218,7 +235,9 @@ def _get_presence_for_user(self, user_id): list_name="user_ids", num_args=1, ) - async def get_presence_for_users(self, user_ids): + async def get_presence_for_users( + self, user_ids: Iterable[str] + ) -> Dict[str, UserPresenceState]: rows = await self.db_pool.simple_select_many_batch( table="presence_stream", column="user_id", @@ -257,7 +276,9 @@ async def should_user_receive_full_presence_with_token( True if the user should have full presence sent to them, False otherwise. """ - def _should_user_receive_full_presence_with_token_txn(txn): + def _should_user_receive_full_presence_with_token_txn( + txn: LoggingTransaction, + ) -> bool: sql = """ SELECT 1 FROM users_to_send_full_presence_to WHERE user_id = ? @@ -271,7 +292,7 @@ def _should_user_receive_full_presence_with_token_txn(txn): _should_user_receive_full_presence_with_token_txn, ) - async def add_users_to_send_full_presence_to(self, user_ids: Iterable[str]): + async def add_users_to_send_full_presence_to(self, user_ids: Iterable[str]) -> None: """Adds to the list of users who should receive a full snapshot of presence upon their next sync. @@ -353,10 +374,10 @@ async def get_presence_for_all_users( return users_to_state - def get_current_presence_token(self): + def get_current_presence_token(self) -> int: return self._presence_id_gen.get_current_token() - def _get_active_presence(self, db_conn: Connection): + def _get_active_presence(self, db_conn: Connection) -> List[UserPresenceState]: """Fetch non-offline presence from the database so that we can register the appropriate time outs. """ @@ -379,12 +400,12 @@ def _get_active_presence(self, db_conn: Connection): return [UserPresenceState(**row) for row in rows] - def take_presence_startup_info(self): + def take_presence_startup_info(self) -> List[UserPresenceState]: active_on_startup = self._presence_on_startup - self._presence_on_startup = None + self._presence_on_startup = [] return active_on_startup - def process_replication_rows(self, stream_name, instance_name, token, rows): + def process_replication_rows(self, stream_name, instance_name, token, rows) -> None: if stream_name == PresenceStream.NAME: self._presence_id_gen.advance(instance_name, token) for row in rows: diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py index e87a8fb85dce..2e3818e43244 100644 --- a/synapse/storage/databases/main/purge_events.py +++ b/synapse/storage/databases/main/purge_events.py @@ -13,9 +13,10 @@ # limitations under the License. 
import logging -from typing import Any, List, Set, Tuple +from typing import Any, List, Set, Tuple, cast from synapse.api.errors import SynapseError +from synapse.storage.database import LoggingTransaction from synapse.storage.databases.main import CacheInvalidationWorkerStore from synapse.storage.databases.main.state import StateGroupWorkerStore from synapse.types import RoomStreamToken @@ -55,7 +56,11 @@ async def purge_history( ) def _purge_history_txn( - self, txn, room_id: str, token: RoomStreamToken, delete_local_events: bool + self, + txn: LoggingTransaction, + room_id: str, + token: RoomStreamToken, + delete_local_events: bool, ) -> Set[int]: # Tables that should be pruned: # event_auth @@ -273,7 +278,7 @@ def _purge_history_txn( """, (room_id,), ) - (min_depth,) = txn.fetchone() + (min_depth,) = cast(Tuple[int], txn.fetchone()) logger.info("[purge] updating room_depth to %d", min_depth) @@ -318,7 +323,7 @@ async def purge_room(self, room_id: str) -> List[int]: "purge_room", self._purge_room_txn, room_id ) - def _purge_room_txn(self, txn, room_id: str) -> List[int]: + def _purge_room_txn(self, txn: LoggingTransaction, room_id: str) -> List[int]: # First we fetch all the state groups that should be deleted, before # we delete that information. txn.execute( diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index f7c778bdf22b..e7fddd24262a 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -58,7 +58,7 @@ def __init__( database: DatabasePool, db_conn: LoggingDatabaseConnection, hs: "HomeServer", - ): + ) -> None: super().__init__(database, db_conn, hs) self.server_name = hs.hostname @@ -234,10 +234,10 @@ def _get_next_batch( processed_event_count = 0 for room_id, event_count in rooms_to_work_on: - is_in_room = await self.is_host_joined(room_id, self.server_name) + is_in_room = await self.is_host_joined(room_id, self.server_name) # type: ignore[attr-defined] if is_in_room: - users_with_profile = await self.get_users_in_room_with_profiles(room_id) + users_with_profile = await self.get_users_in_room_with_profiles(room_id) # type: ignore[attr-defined] # Throw away users excluded from the directory. users_with_profile = { user_id: profile @@ -368,7 +368,7 @@ def _get_next_batch(txn: LoggingTransaction) -> Optional[List[str]]: for user_id in users_to_work_on: if await self.should_include_local_user_in_dir(user_id): - profile = await self.get_profileinfo(get_localpart_from_id(user_id)) + profile = await self.get_profileinfo(get_localpart_from_id(user_id)) # type: ignore[attr-defined] await self.update_profile_in_user_dir( user_id, profile.display_name, profile.avatar_url ) @@ -397,7 +397,7 @@ async def should_include_local_user_in_dir(self, user: str) -> bool: # technically it could be DM-able. In the future, this could potentially # be configurable per-appservice whether the appservice sender can be # contacted. - if self.get_app_service_by_user_id(user) is not None: + if self.get_app_service_by_user_id(user) is not None: # type: ignore[attr-defined] return False # We're opting to exclude appservice users (anyone matching the user @@ -405,17 +405,17 @@ async def should_include_local_user_in_dir(self, user: str) -> bool: # they could be DM-able. In the future, this could potentially # be configurable per-appservice whether the appservice users can be # contacted. 
- if self.get_if_app_services_interested_in_user(user): + if self.get_if_app_services_interested_in_user(user): # type: ignore[attr-defined] # TODO we might want to make this configurable for each app service return False # Support users are for diagnostics and should not appear in the user directory. - if await self.is_support_user(user): + if await self.is_support_user(user): # type: ignore[attr-defined] return False # Deactivated users aren't contactable, so should not appear in the user directory. try: - if await self.get_user_deactivated_status(user): + if await self.get_user_deactivated_status(user): # type: ignore[attr-defined] return False except StoreError: # No such user in the users table. No need to do this when calling @@ -433,20 +433,20 @@ async def is_room_world_readable_or_publicly_joinable(self, room_id: str) -> boo (EventTypes.RoomHistoryVisibility, ""), ) - current_state_ids = await self.get_filtered_current_state_ids( + current_state_ids = await self.get_filtered_current_state_ids( # type: ignore[attr-defined] room_id, StateFilter.from_types(types_to_filter) ) join_rules_id = current_state_ids.get((EventTypes.JoinRules, "")) if join_rules_id: - join_rule_ev = await self.get_event(join_rules_id, allow_none=True) + join_rule_ev = await self.get_event(join_rules_id, allow_none=True) # type: ignore[attr-defined] if join_rule_ev: if join_rule_ev.content.get("join_rule") == JoinRules.PUBLIC: return True hist_vis_id = current_state_ids.get((EventTypes.RoomHistoryVisibility, "")) if hist_vis_id: - hist_vis_ev = await self.get_event(hist_vis_id, allow_none=True) + hist_vis_ev = await self.get_event(hist_vis_id, allow_none=True) # type: ignore[attr-defined] if hist_vis_ev: if ( hist_vis_ev.content.get("history_visibility") diff --git a/synapse/types.py b/synapse/types.py index f89fb216a656..53be3583a013 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -51,7 +51,7 @@ if TYPE_CHECKING: from synapse.appservice.api import ApplicationService - from synapse.storage.databases.main import DataStore + from synapse.storage.databases.main import DataStore, PurgeEventsStore # Define a state map type from type/state_key to T (usually an event ID or # event) @@ -485,7 +485,7 @@ def __attrs_post_init__(self) -> None: ) @classmethod - async def parse(cls, store: "DataStore", string: str) -> "RoomStreamToken": + async def parse(cls, store: "PurgeEventsStore", string: str) -> "RoomStreamToken": try: if string[0] == "s": return cls(topological=None, stream=int(string[1:])) @@ -502,7 +502,7 @@ async def parse(cls, store: "DataStore", string: str) -> "RoomStreamToken": instance_id = int(key) pos = int(value) - instance_name = await store.get_name_from_instance_id(instance_id) + instance_name = await store.get_name_from_instance_id(instance_id) # type: ignore[attr-defined] instance_map[instance_name] = pos return cls( From a85dde34459451f2ccece2374ca6280b5f55335f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 21 Feb 2022 18:37:04 +0000 Subject: [PATCH 273/474] Minor typing fixes (#12034) These started failing in https://github.com/matrix-org/synapse/pull/12031... I'm a bit mystified by how they ever worked. 
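A minimal sketch (invented names, not from the patch) of the `Optional`-narrowing idiom these fixes apply: reading an `Optional` attribute into a local variable lets mypy narrow the local's type after a `None` check, whereas narrowing of the attribute itself is fragile and is lost across intervening calls and awaits.

    from typing import Optional


    class Example:
        def __init__(self) -> None:
            self.attr: Optional[int] = None

        def use(self) -> int:
            value = self.attr
            if value is None:
                value = 0
            # `value` is a plain `int` here as far as mypy is concerned,
            # no matter what other code does to `self.attr` in the meantime.
            return value + 1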
--- changelog.d/12034.misc | 1 + .../federation/sender/per_destination_queue.py | 18 +++++++++--------- synapse/handlers/message.py | 10 ++++++---- synapse/handlers/register.py | 6 +++--- 4 files changed, 19 insertions(+), 16 deletions(-) create mode 100644 changelog.d/12034.misc diff --git a/changelog.d/12034.misc b/changelog.d/12034.misc new file mode 100644 index 000000000000..8374a63220d1 --- /dev/null +++ b/changelog.d/12034.misc @@ -0,0 +1 @@ +Minor typing fixes. diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index 8152e80b88d2..c3132f731921 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -381,7 +381,9 @@ async def _catch_up_transmission_loop(self) -> None: ) ) - if self._last_successful_stream_ordering is None: + last_successful_stream_ordering = self._last_successful_stream_ordering + + if last_successful_stream_ordering is None: # if it's still None, then this means we don't have the information # in our database ­ we haven't successfully sent a PDU to this server # (at least since the introduction of the feature tracking @@ -394,8 +396,7 @@ async def _catch_up_transmission_loop(self) -> None: # get at most 50 catchup room/PDUs while True: event_ids = await self._store.get_catch_up_room_event_ids( - self._destination, - self._last_successful_stream_ordering, + self._destination, last_successful_stream_ordering ) if not event_ids: @@ -403,7 +404,7 @@ async def _catch_up_transmission_loop(self) -> None: # of a race condition, so we check that no new events have been # skipped due to us being in catch-up mode - if self._catchup_last_skipped > self._last_successful_stream_ordering: + if self._catchup_last_skipped > last_successful_stream_ordering: # another event has been skipped because we were in catch-up mode continue @@ -470,7 +471,7 @@ async def _catch_up_transmission_loop(self) -> None: # offline if ( p.internal_metadata.stream_ordering - < self._last_successful_stream_ordering + < last_successful_stream_ordering ): continue @@ -513,12 +514,11 @@ async def _catch_up_transmission_loop(self) -> None: # from the *original* PDU, rather than the PDU(s) we actually # send. This is because we use it to mark our position in the # queue of missed PDUs to process. 
- self._last_successful_stream_ordering = ( - pdu.internal_metadata.stream_ordering - ) + last_successful_stream_ordering = pdu.internal_metadata.stream_ordering + self._last_successful_stream_ordering = last_successful_stream_ordering await self._store.set_destination_last_successful_stream_ordering( - self._destination, self._last_successful_stream_ordering + self._destination, last_successful_stream_ordering ) def _get_rr_edus(self, force_flush: bool) -> Iterable[Edu]: diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 9267e586a83d..4d0da842873f 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -550,10 +550,11 @@ async def create_event( if event_dict["type"] == EventTypes.Create and event_dict["state_key"] == "": room_version_id = event_dict["content"]["room_version"] - room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version_id) - if not room_version_obj: + maybe_room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version_id) + if not maybe_room_version_obj: # this can happen if support is withdrawn for a room version raise UnsupportedRoomVersionError(room_version_id) + room_version_obj = maybe_room_version_obj else: try: room_version_obj = await self.store.get_room_version( @@ -1145,12 +1146,13 @@ async def handle_new_client_event( room_version_id = event.content.get( "room_version", RoomVersions.V1.identifier ) - room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version_id) - if not room_version_obj: + maybe_room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version_id) + if not maybe_room_version_obj: raise UnsupportedRoomVersionError( "Attempt to create a room with unsupported room version %s" % (room_version_id,) ) + room_version_obj = maybe_room_version_obj else: room_version_obj = await self.store.get_room_version(event.room_id) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index a719d5eef351..80320d2c070b 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -320,12 +320,12 @@ async def register_user( if fail_count > 10: raise SynapseError(500, "Unable to find a suitable guest user ID") - localpart = await self.store.generate_user_id() - user = UserID(localpart, self.hs.hostname) + generated_localpart = await self.store.generate_user_id() + user = UserID(generated_localpart, self.hs.hostname) user_id = user.to_string() self.check_user_id_not_appservice_exclusive(user_id) if generate_display_name: - default_display_name = localpart + default_display_name = generated_localpart try: await self.register_with_store( user_id=user_id, From 3070af4809016f547adf55fb02dbe9e569590f7e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 21 Feb 2022 19:27:35 +0000 Subject: [PATCH 274/474] remote join processing: get create event from state, not auth_chain (#12039) A follow-up to #12005, in which I apparently missed that there are a bunch of other places that assume the create event is in the auth chain. 
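A standalone sketch (not the patch's code) of the lookup being moved: the `m.room.create` event is always part of the room's state, but a `/send_join` response is not guaranteed to repeat it in `auth_chain`, so the state list is the reliable place to find it.

    from typing import Iterable, Optional

    from synapse.api.constants import EventTypes
    from synapse.events import EventBase


    def find_create_event(state_events: Iterable[EventBase]) -> Optional[EventBase]:
        for event in state_events:
            # The create event is the state event with type "m.room.create"
            # and an empty state key.
            if (event.type, event.state_key) == (EventTypes.Create, ""):
                return event
        return None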
--- changelog.d/12039.misc | 1 + synapse/federation/federation_client.py | 6 ++++-- synapse/handlers/federation.py | 2 +- synapse/storage/databases/main/room.py | 4 ++-- 4 files changed, 8 insertions(+), 5 deletions(-) create mode 100644 changelog.d/12039.misc diff --git a/changelog.d/12039.misc b/changelog.d/12039.misc new file mode 100644 index 000000000000..45e21dbe5953 --- /dev/null +++ b/changelog.d/12039.misc @@ -0,0 +1 @@ +Preparation for faster-room-join work: when parsing the `send_join` response, get the `m.room.create` event from `state`, not `auth_chain`. diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 9f56f97d9965..48c90bf0bbbd 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -870,13 +870,15 @@ async def _execute(pdu: EventBase) -> None: for s in signed_state: s.internal_metadata = copy.deepcopy(s.internal_metadata) - # double-check that the same create event has ended up in the auth chain + # double-check that the auth chain doesn't include a different create event auth_chain_create_events = [ e.event_id for e in signed_auth if (e.type, e.state_key) == (EventTypes.Create, "") ] - if auth_chain_create_events != [create_event.event_id]: + if auth_chain_create_events and auth_chain_create_events != [ + create_event.event_id + ]: raise InvalidResponseError( "Unexpected create event(s) in auth chain: %s" % (auth_chain_create_events,) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index c0f642005ff4..c8356f233dad 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -516,7 +516,7 @@ async def do_invite_join( await self.store.upsert_room_on_join( room_id=room_id, room_version=room_version_obj, - auth_events=auth_chain, + state_events=state, ) max_stream_id = await self._federation_event_handler.process_remote_join( diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 95167116c953..0416df64ce8d 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -1498,7 +1498,7 @@ def __init__( self._event_reports_id_gen = IdGenerator(db_conn, "event_reports", "id") async def upsert_room_on_join( - self, room_id: str, room_version: RoomVersion, auth_events: List[EventBase] + self, room_id: str, room_version: RoomVersion, state_events: List[EventBase] ) -> None: """Ensure that the room is stored in the table @@ -1511,7 +1511,7 @@ async def upsert_room_on_join( has_auth_chain_index = await self.has_auth_chain_index(room_id) create_event = None - for e in auth_events: + for e in state_events: if (e.type, e.state_key) == (EventTypes.Create, ""): create_event = e break From d7cb0dcbaa59c9e16f9b46415dbc645735dce8f4 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Tue, 22 Feb 2022 04:20:45 -0700 Subject: [PATCH 275/474] Use v3 endpoints for fallback auth (Matrix 1.1) (#12019) --- changelog.d/12019.misc | 1 + synapse/rest/client/auth.py | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/12019.misc diff --git a/changelog.d/12019.misc b/changelog.d/12019.misc new file mode 100644 index 000000000000..b2186320ead4 --- /dev/null +++ b/changelog.d/12019.misc @@ -0,0 +1 @@ +Use Matrix v1.1 endpoints (`/_matrix/client/v3/auth/...`) in fallback auth HTML forms. 
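Illustration only (the prefix value is taken from `synapse.api.urls`): the fallback form's post-back URL moves from the legacy `r0` prefix to `v3`, e.g. for the reCAPTCHA stage:

    CLIENT_API_PREFIX = "/_matrix/client"

    old = "%s/r0/auth/%s/fallback/web" % (CLIENT_API_PREFIX, "m.login.recaptcha")
    new = "%s/v3/auth/%s/fallback/web" % (CLIENT_API_PREFIX, "m.login.recaptcha")

    assert new == "/_matrix/client/v3/auth/m.login.recaptcha/fallback/web"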
\ No newline at end of file diff --git a/synapse/rest/client/auth.py b/synapse/rest/client/auth.py index 9c15a04338b1..e0b2b80e5b9b 100644 --- a/synapse/rest/client/auth.py +++ b/synapse/rest/client/auth.py @@ -62,7 +62,7 @@ async def on_GET(self, request: SynapseRequest, stagetype: str) -> None: if stagetype == LoginType.RECAPTCHA: html = self.recaptcha_template.render( session=session, - myurl="%s/r0/auth/%s/fallback/web" + myurl="%s/v3/auth/%s/fallback/web" % (CLIENT_API_PREFIX, LoginType.RECAPTCHA), sitekey=self.hs.config.captcha.recaptcha_public_key, ) @@ -74,7 +74,7 @@ async def on_GET(self, request: SynapseRequest, stagetype: str) -> None: self.hs.config.server.public_baseurl, self.hs.config.consent.user_consent_version, ), - myurl="%s/r0/auth/%s/fallback/web" + myurl="%s/v3/auth/%s/fallback/web" % (CLIENT_API_PREFIX, LoginType.TERMS), ) @@ -118,7 +118,7 @@ async def on_POST(self, request: Request, stagetype: str) -> None: # Authentication failed, let user try again html = self.recaptcha_template.render( session=session, - myurl="%s/r0/auth/%s/fallback/web" + myurl="%s/v3/auth/%s/fallback/web" % (CLIENT_API_PREFIX, LoginType.RECAPTCHA), sitekey=self.hs.config.captcha.recaptcha_public_key, error=e.msg, @@ -143,7 +143,7 @@ async def on_POST(self, request: Request, stagetype: str) -> None: self.hs.config.server.public_baseurl, self.hs.config.consent.user_consent_version, ), - myurl="%s/r0/auth/%s/fallback/web" + myurl="%s/v3/auth/%s/fallback/web" % (CLIENT_API_PREFIX, LoginType.TERMS), error=e.msg, ) From 1ae492c8c09edef4a6d2af65588895305eaedec3 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 22 Feb 2022 11:30:19 +0000 Subject: [PATCH 276/474] Move isort config to `pyproject.toml` (#12052) --- changelog.d/12052.misc | 1 + pyproject.toml | 12 ++++++++++++ setup.cfg | 11 ----------- tox.ini | 2 +- 4 files changed, 14 insertions(+), 12 deletions(-) create mode 100644 changelog.d/12052.misc diff --git a/changelog.d/12052.misc b/changelog.d/12052.misc new file mode 100644 index 000000000000..fbaff67e959f --- /dev/null +++ b/changelog.d/12052.misc @@ -0,0 +1 @@ +Move `isort` configuration to `pyproject.toml`. 
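For illustration (invented module, not part of the patch): the custom section order in the configuration below (FUTURE, STDLIB, THIRDPARTY, TWISTED, FIRSTPARTY, TESTS, LOCALFOLDER) groups imports like this:

    import logging  # STDLIB

    import attr  # THIRDPARTY

    from twisted.internet import defer  # TWISTED (covers twisted and OpenSSL)

    from synapse.util import Clock  # FIRSTPARTY (synapse)

    from tests.unittest import HomeserverTestCase  # TESTS (tests)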
diff --git a/pyproject.toml b/pyproject.toml index 963f149c6af6..c9cd0cf6ec10 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,3 +54,15 @@ exclude = ''' )/ ) ''' + +[tool.isort] +line_length = 88 +sections = ["FUTURE", "STDLIB", "THIRDPARTY", "TWISTED", "FIRSTPARTY", "TESTS", "LOCALFOLDER"] +default_section = "THIRDPARTY" +known_first_party = ["synapse"] +known_tests = ["tests"] +known_twisted = ["twisted", "OpenSSL"] +multi_line_output = 3 +include_trailing_comma = true +combine_as_imports = true + diff --git a/setup.cfg b/setup.cfg index e5ceb7ed1933..a0506572d96b 100644 --- a/setup.cfg +++ b/setup.cfg @@ -19,14 +19,3 @@ ignore = # E731: do not assign a lambda expression, use a def # E501: Line too long (black enforces this for us) ignore=W503,W504,E203,E731,E501 - -[isort] -line_length = 88 -sections=FUTURE,STDLIB,THIRDPARTY,TWISTED,FIRSTPARTY,TESTS,LOCALFOLDER -default_section=THIRDPARTY -known_first_party = synapse -known_tests=tests -known_twisted=twisted,OpenSSL -multi_line_output=3 -include_trailing_comma=true -combine_as_imports=true diff --git a/tox.ini b/tox.ini index 41678aa38bee..436ecf7552f7 100644 --- a/tox.ini +++ b/tox.ini @@ -166,7 +166,7 @@ commands = [testenv:check_isort] extras = lint -commands = isort -c --df --sp setup.cfg {[base]lint_targets} +commands = isort -c --df {[base]lint_targets} [testenv:check-newsfragment] skip_install = true From 551dd8c9f8d10681dc535ec43d652bbadf60de48 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 22 Feb 2022 11:32:11 +0000 Subject: [PATCH 277/474] 1.53.0 --- CHANGES.md | 6 ++++++ debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 4210d09d2d48..8d91a7921138 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 1.53.0 (2022-02-22) +=========================== + +No significant changes. + + Synapse 1.53.0rc1 (2022-02-15) ============================== diff --git a/debian/changelog b/debian/changelog index fe79d7ed57b9..574930c085cd 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.53.0) stable; urgency=medium + + * New synapse release 1.53.0. + + -- Synapse Packaging team Tue, 22 Feb 2022 11:32:06 +0000 + matrix-synapse-py3 (1.53.0~rc1) stable; urgency=medium * New synapse release 1.53.0~rc1. 
diff --git a/synapse/__init__.py b/synapse/__init__.py index 2bf8eb2a11e4..903f2e815dd9 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.53.0rc1" +__version__ = "1.53.0" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From af2c1e3d2a56c4042db27e70b72409ce8f4b406e Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 22 Feb 2022 11:33:37 +0000 Subject: [PATCH 278/474] Tidy the building of sdists and wheels (#12051) * Don't build distribution pkgs in tests.yml * Run `release-artifacts` on release branches * Use backend-meta workflow for packaging --- .github/workflows/release-artifacts.yml | 14 ++------------ .github/workflows/tests.yml | 17 +---------------- changelog.d/12051.misc | 1 + 3 files changed, 4 insertions(+), 28 deletions(-) create mode 100644 changelog.d/12051.misc diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index eb294f161939..eee3633d5043 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -7,7 +7,7 @@ on: # of things breaking (but only build one set of debs) pull_request: push: - branches: ["develop"] + branches: ["develop", "release-*"] # we do the full build on tags. tags: ["v*"] @@ -91,17 +91,7 @@ jobs: build-sdist: name: "Build pypi distribution files" - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 - - run: pip install wheel - - run: | - python setup.py sdist bdist_wheel - - uses: actions/upload-artifact@v2 - with: - name: python-dist - path: dist/* + uses: "matrix-org/backend-meta/.github/workflows/packaging.yml@v1" # if it's a tag, create a release and attach the artifacts to it attach-assets: diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 75ac1304bfbc..bbf1033bdd2c 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -48,24 +48,10 @@ jobs: env: PULL_REQUEST_NUMBER: ${{ github.event.number }} - lint-sdist: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 - with: - python-version: "3.x" - - run: pip install wheel - - run: python setup.py sdist bdist_wheel - - uses: actions/upload-artifact@v2 - with: - name: Python Distributions - path: dist/* - # Dummy step to gate other tests on without repeating the whole list linting-done: if: ${{ !cancelled() }} # Run this even if prior jobs were skipped - needs: [lint, lint-crlf, lint-newsfile, lint-sdist] + needs: [lint, lint-crlf, lint-newsfile] runs-on: ubuntu-latest steps: - run: "true" @@ -397,7 +383,6 @@ jobs: - lint - lint-crlf - lint-newsfile - - lint-sdist - trial - trial-olddeps - sytest diff --git a/changelog.d/12051.misc b/changelog.d/12051.misc new file mode 100644 index 000000000000..9959191352ad --- /dev/null +++ b/changelog.d/12051.misc @@ -0,0 +1 @@ +Tidy up GitHub Actions config which builds distributions for PyPI. \ No newline at end of file From 546b9c9e648f5e2b25bb7c8350570787ff9befae Mon Sep 17 00:00:00 2001 From: reivilibre Date: Tue, 22 Feb 2022 11:44:11 +0000 Subject: [PATCH 279/474] Add more tests for in-flight state query duplication. 
(#12033) --- changelog.d/12033.misc | 1 + tests/storage/databases/test_state_store.py | 192 +++++++++++++++++--- 2 files changed, 172 insertions(+), 21 deletions(-) create mode 100644 changelog.d/12033.misc diff --git a/changelog.d/12033.misc b/changelog.d/12033.misc new file mode 100644 index 000000000000..3af049b96961 --- /dev/null +++ b/changelog.d/12033.misc @@ -0,0 +1 @@ +Deduplicate in-flight requests in `_get_state_for_groups`. diff --git a/tests/storage/databases/test_state_store.py b/tests/storage/databases/test_state_store.py index cf126ee62d39..3a4a4a3a2971 100644 --- a/tests/storage/databases/test_state_store.py +++ b/tests/storage/databases/test_state_store.py @@ -18,8 +18,9 @@ from twisted.internet.defer import Deferred, ensureDeferred from twisted.test.proto_helpers import MemoryReactor +from synapse.api.constants import EventTypes from synapse.storage.state import StateFilter -from synapse.types import MutableStateMap, StateMap +from synapse.types import StateMap from synapse.util import Clock from tests.unittest import HomeserverTestCase @@ -27,6 +28,21 @@ if typing.TYPE_CHECKING: from synapse.server import HomeServer +# StateFilter for ALL non-m.room.member state events +ALL_NON_MEMBERS_STATE_FILTER = StateFilter.freeze( + types={EventTypes.Member: set()}, + include_others=True, +) + +FAKE_STATE = { + (EventTypes.Member, "@alice:test"): "join", + (EventTypes.Member, "@bob:test"): "leave", + (EventTypes.Member, "@charlie:test"): "invite", + ("test.type", "a"): "AAA", + ("test.type", "b"): "BBB", + ("other.event.type", "state.key"): "123", +} + class StateGroupInflightCachingTestCase(HomeserverTestCase): def prepare( @@ -65,24 +81,8 @@ def _complete_request_fake( Assemble a fake database response and complete the database request. """ - result: Dict[int, StateMap[str]] = {} - - for group in groups: - group_result: MutableStateMap[str] = {} - result[group] = group_result - - for state_type, state_keys in state_filter.types.items(): - if state_keys is None: - group_result[(state_type, "a")] = "xyz" - group_result[(state_type, "b")] = "xyz" - else: - for state_key in state_keys: - group_result[(state_type, state_key)] = "abc" - - if state_filter.include_others: - group_result[("other.event.type", "state.key")] = "123" - - d.callback(result) + # Return a filtered copy of the fake state + d.callback({group: state_filter.filter_state(FAKE_STATE) for group in groups}) def test_duplicate_requests_deduplicated(self) -> None: """ @@ -125,9 +125,159 @@ def test_duplicate_requests_deduplicated(self) -> None: # Now we can complete the request self._complete_request_fake(groups, sf, d) + self.assertEqual(self.get_success(req1), FAKE_STATE) + self.assertEqual(self.get_success(req2), FAKE_STATE) + + def test_smaller_request_deduplicated(self) -> None: + """ + Tests that duplicate requests for state are deduplicated. 
+ + This test: + - requests some state (state group 42, 'all' state filter) + - requests a subset of that state, before the first request finishes + - checks to see that only one database query was made + - completes the database query + - checks that both requests see the correct retrieved state + """ + req1 = ensureDeferred( + self.state_datastore._get_state_for_group_using_inflight_cache( + 42, StateFilter.from_types((("test.type", None),)) + ) + ) + self.pump(by=0.1) + + # This should have gone to the database + self.assertEqual(len(self.get_state_group_calls), 1) + self.assertFalse(req1.called) + + req2 = ensureDeferred( + self.state_datastore._get_state_for_group_using_inflight_cache( + 42, StateFilter.from_types((("test.type", "b"),)) + ) + ) + self.pump(by=0.1) + + # No more calls should have gone to the database, because the second + # request was already in the in-flight cache! + self.assertEqual(len(self.get_state_group_calls), 1) + self.assertFalse(req1.called) + self.assertFalse(req2.called) + + groups, sf, d = self.get_state_group_calls[0] + self.assertEqual(groups, (42,)) + # The state filter is expanded internally for increased cache hit rate, + # so we the database sees a wider state filter than requested. + self.assertEqual(sf, ALL_NON_MEMBERS_STATE_FILTER) + + # Now we can complete the request + self._complete_request_fake(groups, sf, d) + + self.assertEqual( + self.get_success(req1), + {("test.type", "a"): "AAA", ("test.type", "b"): "BBB"}, + ) + self.assertEqual(self.get_success(req2), {("test.type", "b"): "BBB"}) + + def test_partially_overlapping_request_deduplicated(self) -> None: + """ + Tests that partially-overlapping requests are partially deduplicated. + + This test: + - requests a single type of wildcard state + (This is internally expanded to be all non-member state) + - requests the entire state in parallel + - checks to see that two database queries were made, but that the second + one is only for member state. + - completes the database queries + - checks that both requests have the correct result. + """ + + req1 = ensureDeferred( + self.state_datastore._get_state_for_group_using_inflight_cache( + 42, StateFilter.from_types((("test.type", None),)) + ) + ) + self.pump(by=0.1) + + # This should have gone to the database + self.assertEqual(len(self.get_state_group_calls), 1) + self.assertFalse(req1.called) + + req2 = ensureDeferred( + self.state_datastore._get_state_for_group_using_inflight_cache( + 42, StateFilter.all() + ) + ) + self.pump(by=0.1) + + # Because it only partially overlaps, this also went to the database + self.assertEqual(len(self.get_state_group_calls), 2) + self.assertFalse(req1.called) + self.assertFalse(req2.called) + + # First request: + groups, sf, d = self.get_state_group_calls[0] + self.assertEqual(groups, (42,)) + # The state filter is expanded internally for increased cache hit rate, + # so we the database sees a wider state filter than requested. + self.assertEqual(sf, ALL_NON_MEMBERS_STATE_FILTER) + self._complete_request_fake(groups, sf, d) + + # Second request: + groups, sf, d = self.get_state_group_calls[1] + self.assertEqual(groups, (42,)) + # The state filter is narrowed to only request membership state, because + # the remainder of the state is already being queried in the first request! 
self.assertEqual( - self.get_success(req1), {("other.event.type", "state.key"): "123"} + sf, StateFilter.freeze({EventTypes.Member: None}, include_others=False) ) + self._complete_request_fake(groups, sf, d) + + # Check the results are correct self.assertEqual( - self.get_success(req2), {("other.event.type", "state.key"): "123"} + self.get_success(req1), + {("test.type", "a"): "AAA", ("test.type", "b"): "BBB"}, ) + self.assertEqual(self.get_success(req2), FAKE_STATE) + + def test_in_flight_requests_stop_being_in_flight(self) -> None: + """ + Tests that in-flight request deduplication doesn't somehow 'hold on' + to completed requests: once they're done, they're taken out of the + in-flight cache. + """ + req1 = ensureDeferred( + self.state_datastore._get_state_for_group_using_inflight_cache( + 42, StateFilter.all() + ) + ) + self.pump(by=0.1) + + # This should have gone to the database + self.assertEqual(len(self.get_state_group_calls), 1) + self.assertFalse(req1.called) + + # Complete the request right away. + self._complete_request_fake(*self.get_state_group_calls[0]) + self.assertTrue(req1.called) + + # Send off another request + req2 = ensureDeferred( + self.state_datastore._get_state_for_group_using_inflight_cache( + 42, StateFilter.all() + ) + ) + self.pump(by=0.1) + + # It should have gone to the database again, because the previous request + # isn't in-flight and therefore isn't available for deduplication. + self.assertEqual(len(self.get_state_group_calls), 2) + self.assertFalse(req2.called) + + # Complete the request right away. + self._complete_request_fake(*self.get_state_group_calls[1]) + self.assertTrue(req2.called) + groups, sf, d = self.get_state_group_calls[0] + + self.assertEqual(self.get_success(req1), FAKE_STATE) + self.assertEqual(self.get_success(req2), FAKE_STATE) From 45e2c04f78abea88224191fdc424646e1bdd14f4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 22 Feb 2022 12:00:05 +0000 Subject: [PATCH 280/474] Update changelog --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 8d91a7921138..4bbb50b75207 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -11,7 +11,7 @@ Features -------- - Add experimental support for sending to-device messages to application services, as specified by [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409). ([\#11215](https://github.com/matrix-org/synapse/issues/11215), [\#11966](https://github.com/matrix-org/synapse/issues/11966)) -- Remove account data (including client config, push rules and ignored users) upon user deactivation. ([\#11655](https://github.com/matrix-org/synapse/issues/11655)) +- Add a background database update to purge account data for deactivated users. ([\#11655](https://github.com/matrix-org/synapse/issues/11655)) - Experimental support for [MSC3666](https://github.com/matrix-org/matrix-doc/pull/3666): including bundled aggregations in server side search results. ([\#11837](https://github.com/matrix-org/synapse/issues/11837)) - Enable cache time-based expiry by default. The `expiry_time` config flag has been superseded by `expire_caches` and `cache_entry_ttl`. ([\#11849](https://github.com/matrix-org/synapse/issues/11849)) - Add a callback to allow modules to allow or forbid a 3PID (email address, phone number) from being associated to a local account. 
([\#11854](https://github.com/matrix-org/synapse/issues/11854))
@@ -273,7 +273,7 @@ Bugfixes
 
 Synapse 1.50.0 (2022-01-18)
 ===========================
 
-**This release contains a critical bug that may prevent clients from being able to connect.
+**This release contains a critical bug that may prevent clients from being able to connect. As such, it is not recommended to upgrade to 1.50.0. Instead, please upgrade straight to 1.50.1. Further details are available in [this issue](https://github.com/matrix-org/synapse/issues/11763).**

From 1bf9cbbf75f14ef7746b595f0a9167f0d4dd210f Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 22 Feb 2022 12:00:46 +0000
Subject: [PATCH 281/474] Update changelog

---
 CHANGES.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGES.md b/CHANGES.md
index 4bbb50b75207..367adff5d183 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,7 +1,7 @@
 Synapse 1.53.0 (2022-02-22)
 ===========================
 
-No significant changes.
+No significant changes since 1.53.0rc1.
 
 
 Synapse 1.53.0rc1 (2022-02-15)

From 066171643b5812b05dd9352ee650f524567de877 Mon Sep 17 00:00:00 2001
From: AndrewRyanChama <89478935+AndrewRyanChama@users.noreply.github.com>
Date: Tue, 22 Feb 2022 04:11:39 -0800
Subject: [PATCH 282/474] Fetch images when previewing Twitter URLs. (#11985)

By including "bot" in the User-Agent, which some sites use to decide
whether to include additional Open Graph information.
---
 changelog.d/11985.feature                     |  1 +
 synapse/res/providers.json                    |  4 +---
 synapse/rest/media/v1/preview_url_resource.py | 10 +++++++++-
 3 files changed, 11 insertions(+), 4 deletions(-)
 create mode 100644 changelog.d/11985.feature

diff --git a/changelog.d/11985.feature b/changelog.d/11985.feature
new file mode 100644
index 000000000000..120d888a4914
--- /dev/null
+++ b/changelog.d/11985.feature
@@ -0,0 +1 @@
+Fetch images when previewing Twitter URLs. Contributed by @AndrewRyanChama.
diff --git a/synapse/res/providers.json b/synapse/res/providers.json
index f1838f955901..7b9958e45464 100644
--- a/synapse/res/providers.json
+++ b/synapse/res/providers.json
@@ -5,8 +5,6 @@
         "endpoints": [
             {
                 "schemes": [
-                    "https://twitter.com/*/status/*",
-                    "https://*.twitter.com/*/status/*",
                     "https://twitter.com/*/moments/*",
                     "https://*.twitter.com/*/moments/*"
                 ],
@@ -14,4 +12,4 @@
             }
         ]
     }
-]
\ No newline at end of file
+]
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 8d3d1e54dc9f..c08b60d10a09 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -402,7 +402,15 @@ async def _download_url(self, url: str, output_stream: BinaryIO) -> DownloadResu
             url,
             output_stream=output_stream,
             max_size=self.max_spider_size,
-            headers={"Accept-Language": self.url_preview_accept_language},
+            headers={
+                b"Accept-Language": self.url_preview_accept_language,
+                # Use a custom user agent for the preview because some sites will only return
+                # Open Graph metadata to crawler user agents. Omit the Synapse version
+                # string to avoid leaking information.
+ b"User-Agent": [ + "Synapse (bot; +https://github.com/matrix-org/synapse)" + ], + }, is_allowed_content_type=_is_previewable, ) except SynapseError: From 7273011f60afbb1c9754ec73ee3661b19dca6bbd Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 22 Feb 2022 12:17:10 +0000 Subject: [PATCH 283/474] Faster joins: Support for calling `/federation/v1/state` (#12013) This is an endpoint that we have server-side support for, but no client-side support. It's going to be useful for resyncing partial-stated rooms, so let's introduce it. --- changelog.d/12013.misc | 1 + synapse/federation/federation_base.py | 10 +- synapse/federation/federation_client.py | 93 +++++++++++-- synapse/federation/transport/client.py | 70 +++++++++- synapse/http/matrixfederationclient.py | 50 ++++++- tests/federation/test_federation_client.py | 149 +++++++++++++++++++++ tests/unittest.py | 21 +++ 7 files changed, 377 insertions(+), 17 deletions(-) create mode 100644 changelog.d/12013.misc create mode 100644 tests/federation/test_federation_client.py diff --git a/changelog.d/12013.misc b/changelog.d/12013.misc new file mode 100644 index 000000000000..c0fca8dccbae --- /dev/null +++ b/changelog.d/12013.misc @@ -0,0 +1 @@ +Preparation for faster-room-join work: Support for calling `/federation/v1/state` on a remote server. diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 896168c05c0a..fab6da3c087f 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -47,6 +47,11 @@ async def _check_sigs_and_hash( ) -> EventBase: """Checks that event is correctly signed by the sending server. + Also checks the content hash, and redacts the event if there is a mismatch. + + Also runs the event through the spam checker; if it fails, redacts the event + and flags it as soft-failed. + Args: room_version: The room version of the PDU pdu: the event to be checked @@ -55,7 +60,10 @@ async def _check_sigs_and_hash( * the original event if the checks pass * a redacted version of the event (if the signature matched but the hash did not) - * throws a SynapseError if the signature check failed.""" + + Raises: + SynapseError if the signature check failed. + """ try: await _check_sigs_on_pdu(self.keyring, room_version, pdu) except SynapseError as e: diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 48c90bf0bbbd..c2997997da57 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -419,26 +419,90 @@ async def get_room_state_ids( return state_event_ids, auth_event_ids + async def get_room_state( + self, + destination: str, + room_id: str, + event_id: str, + room_version: RoomVersion, + ) -> Tuple[List[EventBase], List[EventBase]]: + """Calls the /state endpoint to fetch the state at a particular point + in the room. + + Any invalid events (those with incorrect or unverifiable signatures or hashes) + are filtered out from the response, and any duplicate events are removed. + + (Size limits and other event-format checks are *not* performed.) + + Note that the result is not ordered, so callers must be careful to process + the events in an order that handles dependencies. 
+ + Returns: + a tuple of (state events, auth events) + """ + result = await self.transport_layer.get_room_state( + room_version, + destination, + room_id, + event_id, + ) + state_events = result.state + auth_events = result.auth_events + + # we may as well filter out any duplicates from the response, to save + # processing them multiple times. (In particular, events may be present in + # `auth_events` as well as `state`, which is redundant). + # + # We don't rely on the sort order of the events, so we can just stick them + # in a dict. + state_event_map = {event.event_id: event for event in state_events} + auth_event_map = { + event.event_id: event + for event in auth_events + if event.event_id not in state_event_map + } + + logger.info( + "Processing from /state: %d state events, %d auth events", + len(state_event_map), + len(auth_event_map), + ) + + valid_auth_events = await self._check_sigs_and_hash_and_fetch( + destination, auth_event_map.values(), room_version + ) + + valid_state_events = await self._check_sigs_and_hash_and_fetch( + destination, state_event_map.values(), room_version + ) + + return valid_state_events, valid_auth_events + async def _check_sigs_and_hash_and_fetch( self, origin: str, pdus: Collection[EventBase], room_version: RoomVersion, ) -> List[EventBase]: - """Takes a list of PDUs and checks the signatures and hashes of each - one. If a PDU fails its signature check then we check if we have it in - the database and if not then request if from the originating server of - that PDU. + """Checks the signatures and hashes of a list of events. + + If a PDU fails its signature check then we check if we have it in + the database, and if not then request it from the sender's server (if that + is different from `origin`). If that still fails, the event is omitted from + the returned list. If a PDU fails its content hash check then it is redacted. - The given list of PDUs are not modified, instead the function returns + Also runs each event through the spam checker; if it fails, redacts the event + and flags it as soft-failed. + + The given list of PDUs are not modified; instead the function returns a new list. Args: - origin - pdu - room_version + origin: The server that sent us these events + pdus: The events to be checked + room_version: the version of the room these events are in Returns: A list of PDUs that have valid signatures and hashes. @@ -469,11 +533,16 @@ async def _check_sigs_and_hash_and_fetch_one( origin: str, room_version: RoomVersion, ) -> Optional[EventBase]: - """Takes a PDU and checks its signatures and hashes. If the PDU fails - its signature check then we check if we have it in the database and if - not then request if from the originating server of that PDU. + """Takes a PDU and checks its signatures and hashes. + + If the PDU fails its signature check then we check if we have it in the + database; if not, we then request it from sender's server (if that is not the + same as `origin`). If that still fails, we return None. + + If the PDU fails its content hash check, it is redacted. - If then PDU fails its content hash check then it is redacted. + Also runs the event through the spam checker; if it fails, redacts the event + and flags it as soft-failed. 
Args: origin diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index dca6e5c45d07..7e510e224a46 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -65,13 +65,12 @@ def __init__(self, hs): async def get_room_state_ids( self, destination: str, room_id: str, event_id: str ) -> JsonDict: - """Requests all state for a given room from the given server at the - given event. Returns the state's event_id's + """Requests the IDs of all state for a given room at the given event. Args: destination: The host name of the remote homeserver we want to get the state from. - context: The name of the context we want the state of + room_id: the room we want the state of event_id: The event we want the context at. Returns: @@ -87,6 +86,29 @@ async def get_room_state_ids( try_trailing_slash_on_400=True, ) + async def get_room_state( + self, room_version: RoomVersion, destination: str, room_id: str, event_id: str + ) -> "StateRequestResponse": + """Requests the full state for a given room at the given event. + + Args: + room_version: the version of the room (required to build the event objects) + destination: The host name of the remote homeserver we want + to get the state from. + room_id: the room we want the state of + event_id: The event we want the context at. + + Returns: + Results in a dict received from the remote homeserver. + """ + path = _create_v1_path("/state/%s", room_id) + return await self.client.get_json( + destination, + path=path, + args={"event_id": event_id}, + parser=_StateParser(room_version), + ) + async def get_event( self, destination: str, event_id: str, timeout: Optional[int] = None ) -> JsonDict: @@ -1284,6 +1306,14 @@ class SendJoinResponse: servers_in_room: Optional[List[str]] = None +@attr.s(slots=True, auto_attribs=True) +class StateRequestResponse: + """The parsed response of a `/state` request.""" + + auth_events: List[EventBase] + state: List[EventBase] + + @ijson.coroutine def _event_parser(event_dict: JsonDict) -> Generator[None, Tuple[str, Any], None]: """Helper function for use with `ijson.kvitems_coro` to parse key-value pairs @@ -1411,3 +1441,37 @@ def finish(self) -> SendJoinResponse: self._response.event_dict, self._room_version ) return self._response + + +class _StateParser(ByteParser[StateRequestResponse]): + """A parser for the response to `/state` requests. + + Args: + room_version: The version of the room. 
+ """ + + CONTENT_TYPE = "application/json" + + def __init__(self, room_version: RoomVersion): + self._response = StateRequestResponse([], []) + self._room_version = room_version + self._coros = [ + ijson.items_coro( + _event_list_parser(room_version, self._response.state), + "pdus.item", + use_float=True, + ), + ijson.items_coro( + _event_list_parser(room_version, self._response.auth_events), + "auth_chain.item", + use_float=True, + ), + ] + + def write(self, data: bytes) -> int: + for c in self._coros: + c.send(data) + return len(data) + + def finish(self) -> StateRequestResponse: + return self._response diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index c5f8fcbb2a7d..e7656fbb9f7c 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -958,6 +958,7 @@ async def post_json( ) return body + @overload async def get_json( self, destination: str, @@ -967,7 +968,38 @@ async def get_json( timeout: Optional[int] = None, ignore_backoff: bool = False, try_trailing_slash_on_400: bool = False, + parser: Literal[None] = None, + max_response_size: Optional[int] = None, ) -> Union[JsonDict, list]: + ... + + @overload + async def get_json( + self, + destination: str, + path: str, + args: Optional[QueryArgs] = ..., + retry_on_dns_fail: bool = ..., + timeout: Optional[int] = ..., + ignore_backoff: bool = ..., + try_trailing_slash_on_400: bool = ..., + parser: ByteParser[T] = ..., + max_response_size: Optional[int] = ..., + ) -> T: + ... + + async def get_json( + self, + destination: str, + path: str, + args: Optional[QueryArgs] = None, + retry_on_dns_fail: bool = True, + timeout: Optional[int] = None, + ignore_backoff: bool = False, + try_trailing_slash_on_400: bool = False, + parser: Optional[ByteParser] = None, + max_response_size: Optional[int] = None, + ): """GETs some json from the given host homeserver and path Args: @@ -992,6 +1024,13 @@ async def get_json( try_trailing_slash_on_400: True if on a 400 M_UNRECOGNIZED response we should try appending a trailing slash to the end of the request. Workaround for #3622 in Synapse <= v0.99.3. + + parser: The parser to use to decode the response. Defaults to + parsing as JSON. + + max_response_size: The maximum size to read from the response. If None, + uses the default. + Returns: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. @@ -1026,8 +1065,17 @@ async def get_json( else: _sec_timeout = self.default_timeout + if parser is None: + parser = JsonParser() + body = await _handle_response( - self.reactor, _sec_timeout, request, response, start_ms, parser=JsonParser() + self.reactor, + _sec_timeout, + request, + response, + start_ms, + parser=parser, + max_response_size=max_response_size, ) return body diff --git a/tests/federation/test_federation_client.py b/tests/federation/test_federation_client.py new file mode 100644 index 000000000000..ec8864dafe37 --- /dev/null +++ b/tests/federation/test_federation_client.py @@ -0,0 +1,149 @@ +# Copyright 2022 Matrix.org Federation C.I.C +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+from unittest import mock
+
+import twisted.web.client
+from twisted.internet import defer
+from twisted.internet.protocol import Protocol
+from twisted.python.failure import Failure
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.api.room_versions import RoomVersions
+from synapse.server import HomeServer
+from synapse.types import JsonDict
+from synapse.util import Clock
+
+from tests.unittest import FederatingHomeserverTestCase
+
+
+class FederationClientTest(FederatingHomeserverTestCase):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer):
+        super().prepare(reactor, clock, homeserver)
+
+        # mock out the Agent used by the federation client, which is easier than
+        # catching the HTTPS connection and doing the TLS stuff.
+        self._mock_agent = mock.create_autospec(twisted.web.client.Agent, spec_set=True)
+        homeserver.get_federation_http_client().agent = self._mock_agent
+
+    def test_get_room_state(self):
+        creator = f"@creator:{self.OTHER_SERVER_NAME}"
+        test_room_id = "!room_id"
+
+        # mock up some events to use in the response.
+        # In real life, these would have things in `prev_events` and `auth_events`, but that's
+        # a bit annoying to mock up, and the code under test doesn't care, so we don't bother.
+        create_event_dict = self.add_hashes_and_signatures(
+            {
+                "room_id": test_room_id,
+                "type": "m.room.create",
+                "state_key": "",
+                "sender": creator,
+                "content": {"creator": creator},
+                "prev_events": [],
+                "auth_events": [],
+                "origin_server_ts": 500,
+            }
+        )
+        member_event_dict = self.add_hashes_and_signatures(
+            {
+                "room_id": test_room_id,
+                "type": "m.room.member",
+                "sender": creator,
+                "state_key": creator,
+                "content": {"membership": "join"},
+                "prev_events": [],
+                "auth_events": [],
+                "origin_server_ts": 600,
+            }
+        )
+        pl_event_dict = self.add_hashes_and_signatures(
+            {
+                "room_id": test_room_id,
+                "type": "m.room.power_levels",
+                "sender": creator,
+                "state_key": "",
+                "content": {},
+                "prev_events": [],
+                "auth_events": [],
+                "origin_server_ts": 700,
+            }
+        )
+
+        # mock up the response, and have the agent return it
+        self._mock_agent.request.return_value = defer.succeed(
+            _mock_response(
+                {
+                    "pdus": [
+                        create_event_dict,
+                        member_event_dict,
+                        pl_event_dict,
+                    ],
+                    "auth_chain": [
+                        create_event_dict,
+                        member_event_dict,
+                    ],
+                }
+            )
+        )
+
+        # now fire off the request
+        state_resp, auth_resp = self.get_success(
+            self.hs.get_federation_client().get_room_state(
+                "yet_another_server",
+                test_room_id,
+                "event_id",
+                RoomVersions.V9,
+            )
+        )
+
+        # check the right call got made to the agent
+        self._mock_agent.request.assert_called_once_with(
+            b"GET",
+            b"matrix://yet_another_server/_matrix/federation/v1/state/%21room_id?event_id=event_id",
+            headers=mock.ANY,
+            bodyProducer=None,
+        )
+
+        # ... and that the response is correct.
+
+        # the auth_resp should be empty because all the events are also in state
+        self.assertEqual(auth_resp, [])
+
+        # all of the events should be returned in state_resp, though not necessarily
+        # in the same order. We just check the type on the assumption that if the type
+        # is right, so is the rest of the event.
+ self.assertCountEqual( + [e.type for e in state_resp], + ["m.room.create", "m.room.member", "m.room.power_levels"], + ) + + +def _mock_response(resp: JsonDict): + body = json.dumps(resp).encode("utf-8") + + def deliver_body(p: Protocol): + p.dataReceived(body) + p.connectionLost(Failure(twisted.web.client.ResponseDone())) + + response = mock.Mock( + code=200, + phrase=b"OK", + headers=twisted.web.client.Headers({"content-Type": ["application/json"]}), + length=len(body), + deliverBody=deliver_body, + ) + mock.seal(response) + return response diff --git a/tests/unittest.py b/tests/unittest.py index a71892cb9dbe..7983c1e8b860 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -51,7 +51,10 @@ from synapse import events from synapse.api.constants import EventTypes, Membership +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion from synapse.config.homeserver import HomeServerConfig +from synapse.config.server import DEFAULT_ROOM_VERSION +from synapse.crypto.event_signing import add_hashes_and_signatures from synapse.federation.transport.server import TransportLayerServer from synapse.http.server import JsonResource from synapse.http.site import SynapseRequest, SynapseSite @@ -839,6 +842,24 @@ def make_signed_federation_request( client_ip=client_ip, ) + def add_hashes_and_signatures( + self, + event_dict: JsonDict, + room_version: RoomVersion = KNOWN_ROOM_VERSIONS[DEFAULT_ROOM_VERSION], + ) -> JsonDict: + """Adds hashes and signatures to the given event dict + + Returns: + The modified event dict, for convenience + """ + add_hashes_and_signatures( + room_version, + event_dict, + signature_name=self.OTHER_SERVER_NAME, + signing_key=self.OTHER_SERVER_SIGNATURE_KEY, + ) + return event_dict + def _auth_header_for_request( origin: str, From 235d2916ceb0c9a8e874ea8ac6994d604d743444 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Tue, 22 Feb 2022 13:29:04 +0000 Subject: [PATCH 284/474] Fix slow performance of `/logout` in some cases where refresh tokens are in use. The slowness existed since the initial implementation of refresh tokens. (#12056) --- changelog.d/12056.bugfix | 1 + .../storage/databases/main/registration.py | 18 +++- .../04_refresh_tokens_index_next_token_id.sql | 28 ++++++ tests/rest/client/test_auth.py | 93 ++++++++++++++++++- 4 files changed, 136 insertions(+), 4 deletions(-) create mode 100644 changelog.d/12056.bugfix create mode 100644 synapse/storage/schema/main/delta/68/04_refresh_tokens_index_next_token_id.sql diff --git a/changelog.d/12056.bugfix b/changelog.d/12056.bugfix new file mode 100644 index 000000000000..210e30c63fed --- /dev/null +++ b/changelog.d/12056.bugfix @@ -0,0 +1 @@ +Fix slow performance of `/logout` in some cases where refresh tokens are in use. The slowness existed since the initial implementation of refresh tokens. \ No newline at end of file diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 17110bb03314..dc6665237abf 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -1681,7 +1681,8 @@ def _lookup_refresh_token_txn(txn) -> Optional[RefreshTokenLookupResult]: user_id=row[1], device_id=row[2], next_token_id=row[3], - has_next_refresh_token_been_refreshed=row[4], + # SQLite returns 0 or 1 for false/true, so convert to a bool. 
+ has_next_refresh_token_been_refreshed=bool(row[4]), # This column is nullable, ensure it's a boolean has_next_access_token_been_used=(row[5] or False), expiry_ts=row[6], @@ -1697,12 +1698,15 @@ async def replace_refresh_token(self, token_id: int, next_token_id: int) -> None Set the successor of a refresh token, removing the existing successor if any. + This also deletes the predecessor refresh and access tokens, + since they cannot be valid anymore. + Args: token_id: ID of the refresh token to update. next_token_id: ID of its successor. """ - def _replace_refresh_token_txn(txn) -> None: + def _replace_refresh_token_txn(txn: LoggingTransaction) -> None: # First check if there was an existing refresh token old_next_token_id = self.db_pool.simple_select_one_onecol_txn( txn, @@ -1728,6 +1732,16 @@ def _replace_refresh_token_txn(txn) -> None: {"id": old_next_token_id}, ) + # Delete the previous refresh token, since we only want to keep the + # last 2 refresh tokens in the database. + # (The predecessor of the latest refresh token is still useful in + # case the refresh was interrupted and the client re-uses the old + # one.) + # This cascades to delete the associated access token. + self.db_pool.simple_delete_txn( + txn, "refresh_tokens", {"next_token_id": token_id} + ) + await self.db_pool.runInteraction( "replace_refresh_token", _replace_refresh_token_txn ) diff --git a/synapse/storage/schema/main/delta/68/04_refresh_tokens_index_next_token_id.sql b/synapse/storage/schema/main/delta/68/04_refresh_tokens_index_next_token_id.sql new file mode 100644 index 000000000000..09305638ea54 --- /dev/null +++ b/synapse/storage/schema/main/delta/68/04_refresh_tokens_index_next_token_id.sql @@ -0,0 +1,28 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- next_token_id is a foreign key reference, so previously required a table scan +-- when a row in the referenced table was deleted. +-- As it was self-referential and cascaded deletes, this led to O(t*n) time to +-- delete a row, where t: number of rows in the table and n: number of rows in +-- the ancestral 'chain' of access tokens. +-- +-- This index is partial since we only require it for rows which reference +-- another. +-- Performance was tested to be the same regardless of whether the index was +-- full or partial, but a partial index can be smaller. +CREATE INDEX refresh_tokens_next_token_id + ON refresh_tokens(next_token_id) + WHERE next_token_id IS NOT NULL; diff --git a/tests/rest/client/test_auth.py b/tests/rest/client/test_auth.py index 27cb856b0acd..4a68d6657321 100644 --- a/tests/rest/client/test_auth.py +++ b/tests/rest/client/test_auth.py @@ -13,15 +13,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
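
A minimal, dict-based sketch of the bookkeeping this change implements: on each rotation
the predecessor refresh token is deleted, so at most the current token and its immediate
predecessor survive. The names below are illustrative stand-ins for the `refresh_tokens`
table, not Synapse's actual storage API:

    from itertools import count

    _ids = count(1)
    refresh_tokens = {}  # token_id -> {"next_token_id": int or None}

    def issue_refresh_token() -> int:
        token_id = next(_ids)
        refresh_tokens[token_id] = {"next_token_id": None}
        return token_id

    def replace_refresh_token(token_id: int, next_token_id: int) -> None:
        refresh_tokens[token_id]["next_token_id"] = next_token_id
        # Drop the predecessor of `token_id`: it cannot be valid any more,
        # mirroring the simple_delete_txn call added above.
        for old_id, row in list(refresh_tokens.items()):
            if row["next_token_id"] == token_id:
                del refresh_tokens[old_id]

    current = issue_refresh_token()
    for _ in range(5):
        successor = issue_refresh_token()
        replace_refresh_token(current, successor)
        current = successor

    # Only the current token and its immediate predecessor remain,
    # which is what the test below asserts against the real tables.
    assert len(refresh_tokens) == 2
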
from http import HTTPStatus -from typing import Optional, Union +from typing import Optional, Tuple, Union from twisted.internet.defer import succeed import synapse.rest.admin from synapse.api.constants import LoginType from synapse.handlers.ui_auth.checkers import UserInteractiveAuthChecker -from synapse.rest.client import account, auth, devices, login, register +from synapse.rest.client import account, auth, devices, login, logout, register from synapse.rest.synapse.client import build_synapse_client_resource_tree +from synapse.storage.database import LoggingTransaction from synapse.types import JsonDict, UserID from tests import unittest @@ -527,6 +528,7 @@ class RefreshAuthTests(unittest.HomeserverTestCase): auth.register_servlets, account.register_servlets, login.register_servlets, + logout.register_servlets, synapse.rest.admin.register_servlets_for_client_rest_resource, register.register_servlets, ] @@ -984,3 +986,90 @@ def test_refresh_token_invalidation(self): self.assertEqual( fifth_refresh_response.code, HTTPStatus.OK, fifth_refresh_response.result ) + + def test_many_token_refresh(self): + """ + If a refresh is performed many times during a session, there shouldn't be + extra 'cruft' built up over time. + + This test was written specifically to troubleshoot a case where logout + was very slow if a lot of refreshes had been performed for the session. + """ + + def _refresh(refresh_token: str) -> Tuple[str, str]: + """ + Performs one refresh, returning the next refresh token and access token. + """ + refresh_response = self.use_refresh_token(refresh_token) + self.assertEqual( + refresh_response.code, HTTPStatus.OK, refresh_response.result + ) + return ( + refresh_response.json_body["refresh_token"], + refresh_response.json_body["access_token"], + ) + + def _table_length(table_name: str) -> int: + """ + Helper to get the size of a table, in rows. + For testing only; trivially vulnerable to SQL injection. + """ + + def _txn(txn: LoggingTransaction) -> int: + txn.execute(f"SELECT COUNT(1) FROM {table_name}") + row = txn.fetchone() + # Query is infallible + assert row is not None + return row[0] + + return self.get_success( + self.hs.get_datastores().main.db_pool.runInteraction( + "_table_length", _txn + ) + ) + + # Before we log in, there are no access tokens. + self.assertEqual(_table_length("access_tokens"), 0) + self.assertEqual(_table_length("refresh_tokens"), 0) + + body = { + "type": "m.login.password", + "user": "test", + "password": self.user_pass, + "refresh_token": True, + } + login_response = self.make_request( + "POST", + "/_matrix/client/v3/login", + body, + ) + self.assertEqual(login_response.code, HTTPStatus.OK, login_response.result) + + access_token = login_response.json_body["access_token"] + refresh_token = login_response.json_body["refresh_token"] + + # Now that we have logged in, there should be one access token and one + # refresh token + self.assertEqual(_table_length("access_tokens"), 1) + self.assertEqual(_table_length("refresh_tokens"), 1) + + for _ in range(5): + refresh_token, access_token = _refresh(refresh_token) + + # After 5 sequential refreshes, there should only be the latest two + # refresh/access token pairs. + # (The last one is preserved because it's in use! + # The one before that is preserved because it can still be used to + # replace the last token pair, in case of e.g. a network interruption.) 
+ self.assertEqual(_table_length("access_tokens"), 2) + self.assertEqual(_table_length("refresh_tokens"), 2) + + logout_response = self.make_request( + "POST", "/_matrix/client/v3/logout", {}, access_token=access_token + ) + self.assertEqual(logout_response.code, HTTPStatus.OK, logout_response.result) + + # Now that we have logged in, there should be no access token + # and no refresh token + self.assertEqual(_table_length("access_tokens"), 0) + self.assertEqual(_table_length("refresh_tokens"), 0) From 81364db49b7778021edcd5912555dd3e1583c1b8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 22 Feb 2022 13:33:22 +0000 Subject: [PATCH 285/474] Run `_handle_queued_pdus` as a background process (#12041) ... to ensure it gets a proper log context, mostly. --- changelog.d/12041.misc | 1 + synapse/handlers/federation.py | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12041.misc diff --git a/changelog.d/12041.misc b/changelog.d/12041.misc new file mode 100644 index 000000000000..e56dc093def6 --- /dev/null +++ b/changelog.d/12041.misc @@ -0,0 +1 @@ +After joining a room, create a dedicated logcontext to process the queued events. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index c8356f233dad..e9ac920bcc5e 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -49,8 +49,8 @@ make_deferred_yieldable, nested_logging_context, preserve_fn, - run_in_background, ) +from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http.federation import ( ReplicationCleanRoomRestServlet, ReplicationStoreRoomOnOutlierMembershipRestServlet, @@ -559,7 +559,9 @@ async def do_invite_join( # lots of requests for missing prev_events which we do actually # have. Hence we fire off the background task, but don't wait for it. - run_in_background(self._handle_queued_pdus, room_queue) + run_as_background_process( + "handle_queued_pdus", self._handle_queued_pdus, room_queue + ) async def do_knock( self, From 7bcc28f82fe160dc8dca1d70f8dc3667c94fb738 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 22 Feb 2022 09:09:40 -0500 Subject: [PATCH 286/474] Use room version 9 as the default room version (per MSC3589). (#12058) --- changelog.d/12058.feature | 1 + docs/sample_config.yaml | 2 +- synapse/config/server.py | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12058.feature diff --git a/changelog.d/12058.feature b/changelog.d/12058.feature new file mode 100644 index 000000000000..7b716922299a --- /dev/null +++ b/changelog.d/12058.feature @@ -0,0 +1 @@ +Use room version 9 as the default room version (per [MSC3589](https://github.com/matrix-org/matrix-doc/pull/3589)). diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index d2bb3d42080a..6f3623c88ab9 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -163,7 +163,7 @@ presence: # For example, for room version 1, default_room_version should be set # to "1". 
# -#default_room_version: "6" +#default_room_version: "9" # The GC threshold parameters to pass to `gc.set_threshold`, if defined # diff --git a/synapse/config/server.py b/synapse/config/server.py index 7bc962454684..49cd0a4f192d 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -146,7 +146,7 @@ def generate_ip_set( "fec0::/10", ] -DEFAULT_ROOM_VERSION = "6" +DEFAULT_ROOM_VERSION = "9" ROOM_COMPLEXITY_TOO_GREAT = ( "Your homeserver is unable to join rooms this large or complex. " From dcb6a378372d08a46a76c591704be4dc15c68df3 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Tue, 22 Feb 2022 14:24:31 +0000 Subject: [PATCH 287/474] Cap the number of in-flight requests for state from a single group (#11608) --- changelog.d/11608.misc | 1 + synapse/storage/databases/state/store.py | 16 +++++ tests/storage/databases/test_state_store.py | 69 +++++++++++++++++++++ 3 files changed, 86 insertions(+) create mode 100644 changelog.d/11608.misc diff --git a/changelog.d/11608.misc b/changelog.d/11608.misc new file mode 100644 index 000000000000..3af049b96961 --- /dev/null +++ b/changelog.d/11608.misc @@ -0,0 +1 @@ +Deduplicate in-flight requests in `_get_state_for_groups`. diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index 3af69a207690..b8016f679a7f 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -56,6 +56,7 @@ logger = logging.getLogger(__name__) MAX_STATE_DELTA_HOPS = 100 +MAX_INFLIGHT_REQUESTS_PER_GROUP = 5 @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -258,6 +259,11 @@ def _get_state_for_group_gather_inflight_requests( Attempts to gather in-flight requests and re-use them to retrieve state for the given state group, filtered with the given state filter. + If there are more than MAX_INFLIGHT_REQUESTS_PER_GROUP in-flight requests, + and there *still* isn't enough information to complete the request by solely + reusing others, a full state filter will be requested to ensure that subsequent + requests can reuse this request. + Used as part of _get_state_for_group_using_inflight_cache. Returns: @@ -288,6 +294,16 @@ def _get_state_for_group_gather_inflight_requests( # to cover our StateFilter and give us the state we need. break + if ( + state_filter_left_over != StateFilter.none() + and len(inflight_requests) >= MAX_INFLIGHT_REQUESTS_PER_GROUP + ): + # There are too many requests for this group. + # To prevent even more from building up, we request the whole + # state filter to guarantee that we can be reused by any subsequent + # requests for this state group. 
+ return (), StateFilter.all() + return reusable_requests, state_filter_left_over async def _get_state_for_group_fire_request( diff --git a/tests/storage/databases/test_state_store.py b/tests/storage/databases/test_state_store.py index 3a4a4a3a2971..076b66080946 100644 --- a/tests/storage/databases/test_state_store.py +++ b/tests/storage/databases/test_state_store.py @@ -19,6 +19,7 @@ from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import EventTypes +from synapse.storage.databases.state.store import MAX_INFLIGHT_REQUESTS_PER_GROUP from synapse.storage.state import StateFilter from synapse.types import StateMap from synapse.util import Clock @@ -281,3 +282,71 @@ def test_in_flight_requests_stop_being_in_flight(self) -> None: self.assertEqual(self.get_success(req1), FAKE_STATE) self.assertEqual(self.get_success(req2), FAKE_STATE) + + def test_inflight_requests_capped(self) -> None: + """ + Tests that the number of in-flight requests is capped to 5. + + - requests several pieces of state separately + (5 to hit the limit, 1 to 'shunt out', another that comes after the + group has been 'shunted out') + - checks to see that the torrent of requests is shunted out by + rewriting one of the filters as the 'all' state filter + - requests after that one do not cause any additional queries + """ + # 5 at the time of writing. + CAP_COUNT = MAX_INFLIGHT_REQUESTS_PER_GROUP + + reqs = [] + + # Request 7 different keys (1 to 7) of the `some.state` type. + for req_id in range(CAP_COUNT + 2): + reqs.append( + ensureDeferred( + self.state_datastore._get_state_for_group_using_inflight_cache( + 42, + StateFilter.freeze( + {"some.state": {str(req_id + 1)}}, include_others=False + ), + ) + ) + ) + self.pump(by=0.1) + + # There should only be 6 calls to the database, not 7. + self.assertEqual(len(self.get_state_group_calls), CAP_COUNT + 1) + + # Assert that the first 5 are exact requests for the individual pieces + # wanted + for req_id in range(CAP_COUNT): + groups, sf, d = self.get_state_group_calls[req_id] + self.assertEqual( + sf, + StateFilter.freeze( + {"some.state": {str(req_id + 1)}}, include_others=False + ), + ) + + # The 6th request should be the 'all' state filter + groups, sf, d = self.get_state_group_calls[CAP_COUNT] + self.assertEqual(sf, StateFilter.all()) + + # Complete the queries and check which requests complete as a result + for req_id in range(CAP_COUNT): + # This request should not have been completed yet + self.assertFalse(reqs[req_id].called) + + groups, sf, d = self.get_state_group_calls[req_id] + self._complete_request_fake(groups, sf, d) + + # This should have only completed this one request + self.assertTrue(reqs[req_id].called) + + # Now complete the final query; the last 2 requests should complete + # as a result + self.assertFalse(reqs[CAP_COUNT].called) + self.assertFalse(reqs[CAP_COUNT + 1].called) + groups, sf, d = self.get_state_group_calls[CAP_COUNT] + self._complete_request_fake(groups, sf, d) + self.assertTrue(reqs[CAP_COUNT].called) + self.assertTrue(reqs[CAP_COUNT + 1].called) From 94a396e7c4b4488d7f0ca08672114a4a586cf42c Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 22 Feb 2022 14:52:56 +0000 Subject: [PATCH 288/474] Prune setup.cfg some more (#12059) * Remove `trial` section from setup.cfg This was added in the initial commit from 2014. I can't see that it does anything. Maybe it's there so that you can run `trial` without any extra args, but if I do that then I just get the `--help` message. 
* Move flake8's config to its own file --- .flake8 | 11 +++++++++++ MANIFEST.in | 1 + changelog.d/12052.misc | 2 +- changelog.d/12059.misc | 1 + setup.cfg | 12 ------------ 5 files changed, 14 insertions(+), 13 deletions(-) create mode 100644 .flake8 create mode 100644 changelog.d/12059.misc diff --git a/.flake8 b/.flake8 new file mode 100644 index 000000000000..acb118c86e84 --- /dev/null +++ b/.flake8 @@ -0,0 +1,11 @@ +# TODO: incorporate this into pyproject.toml if flake8 supports it in the future. +# See https://github.com/PyCQA/flake8/issues/234 +[flake8] +# see https://pycodestyle.readthedocs.io/en/latest/intro.html#error-codes +# for error codes. The ones we ignore are: +# W503: line break before binary operator +# W504: line break after binary operator +# E203: whitespace before ':' (which is contrary to pep8?) +# E731: do not assign a lambda expression, use a def +# E501: Line too long (black enforces this for us) +ignore=W503,W504,E203,E731,E501 diff --git a/MANIFEST.in b/MANIFEST.in index c24786c3b371..76d14eb642b2 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -45,6 +45,7 @@ include book.toml include pyproject.toml recursive-include changelog.d * +include .flake8 prune .circleci prune .github prune .ci diff --git a/changelog.d/12052.misc b/changelog.d/12052.misc index fbaff67e959f..11755ae61b9a 100644 --- a/changelog.d/12052.misc +++ b/changelog.d/12052.misc @@ -1 +1 @@ -Move `isort` configuration to `pyproject.toml`. +Move configuration out of `setup.cfg`. diff --git a/changelog.d/12059.misc b/changelog.d/12059.misc new file mode 100644 index 000000000000..9ba4759d99de --- /dev/null +++ b/changelog.d/12059.misc @@ -0,0 +1 @@ +Move configuration out of `setup.cfg`. \ No newline at end of file diff --git a/setup.cfg b/setup.cfg index a0506572d96b..6213f3265b59 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,6 +1,3 @@ -[trial] -test_suite = tests - [check-manifest] ignore = .git-blame-ignore-revs @@ -10,12 +7,3 @@ ignore = pylint.cfg tox.ini -[flake8] -# see https://pycodestyle.readthedocs.io/en/latest/intro.html#error-codes -# for error codes. The ones we ignore are: -# W503: line break before binary operator -# W504: line break after binary operator -# E203: whitespace before ':' (which is contrary to pep8?) 
-# E731: do not assign a lambda expression, use a def -# E501: Line too long (black enforces this for us) -ignore=W503,W504,E203,E731,E501 From 250104d357c17a1c87fa46af35bbf3612f4ef171 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 22 Feb 2022 16:10:10 +0100 Subject: [PATCH 289/474] Implement account status endpoints (MSC3720) (#12001) See matrix-org/matrix-doc#3720 Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> --- changelog.d/12001.feature | 1 + synapse/config/experimental.py | 3 + synapse/federation/federation_client.py | 60 +++++- synapse/federation/transport/client.py | 19 +- .../federation/transport/server/__init__.py | 8 + .../federation/transport/server/federation.py | 35 +++ synapse/handlers/account.py | 144 +++++++++++++ synapse/rest/client/account.py | 33 +++ synapse/rest/client/capabilities.py | 5 + synapse/server.py | 5 + tests/rest/client/test_account.py | 204 +++++++++++++++++- 11 files changed, 511 insertions(+), 6 deletions(-) create mode 100644 changelog.d/12001.feature create mode 100644 synapse/handlers/account.py diff --git a/changelog.d/12001.feature b/changelog.d/12001.feature new file mode 100644 index 000000000000..dc1153c49ef4 --- /dev/null +++ b/changelog.d/12001.feature @@ -0,0 +1 @@ +Implement experimental support for [MSC3720](https://github.com/matrix-org/matrix-doc/pull/3720) (account status endpoints). diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index bcdeb9ee232d..772eb3501382 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -65,3 +65,6 @@ def read_config(self, config: JsonDict, **kwargs): # experimental support for faster joins over federation (msc2775, msc3706) # requires a target server with msc3706_enabled enabled. self.faster_joins_enabled: bool = experimental.get("faster_joins", False) + + # MSC3720 (Account status endpoint) + self.msc3720_enabled: bool = experimental.get("msc3720_enabled", False) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index c2997997da57..2121e92e3a9f 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -56,7 +56,7 @@ from synapse.events import EventBase, builder from synapse.federation.federation_base import FederationBase, event_from_pdu_json from synapse.federation.transport.client import SendJoinResponse -from synapse.types import JsonDict, get_domain_from_id +from synapse.types import JsonDict, UserID, get_domain_from_id from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.retryutils import NotRetryingDestination @@ -1610,6 +1610,64 @@ async def timestamp_to_event( except ValueError as e: raise InvalidResponseError(str(e)) + async def get_account_status( + self, destination: str, user_ids: List[str] + ) -> Tuple[JsonDict, List[str]]: + """Retrieves account statuses for a given list of users on a given remote + homeserver. + + If the request fails for any reason, all user IDs for this destination are marked + as failed. + + Args: + destination: the destination to contact + user_ids: the user ID(s) for which to request account status(es) + + Returns: + The account statuses, as well as the list of user IDs for which it was not + possible to retrieve a status. + """ + try: + res = await self.transport_layer.get_account_status(destination, user_ids) + except Exception: + # If the query failed for any reason, mark all the users as failed. 
+            return {}, user_ids
+
+        statuses = res.get("account_statuses", {})
+        failures = res.get("failures", [])
+
+        if not isinstance(statuses, dict) or not isinstance(failures, list):
+            # Make sure we're not feeding malformed data back to the caller.
+            logger.warning(
+                "Destination %s responded with malformed data to account_status query",
+                destination,
+            )
+            return {}, user_ids
+
+        for user_id in user_ids:
+            # Any account whose status is missing is a user we failed to receive the
+            # status of.
+            if user_id not in statuses and user_id not in failures:
+                failures.append(user_id)
+
+        # Filter out any user ID that doesn't belong to the remote server that sent its
+        # status (or failure).
+        def filter_user_id(user_id: str) -> bool:
+            try:
+                return UserID.from_string(user_id).domain == destination
+            except SynapseError:
+                # If the user ID doesn't parse, ignore it.
+                return False
+
+        filtered_statuses = dict(
+            # item is a (key, value) tuple, so item[0] is the user ID.
+            filter(lambda item: filter_user_id(item[0]), statuses.items())
+        )
+
+        filtered_failures = list(filter(filter_user_id, failures))
+
+        return filtered_statuses, filtered_failures
+

 @attr.s(frozen=True, slots=True, auto_attribs=True)
 class TimestampToEventResponse:
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 7e510e224a46..69998de52068 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -258,8 +258,9 @@ async def make_query(
         args: dict,
         retry_on_dns_fail: bool,
         ignore_backoff: bool = False,
+        prefix: str = FEDERATION_V1_PREFIX,
     ) -> JsonDict:
-        path = _create_v1_path("/query/%s", query_type)
+        path = _create_path(prefix, "/query/%s", query_type)
 
         return await self.client.get_json(
             destination=destination,
@@ -1247,6 +1248,22 @@ async def get_room_hierarchy_unstable(
             args={"suggested_only": "true" if suggested_only else "false"},
         )
 
+    async def get_account_status(
+        self, destination: str, user_ids: List[str]
+    ) -> JsonDict:
+        """
+        Args:
+            destination: The remote server.
+            user_ids: The user ID(s) for which to request account status(es).
+ """ + path = _create_path( + FEDERATION_UNSTABLE_PREFIX, "/org.matrix.msc3720/account_status" + ) + + return await self.client.post_json( + destination=destination, path=path, data={"user_ids": user_ids} + ) + def _create_path(federation_prefix: str, path: str, *args: str) -> str: """ diff --git a/synapse/federation/transport/server/__init__.py b/synapse/federation/transport/server/__init__.py index db4fe2c79803..67a634790712 100644 --- a/synapse/federation/transport/server/__init__.py +++ b/synapse/federation/transport/server/__init__.py @@ -24,6 +24,7 @@ ) from synapse.federation.transport.server.federation import ( FEDERATION_SERVLET_CLASSES, + FederationAccountStatusServlet, FederationTimestampLookupServlet, ) from synapse.federation.transport.server.groups_local import GROUP_LOCAL_SERVLET_CLASSES @@ -336,6 +337,13 @@ def register_servlets( ): continue + # Only allow the `/account_status` servlet if msc3720 is enabled + if ( + servletclass == FederationAccountStatusServlet + and not hs.config.experimental.msc3720_enabled + ): + continue + servletclass( hs=hs, authenticator=authenticator, diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py index e85a8eda5b87..4d75e58bfc81 100644 --- a/synapse/federation/transport/server/federation.py +++ b/synapse/federation/transport/server/federation.py @@ -766,6 +766,40 @@ async def on_GET( return 200, complexity +class FederationAccountStatusServlet(BaseFederationServerServlet): + PATH = "/query/account_status" + PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc3720" + + def __init__( + self, + hs: "HomeServer", + authenticator: Authenticator, + ratelimiter: FederationRateLimiter, + server_name: str, + ): + super().__init__(hs, authenticator, ratelimiter, server_name) + self._account_handler = hs.get_account_handler() + + async def on_POST( + self, + origin: str, + content: JsonDict, + query: Mapping[bytes, Sequence[bytes]], + room_id: str, + ) -> Tuple[int, JsonDict]: + if "user_ids" not in content: + raise SynapseError( + 400, "Required parameter 'user_ids' is missing", Codes.MISSING_PARAM + ) + + statuses, failures = await self._account_handler.get_account_statuses( + content["user_ids"], + allow_remote=False, + ) + + return 200, {"account_statuses": statuses, "failures": failures} + + FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = ( FederationSendServlet, FederationEventServlet, @@ -797,4 +831,5 @@ async def on_GET( FederationRoomHierarchyUnstableServlet, FederationV1SendKnockServlet, FederationMakeKnockServlet, + FederationAccountStatusServlet, ) diff --git a/synapse/handlers/account.py b/synapse/handlers/account.py new file mode 100644 index 000000000000..f8cfe9f6dec7 --- /dev/null +++ b/synapse/handlers/account.py @@ -0,0 +1,144 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import TYPE_CHECKING, Dict, List, Tuple + +from synapse.api.errors import Codes, SynapseError +from synapse.types import JsonDict, UserID + +if TYPE_CHECKING: + from synapse.server import HomeServer + + +class AccountHandler: + def __init__(self, hs: "HomeServer"): + self._store = hs.get_datastore() + self._is_mine = hs.is_mine + self._federation_client = hs.get_federation_client() + + async def get_account_statuses( + self, + user_ids: List[str], + allow_remote: bool, + ) -> Tuple[JsonDict, List[str]]: + """Get account statuses for a list of user IDs. + + If one or more account(s) belong to remote homeservers, retrieve their status(es) + over federation if allowed. + + Args: + user_ids: The list of accounts to retrieve the status of. + allow_remote: Whether to try to retrieve the status of remote accounts, if + any. + + Returns: + The account statuses as well as the list of users whose statuses could not be + retrieved. + + Raises: + SynapseError if a required parameter is missing or malformed, or if one of + the accounts isn't local to this homeserver and allow_remote is False. + """ + statuses = {} + failures = [] + remote_users: List[UserID] = [] + + for raw_user_id in user_ids: + try: + user_id = UserID.from_string(raw_user_id) + except SynapseError: + raise SynapseError( + 400, + f"Not a valid Matrix user ID: {raw_user_id}", + Codes.INVALID_PARAM, + ) + + if self._is_mine(user_id): + status = await self._get_local_account_status(user_id) + statuses[user_id.to_string()] = status + else: + if not allow_remote: + raise SynapseError( + 400, + f"Not a local user: {raw_user_id}", + Codes.INVALID_PARAM, + ) + + remote_users.append(user_id) + + if allow_remote and len(remote_users) > 0: + remote_statuses, remote_failures = await self._get_remote_account_statuses( + remote_users, + ) + + statuses.update(remote_statuses) + failures += remote_failures + + return statuses, failures + + async def _get_local_account_status(self, user_id: UserID) -> JsonDict: + """Retrieve the status of a local account. + + Args: + user_id: The account to retrieve the status of. + + Returns: + The account's status. + """ + status = {"exists": False} + + userinfo = await self._store.get_userinfo_by_id(user_id.to_string()) + + if userinfo is not None: + status = { + "exists": True, + "deactivated": userinfo.is_deactivated, + } + + return status + + async def _get_remote_account_statuses( + self, remote_users: List[UserID] + ) -> Tuple[JsonDict, List[str]]: + """Send out federation requests to retrieve the statuses of remote accounts. + + Args: + remote_users: The accounts to retrieve the statuses of. + + Returns: + The statuses of the accounts, and a list of accounts for which no status + could be retrieved. + """ + # Group remote users by destination, so we only send one request per remote + # homeserver. + by_destination: Dict[str, List[str]] = {} + for user in remote_users: + if user.domain not in by_destination: + by_destination[user.domain] = [] + + by_destination[user.domain].append(user.to_string()) + + # Retrieve the statuses and failures for remote accounts. 
+ final_statuses: JsonDict = {} + final_failures: List[str] = [] + for destination, users in by_destination.items(): + statuses, failures = await self._federation_client.get_account_status( + destination, + users, + ) + + final_statuses.update(statuses) + final_failures += failures + + return final_statuses, final_failures diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index efe299e69828..5802de5b7c58 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -896,6 +896,36 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: return 200, response +class AccountStatusRestServlet(RestServlet): + PATTERNS = client_patterns( + "/org.matrix.msc3720/account_status$", unstable=True, releases=() + ) + + def __init__(self, hs: "HomeServer"): + super().__init__() + self._auth = hs.get_auth() + self._store = hs.get_datastore() + self._is_mine = hs.is_mine + self._federation_client = hs.get_federation_client() + self._account_handler = hs.get_account_handler() + + async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + await self._auth.get_user_by_req(request) + + body = parse_json_object_from_request(request) + if "user_ids" not in body: + raise SynapseError( + 400, "Required parameter 'user_ids' is missing", Codes.MISSING_PARAM + ) + + statuses, failures = await self._account_handler.get_account_statuses( + body["user_ids"], + allow_remote=True, + ) + + return 200, {"account_statuses": statuses, "failures": failures} + + def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: EmailPasswordRequestTokenRestServlet(hs).register(http_server) PasswordRestServlet(hs).register(http_server) @@ -910,3 +940,6 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: ThreepidUnbindRestServlet(hs).register(http_server) ThreepidDeleteRestServlet(hs).register(http_server) WhoamiRestServlet(hs).register(http_server) + + if hs.config.experimental.msc3720_enabled: + AccountStatusRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/capabilities.py b/synapse/rest/client/capabilities.py index e05c926b6f06..b80fdd3712fa 100644 --- a/synapse/rest/client/capabilities.py +++ b/synapse/rest/client/capabilities.py @@ -75,6 +75,11 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: if self.config.experimental.msc3440_enabled: response["capabilities"]["io.element.thread"] = {"enabled": True} + if self.config.experimental.msc3720_enabled: + response["capabilities"]["org.matrix.msc3720.account_status"] = { + "enabled": True, + } + return HTTPStatus.OK, response diff --git a/synapse/server.py b/synapse/server.py index 564afdcb9697..4c07f2101566 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -62,6 +62,7 @@ from synapse.federation.transport.client import TransportLayerClient from synapse.groups.attestations import GroupAttestationSigning, GroupAttestionRenewer from synapse.groups.groups_server import GroupsServerHandler, GroupsServerWorkerHandler +from synapse.handlers.account import AccountHandler from synapse.handlers.account_data import AccountDataHandler from synapse.handlers.account_validity import AccountValidityHandler from synapse.handlers.admin import AdminHandler @@ -807,6 +808,10 @@ def get_event_auth_handler(self) -> EventAuthHandler: def get_external_cache(self) -> ExternalCache: return ExternalCache(self) + @cache_in_self + def get_account_handler(self) -> AccountHandler: + return AccountHandler(self) + @cache_in_self def 
get_outbound_redis_connection(self) -> "RedisProtocol":
         """
diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py
index 51146c471d94..afaa597f6597 100644
--- a/tests/rest/client/test_account.py
+++ b/tests/rest/client/test_account.py
@@ -1,6 +1,4 @@
-# Copyright 2015-2016 OpenMarket Ltd
-# Copyright 2017-2018 New Vector Ltd
-# Copyright 2019 The Matrix.org Foundation C.I.C.
+# Copyright 2022 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,16 +15,22 @@
 import os
 import re
 from email.parser import Parser
-from typing import Optional
+from typing import Dict, List, Optional
+from unittest.mock import Mock
 
 import pkg_resources
 
+from twisted.test.proto_helpers import MemoryReactor
+
 import synapse.rest.admin
 from synapse.api.constants import LoginType, Membership
 from synapse.api.errors import Codes, HttpResponseException
 from synapse.appservice import ApplicationService
+from synapse.rest import admin
 from synapse.rest.client import account, login, register, room
 from synapse.rest.synapse.client.password_reset import PasswordResetSubmitTokenResource
+from synapse.server import HomeServer
+from synapse.util import Clock
 
 from tests import unittest
 from tests.server import FakeSite, make_request
@@ -1040,3 +1044,195 @@
 
         threepids = {threepid["address"] for threepid in channel.json_body["threepids"]}
         self.assertIn(expected_email, threepids)
+
+
+class AccountStatusTestCase(unittest.HomeserverTestCase):
+    servlets = [
+        account.register_servlets,
+        admin.register_servlets,
+        login.register_servlets,
+    ]
+
+    url = "/_matrix/client/unstable/org.matrix.msc3720/account_status"
+
+    def make_homeserver(self, reactor, clock):
+        config = self.default_config()
+        config["experimental_features"] = {"msc3720_enabled": True}
+
+        return self.setup_test_homeserver(config=config)
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer):
+        self.requester = self.register_user("requester", "password")
+        self.requester_tok = self.login("requester", "password")
+        self.server_name = homeserver.config.server.server_name
+
+    def test_missing_mxid(self):
+        """Tests that not providing any MXID raises an error."""
+        self._test_status(
+            users=None,
+            expected_status_code=400,
+            expected_errcode=Codes.MISSING_PARAM,
+        )
+
+    def test_invalid_mxid(self):
+        """Tests that providing an invalid MXID raises an error."""
+        self._test_status(
+            users=["bad:test"],
+            expected_status_code=400,
+            expected_errcode=Codes.INVALID_PARAM,
+        )
+
+    def test_local_user_not_exists(self):
+        """Tests that the account status endpoint correctly reports that a user doesn't
+        exist.
+        """
+        user = "@unknown:" + self.hs.config.server.server_name
+
+        self._test_status(
+            users=[user],
+            expected_statuses={
+                user: {
+                    "exists": False,
+                },
+            },
+            expected_failures=[],
+        )
+
+    def test_local_user_exists(self):
+        """Tests that the account status endpoint correctly reports that a user
+        exists.
+ """ + user = self.register_user("someuser", "password") + + self._test_status( + users=[user], + expected_statuses={ + user: { + "exists": True, + "deactivated": False, + }, + }, + expected_failures=[], + ) + + def test_local_user_deactivated(self): + """Tests that the account status endpoint correctly reports a deactivated user.""" + user = self.register_user("someuser", "password") + self.get_success( + self.hs.get_datastore().set_user_deactivated_status(user, deactivated=True) + ) + + self._test_status( + users=[user], + expected_statuses={ + user: { + "exists": True, + "deactivated": True, + }, + }, + expected_failures=[], + ) + + def test_mixed_local_and_remote_users(self): + """Tests that if some users are remote the account status endpoint correctly + merges the remote responses with the local result. + """ + # We use 3 users: one doesn't exist but belongs on the local homeserver, one is + # deactivated and belongs on one remote homeserver, and one belongs to another + # remote homeserver that didn't return any result (the federation code should + # mark that user as a failure). + users = [ + "@unknown:" + self.hs.config.server.server_name, + "@deactivated:remote", + "@failed:otherremote", + "@bad:badremote", + ] + + async def post_json(destination, path, data, *a, **kwa): + if destination == "remote": + return { + "account_statuses": { + users[1]: { + "exists": True, + "deactivated": True, + }, + } + } + if destination == "otherremote": + return {} + if destination == "badremote": + # badremote tries to overwrite the status of a user that doesn't belong + # to it (i.e. users[1]) with false data, which Synapse is expected to + # ignore. + return { + "account_statuses": { + users[3]: { + "exists": False, + }, + users[1]: { + "exists": False, + }, + } + } + + # Register a mock that will return the expected result depending on the remote. + self.hs.get_federation_http_client().post_json = Mock(side_effect=post_json) + + # Check that we've got the correct response from the client-side endpoint. + self._test_status( + users=users, + expected_statuses={ + users[0]: { + "exists": False, + }, + users[1]: { + "exists": True, + "deactivated": True, + }, + users[3]: { + "exists": False, + }, + }, + expected_failures=[users[2]], + ) + + def _test_status( + self, + users: Optional[List[str]], + expected_status_code: int = 200, + expected_statuses: Optional[Dict[str, Dict[str, bool]]] = None, + expected_failures: Optional[List[str]] = None, + expected_errcode: Optional[str] = None, + ): + """Send a request to the account status endpoint and check that the response + matches with what's expected. + + Args: + users: The account(s) to request the status of, if any. If set to None, no + `user_id` query parameter will be included in the request. + expected_status_code: The expected HTTP status code. + expected_statuses: The expected account statuses, if any. + expected_failures: The expected failures, if any. + expected_errcode: The expected Matrix error code, if any. 
+ """ + content = {} + if users is not None: + content["user_ids"] = users + + channel = self.make_request( + method="POST", + path=self.url, + content=content, + access_token=self.requester_tok, + ) + + self.assertEqual(channel.code, expected_status_code) + + if expected_statuses is not None: + self.assertEqual(channel.json_body["account_statuses"], expected_statuses) + + if expected_failures is not None: + self.assertEqual(channel.json_body["failures"], expected_failures) + + if expected_errcode is not None: + self.assertEqual(channel.json_body["errcode"], expected_errcode) From 6d14b3dabfe38c6ae487d0f663e294056b6cc056 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 22 Feb 2022 15:52:08 +0000 Subject: [PATCH 290/474] Better error message when failing to request from another process (#12060) --- changelog.d/12060.misc | 1 + synapse/replication/http/_base.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12060.misc diff --git a/changelog.d/12060.misc b/changelog.d/12060.misc new file mode 100644 index 000000000000..d771e6a1b393 --- /dev/null +++ b/changelog.d/12060.misc @@ -0,0 +1 @@ +Fix error message when a worker process fails to talk to another worker process. diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index bc1d28dd19b1..2e697c74a6bb 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -268,7 +268,9 @@ async def send_request(*, instance_name: str = "master", **kwargs: Any) -> Any: raise e.to_synapse_error() except Exception as e: _outgoing_request_counter.labels(cls.NAME, "ERR").inc() - raise SynapseError(502, "Failed to talk to main process") from e + raise SynapseError( + 502, f"Failed to talk to {instance_name} process" + ) from e _outgoing_request_counter.labels(cls.NAME, 200).inc() return result From e3fe6347be1da930b6a0ed2005b565369800a327 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Tue, 22 Feb 2022 11:35:01 -0700 Subject: [PATCH 291/474] Remove excess condition on `knock->leave` check (#11900) --- changelog.d/11900.misc | 1 + synapse/event_auth.py | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/11900.misc diff --git a/changelog.d/11900.misc b/changelog.d/11900.misc new file mode 100644 index 000000000000..edd2852fd49f --- /dev/null +++ b/changelog.d/11900.misc @@ -0,0 +1 @@ +Remove unnecessary condition on knock->leave auth rule check. \ No newline at end of file diff --git a/synapse/event_auth.py b/synapse/event_auth.py index eca00bc97506..621a3efcccec 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -374,9 +374,9 @@ def _is_membership_change_allowed( return # Require the user to be in the room for membership changes other than join/knock. - if Membership.JOIN != membership and ( - RoomVersion.msc2403_knocking and Membership.KNOCK != membership - ): + # Note that the room version check for knocking is done implicitly by `caller_knocked` + # and the ability to set a membership of `knock` in the first place. 
+ if Membership.JOIN != membership and Membership.KNOCK != membership: # If the user has been invited or has knocked, they are allowed to change their # membership event to leave if ( From c1ac2a81350f3b5b86f4c53a585eccd17e3b8e75 Mon Sep 17 00:00:00 2001 From: Nicolas Werner <89468146+nico-famedly@users.noreply.github.com> Date: Wed, 23 Feb 2022 10:06:18 +0000 Subject: [PATCH 292/474] Rename default branch of complement.sh to main (#12063) The complement.sh script relies on the name of the ref matching the name of the unpacked folder. The branch redirect from renaming the default branch breaks that assumption. Signed-off-by: Nicolas Werner --- changelog.d/12063.misc | 1 + scripts-dev/complement.sh | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12063.misc diff --git a/changelog.d/12063.misc b/changelog.d/12063.misc new file mode 100644 index 000000000000..e48c5dd08bd7 --- /dev/null +++ b/changelog.d/12063.misc @@ -0,0 +1 @@ +Fix using the complement.sh script without specifying a dir or a branch. Contributed by Nico on behalf of Famedly. diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index e08ffedaf33a..0aecb3daf158 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -5,7 +5,7 @@ # It makes a Synapse image which represents the current checkout, # builds a synapse-complement image on top, then runs tests with it. # -# By default the script will fetch the latest Complement master branch and +# By default the script will fetch the latest Complement main branch and # run tests with that. This can be overridden to use a custom Complement # checkout by setting the COMPLEMENT_DIR environment variable to the # filepath of a local Complement checkout or by setting the COMPLEMENT_REF @@ -32,7 +32,7 @@ cd "$(dirname $0)/.." # Check for a user-specified Complement checkout if [[ -z "$COMPLEMENT_DIR" ]]; then - COMPLEMENT_REF=${COMPLEMENT_REF:-master} + COMPLEMENT_REF=${COMPLEMENT_REF:-main} echo "COMPLEMENT_DIR not set. Fetching Complement checkout from ${COMPLEMENT_REF}..." wget -Nq https://github.com/matrix-org/complement/archive/${COMPLEMENT_REF}.tar.gz tar -xzf ${COMPLEMENT_REF}.tar.gz From e24ff8ebe3d4119d377355402245947f7de61c00 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 23 Feb 2022 11:04:02 +0000 Subject: [PATCH 293/474] Remove `HomeServer.get_datastore()` (#12031) The presence of this method was confusing, and mostly present for backwards compatibility. Let's get rid of it. 
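Concretely, every affected call site changes in the same mechanical way. A minimal sketch of the before/after pattern follows; `MyHandler` is an illustrative stand-in, not a class from this patch:

```python
class MyHandler:
    def __init__(self, hs: "HomeServer"):
        # Before this change, the main datastore had a dedicated accessor:
        #     self.store = hs.get_datastore()
        # After it, the store is reached through the datastores collection,
        # which makes it explicit that the *main* database is being used:
        self.store = hs.get_datastores().main
```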
Part of #11733 --- changelog.d/12031.misc | 1 + docs/manhole.md | 2 +- scripts/update_synapse_database | 2 +- synapse/api/auth.py | 2 +- synapse/api/auth_blocking.py | 2 +- synapse/api/filtering.py | 4 +-- synapse/app/_base.py | 2 +- synapse/app/generic_worker.py | 2 +- synapse/app/homeserver.py | 2 +- synapse/app/phone_stats_home.py | 14 +++++--- synapse/appservice/scheduler.py | 2 +- synapse/crypto/keyring.py | 4 +-- synapse/events/builder.py | 2 +- synapse/events/third_party_rules.py | 2 +- synapse/federation/federation_base.py | 2 +- synapse/federation/sender/__init__.py | 2 +- .../sender/per_destination_queue.py | 9 ++--- .../federation/sender/transaction_manager.py | 2 +- synapse/federation/transport/server/_base.py | 2 +- .../federation/transport/server/federation.py | 2 +- synapse/groups/attestations.py | 2 +- synapse/groups/groups_server.py | 2 +- synapse/handlers/account_data.py | 4 +-- synapse/handlers/account_validity.py | 2 +- synapse/handlers/admin.py | 2 +- synapse/handlers/appservice.py | 2 +- synapse/handlers/auth.py | 4 +-- synapse/handlers/cas.py | 2 +- synapse/handlers/deactivate_account.py | 2 +- synapse/handlers/device.py | 4 +-- synapse/handlers/devicemessage.py | 2 +- synapse/handlers/directory.py | 2 +- synapse/handlers/e2e_keys.py | 4 +-- synapse/handlers/e2e_room_keys.py | 2 +- synapse/handlers/event_auth.py | 2 +- synapse/handlers/events.py | 4 +-- synapse/handlers/federation.py | 2 +- synapse/handlers/federation_event.py | 2 +- synapse/handlers/groups_local.py | 2 +- synapse/handlers/identity.py | 2 +- synapse/handlers/initial_sync.py | 2 +- synapse/handlers/message.py | 4 +-- synapse/handlers/oidc.py | 2 +- synapse/handlers/pagination.py | 2 +- synapse/handlers/presence.py | 4 +-- synapse/handlers/profile.py | 2 +- synapse/handlers/read_marker.py | 2 +- synapse/handlers/receipts.py | 4 +-- synapse/handlers/register.py | 2 +- synapse/handlers/room.py | 10 +++--- synapse/handlers/room_batch.py | 2 +- synapse/handlers/room_list.py | 2 +- synapse/handlers/room_member.py | 2 +- synapse/handlers/room_summary.py | 2 +- synapse/handlers/saml.py | 2 +- synapse/handlers/search.py | 2 +- synapse/handlers/set_password.py | 2 +- synapse/handlers/sso.py | 2 +- synapse/handlers/state_deltas.py | 2 +- synapse/handlers/stats.py | 2 +- synapse/handlers/sync.py | 2 +- synapse/handlers/typing.py | 4 +-- synapse/handlers/ui_auth/checkers.py | 4 +-- synapse/handlers/user_directory.py | 2 +- synapse/http/matrixfederationclient.py | 2 +- synapse/module_api/__init__.py | 8 +++-- synapse/notifier.py | 2 +- synapse/push/__init__.py | 2 +- synapse/push/bulk_push_rule_evaluator.py | 4 +-- synapse/push/emailpusher.py | 2 +- synapse/push/httppusher.py | 4 +-- synapse/push/mailer.py | 2 +- synapse/push/pusherpool.py | 2 +- synapse/replication/http/devices.py | 2 +- synapse/replication/http/federation.py | 10 +++--- synapse/replication/http/membership.py | 10 +++--- synapse/replication/http/register.py | 4 +-- synapse/replication/http/send_event.py | 2 +- synapse/replication/tcp/client.py | 4 +-- synapse/replication/tcp/handler.py | 2 +- synapse/replication/tcp/resource.py | 2 +- synapse/replication/tcp/streams/_base.py | 24 ++++++------- synapse/replication/tcp/streams/events.py | 2 +- synapse/rest/admin/__init__.py | 2 +- synapse/rest/admin/background_updates.py | 2 +- synapse/rest/admin/devices.py | 6 ++-- synapse/rest/admin/event_reports.py | 4 +-- synapse/rest/admin/federation.py | 8 ++--- synapse/rest/admin/media.py | 20 +++++------ synapse/rest/admin/registration_tokens.py | 6 ++-- 
synapse/rest/admin/rooms.py | 16 ++++----- synapse/rest/admin/statistics.py | 2 +- synapse/rest/admin/users.py | 24 ++++++------- synapse/rest/client/account.py | 18 +++++----- synapse/rest/client/account_data.py | 4 +-- synapse/rest/client/directory.py | 6 ++-- synapse/rest/client/events.py | 2 +- synapse/rest/client/groups.py | 8 ++--- synapse/rest/client/initial_sync.py | 2 +- synapse/rest/client/keys.py | 2 +- synapse/rest/client/login.py | 4 +-- synapse/rest/client/notifications.py | 2 +- synapse/rest/client/openid.py | 2 +- synapse/rest/client/push_rule.py | 2 +- synapse/rest/client/pusher.py | 4 ++- synapse/rest/client/register.py | 10 +++--- synapse/rest/client/relations.py | 6 ++-- synapse/rest/client/report_event.py | 2 +- synapse/rest/client/room.py | 12 +++---- synapse/rest/client/room_batch.py | 2 +- synapse/rest/client/shared_rooms.py | 2 +- synapse/rest/client/sync.py | 2 +- synapse/rest/client/tags.py | 2 +- synapse/rest/consent/consent_resource.py | 2 +- synapse/rest/key/v2/remote_key_resource.py | 2 +- synapse/rest/media/v1/media_repository.py | 2 +- synapse/rest/media/v1/preview_url_resource.py | 2 +- synapse/rest/media/v1/thumbnail_resource.py | 2 +- synapse/rest/media/v1/upload_resource.py | 2 +- synapse/rest/synapse/client/password_reset.py | 2 +- synapse/server.py | 16 +++------ .../server_notices/consent_server_notices.py | 2 +- .../resource_limits_server_notices.py | 2 +- .../server_notices/server_notices_manager.py | 2 +- synapse/state/__init__.py | 2 +- .../databases/main/monthly_active_users.py | 2 +- synapse/streams/events.py | 2 +- tests/api/test_auth.py | 8 +++-- tests/api/test_filtering.py | 2 +- tests/api/test_ratelimiting.py | 18 +++++----- tests/app/test_phone_stats_home.py | 34 ++++++++++--------- tests/crypto/test_keyring.py | 10 +++--- tests/events/test_snapshot.py | 2 +- tests/federation/test_complexity.py | 4 +-- tests/federation/test_federation_catch_up.py | 26 +++++++------- tests/federation/test_federation_sender.py | 6 ++-- tests/federation/transport/test_knocking.py | 2 +- tests/handlers/test_appservice.py | 10 +++--- tests/handlers/test_auth.py | 12 +++---- tests/handlers/test_cas.py | 2 +- tests/handlers/test_deactivate_account.py | 2 +- tests/handlers/test_device.py | 4 +-- tests/handlers/test_directory.py | 6 ++-- tests/handlers/test_e2e_keys.py | 2 +- tests/handlers/test_federation.py | 2 +- tests/handlers/test_message.py | 2 +- tests/handlers/test_oidc.py | 6 ++-- tests/handlers/test_presence.py | 4 +-- tests/handlers/test_profile.py | 4 +-- tests/handlers/test_register.py | 6 ++-- tests/handlers/test_saml.py | 4 +-- tests/handlers/test_stats.py | 2 +- tests/handlers/test_sync.py | 4 +-- tests/handlers/test_typing.py | 2 +- tests/handlers/test_user_directory.py | 2 +- tests/module_api/test_api.py | 2 +- tests/push/test_email.py | 22 ++++++------ tests/push/test_http.py | 22 ++++++------ tests/replication/_base.py | 6 ++-- tests/replication/slave/storage/_base.py | 4 +-- .../tcp/streams/test_account_data.py | 4 +-- tests/replication/tcp/streams/test_events.py | 4 +-- .../replication/tcp/streams/test_receipts.py | 4 +-- .../test_federation_sender_shard.py | 2 +- tests/replication/test_pusher_shard.py | 2 +- .../test_sharded_event_persister.py | 8 ++--- tests/rest/admin/test_background_updates.py | 2 +- tests/rest/admin/test_federation.py | 4 +-- tests/rest/admin/test_media.py | 4 +-- tests/rest/admin/test_registration_tokens.py | 2 +- tests/rest/admin/test_room.py | 6 ++-- tests/rest/admin/test_server_notice.py | 2 +- 
tests/rest/admin/test_user.py | 20 +++++------ tests/rest/client/test_account.py | 10 +++--- tests/rest/client/test_filter.py | 2 +- tests/rest/client/test_login.py | 4 +-- tests/rest/client/test_profile.py | 2 +- tests/rest/client/test_register.py | 28 ++++++++------- tests/rest/client/test_relations.py | 4 +-- tests/rest/client/test_retention.py | 4 +-- tests/rest/client/test_rooms.py | 10 +++--- tests/rest/client/test_shadow_banned.py | 2 +- tests/rest/client/test_shared_rooms.py | 2 +- tests/rest/client/test_sync.py | 2 +- tests/rest/client/test_typing.py | 2 +- tests/rest/client/test_upgrade_room.py | 2 +- tests/rest/media/v1/test_media_storage.py | 2 +- .../test_resource_limits_server_notices.py | 2 +- .../databases/main/test_deviceinbox.py | 2 +- .../databases/main/test_events_worker.py | 6 ++-- tests/storage/databases/main/test_lock.py | 2 +- tests/storage/databases/main/test_room.py | 2 +- tests/storage/test__base.py | 2 +- tests/storage/test_account_data.py | 2 +- tests/storage/test_appservice.py | 2 +- tests/storage/test_background_update.py | 8 ++--- tests/storage/test_cleanup_extrems.py | 4 +-- tests/storage/test_client_ips.py | 4 +-- tests/storage/test_devices.py | 2 +- tests/storage/test_directory.py | 2 +- tests/storage/test_e2e_room_keys.py | 2 +- tests/storage/test_end_to_end_keys.py | 2 +- tests/storage/test_event_chain.py | 4 +-- tests/storage/test_event_federation.py | 2 +- tests/storage/test_event_push_actions.py | 2 +- tests/storage/test_events.py | 4 +-- tests/storage/test_id_generators.py | 6 ++-- tests/storage/test_keys.py | 4 +-- tests/storage/test_main.py | 2 +- tests/storage/test_monthly_active_users.py | 2 +- tests/storage/test_profile.py | 2 +- tests/storage/test_purge.py | 4 +-- tests/storage/test_redaction.py | 2 +- tests/storage/test_registration.py | 2 +- tests/storage/test_rollback_worker.py | 6 ++-- tests/storage/test_room.py | 4 +-- tests/storage/test_room_search.py | 2 +- tests/storage/test_roommember.py | 4 +-- tests/storage/test_state.py | 2 +- tests/storage/test_stream.py | 2 +- tests/storage/test_transactions.py | 2 +- tests/storage/test_user_directory.py | 4 +-- tests/test_federation.py | 8 +++-- tests/test_mau.py | 2 +- tests/test_state.py | 4 +-- tests/test_utils/event_injection.py | 4 ++- tests/test_visibility.py | 4 ++- tests/unittest.py | 14 ++++---- tests/util/test_retryutils.py | 4 +-- tests/utils.py | 2 +- 230 files changed, 526 insertions(+), 500 deletions(-) create mode 100644 changelog.d/12031.misc diff --git a/changelog.d/12031.misc b/changelog.d/12031.misc new file mode 100644 index 000000000000..d4bedc6b97cb --- /dev/null +++ b/changelog.d/12031.misc @@ -0,0 +1 @@ +Remove legacy `HomeServer.get_datastore()`. 
diff --git a/docs/manhole.md b/docs/manhole.md index 715ed840f2dd..a82fad0f0fe0 100644 --- a/docs/manhole.md +++ b/docs/manhole.md @@ -94,6 +94,6 @@ As a simple example, retrieving an event from the database: ```pycon >>> from twisted.internet import defer ->>> defer.ensureDeferred(hs.get_datastore().get_event('$1416420717069yeQaw:matrix.org')) +>>> defer.ensureDeferred(hs.get_datastores().main.get_event('$1416420717069yeQaw:matrix.org')) > ``` diff --git a/scripts/update_synapse_database b/scripts/update_synapse_database index 5c6453d77ffb..f43676afaaac 100755 --- a/scripts/update_synapse_database +++ b/scripts/update_synapse_database @@ -44,7 +44,7 @@ class MockHomeserver(HomeServer): def run_background_updates(hs): - store = hs.get_datastore() + store = hs.get_datastores().main async def run_background_updates(): await store.db_pool.updates.run_background_updates(sleep=False) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 683241201ca6..01c32417d862 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -60,7 +60,7 @@ class Auth: def __init__(self, hs: "HomeServer"): self.hs = hs self.clock = hs.get_clock() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.state = hs.get_state_handler() self._account_validity_handler = hs.get_account_validity_handler() diff --git a/synapse/api/auth_blocking.py b/synapse/api/auth_blocking.py index 08fe160c98ec..22348d2d8630 100644 --- a/synapse/api/auth_blocking.py +++ b/synapse/api/auth_blocking.py @@ -28,7 +28,7 @@ class AuthBlocking: def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self._server_notices_mxid = hs.config.servernotices.server_notices_mxid self._hs_disabled = hs.config.server.hs_disabled diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index d087c816db57..fe4cc2e8eec1 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -150,7 +150,7 @@ def matrix_user_id_validator(user_id_str: str) -> UserID: class Filtering: def __init__(self, hs: "HomeServer"): self._hs = hs - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.DEFAULT_FILTER_COLLECTION = FilterCollection(hs, {}) @@ -294,7 +294,7 @@ def blocks_all_room_timeline(self) -> bool: class Filter: def __init__(self, hs: "HomeServer", filter_json: JsonDict): self._hs = hs - self._store = hs.get_datastore() + self._store = hs.get_datastores().main self.filter_json = filter_json self.limit = filter_json.get("limit", 10) diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 452c0c09d526..3e59805baa90 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -448,7 +448,7 @@ def run_sighup(*args: Any, **kwargs: Any) -> None: # It is now safe to start your Synapse. hs.start_listening() - hs.get_datastore().db_pool.start_profiling() + hs.get_datastores().main.db_pool.start_profiling() hs.get_pusherpool().start() # Log when we start the shut down process. 
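Standalone tooling follows the same shape as the `scripts/update_synapse_database` change above: resolve the main store once, then work against it. A small sketch of that pattern, assuming `hs` is an already-configured homeserver object:

```python
async def run_all_background_updates(hs) -> None:
    # Resolve the main store through the datastores collection.
    store = hs.get_datastores().main
    # Drive pending background updates to completion without sleeping
    # between batches, mirroring the script change shown above.
    await store.db_pool.updates.run_background_updates(sleep=False)
```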
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index aadc882bf898..1536a4272333 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -142,7 +142,7 @@ def __init__(self, hs: HomeServer): """ super().__init__() self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.http_client = hs.get_simple_http_client() self.main_uri = hs.config.worker.worker_main_http_uri diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index bfb30003c2d3..b9931001c25e 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -372,7 +372,7 @@ async def start() -> None: await _base.start(hs) - hs.get_datastore().db_pool.updates.start_doing_background_updates() + hs.get_datastores().main.db_pool.updates.start_doing_background_updates() register_start(start) diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py index 899dba5c3d76..40dbdace8ed1 100644 --- a/synapse/app/phone_stats_home.py +++ b/synapse/app/phone_stats_home.py @@ -82,7 +82,7 @@ async def phone_stats_home( # General statistics # - store = hs.get_datastore() + store = hs.get_datastores().main stats["homeserver"] = hs.config.server.server_name stats["server_context"] = hs.config.server.server_context @@ -170,18 +170,22 @@ def performance_stats_init() -> None: # Rather than update on per session basis, batch up the requests. # If you increase the loop period, the accuracy of user_daily_visits # table will decrease - clock.looping_call(hs.get_datastore().generate_user_daily_visits, 5 * 60 * 1000) + clock.looping_call( + hs.get_datastores().main.generate_user_daily_visits, 5 * 60 * 1000 + ) # monthly active user limiting functionality - clock.looping_call(hs.get_datastore().reap_monthly_active_users, 1000 * 60 * 60) - hs.get_datastore().reap_monthly_active_users() + clock.looping_call( + hs.get_datastores().main.reap_monthly_active_users, 1000 * 60 * 60 + ) + hs.get_datastores().main.reap_monthly_active_users() @wrap_as_background_process("generate_monthly_active_users") async def generate_monthly_active_users() -> None: current_mau_count = 0 current_mau_count_by_service = {} reserved_users: Sized = () - store = hs.get_datastore() + store = hs.get_datastores().main if hs.config.server.limit_usage_by_mau or hs.config.server.mau_stats_only: current_mau_count = await store.get_monthly_active_count() current_mau_count_by_service = ( diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index c42fa32fff32..b4e602e880b2 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -92,7 +92,7 @@ class ApplicationServiceScheduler: def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.as_api = hs.get_application_service_api() self.txn_ctrl = _TransactionController(self.clock, self.store, self.as_api) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 72d4a69aac35..93d56c077a57 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -476,7 +476,7 @@ class StoreKeyFetcher(KeyFetcher): def __init__(self, hs: "HomeServer"): super().__init__(hs) - self.store = hs.get_datastore() + self.store = hs.get_datastores().main async def _fetch_keys( self, keys_to_fetch: List[_FetchKeyRequest] @@ -498,7 +498,7 @@ class BaseV2KeyFetcher(KeyFetcher): def __init__(self, hs: "HomeServer"): super().__init__(hs) - self.store = hs.get_datastore() + 
self.store = hs.get_datastores().main self.config = hs.config async def process_v2_response( diff --git a/synapse/events/builder.py b/synapse/events/builder.py index eb39e0ae3291..1ea1bb7d3790 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -189,7 +189,7 @@ def __init__(self, hs: "HomeServer"): self.hostname = hs.hostname self.signing_key = hs.signing_key - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.state = hs.get_state_handler() self._event_auth_handler = hs.get_event_auth_handler() diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index 1bb8ca7145fd..71ec100a7fb1 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -143,7 +143,7 @@ class ThirdPartyEventRules: def __init__(self, hs: "HomeServer"): self.third_party_rules = None - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self._check_event_allowed_callbacks: List[CHECK_EVENT_ALLOWED_CALLBACK] = [] self._on_create_room_callbacks: List[ON_CREATE_ROOM_CALLBACK] = [] diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index fab6da3c087f..41ac49fdc8bf 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -39,7 +39,7 @@ def __init__(self, hs: "HomeServer"): self.server_name = hs.hostname self.keyring = hs.get_keyring() self.spam_checker = hs.get_spam_checker() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self._clock = hs.get_clock() async def _check_sigs_and_hash( diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 720d7bd74d07..6106a486d10a 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -228,7 +228,7 @@ def __init__(self, hs: "HomeServer"): self.hs = hs self.server_name = hs.hostname - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.state = hs.get_state_handler() self.clock = hs.get_clock() diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index c3132f731921..c8768f22bc6b 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -76,7 +76,7 @@ def __init__( ): self._server_name = hs.hostname self._clock = hs.get_clock() - self._store = hs.get_datastore() + self._store = hs.get_datastores().main self._transaction_manager = transaction_manager self._instance_name = hs.get_instance_name() self._federation_shard_config = hs.config.worker.federation_shard_config @@ -381,9 +381,8 @@ async def _catch_up_transmission_loop(self) -> None: ) ) - last_successful_stream_ordering = self._last_successful_stream_ordering - - if last_successful_stream_ordering is None: + _tmp_last_successful_stream_ordering = self._last_successful_stream_ordering + if _tmp_last_successful_stream_ordering is None: # if it's still None, then this means we don't have the information # in our database ­ we haven't successfully sent a PDU to this server # (at least since the introduction of the feature tracking @@ -393,6 +392,8 @@ async def _catch_up_transmission_loop(self) -> None: self._catching_up = False return + last_successful_stream_ordering: int = _tmp_last_successful_stream_ordering + # get at most 50 catchup room/PDUs while True: event_ids = await self._store.get_catch_up_room_event_ids( diff --git 
a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py index 742ee572558d..0c1cad86ab65 100644 --- a/synapse/federation/sender/transaction_manager.py +++ b/synapse/federation/sender/transaction_manager.py @@ -53,7 +53,7 @@ class TransactionManager: def __init__(self, hs: "synapse.server.HomeServer"): self._server_name = hs.hostname self.clock = hs.get_clock() # nb must be called this for @measure_func - self._store = hs.get_datastore() + self._store = hs.get_datastores().main self._transaction_actions = TransactionActions(self._store) self._transport_layer = hs.get_federation_transport_client() diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py index dff2b68359b2..87e99c7ddf5b 100644 --- a/synapse/federation/transport/server/_base.py +++ b/synapse/federation/transport/server/_base.py @@ -55,7 +55,7 @@ def __init__(self, hs: "HomeServer"): self._clock = hs.get_clock() self.keyring = hs.get_keyring() self.server_name = hs.hostname - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.federation_domain_whitelist = ( hs.config.federation.federation_domain_whitelist ) diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py index 4d75e58bfc81..9cc9a7339d19 100644 --- a/synapse/federation/transport/server/federation.py +++ b/synapse/federation/transport/server/federation.py @@ -746,7 +746,7 @@ def __init__( server_name: str, ): super().__init__(hs, authenticator, ratelimiter, server_name) - self._store = self.hs.get_datastore() + self._store = self.hs.get_datastores().main async def on_GET( self, diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index a87896e5386c..ed26d6a6ce72 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -140,7 +140,7 @@ class GroupAttestionRenewer: def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.assestations = hs.get_groups_attestation_signing() self.transport_client = hs.get_federation_transport_client() self.is_mine_id = hs.is_mine_id diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 449bbc70042d..4c3a5a6e24d1 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -45,7 +45,7 @@ class GroupsServerWorkerHandler: def __init__(self, hs: "HomeServer"): self.hs = hs - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.room_list_handler = hs.get_room_list_handler() self.auth = hs.get_auth() self.clock = hs.get_clock() diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py index bad48713bcb1..177b4f899155 100644 --- a/synapse/handlers/account_data.py +++ b/synapse/handlers/account_data.py @@ -30,7 +30,7 @@ class AccountDataHandler: def __init__(self, hs: "HomeServer"): - self._store = hs.get_datastore() + self._store = hs.get_datastores().main self._instance_name = hs.get_instance_name() self._notifier = hs.get_notifier() @@ -166,7 +166,7 @@ async def remove_tag_from_room(self, user_id: str, room_id: str, tag: str) -> in class AccountDataEventSource(EventSource[int, JsonDict]): def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main def get_current_key(self, direction: str = "f") -> int: return self.store.get_max_account_data_stream_id() diff --git 
a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index 87e415df75e8..9d0975f636ab 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -43,7 +43,7 @@ class AccountValidityHandler: def __init__(self, hs: "HomeServer"): self.hs = hs self.config = hs.config - self.store = self.hs.get_datastore() + self.store = self.hs.get_datastores().main self.send_email_handler = self.hs.get_send_email_handler() self.clock = self.hs.get_clock() diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 00ab5e79bf2e..96376963f239 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -29,7 +29,7 @@ class AdminHandler: def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.storage = hs.get_storage() self.state_store = self.storage.state diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index a42c3558e42b..e6461cc3c980 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -47,7 +47,7 @@ class ApplicationServicesHandler: def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.is_mine_id = hs.is_mine_id self.appservice_api = hs.get_application_service_api() self.scheduler = hs.get_application_service_scheduler() diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 572f54b1e353..3e29c96a49e5 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -194,7 +194,7 @@ class AuthHandler: SESSION_EXPIRE_MS = 48 * 60 * 60 * 1000 def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.auth = hs.get_auth() self.clock = hs.get_clock() self.checkers: Dict[str, UserInteractiveAuthChecker] = {} @@ -1183,7 +1183,7 @@ async def validate_login( # No password providers were able to handle this 3pid # Check local store - user_id = await self.hs.get_datastore().get_user_id_by_threepid( + user_id = await self.hs.get_datastores().main.get_user_id_by_threepid( medium, address ) if not user_id: diff --git a/synapse/handlers/cas.py b/synapse/handlers/cas.py index 5d8f6c50a949..7163af8004e3 100644 --- a/synapse/handlers/cas.py +++ b/synapse/handlers/cas.py @@ -61,7 +61,7 @@ class CasHandler: def __init__(self, hs: "HomeServer"): self.hs = hs self._hostname = hs.hostname - self._store = hs.get_datastore() + self._store = hs.get_datastores().main self._auth_handler = hs.get_auth_handler() self._registration_handler = hs.get_registration_handler() diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index 7a13d76a687f..e4eae0305666 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -29,7 +29,7 @@ class DeactivateAccountHandler: """Handler which deals with deactivating user accounts.""" def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.hs = hs self._auth_handler = hs.get_auth_handler() self._device_handler = hs.get_device_handler() diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 36c05f8363b9..934b5bd7349b 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -63,7 +63,7 @@ class DeviceWorkerHandler: def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() self.hs = hs - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.notifier = 
hs.get_notifier() self.state = hs.get_state_handler() self.state_store = hs.get_storage().state @@ -628,7 +628,7 @@ class DeviceListUpdater: "Handles incoming device list updates from federation and updates the DB" def __init__(self, hs: "HomeServer", device_handler: DeviceHandler): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.federation = hs.get_federation_client() self.clock = hs.get_clock() self.device_handler = device_handler diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py index b582266af908..4cb725d027c7 100644 --- a/synapse/handlers/devicemessage.py +++ b/synapse/handlers/devicemessage.py @@ -43,7 +43,7 @@ def __init__(self, hs: "HomeServer"): Args: hs: server """ - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.notifier = hs.get_notifier() self.is_mine = hs.is_mine diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 082f521791cd..b7064c6624b7 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -44,7 +44,7 @@ def __init__(self, hs: "HomeServer"): self.state = hs.get_state_handler() self.appservice_handler = hs.get_application_service_handler() self.event_creation_handler = hs.get_event_creation_handler() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.config = hs.config self.enable_room_list_search = hs.config.roomdirectory.enable_room_list_search self.require_membership = hs.config.server.require_membership_for_aliases diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index d4dfddf63fb4..d96456cd406c 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -47,7 +47,7 @@ class E2eKeysHandler: def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.federation = hs.get_federation_client() self.device_handler = hs.get_device_handler() self.is_mine = hs.is_mine @@ -1335,7 +1335,7 @@ class SigningKeyEduUpdater: """Handles incoming signing key updates from federation and updates the DB""" def __init__(self, hs: "HomeServer", e2e_keys_handler: E2eKeysHandler): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.federation = hs.get_federation_client() self.clock = hs.get_clock() self.e2e_keys_handler = e2e_keys_handler diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 12614b2c5d5a..52e44a2d426f 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -45,7 +45,7 @@ class E2eRoomKeysHandler: """ def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main # Used to lock whenever a client is uploading key data. 
This prevents collisions # between clients trying to upload the details of a new session, given all diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py index 365063ebdf77..d441ebb0ab3d 100644 --- a/synapse/handlers/event_auth.py +++ b/synapse/handlers/event_auth.py @@ -43,7 +43,7 @@ class EventAuthHandler: def __init__(self, hs: "HomeServer"): self._clock = hs.get_clock() - self._store = hs.get_datastore() + self._store = hs.get_datastores().main self._server_name = hs.hostname async def check_auth_rules_from_context( diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index bac5de052609..97e75e60c32e 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -33,7 +33,7 @@ class EventStreamHandler: def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.clock = hs.get_clock() self.hs = hs @@ -134,7 +134,7 @@ async def get_stream( class EventHandler: def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.storage = hs.get_storage() async def get_event( diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index e9ac920bcc5e..c055c26ecaa7 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -107,7 +107,7 @@ class FederationHandler: def __init__(self, hs: "HomeServer"): self.hs = hs - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.storage = hs.get_storage() self.state_store = self.storage.state self.federation_client = hs.get_federation_client() diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 7683246bef90..09d0de1ead82 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -95,7 +95,7 @@ class FederationEventHandler: """ def __init__(self, hs: "HomeServer"): - self._store = hs.get_datastore() + self._store = hs.get_datastores().main self._storage = hs.get_storage() self._state_store = self._storage.state diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 9e270d461b2c..e7a399787beb 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -63,7 +63,7 @@ async def f( class GroupsLocalWorkerHandler: def __init__(self, hs: "HomeServer"): self.hs = hs - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.room_list_handler = hs.get_room_list_handler() self.groups_server_handler = hs.get_groups_server_handler() self.transport_client = hs.get_federation_transport_client() diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index c83eaea359e7..57c9fdfe629a 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -49,7 +49,7 @@ class IdentityHandler: def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main # An HTTP client for contacting trusted URLs. self.http_client = SimpleHttpClient(hs) # An HTTP client for contacting identity servers specified by clients. 
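With this many call sites, a stray legacy call is easy to miss. A hypothetical audit helper, not part of this patch, that lists any remaining `get_datastore()` uses in a checkout:

```python
import pathlib
from typing import List


def find_legacy_calls(root: str = "synapse") -> List[str]:
    """Return "path:line: source" entries for leftover get_datastore() calls."""
    hits: List[str] = []
    for path in sorted(pathlib.Path(root).rglob("*.py")):
        for lineno, line in enumerate(path.read_text().splitlines(), start=1):
            if "get_datastore()" in line:
                hits.append(f"{path}:{lineno}: {line.strip()}")
    return hits
```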
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 346a06ff49b7..344f20f37cce 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -46,7 +46,7 @@ class InitialSyncHandler: def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.auth = hs.get_auth() self.state_handler = hs.get_state_handler() self.hs = hs diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 4d0da842873f..a9c964cd7533 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -75,7 +75,7 @@ def __init__(self, hs: "HomeServer"): self.auth = hs.get_auth() self.clock = hs.get_clock() self.state = hs.get_state_handler() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.storage = hs.get_storage() self.state_store = self.storage.state self._event_serializer = hs.get_event_client_serializer() @@ -397,7 +397,7 @@ def __init__(self, hs: "HomeServer"): self.hs = hs self.auth = hs.get_auth() self._event_auth_handler = hs.get_event_auth_handler() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.storage = hs.get_storage() self.state = hs.get_state_handler() self.clock = hs.get_clock() diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py index 8f71d975e9a4..593a2aac6691 100644 --- a/synapse/handlers/oidc.py +++ b/synapse/handlers/oidc.py @@ -273,7 +273,7 @@ def __init__( token_generator: "OidcSessionTokenGenerator", provider: OidcProviderConfig, ): - self._store = hs.get_datastore() + self._store = hs.get_datastores().main self._token_generator = token_generator diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 973f262964c1..5c01a426ff32 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -127,7 +127,7 @@ class PaginationHandler: def __init__(self, hs: "HomeServer"): self.hs = hs self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.storage = hs.get_storage() self.state_store = self.storage.state self.clock = hs.get_clock() diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index b223b72623e8..c155098beeb8 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -133,7 +133,7 @@ class BasePresenceHandler(abc.ABC): def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.presence_router = hs.get_presence_router() self.state = hs.get_state_handler() self.is_mine_id = hs.is_mine_id @@ -1541,7 +1541,7 @@ def __init__(self, hs: "HomeServer"): self.get_presence_handler = hs.get_presence_handler self.get_presence_router = hs.get_presence_router self.clock = hs.get_clock() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main async def get_new_events( self, diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 36e3ad2ba971..dd27f0accc70 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -54,7 +54,7 @@ class ProfileHandler: PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000 def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.clock = hs.get_clock() self.hs = hs diff --git a/synapse/handlers/read_marker.py b/synapse/handlers/read_marker.py index 58593e570e1d..bad1acc6344d 100644 --- a/synapse/handlers/read_marker.py +++ 
b/synapse/handlers/read_marker.py @@ -26,7 +26,7 @@ class ReadMarkerHandler: def __init__(self, hs: "HomeServer"): self.server_name = hs.config.server.server_name - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.account_data_handler = hs.get_account_data_handler() self.read_marker_linearizer = Linearizer(name="read_marker") diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index 5cb1ff749d92..b4132c353ae2 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -29,7 +29,7 @@ class ReceiptsHandler: def __init__(self, hs: "HomeServer"): self.notifier = hs.get_notifier() self.server_name = hs.config.server.server_name - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.event_auth_handler = hs.get_event_auth_handler() self.hs = hs @@ -163,7 +163,7 @@ async def received_client_receipt( class ReceiptEventSource(EventSource[int, JsonDict]): def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.config = hs.config @staticmethod diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 80320d2c070b..05bb1e0225c3 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -86,7 +86,7 @@ class LoginDict(TypedDict): class RegistrationHandler: def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.clock = hs.get_clock() self.hs = hs self.auth = hs.get_auth() diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index a990727fc580..7b965b4b962a 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -105,7 +105,7 @@ class EventContext: class RoomCreationHandler: def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.auth = hs.get_auth() self.clock = hs.get_clock() self.hs = hs @@ -1115,7 +1115,7 @@ class RoomContextHandler: def __init__(self, hs: "HomeServer"): self.hs = hs self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.storage = hs.get_storage() self.state_store = self.storage.state @@ -1246,7 +1246,7 @@ async def filter_evts(events: List[EventBase]) -> List[EventBase]: class TimestampLookupHandler: def __init__(self, hs: "HomeServer"): self.server_name = hs.hostname - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.state_handler = hs.get_state_handler() self.federation_client = hs.get_federation_client() @@ -1386,7 +1386,7 @@ async def get_event_for_timestamp( class RoomEventSource(EventSource[RoomStreamToken, EventBase]): def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main async def get_new_events( self, @@ -1476,7 +1476,7 @@ def __init__(self, hs: "HomeServer"): self._room_creation_handler = hs.get_room_creation_handler() self._replication = hs.get_replication_data_handler() self.event_creation_handler = hs.get_event_creation_handler() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main async def shutdown_room( self, diff --git a/synapse/handlers/room_batch.py b/synapse/handlers/room_batch.py index f8137ec04cc5..abbf7b7b27a8 100644 --- a/synapse/handlers/room_batch.py +++ b/synapse/handlers/room_batch.py @@ -16,7 +16,7 @@ class RoomBatchHandler: def __init__(self, hs: "HomeServer"): self.hs = hs - self.store = hs.get_datastore() + self.store = hs.get_datastores().main 
self.state_store = hs.get_storage().state self.event_creation_handler = hs.get_event_creation_handler() self.room_member_handler = hs.get_room_member_handler() diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index 1a33211a1fe1..f3577b5d5aec 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -49,7 +49,7 @@ class RoomListHandler: def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.hs = hs self.enable_room_list_search = hs.config.roomdirectory.enable_room_list_search self.response_cache: ResponseCache[ diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index b2adc0f48be9..a582837cf0aa 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -66,7 +66,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): def __init__(self, hs: "HomeServer"): self.hs = hs - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.auth = hs.get_auth() self.state_handler = hs.get_state_handler() self.config = hs.config diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py index 4844b69a0345..2e61d1cbe92d 100644 --- a/synapse/handlers/room_summary.py +++ b/synapse/handlers/room_summary.py @@ -90,7 +90,7 @@ class RoomSummaryHandler: def __init__(self, hs: "HomeServer"): self._event_auth_handler = hs.get_event_auth_handler() - self._store = hs.get_datastore() + self._store = hs.get_datastores().main self._event_serializer = hs.get_event_client_serializer() self._server_name = hs.hostname self._federation_client = hs.get_federation_client() diff --git a/synapse/handlers/saml.py b/synapse/handlers/saml.py index 727d75a50c6c..9602f0d0bb48 100644 --- a/synapse/handlers/saml.py +++ b/synapse/handlers/saml.py @@ -52,7 +52,7 @@ class Saml2SessionData: class SamlHandler: def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.clock = hs.get_clock() self.server_name = hs.hostname self._saml_client = Saml2Client(hs.config.saml2.saml2_sp_config) diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index 0e0e58de02f0..aa16e417eb30 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -49,7 +49,7 @@ class _SearchResult: class SearchHandler: def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.state_handler = hs.get_state_handler() self.clock = hs.get_clock() self.hs = hs diff --git a/synapse/handlers/set_password.py b/synapse/handlers/set_password.py index 706ad72761a3..73861bbd4085 100644 --- a/synapse/handlers/set_password.py +++ b/synapse/handlers/set_password.py @@ -27,7 +27,7 @@ class SetPasswordHandler: """Handler which deals with changing user account passwords""" def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self._auth_handler = hs.get_auth_handler() self._device_handler = hs.get_device_handler() diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py index 0bb8b0929e7e..ff5b5169cac1 100644 --- a/synapse/handlers/sso.py +++ b/synapse/handlers/sso.py @@ -180,7 +180,7 @@ class SsoHandler: def __init__(self, hs: "HomeServer"): self._clock = hs.get_clock() - self._store = hs.get_datastore() + self._store = hs.get_datastores().main self._server_name = hs.hostname self._registration_handler = hs.get_registration_handler() self._auth_handler = hs.get_auth_handler() diff 
--git a/synapse/handlers/state_deltas.py b/synapse/handlers/state_deltas.py index d30ba2b7248c..2d197282edda 100644 --- a/synapse/handlers/state_deltas.py +++ b/synapse/handlers/state_deltas.py @@ -30,7 +30,7 @@ class MatchChange(Enum): class StateDeltasHandler: def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main async def _get_key_change( self, diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py index 29e41a4c796c..436cd971cedd 100644 --- a/synapse/handlers/stats.py +++ b/synapse/handlers/stats.py @@ -39,7 +39,7 @@ class StatsHandler: def __init__(self, hs: "HomeServer"): self.hs = hs - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.state = hs.get_state_handler() self.server_name = hs.hostname self.clock = hs.get_clock() diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index e6050cbce6c8..98eaad33184e 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -266,7 +266,7 @@ def __bool__(self) -> bool: class SyncHandler: def __init__(self, hs: "HomeServer"): self.hs_config = hs.config - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.notifier = hs.get_notifier() self.presence_handler = hs.get_presence_handler() self.event_sources = hs.get_event_sources() diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index e4bed1c93758..843c68eb0fdf 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -57,7 +57,7 @@ class FollowerTypingHandler: """ def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.server_name = hs.config.server.server_name self.clock = hs.get_clock() self.is_mine_id = hs.is_mine_id @@ -446,7 +446,7 @@ def process_replication_rows( class TypingNotificationEventSource(EventSource[int, JsonDict]): def __init__(self, hs: "HomeServer"): - self._main_store = hs.get_datastore() + self._main_store = hs.get_datastores().main self.clock = hs.get_clock() # We can't call get_typing_handler here because there's a cycle: # diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py index 184730ebe8a4..014754a6308a 100644 --- a/synapse/handlers/ui_auth/checkers.py +++ b/synapse/handlers/ui_auth/checkers.py @@ -139,7 +139,7 @@ async def check_auth(self, authdict: dict, clientip: str) -> Any: class _BaseThreepidAuthChecker: def __init__(self, hs: "HomeServer"): self.hs = hs - self.store = hs.get_datastore() + self.store = hs.get_datastores().main async def _check_threepid(self, medium: str, authdict: dict) -> dict: if "threepid_creds" not in authdict: @@ -255,7 +255,7 @@ def __init__(self, hs: "HomeServer"): super().__init__(hs) self.hs = hs self._enabled = bool(hs.config.registration.registration_requires_token) - self.store = hs.get_datastore() + self.store = hs.get_datastores().main def is_enabled(self) -> bool: return self._enabled diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index 1565e034cb25..d27ed2be6a99 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -55,7 +55,7 @@ class UserDirectoryHandler(StateDeltasHandler): def __init__(self, hs: "HomeServer"): super().__init__(hs) - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.server_name = hs.hostname self.clock = hs.get_clock() self.notifier = hs.get_notifier() diff --git a/synapse/http/matrixfederationclient.py 
b/synapse/http/matrixfederationclient.py index e7656fbb9f7c..40bf1e06d602 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -351,7 +351,7 @@ def __init__(self, hs: "HomeServer", tls_client_options_factory): ) self.clock = hs.get_clock() - self._store = hs.get_datastore() + self._store = hs.get_datastores().main self.version_string_bytes = hs.version_string.encode("ascii") self.default_timeout = 60 diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 07020bfb8d5a..902916d80056 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -172,7 +172,9 @@ def __init__(self, hs: "HomeServer", auth_handler: AuthHandler) -> None: # TODO: Fix this type hint once the types for the data stores have been ironed # out. - self._store: Union[DataStore, "GenericWorkerSlavedStore"] = hs.get_datastore() + self._store: Union[ + DataStore, "GenericWorkerSlavedStore" + ] = hs.get_datastores().main self._auth = hs.get_auth() self._auth_handler = auth_handler self._server_name = hs.hostname @@ -926,7 +928,7 @@ async def update_room_membership( ) # Try to retrieve the resulting event. - event = await self._hs.get_datastore().get_event(event_id) + event = await self._hs.get_datastores().main.get_event(event_id) # update_membership is supposed to always return after the event has been # successfully persisted. @@ -1270,7 +1272,7 @@ class PublicRoomListManager: """ def __init__(self, hs: "HomeServer"): - self._store = hs.get_datastore() + self._store = hs.get_datastores().main async def room_is_in_public_room_list(self, room_id: str) -> bool: """Checks whether a room is in the public room list. diff --git a/synapse/notifier.py b/synapse/notifier.py index 753dd6b6a5f1..16d15a1f3328 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -222,7 +222,7 @@ def __init__(self, hs: "HomeServer"): self.hs = hs self.storage = hs.get_storage() self.event_sources = hs.get_event_sources() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.pending_new_room_events: List[_PendingRoomEventEntry] = [] # Called when there are new things to stream over replication diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index 5176a1c1861d..a1b771109848 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -68,7 +68,7 @@ class ThrottleParams: class Pusher(metaclass=abc.ABCMeta): def __init__(self, hs: "HomeServer", pusher_config: PusherConfig): self.hs = hs - self.store = self.hs.get_datastore() + self.store = self.hs.get_datastores().main self.clock = self.hs.get_clock() self.pusher_id = pusher_config.id diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index bee660893be2..fecf86034eb9 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -103,7 +103,7 @@ class BulkPushRuleEvaluator: def __init__(self, hs: "HomeServer"): self.hs = hs - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self._event_auth_handler = hs.get_event_auth_handler() # Used by `RulesForRoom` to ensure only one thing mutates the cache at a @@ -366,7 +366,7 @@ def __init__( """ self.room_id = room_id self.is_mine_id = hs.is_mine_id - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.room_push_rule_cache_metrics = room_push_rule_cache_metrics # Used to ensure only one thing mutates the cache at a time. 
Keyed off diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index 39bb2acae4b5..1710dd51b9d4 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -66,7 +66,7 @@ def __init__(self, hs: "HomeServer", pusher_config: PusherConfig, mailer: Mailer super().__init__(hs, pusher_config) self.mailer = mailer - self.store = self.hs.get_datastore() + self.store = self.hs.get_datastores().main self.email = pusher_config.pushkey self.timed_call: Optional[IDelayedCall] = None self.throttle_params: Dict[str, ThrottleParams] = {} diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 52c7ff3572e7..5818344520f5 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -133,7 +133,7 @@ async def _update_badge(self) -> None: # XXX as per https://github.com/matrix-org/matrix-doc/issues/2627, this seems # to be largely redundant. perhaps we can remove it. badge = await push_tools.get_badge_count( - self.hs.get_datastore(), + self.hs.get_datastores().main, self.user_id, group_by_room=self._group_unread_count_by_room, ) @@ -283,7 +283,7 @@ async def _process_one(self, push_action: HttpPushAction) -> bool: tweaks = push_rule_evaluator.tweaks_for_actions(push_action.actions) badge = await push_tools.get_badge_count( - self.hs.get_datastore(), + self.hs.get_datastores().main, self.user_id, group_by_room=self._group_unread_count_by_room, ) diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 3df8452eecf7..649a4f49d024 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -112,7 +112,7 @@ def __init__( self.template_text = template_text self.send_email_handler = hs.get_send_email_handler() - self.store = self.hs.get_datastore() + self.store = self.hs.get_datastores().main self.state_store = self.hs.get_storage().state self.macaroon_gen = self.hs.get_macaroon_generator() self.state_handler = self.hs.get_state_handler() diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 7912311d2401..d0cc657b4464 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -59,7 +59,7 @@ class PusherPool: def __init__(self, hs: "HomeServer"): self.hs = hs self.pusher_factory = PusherFactory(hs) - self.store = self.hs.get_datastore() + self.store = self.hs.get_datastores().main self.clock = self.hs.get_clock() # We shard the handling of push notifications by user ID. 
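The change applied throughout this patch is mechanical: every call site that previously fetched the main datastore through the dedicated accessor now goes through the `Databases` collection and picks out its `main` store (the hunk to `synapse/server.py` further below removes `get_datastore()` entirely). A minimal sketch of the before/after pattern, assuming `hs` is a set-up `HomeServer`:

    # Before this patch: a dedicated accessor returned the main DataStore.
    store = hs.get_datastore()

    # After this patch: the main DataStore is one member of the Databases
    # collection, so callers name it explicitly.
    store = hs.get_datastores().main

Spelling out `.main` at each call site leaves room for code that needs one of the other configured databases, rather than baking the main store into the `HomeServer` API.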
diff --git a/synapse/replication/http/devices.py b/synapse/replication/http/devices.py index f2f40129fe68..3d63645726b9 100644 --- a/synapse/replication/http/devices.py +++ b/synapse/replication/http/devices.py @@ -63,7 +63,7 @@ def __init__(self, hs: "HomeServer"): super().__init__(hs) self.device_list_updater = hs.get_device_handler().device_list_updater - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.clock = hs.get_clock() @staticmethod diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py index d529c8a19fa2..3e7300b4a148 100644 --- a/synapse/replication/http/federation.py +++ b/synapse/replication/http/federation.py @@ -68,7 +68,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint): def __init__(self, hs: "HomeServer"): super().__init__(hs) - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.storage = hs.get_storage() self.clock = hs.get_clock() self.federation_event_handler = hs.get_federation_event_handler() @@ -167,7 +167,7 @@ class ReplicationFederationSendEduRestServlet(ReplicationEndpoint): def __init__(self, hs: "HomeServer"): super().__init__(hs) - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.clock = hs.get_clock() self.registry = hs.get_federation_registry() @@ -214,7 +214,7 @@ class ReplicationGetQueryRestServlet(ReplicationEndpoint): def __init__(self, hs: "HomeServer"): super().__init__(hs) - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.clock = hs.get_clock() self.registry = hs.get_federation_registry() @@ -260,7 +260,7 @@ class ReplicationCleanRoomRestServlet(ReplicationEndpoint): def __init__(self, hs: "HomeServer"): super().__init__(hs) - self.store = hs.get_datastore() + self.store = hs.get_datastores().main @staticmethod async def _serialize_payload(room_id: str) -> JsonDict: # type: ignore[override] @@ -297,7 +297,7 @@ class ReplicationStoreRoomOnOutlierMembershipRestServlet(ReplicationEndpoint): def __init__(self, hs: "HomeServer"): super().__init__(hs) - self.store = hs.get_datastore() + self.store = hs.get_datastores().main @staticmethod async def _serialize_payload(room_id: str, room_version: RoomVersion) -> JsonDict: # type: ignore[override] diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py index 0145858e4719..663bff573848 100644 --- a/synapse/replication/http/membership.py +++ b/synapse/replication/http/membership.py @@ -50,7 +50,7 @@ def __init__(self, hs: "HomeServer"): super().__init__(hs) self.federation_handler = hs.get_federation_handler() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.clock = hs.get_clock() @staticmethod @@ -119,7 +119,7 @@ def __init__(self, hs: "HomeServer"): super().__init__(hs) self.federation_handler = hs.get_federation_handler() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.clock = hs.get_clock() @staticmethod @@ -188,7 +188,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint): def __init__(self, hs: "HomeServer"): super().__init__(hs) - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.clock = hs.get_clock() self.member_handler = hs.get_room_member_handler() @@ -258,7 +258,7 @@ class ReplicationRemoteRescindKnockRestServlet(ReplicationEndpoint): def __init__(self, hs: "HomeServer"): super().__init__(hs) - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.clock = 
hs.get_clock() self.member_handler = hs.get_room_member_handler() @@ -325,7 +325,7 @@ def __init__(self, hs: "HomeServer"): super().__init__(hs) self.registeration_handler = hs.get_registration_handler() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.clock = hs.get_clock() self.distributor = hs.get_distributor() diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py index c7f751b70d0d..6c8f8388fd60 100644 --- a/synapse/replication/http/register.py +++ b/synapse/replication/http/register.py @@ -36,7 +36,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint): def __init__(self, hs: "HomeServer"): super().__init__(hs) - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.registration_handler = hs.get_registration_handler() @staticmethod @@ -112,7 +112,7 @@ class ReplicationPostRegisterActionsServlet(ReplicationEndpoint): def __init__(self, hs: "HomeServer"): super().__init__(hs) - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.registration_handler = hs.get_registration_handler() @staticmethod diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py index 33e98daf8aba..ce781768364b 100644 --- a/synapse/replication/http/send_event.py +++ b/synapse/replication/http/send_event.py @@ -69,7 +69,7 @@ def __init__(self, hs: "HomeServer"): super().__init__(hs) self.event_creation_handler = hs.get_event_creation_handler() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.storage = hs.get_storage() self.clock = hs.get_clock() diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index d59ce7ccf967..1b8479b0b4ec 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -111,7 +111,7 @@ class ReplicationDataHandler: """ def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.notifier = hs.get_notifier() self._reactor = hs.get_reactor() self._clock = hs.get_clock() @@ -340,7 +340,7 @@ class FederationSenderHandler: def __init__(self, hs: "HomeServer"): assert hs.should_send_federation() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self._is_mine_id = hs.is_mine_id self._hs = hs diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index 17e157239336..0d2013a3cfc5 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -95,7 +95,7 @@ class ReplicationCommandHandler: def __init__(self, hs: "HomeServer"): self._replication_data_handler = hs.get_replication_data_handler() self._presence_handler = hs.get_presence_handler() - self._store = hs.get_datastore() + self._store = hs.get_datastores().main self._notifier = hs.get_notifier() self._clock = hs.get_clock() self._instance_id = hs.get_instance_id() diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index ecd6190f5b9b..494e42a2be8f 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -72,7 +72,7 @@ class ReplicationStreamer: """ def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.clock = hs.get_clock() self.notifier = hs.get_notifier() self._instance_name = hs.get_instance_name() diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index 914b9eae8492..23d631a76944 100644 --- 
a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -239,7 +239,7 @@ class BackfillStreamRow: ROW_TYPE = BackfillStreamRow def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main super().__init__( hs.get_instance_name(), self._current_token, @@ -267,7 +267,7 @@ class PresenceStreamRow: ROW_TYPE = PresenceStreamRow def __init__(self, hs: "HomeServer"): - store = hs.get_datastore() + store = hs.get_datastores().main if hs.get_instance_name() in hs.config.worker.writers.presence: # on the presence writer, query the presence handler @@ -355,7 +355,7 @@ class ReceiptsStreamRow: ROW_TYPE = ReceiptsStreamRow def __init__(self, hs: "HomeServer"): - store = hs.get_datastore() + store = hs.get_datastores().main super().__init__( hs.get_instance_name(), current_token_without_instance(store.get_max_receipt_stream_id), @@ -374,7 +374,7 @@ class PushRulesStreamRow: ROW_TYPE = PushRulesStreamRow def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main super().__init__( hs.get_instance_name(), @@ -401,7 +401,7 @@ class PushersStreamRow: ROW_TYPE = PushersStreamRow def __init__(self, hs: "HomeServer"): - store = hs.get_datastore() + store = hs.get_datastores().main super().__init__( hs.get_instance_name(), @@ -434,7 +434,7 @@ class CachesStreamRow: ROW_TYPE = CachesStreamRow def __init__(self, hs: "HomeServer"): - store = hs.get_datastore() + store = hs.get_datastores().main super().__init__( hs.get_instance_name(), store.get_cache_stream_token_for_writer, @@ -455,7 +455,7 @@ class DeviceListsStreamRow: ROW_TYPE = DeviceListsStreamRow def __init__(self, hs: "HomeServer"): - store = hs.get_datastore() + store = hs.get_datastores().main super().__init__( hs.get_instance_name(), current_token_without_instance(store.get_device_stream_token), @@ -474,7 +474,7 @@ class ToDeviceStreamRow: ROW_TYPE = ToDeviceStreamRow def __init__(self, hs: "HomeServer"): - store = hs.get_datastore() + store = hs.get_datastores().main super().__init__( hs.get_instance_name(), current_token_without_instance(store.get_to_device_stream_token), @@ -495,7 +495,7 @@ class TagAccountDataStreamRow: ROW_TYPE = TagAccountDataStreamRow def __init__(self, hs: "HomeServer"): - store = hs.get_datastore() + store = hs.get_datastores().main super().__init__( hs.get_instance_name(), current_token_without_instance(store.get_max_account_data_stream_id), @@ -516,7 +516,7 @@ class AccountDataStreamRow: ROW_TYPE = AccountDataStreamRow def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main super().__init__( hs.get_instance_name(), current_token_without_instance(self.store.get_max_account_data_stream_id), @@ -585,7 +585,7 @@ class GroupsStreamRow: ROW_TYPE = GroupsStreamRow def __init__(self, hs: "HomeServer"): - store = hs.get_datastore() + store = hs.get_datastores().main super().__init__( hs.get_instance_name(), current_token_without_instance(store.get_group_stream_token), @@ -604,7 +604,7 @@ class UserSignatureStreamRow: ROW_TYPE = UserSignatureStreamRow def __init__(self, hs: "HomeServer"): - store = hs.get_datastore() + store = hs.get_datastores().main super().__init__( hs.get_instance_name(), current_token_without_instance(store.get_device_stream_token), diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py index 50c4a5ba03a1..26f4fa7cfd16 100644 --- a/synapse/replication/tcp/streams/events.py +++ 
b/synapse/replication/tcp/streams/events.py @@ -124,7 +124,7 @@ class EventsStream(Stream): NAME = "events" def __init__(self, hs: "HomeServer"): - self._store = hs.get_datastore() + self._store = hs.get_datastores().main super().__init__( hs.get_instance_name(), self._store._stream_id_gen.get_current_token_for_writer, diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index ba0d989d81c3..6de302f81352 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -116,7 +116,7 @@ class PurgeHistoryRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): self.pagination_handler = hs.get_pagination_handler() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.auth = hs.get_auth() async def on_POST( diff --git a/synapse/rest/admin/background_updates.py b/synapse/rest/admin/background_updates.py index e9bce22a347b..93a78db81186 100644 --- a/synapse/rest/admin/background_updates.py +++ b/synapse/rest/admin/background_updates.py @@ -112,7 +112,7 @@ class BackgroundUpdateStartJobRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): self._auth = hs.get_auth() - self._store = hs.get_datastore() + self._store = hs.get_datastores().main async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) diff --git a/synapse/rest/admin/devices.py b/synapse/rest/admin/devices.py index d9905ff560cb..cef46ba0ddf2 100644 --- a/synapse/rest/admin/devices.py +++ b/synapse/rest/admin/devices.py @@ -44,7 +44,7 @@ def __init__(self, hs: "HomeServer"): super().__init__() self.auth = hs.get_auth() self.device_handler = hs.get_device_handler() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.is_mine = hs.is_mine async def on_GET( @@ -113,7 +113,7 @@ class DevicesRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): self.auth = hs.get_auth() self.device_handler = hs.get_device_handler() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.is_mine = hs.is_mine async def on_GET( @@ -144,7 +144,7 @@ class DeleteDevicesRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): self.auth = hs.get_auth() self.device_handler = hs.get_device_handler() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.is_mine = hs.is_mine async def on_POST( diff --git a/synapse/rest/admin/event_reports.py b/synapse/rest/admin/event_reports.py index 38477f8eadeb..6d634eef7081 100644 --- a/synapse/rest/admin/event_reports.py +++ b/synapse/rest/admin/event_reports.py @@ -53,7 +53,7 @@ class EventReportsRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) @@ -115,7 +115,7 @@ class EventReportDetailRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main async def on_GET( self, request: SynapseRequest, report_id: str diff --git a/synapse/rest/admin/federation.py b/synapse/rest/admin/federation.py index d162e0081ea1..023ed92144b4 100644 --- a/synapse/rest/admin/federation.py +++ b/synapse/rest/admin/federation.py @@ -48,7 +48,7 @@ class ListDestinationsRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): self._auth = hs.get_auth() - self._store = 
hs.get_datastore() + self._store = hs.get_datastores().main async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) @@ -105,7 +105,7 @@ class DestinationRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): self._auth = hs.get_auth() - self._store = hs.get_datastore() + self._store = hs.get_datastores().main async def on_GET( self, request: SynapseRequest, destination: str @@ -165,7 +165,7 @@ class DestinationMembershipRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): self._auth = hs.get_auth() - self._store = hs.get_datastore() + self._store = hs.get_datastores().main async def on_GET( self, request: SynapseRequest, destination: str @@ -221,7 +221,7 @@ class DestinationResetConnectionRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): self._auth = hs.get_auth() - self._store = hs.get_datastore() + self._store = hs.get_datastores().main self._authenticator = Authenticator(hs) async def on_POST( diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py index 299f5c9eb0f2..8ca57bdb2830 100644 --- a/synapse/rest/admin/media.py +++ b/synapse/rest/admin/media.py @@ -47,7 +47,7 @@ class QuarantineMediaInRoom(RestServlet): ] def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.auth = hs.get_auth() async def on_POST( @@ -74,7 +74,7 @@ class QuarantineMediaByUser(RestServlet): PATTERNS = admin_patterns("/user/(?P<user_id>[^/]*)/media/quarantine$") def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.auth = hs.get_auth() async def on_POST( @@ -103,7 +103,7 @@ class QuarantineMediaByID(RestServlet): ) def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.auth = hs.get_auth() async def on_POST( @@ -132,7 +132,7 @@ class UnquarantineMediaByID(RestServlet): ) def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.auth = hs.get_auth() async def on_POST( @@ -156,7 +156,7 @@ class ProtectMediaByID(RestServlet): PATTERNS = admin_patterns("/media/protect/(?P<media_id>[^/]*)$") def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.auth = hs.get_auth() async def on_POST( @@ -178,7 +178,7 @@ class UnprotectMediaByID(RestServlet): PATTERNS = admin_patterns("/media/unprotect/(?P<media_id>[^/]*)$") def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.auth = hs.get_auth() async def on_POST( @@ -200,7 +200,7 @@ class ListMediaInRoom(RestServlet): PATTERNS = admin_patterns("/room/(?P<room_id>[^/]*)/media$") def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.auth = hs.get_auth() async def on_GET( @@ -251,7 +251,7 @@ class DeleteMediaByID(RestServlet): PATTERNS = admin_patterns("/media/(?P<server_name>[^/]*)/(?P<media_id>[^/]*)$") def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.auth = hs.get_auth() self.server_name = hs.hostname self.media_repository = hs.get_media_repository() @@ -283,7 +283,7 @@ class DeleteMediaByDateSize(RestServlet): PATTERNS = admin_patterns("/media/(?P<server_name>[^/]*)/delete$") def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.auth = hs.get_auth() self.server_name = hs.hostname
self.media_repository = hs.get_media_repository() @@ -352,7 +352,7 @@ class UserMediaRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): self.is_mine = hs.is_mine self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.media_repository = hs.get_media_repository() async def on_GET( diff --git a/synapse/rest/admin/registration_tokens.py b/synapse/rest/admin/registration_tokens.py index 04948b640834..af606e925216 100644 --- a/synapse/rest/admin/registration_tokens.py +++ b/synapse/rest/admin/registration_tokens.py @@ -71,7 +71,7 @@ class ListRegistrationTokensRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) @@ -109,7 +109,7 @@ class NewRegistrationTokenRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.clock = hs.get_clock() # A string of all the characters allowed to be in a registration_token self.allowed_chars = string.ascii_letters + string.digits + "._~-" @@ -260,7 +260,7 @@ class RegistrationTokenRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main async def on_GET(self, request: SynapseRequest, token: str) -> Tuple[int, JsonDict]: """Retrieve a registration token.""" diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index 5b706efbcff0..f4736a3dad83 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -65,7 +65,7 @@ class RoomRestV2Servlet(RestServlet): def __init__(self, hs: "HomeServer"): self._auth = hs.get_auth() - self._store = hs.get_datastore() + self._store = hs.get_datastores().main self._pagination_handler = hs.get_pagination_handler() async def on_DELETE( @@ -188,7 +188,7 @@ class ListRoomRestServlet(RestServlet): PATTERNS = admin_patterns("/rooms$") def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.auth = hs.get_auth() self.admin_handler = hs.get_admin_handler() @@ -278,7 +278,7 @@ class RoomRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.room_shutdown_handler = hs.get_room_shutdown_handler() self.pagination_handler = hs.get_pagination_handler() @@ -382,7 +382,7 @@ class RoomMembersRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main async def on_GET( self, request: SynapseRequest, room_id: str @@ -408,7 +408,7 @@ class RoomStateRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.clock = hs.get_clock() self._event_serializer = hs.get_event_client_serializer() @@ -525,7 +525,7 @@ class MakeRoomAdminRestServlet(ResolveRoomIdMixin, RestServlet): def __init__(self, hs: "HomeServer"): super().__init__(hs) self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.event_creation_handler = hs.get_event_creation_handler() 
self.state_handler = hs.get_state_handler() self.is_mine_id = hs.is_mine_id @@ -670,7 +670,7 @@ class ForwardExtremitiesRestServlet(ResolveRoomIdMixin, RestServlet): def __init__(self, hs: "HomeServer"): super().__init__(hs) self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main async def on_DELETE( self, request: SynapseRequest, room_identifier: str @@ -781,7 +781,7 @@ class BlockRoomRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): self._auth = hs.get_auth() - self._store = hs.get_datastore() + self._store = hs.get_datastores().main async def on_GET( self, request: SynapseRequest, room_id: str diff --git a/synapse/rest/admin/statistics.py b/synapse/rest/admin/statistics.py index 7a6546372eef..3b142b840206 100644 --- a/synapse/rest/admin/statistics.py +++ b/synapse/rest/admin/statistics.py @@ -38,7 +38,7 @@ class UserMediaStatisticsRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index c2617ee30c48..8e29ada8a07c 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -66,7 +66,7 @@ class UsersRestServletV2(RestServlet): """ def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.auth = hs.get_auth() self.admin_handler = hs.get_admin_handler() @@ -156,7 +156,7 @@ def __init__(self, hs: "HomeServer"): self.hs = hs self.auth = hs.get_auth() self.admin_handler = hs.get_admin_handler() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.auth_handler = hs.get_auth_handler() self.profile_handler = hs.get_profile_handler() self.set_password_handler = hs.get_set_password_handler() @@ -588,7 +588,7 @@ def __init__(self, hs: "HomeServer"): self._deactivate_account_handler = hs.get_deactivate_account_handler() self.auth = hs.get_auth() self.is_mine = hs.is_mine - self.store = hs.get_datastore() + self.store = hs.get_datastores().main async def on_POST( self, request: SynapseRequest, target_user_id: str @@ -674,7 +674,7 @@ class ResetPasswordRestServlet(RestServlet): PATTERNS = admin_patterns("/reset_password/(?P<target_user_id>[^/]*)$") def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() self._set_password_handler = hs.get_set_password_handler() @@ -717,7 +717,7 @@ class SearchUsersRestServlet(RestServlet): PATTERNS = admin_patterns("/search_users/(?P<target_user_id>[^/]*)$") def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.auth = hs.get_auth() self.is_mine = hs.is_mine @@ -775,7 +775,7 @@ class UserAdminServlet(RestServlet): PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/admin$") def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.auth = hs.get_auth() self.is_mine = hs.is_mine @@ -835,7 +835,7 @@ class UserMembershipRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): self.is_mine = hs.is_mine self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main async def on_GET( self, request: SynapseRequest, user_id: str @@ -864,7 +864,7 @@ class
PushersRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): self.is_mine = hs.is_mine - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.auth = hs.get_auth() async def on_GET( @@ -905,7 +905,7 @@ class UserTokenRestServlet(RestServlet): PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/login$") def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() self.is_mine_id = hs.is_mine_id @@ -974,7 +974,7 @@ class ShadowBanRestServlet(RestServlet): PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/shadow_ban$") def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.auth = hs.get_auth() self.is_mine_id = hs.is_mine_id @@ -1026,7 +1026,7 @@ class RateLimitRestServlet(RestServlet): PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/override_ratelimit$") def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.auth = hs.get_auth() self.is_mine_id = hs.is_mine_id @@ -1129,7 +1129,7 @@ class AccountDataRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): self._auth = hs.get_auth() - self._store = hs.get_datastore() + self._store = hs.get_datastores().main self._is_mine_id = hs.is_mine_id async def on_GET( diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index 5802de5b7c58..4b217882e812 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -60,7 +60,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() self.hs = hs - self.datastore = hs.get_datastore() + self.datastore = hs.get_datastores().main self.config = hs.config self.identity_handler = hs.get_identity_handler() @@ -114,7 +114,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: # This avoids a potential account hijack by requesting a password reset to # an email address which is controlled by the attacker but which, after # canonicalisation, matches the one in our database.
- existing_user_id = await self.hs.get_datastore().get_user_id_by_threepid( + existing_user_id = await self.hs.get_datastores().main.get_user_id_by_threepid( "email", email ) @@ -168,7 +168,7 @@ def __init__(self, hs: "HomeServer"): self.hs = hs self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() - self.datastore = self.hs.get_datastore() + self.datastore = self.hs.get_datastores().main self.password_policy_handler = hs.get_password_policy_handler() self._set_password_handler = hs.get_set_password_handler() @@ -347,7 +347,7 @@ def __init__(self, hs: "HomeServer"): self.hs = hs self.config = hs.config self.identity_handler = hs.get_identity_handler() - self.store = self.hs.get_datastore() + self.store = self.hs.get_datastores().main if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: self.mailer = Mailer( @@ -450,7 +450,7 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): self.hs = hs super().__init__() - self.store = self.hs.get_datastore() + self.store = self.hs.get_datastores().main self.identity_handler = hs.get_identity_handler() async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: @@ -533,7 +533,7 @@ def __init__(self, hs: "HomeServer"): super().__init__() self.config = hs.config self.clock = hs.get_clock() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: self._failure_email_template = ( self.config.email.email_add_threepid_template_failure_html @@ -600,7 +600,7 @@ def __init__(self, hs: "HomeServer"): super().__init__() self.config = hs.config self.clock = hs.get_clock() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.identity_handler = hs.get_identity_handler() async def on_POST(self, request: Request) -> Tuple[int, JsonDict]: @@ -634,7 +634,7 @@ def __init__(self, hs: "HomeServer"): self.identity_handler = hs.get_identity_handler() self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() - self.datastore = self.hs.get_datastore() + self.datastore = self.hs.get_datastores().main async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) @@ -768,7 +768,7 @@ def __init__(self, hs: "HomeServer"): self.hs = hs self.identity_handler = hs.get_identity_handler() self.auth = hs.get_auth() - self.datastore = self.hs.get_datastore() + self.datastore = self.hs.get_datastores().main async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: """Unbind the given 3pid from a specific identity server, or identity servers that are diff --git a/synapse/rest/client/account_data.py b/synapse/rest/client/account_data.py index 58b8adbd3245..bfe985939beb 100644 --- a/synapse/rest/client/account_data.py +++ b/synapse/rest/client/account_data.py @@ -42,7 +42,7 @@ class AccountDataServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.handler = hs.get_account_data_handler() async def on_PUT( @@ -90,7 +90,7 @@ class RoomAccountDataServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.handler = hs.get_account_data_handler() async def on_PUT( diff --git a/synapse/rest/client/directory.py b/synapse/rest/client/directory.py index 
ee247e3d1e0d..e181a0dde2f8 100644 --- a/synapse/rest/client/directory.py +++ b/synapse/rest/client/directory.py @@ -47,7 +47,7 @@ class ClientDirectoryServer(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.directory_handler = hs.get_directory_handler() self.auth = hs.get_auth() @@ -129,7 +129,7 @@ class ClientDirectoryListServer(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.directory_handler = hs.get_directory_handler() self.auth = hs.get_auth() @@ -173,7 +173,7 @@ class ClientAppserviceDirectoryListServer(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.directory_handler = hs.get_directory_handler() self.auth = hs.get_auth() diff --git a/synapse/rest/client/events.py b/synapse/rest/client/events.py index 672c821061ff..916f5230f1b1 100644 --- a/synapse/rest/client/events.py +++ b/synapse/rest/client/events.py @@ -39,7 +39,7 @@ def __init__(self, hs: "HomeServer"): super().__init__() self.event_stream_handler = hs.get_event_stream_handler() self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) diff --git a/synapse/rest/client/groups.py b/synapse/rest/client/groups.py index a7e9aa3e9bbf..7e1149c7f433 100644 --- a/synapse/rest/client/groups.py +++ b/synapse/rest/client/groups.py @@ -705,7 +705,7 @@ def __init__(self, hs: "HomeServer"): self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.is_mine_id = hs.is_mine_id @_validate_group_id @@ -854,7 +854,7 @@ def __init__(self, hs: "HomeServer"): super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main @_validate_group_id async def on_PUT( @@ -879,7 +879,7 @@ def __init__(self, hs: "HomeServer"): super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.groups_handler = hs.get_groups_local_handler() async def on_GET( @@ -901,7 +901,7 @@ def __init__(self, hs: "HomeServer"): super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.groups_handler = hs.get_groups_local_handler() async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: diff --git a/synapse/rest/client/initial_sync.py b/synapse/rest/client/initial_sync.py index 49b1037b28d4..cfadcb8e50f5 100644 --- a/synapse/rest/client/initial_sync.py +++ b/synapse/rest/client/initial_sync.py @@ -33,7 +33,7 @@ def __init__(self, hs: "HomeServer"): super().__init__() self.initial_sync_handler = hs.get_initial_sync_handler() self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index 730c18f08f78..ce806e3c11be 100644 --- a/synapse/rest/client/keys.py +++ 
b/synapse/rest/client/keys.py @@ -198,7 +198,7 @@ def __init__(self, hs: "HomeServer"): super().__init__() self.auth = hs.get_auth() self.device_handler = hs.get_device_handler() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py index f9994658c4a3..c9d44c5964ce 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py @@ -104,13 +104,13 @@ def __init__(self, hs: "HomeServer"): self._well_known_builder = WellKnownBuilder(hs) self._address_ratelimiter = Ratelimiter( - store=hs.get_datastore(), + store=hs.get_datastores().main, clock=hs.get_clock(), rate_hz=self.hs.config.ratelimiting.rc_login_address.per_second, burst_count=self.hs.config.ratelimiting.rc_login_address.burst_count, ) self._account_ratelimiter = Ratelimiter( - store=hs.get_datastore(), + store=hs.get_datastores().main, clock=hs.get_clock(), rate_hz=self.hs.config.ratelimiting.rc_login_account.per_second, burst_count=self.hs.config.ratelimiting.rc_login_account.burst_count, diff --git a/synapse/rest/client/notifications.py b/synapse/rest/client/notifications.py index 8e427a96a320..20377a9ac628 100644 --- a/synapse/rest/client/notifications.py +++ b/synapse/rest/client/notifications.py @@ -35,7 +35,7 @@ class NotificationsServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.auth = hs.get_auth() self.clock = hs.get_clock() self._event_serializer = hs.get_event_client_serializer() diff --git a/synapse/rest/client/openid.py b/synapse/rest/client/openid.py index add56d699884..820682ec4265 100644 --- a/synapse/rest/client/openid.py +++ b/synapse/rest/client/openid.py @@ -67,7 +67,7 @@ class IdTokenServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.clock = hs.get_clock() self.server_name = hs.config.server.server_name diff --git a/synapse/rest/client/push_rule.py b/synapse/rest/client/push_rule.py index 8fe75bd750bd..a93f6fd5e0a8 100644 --- a/synapse/rest/client/push_rule.py +++ b/synapse/rest/client/push_rule.py @@ -57,7 +57,7 @@ class PushRuleRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.notifier = hs.get_notifier() self._is_worker = hs.config.worker.worker_app is not None diff --git a/synapse/rest/client/pusher.py b/synapse/rest/client/pusher.py index 98604a93887f..d6487c31dd48 100644 --- a/synapse/rest/client/pusher.py +++ b/synapse/rest/client/pusher.py @@ -46,7 +46,9 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) user = requester.user - pushers = await self.hs.get_datastore().get_pushers_by_user_id(user.to_string()) + pushers = await self.hs.get_datastores().main.get_pushers_by_user_id( + user.to_string() + ) filtered_pushers = [p.as_dict() for p in pushers] diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index b8a5135e02e9..70baf50fa473 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -123,7 +123,7 @@ async def on_POST(self, request: SynapseRequest) -> 
Tuple[int, JsonDict]: request, "email", email ) - existing_user_id = await self.hs.get_datastore().get_user_id_by_threepid( + existing_user_id = await self.hs.get_datastores().main.get_user_id_by_threepid( "email", email ) @@ -203,7 +203,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: request, "msisdn", msisdn ) - existing_user_id = await self.hs.get_datastore().get_user_id_by_threepid( + existing_user_id = await self.hs.get_datastores().main.get_user_id_by_threepid( "msisdn", msisdn ) @@ -258,7 +258,7 @@ def __init__(self, hs: "HomeServer"): self.auth = hs.get_auth() self.config = hs.config self.clock = hs.get_clock() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main if self.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL: self._failure_email_template = ( @@ -385,7 +385,7 @@ class RegistrationTokenValidityRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() self.hs = hs - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.ratelimiter = Ratelimiter( store=self.store, clock=hs.get_clock(), @@ -415,7 +415,7 @@ def __init__(self, hs: "HomeServer"): self.hs = hs self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.auth_handler = hs.get_auth_handler() self.registration_handler = hs.get_registration_handler() self.identity_handler = hs.get_identity_handler() diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py index 2cab83c4e6e7..487ea38b55cc 100644 --- a/synapse/rest/client/relations.py +++ b/synapse/rest/client/relations.py @@ -85,7 +85,7 @@ class RelationPaginationServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.clock = hs.get_clock() self._event_serializer = hs.get_event_client_serializer() self.event_handler = hs.get_event_handler() @@ -190,7 +190,7 @@ class RelationAggregationPaginationServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.event_handler = hs.get_event_handler() async def on_GET( @@ -282,7 +282,7 @@ class RelationAggregationGroupPaginationServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.clock = hs.get_clock() self._event_serializer = hs.get_event_client_serializer() self.event_handler = hs.get_event_handler() diff --git a/synapse/rest/client/report_event.py b/synapse/rest/client/report_event.py index d4a4adb50c59..6e962a45321e 100644 --- a/synapse/rest/client/report_event.py +++ b/synapse/rest/client/report_event.py @@ -38,7 +38,7 @@ def __init__(self, hs: "HomeServer"): self.hs = hs self.auth = hs.get_auth() self.clock = hs.get_clock() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main async def on_POST( self, request: SynapseRequest, room_id: str, event_id: str diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 90355e44b25e..5ccfe5a92f06 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -477,7 +477,7 @@ def __init__(self, hs: "HomeServer"): super().__init__() self.message_handler = hs.get_message_handler() self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = 
hs.get_datastores().main async def on_GET( self, request: SynapseRequest, room_id: str @@ -553,7 +553,7 @@ def __init__(self, hs: "HomeServer"): self._hs = hs self.pagination_handler = hs.get_pagination_handler() self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main async def on_GET( self, request: SynapseRequest, room_id: str @@ -621,7 +621,7 @@ def __init__(self, hs: "HomeServer"): super().__init__() self.initial_sync_handler = hs.get_initial_sync_handler() self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main async def on_GET( self, request: SynapseRequest, room_id: str @@ -642,7 +642,7 @@ class RoomEventServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() self.clock = hs.get_clock() - self._store = hs.get_datastore() + self._store = hs.get_datastores().main self.event_handler = hs.get_event_handler() self._event_serializer = hs.get_event_client_serializer() self.auth = hs.get_auth() @@ -1027,7 +1027,7 @@ class JoinedRoomsRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.auth = hs.get_auth() async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: @@ -1116,7 +1116,7 @@ class TimestampLookupRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() self._auth = hs.get_auth() - self._store = hs.get_datastore() + self._store = hs.get_datastores().main self.timestamp_lookup_handler = hs.get_timestamp_lookup_handler() async def on_GET( diff --git a/synapse/rest/client/room_batch.py b/synapse/rest/client/room_batch.py index 4b6be38327e0..0048973e5961 100644 --- a/synapse/rest/client/room_batch.py +++ b/synapse/rest/client/room_batch.py @@ -75,7 +75,7 @@ class RoomBatchSendEventRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.event_creation_handler = hs.get_event_creation_handler() self.auth = hs.get_auth() self.room_batch_handler = hs.get_room_batch_handler() diff --git a/synapse/rest/client/shared_rooms.py b/synapse/rest/client/shared_rooms.py index 09a46737de06..e669fa78902d 100644 --- a/synapse/rest/client/shared_rooms.py +++ b/synapse/rest/client/shared_rooms.py @@ -41,7 +41,7 @@ class UserSharedRoomsServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.user_directory_active = hs.config.server.update_user_directory async def on_GET( diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index f9615da52583..f3018ff69077 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -103,7 +103,7 @@ def __init__(self, hs: "HomeServer"): super().__init__() self.hs = hs self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.sync_handler = hs.get_sync_handler() self.clock = hs.get_clock() self.filtering = hs.get_filtering() diff --git a/synapse/rest/client/tags.py b/synapse/rest/client/tags.py index c88cb9367c5f..ca638755c7ee 100644 --- a/synapse/rest/client/tags.py +++ b/synapse/rest/client/tags.py @@ -39,7 +39,7 @@ class TagListServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() self.auth = hs.get_auth() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main async 
def on_GET( self, request: SynapseRequest, user_id: str, room_id: str diff --git a/synapse/rest/consent/consent_resource.py b/synapse/rest/consent/consent_resource.py index 3d2afacc50d9..25f9ea285bca 100644 --- a/synapse/rest/consent/consent_resource.py +++ b/synapse/rest/consent/consent_resource.py @@ -78,7 +78,7 @@ def __init__(self, hs: "HomeServer"): super().__init__() self.hs = hs - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.registration_handler = hs.get_registration_handler() # this is required by the request_handler wrapper diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index 3923ba8439b0..3525d6ae54c6 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -94,7 +94,7 @@ def __init__(self, hs: "HomeServer"): super().__init__() self.fetcher = ServerKeyFetcher(hs) - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.clock = hs.get_clock() self.federation_domain_whitelist = ( hs.config.federation.federation_domain_whitelist diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 71b9a34b145d..6c414402bd46 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -75,7 +75,7 @@ def __init__(self, hs: "HomeServer"): self.client = hs.get_federation_http_client() self.clock = hs.get_clock() self.server_name = hs.hostname - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.max_upload_size = hs.config.media.max_upload_size self.max_image_pixels = hs.config.media.max_image_pixels diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index c08b60d10a09..14ea88b24052 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -134,7 +134,7 @@ def __init__( self.filepaths = media_repo.filepaths self.max_spider_size = hs.config.media.max_spider_size self.server_name = hs.hostname - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.client = SimpleHttpClient( hs, treq_args={"browser_like_redirects": True}, diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py index ed91ef5a4283..53b156524375 100644 --- a/synapse/rest/media/v1/thumbnail_resource.py +++ b/synapse/rest/media/v1/thumbnail_resource.py @@ -50,7 +50,7 @@ def __init__( ): super().__init__() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.media_repo = media_repo self.media_storage = media_storage self.dynamic_thumbnails = hs.config.media.dynamic_thumbnails diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/v1/upload_resource.py index fde28d08cb8e..e73e431dc9f1 100644 --- a/synapse/rest/media/v1/upload_resource.py +++ b/synapse/rest/media/v1/upload_resource.py @@ -37,7 +37,7 @@ def __init__(self, hs: "HomeServer", media_repo: "MediaRepository"): self.media_repo = media_repo self.filepaths = media_repo.filepaths - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.clock = hs.get_clock() self.server_name = hs.hostname self.auth = hs.get_auth() diff --git a/synapse/rest/synapse/client/password_reset.py b/synapse/rest/synapse/client/password_reset.py index 28a67f04e33f..6ac9dbc7c9be 100644 --- a/synapse/rest/synapse/client/password_reset.py +++ b/synapse/rest/synapse/client/password_reset.py @@ -44,7 
+44,7 @@ def __init__(self, hs: "HomeServer"): super().__init__() self.clock = hs.get_clock() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self._local_threepid_handling_disabled_due_to_email_config = ( hs.config.email.local_threepid_handling_disabled_due_to_email_config diff --git a/synapse/server.py b/synapse/server.py index 4c07f2101566..b5e2a319bcef 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -17,7 +17,7 @@ # homeservers; either as a full homeserver as a real application, or a small # partial one for unit test mocking. -# Imports required for the default HomeServer() implementation + import abc import functools import logging @@ -134,7 +134,7 @@ WorkerServerNoticesSender, ) from synapse.state import StateHandler, StateResolutionHandler -from synapse.storage import Databases, DataStore, Storage +from synapse.storage import Databases, Storage from synapse.streams.events import EventSources from synapse.types import DomainSpecificString, ISynapseReactor from synapse.util import Clock @@ -225,7 +225,7 @@ class HomeServer(metaclass=abc.ABCMeta): # This is overridden in derived application classes # (such as synapse.app.homeserver.SynapseHomeServer) and gives the class to be - # instantiated during setup() for future return by get_datastore() + # instantiated during setup() for future return by get_datastores() DATASTORE_CLASS = abc.abstractproperty() tls_server_context_factory: Optional[IOpenSSLContextFactory] @@ -355,12 +355,6 @@ def is_mine_id(self, string: str) -> bool: def get_clock(self) -> Clock: return Clock(self._reactor) - def get_datastore(self) -> DataStore: - if not self.datastores: - raise Exception("HomeServer.setup must be called before getting datastores") - - return self.datastores.main - def get_datastores(self) -> Databases: if not self.datastores: raise Exception("HomeServer.setup must be called before getting datastores") @@ -374,7 +368,7 @@ def get_distributor(self) -> Distributor: @cache_in_self def get_registration_ratelimiter(self) -> Ratelimiter: return Ratelimiter( - store=self.get_datastore(), + store=self.get_datastores().main, clock=self.get_clock(), rate_hz=self.config.ratelimiting.rc_registration.per_second, burst_count=self.config.ratelimiting.rc_registration.burst_count, @@ -847,7 +841,7 @@ def should_send_federation(self) -> bool: @cache_in_self def get_request_ratelimiter(self) -> RequestRatelimiter: return RequestRatelimiter( - self.get_datastore(), + self.get_datastores().main, self.get_clock(), self.config.ratelimiting.rc_message, self.config.ratelimiting.rc_admin_redaction, diff --git a/synapse/server_notices/consent_server_notices.py b/synapse/server_notices/consent_server_notices.py index e09a25591feb..698ca742ed66 100644 --- a/synapse/server_notices/consent_server_notices.py +++ b/synapse/server_notices/consent_server_notices.py @@ -32,7 +32,7 @@ class ConsentServerNotices: def __init__(self, hs: "HomeServer"): self._server_notices_manager = hs.get_server_notices_manager() - self._store = hs.get_datastore() + self._store = hs.get_datastores().main self._users_in_progress: Set[str] = set() diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index 8522930b5048..015dd08f05e4 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -36,7 +36,7 @@ class ResourceLimitsServerNotices: def __init__(self, hs: "HomeServer"): self._server_notices_manager = 
             hs.get_server_notices_manager()
-        self._store = hs.get_datastore()
+        self._store = hs.get_datastores().main
         self._auth = hs.get_auth()
         self._config = hs.config
         self._resouce_limited = False
diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py
index 0cf60236f8b4..7b4814e04994 100644
--- a/synapse/server_notices/server_notices_manager.py
+++ b/synapse/server_notices/server_notices_manager.py
@@ -29,7 +29,7 @@ class ServerNoticesManager:
     def __init__(self, hs: "HomeServer"):
-        self._store = hs.get_datastore()
+        self._store = hs.get_datastores().main
         self._config = hs.config
         self._account_data_handler = hs.get_account_data_handler()
         self._room_creation_handler = hs.get_room_creation_handler()
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index 67e8bc6ec288..fcc24ad1296a 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -126,7 +126,7 @@ class StateHandler:
     def __init__(self, hs: "HomeServer"):
         self.clock = hs.get_clock()
-        self.store = hs.get_datastore()
+        self.store = hs.get_datastores().main
         self.state_store = hs.get_storage().state
         self.hs = hs
         self._state_resolution_handler = hs.get_state_resolution_handler()
diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py
index 8f09dd8e8751..e9a0cdc6be94 100644
--- a/synapse/storage/databases/main/monthly_active_users.py
+++ b/synapse/storage/databases/main/monthly_active_users.py
@@ -112,7 +112,7 @@ async def get_registered_reserved_users(self) -> List[str]:
         for tp in self.hs.config.server.mau_limits_reserved_threepids[
             : self.hs.config.server.max_mau_value
         ]:
-            user_id = await self.hs.get_datastore().get_user_id_by_threepid(
+            user_id = await self.hs.get_datastores().main.get_user_id_by_threepid(
                 tp["medium"], canonicalise_email(tp["address"])
             )
             if user_id:
diff --git a/synapse/streams/events.py b/synapse/streams/events.py
index 4ec2a713cf55..fb8fe1729516 100644
--- a/synapse/streams/events.py
+++ b/synapse/streams/events.py
@@ -48,7 +48,7 @@ def __init__(self, hs: "HomeServer"):
             # all the attributes of `_EventSourcesInner` are annotated.
             *(attribute.type(hs) for attribute in attr.fields(_EventSourcesInner))  # type: ignore[misc]
         )
-        self.store = hs.get_datastore()
+        self.store = hs.get_datastores().main
     def get_current_token(self) -> StreamToken:
         push_rules_key = self.store.get_max_push_rules_stream_id()
diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py
index 4b53b6d40b31..686d17c0de50 100644
--- a/tests/api/test_auth.py
+++ b/tests/api/test_auth.py
@@ -16,6 +16,8 @@
 import pymacaroons
+from twisted.test.proto_helpers import MemoryReactor
+
 from synapse.api.auth import Auth
 from synapse.api.constants import UserTypes
 from synapse.api.errors import (
@@ -26,8 +28,10 @@
     ResourceLimitError,
 )
 from synapse.appservice import ApplicationService
+from synapse.server import HomeServer
 from synapse.storage.databases.main.registration import TokenLookupResult
 from synapse.types import Requester
+from synapse.util import Clock
 from tests import unittest
 from tests.test_utils import simple_async_mock
@@ -36,10 +40,10 @@
 class AuthTestCase(unittest.HomeserverTestCase):
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer):
         self.store = Mock()
-        hs.get_datastore = Mock(return_value=self.store)
+        hs.datastores.main = self.store
         hs.get_auth_handler().store = self.store
         self.auth = Auth(hs)
diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py
index b7fc33dc94d1..973f0f7fa176 100644
--- a/tests/api/test_filtering.py
+++ b/tests/api/test_filtering.py
@@ -40,7 +40,7 @@ def MockEvent(**kwargs):
 class FilteringTestCase(unittest.HomeserverTestCase):
     def prepare(self, reactor, clock, hs):
         self.filtering = hs.get_filtering()
-        self.datastore = hs.get_datastore()
+        self.datastore = hs.get_datastores().main
     def test_errors_on_invalid_filters(self):
         invalid_filters = [
diff --git a/tests/api/test_ratelimiting.py b/tests/api/test_ratelimiting.py
index dcf0110c16e0..4ef754a186c1 100644
--- a/tests/api/test_ratelimiting.py
+++ b/tests/api/test_ratelimiting.py
@@ -8,7 +8,7 @@ class TestRatelimiter(unittest.HomeserverTestCase):
     def test_allowed_via_can_do_action(self):
         limiter = Ratelimiter(
-            store=self.hs.get_datastore(), clock=None, rate_hz=0.1, burst_count=1
+            store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=1
         )
         allowed, time_allowed = self.get_success_or_raise(
             limiter.can_do_action(None, key="test_id", _time_now_s=0)
         )
@@ -39,7 +39,7 @@ def test_allowed_appservice_ratelimited_via_can_requester_do_action(self):
         as_requester = create_requester("@user:example.com", app_service=appservice)
         limiter = Ratelimiter(
-            store=self.hs.get_datastore(), clock=None, rate_hz=0.1, burst_count=1
+            store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=1
         )
         allowed, time_allowed = self.get_success_or_raise(
             limiter.can_do_action(as_requester, _time_now_s=0)
         )
@@ -70,7 +70,7 @@ def test_allowed_appservice_via_can_requester_do_action(self):
         as_requester = create_requester("@user:example.com", app_service=appservice)
         limiter = Ratelimiter(
-            store=self.hs.get_datastore(), clock=None, rate_hz=0.1, burst_count=1
+            store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=1
         )
         allowed, time_allowed = self.get_success_or_raise(
             limiter.can_do_action(as_requester, _time_now_s=0)
         )
@@ -92,7 +92,7 @@ def test_allowed_via_ratelimit(self):
         limiter = Ratelimiter(
-            store=self.hs.get_datastore(), clock=None, rate_hz=0.1, burst_count=1
+            store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=1
         )
         # Shouldn't raise
@@ -116,7 +116,7 @@ def test_allowed_via_can_do_action_and_overriding_parameters(self):
         """
         # Create a Ratelimiter with a very low allowed rate_hz and burst_count
         limiter = Ratelimiter(
-            store=self.hs.get_datastore(), clock=None, rate_hz=0.1, burst_count=1
+            store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=1
         )
         # First attempt should be allowed
@@ -162,7 +162,7 @@ def test_allowed_via_ratelimit_and_overriding_parameters(self):
         """
         # Create a Ratelimiter with a very low allowed rate_hz and burst_count
         limiter = Ratelimiter(
-            store=self.hs.get_datastore(), clock=None, rate_hz=0.1, burst_count=1
+            store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=1
         )
         # First attempt should be allowed
@@ -190,7 +190,7 @@ def test_allowed_via_ratelimit_and_overriding_parameters(self):
     def test_pruning(self):
         limiter = Ratelimiter(
-            store=self.hs.get_datastore(), clock=None, rate_hz=0.1, burst_count=1
+            store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=1
         )
         self.get_success_or_raise(
             limiter.can_do_action(None, key="test_id_1", _time_now_s=0)
         )
@@ -208,7 +208,7 @@ def test_db_user_override(self):
         """Test that users that have ratelimiting disabled in the DB aren't
         ratelimited.
         """
-        store = self.hs.get_datastore()
+        store = self.hs.get_datastores().main
         user_id = "@user:test"
         requester = create_requester(user_id)
@@ -233,7 +233,7 @@ def test_db_user_override(self):
     def test_multiple_actions(self):
         limiter = Ratelimiter(
-            store=self.hs.get_datastore(), clock=None, rate_hz=0.1, burst_count=3
+            store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=3
         )
         # Test that 4 actions aren't allowed with a maximum burst of 3.
         allowed, time_allowed = self.get_success_or_raise(
diff --git a/tests/app/test_phone_stats_home.py b/tests/app/test_phone_stats_home.py
index 19eb4c79d043..df731eb599cc 100644
--- a/tests/app/test_phone_stats_home.py
+++ b/tests/app/test_phone_stats_home.py
@@ -32,7 +32,7 @@ def test_r30_minimum_usage(self):
         self.helper.send(room_id, "message", tok=access_token)
         # Check the R30 results do not count that user.
-        r30_results = self.get_success(self.hs.get_datastore().count_r30_users())
+        r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users())
         self.assertEqual(r30_results, {"all": 0})
         # Advance 30 days (+ 1 second, because strict inequality causes issues if we are
@@ -40,7 +40,7 @@ def test_r30_minimum_usage(self):
         self.reactor.advance(30 * ONE_DAY_IN_SECONDS + 1)
         # (Make sure the user isn't somehow counted by this point.)
-        r30_results = self.get_success(self.hs.get_datastore().count_r30_users())
+        r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users())
         self.assertEqual(r30_results, {"all": 0})
         # Send a message (this counts as activity)
@@ -51,21 +51,21 @@ def test_r30_minimum_usage(self):
         self.reactor.advance(2 * 60 * 60)
         # *Now* the user is counted.
-        r30_results = self.get_success(self.hs.get_datastore().count_r30_users())
+        r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users())
         self.assertEqual(r30_results, {"all": 1, "unknown": 1})
         # Advance 29 days. The user has now not posted for 29 days.
         self.reactor.advance(29 * ONE_DAY_IN_SECONDS)
         # The user is still counted.
-        r30_results = self.get_success(self.hs.get_datastore().count_r30_users())
+        r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users())
         self.assertEqual(r30_results, {"all": 1, "unknown": 1})
         # Advance another day. The user has now not posted for 30 days.
         self.reactor.advance(ONE_DAY_IN_SECONDS)
         # The user is now no longer counted in R30.
-        r30_results = self.get_success(self.hs.get_datastore().count_r30_users())
+        r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users())
         self.assertEqual(r30_results, {"all": 0})
     def test_r30_minimum_usage_using_default_config(self):
@@ -84,7 +84,7 @@ def test_r30_minimum_usage_using_default_config(self):
         self.helper.send(room_id, "message", tok=access_token)
         # Check the R30 results do not count that user.
-        r30_results = self.get_success(self.hs.get_datastore().count_r30_users())
+        r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users())
         self.assertEqual(r30_results, {"all": 0})
         # Advance 30 days (+ 1 second, because strict inequality causes issues if we are
@@ -92,7 +92,7 @@ def test_r30_minimum_usage_using_default_config(self):
         self.reactor.advance(30 * ONE_DAY_IN_SECONDS + 1)
         # (Make sure the user isn't somehow counted by this point.)
-        r30_results = self.get_success(self.hs.get_datastore().count_r30_users())
+        r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users())
         self.assertEqual(r30_results, {"all": 0})
         # Send a message (this counts as activity)
@@ -103,14 +103,14 @@ def test_r30_minimum_usage_using_default_config(self):
         self.reactor.advance(2 * 60 * 60)
         # *Now* the user is counted.
-        r30_results = self.get_success(self.hs.get_datastore().count_r30_users())
+        r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users())
         self.assertEqual(r30_results, {"all": 1, "unknown": 1})
         # Advance 27 days. The user has now not posted for 27 days.
         self.reactor.advance(27 * ONE_DAY_IN_SECONDS)
         # The user is still counted.
-        r30_results = self.get_success(self.hs.get_datastore().count_r30_users())
+        r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users())
         self.assertEqual(r30_results, {"all": 1, "unknown": 1})
         # Advance another day. The user has now not posted for 28 days.
@@ -119,7 +119,7 @@ def test_r30_minimum_usage_using_default_config(self):
         # The user is now no longer counted in R30.
         # (This is because the user_ips table has been pruned, which by default
         # only preserves the last 28 days of entries.)
-        r30_results = self.get_success(self.hs.get_datastore().count_r30_users())
+        r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users())
         self.assertEqual(r30_results, {"all": 0})
     def test_r30_user_must_be_retained_for_at_least_a_month(self):
@@ -135,7 +135,7 @@ def test_r30_user_must_be_retained_for_at_least_a_month(self):
         self.helper.send(room_id, "message", tok=access_token)
         # Check the user does not contribute to R30 yet.
-        r30_results = self.get_success(self.hs.get_datastore().count_r30_users())
+        r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users())
         self.assertEqual(r30_results, {"all": 0})
         for _ in range(30):
@@ -144,14 +144,16 @@ def test_r30_user_must_be_retained_for_at_least_a_month(self):
             self.helper.send(room_id, "I'm still here", tok=access_token)
         # Notice that the user *still* does not contribute to R30!
-        r30_results = self.get_success(self.hs.get_datastore().count_r30_users())
+        r30_results = self.get_success(
+            self.hs.get_datastores().main.count_r30_users()
+        )
         self.assertEqual(r30_results, {"all": 0})
         self.reactor.advance(ONE_DAY_IN_SECONDS)
         self.helper.send(room_id, "Still here!", tok=access_token)
         # *Now* the user appears in R30.
-        r30_results = self.get_success(self.hs.get_datastore().count_r30_users())
+        r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users())
         self.assertEqual(r30_results, {"all": 1, "unknown": 1})
@@ -196,7 +198,7 @@ def test_r30v2_minimum_usage(self):
         # (user_daily_visits is updated every 5 minutes using a looping call.)
         self.reactor.advance(FIVE_MINUTES_IN_SECONDS)
-        store = self.hs.get_datastore()
+        store = self.hs.get_datastores().main
         # Check the R30 results do not count that user.
         r30_results = self.get_success(store.count_r30v2_users())
@@ -275,7 +277,7 @@ def test_r30v2_user_must_be_retained_for_at_least_a_month(self):
         # (user_daily_visits is updated every 5 minutes using a looping call.)
         self.reactor.advance(FIVE_MINUTES_IN_SECONDS)
-        store = self.hs.get_datastore()
+        store = self.hs.get_datastores().main
         # Check the user does not contribute to R30 yet.
         r30_results = self.get_success(store.count_r30v2_users())
@@ -347,7 +349,7 @@ def test_r30v2_returning_dormant_users_not_counted(self):
         # (user_daily_visits is updated every 5 minutes using a looping call.)
         self.reactor.advance(FIVE_MINUTES_IN_SECONDS)
-        store = self.hs.get_datastore()
+        store = self.hs.get_datastores().main
         # Check that the user does not contribute to R30v2, even though it's been
         # more than 30 days since registration.
diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py
index 17a9fb63a176..3a4d5027195a 100644
--- a/tests/crypto/test_keyring.py
+++ b/tests/crypto/test_keyring.py
@@ -179,7 +179,7 @@ def test_verify_json_for_server(self):
         kr = keyring.Keyring(self.hs)
         key1 = signedjson.key.generate_signing_key(1)
-        r = self.hs.get_datastore().store_server_verify_keys(
+        r = self.hs.get_datastores().main.store_server_verify_keys(
             "server9",
             time.time() * 1000,
             [("server9", get_key_id(key1), FetchKeyResult(get_verify_key(key1), 1000))],
@@ -272,7 +272,7 @@ def test_verify_json_for_server_with_null_valid_until_ms(self):
         )
         key1 = signedjson.key.generate_signing_key(1)
-        r = self.hs.get_datastore().store_server_verify_keys(
+        r = self.hs.get_datastores().main.store_server_verify_keys(
             "server9",
             time.time() * 1000,
             [("server9", get_key_id(key1), FetchKeyResult(get_verify_key(key1), None))],
@@ -448,7 +448,7 @@ async def get_json(destination, path, **kwargs):
         # check that the perspectives store is correctly updated
         lookup_triplet = (SERVER_NAME, testverifykey_id, None)
         key_json = self.get_success(
-            self.hs.get_datastore().get_server_keys_json([lookup_triplet])
+            self.hs.get_datastores().main.get_server_keys_json([lookup_triplet])
         )
         res = key_json[lookup_triplet]
         self.assertEqual(len(res), 1)
@@ -564,7 +564,7 @@ def test_get_keys_from_perspectives(self):
         # check that the perspectives store is correctly updated
         lookup_triplet = (SERVER_NAME, testverifykey_id, None)
         key_json = self.get_success(
-            self.hs.get_datastore().get_server_keys_json([lookup_triplet])
+            self.hs.get_datastores().main.get_server_keys_json([lookup_triplet])
         )
         res = key_json[lookup_triplet]
         self.assertEqual(len(res), 1)
@@ -683,7 +683,7 @@ def test_get_perspectives_own_key(self):
         # check that the perspectives store is correctly updated
         lookup_triplet = (SERVER_NAME, testverifykey_id, None)
         key_json = self.get_success(
-            self.hs.get_datastore().get_server_keys_json([lookup_triplet])
+            self.hs.get_datastores().main.get_server_keys_json([lookup_triplet])
         )
         res = key_json[lookup_triplet]
         self.assertEqual(len(res), 1)
diff --git a/tests/events/test_snapshot.py b/tests/events/test_snapshot.py
index ca27388ae8a3..defbc68c18cd 100644
--- a/tests/events/test_snapshot.py
+++ b/tests/events/test_snapshot.py
@@ -28,7 +28,7 @@ class TestEventContext(unittest.HomeserverTestCase):
     ]
     def prepare(self, reactor, clock, hs):
-        self.store = hs.get_datastore()
+        self.store = hs.get_datastores().main
         self.storage = hs.get_storage()
         self.user_id = self.register_user("u1", "pass")
diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py
index e40ef9587417..9336181c96a5 100644
--- a/tests/federation/test_complexity.py
+++ b/tests/federation/test_complexity.py
@@ -55,7 +55,7 @@ def test_complexity_simple(self):
         self.assertTrue(complexity > 0, complexity)
         # Artificially raise the complexity
-        store = self.hs.get_datastore()
+        store = self.hs.get_datastores().main
         store.get_current_state_event_counts = lambda x: make_awaitable(500 * 1.23)
         # Get the room complexity again -- make sure it's our artificial value
@@ -149,7 +149,7 @@ def test_join_too_large_once_joined(self):
         )
         # Artificially raise the complexity
-        self.hs.get_datastore().get_current_state_event_counts = (
+        self.hs.get_datastores().main.get_current_state_event_counts = (
             lambda x: make_awaitable(600)
         )
diff --git a/tests/federation/test_federation_catch_up.py b/tests/federation/test_federation_catch_up.py
index f0aa8ed9db4d..2873b4d43012 100644
--- a/tests/federation/test_federation_catch_up.py
+++ b/tests/federation/test_federation_catch_up.py
@@ -64,7 +64,7 @@ def get_destination_room(self, room: str, destination: str = "host2") -> dict:
             Dictionary of { event_id: str, stream_ordering: int }
         """
         event_id, stream_ordering = self.get_success(
-            self.hs.get_datastore().db_pool.execute(
+            self.hs.get_datastores().main.db_pool.execute(
                 "test:get_destination_rooms",
                 None,
                 """
@@ -125,7 +125,7 @@ def test_catch_up_last_successful_stream_ordering_tracking(self):
         self.pump()
         lsso_1 = self.get_success(
-            self.hs.get_datastore().get_destination_last_successful_stream_ordering(
+            self.hs.get_datastores().main.get_destination_last_successful_stream_ordering(
                 "host2"
             )
         )
@@ -141,7 +141,7 @@ def test_catch_up_last_successful_stream_ordering_tracking(self):
         event_id_2 = self.helper.send(room, "rabbits!", tok=u1_token)["event_id"]
         lsso_2 = self.get_success(
-            self.hs.get_datastore().get_destination_last_successful_stream_ordering(
+            self.hs.get_datastores().main.get_destination_last_successful_stream_ordering(
                 "host2"
             )
         )
@@ -216,7 +216,9 @@ def test_catch_up_from_blank_state(self):
         # let's also clear any backoffs
         self.get_success(
-            self.hs.get_datastore().set_destination_retry_timings("host2", None, 0, 0)
+            self.hs.get_datastores().main.set_destination_retry_timings(
+                "host2", None, 0, 0
+            )
         )
         # bring the remote online and clear the received pdu list
@@ -296,13 +298,13 @@ def test_catch_up_loop(self):
         # destination_rooms should already be populated, but let us pretend that we already
         # sent (successfully) up to and including event id 2
-        event_2 = self.get_success(self.hs.get_datastore().get_event(event_id_2))
+        event_2 = self.get_success(self.hs.get_datastores().main.get_event(event_id_2))
         # also fetch event 5 so we know its last_successful_stream_ordering later
-        event_5 = self.get_success(self.hs.get_datastore().get_event(event_id_5))
+        event_5 = self.get_success(self.hs.get_datastores().main.get_event(event_id_5))
         self.get_success(
-            self.hs.get_datastore().set_destination_last_successful_stream_ordering(
+            self.hs.get_datastores().main.set_destination_last_successful_stream_ordering(
                 "host2", event_2.internal_metadata.stream_ordering
             )
         )
@@ -359,7 +361,7 @@ def test_catch_up_on_synapse_startup(self):
         # ASSERT:
         # - All servers are up to date so none should have outstanding catch-up
         outstanding_when_successful = self.get_success(
-            self.hs.get_datastore().get_catch_up_outstanding_destinations(None)
+            self.hs.get_datastores().main.get_catch_up_outstanding_destinations(None)
         )
         self.assertEqual(outstanding_when_successful, [])
@@ -370,7 +372,7 @@ def test_catch_up_on_synapse_startup(self):
         # - Mark zzzerver as being backed-off from
         now = self.clock.time_msec()
         self.get_success(
-            self.hs.get_datastore().set_destination_retry_timings(
+            self.hs.get_datastores().main.set_destination_retry_timings(
                 "zzzerver", now, now, 24 * 60 * 60 * 1000  # retry in 1 day
             )
         )
@@ -382,14 +384,14 @@ def test_catch_up_on_synapse_startup(self):
         # - all remotes are outstanding
         # - they are returned in batches of 25, in order
         outstanding_1 = self.get_success(
-            self.hs.get_datastore().get_catch_up_outstanding_destinations(None)
+            self.hs.get_datastores().main.get_catch_up_outstanding_destinations(None)
         )
         self.assertEqual(len(outstanding_1), 25)
         self.assertEqual(outstanding_1, server_names[0:25])
         outstanding_2 = self.get_success(
-            self.hs.get_datastore().get_catch_up_outstanding_destinations(
+            self.hs.get_datastores().main.get_catch_up_outstanding_destinations(
                 outstanding_1[-1]
             )
         )
@@ -457,7 +459,7 @@ def test_not_latest_event(self):
         )
         self.get_success(
-            self.hs.get_datastore().set_destination_last_successful_stream_ordering(
+            self.hs.get_datastores().main.set_destination_last_successful_stream_ordering(
                 "host2", event_1.internal_metadata.stream_ordering
             )
         )
diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py
index b2376e2db925..60e0c31f4384 100644
--- a/tests/federation/test_federation_sender.py
+++ b/tests/federation/test_federation_sender.py
@@ -176,7 +176,7 @@ def prepare(self, reactor, clock, hs):
         def get_users_who_share_room_with_user(user_id):
             return defer.succeed({"@user2:host2"})
-        hs.get_datastore().get_users_who_share_room_with_user = (
+        hs.get_datastores().main.get_users_who_share_room_with_user = (
             get_users_who_share_room_with_user
         )
@@ -395,7 +395,7 @@ def test_prune_outbound_device_pokes1(self):
         # run the prune job
         self.reactor.advance(10)
         self.get_success(
-            self.hs.get_datastore()._prune_old_outbound_device_pokes(prune_age=1)
+            self.hs.get_datastores().main._prune_old_outbound_device_pokes(prune_age=1)
         )
         # recover the server
@@ -445,7 +445,7 @@ def test_prune_outbound_device_pokes2(self):
         # run the prune job
         self.reactor.advance(10)
         self.get_success(
-            self.hs.get_datastore()._prune_old_outbound_device_pokes(prune_age=1)
+            self.hs.get_datastores().main._prune_old_outbound_device_pokes(prune_age=1)
         )
         # recover the server
diff --git a/tests/federation/transport/test_knocking.py b/tests/federation/transport/test_knocking.py
index 686f42ab48ac..adf0535d97e3 100644
--- a/tests/federation/transport/test_knocking.py
+++ b/tests/federation/transport/test_knocking.py
@@ -198,7 +198,7 @@ class FederationKnockingTestCase(
     ]
     def prepare(self, reactor, clock, homeserver):
-        self.store = homeserver.get_datastore()
+        self.store = homeserver.get_datastores().main
         # We're not going to be properly signing events as our remote homeserver is fake,
         # therefore disable event signature checks.
diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py
index fe57ff267119..9918ff68079e 100644
--- a/tests/handlers/test_appservice.py
+++ b/tests/handlers/test_appservice.py
@@ -38,7 +38,7 @@ def setUp(self):
         self.mock_as_api = Mock()
         self.mock_scheduler = Mock()
         hs = Mock()
-        hs.get_datastore.return_value = self.mock_store
+        hs.get_datastores.return_value = Mock(main=self.mock_store)
         self.mock_store.get_received_ts.return_value = make_awaitable(0)
         self.mock_store.set_appservice_last_pos.return_value = make_awaitable(None)
         self.mock_store.set_appservice_stream_type_pos.return_value = make_awaitable(
@@ -355,7 +355,9 @@ def prepare(self, reactor, clock, hs):
         # Mock out application services, and allow defining our own in tests
         self._services: List[ApplicationService] = []
-        self.hs.get_datastore().get_app_services = Mock(return_value=self._services)
+        self.hs.get_datastores().main.get_app_services = Mock(
+            return_value=self._services
+        )
         # A user on the homeserver.
         self.local_user_device_id = "local_device"
@@ -494,7 +496,7 @@ def test_application_services_receive_bursts_of_to_device(self):
         # Create a fake device per message. We can't send to-device messages to
         # a device that doesn't exist.
         self.get_success(
-            self.hs.get_datastore().db_pool.simple_insert_many(
+            self.hs.get_datastores().main.db_pool.simple_insert_many(
                 desc="test_application_services_receive_burst_of_to_device",
                 table="devices",
                 keys=("user_id", "device_id"),
@@ -510,7 +512,7 @@ def test_application_services_receive_bursts_of_to_device(self):
         # Seed the device_inbox table with our fake messages
         self.get_success(
-            self.hs.get_datastore().add_messages_to_device_inbox(messages, {})
+            self.hs.get_datastores().main.add_messages_to_device_inbox(messages, {})
         )
         # Now have local_user send a final to-device message to exclusive_as_user. All unsent
diff --git a/tests/handlers/test_auth.py b/tests/handlers/test_auth.py
index 03b8b8615c62..0c6e55e72592 100644
--- a/tests/handlers/test_auth.py
+++ b/tests/handlers/test_auth.py
@@ -129,7 +129,7 @@ def test_mau_limits_disabled(self):
     def test_mau_limits_exceeded_large(self):
         self.auth_blocking._limit_usage_by_mau = True
-        self.hs.get_datastore().get_monthly_active_count = Mock(
+        self.hs.get_datastores().main.get_monthly_active_count = Mock(
             return_value=make_awaitable(self.large_number_of_users)
         )
@@ -140,7 +140,7 @@ def test_mau_limits_exceeded_large(self):
             ResourceLimitError,
         )
-        self.hs.get_datastore().get_monthly_active_count = Mock(
+        self.hs.get_datastores().main.get_monthly_active_count = Mock(
             return_value=make_awaitable(self.large_number_of_users)
         )
         self.get_failure(
@@ -156,7 +156,7 @@ def test_mau_limits_parity(self):
         self.auth_blocking._limit_usage_by_mau = True
         # Set the server to be at the edge of too many users.
-        self.hs.get_datastore().get_monthly_active_count = Mock(
+        self.hs.get_datastores().main.get_monthly_active_count = Mock(
             return_value=make_awaitable(self.auth_blocking._max_mau_value)
         )
@@ -175,7 +175,7 @@ def test_mau_limits_parity(self):
         )
         # If in monthly active cohort
-        self.hs.get_datastore().user_last_seen_monthly_active = Mock(
+        self.hs.get_datastores().main.user_last_seen_monthly_active = Mock(
             return_value=make_awaitable(self.clock.time_msec())
         )
         self.get_success(
@@ -192,7 +192,7 @@ def test_mau_limits_not_exceeded(self):
         self.auth_blocking._limit_usage_by_mau = True
-        self.hs.get_datastore().get_monthly_active_count = Mock(
+        self.hs.get_datastores().main.get_monthly_active_count = Mock(
            return_value=make_awaitable(self.small_number_of_users)
         )
         # Ensure does not raise exception
         self.get_success(
@@ -202,7 +202,7 @@ def test_mau_limits_not_exceeded(self):
             )
         )
-        self.hs.get_datastore().get_monthly_active_count = Mock(
+        self.hs.get_datastores().main.get_monthly_active_count = Mock(
             return_value=make_awaitable(self.small_number_of_users)
         )
         self.get_success(
diff --git a/tests/handlers/test_cas.py b/tests/handlers/test_cas.py
index 8705ff894343..a2672288465a 100644
--- a/tests/handlers/test_cas.py
+++ b/tests/handlers/test_cas.py
@@ -77,7 +77,7 @@ def test_map_cas_user_to_user(self):
     def test_map_cas_user_to_existing_user(self):
         """Existing users can log in with CAS account."""
-        store = self.hs.get_datastore()
+        store = self.hs.get_datastores().main
         self.get_success(
             store.register_user(user_id="@test_user:test", password_hash=None)
         )
diff --git a/tests/handlers/test_deactivate_account.py b/tests/handlers/test_deactivate_account.py
index 01096a1581e3..ddda36c5a93b 100644
--- a/tests/handlers/test_deactivate_account.py
+++ b/tests/handlers/test_deactivate_account.py
@@ -34,7 +34,7 @@ class DeactivateAccountTestCase(HomeserverTestCase):
     ]
     def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
-        self._store = hs.get_datastore()
+        self._store = hs.get_datastores().main
         self.user = self.register_user("user", "pass")
         self.token = self.login("user", "pass")
diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py
index 43031e07ea77..683677fd0770 100644
--- a/tests/handlers/test_device.py
+++ b/tests/handlers/test_device.py
@@ -28,7 +28,7 @@ class DeviceTestCase(unittest.HomeserverTestCase):
     def make_homeserver(self, reactor, clock):
         hs = self.setup_test_homeserver("server", federation_http_client=None)
         self.handler = hs.get_device_handler()
-        self.store = hs.get_datastore()
+        self.store = hs.get_datastores().main
         return hs
     def prepare(self, reactor, clock, hs):
@@ -263,7 +263,7 @@ def make_homeserver(self, reactor, clock):
         self.handler = hs.get_device_handler()
         self.registration = hs.get_registration_handler()
         self.auth = hs.get_auth()
-        self.store = hs.get_datastore()
+        self.store = hs.get_datastores().main
         return hs
     def test_dehydrate_and_rehydrate_device(self):
diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py
index 0ea4e753e2b2..65ab107d0e37 100644
--- a/tests/handlers/test_directory.py
+++ b/tests/handlers/test_directory.py
@@ -46,7 +46,7 @@ def register_query_handler(query_type, handler):
         self.handler = hs.get_directory_handler()
-        self.store = hs.get_datastore()
+        self.store = hs.get_datastores().main
         self.my_room = RoomAlias.from_string("#my-room:test")
         self.your_room = RoomAlias.from_string("#your-room:test")
@@ -174,7 +174,7 @@ class TestDeleteAlias(unittest.HomeserverTestCase):
     ]
     def prepare(self, reactor, clock, hs):
-        self.store = hs.get_datastore()
+        self.store = hs.get_datastores().main
         self.handler = hs.get_directory_handler()
         self.state_handler = hs.get_state_handler()
@@ -289,7 +289,7 @@ class CanonicalAliasTestCase(unittest.HomeserverTestCase):
     ]
     def prepare(self, reactor, clock, hs):
-        self.store = hs.get_datastore()
+        self.store = hs.get_datastores().main
         self.handler = hs.get_directory_handler()
         self.state_handler = hs.get_state_handler()
diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py
index 734ed84d78d0..9338ab92e98e 100644
--- a/tests/handlers/test_e2e_keys.py
+++ b/tests/handlers/test_e2e_keys.py
@@ -34,7 +34,7 @@ def make_homeserver(self, reactor, clock):
     def prepare(self, reactor, clock, hs):
         self.handler = hs.get_e2e_keys_handler()
-        self.store = self.hs.get_datastore()
+        self.store = self.hs.get_datastores().main
     def test_query_local_devices_no_devices(self):
         """If the user has no devices, we expect an empty list."""
diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py
index 496b58172643..e8b4e39d1a32 100644
--- a/tests/handlers/test_federation.py
+++ b/tests/handlers/test_federation.py
@@ -45,7 +45,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
     def make_homeserver(self, reactor, clock):
         hs = self.setup_test_homeserver(federation_http_client=None)
         self.handler = hs.get_federation_handler()
-        self.store = hs.get_datastore()
+        self.store = hs.get_datastores().main
         self.state_store = hs.get_storage().state
         self._event_auth_handler = hs.get_event_auth_handler()
         return hs
diff --git a/tests/handlers/test_message.py b/tests/handlers/test_message.py
index 5816295d8b97..f4f7ab48458e 100644
--- a/tests/handlers/test_message.py
+++ b/tests/handlers/test_message.py
@@ -44,7 +44,7 @@ def prepare(self, reactor, clock, hs):
         self.room_id = self.helper.create_room_as(self.user_id, tok=self.access_token)
         self.info = self.get_success(
-            self.hs.get_datastore().get_user_by_access_token(
+            self.hs.get_datastores().main.get_user_by_access_token(
                 self.access_token,
             )
         )
diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py
index a552d8182e09..e8418b6638e4 100644
--- a/tests/handlers/test_oidc.py
+++ b/tests/handlers/test_oidc.py
@@ -856,7 +856,7 @@ def test_map_userinfo_to_user(self):
         auth_handler.complete_sso_login.reset_mock()
         # Test if the mxid is already taken
-        store = self.hs.get_datastore()
+        store = self.hs.get_datastores().main
         user3 = UserID.from_string("@test_user_3:test")
         self.get_success(
             store.register_user(user_id=user3.to_string(), password_hash=None)
         )
@@ -872,7 +872,7 @@ def test_map_userinfo_to_user(self):
     @override_config({"oidc_config": {**DEFAULT_CONFIG, "allow_existing_users": True}})
     def test_map_userinfo_to_existing_user(self):
         """Existing users can log in with OpenID Connect when allow_existing_users is True."""
-        store = self.hs.get_datastore()
+        store = self.hs.get_datastores().main
         user = UserID.from_string("@test_user:test")
         self.get_success(
             store.register_user(user_id=user.to_string(), password_hash=None)
         )
@@ -996,7 +996,7 @@ def test_map_userinfo_to_user_retries(self):
         auth_handler = self.hs.get_auth_handler()
         auth_handler.complete_sso_login = simple_async_mock()
-        store = self.hs.get_datastore()
+        store = self.hs.get_datastores().main
         self.get_success(
             store.register_user(user_id="@test_user:test", password_hash=None)
         )
diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py
index 671dc7d083c8..61d28603ae30 100644
--- a/tests/handlers/test_presence.py
+++ b/tests/handlers/test_presence.py
@@ -43,7 +43,7 @@ class PresenceUpdateTestCase(unittest.HomeserverTestCase):
     servlets = [admin.register_servlets]
     def prepare(self, reactor, clock, homeserver):
-        self.store = homeserver.get_datastore()
+        self.store = homeserver.get_datastores().main
     def test_offline_to_online(self):
         wheel_timer = Mock()
@@ -891,7 +891,7 @@ def prepare(self, reactor, clock, hs):
         # self.event_builder_for_2 = EventBuilderFactory(hs)
         # self.event_builder_for_2.hostname = "test2"
-        self.store = hs.get_datastore()
+        self.store = hs.get_datastores().main
         self.state = hs.get_state_handler()
         self._event_auth_handler = hs.get_event_auth_handler()
diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py
index 60235e569987..69e299fc1772 100644
--- a/tests/handlers/test_profile.py
+++ b/tests/handlers/test_profile.py
@@ -48,7 +48,7 @@ def register_query_handler(query_type, handler):
         return hs
     def prepare(self, reactor, clock, hs: HomeServer):
-        self.store = hs.get_datastore()
+        self.store = hs.get_datastores().main
         self.frank = UserID.from_string("@1234abcd:test")
         self.bob = UserID.from_string("@4567:test")
@@ -325,7 +325,7 @@ def _setup_local_files(self, names_and_props: Dict[str, Dict[str, Any]]):
             properties are "mimetype" (for the file's type) and "size" (for the
             file's size).
         """
-        store = self.hs.get_datastore()
+        store = self.hs.get_datastores().main
         for name, props in names_and_props.items():
             self.get_success(
diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py
index cd6f2c77aea4..51ee667ab459 100644
--- a/tests/handlers/test_register.py
+++ b/tests/handlers/test_register.py
@@ -154,7 +154,7 @@ def make_homeserver(self, reactor, clock):
     def prepare(self, reactor, clock, hs):
         self.handler = self.hs.get_registration_handler()
-        self.store = self.hs.get_datastore()
+        self.store = self.hs.get_datastores().main
         self.lots_of_users = 100
         self.small_number_of_users = 1
@@ -172,7 +172,7 @@ def test_user_is_created_and_logged_in_if_doesnt_exist(self):
         self.assertGreater(len(result_token), 20)
     def test_if_user_exists(self):
-        store = self.hs.get_datastore()
+        store = self.hs.get_datastores().main
         frank = UserID.from_string("@frank:test")
         self.get_success(
             store.register_user(user_id=frank.to_string(), password_hash=None)
         )
@@ -760,7 +760,7 @@ async def lookup_room_alias(*args, **kwargs):
     def prepare(self, reactor, clock, hs):
         self.handler = self.hs.get_registration_handler()
-        self.store = self.hs.get_datastore()
+        self.store = self.hs.get_datastores().main
     @override_config({"auto_join_rooms": ["#room:remotetest"]})
     def test_auto_create_auto_join_remote_room(self):
diff --git a/tests/handlers/test_saml.py b/tests/handlers/test_saml.py
index 50551aa6e3c2..23941abed852 100644
--- a/tests/handlers/test_saml.py
+++ b/tests/handlers/test_saml.py
@@ -142,7 +142,7 @@ def test_map_saml_response_to_user(self):
     @override_config({"saml2_config": {"grandfathered_mxid_source_attribute": "mxid"}})
     def test_map_saml_response_to_existing_user(self):
         """Existing users can log in with SAML account."""
-        store = self.hs.get_datastore()
+        store = self.hs.get_datastores().main
         self.get_success(
             store.register_user(user_id="@test_user:test", password_hash=None)
         )
@@ -217,7 +217,7 @@ def test_map_saml_response_to_user_retries(self):
         sso_handler.render_error = Mock(return_value=None)
         # register a user to occupy the first-choice MXID
-        store = self.hs.get_datastore()
+        store = self.hs.get_datastores().main
         self.get_success(
             store.register_user(user_id="@test_user:test", password_hash=None)
         )
diff --git a/tests/handlers/test_stats.py b/tests/handlers/test_stats.py
index 56207f4db6f6..ecd78fa369a8 100644
--- a/tests/handlers/test_stats.py
+++ b/tests/handlers/test_stats.py
@@ -33,7 +33,7 @@ class StatsRoomTests(unittest.HomeserverTestCase):
     ]
     def prepare(self, reactor, clock, hs):
-        self.store = hs.get_datastore()
+        self.store = hs.get_datastores().main
         self.handler = self.hs.get_stats_handler()
     def _add_background_updates(self):
diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py
index 07a760e91aed..66b0bd4d1ae8 100644
--- a/tests/handlers/test_sync.py
+++ b/tests/handlers/test_sync.py
@@ -41,7 +41,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
     def prepare(self, reactor, clock, hs: HomeServer):
         self.sync_handler = self.hs.get_sync_handler()
-        self.store = self.hs.get_datastore()
+        self.store = self.hs.get_datastores().main
         # AuthBlocking reads from the hs' config on initialization. We need to
         # modify its config instead of the hs'
@@ -248,7 +248,7 @@ def test_ban_wins_race_with_join(self):
         # the prev_events used when creating the join event, such that the ban does not
         # precede the join.
         mocked_get_prev_events = patch.object(
-            self.hs.get_datastore(),
+            self.hs.get_datastores().main,
             "get_prev_events_for_room",
             new_callable=MagicMock,
             return_value=make_awaitable([last_room_creation_event_id]),
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index 000f9b9fde2b..e461e03599e8 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -91,7 +91,7 @@ def prepare(self, reactor, clock, hs):
         self.event_source = hs.get_event_sources().sources.typing
-        self.datastore = hs.get_datastore()
+        self.datastore = hs.get_datastores().main
         self.datastore.get_destination_retry_timings = Mock(
             return_value=defer.succeed(None)
         )
diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py
index 482c90ef68c5..e159169e22d6 100644
--- a/tests/handlers/test_user_directory.py
+++ b/tests/handlers/test_user_directory.py
@@ -77,7 +77,7 @@ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
         return hs
     def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
-        self.store = hs.get_datastore()
+        self.store = hs.get_datastores().main
         self.handler = hs.get_user_directory_handler()
         self.event_builder_factory = self.hs.get_event_builder_factory()
         self.event_creation_handler = self.hs.get_event_creation_handler()
diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py
index d16cd141a750..c3f20f9692ac 100644
--- a/tests/module_api/test_api.py
+++ b/tests/module_api/test_api.py
@@ -41,7 +41,7 @@ class ModuleApiTestCase(HomeserverTestCase):
     ]
     def prepare(self, reactor, clock, homeserver):
-        self.store = homeserver.get_datastore()
+        self.store = homeserver.get_datastores().main
         self.module_api = homeserver.get_module_api()
         self.event_creation_handler = homeserver.get_event_creation_handler()
         self.sync_handler = homeserver.get_sync_handler()
diff --git a/tests/push/test_email.py b/tests/push/test_email.py
index f8cba7b64584..7a3b0d675592 100644
--- a/tests/push/test_email.py
+++ b/tests/push/test_email.py
@@ -102,13 +102,13 @@ def prepare(self, reactor, clock, hs):
         # Register the pusher
         user_tuple = self.get_success(
-            self.hs.get_datastore().get_user_by_access_token(self.access_token)
+            self.hs.get_datastores().main.get_user_by_access_token(self.access_token)
         )
         self.token_id = user_tuple.token_id
         # We need to add email to account before we can create a pusher.
         self.get_success(
-            hs.get_datastore().user_add_threepid(
+            hs.get_datastores().main.user_add_threepid(
                 self.user_id, "email", "a@example.com", 0, 0
             )
         )
@@ -128,7 +128,7 @@ def prepare(self, reactor, clock, hs):
         )
         self.auth_handler = hs.get_auth_handler()
-        self.store = hs.get_datastore()
+        self.store = hs.get_datastores().main
     def test_need_validated_email(self):
         """Test that we can only add an email pusher if the user has validated
@@ -375,7 +375,7 @@ def test_no_email_sent_after_removed(self):
         # check that the pusher for that email address has been deleted
         pushers = self.get_success(
-            self.hs.get_datastore().get_pushers_by({"user_name": self.user_id})
+            self.hs.get_datastores().main.get_pushers_by({"user_name": self.user_id})
         )
         pushers = list(pushers)
         self.assertEqual(len(pushers), 0)
@@ -388,14 +388,14 @@ def test_remove_unlinked_pushers_background_job(self):
         # This resembles the old behaviour, which the background update below is intended
         # to clean up.
         self.get_success(
-            self.hs.get_datastore().user_delete_threepid(
+            self.hs.get_datastores().main.user_delete_threepid(
                 self.user_id, "email", "a@example.com"
             )
         )
         # Run the "remove_deleted_email_pushers" background job
         self.get_success(
-            self.hs.get_datastore().db_pool.simple_insert(
+            self.hs.get_datastores().main.db_pool.simple_insert(
                 table="background_updates",
                 values={
                     "update_name": "remove_deleted_email_pushers",
@@ -406,14 +406,14 @@ def test_remove_unlinked_pushers_background_job(self):
         )
         # ... and tell the DataStore that it hasn't finished all updates yet
-        self.hs.get_datastore().db_pool.updates._all_done = False
+        self.hs.get_datastores().main.db_pool.updates._all_done = False
         # Now let's actually drive the updates to completion
         self.wait_for_background_updates()
         # Check that all pushers with unlinked addresses were deleted
         pushers = self.get_success(
-            self.hs.get_datastore().get_pushers_by({"user_name": self.user_id})
+            self.hs.get_datastores().main.get_pushers_by({"user_name": self.user_id})
        )
         pushers = list(pushers)
         self.assertEqual(len(pushers), 0)
@@ -428,7 +428,7 @@ def _check_for_mail(self) -> Tuple[Sequence, Dict]:
         """
         # Get the stream ordering before it gets sent
         pushers = self.get_success(
-            self.hs.get_datastore().get_pushers_by({"user_name": self.user_id})
+            self.hs.get_datastores().main.get_pushers_by({"user_name": self.user_id})
         )
         pushers = list(pushers)
         self.assertEqual(len(pushers), 1)
@@ -439,7 +439,7 @@ def _check_for_mail(self) -> Tuple[Sequence, Dict]:
         # It hasn't succeeded yet, so the stream ordering shouldn't have moved
         pushers = self.get_success(
-            self.hs.get_datastore().get_pushers_by({"user_name": self.user_id})
+            self.hs.get_datastores().main.get_pushers_by({"user_name": self.user_id})
         )
         pushers = list(pushers)
         self.assertEqual(len(pushers), 1)
@@ -458,7 +458,7 @@ def _check_for_mail(self) -> Tuple[Sequence, Dict]:
         # The stream ordering has increased
         pushers = self.get_success(
-            self.hs.get_datastore().get_pushers_by({"user_name": self.user_id})
+            self.hs.get_datastores().main.get_pushers_by({"user_name": self.user_id})
         )
         pushers = list(pushers)
         self.assertEqual(len(pushers), 1)
diff --git a/tests/push/test_http.py b/tests/push/test_http.py
index e1e3fb97c54d..c284beb37ce0 100644
--- a/tests/push/test_http.py
+++ b/tests/push/test_http.py
@@ -62,7 +62,7 @@ def test_invalid_configuration(self):
         # Register the pusher
         user_tuple = self.get_success(
-            self.hs.get_datastore().get_user_by_access_token(access_token)
+            self.hs.get_datastores().main.get_user_by_access_token(access_token)
         )
         token_id = user_tuple.token_id
@@ -108,7 +108,7 @@ def test_sends_http(self):
         # Register the pusher
         user_tuple = self.get_success(
-            self.hs.get_datastore().get_user_by_access_token(access_token)
+            self.hs.get_datastores().main.get_user_by_access_token(access_token)
         )
         token_id = user_tuple.token_id
@@ -138,7 +138,7 @@ def test_sends_http(self):
         # Get the stream ordering before it gets sent
         pushers = self.get_success(
-            self.hs.get_datastore().get_pushers_by({"user_name": user_id})
+            self.hs.get_datastores().main.get_pushers_by({"user_name": user_id})
         )
         pushers = list(pushers)
         self.assertEqual(len(pushers), 1)
@@ -149,7 +149,7 @@ def test_sends_http(self):
         # It hasn't succeeded yet, so the stream ordering shouldn't have moved
         pushers = self.get_success(
-            self.hs.get_datastore().get_pushers_by({"user_name": user_id})
+            self.hs.get_datastores().main.get_pushers_by({"user_name": user_id})
         )
         pushers = list(pushers)
         self.assertEqual(len(pushers), 1)
@@ -170,7 +170,7 @@ def test_sends_http(self):
         # The stream ordering has increased
         pushers = self.get_success(
-            self.hs.get_datastore().get_pushers_by({"user_name": user_id})
+            self.hs.get_datastores().main.get_pushers_by({"user_name": user_id})
         )
         pushers = list(pushers)
         self.assertEqual(len(pushers), 1)
@@ -192,7 +192,7 @@ def test_sends_http(self):
         # The stream ordering has increased, again
         pushers = self.get_success(
-            self.hs.get_datastore().get_pushers_by({"user_name": user_id})
+            self.hs.get_datastores().main.get_pushers_by({"user_name": user_id})
         )
         pushers = list(pushers)
         self.assertEqual(len(pushers), 1)
@@ -224,7 +224,7 @@ def test_sends_high_priority_for_encrypted(self):
         # Register the pusher
         user_tuple = self.get_success(
-            self.hs.get_datastore().get_user_by_access_token(access_token)
+            self.hs.get_datastores().main.get_user_by_access_token(access_token)
         )
         token_id = user_tuple.token_id
@@ -344,7 +344,7 @@ def test_sends_high_priority_for_one_to_one_only(self):
         # Register the pusher
         user_tuple = self.get_success(
-            self.hs.get_datastore().get_user_by_access_token(access_token)
+            self.hs.get_datastores().main.get_user_by_access_token(access_token)
         )
         token_id = user_tuple.token_id
@@ -430,7 +430,7 @@ def test_sends_high_priority_for_mention(self):
         # Register the pusher
         user_tuple = self.get_success(
-            self.hs.get_datastore().get_user_by_access_token(access_token)
+            self.hs.get_datastores().main.get_user_by_access_token(access_token)
         )
         token_id = user_tuple.token_id
@@ -507,7 +507,7 @@ def test_sends_high_priority_for_atroom(self):
         # Register the pusher
         user_tuple = self.get_success(
-            self.hs.get_datastore().get_user_by_access_token(access_token)
+            self.hs.get_datastores().main.get_user_by_access_token(access_token)
         )
         token_id = user_tuple.token_id
@@ -613,7 +613,7 @@ def _test_push_unread_count(self):
         # Register the pusher
         user_tuple = self.get_success(
-            self.hs.get_datastore().get_user_by_access_token(access_token)
+            self.hs.get_datastores().main.get_user_by_access_token(access_token)
         )
         token_id = user_tuple.token_id
diff --git a/tests/replication/_base.py b/tests/replication/_base.py
index 9fc50f885227..a7a05a564fe9 100644
--- a/tests/replication/_base.py
+++ b/tests/replication/_base.py
@@ -68,7 +68,7 @@ def prepare(self, reactor, clock, hs):
         # Since we use sqlite in memory databases we need to make sure the
         # databases objects are the same.
-        self.worker_hs.get_datastore().db_pool = hs.get_datastore().db_pool
+        self.worker_hs.get_datastores().main.db_pool = hs.get_datastores().main.db_pool
         # Normally we'd pass in the handler to `setup_test_homeserver`, which would
         # eventually hit "Install @cache_in_self attributes" in tests/utils.py.
@@ -233,7 +233,7 @@ def setUp(self):
         # We may have an attempt to connect to redis for the external cache already.
         self.connect_any_redis_attempts()
-        store = self.hs.get_datastore()
+        store = self.hs.get_datastores().main
         self.database_pool = store.db_pool
         self.reactor.lookups["testserv"] = "1.2.3.4"
@@ -332,7 +332,7 @@ def make_worker_hs(
             lambda: self._handle_http_replication_attempt(worker_hs, port),
         )
-        store = worker_hs.get_datastore()
+        store = worker_hs.get_datastores().main
         store.db_pool._db_pool = self.database_pool._db_pool
         # Set up TCP replication between master and the new worker if we don't
diff --git a/tests/replication/slave/storage/_base.py b/tests/replication/slave/storage/_base.py
index 83e89383f64f..85be79d19d48 100644
--- a/tests/replication/slave/storage/_base.py
+++ b/tests/replication/slave/storage/_base.py
@@ -30,8 +30,8 @@ def prepare(self, reactor, clock, hs):
         self.reconnect()
-        self.master_store = hs.get_datastore()
-        self.slaved_store = self.worker_hs.get_datastore()
+        self.master_store = hs.get_datastores().main
+        self.slaved_store = self.worker_hs.get_datastores().main
         self.storage = hs.get_storage()
     def replicate(self):
diff --git a/tests/replication/tcp/streams/test_account_data.py b/tests/replication/tcp/streams/test_account_data.py
index cdd052001b6d..50fbff5f324f 100644
--- a/tests/replication/tcp/streams/test_account_data.py
+++ b/tests/replication/tcp/streams/test_account_data.py
@@ -23,7 +23,7 @@ class AccountDataStreamTestCase(BaseStreamTestCase):
     def test_update_function_room_account_data_limit(self):
         """Test replication with many room account data updates"""
-        store = self.hs.get_datastore()
+        store = self.hs.get_datastores().main
         # generate lots of account data updates
         updates = []
@@ -69,7 +69,7 @@ def test_update_function_room_account_data_limit(self):
     def test_update_function_global_account_data_limit(self):
         """Test replication with many global account data updates"""
-        store = self.hs.get_datastore()
+        store = self.hs.get_datastores().main
         # generate lots of account data updates
         updates = []
diff --git a/tests/replication/tcp/streams/test_events.py b/tests/replication/tcp/streams/test_events.py
index f198a9488746..f9d5da723cce 100644
--- a/tests/replication/tcp/streams/test_events.py
+++ b/tests/replication/tcp/streams/test_events.py
@@ -136,7 +136,7 @@ def test_update_function_huge_state_change(self):
         # this is the point in the DAG where we make a fork
         fork_point: List[str] = self.get_success(
-            self.hs.get_datastore().get_latest_event_ids_in_room(self.room_id)
+            self.hs.get_datastores().main.get_latest_event_ids_in_room(self.room_id)
         )
         events = [
@@ -291,7 +291,7 @@ def test_update_function_state_row_limit(self):
         # this is the point in the DAG where we make a fork
         fork_point: List[str] = self.get_success(
-            self.hs.get_datastore().get_latest_event_ids_in_room(self.room_id)
+            self.hs.get_datastores().main.get_latest_event_ids_in_room(self.room_id)
         )
         events: List[EventBase] = []
diff --git a/tests/replication/tcp/streams/test_receipts.py b/tests/replication/tcp/streams/test_receipts.py
index 38e292c1ab67..eb0011784518 100644
--- a/tests/replication/tcp/streams/test_receipts.py
+++ b/tests/replication/tcp/streams/test_receipts.py
@@ -32,7 +32,7 @@ def test_receipt(self):
         # tell the master to send a new receipt
         self.get_success(
-            self.hs.get_datastore().insert_receipt(
+            self.hs.get_datastores().main.insert_receipt(
                 "!room:blue", "m.read", USER_ID, ["$event:blue"], {"a": 1}
             )
         )
@@ -56,7 +56,7 @@ def test_receipt(self):
         self.test_handler.on_rdata.reset_mock()
         self.get_success(
-            self.hs.get_datastore().insert_receipt(
+            self.hs.get_datastores().main.insert_receipt(
                 "!room2:blue", "m.read", USER_ID, ["$event2:foo"], {"a": 2}
             )
         )
diff --git a/tests/replication/test_federation_sender_shard.py b/tests/replication/test_federation_sender_shard.py
index 92a5b53e11e7..ba1a63c0d667 100644
--- a/tests/replication/test_federation_sender_shard.py
+++ b/tests/replication/test_federation_sender_shard.py
@@ -204,7 +204,7 @@ def test_send_typing_sharded(self):
     def create_room_with_remote_server(self, user, token, remote_server="other_server"):
         room = self.helper.create_room_as(user, tok=token)
-        store = self.hs.get_datastore()
+        store = self.hs.get_datastores().main
         federation = self.hs.get_federation_event_handler()
         prev_event_ids = self.get_success(store.get_latest_event_ids_in_room(room))
diff --git a/tests/replication/test_pusher_shard.py b/tests/replication/test_pusher_shard.py
index 4094a75f363c..8f4f6688ce86 100644
--- a/tests/replication/test_pusher_shard.py
+++ b/tests/replication/test_pusher_shard.py
@@ -50,7 +50,7 @@ def _create_pusher_and_send_msg(self, localpart):
         # Register a pusher
         user_dict = self.get_success(
-            self.hs.get_datastore().get_user_by_access_token(access_token)
+            self.hs.get_datastores().main.get_user_by_access_token(access_token)
         )
         token_id = user_dict.token_id
diff --git a/tests/replication/test_sharded_event_persister.py b/tests/replication/test_sharded_event_persister.py
index 596ba5a0c9f4..5f142e84c359 100644
--- a/tests/replication/test_sharded_event_persister.py
+++ b/tests/replication/test_sharded_event_persister.py
@@ -47,7 +47,7 @@ def prepare(self, reactor, clock, hs):
         self.other_access_token = self.login("otheruser", "pass")
         self.room_creator = self.hs.get_room_creation_handler()
-        self.store = hs.get_datastore()
+        self.store = hs.get_datastores().main
     def default_config(self):
         conf = super().default_config()
@@ -99,7 +99,7 @@ def test_basic(self):
         persisted_on_1 = False
         persisted_on_2 = False
-        store = self.hs.get_datastore()
+        store = self.hs.get_datastores().main
         user_id = self.register_user("user", "pass")
         access_token = self.login("user", "pass")
@@ -166,7 +166,7 @@ def test_vector_clock_token(self):
         user_id = self.register_user("user", "pass")
         access_token = self.login("user", "pass")
-        store = self.hs.get_datastore()
+        store = self.hs.get_datastores().main
         # Create two room on the different workers.
         self._create_room(room_id1, user_id, access_token)
@@ -194,7 +194,7 @@ def test_vector_clock_token(self):
         #
         # Worker2's event stream position will not advance until we call
         # __aexit__ again.
-        worker_store2 = worker_hs2.get_datastore()
+        worker_store2 = worker_hs2.get_datastores().main
         assert isinstance(worker_store2._stream_id_gen, MultiWriterIdGenerator)
         actx = worker_store2._stream_id_gen.get_next()
diff --git a/tests/rest/admin/test_background_updates.py b/tests/rest/admin/test_background_updates.py
index 1e3fe9c62c00..fb36aa994090 100644
--- a/tests/rest/admin/test_background_updates.py
+++ b/tests/rest/admin/test_background_updates.py
@@ -36,7 +36,7 @@ class BackgroundUpdatesTestCase(unittest.HomeserverTestCase):
     ]
     def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
-        self.store = hs.get_datastore()
+        self.store = hs.get_datastores().main
         self.admin_user = self.register_user("admin", "pass", admin=True)
         self.admin_user_tok = self.login("admin", "pass")
diff --git a/tests/rest/admin/test_federation.py b/tests/rest/admin/test_federation.py
index 71068d16cd18..929bbdc37da0 100644
--- a/tests/rest/admin/test_federation.py
+++ b/tests/rest/admin/test_federation.py
@@ -35,7 +35,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
     ]
     def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
-        self.store = hs.get_datastore()
+        self.store = hs.get_datastores().main
         self.register_user("admin", "pass", admin=True)
         self.admin_user_tok = self.login("admin", "pass")
@@ -537,7 +537,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase):
     ]
     def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
-        self.store = hs.get_datastore()
+        self.store = hs.get_datastores().main
         self.admin_user = self.register_user("admin", "pass", admin=True)
         self.admin_user_tok = self.login("admin", "pass")
diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py
index 86aff7575c9a..0d47dd0aff73 100644
--- a/tests/rest/admin/test_media.py
+++ b/tests/rest/admin/test_media.py
@@ -634,7 +634,7 @@ class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase):
     def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         media_repo = hs.get_media_repository_resource()
-        self.store = hs.get_datastore()
+        self.store = hs.get_datastores().main
         self.server_name = hs.hostname
         self.admin_user = self.register_user("admin", "pass", admin=True)
@@ -767,7 +767,7 @@ class ProtectMediaByIDTestCase(unittest.HomeserverTestCase):
     def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         media_repo = hs.get_media_repository_resource()
-        self.store = hs.get_datastore()
+        self.store = hs.get_datastores().main
         self.admin_user = self.register_user("admin", "pass", admin=True)
         self.admin_user_tok = self.login("admin", "pass")
diff --git a/tests/rest/admin/test_registration_tokens.py b/tests/rest/admin/test_registration_tokens.py
index 8513b1d2df53..8354250ec21e 100644
--- a/tests/rest/admin/test_registration_tokens.py
+++ b/tests/rest/admin/test_registration_tokens.py
@@ -34,7 +34,7 @@ class ManageRegistrationTokensTestCase(unittest.HomeserverTestCase):
     ]
     def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
-        self.store = hs.get_datastore()
+        self.store = hs.get_datastores().main
         self.admin_user = self.register_user("admin", "pass", admin=True)
         self.admin_user_tok = self.login("admin", "pass")
diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py
index 23da0ad73679..09c48e85c798 100644
--- a/tests/rest/admin/test_room.py
+++ b/tests/rest/admin/test_room.py
@@ -50,7 +50,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         consent_uri_builder.build_user_consent_uri.return_value = "http://example.com"
         self.event_creation_handler._consent_uri_builder = consent_uri_builder
-        self.store = hs.get_datastore()
+        self.store = hs.get_datastores().main
         self.admin_user = self.register_user("admin", "pass", admin=True)
         self.admin_user_tok = self.login("admin", "pass")
@@ -465,7 +465,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         consent_uri_builder.build_user_consent_uri.return_value = "http://example.com"
         self.event_creation_handler._consent_uri_builder = consent_uri_builder
-        self.store = hs.get_datastore()
+        self.store = hs.get_datastores().main
         self.admin_user = self.register_user("admin", "pass", admin=True)
         self.admin_user_tok = self.login("admin", "pass")
@@ -2239,7 +2239,7 @@ class BlockRoomTestCase(unittest.HomeserverTestCase):
     ]
     def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
-        self._store = hs.get_datastore()
+        self._store = hs.get_datastores().main
         self.admin_user = self.register_user("admin", "pass", admin=True)
         self.admin_user_tok = self.login("admin", "pass")
diff --git a/tests/rest/admin/test_server_notice.py b/tests/rest/admin/test_server_notice.py
index 3c59f5f766bc..2c855bff99c7 100644
--- a/tests/rest/admin/test_server_notice.py
+++ b/tests/rest/admin/test_server_notice.py
@@ -38,7 +38,7 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
     ]
     def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
-        self.store = hs.get_datastore()
+        self.store = hs.get_datastores().main
         self.room_shutdown_handler = hs.get_room_shutdown_handler()
         self.pagination_handler = hs.get_pagination_handler()
         self.server_notices_manager = self.hs.get_server_notices_manager()
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index 272637e96575..a60ea0a56305 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -410,7 +410,7 @@ def test_register_mau_limit_reached(self) -> None:
         even if the MAU limit is reached.
""" handler = self.hs.get_registration_handler() - store = self.hs.get_datastore() + store = self.hs.get_datastores().main # Set monthly active users to the limit store.get_monthly_active_count = Mock( @@ -455,7 +455,7 @@ class UsersListTestCase(unittest.HomeserverTestCase): url = "/_synapse/admin/v2/users" def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") @@ -913,7 +913,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase): ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") @@ -1167,7 +1167,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.auth_handler = hs.get_auth_handler() # create users and get access tokens @@ -2609,7 +2609,7 @@ class PushersRestTestCase(unittest.HomeserverTestCase): ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") @@ -2737,7 +2737,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.media_repo = hs.get_media_repository_resource() self.filepaths = MediaFilePaths(hs.config.media.media_store_path) @@ -3317,7 +3317,7 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase): ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") @@ -3609,7 +3609,7 @@ class ShadowBanRestTestCase(unittest.HomeserverTestCase): ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") @@ -3687,7 +3687,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase): ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") @@ -3913,7 +3913,7 @@ class AccountDataTestCase(unittest.HomeserverTestCase): ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py index afaa597f6597..aa019c9a445b 100644 --- a/tests/rest/client/test_account.py +++ 
b/tests/rest/client/test_account.py @@ -77,7 +77,7 @@ async def sendmail( return hs def prepare(self, reactor, clock, hs): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.submit_token_resource = PasswordResetSubmitTokenResource(hs) def test_basic_password_reset(self): @@ -398,7 +398,7 @@ def test_deactivate_account(self): self.deactivate(user_id, tok) - store = self.hs.get_datastore() + store = self.hs.get_datastores().main # Check that the user has been marked as deactivated. self.assertTrue(self.get_success(store.get_user_deactivated_status(user_id))) @@ -409,7 +409,7 @@ def test_deactivate_account(self): def test_pending_invites(self): """Tests that deactivating a user rejects every pending invite for them.""" - store = self.hs.get_datastore() + store = self.hs.get_datastores().main inviter_id = self.register_user("inviter", "test") inviter_tok = self.login("inviter", "test") @@ -527,7 +527,7 @@ def test_GET_whoami_appservices(self): namespaces={"users": [{"regex": user_id, "exclusive": True}]}, sender=user_id, ) - self.hs.get_datastore().services_cache.append(appservice) + self.hs.get_datastores().main.services_cache.append(appservice) whoami = self._whoami(as_token) self.assertEqual( @@ -586,7 +586,7 @@ async def sendmail( return self.hs def prepare(self, reactor, clock, hs): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.user_id = self.register_user("kermit", "test") self.user_id_tok = self.login("kermit", "test") diff --git a/tests/rest/client/test_filter.py b/tests/rest/client/test_filter.py index 475c6bed3d1a..a573cc3c2ee7 100644 --- a/tests/rest/client/test_filter.py +++ b/tests/rest/client/test_filter.py @@ -32,7 +32,7 @@ class FilterTestCase(unittest.HomeserverTestCase): def prepare(self, reactor, clock, hs): self.filtering = hs.get_filtering() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main def test_add_filter(self): channel = self.make_request( diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py index 19f5e465372b..26d0d83e00f2 100644 --- a/tests/rest/client/test_login.py +++ b/tests/rest/client/test_login.py @@ -1101,8 +1101,8 @@ def make_homeserver(self, reactor, clock): }, ) - self.hs.get_datastore().services_cache.append(self.service) - self.hs.get_datastore().services_cache.append(self.another_service) + self.hs.get_datastores().main.services_cache.append(self.service) + self.hs.get_datastores().main.services_cache.append(self.another_service) return self.hs def test_login_appservice_user(self): diff --git a/tests/rest/client/test_profile.py b/tests/rest/client/test_profile.py index ead883ded886..b9647d5bd8cf 100644 --- a/tests/rest/client/test_profile.py +++ b/tests/rest/client/test_profile.py @@ -292,7 +292,7 @@ def _setup_local_files(self, names_and_props: Dict[str, Dict[str, Any]]): properties are "mimetype" (for the file's type) and "size" (for the file's size). 
""" - store = self.hs.get_datastore() + store = self.hs.get_datastores().main for name, props in names_and_props.items(): self.get_success( diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py index 0f1c47dcbb87..2835d86e5b7c 100644 --- a/tests/rest/client/test_register.py +++ b/tests/rest/client/test_register.py @@ -56,7 +56,7 @@ def test_POST_appservice_registration_valid(self): sender="@as:test", ) - self.hs.get_datastore().services_cache.append(appservice) + self.hs.get_datastores().main.services_cache.append(appservice) request_data = json.dumps( {"username": "as_user_kermit", "type": APP_SERVICE_REGISTRATION_TYPE} ) @@ -80,7 +80,7 @@ def test_POST_appservice_registration_no_type(self): sender="@as:test", ) - self.hs.get_datastore().services_cache.append(appservice) + self.hs.get_datastores().main.services_cache.append(appservice) request_data = json.dumps({"username": "as_user_kermit"}) channel = self.make_request( @@ -210,7 +210,7 @@ def test_POST_registration_requires_token(self): username = "kermit" device_id = "frogfone" token = "abcd" - store = self.hs.get_datastore() + store = self.hs.get_datastores().main self.get_success( store.db_pool.simple_insert( "registration_tokens", @@ -316,7 +316,7 @@ def test_POST_registration_token_invalid(self): @override_config({"registration_requires_token": True}) def test_POST_registration_token_limit_uses(self): token = "abcd" - store = self.hs.get_datastore() + store = self.hs.get_datastores().main # Create token that can be used once self.get_success( store.db_pool.simple_insert( @@ -391,7 +391,7 @@ def test_POST_registration_token_limit_uses(self): def test_POST_registration_token_expiry(self): token = "abcd" now = self.hs.get_clock().time_msec() - store = self.hs.get_datastore() + store = self.hs.get_datastores().main # Create token that expired yesterday self.get_success( store.db_pool.simple_insert( @@ -439,7 +439,7 @@ def test_POST_registration_token_expiry(self): def test_POST_registration_token_session_expiry(self): """Test `pending` is decremented when an uncompleted session expires.""" token = "abcd" - store = self.hs.get_datastore() + store = self.hs.get_datastores().main self.get_success( store.db_pool.simple_insert( "registration_tokens", @@ -530,7 +530,7 @@ def test_POST_registration_token_session_expiry_deleted_token(self): 3. Expire the session """ token = "abcd" - store = self.hs.get_datastore() + store = self.hs.get_datastores().main self.get_success( store.db_pool.simple_insert( "registration_tokens", @@ -657,7 +657,7 @@ def test_request_token_existing_email_inhibit_error(self): # Add a threepid self.get_success( - self.hs.get_datastore().user_add_threepid( + self.hs.get_datastores().main.user_add_threepid( user_id=user_id, medium="email", address=email, @@ -941,7 +941,7 @@ async def sendmail(*args, **kwargs): self.email_attempts = [] self.hs.get_send_email_handler()._sendmail = sendmail - self.store = self.hs.get_datastore() + self.store = self.hs.get_datastores().main return self.hs @@ -1126,10 +1126,12 @@ def make_homeserver(self, reactor, clock): # We need to set these directly, instead of in the homeserver config dict above. # This is due to account validity-related config options not being read by # Synapse when account_validity.enabled is False. 
- self.hs.get_datastore()._account_validity_period = self.validity_period - self.hs.get_datastore()._account_validity_startup_job_max_delta = self.max_delta + self.hs.get_datastores().main._account_validity_period = self.validity_period + self.hs.get_datastores().main._account_validity_startup_job_max_delta = ( + self.max_delta + ) - self.store = self.hs.get_datastore() + self.store = self.hs.get_datastores().main return self.hs @@ -1163,7 +1165,7 @@ def default_config(self): def test_GET_token_valid(self): token = "abcd" - store = self.hs.get_datastore() + store = self.hs.get_datastores().main self.get_success( store.db_pool.simple_insert( "registration_tokens", diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index dfd9ffcb9380..5687dea48dca 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -53,7 +53,7 @@ def default_config(self) -> dict: return config def prepare(self, reactor, clock, hs): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.user_id, self.user_token = self._create_user("alice") self.user2_id, self.user2_token = self._create_user("bob") @@ -107,7 +107,7 @@ def test_deny_invalid_event(self): # Unless that event is referenced from another event! self.get_success( - self.hs.get_datastore().db_pool.simple_insert( + self.hs.get_datastores().main.db_pool.simple_insert( table="event_relations", values={ "event_id": "bar", diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py index fe5b536d9705..c41a1c14a138 100644 --- a/tests/rest/client/test_retention.py +++ b/tests/rest/client/test_retention.py @@ -51,7 +51,7 @@ def prepare(self, reactor, clock, homeserver): self.user_id = self.register_user("user", "password") self.token = self.login("user", "password") - self.store = self.hs.get_datastore() + self.store = self.hs.get_datastores().main self.serializer = self.hs.get_event_client_serializer() self.clock = self.hs.get_clock() @@ -114,7 +114,7 @@ def test_visibility(self): """Tests that synapse.visibility.filter_events_for_client correctly filters out outdated events """ - store = self.hs.get_datastore() + store = self.hs.get_datastores().main storage = self.hs.get_storage() room_id = self.helper.create_room_as(self.user_id, tok=self.token) events = [] diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index b7f086927bcf..1afd96b8f5d8 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -65,7 +65,7 @@ def make_homeserver(self, reactor, clock): async def _insert_client_ip(*args, **kwargs): return None - self.hs.get_datastore().insert_client_ip = _insert_client_ip + self.hs.get_datastores().main.insert_client_ip = _insert_client_ip return self.hs @@ -667,7 +667,7 @@ def test_post_room_invitees_ratelimit(self): # Add the current user to the ratelimit overrides, allowing them no ratelimiting. self.get_success( - self.hs.get_datastore().set_ratelimit_for_user(self.user_id, 0, 0) + self.hs.get_datastores().main.set_ratelimit_for_user(self.user_id, 0, 0) ) # Test that the invites aren't ratelimited anymore. 
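The hunks above, and those that follow, all apply the same mechanical substitution: the homeserver no longer exposes the main datastore via `get_datastore()`, but groups its datastores behind `get_datastores()`, with the primary store available as the `main` attribute. A minimal sketch of the before/after shape, assuming a typical `HomeserverTestCase` subclass (the class, test name, and store call here are illustrative, not taken from this patch):

    from synapse.server import HomeServer

    from tests import unittest


    class ExampleStoreTestCase(unittest.HomeserverTestCase):
        def prepare(self, reactor, clock, hs: HomeServer) -> None:
            # Old accessor (removed by this patch series):
            #     self.store = hs.get_datastore()
            # New accessor: datastores are grouped, and the main store
            # hangs off the `.main` attribute of the returned object.
            self.store = hs.get_datastores().main

        def test_store_is_usable(self) -> None:
            # The storage methods themselves are unchanged; only the way
            # the store object is obtained differs.
            self.get_success(self.store.get_user_by_id("@alice:test"))
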
@@ -1060,7 +1060,9 @@ def test_autojoin_rooms(self): user_id = self.register_user("testuser", "password") # Check that the new user successfully joined the four rooms - rooms = self.get_success(self.hs.get_datastore().get_rooms_for_user(user_id)) + rooms = self.get_success( + self.hs.get_datastores().main.get_rooms_for_user(user_id) + ) self.assertEqual(len(rooms), 4) @@ -1184,7 +1186,7 @@ def test_stream_token_is_accepted_for_fwd_pagianation(self): self.assertTrue("end" in channel.json_body) def test_room_messages_purge(self): - store = self.hs.get_datastore() + store = self.hs.get_datastores().main pagination_handler = self.hs.get_pagination_handler() # Send a first message in the room, which will be removed by the purge. diff --git a/tests/rest/client/test_shadow_banned.py b/tests/rest/client/test_shadow_banned.py index b0c44af033c0..7d0e66b53412 100644 --- a/tests/rest/client/test_shadow_banned.py +++ b/tests/rest/client/test_shadow_banned.py @@ -34,7 +34,7 @@ def prepare(self, reactor, clock, homeserver): self.banned_user_id = self.register_user("banned", "test") self.banned_access_token = self.login("banned", "test") - self.store = self.hs.get_datastore() + self.store = self.hs.get_datastores().main self.get_success( self.store.set_shadow_banned(UserID.from_string(self.banned_user_id), True) diff --git a/tests/rest/client/test_shared_rooms.py b/tests/rest/client/test_shared_rooms.py index 283eccd53f95..c42c8aff6c2c 100644 --- a/tests/rest/client/test_shared_rooms.py +++ b/tests/rest/client/test_shared_rooms.py @@ -36,7 +36,7 @@ def make_homeserver(self, reactor, clock): return self.setup_test_homeserver(config=config) def prepare(self, reactor, clock, hs): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.handler = hs.get_user_directory_handler() def _get_shared_rooms(self, token, other_user) -> FakeChannel: diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index cd4af2b1f358..e06256136548 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -299,7 +299,7 @@ class SyncKnockTestCase( ] def prepare(self, reactor, clock, hs): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.url = "/sync?since=%s" self.next_batch = "s0" diff --git a/tests/rest/client/test_typing.py b/tests/rest/client/test_typing.py index ee0abd5295af..de312cb63c34 100644 --- a/tests/rest/client/test_typing.py +++ b/tests/rest/client/test_typing.py @@ -57,7 +57,7 @@ async def get_user_by_access_token(token=None, allow_guest=False): async def _insert_client_ip(*args, **kwargs): return None - hs.get_datastore().insert_client_ip = _insert_client_ip + hs.get_datastores().main.insert_client_ip = _insert_client_ip return hs diff --git a/tests/rest/client/test_upgrade_room.py b/tests/rest/client/test_upgrade_room.py index a42388b26ffb..7f79336abcad 100644 --- a/tests/rest/client/test_upgrade_room.py +++ b/tests/rest/client/test_upgrade_room.py @@ -32,7 +32,7 @@ class UpgradeRoomTest(unittest.HomeserverTestCase): ] def prepare(self, reactor, clock, hs: "HomeServer"): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.creator = self.register_user("creator", "pass") self.creator_token = self.login(self.creator, "pass") diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py index 4cf1ed5ddff0..6878ccddbf19 100644 --- a/tests/rest/media/v1/test_media_storage.py +++ b/tests/rest/media/v1/test_media_storage.py @@ -243,7 +243,7 @@ def prepare(self, 
reactor, clock, hs): media_resource = hs.get_media_repository_resource() self.download_resource = media_resource.children[b"download"] self.thumbnail_resource = media_resource.children[b"thumbnail"] - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.media_repo = hs.get_media_repository() self.media_id = "example.com/12345" diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index 36c495954ff3..02b96c9e6ecf 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -242,7 +242,7 @@ def default_config(self): return c def prepare(self, reactor, clock, hs): - self.store = self.hs.get_datastore() + self.store = self.hs.get_datastores().main self.server_notices_sender = self.hs.get_server_notices_sender() self.server_notices_manager = self.hs.get_server_notices_manager() self.event_source = self.hs.get_event_sources() diff --git a/tests/storage/databases/main/test_deviceinbox.py b/tests/storage/databases/main/test_deviceinbox.py index 36c933b9e91a..50c20c5b921a 100644 --- a/tests/storage/databases/main/test_deviceinbox.py +++ b/tests/storage/databases/main/test_deviceinbox.py @@ -26,7 +26,7 @@ class DeviceInboxBackgroundUpdateStoreTestCase(HomeserverTestCase): ] def prepare(self, reactor, clock, hs): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.user_id = self.register_user("foo", "pass") def test_background_remove_deleted_devices_from_device_inbox(self): diff --git a/tests/storage/databases/main/test_events_worker.py b/tests/storage/databases/main/test_events_worker.py index 5ae491ff5a52..59def6e59cdf 100644 --- a/tests/storage/databases/main/test_events_worker.py +++ b/tests/storage/databases/main/test_events_worker.py @@ -37,7 +37,7 @@ class HaveSeenEventsTestCase(unittest.HomeserverTestCase): def prepare(self, reactor, clock, hs): - self.store: EventsWorkerStore = hs.get_datastore() + self.store: EventsWorkerStore = hs.get_datastores().main # insert some test data for rid in ("room1", "room2"): @@ -122,7 +122,7 @@ class EventCacheTestCase(unittest.HomeserverTestCase): ] def prepare(self, reactor, clock, hs): - self.store: EventsWorkerStore = hs.get_datastore() + self.store: EventsWorkerStore = hs.get_datastores().main self.user = self.register_user("user", "pass") self.token = self.login(self.user, "pass") @@ -163,7 +163,7 @@ class DatabaseOutageTestCase(unittest.HomeserverTestCase): """Test event fetching during a database outage.""" def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer): - self.store: EventsWorkerStore = hs.get_datastore() + self.store: EventsWorkerStore = hs.get_datastores().main self.room_id = f"!room:{hs.hostname}" self.event_ids = [f"event{i}" for i in range(20)] diff --git a/tests/storage/databases/main/test_lock.py b/tests/storage/databases/main/test_lock.py index d326a1d6a642..3ac4646969a4 100644 --- a/tests/storage/databases/main/test_lock.py +++ b/tests/storage/databases/main/test_lock.py @@ -20,7 +20,7 @@ class LockTestCase(unittest.HomeserverTestCase): def prepare(self, reactor, clock, hs: HomeServer): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main def test_simple_lock(self): """Test that we can take out a lock and that while we hold it nobody diff --git a/tests/storage/databases/main/test_room.py b/tests/storage/databases/main/test_room.py index 7496974da3a8..9abd0cb4468d 100644 --- 
a/tests/storage/databases/main/test_room.py +++ b/tests/storage/databases/main/test_room.py @@ -28,7 +28,7 @@ class RoomBackgroundUpdateStoreTestCase(HomeserverTestCase): ] def prepare(self, reactor, clock, hs): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.user_id = self.register_user("foo", "pass") self.token = self.login("foo", "pass") diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py index 200b9198f910..4899cd5c3610 100644 --- a/tests/storage/test__base.py +++ b/tests/storage/test__base.py @@ -20,7 +20,7 @@ class UpsertManyTests(unittest.HomeserverTestCase): def prepare(self, reactor, clock, hs): - self.storage = hs.get_datastore() + self.storage = hs.get_datastores().main self.table_name = "table_" + secrets.token_hex(6) self.get_success( diff --git a/tests/storage/test_account_data.py b/tests/storage/test_account_data.py index d697d2bc1e79..272cd3540220 100644 --- a/tests/storage/test_account_data.py +++ b/tests/storage/test_account_data.py @@ -21,7 +21,7 @@ class IgnoredUsersTestCase(unittest.HomeserverTestCase): def prepare(self, hs, reactor, clock): - self.store = self.hs.get_datastore() + self.store = self.hs.get_datastores().main self.user = "@user:test" def _update_ignore_list( diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index ddcb7f554918..50703ccaee0a 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -467,7 +467,7 @@ def prepare( self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer ) -> None: self.service = Mock(id="foo") - self.store = self.hs.get_datastore() + self.store = self.hs.get_datastores().main self.get_success( self.store.set_appservice_state(self.service, ApplicationServiceState.UP) ) diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index 6156dfac4e58..39dcc094bd8b 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -24,7 +24,7 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase): def prepare(self, reactor, clock, homeserver): - self.updates: BackgroundUpdater = self.hs.get_datastore().db_pool.updates + self.updates: BackgroundUpdater = self.hs.get_datastores().main.db_pool.updates # the base test class should have run the real bg updates for us self.assertTrue( self.get_success(self.updates.has_completed_background_updates()) @@ -42,7 +42,7 @@ def test_do_background_update(self): # the target runtime for each bg update target_background_update_duration_ms = 100 - store = self.hs.get_datastore() + store = self.hs.get_datastores().main self.get_success( store.db_pool.simple_insert( "background_updates", @@ -102,7 +102,7 @@ async def update(progress, count): class BackgroundUpdateControllerTestCase(unittest.HomeserverTestCase): def prepare(self, reactor, clock, homeserver): - self.updates: BackgroundUpdater = self.hs.get_datastore().db_pool.updates + self.updates: BackgroundUpdater = self.hs.get_datastores().main.db_pool.updates # the base test class should have run the real bg updates for us self.assertTrue( self.get_success(self.updates.has_completed_background_updates()) @@ -138,7 +138,7 @@ class MockCM: ) def test_controller(self): - store = self.hs.get_datastore() + store = self.hs.get_datastores().main self.get_success( store.db_pool.simple_insert( "background_updates", diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py index a59c28f89681..ce89c9691206 100644 --- 
a/tests/storage/test_cleanup_extrems.py +++ b/tests/storage/test_cleanup_extrems.py @@ -30,7 +30,7 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): """ def prepare(self, reactor, clock, homeserver): - self.store = homeserver.get_datastore() + self.store = homeserver.get_datastores().main self.room_creator = homeserver.get_room_creation_handler() # Create a test user and room @@ -242,7 +242,7 @@ def make_homeserver(self, reactor, clock): return self.setup_test_homeserver(config=config) def prepare(self, reactor, clock, homeserver): - self.store = homeserver.get_datastore() + self.store = homeserver.get_datastores().main self.room_creator = homeserver.get_room_creation_handler() self.event_creator_handler = homeserver.get_event_creation_handler() diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index c8ac67e35b67..49ad3c132425 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -35,7 +35,7 @@ def make_homeserver(self, reactor, clock): return hs def prepare(self, hs, reactor, clock): - self.store = self.hs.get_datastore() + self.store = self.hs.get_datastores().main def test_insert_new_client_ip(self): self.reactor.advance(12345678) @@ -666,7 +666,7 @@ def make_homeserver(self, reactor, clock): return hs def prepare(self, hs, reactor, clock): - self.store = self.hs.get_datastore() + self.store = self.hs.get_datastores().main self.user_id = self.register_user("bob", "abc123", True) def test_request_with_xforwarded(self): diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py index b547bf8d9978..21ffc5a9095b 100644 --- a/tests/storage/test_devices.py +++ b/tests/storage/test_devices.py @@ -19,7 +19,7 @@ class DeviceStoreTestCase(HomeserverTestCase): def prepare(self, reactor, clock, hs): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main def test_store_new_device(self): self.get_success( diff --git a/tests/storage/test_directory.py b/tests/storage/test_directory.py index 43628ce44f41..7b72a92424b7 100644 --- a/tests/storage/test_directory.py +++ b/tests/storage/test_directory.py @@ -19,7 +19,7 @@ class DirectoryStoreTestCase(HomeserverTestCase): def prepare(self, reactor, clock, hs): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.room = RoomID.from_string("!abcde:test") self.alias = RoomAlias.from_string("#my-room:test") diff --git a/tests/storage/test_e2e_room_keys.py b/tests/storage/test_e2e_room_keys.py index 7556171d8ac4..fb96ab3a2fd0 100644 --- a/tests/storage/test_e2e_room_keys.py +++ b/tests/storage/test_e2e_room_keys.py @@ -28,7 +28,7 @@ class E2eRoomKeysHandlerTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): hs = self.setup_test_homeserver("server", federation_http_client=None) - self.store = hs.get_datastore() + self.store = hs.get_datastores().main return hs def test_room_keys_version_delete(self): diff --git a/tests/storage/test_end_to_end_keys.py b/tests/storage/test_end_to_end_keys.py index 3bf6e337f4e1..0f04493ad06c 100644 --- a/tests/storage/test_end_to_end_keys.py +++ b/tests/storage/test_end_to_end_keys.py @@ -17,7 +17,7 @@ class EndToEndKeyStoreTestCase(HomeserverTestCase): def prepare(self, reactor, clock, hs): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main def test_key_without_device_name(self): now = 1470174257070 diff --git a/tests/storage/test_event_chain.py b/tests/storage/test_event_chain.py index e3273a93f9a2..401020fd6361 100644 --- 
a/tests/storage/test_event_chain.py +++ b/tests/storage/test_event_chain.py @@ -30,7 +30,7 @@ class EventChainStoreTestCase(HomeserverTestCase): def prepare(self, reactor, clock, hs): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self._next_stream_ordering = 1 def test_simple(self): @@ -492,7 +492,7 @@ class EventChainBackgroundUpdateTestCase(HomeserverTestCase): ] def prepare(self, reactor, clock, hs): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.user_id = self.register_user("foo", "pass") self.token = self.login("foo", "pass") self.requester = create_requester(self.user_id) diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py index 667ca90a4d3e..645d564d1c40 100644 --- a/tests/storage/test_event_federation.py +++ b/tests/storage/test_event_federation.py @@ -31,7 +31,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase): def prepare(self, reactor, clock, hs): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main def test_get_prev_events_for_room(self): room_id = "@ROOM:local" diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py index 738f3ad1dcc7..c9e3b9fa7994 100644 --- a/tests/storage/test_event_push_actions.py +++ b/tests/storage/test_event_push_actions.py @@ -30,7 +30,7 @@ class EventPushActionsStoreTestCase(HomeserverTestCase): def prepare(self, reactor, clock, hs): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.persist_events_store = hs.get_datastores().persist_events def test_get_unread_push_actions_for_user_in_range_for_http(self): diff --git a/tests/storage/test_events.py b/tests/storage/test_events.py index a8639d8f8216..ef5e25873c22 100644 --- a/tests/storage/test_events.py +++ b/tests/storage/test_events.py @@ -32,7 +32,7 @@ class ExtremPruneTestCase(HomeserverTestCase): def prepare(self, reactor, clock, homeserver): self.state = self.hs.get_state_handler() self.persistence = self.hs.get_storage().persistence - self.store = self.hs.get_datastore() + self.store = self.hs.get_datastores().main self.register_user("user", "pass") self.token = self.login("user", "pass") @@ -341,7 +341,7 @@ class InvalideUsersInRoomCacheTestCase(HomeserverTestCase): def prepare(self, reactor, clock, homeserver): self.state = self.hs.get_state_handler() self.persistence = self.hs.get_storage().persistence - self.store = self.hs.get_datastore() + self.store = self.hs.get_datastores().main def test_remote_user_rooms_cache_invalidated(self): """Test that if the server leaves a room the `get_rooms_for_user` cache diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py index 748607828496..6ac4b93f981a 100644 --- a/tests/storage/test_id_generators.py +++ b/tests/storage/test_id_generators.py @@ -26,7 +26,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): skip = "Requires Postgres" def prepare(self, reactor, clock, hs): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.db_pool: DatabasePool = self.store.db_pool self.get_success(self.db_pool.runInteraction("_setup_db", self._setup_db)) @@ -459,7 +459,7 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase): skip = "Requires Postgres" def prepare(self, reactor, clock, hs): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.db_pool: DatabasePool = self.store.db_pool self.get_success(self.db_pool.runInteraction("_setup_db", 
self._setup_db)) @@ -585,7 +585,7 @@ class MultiTableMultiWriterIdGeneratorTestCase(HomeserverTestCase): skip = "Requires Postgres" def prepare(self, reactor, clock, hs): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.db_pool: DatabasePool = self.store.db_pool self.get_success(self.db_pool.runInteraction("_setup_db", self._setup_db)) diff --git a/tests/storage/test_keys.py b/tests/storage/test_keys.py index a94b5fd721f2..9059095525e0 100644 --- a/tests/storage/test_keys.py +++ b/tests/storage/test_keys.py @@ -37,7 +37,7 @@ def decode_verify_key_base64(key_id: str, key_base64: str): class KeyStoreTestCase(tests.unittest.HomeserverTestCase): def test_get_server_verify_keys(self): - store = self.hs.get_datastore() + store = self.hs.get_datastores().main key_id_1 = "ed25519:key1" key_id_2 = "ed25519:KEY_ID_2" @@ -74,7 +74,7 @@ def test_get_server_verify_keys(self): def test_cache(self): """Check that updates correctly invalidate the cache.""" - store = self.hs.get_datastore() + store = self.hs.get_datastores().main key_id_1 = "ed25519:key1" key_id_2 = "ed25519:key2" diff --git a/tests/storage/test_main.py b/tests/storage/test_main.py index f8d11bac4ec5..4ca212fd1105 100644 --- a/tests/storage/test_main.py +++ b/tests/storage/test_main.py @@ -22,7 +22,7 @@ class DataStoreTestCase(unittest.HomeserverTestCase): def setUp(self) -> None: super(DataStoreTestCase, self).setUp() - self.store = self.hs.get_datastore() + self.store = self.hs.get_datastores().main self.user = UserID.from_string("@abcde:test") self.displayname = "Frank" diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py index d6b4cdd788ff..79648d45dba4 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py @@ -45,7 +45,7 @@ def default_config(self): return config def prepare(self, reactor, clock, homeserver): - self.store = homeserver.get_datastore() + self.store = homeserver.get_datastores().main # Advance the clock a bit reactor.advance(FORTY_DAYS) diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py index d37736edf8f9..b6f99af2f1b8 100644 --- a/tests/storage/test_profile.py +++ b/tests/storage/test_profile.py @@ -22,7 +22,7 @@ class ProfileStoreTestCase(unittest.HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.u_frank = UserID.from_string("@frank:test") diff --git a/tests/storage/test_purge.py b/tests/storage/test_purge.py index 22a77c3cccc5..08cc60237ec1 100644 --- a/tests/storage/test_purge.py +++ b/tests/storage/test_purge.py @@ -30,7 +30,7 @@ def make_homeserver(self, reactor, clock): def prepare(self, reactor, clock, hs): self.room_id = self.helper.create_room_as(self.user_id) - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.storage = self.hs.get_storage() def test_purge_history(self): @@ -47,7 +47,7 @@ def test_purge_history(self): token = self.get_success( self.store.get_topological_token_for_event(last["event_id"]) ) - token_str = self.get_success(token.to_string(self.hs.get_datastore())) + token_str = self.get_success(token.to_string(self.hs.get_datastores().main)) # Purge everything before this topological token self.get_success( diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py index 8c95a0a2fb1f..03e9cc7d4ad9 100644 --- a/tests/storage/test_redaction.py +++ 
b/tests/storage/test_redaction.py @@ -30,7 +30,7 @@ def default_config(self): return config def prepare(self, reactor, clock, hs): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.storage = hs.get_storage() self.event_builder_factory = hs.get_event_builder_factory() self.event_creation_handler = hs.get_event_creation_handler() diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py index 97480652824d..1fa495f7787d 100644 --- a/tests/storage/test_registration.py +++ b/tests/storage/test_registration.py @@ -20,7 +20,7 @@ class RegistrationStoreTestCase(HomeserverTestCase): def prepare(self, reactor, clock, hs): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.user_id = "@my-user:test" self.tokens = ["AbCdEfGhIjKlMnOpQrStUvWxYz", "BcDeFgHiJkLmNoPqRsTuVwXyZa"] diff --git a/tests/storage/test_rollback_worker.py b/tests/storage/test_rollback_worker.py index cfc8098af67e..0baa54312e33 100644 --- a/tests/storage/test_rollback_worker.py +++ b/tests/storage/test_rollback_worker.py @@ -56,7 +56,7 @@ def default_config(self): def test_rolling_back(self): """Test that workers can start if the DB is a newer schema version""" - db_pool = self.hs.get_datastore().db_pool + db_pool = self.hs.get_datastores().main.db_pool db_conn = LoggingDatabaseConnection( db_pool._db_pool.connect(), db_pool.engine, @@ -72,7 +72,7 @@ def test_rolling_back(self): def test_not_upgraded_old_schema_version(self): """Test that workers don't start if the DB has an older schema version""" - db_pool = self.hs.get_datastore().db_pool + db_pool = self.hs.get_datastores().main.db_pool db_conn = LoggingDatabaseConnection( db_pool._db_pool.connect(), db_pool.engine, @@ -92,7 +92,7 @@ def test_not_upgraded_current_schema_version_with_outstanding_deltas(self): Test that workers don't start if the DB is on the current schema version, but there are still outstanding delta migrations to run. 
""" - db_pool = self.hs.get_datastore().db_pool + db_pool = self.hs.get_datastores().main.db_pool db_conn = LoggingDatabaseConnection( db_pool._db_pool.connect(), db_pool.engine, diff --git a/tests/storage/test_room.py b/tests/storage/test_room.py index 31ce7f62528f..42bfca2a8366 100644 --- a/tests/storage/test_room.py +++ b/tests/storage/test_room.py @@ -23,7 +23,7 @@ class RoomStoreTestCase(HomeserverTestCase): def prepare(self, reactor, clock, hs): # We can't test RoomStore on its own without the DirectoryStore, for # management of the 'room_aliases' table - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.room = RoomID.from_string("!abcde:test") self.alias = RoomAlias.from_string("#a-room-name:test") @@ -71,7 +71,7 @@ class RoomEventsStoreTestCase(HomeserverTestCase): def prepare(self, reactor, clock, hs): # Room events need the full datastore, for persist_event() and # get_room_state() - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.storage = hs.get_storage() self.event_factory = hs.get_event_factory() diff --git a/tests/storage/test_room_search.py b/tests/storage/test_room_search.py index 8971ecccbd6d..befaa0fcee08 100644 --- a/tests/storage/test_room_search.py +++ b/tests/storage/test_room_search.py @@ -46,7 +46,7 @@ def test_null_byte(self): self.assertIn("event_id", response) # Check that search works for the message where the null byte was replaced - store = self.hs.get_datastore() + store = self.hs.get_datastores().main result = self.get_success( store.search_msgs([room_id], "hi bob", ["content.body"]) ) diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index 5cfdfe9b852e..7028f0dfb000 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -35,7 +35,7 @@ def prepare(self, reactor, clock, hs: TestHomeServer): # We can't test the RoomMemberStore on its own without the other event # storage logic - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.u_alice = self.register_user("alice", "pass") self.t_alice = self.login("alice", "pass") @@ -212,7 +212,7 @@ def test__null_byte_in_display_name_properly_handled(self): class CurrentStateMembershipUpdateTestCase(unittest.HomeserverTestCase): def prepare(self, reactor, clock, homeserver): - self.store = homeserver.get_datastore() + self.store = homeserver.get_datastores().main self.room_creator = homeserver.get_room_creation_handler() def test_can_rerun_update(self): diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index 28c767ecfdfe..f88f1c55fc6f 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -28,7 +28,7 @@ class StateStoreTestCase(HomeserverTestCase): def prepare(self, reactor, clock, hs): - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.storage = hs.get_storage() self.state_datastore = self.storage.state.stores.state self.event_builder_factory = hs.get_event_builder_factory() diff --git a/tests/storage/test_stream.py b/tests/storage/test_stream.py index ce782c7e1d4d..6a1cf3305455 100644 --- a/tests/storage/test_stream.py +++ b/tests/storage/test_stream.py @@ -115,7 +115,7 @@ def _filter_messages(self, filter: JsonDict) -> List[EventBase]: ) events, next_key = self.get_success( - self.hs.get_datastore().paginate_room_events( + self.hs.get_datastores().main.paginate_room_events( room_id=self.room_id, from_key=from_token.room_key, to_key=None, diff --git a/tests/storage/test_transactions.py 
b/tests/storage/test_transactions.py index bea9091d3089..e05daa285e4c 100644 --- a/tests/storage/test_transactions.py +++ b/tests/storage/test_transactions.py @@ -20,7 +20,7 @@ class TransactionStoreTestCase(HomeserverTestCase): def prepare(self, reactor, clock, homeserver): - self.store = homeserver.get_datastore() + self.store = homeserver.get_datastores().main def test_get_set_transactions(self): """Tests that we can successfully get a non-existent entry for diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py index 48f1e9d8411b..7f1964eb6a3d 100644 --- a/tests/storage/test_user_directory.py +++ b/tests/storage/test_user_directory.py @@ -149,7 +149,7 @@ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: return hs def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.store = hs.get_datastore() + self.store = hs.get_datastores().main self.user_dir_helper = GetUserDirectoryTables(self.store) def _purge_and_rebuild_user_dir(self) -> None: @@ -415,7 +415,7 @@ async def mocked_process_users(*args: Any, **kwargs: Any) -> int: class UserDirectoryStoreTestCase(HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.store = hs.get_datastore() + self.store = hs.get_datastores().main # alice and bob are both in !room_id. bobby is not but shares # a homeserver with alice. diff --git a/tests/test_federation.py b/tests/test_federation.py index 2b9804aba005..c39816de855d 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -52,11 +52,13 @@ def setUp(self): ) )[0]["room_id"] - self.store = self.homeserver.get_datastore() + self.store = self.homeserver.get_datastores().main # Figure out what the most recent event is most_recent = self.get_success( - self.homeserver.get_datastore().get_latest_event_ids_in_room(self.room_id) + self.homeserver.get_datastores().main.get_latest_event_ids_in_room( + self.room_id + ) )[0] join_event = make_event_from_dict( @@ -185,7 +187,7 @@ def query_user_devices(destination, user_id): # Register a mock on the store so that the incoming update doesn't fail because # we don't share a room with the user. - store = self.homeserver.get_datastore() + store = self.homeserver.get_datastores().main store.get_rooms_for_user = Mock(return_value=make_awaitable(["!someroom:test"])) # Manually inject a fake device list update. 
We need this update to include at diff --git a/tests/test_mau.py b/tests/test_mau.py index 80ab40e255ea..46bd3075de7b 100644 --- a/tests/test_mau.py +++ b/tests/test_mau.py @@ -52,7 +52,7 @@ def default_config(self): return config def prepare(self, reactor, clock, homeserver): - self.store = homeserver.get_datastore() + self.store = homeserver.get_datastores().main def test_simple_deny_mau(self): # Create and sync so that the MAU counts get updated diff --git a/tests/test_state.py b/tests/test_state.py index 76e0e8ca7ff6..90800421fb86 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -162,7 +162,7 @@ def setUp(self): hs = Mock( spec_set=[ "config", - "get_datastore", + "get_datastores", "get_storage", "get_auth", "get_state_handler", @@ -173,7 +173,7 @@ def setUp(self): ] ) hs.config = default_config("tesths", True) - hs.get_datastore.return_value = self.store + hs.get_datastores.return_value = Mock(main=self.store) hs.get_state_handler.return_value = None hs.get_clock.return_value = MockClock() hs.get_auth.return_value = Auth(hs) diff --git a/tests/test_utils/event_injection.py b/tests/test_utils/event_injection.py index e9ec9e085b53..c654e36ee4f4 100644 --- a/tests/test_utils/event_injection.py +++ b/tests/test_utils/event_injection.py @@ -85,7 +85,9 @@ async def create_event( **kwargs, ) -> Tuple[EventBase, EventContext]: if room_version is None: - room_version = await hs.get_datastore().get_room_version_id(kwargs["room_id"]) + room_version = await hs.get_datastores().main.get_room_version_id( + kwargs["room_id"] + ) builder = hs.get_event_builder_factory().for_room_version( KNOWN_ROOM_VERSIONS[room_version], kwargs diff --git a/tests/test_visibility.py b/tests/test_visibility.py index e0b08d67d435..219b5660b117 100644 --- a/tests/test_visibility.py +++ b/tests/test_visibility.py @@ -93,7 +93,9 @@ def test_erased_user(self) -> None: events_to_filter.append(evt) # the erasey user gets erased - self.get_success(self.hs.get_datastore().mark_user_erased("@erased:local_hs")) + self.get_success( + self.hs.get_datastores().main.mark_user_erased("@erased:local_hs") + ) # ... and the filtering happens. filtered = self.get_success( diff --git a/tests/unittest.py b/tests/unittest.py index 7983c1e8b860..0caa8e7a45fe 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -280,7 +280,7 @@ def setUp(self): # We need a valid token ID to satisfy foreign key constraints. token_id = self.get_success( - self.hs.get_datastore().add_access_token_to_user( + self.hs.get_datastores().main.add_access_token_to_user( self.helper.auth_user_id, "some_fake_token", None, @@ -337,7 +337,7 @@ def wait_on_thread(self, deferred, timeout=10): def wait_for_background_updates(self) -> None: """Block until all background database updates have completed.""" - store = self.hs.get_datastore() + store = self.hs.get_datastores().main while not self.get_success( store.db_pool.updates.has_completed_background_updates() ): @@ -504,7 +504,7 @@ async def run_bg_updates(): self.get_success(stor.db_pool.updates.run_background_updates(False)) hs = setup_test_homeserver(self.addCleanup, *args, **kwargs) - stor = hs.get_datastore() + stor = hs.get_datastores().main # Run the database background updates, when running against "master". if hs.__class__.__name__ == "TestHomeServer": @@ -722,14 +722,16 @@ def add_extremity(self, room_id, event_id): Add the given event as an extremity to the room. 
""" self.get_success( - self.hs.get_datastore().db_pool.simple_insert( + self.hs.get_datastores().main.db_pool.simple_insert( table="event_forward_extremities", values={"room_id": room_id, "event_id": event_id}, desc="test_add_extremity", ) ) - self.hs.get_datastore().get_latest_event_ids_in_room.invalidate((room_id,)) + self.hs.get_datastores().main.get_latest_event_ids_in_room.invalidate( + (room_id,) + ) def attempt_wrong_password_login(self, username, password): """Attempts to login as the user with the given password, asserting @@ -775,7 +777,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer): verify_key_id = "%s:%s" % (verify_key.alg, verify_key.version) self.get_success( - hs.get_datastore().store_server_verify_keys( + hs.get_datastores().main.store_server_verify_keys( from_server=self.OTHER_SERVER_NAME, ts_added_ms=clock.time_msec(), verify_keys=[ diff --git a/tests/util/test_retryutils.py b/tests/util/test_retryutils.py index 9e1bebdc8335..26cb71c640c3 100644 --- a/tests/util/test_retryutils.py +++ b/tests/util/test_retryutils.py @@ -24,7 +24,7 @@ class RetryLimiterTestCase(HomeserverTestCase): def test_new_destination(self): """A happy-path case with a new destination and a successful operation""" - store = self.hs.get_datastore() + store = self.hs.get_datastores().main limiter = self.get_success(get_retry_limiter("test_dest", self.clock, store)) # advance the clock a bit before making the request @@ -38,7 +38,7 @@ def test_new_destination(self): def test_limiter(self): """General test case which walks through the process of a failing request""" - store = self.hs.get_datastore() + store = self.hs.get_datastores().main limiter = self.get_success(get_retry_limiter("test_dest", self.clock, store)) diff --git a/tests/utils.py b/tests/utils.py index c06fc320f3df..ef99c72e0b50 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -367,7 +367,7 @@ async def create_room(hs, room_id: str, creator_id: str): """Creates and persist a creation event for the given room""" persistence_store = hs.get_storage().persistence - store = hs.get_datastore() + store = hs.get_datastores().main event_builder_factory = hs.get_event_builder_factory() event_creation_handler = hs.get_event_creation_handler() From 5b2b36809fc3543ed0c9ec587398a09f2e176265 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 23 Feb 2022 12:35:53 +0000 Subject: [PATCH 294/474] Remove more references to `get_datastore` (#12067) These have snuck in since #12031 was started. Also a couple of other cleanups while we're in the area. --- changelog.d/12067.feature | 1 + synapse/handlers/account.py | 4 ++-- synapse/rest/client/account.py | 3 --- tests/rest/client/test_account.py | 4 +++- 4 files changed, 6 insertions(+), 6 deletions(-) create mode 100644 changelog.d/12067.feature diff --git a/changelog.d/12067.feature b/changelog.d/12067.feature new file mode 100644 index 000000000000..dc1153c49ef4 --- /dev/null +++ b/changelog.d/12067.feature @@ -0,0 +1 @@ +Implement experimental support for [MSC3720](https://github.com/matrix-org/matrix-doc/pull/3720) (account status endpoints). 
diff --git a/synapse/handlers/account.py b/synapse/handlers/account.py index f8cfe9f6dec7..d5badf635bf7 100644 --- a/synapse/handlers/account.py +++ b/synapse/handlers/account.py @@ -23,7 +23,7 @@ class AccountHandler: def __init__(self, hs: "HomeServer"): - self._store = hs.get_datastore() + self._main_store = hs.get_datastores().main self._is_mine = hs.is_mine self._federation_client = hs.get_federation_client() @@ -98,7 +98,7 @@ async def _get_local_account_status(self, user_id: UserID) -> JsonDict: """ status = {"exists": False} - userinfo = await self._store.get_userinfo_by_id(user_id.to_string()) + userinfo = await self._main_store.get_userinfo_by_id(user_id.to_string()) if userinfo is not None: status = { diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index 4b217882e812..5587cae98a61 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -904,9 +904,6 @@ class AccountStatusRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() self._auth = hs.get_auth() - self._store = hs.get_datastore() - self._is_mine = hs.is_mine - self._federation_client = hs.get_federation_client() self._account_handler = hs.get_account_handler() async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py index aa019c9a445b..008d635b705c 100644 --- a/tests/rest/client/test_account.py +++ b/tests/rest/client/test_account.py @@ -1119,7 +1119,9 @@ def test_local_user_deactivated(self): """Tests that the account status endpoint correctly reports a deactivated user.""" user = self.register_user("someuser", "password") self.get_success( - self.hs.get_datastore().set_user_deactivated_status(user, deactivated=True) + self.hs.get_datastores().main.set_user_deactivated_status( + user, deactivated=True + ) ) self._test_status( From 64c73c6ac88a740ee480a0ad1f9afc8596bccfa4 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Wed, 23 Feb 2022 14:33:19 +0100 Subject: [PATCH 295/474] Add type hints to `tests/rest/client` (#12066) --- changelog.d/12066.misc | 1 + tests/rest/client/test_auth.py | 70 ++++++++------- tests/rest/client/test_capabilities.py | 30 ++++--- tests/rest/client/test_login.py | 120 ++++++++++++++----------- tests/rest/client/test_sync.py | 47 +++++----- 5 files changed, 149 insertions(+), 119 deletions(-) create mode 100644 changelog.d/12066.misc diff --git a/changelog.d/12066.misc b/changelog.d/12066.misc new file mode 100644 index 000000000000..0360dbd61edc --- /dev/null +++ b/changelog.d/12066.misc @@ -0,0 +1 @@ +Add type hints to `tests/rest/client`. diff --git a/tests/rest/client/test_auth.py b/tests/rest/client/test_auth.py index 4a68d6657321..9653f458379f 100644 --- a/tests/rest/client/test_auth.py +++ b/tests/rest/client/test_auth.py @@ -13,17 +13,21 @@ # See the License for the specific language governing permissions and # limitations under the License. 
from http import HTTPStatus -from typing import Optional, Tuple, Union +from typing import Any, Dict, List, Optional, Tuple, Union from twisted.internet.defer import succeed +from twisted.test.proto_helpers import MemoryReactor +from twisted.web.resource import Resource import synapse.rest.admin from synapse.api.constants import LoginType from synapse.handlers.ui_auth.checkers import UserInteractiveAuthChecker from synapse.rest.client import account, auth, devices, login, logout, register from synapse.rest.synapse.client import build_synapse_client_resource_tree +from synapse.server import HomeServer from synapse.storage.database import LoggingTransaction from synapse.types import JsonDict, UserID +from synapse.util import Clock from tests import unittest from tests.handlers.test_oidc import HAS_OIDC @@ -33,11 +37,11 @@ class DummyRecaptchaChecker(UserInteractiveAuthChecker): - def __init__(self, hs): + def __init__(self, hs: HomeServer) -> None: super().__init__(hs) - self.recaptcha_attempts = [] + self.recaptcha_attempts: List[Tuple[dict, str]] = [] - def check_auth(self, authdict, clientip): + def check_auth(self, authdict: dict, clientip: str) -> Any: self.recaptcha_attempts.append((authdict, clientip)) return succeed(True) @@ -50,7 +54,7 @@ class FallbackAuthTests(unittest.HomeserverTestCase): ] hijack_auth = False - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() @@ -61,7 +65,7 @@ def make_homeserver(self, reactor, clock): hs = self.setup_test_homeserver(config=config) return hs - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.recaptcha_checker = DummyRecaptchaChecker(hs) auth_handler = hs.get_auth_handler() auth_handler.checkers[LoginType.RECAPTCHA] = self.recaptcha_checker @@ -101,7 +105,7 @@ def recaptcha( self.assertEqual(len(attempts), 1) self.assertEqual(attempts[0][0]["response"], "a") - def test_fallback_captcha(self): + def test_fallback_captcha(self) -> None: """Ensure that fallback auth via a captcha works.""" # Returns a 401 as per the spec channel = self.register( @@ -132,7 +136,7 @@ def test_fallback_captcha(self): # We're given a registered user. self.assertEqual(channel.json_body["user_id"], "@user:test") - def test_complete_operation_unknown_session(self): + def test_complete_operation_unknown_session(self) -> None: """ Attempting to mark an invalid session as complete should error. 
""" @@ -165,7 +169,7 @@ class UIAuthTests(unittest.HomeserverTestCase): register.register_servlets, ] - def default_config(self): + def default_config(self) -> Dict[str, Any]: config = super().default_config() # public_baseurl uses an http:// scheme because FakeChannel.isSecure() returns @@ -182,12 +186,12 @@ def default_config(self): return config - def create_resource_dict(self): + def create_resource_dict(self) -> Dict[str, Resource]: resource_dict = super().create_resource_dict() resource_dict.update(build_synapse_client_resource_tree(self.hs)) return resource_dict - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.user_pass = "pass" self.user = self.register_user("test", self.user_pass) self.device_id = "dev1" @@ -229,7 +233,7 @@ def delete_devices(self, expected_response: int, body: JsonDict) -> FakeChannel: return channel - def test_ui_auth(self): + def test_ui_auth(self) -> None: """ Test user interactive authentication outside of registration. """ @@ -259,7 +263,7 @@ def test_ui_auth(self): }, ) - def test_grandfathered_identifier(self): + def test_grandfathered_identifier(self) -> None: """Check behaviour without "identifier" dict Synapse used to require clients to submit a "user" field for m.login.password @@ -286,7 +290,7 @@ def test_grandfathered_identifier(self): }, ) - def test_can_change_body(self): + def test_can_change_body(self) -> None: """ The client dict can be modified during the user interactive authentication session. @@ -325,7 +329,7 @@ def test_can_change_body(self): }, ) - def test_cannot_change_uri(self): + def test_cannot_change_uri(self) -> None: """ The initial requested URI cannot be modified during the user interactive authentication session. """ @@ -362,7 +366,7 @@ def test_cannot_change_uri(self): ) @unittest.override_config({"ui_auth": {"session_timeout": "5s"}}) - def test_can_reuse_session(self): + def test_can_reuse_session(self) -> None: """ The session can be reused if configured. 
@@ -409,7 +413,7 @@ def test_can_reuse_session(self): @skip_unless(HAS_OIDC, "requires OIDC") @override_config({"oidc_config": TEST_OIDC_CONFIG}) - def test_ui_auth_via_sso(self): + def test_ui_auth_via_sso(self) -> None: """Test a successful UI Auth flow via SSO This includes: @@ -452,7 +456,7 @@ def test_ui_auth_via_sso(self): @skip_unless(HAS_OIDC, "requires OIDC") @override_config({"oidc_config": TEST_OIDC_CONFIG}) - def test_does_not_offer_password_for_sso_user(self): + def test_does_not_offer_password_for_sso_user(self) -> None: login_resp = self.helper.login_via_oidc("username") user_tok = login_resp["access_token"] device_id = login_resp["device_id"] @@ -464,7 +468,7 @@ def test_does_not_offer_password_for_sso_user(self): flows = channel.json_body["flows"] self.assertEqual(flows, [{"stages": ["m.login.sso"]}]) - def test_does_not_offer_sso_for_password_user(self): + def test_does_not_offer_sso_for_password_user(self) -> None: channel = self.delete_device( self.user_tok, self.device_id, HTTPStatus.UNAUTHORIZED ) @@ -474,7 +478,7 @@ def test_does_not_offer_sso_for_password_user(self): @skip_unless(HAS_OIDC, "requires OIDC") @override_config({"oidc_config": TEST_OIDC_CONFIG}) - def test_offers_both_flows_for_upgraded_user(self): + def test_offers_both_flows_for_upgraded_user(self) -> None: """A user that had a password and then logged in with SSO should get both flows""" login_resp = self.helper.login_via_oidc(UserID.from_string(self.user).localpart) self.assertEqual(login_resp["user_id"], self.user) @@ -491,7 +495,7 @@ def test_offers_both_flows_for_upgraded_user(self): @skip_unless(HAS_OIDC, "requires OIDC") @override_config({"oidc_config": TEST_OIDC_CONFIG}) - def test_ui_auth_fails_for_incorrect_sso_user(self): + def test_ui_auth_fails_for_incorrect_sso_user(self) -> None: """If the user tries to authenticate with the wrong SSO user, they get an error""" # log the user in login_resp = self.helper.login_via_oidc(UserID.from_string(self.user).localpart) @@ -534,7 +538,7 @@ class RefreshAuthTests(unittest.HomeserverTestCase): ] hijack_auth = False - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.user_pass = "pass" self.user = self.register_user("test", self.user_pass) @@ -548,7 +552,7 @@ def use_refresh_token(self, refresh_token: str) -> FakeChannel: {"refresh_token": refresh_token}, ) - def is_access_token_valid(self, access_token) -> bool: + def is_access_token_valid(self, access_token: str) -> bool: """ Checks whether an access token is valid, returning whether it is or not. """ @@ -561,7 +565,7 @@ def is_access_token_valid(self, access_token) -> bool: return code == HTTPStatus.OK - def test_login_issue_refresh_token(self): + def test_login_issue_refresh_token(self) -> None: """ A login response should include a refresh_token only if asked. """ @@ -591,7 +595,7 @@ def test_login_issue_refresh_token(self): self.assertIn("refresh_token", login_with_refresh.json_body) self.assertIn("expires_in_ms", login_with_refresh.json_body) - def test_register_issue_refresh_token(self): + def test_register_issue_refresh_token(self) -> None: """ A register response should include a refresh_token only if asked. 
""" @@ -627,7 +631,7 @@ def test_register_issue_refresh_token(self): self.assertIn("refresh_token", register_with_refresh.json_body) self.assertIn("expires_in_ms", register_with_refresh.json_body) - def test_token_refresh(self): + def test_token_refresh(self) -> None: """ A refresh token can be used to issue a new access token. """ @@ -665,7 +669,7 @@ def test_token_refresh(self): ) @override_config({"refreshable_access_token_lifetime": "1m"}) - def test_refreshable_access_token_expiration(self): + def test_refreshable_access_token_expiration(self) -> None: """ The access token should have some time as specified in the config. """ @@ -722,7 +726,9 @@ def test_refreshable_access_token_expiration(self): "nonrefreshable_access_token_lifetime": "10m", } ) - def test_different_expiry_for_refreshable_and_nonrefreshable_access_tokens(self): + def test_different_expiry_for_refreshable_and_nonrefreshable_access_tokens( + self, + ) -> None: """ Tests that the expiry times for refreshable and non-refreshable access tokens can be different. @@ -782,7 +788,7 @@ def test_different_expiry_for_refreshable_and_nonrefreshable_access_tokens(self) @override_config( {"refreshable_access_token_lifetime": "1m", "refresh_token_lifetime": "2m"} ) - def test_refresh_token_expiry(self): + def test_refresh_token_expiry(self) -> None: """ The refresh token can be configured to have a limited lifetime. When that lifetime has ended, the refresh token can no longer be used to @@ -834,7 +840,7 @@ def test_refresh_token_expiry(self): "session_lifetime": "3m", } ) - def test_ultimate_session_expiry(self): + def test_ultimate_session_expiry(self) -> None: """ The session can be configured to have an ultimate, limited lifetime. """ @@ -882,7 +888,7 @@ def test_ultimate_session_expiry(self): refresh_response.code, HTTPStatus.FORBIDDEN, refresh_response.result ) - def test_refresh_token_invalidation(self): + def test_refresh_token_invalidation(self) -> None: """Refresh tokens are invalidated after first use of the next token. A refresh token is considered invalid if: @@ -987,7 +993,7 @@ def test_refresh_token_invalidation(self): fifth_refresh_response.code, HTTPStatus.OK, fifth_refresh_response.result ) - def test_many_token_refresh(self): + def test_many_token_refresh(self) -> None: """ If a refresh is performed many times during a session, there shouldn't be extra 'cruft' built up over time. diff --git a/tests/rest/client/test_capabilities.py b/tests/rest/client/test_capabilities.py index 989e80176820..d1751e1557f7 100644 --- a/tests/rest/client/test_capabilities.py +++ b/tests/rest/client/test_capabilities.py @@ -13,9 +13,13 @@ # limitations under the License. 
from http import HTTPStatus +from twisted.test.proto_helpers import MemoryReactor + import synapse.rest.admin from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.rest.client import capabilities, login +from synapse.server import HomeServer +from synapse.util import Clock from tests import unittest from tests.unittest import override_config @@ -29,24 +33,24 @@ class CapabilitiesTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.url = b"/capabilities" hs = self.setup_test_homeserver() self.config = hs.config self.auth_handler = hs.get_auth_handler() return hs - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.localpart = "user" self.password = "pass" self.user = self.register_user(self.localpart, self.password) - def test_check_auth_required(self): + def test_check_auth_required(self) -> None: channel = self.make_request("GET", self.url) self.assertEqual(channel.code, 401) - def test_get_room_version_capabilities(self): + def test_get_room_version_capabilities(self) -> None: access_token = self.login(self.localpart, self.password) channel = self.make_request("GET", self.url, access_token=access_token) @@ -61,7 +65,7 @@ def test_get_room_version_capabilities(self): capabilities["m.room_versions"]["default"], ) - def test_get_change_password_capabilities_password_login(self): + def test_get_change_password_capabilities_password_login(self) -> None: access_token = self.login(self.localpart, self.password) channel = self.make_request("GET", self.url, access_token=access_token) @@ -71,7 +75,7 @@ def test_get_change_password_capabilities_password_login(self): self.assertTrue(capabilities["m.change_password"]["enabled"]) @override_config({"password_config": {"localdb_enabled": False}}) - def test_get_change_password_capabilities_localdb_disabled(self): + def test_get_change_password_capabilities_localdb_disabled(self) -> None: access_token = self.get_success( self.auth_handler.create_access_token_for_user_id( self.user, device_id=None, valid_until_ms=None @@ -85,7 +89,7 @@ def test_get_change_password_capabilities_localdb_disabled(self): self.assertFalse(capabilities["m.change_password"]["enabled"]) @override_config({"password_config": {"enabled": False}}) - def test_get_change_password_capabilities_password_disabled(self): + def test_get_change_password_capabilities_password_disabled(self) -> None: access_token = self.get_success( self.auth_handler.create_access_token_for_user_id( self.user, device_id=None, valid_until_ms=None @@ -98,7 +102,7 @@ def test_get_change_password_capabilities_password_disabled(self): self.assertEqual(channel.code, 200) self.assertFalse(capabilities["m.change_password"]["enabled"]) - def test_get_change_users_attributes_capabilities(self): + def test_get_change_users_attributes_capabilities(self) -> None: """Test that server returns capabilities by default.""" access_token = self.login(self.localpart, self.password) @@ -112,7 +116,7 @@ def test_get_change_users_attributes_capabilities(self): self.assertTrue(capabilities["m.3pid_changes"]["enabled"]) @override_config({"enable_set_displayname": False}) - def test_get_set_displayname_capabilities_displayname_disabled(self): + def test_get_set_displayname_capabilities_displayname_disabled(self) -> None: """Test if set displayname is disabled that the server responds it.""" access_token 
= self.login(self.localpart, self.password) @@ -123,7 +127,7 @@ def test_get_set_displayname_capabilities_displayname_disabled(self): self.assertFalse(capabilities["m.set_displayname"]["enabled"]) @override_config({"enable_set_avatar_url": False}) - def test_get_set_avatar_url_capabilities_avatar_url_disabled(self): + def test_get_set_avatar_url_capabilities_avatar_url_disabled(self) -> None: """Test if set avatar_url is disabled that the server responds it.""" access_token = self.login(self.localpart, self.password) @@ -134,7 +138,7 @@ def test_get_set_avatar_url_capabilities_avatar_url_disabled(self): self.assertFalse(capabilities["m.set_avatar_url"]["enabled"]) @override_config({"enable_3pid_changes": False}) - def test_get_change_3pid_capabilities_3pid_disabled(self): + def test_get_change_3pid_capabilities_3pid_disabled(self) -> None: """Test if change 3pid is disabled that the server responds it.""" access_token = self.login(self.localpart, self.password) @@ -145,7 +149,7 @@ def test_get_change_3pid_capabilities_3pid_disabled(self): self.assertFalse(capabilities["m.3pid_changes"]["enabled"]) @override_config({"experimental_features": {"msc3244_enabled": False}}) - def test_get_does_not_include_msc3244_fields_when_disabled(self): + def test_get_does_not_include_msc3244_fields_when_disabled(self) -> None: access_token = self.get_success( self.auth_handler.create_access_token_for_user_id( self.user, device_id=None, valid_until_ms=None @@ -160,7 +164,7 @@ def test_get_does_not_include_msc3244_fields_when_disabled(self): "org.matrix.msc3244.room_capabilities", capabilities["m.room_versions"] ) - def test_get_does_include_msc3244_fields_when_enabled(self): + def test_get_does_include_msc3244_fields_when_enabled(self) -> None: access_token = self.get_success( self.auth_handler.create_access_token_for_user_id( self.user, device_id=None, valid_until_ms=None diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py index 26d0d83e00f2..d48defda6386 100644 --- a/tests/rest/client/test_login.py +++ b/tests/rest/client/test_login.py @@ -20,6 +20,7 @@ import pymacaroons +from twisted.test.proto_helpers import MemoryReactor from twisted.web.resource import Resource import synapse.rest.admin @@ -27,12 +28,15 @@ from synapse.rest.client import devices, login, logout, register from synapse.rest.client.account import WhoamiRestServlet from synapse.rest.synapse.client import build_synapse_client_resource_tree +from synapse.server import HomeServer from synapse.types import create_requester +from synapse.util import Clock from tests import unittest from tests.handlers.test_oidc import HAS_OIDC from tests.handlers.test_saml import has_saml2 from tests.rest.client.utils import TEST_OIDC_AUTH_ENDPOINT, TEST_OIDC_CONFIG +from tests.server import FakeChannel from tests.test_utils.html_parsers import TestHtmlParser from tests.unittest import HomeserverTestCase, override_config, skip_unless @@ -95,7 +99,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): lambda hs, http_server: WhoamiRestServlet(hs).register(http_server), ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.hs = self.setup_test_homeserver() self.hs.config.registration.enable_registration = True self.hs.config.registration.registrations_require_3pid = [] @@ -117,7 +121,7 @@ def make_homeserver(self, reactor, clock): } } ) - def test_POST_ratelimiting_per_address(self): + def test_POST_ratelimiting_per_address(self) -> None: # Create 
different users so we're sure not to be bothered by the per-user # ratelimiter. for i in range(0, 6): @@ -165,7 +169,7 @@ def test_POST_ratelimiting_per_address(self): } } ) - def test_POST_ratelimiting_per_account(self): + def test_POST_ratelimiting_per_account(self) -> None: self.register_user("kermit", "monkey") for i in range(0, 6): @@ -210,7 +214,7 @@ def test_POST_ratelimiting_per_account(self): } } ) - def test_POST_ratelimiting_per_account_failed_attempts(self): + def test_POST_ratelimiting_per_account_failed_attempts(self) -> None: self.register_user("kermit", "monkey") for i in range(0, 6): @@ -243,7 +247,7 @@ def test_POST_ratelimiting_per_account_failed_attempts(self): self.assertEquals(channel.result["code"], b"403", channel.result) @override_config({"session_lifetime": "24h"}) - def test_soft_logout(self): + def test_soft_logout(self) -> None: self.register_user("kermit", "monkey") # we shouldn't be able to make requests without an access token @@ -298,7 +302,9 @@ def test_soft_logout(self): self.assertEquals(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") self.assertEquals(channel.json_body["soft_logout"], False) - def _delete_device(self, access_token, user_id, password, device_id): + def _delete_device( + self, access_token: str, user_id: str, password: str, device_id: str + ) -> None: """Perform the UI-Auth to delete a device""" channel = self.make_request( b"DELETE", "devices/" + device_id, access_token=access_token @@ -329,7 +335,7 @@ def _delete_device(self, access_token, user_id, password, device_id): self.assertEquals(channel.code, 200, channel.result) @override_config({"session_lifetime": "24h"}) - def test_session_can_hard_logout_after_being_soft_logged_out(self): + def test_session_can_hard_logout_after_being_soft_logged_out(self) -> None: self.register_user("kermit", "monkey") # log in as normal @@ -353,7 +359,9 @@ def test_session_can_hard_logout_after_being_soft_logged_out(self): self.assertEquals(channel.result["code"], b"200", channel.result) @override_config({"session_lifetime": "24h"}) - def test_session_can_hard_logout_all_sessions_after_being_soft_logged_out(self): + def test_session_can_hard_logout_all_sessions_after_being_soft_logged_out( + self, + ) -> None: self.register_user("kermit", "monkey") # log in as normal @@ -432,7 +440,7 @@ def create_resource_dict(self) -> Dict[str, Resource]: d.update(build_synapse_client_resource_tree(self.hs)) return d - def test_get_login_flows(self): + def test_get_login_flows(self) -> None: """GET /login should return password and SSO flows""" channel = self.make_request("GET", "/_matrix/client/r0/login") self.assertEqual(channel.code, 200, channel.result) @@ -459,12 +467,14 @@ def test_get_login_flows(self): ], ) - def test_multi_sso_redirect(self): + def test_multi_sso_redirect(self) -> None: """/login/sso/redirect should redirect to an identity picker""" # first hit the redirect url, which should redirect to our idp picker channel = self._make_sso_redirect_request(None) self.assertEqual(channel.code, 302, channel.result) - uri = channel.headers.getRawHeaders("Location")[0] + location_headers = channel.headers.getRawHeaders("Location") + assert location_headers + uri = location_headers[0] # hitting that picker should give us some HTML channel = self.make_request("GET", uri) @@ -487,7 +497,7 @@ def test_multi_sso_redirect(self): self.assertCountEqual(returned_idps, ["cas", "oidc", "oidc-idp1", "saml"]) - def test_multi_sso_redirect_to_cas(self): + def test_multi_sso_redirect_to_cas(self) -> None: """If CAS is 
chosen, should redirect to the CAS server""" channel = self.make_request( @@ -514,7 +524,7 @@ def test_multi_sso_redirect_to_cas(self): service_uri_params = urllib.parse.parse_qs(service_uri_query) self.assertEqual(service_uri_params["redirectUrl"][0], TEST_CLIENT_REDIRECT_URL) - def test_multi_sso_redirect_to_saml(self): + def test_multi_sso_redirect_to_saml(self) -> None: """If SAML is chosen, should redirect to the SAML server""" channel = self.make_request( "GET", @@ -536,7 +546,7 @@ def test_multi_sso_redirect_to_saml(self): relay_state_param = saml_uri_params["RelayState"][0] self.assertEqual(relay_state_param, TEST_CLIENT_REDIRECT_URL) - def test_login_via_oidc(self): + def test_login_via_oidc(self) -> None: """If OIDC is chosen, should redirect to the OIDC auth endpoint""" # pick the default OIDC provider @@ -604,7 +614,7 @@ def test_login_via_oidc(self): self.assertEqual(chan.code, 200, chan.result) self.assertEqual(chan.json_body["user_id"], "@user1:test") - def test_multi_sso_redirect_to_unknown(self): + def test_multi_sso_redirect_to_unknown(self) -> None: """An unknown IdP should cause a 400""" channel = self.make_request( "GET", @@ -612,23 +622,25 @@ def test_multi_sso_redirect_to_unknown(self): ) self.assertEqual(channel.code, 400, channel.result) - def test_client_idp_redirect_to_unknown(self): + def test_client_idp_redirect_to_unknown(self) -> None: """If the client tries to pick an unknown IdP, return a 404""" channel = self._make_sso_redirect_request("xxx") self.assertEqual(channel.code, 404, channel.result) self.assertEqual(channel.json_body["errcode"], "M_NOT_FOUND") - def test_client_idp_redirect_to_oidc(self): + def test_client_idp_redirect_to_oidc(self) -> None: """If the client pick a known IdP, redirect to it""" channel = self._make_sso_redirect_request("oidc") self.assertEqual(channel.code, 302, channel.result) - oidc_uri = channel.headers.getRawHeaders("Location")[0] + location_headers = channel.headers.getRawHeaders("Location") + assert location_headers + oidc_uri = location_headers[0] oidc_uri_path, oidc_uri_query = oidc_uri.split("?", 1) # it should redirect us to the auth page of the OIDC server self.assertEqual(oidc_uri_path, TEST_OIDC_AUTH_ENDPOINT) - def _make_sso_redirect_request(self, idp_prov: Optional[str] = None): + def _make_sso_redirect_request(self, idp_prov: Optional[str] = None) -> FakeChannel: """Send a request to /_matrix/client/r0/login/sso/redirect ... 
possibly specifying an IDP provider @@ -659,7 +671,7 @@ class CASTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.base_url = "https://matrix.goodserver.com/" self.redirect_path = "_synapse/client/login/sso/redirect/confirm" @@ -675,7 +687,7 @@ def make_homeserver(self, reactor, clock): cas_user_id = "username" self.user_id = "@%s:test" % cas_user_id - async def get_raw(uri, args): + async def get_raw(uri: str, args: Any) -> bytes: """Return an example response payload from a call to the `/proxyValidate` endpoint of a CAS server, copied from https://apereo.github.io/cas/5.0.x/protocol/CAS-Protocol-V2-Specification.html#26-proxyvalidate-cas-20 @@ -709,10 +721,10 @@ async def get_raw(uri, args): return self.hs - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.deactivate_account_handler = hs.get_deactivate_account_handler() - def test_cas_redirect_confirm(self): + def test_cas_redirect_confirm(self) -> None: """Tests that the SSO login flow serves a confirmation page before redirecting a user to the redirect URL. """ @@ -754,15 +766,15 @@ def test_cas_redirect_confirm(self): } } ) - def test_cas_redirect_whitelisted(self): + def test_cas_redirect_whitelisted(self) -> None: """Tests that the SSO login flow serves a redirect to a whitelisted url""" self._test_redirect("https://legit-site.com/") @override_config({"public_baseurl": "https://example.com"}) - def test_cas_redirect_login_fallback(self): + def test_cas_redirect_login_fallback(self) -> None: self._test_redirect("https://example.com/_matrix/static/client/login") - def _test_redirect(self, redirect_url): + def _test_redirect(self, redirect_url: str) -> None: """Tests that the SSO login flow serves a redirect for the given redirect URL.""" cas_ticket_url = ( "/_matrix/client/r0/login/cas/ticket?redirectUrl=%s&ticket=ticket" @@ -778,7 +790,7 @@ def _test_redirect(self, redirect_url): self.assertEqual(location_headers[0][: len(redirect_url)], redirect_url) @override_config({"sso": {"client_whitelist": ["https://legit-site.com/"]}}) - def test_deactivated_user(self): + def test_deactivated_user(self) -> None: """Logging in as a deactivated account should error.""" redirect_url = "https://legit-site.com/" @@ -821,7 +833,7 @@ class JWTTestCase(unittest.HomeserverTestCase): "algorithm": jwt_algorithm, } - def default_config(self): + def default_config(self) -> Dict[str, Any]: config = super().default_config() # If jwt_config has been defined (eg via @override_config), don't replace it. 
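A note on the `JWTTestCase` helpers touched around this point: `jwt_encode` wraps a JWT library, and its `result.decode("ascii")` fallback exists because older versions of that library return bytes rather than str. A minimal sketch of the round-trip these tests exercise, assuming PyJWT (the excerpt never shows the import, so the exact library is an assumption):

```python
import jwt  # PyJWT is assumed here; the patch only shows a jwt_encode() wrapper

secret = "notasecret"

# Encode a payload carrying the claims the tests use ("sub", plus optional
# "iss"/"aud"/"exp"/"nbf"). PyJWT 2.x returns str; 1.x returned bytes, which
# is why the test helper falls back to result.decode("ascii").
token = jwt.encode({"sub": "kermit", "iss": "test-issuer"}, secret, algorithm="HS256")
if isinstance(token, bytes):
    token = token.decode("ascii")

# Validation mirrors what the homeserver does at login time: a wrong secret,
# an expired "exp", a future "nbf", or a mismatched "iss"/"aud" raise
# exceptions (e.g. jwt.InvalidSignatureError), which the tests observe as a
# 403 M_FORBIDDEN response carrying a "JWT validation failed: ..." message.
claims = jwt.decode(token, secret, algorithms=["HS256"], issuer="test-issuer")
assert claims["sub"] == "kermit"
```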
@@ -837,23 +849,23 @@ def jwt_encode(self, payload: Dict[str, Any], secret: str = jwt_secret) -> str: return result.decode("ascii") return result - def jwt_login(self, *args): + def jwt_login(self, *args: Any) -> FakeChannel: params = {"type": "org.matrix.login.jwt", "token": self.jwt_encode(*args)} channel = self.make_request(b"POST", LOGIN_URL, params) return channel - def test_login_jwt_valid_registered(self): + def test_login_jwt_valid_registered(self) -> None: self.register_user("kermit", "monkey") channel = self.jwt_login({"sub": "kermit"}) self.assertEqual(channel.result["code"], b"200", channel.result) self.assertEqual(channel.json_body["user_id"], "@kermit:test") - def test_login_jwt_valid_unregistered(self): + def test_login_jwt_valid_unregistered(self) -> None: channel = self.jwt_login({"sub": "frog"}) self.assertEqual(channel.result["code"], b"200", channel.result) self.assertEqual(channel.json_body["user_id"], "@frog:test") - def test_login_jwt_invalid_signature(self): + def test_login_jwt_invalid_signature(self) -> None: channel = self.jwt_login({"sub": "frog"}, "notsecret") self.assertEqual(channel.result["code"], b"403", channel.result) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") @@ -862,7 +874,7 @@ def test_login_jwt_invalid_signature(self): "JWT validation failed: Signature verification failed", ) - def test_login_jwt_expired(self): + def test_login_jwt_expired(self) -> None: channel = self.jwt_login({"sub": "frog", "exp": 864000}) self.assertEqual(channel.result["code"], b"403", channel.result) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") @@ -870,7 +882,7 @@ def test_login_jwt_expired(self): channel.json_body["error"], "JWT validation failed: Signature has expired" ) - def test_login_jwt_not_before(self): + def test_login_jwt_not_before(self) -> None: now = int(time.time()) channel = self.jwt_login({"sub": "frog", "nbf": now + 3600}) self.assertEqual(channel.result["code"], b"403", channel.result) @@ -880,14 +892,14 @@ def test_login_jwt_not_before(self): "JWT validation failed: The token is not yet valid (nbf)", ) - def test_login_no_sub(self): + def test_login_no_sub(self) -> None: channel = self.jwt_login({"username": "root"}) self.assertEqual(channel.result["code"], b"403", channel.result) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") self.assertEqual(channel.json_body["error"], "Invalid JWT") @override_config({"jwt_config": {**base_config, "issuer": "test-issuer"}}) - def test_login_iss(self): + def test_login_iss(self) -> None: """Test validating the issuer claim.""" # A valid issuer. channel = self.jwt_login({"sub": "kermit", "iss": "test-issuer"}) @@ -911,14 +923,14 @@ def test_login_iss(self): 'JWT validation failed: Token is missing the "iss" claim', ) - def test_login_iss_no_config(self): + def test_login_iss_no_config(self) -> None: """Test providing an issuer claim without requiring it in the configuration.""" channel = self.jwt_login({"sub": "kermit", "iss": "invalid"}) self.assertEqual(channel.result["code"], b"200", channel.result) self.assertEqual(channel.json_body["user_id"], "@kermit:test") @override_config({"jwt_config": {**base_config, "audiences": ["test-audience"]}}) - def test_login_aud(self): + def test_login_aud(self) -> None: """Test validating the audience claim.""" # A valid audience. 
channel = self.jwt_login({"sub": "kermit", "aud": "test-audience"}) @@ -942,7 +954,7 @@ def test_login_aud(self): 'JWT validation failed: Token is missing the "aud" claim', ) - def test_login_aud_no_config(self): + def test_login_aud_no_config(self) -> None: """Test providing an audience without requiring it in the configuration.""" channel = self.jwt_login({"sub": "kermit", "aud": "invalid"}) self.assertEqual(channel.result["code"], b"403", channel.result) @@ -951,20 +963,20 @@ def test_login_aud_no_config(self): channel.json_body["error"], "JWT validation failed: Invalid audience" ) - def test_login_default_sub(self): + def test_login_default_sub(self) -> None: """Test reading user ID from the default subject claim.""" channel = self.jwt_login({"sub": "kermit"}) self.assertEqual(channel.result["code"], b"200", channel.result) self.assertEqual(channel.json_body["user_id"], "@kermit:test") @override_config({"jwt_config": {**base_config, "subject_claim": "username"}}) - def test_login_custom_sub(self): + def test_login_custom_sub(self) -> None: """Test reading user ID from a custom subject claim.""" channel = self.jwt_login({"username": "frog"}) self.assertEqual(channel.result["code"], b"200", channel.result) self.assertEqual(channel.json_body["user_id"], "@frog:test") - def test_login_no_token(self): + def test_login_no_token(self) -> None: params = {"type": "org.matrix.login.jwt"} channel = self.make_request(b"POST", LOGIN_URL, params) self.assertEqual(channel.result["code"], b"403", channel.result) @@ -1026,7 +1038,7 @@ class JWTPubKeyTestCase(unittest.HomeserverTestCase): ] ) - def default_config(self): + def default_config(self) -> Dict[str, Any]: config = super().default_config() config["jwt_config"] = { "enabled": True, @@ -1042,17 +1054,17 @@ def jwt_encode(self, payload: Dict[str, Any], secret: str = jwt_privatekey) -> s return result.decode("ascii") return result - def jwt_login(self, *args): + def jwt_login(self, *args: Any) -> FakeChannel: params = {"type": "org.matrix.login.jwt", "token": self.jwt_encode(*args)} channel = self.make_request(b"POST", LOGIN_URL, params) return channel - def test_login_jwt_valid(self): + def test_login_jwt_valid(self) -> None: channel = self.jwt_login({"sub": "kermit"}) self.assertEqual(channel.result["code"], b"200", channel.result) self.assertEqual(channel.json_body["user_id"], "@kermit:test") - def test_login_jwt_invalid_signature(self): + def test_login_jwt_invalid_signature(self) -> None: channel = self.jwt_login({"sub": "frog"}, self.bad_privatekey) self.assertEqual(channel.result["code"], b"403", channel.result) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") @@ -1071,7 +1083,7 @@ class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase): register.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.hs = self.setup_test_homeserver() self.service = ApplicationService( @@ -1105,7 +1117,7 @@ def make_homeserver(self, reactor, clock): self.hs.get_datastores().main.services_cache.append(self.another_service) return self.hs - def test_login_appservice_user(self): + def test_login_appservice_user(self) -> None: """Test that an appservice user can use /login""" self.register_appservice_user(AS_USER, self.service.token) @@ -1119,7 +1131,7 @@ def test_login_appservice_user(self): self.assertEquals(channel.result["code"], b"200", channel.result) - def test_login_appservice_user_bot(self): + def 
test_login_appservice_user_bot(self) -> None: """Test that the appservice bot can use /login""" self.register_appservice_user(AS_USER, self.service.token) @@ -1133,7 +1145,7 @@ def test_login_appservice_user_bot(self): self.assertEquals(channel.result["code"], b"200", channel.result) - def test_login_appservice_wrong_user(self): + def test_login_appservice_wrong_user(self) -> None: """Test that non-as users cannot login with the as token""" self.register_appservice_user(AS_USER, self.service.token) @@ -1147,7 +1159,7 @@ def test_login_appservice_wrong_user(self): self.assertEquals(channel.result["code"], b"403", channel.result) - def test_login_appservice_wrong_as(self): + def test_login_appservice_wrong_as(self) -> None: """Test that as users cannot login with wrong as token""" self.register_appservice_user(AS_USER, self.service.token) @@ -1161,7 +1173,7 @@ def test_login_appservice_wrong_as(self): self.assertEquals(channel.result["code"], b"403", channel.result) - def test_login_appservice_no_token(self): + def test_login_appservice_no_token(self) -> None: """Test that users must provide a token when using the appservice login method """ @@ -1182,7 +1194,7 @@ class UsernamePickerTestCase(HomeserverTestCase): servlets = [login.register_servlets] - def default_config(self): + def default_config(self) -> Dict[str, Any]: config = super().default_config() config["public_baseurl"] = BASE_URL @@ -1202,7 +1214,7 @@ def create_resource_dict(self) -> Dict[str, Resource]: d.update(build_synapse_client_resource_tree(self.hs)) return d - def test_username_picker(self): + def test_username_picker(self) -> None: """Test the happy path of a username picker flow.""" # do the start of the login flow diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index e06256136548..69b4ef537843 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -13,9 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import json +from typing import List, Optional from parameterized import parameterized +from twisted.test.proto_helpers import MemoryReactor + import synapse.rest.admin from synapse.api.constants import ( EventContentFields, @@ -24,6 +27,9 @@ RelationTypes, ) from synapse.rest.client import devices, knock, login, read_marker, receipts, room, sync +from synapse.server import HomeServer +from synapse.types import JsonDict +from synapse.util import Clock from tests import unittest from tests.federation.transport.test_knocking import ( @@ -43,7 +49,7 @@ class FilterTestCase(unittest.HomeserverTestCase): sync.register_servlets, ] - def test_sync_argless(self): + def test_sync_argless(self) -> None: channel = self.make_request("GET", "/sync") self.assertEqual(channel.code, 200) @@ -58,7 +64,7 @@ class SyncFilterTestCase(unittest.HomeserverTestCase): sync.register_servlets, ] - def test_sync_filter_labels(self): + def test_sync_filter_labels(self) -> None: """Test that we can filter by a label.""" sync_filter = json.dumps( { @@ -77,7 +83,7 @@ def test_sync_filter_labels(self): self.assertEqual(events[0]["content"]["body"], "with right label", events[0]) self.assertEqual(events[1]["content"]["body"], "with right label", events[1]) - def test_sync_filter_not_labels(self): + def test_sync_filter_not_labels(self) -> None: """Test that we can filter by the absence of a label.""" sync_filter = json.dumps( { @@ -99,7 +105,7 @@ def test_sync_filter_not_labels(self): events[2]["content"]["body"], "with two wrong labels", events[2] ) - def test_sync_filter_labels_not_labels(self): + def test_sync_filter_labels_not_labels(self) -> None: """Test that we can filter by both a label and the absence of another label.""" sync_filter = json.dumps( { @@ -118,7 +124,7 @@ def test_sync_filter_labels_not_labels(self): self.assertEqual(len(events), 1, [event["content"] for event in events]) self.assertEqual(events[0]["content"]["body"], "with wrong label", events[0]) - def _test_sync_filter_labels(self, sync_filter): + def _test_sync_filter_labels(self, sync_filter: str) -> List[JsonDict]: user_id = self.register_user("kermit", "test") tok = self.login("kermit", "test") @@ -194,7 +200,7 @@ class SyncTypingTests(unittest.HomeserverTestCase): user_id = True hijack_auth = False - def test_sync_backwards_typing(self): + def test_sync_backwards_typing(self) -> None: """ If the typing serial goes backwards and the typing handler is then reset (such as when the master restarts and sets the typing serial to 0), we @@ -298,7 +304,7 @@ class SyncKnockTestCase( knock.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self.url = "/sync?since=%s" self.next_batch = "s0" @@ -336,7 +342,7 @@ def prepare(self, reactor, clock, hs): ) @override_config({"experimental_features": {"msc2403_enabled": True}}) - def test_knock_room_state(self): + def test_knock_room_state(self) -> None: """Tests that /sync returns state from a room after knocking on it.""" # Knock on a room channel = self.make_request( @@ -383,7 +389,7 @@ class ReadReceiptsTestCase(unittest.HomeserverTestCase): sync.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.url = "/sync?since=%s" self.next_batch = "s0" @@ -402,7 +408,7 @@ def prepare(self, reactor, clock, hs): self.helper.join(room=self.room_id, user=self.user2, tok=self.tok2) 
@override_config({"experimental_features": {"msc2285_enabled": True}}) - def test_hidden_read_receipts(self): + def test_hidden_read_receipts(self) -> None: # Send a message as the first user res = self.helper.send(self.room_id, body="hello", tok=self.tok) @@ -441,8 +447,8 @@ def test_hidden_read_receipts(self): ] ) def test_read_receipt_with_empty_body( - self, name, user_agent: str, expected_status_code: int - ): + self, name: str, user_agent: str, expected_status_code: int + ) -> None: # Send a message as the first user res = self.helper.send(self.room_id, body="hello", tok=self.tok) @@ -455,11 +461,11 @@ def test_read_receipt_with_empty_body( ) self.assertEqual(channel.code, expected_status_code) - def _get_read_receipt(self): + def _get_read_receipt(self) -> Optional[JsonDict]: """Syncs and returns the read receipt.""" # Checks if event is a read receipt - def is_read_receipt(event): + def is_read_receipt(event: JsonDict) -> bool: return event["type"] == "m.receipt" # Sync @@ -477,7 +483,8 @@ def is_read_receipt(event): ephemeral_events = channel.json_body["rooms"]["join"][self.room_id][ "ephemeral" ]["events"] - return next(filter(is_read_receipt, ephemeral_events), None) + receipt_event = filter(is_read_receipt, ephemeral_events) + return next(receipt_event, None) class UnreadMessagesTestCase(unittest.HomeserverTestCase): @@ -490,7 +497,7 @@ class UnreadMessagesTestCase(unittest.HomeserverTestCase): receipts.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.url = "/sync?since=%s" self.next_batch = "s0" @@ -533,7 +540,7 @@ def prepare(self, reactor, clock, hs): tok=self.tok, ) - def test_unread_counts(self): + def test_unread_counts(self) -> None: """Tests that /sync returns the right value for the unread count (MSC2654).""" # Check that our own messages don't increase the unread count. @@ -640,7 +647,7 @@ def test_unread_counts(self): ) self._check_unread_count(5) - def _check_unread_count(self, expected_count: int): + def _check_unread_count(self, expected_count: int) -> None: """Syncs and compares the unread count with the expected value.""" channel = self.make_request( @@ -669,7 +676,7 @@ class SyncCacheTestCase(unittest.HomeserverTestCase): sync.register_servlets, ] - def test_noop_sync_does_not_tightloop(self): + def test_noop_sync_does_not_tightloop(self) -> None: """If the sync times out, we shouldn't cache the result Essentially a regression test for #8518. @@ -720,7 +727,7 @@ class DeviceListSyncTestCase(unittest.HomeserverTestCase): devices.register_servlets, ] - def test_user_with_no_rooms_receives_self_device_list_updates(self): + def test_user_with_no_rooms_receives_self_device_list_updates(self) -> None: """Tests that a user with no rooms still receives their own device list updates""" device_id = "TESTDEVICE" From a711ae78a8f8ba406ff122035c8bf096fac9a26c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 23 Feb 2022 14:22:22 +0000 Subject: [PATCH 296/474] Add logging to `/sync` for debugging #11916 (#12068) --- changelog.d/12068.misc | 1 + synapse/handlers/sync.py | 9 +++++++++ 2 files changed, 10 insertions(+) create mode 100644 changelog.d/12068.misc diff --git a/changelog.d/12068.misc b/changelog.d/12068.misc new file mode 100644 index 000000000000..72b211e4f55f --- /dev/null +++ b/changelog.d/12068.misc @@ -0,0 +1 @@ +Add some logging to `/sync` to try and track down #11916. 
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 98eaad33184e..0aa3052fd6ac 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -697,6 +697,15 @@ async def get_state_at( else: # no events in this room - so presumably no state state = {} + + # (erikj) This should be rarely hit, but we've had some reports that + # we get more state down gappy syncs than we should, so let's add + # some logging. + logger.info( + "Failed to find any events in room %s at %s", + room_id, + stream_position.room_key, + ) return state async def compute_summary( From c56bfb08bc071368db23f3b1c593724eb4f205f0 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 23 Feb 2022 17:49:04 -0500 Subject: [PATCH 297/474] Add documentation for missing worker types. (#11599) And clean-up the endpoints which should be routed to workers. --- changelog.d/11599.doc | 1 + docs/workers.md | 90 ++++++++++++++++++++++++++++++++++++------- 2 files changed, 77 insertions(+), 14 deletions(-) create mode 100644 changelog.d/11599.doc diff --git a/changelog.d/11599.doc b/changelog.d/11599.doc new file mode 100644 index 000000000000..f07cfbef4e4c --- /dev/null +++ b/changelog.d/11599.doc @@ -0,0 +1 @@ +Document support for the `to_device`, `account_data`, `receipts`, and `presence` stream writers for workers. diff --git a/docs/workers.md b/docs/workers.md index dadde4d7265d..b82a6900acf4 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -178,8 +178,11 @@ recommend the use of `systemd` where available: for information on setting up ### `synapse.app.generic_worker` -This worker can handle API requests matching the following regular -expressions: +This worker can handle API requests matching the following regular expressions. +These endpoints can be routed to any worker. If a worker is set up to handle a +stream then, for maximum efficiency, additional endpoints should be routed to that +worker: refer to the [stream writers](#stream-writers) section below for further +information. 
# Sync requests ^/_matrix/client/(v2_alpha|r0|v3)/sync$ @@ -225,19 +228,23 @@ expressions: ^/_matrix/client/unstable/org.matrix.msc2946/rooms/.*/spaces$ ^/_matrix/client/(v1|unstable/org.matrix.msc2946)/rooms/.*/hierarchy$ ^/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$ - ^/_matrix/client/(api/v1|r0|v3|unstable)/account/3pid$ - ^/_matrix/client/(api/v1|r0|v3|unstable)/devices$ - ^/_matrix/client/(api/v1|r0|v3|unstable)/keys/query$ - ^/_matrix/client/(api/v1|r0|v3|unstable)/keys/changes$ + ^/_matrix/client/(r0|v3|unstable)/account/3pid$ + ^/_matrix/client/(r0|v3|unstable)/devices$ ^/_matrix/client/versions$ ^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$ - ^/_matrix/client/(api/v1|r0|v3|unstable)/joined_groups$ - ^/_matrix/client/(api/v1|r0|v3|unstable)/publicised_groups$ - ^/_matrix/client/(api/v1|r0|v3|unstable)/publicised_groups/ + ^/_matrix/client/(r0|v3|unstable)/joined_groups$ + ^/_matrix/client/(r0|v3|unstable)/publicised_groups$ + ^/_matrix/client/(r0|v3|unstable)/publicised_groups/ ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event/ ^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms$ ^/_matrix/client/(api/v1|r0|v3|unstable)/search$ + # Encryption requests + ^/_matrix/client/(r0|v3|unstable)/keys/query$ + ^/_matrix/client/(r0|v3|unstable)/keys/changes$ + ^/_matrix/client/(r0|v3|unstable)/keys/claim$ + ^/_matrix/client/(r0|v3|unstable)/room_keys/ + # Registration/login requests ^/_matrix/client/(api/v1|r0|v3|unstable)/login$ ^/_matrix/client/(r0|v3|unstable)/register$ @@ -251,6 +258,20 @@ expressions: ^/_matrix/client/(api/v1|r0|v3|unstable)/join/ ^/_matrix/client/(api/v1|r0|v3|unstable)/profile/ + # Device requests + ^/_matrix/client/(r0|v3|unstable)/sendToDevice/ + + # Account data requests + ^/_matrix/client/(r0|v3|unstable)/.*/tags + ^/_matrix/client/(r0|v3|unstable)/.*/account_data + + # Receipts requests + ^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt + ^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers + + # Presence requests + ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/ + Additionally, the following REST endpoints can be handled for GET requests: @@ -330,12 +351,10 @@ Additionally, there is *experimental* support for moving writing of specific streams (such as events) off of the main process to a particular worker. (This is only supported with Redis-based replication.) -Currently supported streams are `events` and `typing`. - To enable this, the worker must have a HTTP replication listener configured, -have a `worker_name` and be listed in the `instance_map` config. For example to -move event persistence off to a dedicated worker, the shared configuration would -include: +have a `worker_name` and be listed in the `instance_map` config. The same worker +can handle multiple streams. For example, to move event persistence off to a +dedicated worker, the shared configuration would include: ```yaml instance_map: @@ -347,6 +366,12 @@ stream_writers: events: event_persister1 ``` +Some of the streams have associated endpoints which, for maximum efficiency, should +be routed to the workers handling that stream. See below for the currently supported +streams and the endpoints associated with them: + +##### The `events` stream + The `events` stream also experimentally supports having multiple writers, where work is sharded between them by room ID. Note that you *must* restart all worker instances when adding or removing event persisters. 
An example `stream_writers` @@ -359,6 +384,43 @@ stream_writers: - event_persister2 ``` +##### The `typing` stream + +The following endpoints should be routed directly to the workers configured as +stream writers for the `typing` stream: + + ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing + +##### The `to_device` stream + +The following endpoints should be routed directly to the workers configured as +stream writers for the `to_device` stream: + + ^/_matrix/client/(api/v1|r0|v3|unstable)/sendToDevice/ + +##### The `account_data` stream + +The following endpoints should be routed directly to the workers configured as +stream writers for the `account_data` stream: + + ^/_matrix/client/(api/v1|r0|v3|unstable)/.*/tags + ^/_matrix/client/(api/v1|r0|v3|unstable)/.*/account_data + +##### The `receipts` stream + +The following endpoints should be routed directly to the workers configured as +stream writers for the `receipts` stream: + + ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/receipt + ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/read_markers + +##### The `presence` stream + +The following endpoints should be routed directly to the workers configured as +stream writers for the `presence` stream: + + ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/ + #### Background tasks There is also *experimental* support for moving background tasks to a separate From 41cf4c2cf6432336cc7477f130a2847449cff99a Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Thu, 24 Feb 2022 11:52:28 +0000 Subject: [PATCH 298/474] Fix non-strings in the `event_search` table (#12037) Don't attempt to add non-string `value`s to `event_search` and add a background update to clear out bad rows from `event_search` when using sqlite. Signed-off-by: Sean Quah --- changelog.d/12037.bugfix | 1 + synapse/storage/databases/main/events.py | 18 +-- synapse/storage/databases/main/search.py | 26 ++++ ...e_non_strings_from_event_search.sql.sqlite | 22 ++++ tests/storage/test_room_search.py | 117 +++++++++++++++++- 5 files changed, 173 insertions(+), 11 deletions(-) create mode 100644 changelog.d/12037.bugfix create mode 100644 synapse/storage/schema/main/delta/68/05_delete_non_strings_from_event_search.sql.sqlite diff --git a/changelog.d/12037.bugfix b/changelog.d/12037.bugfix new file mode 100644 index 000000000000..9295cb4dc0d9 --- /dev/null +++ b/changelog.d/12037.bugfix @@ -0,0 +1 @@ +Properly fix a long-standing bug where wrong data could be inserted in the `event_search` table when using sqlite. This could block running `synapse_port_db` with an "argument of type 'int' is not iterable" error. This bug was partially fixed by a change in Synapse 1.44.0. 
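Some context on why this cleanup is sqlite-specific: the real `event_search` table on sqlite is backed by an FTS virtual table (not shown in this excerpt), and its columns have no type affinity, so a malformed integer body is stored as an integer instead of being coerced to text. An ordinary untyped column reproduces the same behaviour; a minimal sqlite3 sketch with a stand-in table:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
# No declared column types -> no affinity: values keep their original
# storage class instead of being coerced to TEXT on insert.
conn.execute("CREATE TABLE event_search_demo (event_id, value)")
conn.executemany(
    "INSERT INTO event_search_demo VALUES (?, ?)",
    [("event1", "hi"), ("event2", "2"), ("event3", 3)],  # note the bare int
)

# typeof() exposes the per-value storage class, which is exactly what the
# new background update and delta file key off:
rows = conn.execute("SELECT event_id, typeof(value) FROM event_search_demo").fetchall()
print(rows)  # [('event1', 'text'), ('event2', 'text'), ('event3', 'integer')]

# The same DELETE as the background update below:
conn.execute("DELETE FROM event_search_demo WHERE typeof(value) != 'text'")
print(conn.execute("SELECT value FROM event_search_demo").fetchall())
# [('hi',), ('2',)]
```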
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index a1d7a9b41300..e53e84054ad0 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1473,10 +1473,10 @@ def _store_rejected_events_txn(self, txn, events_and_contexts): def _update_metadata_tables_txn( self, - txn, + txn: LoggingTransaction, *, - events_and_contexts, - all_events_and_contexts, + events_and_contexts: List[Tuple[EventBase, EventContext]], + all_events_and_contexts: List[Tuple[EventBase, EventContext]], inhibit_local_membership_updates: bool = False, ): """Update all the miscellaneous tables for new events @@ -1953,20 +1953,20 @@ def _handle_redaction(self, txn, redacted_event_id): txn, table="event_relations", keyvalues={"event_id": redacted_event_id} ) - def _store_room_topic_txn(self, txn, event): - if hasattr(event, "content") and "topic" in event.content: + def _store_room_topic_txn(self, txn: LoggingTransaction, event: EventBase): + if isinstance(event.content.get("topic"), str): self.store_event_search_txn( txn, event, "content.topic", event.content["topic"] ) - def _store_room_name_txn(self, txn, event): - if hasattr(event, "content") and "name" in event.content: + def _store_room_name_txn(self, txn: LoggingTransaction, event: EventBase): + if isinstance(event.content.get("name"), str): self.store_event_search_txn( txn, event, "content.name", event.content["name"] ) - def _store_room_message_txn(self, txn, event): - if hasattr(event, "content") and "body" in event.content: + def _store_room_message_txn(self, txn: LoggingTransaction, event: EventBase): + if isinstance(event.content.get("body"), str): self.store_event_search_txn( txn, event, "content.body", event.content["body"] ) diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index acea300ed343..e23b1190726b 100644 --- a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -115,6 +115,7 @@ class SearchBackgroundUpdateStore(SearchWorkerStore): EVENT_SEARCH_ORDER_UPDATE_NAME = "event_search_order" EVENT_SEARCH_USE_GIST_POSTGRES_NAME = "event_search_postgres_gist" EVENT_SEARCH_USE_GIN_POSTGRES_NAME = "event_search_postgres_gin" + EVENT_SEARCH_DELETE_NON_STRINGS = "event_search_sqlite_delete_non_strings" def __init__( self, @@ -147,6 +148,10 @@ def __init__( self.EVENT_SEARCH_USE_GIN_POSTGRES_NAME, self._background_reindex_gin_search ) + self.db_pool.updates.register_background_update_handler( + self.EVENT_SEARCH_DELETE_NON_STRINGS, self._background_delete_non_strings + ) + async def _background_reindex_search(self, progress, batch_size): # we work through the events table from highest stream id to lowest target_min_stream_id = progress["target_min_stream_id_inclusive"] @@ -372,6 +377,27 @@ def reindex_search_txn(txn): return num_rows + async def _background_delete_non_strings( + self, progress: JsonDict, batch_size: int + ) -> int: + """Deletes rows with non-string `value`s from `event_search` if using sqlite. + + Prior to Synapse 1.44.0, malformed events received over federation could cause integers + to be inserted into the `event_search` table when using sqlite. 
+ """ + + def delete_non_strings_txn(txn: LoggingTransaction) -> None: + txn.execute("DELETE FROM event_search WHERE typeof(value) != 'text'") + + await self.db_pool.runInteraction( + self.EVENT_SEARCH_DELETE_NON_STRINGS, delete_non_strings_txn + ) + + await self.db_pool.updates._end_background_update( + self.EVENT_SEARCH_DELETE_NON_STRINGS + ) + return 1 + class SearchStore(SearchBackgroundUpdateStore): def __init__( diff --git a/synapse/storage/schema/main/delta/68/05_delete_non_strings_from_event_search.sql.sqlite b/synapse/storage/schema/main/delta/68/05_delete_non_strings_from_event_search.sql.sqlite new file mode 100644 index 000000000000..140df652642d --- /dev/null +++ b/synapse/storage/schema/main/delta/68/05_delete_non_strings_from_event_search.sql.sqlite @@ -0,0 +1,22 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +-- Delete rows with non-string `value`s from `event_search` if using sqlite. +-- +-- Prior to Synapse 1.44.0, malformed events received over federation could +-- cause integers to be inserted into the `event_search` table. +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (6805, 'event_search_sqlite_delete_non_strings', '{}'); diff --git a/tests/storage/test_room_search.py b/tests/storage/test_room_search.py index befaa0fcee08..d62e01726cba 100644 --- a/tests/storage/test_room_search.py +++ b/tests/storage/test_room_search.py @@ -13,13 +13,16 @@ # limitations under the License. import synapse.rest.admin +from synapse.api.constants import EventTypes +from synapse.api.errors import StoreError from synapse.rest.client import login, room from synapse.storage.engines import PostgresEngine -from tests.unittest import HomeserverTestCase +from tests.unittest import HomeserverTestCase, skip_unless +from tests.utils import USE_POSTGRES_FOR_TESTS -class NullByteInsertionTest(HomeserverTestCase): +class EventSearchInsertionTest(HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, login.register_servlets, @@ -72,3 +75,113 @@ def test_null_byte(self): ) if isinstance(store.database_engine, PostgresEngine): self.assertIn("alice", result.get("highlights")) + + def test_non_string(self): + """Test that non-string `value`s are not inserted into `event_search`. + + This is particularly important when using sqlite, since a sqlite column can hold + both strings and integers. When using Postgres, integers are automatically + converted to strings. + + Regression test for #11918. 
+ """ + store = self.hs.get_datastores().main + + # Register a user and create a room + user_id = self.register_user("alice", "password") + access_token = self.login("alice", "password") + room_id = self.helper.create_room_as("alice", tok=access_token) + room_version = self.get_success(store.get_room_version(room_id)) + + # Construct a message with a numeric body to be received over federation + # The message can't be sent using the client API, since Synapse's event + # validation will reject it. + prev_event_ids = self.get_success(store.get_prev_events_for_room(room_id)) + prev_event = self.get_success(store.get_event(prev_event_ids[0])) + prev_state_map = self.get_success( + self.hs.get_storage().state.get_state_ids_for_event(prev_event_ids[0]) + ) + + event_dict = { + "type": EventTypes.Message, + "content": {"msgtype": "m.text", "body": 2}, + "room_id": room_id, + "sender": user_id, + "depth": prev_event.depth + 1, + "prev_events": prev_event_ids, + "origin_server_ts": self.clock.time_msec(), + } + builder = self.hs.get_event_builder_factory().for_room_version( + room_version, event_dict + ) + event = self.get_success( + builder.build( + prev_event_ids=prev_event_ids, + auth_event_ids=self.hs.get_event_auth_handler().compute_auth_events( + builder, + prev_state_map, + for_verification=False, + ), + depth=event_dict["depth"], + ) + ) + + # Receive the event + self.get_success( + self.hs.get_federation_event_handler().on_receive_pdu( + self.hs.hostname, event + ) + ) + + # The event should not have an entry in the `event_search` table + f = self.get_failure( + store.db_pool.simple_select_one_onecol( + "event_search", + {"room_id": room_id, "event_id": event.event_id}, + "event_id", + ), + StoreError, + ) + self.assertEqual(f.value.code, 404) + + @skip_unless(not USE_POSTGRES_FOR_TESTS, "requires sqlite") + def test_sqlite_non_string_deletion_background_update(self): + """Test the background update to delete bad rows from `event_search`.""" + store = self.hs.get_datastores().main + + # Populate `event_search` with dummy data + self.get_success( + store.db_pool.simple_insert_many( + "event_search", + keys=["event_id", "room_id", "key", "value"], + values=[ + ("event1", "room_id", "content.body", "hi"), + ("event2", "room_id", "content.body", "2"), + ("event3", "room_id", "content.body", 3), + ], + desc="populate_event_search", + ) + ) + + # Run the background update + store.db_pool.updates._all_done = False + self.get_success( + store.db_pool.simple_insert( + "background_updates", + { + "update_name": "event_search_sqlite_delete_non_strings", + "progress_json": "{}", + }, + ) + ) + self.wait_for_background_updates() + + # The non-string `value`s ought to be gone now. + values = self.get_success( + store.db_pool.simple_select_onecol( + "event_search", + {"room_id": "room_id"}, + "value", + ), + ) + self.assertCountEqual(values, ["hi", "2"]) From 2cc5ea933dbe65445e3711bb3f05022b007029ea Mon Sep 17 00:00:00 2001 From: reivilibre Date: Thu, 24 Feb 2022 17:55:45 +0000 Subject: [PATCH 299/474] Add support for MSC3202: sending one-time key counts and fallback key usage states to Application Services. 
(#11617) Co-authored-by: Erik Johnston --- changelog.d/11617.feature | 1 + synapse/appservice/__init__.py | 16 ++ synapse/appservice/api.py | 20 +- synapse/appservice/scheduler.py | 98 ++++++++- synapse/config/appservice.py | 13 +- synapse/config/experimental.py | 16 +- synapse/storage/databases/main/appservice.py | 33 ++- .../storage/databases/main/end_to_end_keys.py | 112 ++++++++++ tests/appservice/test_scheduler.py | 55 +++-- tests/handlers/test_appservice.py | 194 +++++++++++++++++- tests/storage/test_appservice.py | 8 +- 11 files changed, 528 insertions(+), 38 deletions(-) create mode 100644 changelog.d/11617.feature diff --git a/changelog.d/11617.feature b/changelog.d/11617.feature new file mode 100644 index 000000000000..cf03f00e7c4d --- /dev/null +++ b/changelog.d/11617.feature @@ -0,0 +1 @@ +Add support for MSC3202: sending one-time key counts and fallback key usage states to Application Services. \ No newline at end of file diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index a340a8c9c703..4d3f8e492384 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py @@ -31,6 +31,14 @@ logger = logging.getLogger(__name__) +# Type for the `device_one_time_key_counts` field in an appservice transaction +# user ID -> {device ID -> {algorithm -> count}} +TransactionOneTimeKeyCounts = Dict[str, Dict[str, Dict[str, int]]] + +# Type for the `device_unused_fallback_keys` field in an appservice transaction +# user ID -> {device ID -> [algorithm]} +TransactionUnusedFallbackKeys = Dict[str, Dict[str, List[str]]] + class ApplicationServiceState(Enum): DOWN = "down" @@ -72,6 +80,7 @@ def __init__( rate_limited: bool = True, ip_range_whitelist: Optional[IPSet] = None, supports_ephemeral: bool = False, + msc3202_transaction_extensions: bool = False, ): self.token = token self.url = ( @@ -84,6 +93,7 @@ def __init__( self.id = id self.ip_range_whitelist = ip_range_whitelist self.supports_ephemeral = supports_ephemeral + self.msc3202_transaction_extensions = msc3202_transaction_extensions if "|" in self.id: raise Exception("application service ID cannot contain '|' character") @@ -339,12 +349,16 @@ def __init__( events: List[EventBase], ephemeral: List[JsonDict], to_device_messages: List[JsonDict], + one_time_key_counts: TransactionOneTimeKeyCounts, + unused_fallback_keys: TransactionUnusedFallbackKeys, ): self.service = service self.id = id self.events = events self.ephemeral = ephemeral self.to_device_messages = to_device_messages + self.one_time_key_counts = one_time_key_counts + self.unused_fallback_keys = unused_fallback_keys async def send(self, as_api: "ApplicationServiceApi") -> bool: """Sends this transaction using the provided AS API interface. 
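To make the two new type aliases concrete, here is an illustrative value for each (the user, device and count are made up; `signed_curve25519` is the usual Matrix one-time/fallback key algorithm):

```python
from typing import Dict, List

# TransactionOneTimeKeyCounts: user ID -> {device ID -> {algorithm -> count}}
one_time_key_counts: Dict[str, Dict[str, Dict[str, int]]] = {
    "@_bridge_bot:example.org": {
        "DEVICEID": {"signed_curve25519": 50},
    },
}

# TransactionUnusedFallbackKeys: user ID -> {device ID -> [algorithm]}
unused_fallback_keys: Dict[str, Dict[str, List[str]]] = {
    "@_bridge_bot:example.org": {
        "DEVICEID": ["signed_curve25519"],
    },
}
```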
@@ -359,6 +373,8 @@ async def send(self, as_api: "ApplicationServiceApi") -> bool: events=self.events, ephemeral=self.ephemeral, to_device_messages=self.to_device_messages, + one_time_key_counts=self.one_time_key_counts, + unused_fallback_keys=self.unused_fallback_keys, txn_id=self.id, ) diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index 73be7ff3d4a1..a0ea958af62a 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -19,6 +19,11 @@ from synapse.api.constants import EventTypes, Membership, ThirdPartyEntityKind from synapse.api.errors import CodeMessageException +from synapse.appservice import ( + ApplicationService, + TransactionOneTimeKeyCounts, + TransactionUnusedFallbackKeys, +) from synapse.events import EventBase from synapse.events.utils import serialize_event from synapse.http.client import SimpleHttpClient @@ -26,7 +31,6 @@ from synapse.util.caches.response_cache import ResponseCache if TYPE_CHECKING: - from synapse.appservice import ApplicationService from synapse.server import HomeServer logger = logging.getLogger(__name__) @@ -219,6 +223,8 @@ async def push_bulk( events: List[EventBase], ephemeral: List[JsonDict], to_device_messages: List[JsonDict], + one_time_key_counts: TransactionOneTimeKeyCounts, + unused_fallback_keys: TransactionUnusedFallbackKeys, txn_id: Optional[int] = None, ) -> bool: """ @@ -252,7 +258,7 @@ async def push_bulk( uri = service.url + ("/transactions/%s" % urllib.parse.quote(str(txn_id))) # Never send ephemeral events to appservices that do not support it - body: Dict[str, List[JsonDict]] = {"events": serialized_events} + body: JsonDict = {"events": serialized_events} if service.supports_ephemeral: body.update( { @@ -262,6 +268,16 @@ async def push_bulk( } ) + if service.msc3202_transaction_extensions: + if one_time_key_counts: + body[ + "org.matrix.msc3202.device_one_time_key_counts" + ] = one_time_key_counts + if unused_fallback_keys: + body[ + "org.matrix.msc3202.device_unused_fallback_keys" + ] = unused_fallback_keys + try: await self.put_json( uri=uri, diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index b4e602e880b2..72417151ba04 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -54,12 +54,19 @@ Callable, Collection, Dict, + Iterable, List, Optional, Set, + Tuple, ) -from synapse.appservice import ApplicationService, ApplicationServiceState +from synapse.appservice import ( + ApplicationService, + ApplicationServiceState, + TransactionOneTimeKeyCounts, + TransactionUnusedFallbackKeys, +) from synapse.appservice.api import ApplicationServiceApi from synapse.events import EventBase from synapse.logging.context import run_in_background @@ -96,7 +103,7 @@ def __init__(self, hs: "HomeServer"): self.as_api = hs.get_application_service_api() self.txn_ctrl = _TransactionController(self.clock, self.store, self.as_api) - self.queuer = _ServiceQueuer(self.txn_ctrl, self.clock) + self.queuer = _ServiceQueuer(self.txn_ctrl, self.clock, hs) async def start(self) -> None: logger.info("Starting appservice scheduler") @@ -153,7 +160,9 @@ class _ServiceQueuer: appservice at a given time. 
""" - def __init__(self, txn_ctrl: "_TransactionController", clock: Clock): + def __init__( + self, txn_ctrl: "_TransactionController", clock: Clock, hs: "HomeServer" + ): # dict of {service_id: [events]} self.queued_events: Dict[str, List[EventBase]] = {} # dict of {service_id: [events]} @@ -165,6 +174,10 @@ def __init__(self, txn_ctrl: "_TransactionController", clock: Clock): self.requests_in_flight: Set[str] = set() self.txn_ctrl = txn_ctrl self.clock = clock + self._msc3202_transaction_extensions_enabled: bool = ( + hs.config.experimental.msc3202_transaction_extensions + ) + self._store = hs.get_datastores().main def start_background_request(self, service: ApplicationService) -> None: # start a sender for this appservice if we don't already have one @@ -202,15 +215,84 @@ async def _send_request(self, service: ApplicationService) -> None: if not events and not ephemeral and not to_device_messages_to_send: return + one_time_key_counts: Optional[TransactionOneTimeKeyCounts] = None + unused_fallback_keys: Optional[TransactionUnusedFallbackKeys] = None + + if ( + self._msc3202_transaction_extensions_enabled + and service.msc3202_transaction_extensions + ): + # Compute the one-time key counts and fallback key usage states + # for the users which are mentioned in this transaction, + # as well as the appservice's sender. + ( + one_time_key_counts, + unused_fallback_keys, + ) = await self._compute_msc3202_otk_counts_and_fallback_keys( + service, events, ephemeral, to_device_messages_to_send + ) + try: await self.txn_ctrl.send( - service, events, ephemeral, to_device_messages_to_send + service, + events, + ephemeral, + to_device_messages_to_send, + one_time_key_counts, + unused_fallback_keys, ) except Exception: logger.exception("AS request failed") finally: self.requests_in_flight.discard(service.id) + async def _compute_msc3202_otk_counts_and_fallback_keys( + self, + service: ApplicationService, + events: Iterable[EventBase], + ephemerals: Iterable[JsonDict], + to_device_messages: Iterable[JsonDict], + ) -> Tuple[TransactionOneTimeKeyCounts, TransactionUnusedFallbackKeys]: + """ + Given a list of the events, ephemeral messages and to-device messages, + - first computes a list of application services users that may have + interesting updates to the one-time key counts or fallback key usage. + - then computes one-time key counts and fallback key usages for those users. + Given a list of application service users that are interesting, + compute one-time key counts and fallback key usages for the users. + """ + + # Set of 'interesting' users who may have updates + users: Set[str] = set() + + # The sender is always included + users.add(service.sender) + + # All AS users that would receive the PDUs or EDUs sent to these rooms + # are classed as 'interesting'. + rooms_of_interesting_users: Set[str] = set() + # PDUs + rooms_of_interesting_users.update(event.room_id for event in events) + # EDUs + rooms_of_interesting_users.update( + ephemeral["room_id"] for ephemeral in ephemerals + ) + + # Look up the AS users in those rooms + for room_id in rooms_of_interesting_users: + users.update( + await self._store.get_app_service_users_in_room(room_id, service) + ) + + # Add recipients of to-device messages. + # device_message["user_id"] is the ID of the recipient. 
+ users.update(device_message["user_id"] for device_message in to_device_messages) + + # Compute and return the counts / fallback key usage states + otk_counts = await self._store.count_bulk_e2e_one_time_keys_for_as(users) + unused_fbks = await self._store.get_e2e_bulk_unused_fallback_key_types(users) + return otk_counts, unused_fbks + class _TransactionController: """Transaction manager. @@ -238,6 +320,8 @@ async def send( events: List[EventBase], ephemeral: Optional[List[JsonDict]] = None, to_device_messages: Optional[List[JsonDict]] = None, + one_time_key_counts: Optional[TransactionOneTimeKeyCounts] = None, + unused_fallback_keys: Optional[TransactionUnusedFallbackKeys] = None, ) -> None: """ Create a transaction with the given data and send to the provided @@ -248,6 +332,10 @@ async def send( events: The persistent events to include in the transaction. ephemeral: The ephemeral events to include in the transaction. to_device_messages: The to-device messages to include in the transaction. + one_time_key_counts: Counts of remaining one-time keys for relevant + appservice devices in the transaction. + unused_fallback_keys: Lists of unused fallback keys for relevant + appservice devices in the transaction. """ try: txn = await self.store.create_appservice_txn( @@ -255,6 +343,8 @@ async def send( events=events, ephemeral=ephemeral or [], to_device_messages=to_device_messages or [], + one_time_key_counts=one_time_key_counts or {}, + unused_fallback_keys=unused_fallback_keys or {}, ) service_is_up = await self._is_service_up(service) if service_is_up: diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py index 7fad2e0422b8..439bfe15269c 100644 --- a/synapse/config/appservice.py +++ b/synapse/config/appservice.py @@ -166,6 +166,16 @@ def _load_appservice( supports_ephemeral = as_info.get("de.sorunome.msc2409.push_ephemeral", False) + # Opt-in flag for the MSC3202-specific transactional behaviour. + # When enabled, appservice transactions contain the following information: + # - device One-Time Key counts + # - device unused fallback key usage states + msc3202_transaction_extensions = as_info.get("org.matrix.msc3202", False) + if not isinstance(msc3202_transaction_extensions, bool): + raise ValueError( + "The `org.matrix.msc3202` option should be true or false if specified." + ) + return ApplicationService( token=as_info["as_token"], hostname=hostname, @@ -174,8 +184,9 @@ def _load_appservice( hs_token=as_info["hs_token"], sender=user_id, id=as_info["id"], - supports_ephemeral=supports_ephemeral, protocols=protocols, rate_limited=rate_limited, ip_range_whitelist=ip_range_whitelist, + supports_ephemeral=supports_ephemeral, + msc3202_transaction_extensions=msc3202_transaction_extensions, ) diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 772eb3501382..41338b39df21 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -47,11 +47,6 @@ def read_config(self, config: JsonDict, **kwargs): # MSC3030 (Jump to date API endpoint) self.msc3030_enabled: bool = experimental.get("msc3030_enabled", False) - # The portion of MSC3202 which is related to device masquerading. - self.msc3202_device_masquerading_enabled: bool = experimental.get( - "msc3202_device_masquerading", False - ) - # MSC2409 (this setting only relates to optionally sending to-device messages). # Presence, typing and read receipt EDUs are already sent to application services that # have opted in to receive them. 
If enabled, this adds to-device messages to that list. @@ -59,6 +54,17 @@ def read_config(self, config: JsonDict, **kwargs): "msc2409_to_device_messages_enabled", False ) + # The portion of MSC3202 which is related to device masquerading. + self.msc3202_device_masquerading_enabled: bool = experimental.get( + "msc3202_device_masquerading", False + ) + + # Portion of MSC3202 related to transaction extensions: + # sending one-time key counts and fallback key usage to application services. + self.msc3202_transaction_extensions: bool = experimental.get( + "msc3202_transaction_extensions", False + ) + # MSC3706 (server-side support for partial state in /send_join responses) self.msc3706_enabled: bool = experimental.get("msc3706_enabled", False) diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py index 304814af5dc5..06944465582b 100644 --- a/synapse/storage/databases/main/appservice.py +++ b/synapse/storage/databases/main/appservice.py @@ -20,14 +20,18 @@ ApplicationService, ApplicationServiceState, AppServiceTransaction, + TransactionOneTimeKeyCounts, + TransactionUnusedFallbackKeys, ) from synapse.config.appservice import load_appservices from synapse.events import EventBase -from synapse.storage._base import SQLBaseStore, db_to_json +from synapse.storage._base import db_to_json from synapse.storage.database import DatabasePool, LoggingDatabaseConnection from synapse.storage.databases.main.events_worker import EventsWorkerStore +from synapse.storage.databases.main.roommember import RoomMemberWorkerStore from synapse.types import JsonDict from synapse.util import json_encoder +from synapse.util.caches.descriptors import _CacheContext, cached if TYPE_CHECKING: from synapse.server import HomeServer @@ -56,7 +60,7 @@ def _make_exclusive_regex( return exclusive_user_pattern -class ApplicationServiceWorkerStore(SQLBaseStore): +class ApplicationServiceWorkerStore(RoomMemberWorkerStore): def __init__( self, database: DatabasePool, @@ -124,6 +128,18 @@ def get_app_service_by_id(self, as_id: str) -> Optional[ApplicationService]: return service return None + @cached(iterable=True, cache_context=True) + async def get_app_service_users_in_room( + self, + room_id: str, + app_service: "ApplicationService", + cache_context: _CacheContext, + ) -> List[str]: + users_in_room = await self.get_users_in_room( + room_id, on_invalidate=cache_context.invalidate + ) + return list(filter(app_service.is_interested_in_user, users_in_room)) + class ApplicationServiceStore(ApplicationServiceWorkerStore): # This is currently empty due to there not being any AS storage functions @@ -199,6 +215,8 @@ async def create_appservice_txn( events: List[EventBase], ephemeral: List[JsonDict], to_device_messages: List[JsonDict], + one_time_key_counts: TransactionOneTimeKeyCounts, + unused_fallback_keys: TransactionUnusedFallbackKeys, ) -> AppServiceTransaction: """Atomically creates a new transaction for this application service with the given list of events. Ephemeral events are NOT persisted to the @@ -209,6 +227,10 @@ async def create_appservice_txn( events: A list of persistent events to put in the transaction. ephemeral: A list of ephemeral events to put in the transaction. to_device_messages: A list of to-device messages to put in the transaction. + one_time_key_counts: Counts of remaining one-time keys for relevant + appservice devices in the transaction. + unused_fallback_keys: Lists of unused fallback keys for relevant + appservice devices in the transaction. 
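+                (Illustrative shapes, matching the storage helpers added below;
+                the user and device IDs here are made up:
+                one_time_key_counts:
+                    {"@alice:test": {"DEV1": {"signed_curve25519": 50}}}
+                unused_fallback_keys:
+                    {"@alice:test": {"DEV1": ["signed_curve25519"]}})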
Returns: A new transaction. @@ -244,6 +266,8 @@ def _create_appservice_txn(txn): events=events, ephemeral=ephemeral, to_device_messages=to_device_messages, + one_time_key_counts=one_time_key_counts, + unused_fallback_keys=unused_fallback_keys, ) return await self.db_pool.runInteraction( @@ -335,12 +359,17 @@ def _get_oldest_unsent_txn(txn): events = await self.get_events_as_list(event_ids) + # TODO: to-device messages, one-time key counts and unused fallback keys + # are not yet populated for catch-up transactions. + # We likely want to populate those for reliability. return AppServiceTransaction( service=service, id=entry["txn_id"], events=events, ephemeral=[], to_device_messages=[], + one_time_key_counts={}, + unused_fallback_keys={}, ) def _get_last_txn(self, txn, service_id: Optional[str]) -> int: diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 1f8447b5076f..9b293475c871 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -29,6 +29,10 @@ from canonicaljson import encode_canonical_json from synapse.api.constants import DeviceKeyAlgorithms +from synapse.appservice import ( + TransactionOneTimeKeyCounts, + TransactionUnusedFallbackKeys, +) from synapse.logging.opentracing import log_kv, set_tag, trace from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage.database import ( @@ -439,6 +443,114 @@ def _count_e2e_one_time_keys(txn: LoggingTransaction) -> Dict[str, int]: "count_e2e_one_time_keys", _count_e2e_one_time_keys ) + async def count_bulk_e2e_one_time_keys_for_as( + self, user_ids: Collection[str] + ) -> TransactionOneTimeKeyCounts: + """ + Counts, in bulk, the one-time keys for all the users specified. + Intended to be used by application services for populating OTK counts in + transactions. + + Return structure is of the shape: + user_id -> device_id -> algorithm -> count + Empty algorithm -> count dicts are created if needed to represent a + lack of unused one-time keys. + """ + + def _count_bulk_e2e_one_time_keys_txn( + txn: LoggingTransaction, + ) -> TransactionOneTimeKeyCounts: + user_in_where_clause, user_parameters = make_in_list_sql_clause( + self.database_engine, "user_id", user_ids + ) + sql = f""" + SELECT user_id, device_id, algorithm, COUNT(key_id) + FROM devices + LEFT JOIN e2e_one_time_keys_json USING (user_id, device_id) + WHERE {user_in_where_clause} + GROUP BY user_id, device_id, algorithm + """ + txn.execute(sql, user_parameters) + + result: TransactionOneTimeKeyCounts = {} + + for user_id, device_id, algorithm, count in txn: + # We deliberately construct empty dictionaries for + # users and devices without any unused one-time keys. + # We *could* omit these empty dicts if there have been no + # changes since the last transaction, but we currently don't + # do any change tracking! + device_count_by_algo = result.setdefault(user_id, {}).setdefault( + device_id, {} + ) + if algorithm is not None: + # algorithm will be None if this device has no keys. + device_count_by_algo[algorithm] = count + + return result + + return await self.db_pool.runInteraction( + "count_bulk_e2e_one_time_keys", _count_bulk_e2e_one_time_keys_txn + ) + + async def get_e2e_bulk_unused_fallback_key_types( + self, user_ids: Collection[str] + ) -> TransactionUnusedFallbackKeys: + """ + Finds, in bulk, the types of unused fallback keys for all the users specified. 
+ Intended to be used by application services for populating unused fallback + keys in transactions. + + Return structure is of the shape: + user_id -> device_id -> algorithms + Empty lists are created for devices if there are no unused fallback + keys. This matches the response structure of MSC3202. + """ + if len(user_ids) == 0: + return {} + + def _get_bulk_e2e_unused_fallback_keys_txn( + txn: LoggingTransaction, + ) -> TransactionUnusedFallbackKeys: + user_in_where_clause, user_parameters = make_in_list_sql_clause( + self.database_engine, "devices.user_id", user_ids + ) + # We can't use USING here because we require the `.used` condition + # to be part of the JOIN condition so that we generate empty lists + # when all keys are used (as opposed to just when there are no keys at all). + sql = f""" + SELECT devices.user_id, devices.device_id, algorithm + FROM devices + LEFT JOIN e2e_fallback_keys_json AS fallback_keys + ON devices.user_id = fallback_keys.user_id + AND devices.device_id = fallback_keys.device_id + AND NOT fallback_keys.used + WHERE + {user_in_where_clause} + """ + txn.execute(sql, user_parameters) + + result: TransactionUnusedFallbackKeys = {} + + for user_id, device_id, algorithm in txn: + # We deliberately construct empty dictionaries and lists for + # users and devices without any unused fallback keys. + # We *could* omit these empty dicts if there have been no + # changes since the last transaction, but we currently don't + # do any change tracking! + device_unused_keys = result.setdefault(user_id, {}).setdefault( + device_id, [] + ) + if algorithm is not None: + # algorithm will be None if this device has no keys. + device_unused_keys.append(algorithm) + + return result + + return await self.db_pool.runInteraction( + "_get_bulk_e2e_unused_fallback_keys", _get_bulk_e2e_unused_fallback_keys_txn + ) + async def set_e2e_fallback_keys( self, user_id: str, device_id: str, fallback_keys: JsonDict ) -> None: diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py index 8fb6687f89eb..b9dc4dfe1b1d 100644 --- a/tests/appservice/test_scheduler.py +++ b/tests/appservice/test_scheduler.py @@ -68,6 +68,8 @@ def test_single_service_up_txn_sent(self): events=events, ephemeral=[], to_device_messages=[], # txn made and saved + one_time_key_counts={}, + unused_fallback_keys={}, ) self.assertEquals(0, len(self.txnctrl.recoverers)) # no recoverer made txn.complete.assert_called_once_with(self.store) # txn completed @@ -92,6 +94,8 @@ def test_single_service_down(self): events=events, ephemeral=[], to_device_messages=[], # txn made and saved + one_time_key_counts={}, + unused_fallback_keys={}, ) self.assertEquals(0, txn.send.call_count) # txn not sent though self.assertEquals(0, txn.complete.call_count) # or completed @@ -114,7 +118,12 @@ def test_single_service_up_txn_not_sent(self): self.successResultOf(defer.ensureDeferred(self.txnctrl.send(service, events))) self.store.create_appservice_txn.assert_called_once_with( - service=service, events=events, ephemeral=[], to_device_messages=[] + service=service, + events=events, + ephemeral=[], + to_device_messages=[], + one_time_key_counts={}, + unused_fallback_keys={}, ) self.assertEquals(1, self.recoverer_fn.call_count) # recoverer made self.assertEquals(1, self.recoverer.recover.call_count) # and invoked @@ -216,7 +225,7 @@ def test_send_single_event_no_queue(self): service = Mock(id=4) event = Mock() self.scheduler.enqueue_for_appservice(service, events=[event]) - 
self.txn_ctrl.send.assert_called_once_with(service, [event], [], []) + self.txn_ctrl.send.assert_called_once_with(service, [event], [], [], None, None) def test_send_single_event_with_queue(self): d = defer.Deferred() @@ -231,11 +240,13 @@ def test_send_single_event_with_queue(self): # (call enqueue_for_appservice multiple times deliberately) self.scheduler.enqueue_for_appservice(service, events=[event2]) self.scheduler.enqueue_for_appservice(service, events=[event3]) - self.txn_ctrl.send.assert_called_with(service, [event], [], []) + self.txn_ctrl.send.assert_called_with(service, [event], [], [], None, None) self.assertEquals(1, self.txn_ctrl.send.call_count) # Resolve the send event: expect the queued events to be sent d.callback(service) - self.txn_ctrl.send.assert_called_with(service, [event2, event3], [], []) + self.txn_ctrl.send.assert_called_with( + service, [event2, event3], [], [], None, None + ) self.assertEquals(2, self.txn_ctrl.send.call_count) def test_multiple_service_queues(self): @@ -261,15 +272,15 @@ def do_send(*args, **kwargs): # send events for different ASes and make sure they are sent self.scheduler.enqueue_for_appservice(srv1, events=[srv_1_event]) self.scheduler.enqueue_for_appservice(srv1, events=[srv_1_event2]) - self.txn_ctrl.send.assert_called_with(srv1, [srv_1_event], [], []) + self.txn_ctrl.send.assert_called_with(srv1, [srv_1_event], [], [], None, None) self.scheduler.enqueue_for_appservice(srv2, events=[srv_2_event]) self.scheduler.enqueue_for_appservice(srv2, events=[srv_2_event2]) - self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event], [], []) + self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event], [], [], None, None) # make sure callbacks for a service only send queued events for THAT # service srv_2_defer.callback(srv2) - self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event2], [], []) + self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event2], [], [], None, None) self.assertEquals(3, self.txn_ctrl.send.call_count) def test_send_large_txns(self): @@ -288,13 +299,19 @@ def do_send(*args, **kwargs): self.scheduler.enqueue_for_appservice(service, [event], []) # Expect the first event to be sent immediately. - self.txn_ctrl.send.assert_called_with(service, [event_list[0]], [], []) + self.txn_ctrl.send.assert_called_with( + service, [event_list[0]], [], [], None, None + ) srv_1_defer.callback(service) # Then send the next 100 events - self.txn_ctrl.send.assert_called_with(service, event_list[1:101], [], []) + self.txn_ctrl.send.assert_called_with( + service, event_list[1:101], [], [], None, None + ) srv_2_defer.callback(service) # Then the final 99 events - self.txn_ctrl.send.assert_called_with(service, event_list[101:], [], []) + self.txn_ctrl.send.assert_called_with( + service, event_list[101:], [], [], None, None + ) self.assertEquals(3, self.txn_ctrl.send.call_count) def test_send_single_ephemeral_no_queue(self): @@ -302,14 +319,18 @@ def test_send_single_ephemeral_no_queue(self): service = Mock(id=4, name="service") event_list = [Mock(name="event")] self.scheduler.enqueue_for_appservice(service, ephemeral=event_list) - self.txn_ctrl.send.assert_called_once_with(service, [], event_list, []) + self.txn_ctrl.send.assert_called_once_with( + service, [], event_list, [], None, None + ) def test_send_multiple_ephemeral_no_queue(self): # Expect the event to be sent immediately. 
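         # (The two trailing `None`s asserted throughout these tests are the
         # new one_time_key_counts and unused_fallback_keys arguments; they
         # remain None because the MSC3202 code path is not enabled here.)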
service = Mock(id=4, name="service") event_list = [Mock(name="event1"), Mock(name="event2"), Mock(name="event3")] self.scheduler.enqueue_for_appservice(service, ephemeral=event_list) - self.txn_ctrl.send.assert_called_once_with(service, [], event_list, []) + self.txn_ctrl.send.assert_called_once_with( + service, [], event_list, [], None, None + ) def test_send_single_ephemeral_with_queue(self): d = defer.Deferred() @@ -324,13 +345,13 @@ def test_send_single_ephemeral_with_queue(self): # Send more events: expect send() to NOT be called multiple times. self.scheduler.enqueue_for_appservice(service, ephemeral=event_list_2) self.scheduler.enqueue_for_appservice(service, ephemeral=event_list_3) - self.txn_ctrl.send.assert_called_with(service, [], event_list_1, []) + self.txn_ctrl.send.assert_called_with(service, [], event_list_1, [], None, None) self.assertEquals(1, self.txn_ctrl.send.call_count) # Resolve txn_ctrl.send d.callback(service) # Expect the queued events to be sent self.txn_ctrl.send.assert_called_with( - service, [], event_list_2 + event_list_3, [] + service, [], event_list_2 + event_list_3, [], None, None ) self.assertEquals(2, self.txn_ctrl.send.call_count) @@ -343,7 +364,9 @@ def test_send_large_txns_ephemeral(self): second_chunk = [Mock(name="event%i" % (i + 101)) for i in range(50)] event_list = first_chunk + second_chunk self.scheduler.enqueue_for_appservice(service, ephemeral=event_list) - self.txn_ctrl.send.assert_called_once_with(service, [], first_chunk, []) + self.txn_ctrl.send.assert_called_once_with( + service, [], first_chunk, [], None, None + ) d.callback(service) - self.txn_ctrl.send.assert_called_with(service, [], second_chunk, []) + self.txn_ctrl.send.assert_called_with(service, [], second_chunk, [], None, None) self.assertEquals(2, self.txn_ctrl.send.call_count) diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index 9918ff68079e..6e0ec379630b 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -16,17 +16,25 @@ from unittest.mock import Mock from twisted.internet import defer +from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin import synapse.storage -from synapse.appservice import ApplicationService +from synapse.appservice import ( + ApplicationService, + TransactionOneTimeKeyCounts, + TransactionUnusedFallbackKeys, +) from synapse.handlers.appservice import ApplicationServicesHandler -from synapse.rest.client import login, receipts, room, sendtodevice +from synapse.rest.client import login, receipts, register, room, sendtodevice +from synapse.server import HomeServer from synapse.types import RoomStreamToken +from synapse.util import Clock from synapse.util.stringutils import random_string from tests import unittest from tests.test_utils import make_awaitable, simple_async_mock +from tests.unittest import override_config from tests.utils import MockClock @@ -428,7 +436,14 @@ def test_application_services_receive_local_to_device(self): # # The uninterested application service should not have been notified at all. 
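         # (send() now receives six positional arguments; the two trailing
         # ones are the MSC3202 one-time key counts and unused fallback keys,
         # unpacked below but not used by this test.)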
self.send_mock.assert_called_once() - service, _events, _ephemeral, to_device_messages = self.send_mock.call_args[0] + ( + service, + _events, + _ephemeral, + to_device_messages, + _otks, + _fbks, + ) = self.send_mock.call_args[0] # Assert that this was the same to-device message that local_user sent self.assertEqual(service, interested_appservice) @@ -540,7 +555,7 @@ def test_application_services_receive_bursts_of_to_device(self): service_id_to_message_count: Dict[str, int] = {} for call in self.send_mock.call_args_list: - service, _events, _ephemeral, to_device_messages = call[0] + service, _events, _ephemeral, to_device_messages, _otks, _fbks = call[0] # Check that this was made to an interested service self.assertIn(service, interested_appservices) @@ -582,3 +597,174 @@ def _register_application_service( self._services.append(appservice) return appservice + + +class ApplicationServicesHandlerOtkCountsTestCase(unittest.HomeserverTestCase): + # Argument indices for pulling out arguments from a `send_mock`. + ARG_OTK_COUNTS = 4 + ARG_FALLBACK_KEYS = 5 + + servlets = [ + synapse.rest.admin.register_servlets_for_client_rest_resource, + login.register_servlets, + register.register_servlets, + room.register_servlets, + sendtodevice.register_servlets, + receipts.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + # Mock the ApplicationServiceScheduler's _TransactionController's send method so that + # we can track what's going out + self.send_mock = simple_async_mock() + hs.get_application_service_handler().scheduler.txn_ctrl.send = self.send_mock # type: ignore[assignment] # We assign to a method. + + # Define an application service for the tests + self._service_token = "VERYSECRET" + self._service = ApplicationService( + self._service_token, + "as1.invalid", + "as1", + "@as.sender:test", + namespaces={ + "users": [ + {"regex": "@_as_.*:test", "exclusive": True}, + {"regex": "@as.sender:test", "exclusive": True}, + ] + }, + msc3202_transaction_extensions=True, + ) + self.hs.get_datastores().main.services_cache = [self._service] + + # Register some appservice users + self._sender_user, self._sender_device = self.register_appservice_user( + "as.sender", self._service_token + ) + self._namespaced_user, self._namespaced_device = self.register_appservice_user( + "_as_user1", self._service_token + ) + + # Register a real user as well. + self._real_user = self.register_user("real.user", "meow") + self._real_user_token = self.login("real.user", "meow") + + async def _add_otks_for_device( + self, user_id: str, device_id: str, otk_count: int + ) -> None: + """ + Add some dummy keys. It doesn't matter if they're not a real algorithm; + that should be opaque to the server anyway. + """ + await self.hs.get_datastores().main.add_e2e_one_time_keys( + user_id, + device_id, + self.clock.time_msec(), + [("algo", f"k{i}", "{}") for i in range(otk_count)], + ) + + async def _add_fallback_key_for_device( + self, user_id: str, device_id: str, used: bool + ) -> None: + """ + Adds a fake fallback key to a device, optionally marking it as used + right away. 
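+        (A fallback key marked as used should then surface as an empty
+        algorithm list for that device in the AS transaction, as the
+        assertions in the test below expect.)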
+ """ + store = self.hs.get_datastores().main + await store.set_e2e_fallback_keys(user_id, device_id, {"algo:fk": "fall back!"}) + if used is True: + # Mark the key as used + await store.db_pool.simple_update_one( + table="e2e_fallback_keys_json", + keyvalues={ + "user_id": user_id, + "device_id": device_id, + "algorithm": "algo", + "key_id": "fk", + }, + updatevalues={"used": True}, + desc="_get_fallback_key_set_used", + ) + + def _set_up_devices_and_a_room(self) -> str: + """ + Helper to set up devices for all the users + and a room for the users to talk in. + """ + + async def preparation(): + await self._add_otks_for_device(self._sender_user, self._sender_device, 42) + await self._add_fallback_key_for_device( + self._sender_user, self._sender_device, used=True + ) + await self._add_otks_for_device( + self._namespaced_user, self._namespaced_device, 36 + ) + await self._add_fallback_key_for_device( + self._namespaced_user, self._namespaced_device, used=False + ) + + # Register a device for the real user, too, so that we can later ensure + # that we don't leak information to the AS about the non-AS user. + await self.hs.get_datastores().main.store_device( + self._real_user, "REALDEV", "UltraMatrix 3000" + ) + await self._add_otks_for_device(self._real_user, "REALDEV", 50) + + self.get_success(preparation()) + + room_id = self.helper.create_room_as( + self._real_user, is_public=True, tok=self._real_user_token + ) + self.helper.join( + room_id, + self._namespaced_user, + tok=self._service_token, + appservice_user_id=self._namespaced_user, + ) + + # Check it was called for sanity. (This was to send the join event to the AS.) + self.send_mock.assert_called() + self.send_mock.reset_mock() + + return room_id + + @override_config( + {"experimental_features": {"msc3202_transaction_extensions": True}} + ) + def test_application_services_receive_otk_counts_and_fallback_key_usages_with_pdus( + self, + ) -> None: + """ + Tests that: + - the AS receives one-time key counts and unused fallback keys for: + - the specified sender; and + - any user who is in receipt of the PDUs + """ + + room_id = self._set_up_devices_and_a_room() + + # Send a message into the AS's room + self.helper.send(room_id, "woof woof", tok=self._real_user_token) + + # Capture what was sent as an AS transaction. 
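+        # (ARG_OTK_COUNTS and ARG_FALLBACK_KEYS are defined at the top of
+        # this test case as 4 and 5, indexing send()'s positional arguments.)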
+ self.send_mock.assert_called() + last_args, _last_kwargs = self.send_mock.call_args + otks: Optional[TransactionOneTimeKeyCounts] = last_args[self.ARG_OTK_COUNTS] + unused_fallbacks: Optional[TransactionUnusedFallbackKeys] = last_args[ + self.ARG_FALLBACK_KEYS + ] + + self.assertEqual( + otks, + { + "@as.sender:test": {self._sender_device: {"algo": 42}}, + "@_as_user1:test": {self._namespaced_device: {"algo": 36}}, + }, + ) + self.assertEqual( + unused_fallbacks, + { + "@as.sender:test": {self._sender_device: []}, + "@_as_user1:test": {self._namespaced_device: ["algo"]}, + }, + ) diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index 50703ccaee0a..d2f654214ede 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -267,7 +267,7 @@ def test_create_appservice_txn_first( events = cast(List[EventBase], [Mock(event_id="e1"), Mock(event_id="e2")]) txn = self.get_success( defer.ensureDeferred( - self.store.create_appservice_txn(service, events, [], []) + self.store.create_appservice_txn(service, events, [], [], {}, {}) ) ) self.assertEquals(txn.id, 1) @@ -283,7 +283,7 @@ def test_create_appservice_txn_older_last_txn( self.get_success(self._insert_txn(service.id, 9644, events)) self.get_success(self._insert_txn(service.id, 9645, events)) txn = self.get_success( - self.store.create_appservice_txn(service, events, [], []) + self.store.create_appservice_txn(service, events, [], [], {}, {}) ) self.assertEquals(txn.id, 9646) self.assertEquals(txn.events, events) @@ -296,7 +296,7 @@ def test_create_appservice_txn_up_to_date_last_txn( events = cast(List[EventBase], [Mock(event_id="e1"), Mock(event_id="e2")]) self.get_success(self._set_last_txn(service.id, 9643)) txn = self.get_success( - self.store.create_appservice_txn(service, events, [], []) + self.store.create_appservice_txn(service, events, [], [], {}, {}) ) self.assertEquals(txn.id, 9644) self.assertEquals(txn.events, events) @@ -320,7 +320,7 @@ def test_create_appservice_txn_up_fuzzing( self.get_success(self._insert_txn(self.as_list[3]["id"], 9643, events)) txn = self.get_success( - self.store.create_appservice_txn(service, events, [], []) + self.store.create_appservice_txn(service, events, [], [], {}, {}) ) self.assertEquals(txn.id, 9644) self.assertEquals(txn.events, events) From 54e74cc15f30585f5874780437614c0df6f639d9 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Thu, 24 Feb 2022 19:56:38 +0100 Subject: [PATCH 300/474] Add type hints to `tests/rest/client` (#12072) --- changelog.d/12072.misc | 1 + tests/rest/client/test_consent.py | 19 +++-- tests/rest/client/test_device_lists.py | 16 ++-- tests/rest/client/test_ephemeral_message.py | 19 +++-- tests/rest/client/test_identity.py | 13 +++- tests/rest/client/test_keys.py | 6 +- tests/rest/client/test_password_policy.py | 39 +++++----- tests/rest/client/test_power_levels.py | 47 +++++++----- tests/rest/client/test_presence.py | 15 ++-- tests/rest/client/test_room_batch.py | 2 +- tests/rest/client/utils.py | 85 +++++++++++++-------- 11 files changed, 160 insertions(+), 102 deletions(-) create mode 100644 changelog.d/12072.misc diff --git a/changelog.d/12072.misc b/changelog.d/12072.misc new file mode 100644 index 000000000000..0360dbd61edc --- /dev/null +++ b/changelog.d/12072.misc @@ -0,0 +1 @@ +Add type hints to `tests/rest/client`. 
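As a reader's aid before the file-by-file diffs: the conventions this commit
applies across `tests/rest/client` are sketched below. The test case itself is
hypothetical; the annotations and HTTPStatus usage mirror the changes that
follow.

    from http import HTTPStatus

    from twisted.test.proto_helpers import MemoryReactor

    from synapse.server import HomeServer
    from synapse.util import Clock

    from tests import unittest


    class ExampleTestCase(unittest.HomeserverTestCase):
        def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
            # Framework hooks gain explicit parameter and return annotations.
            return self.setup_test_homeserver(config=self.default_config())

        def test_example(self) -> None:
            # Test methods are annotated as returning None, and bare status
            # codes become HTTPStatus constants; HTTPStatus is an IntEnum,
            # so HTTPStatus.OK still compares equal to 200.
            self.assertEqual(HTTPStatus.OK, 200)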
diff --git a/tests/rest/client/test_consent.py b/tests/rest/client/test_consent.py index fcdc565814cf..b1ca81a91191 100644 --- a/tests/rest/client/test_consent.py +++ b/tests/rest/client/test_consent.py @@ -13,11 +13,16 @@ # limitations under the License. import os +from http import HTTPStatus + +from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin from synapse.api.urls import ConsentURIBuilder from synapse.rest.client import login, room from synapse.rest.consent import consent_resource +from synapse.server import HomeServer +from synapse.util import Clock from tests import unittest from tests.server import FakeSite, make_request @@ -32,7 +37,7 @@ class ConsentResourceTestCase(unittest.HomeserverTestCase): user_id = True hijack_auth = False - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() config["form_secret"] = "123abc" @@ -56,7 +61,7 @@ def make_homeserver(self, reactor, clock): hs = self.setup_test_homeserver(config=config) return hs - def test_render_public_consent(self): + def test_render_public_consent(self) -> None: """You can observe the terms form without specifying a user""" resource = consent_resource.ConsentResource(self.hs) channel = make_request( @@ -66,9 +71,9 @@ def test_render_public_consent(self): "/consent?v=1", shorthand=False, ) - self.assertEqual(channel.code, 200) + self.assertEqual(channel.code, HTTPStatus.OK) - def test_accept_consent(self): + def test_accept_consent(self) -> None: """ A user can use the consent form to accept the terms. """ @@ -92,7 +97,7 @@ def test_accept_consent(self): access_token=access_token, shorthand=False, ) - self.assertEqual(channel.code, 200) + self.assertEqual(channel.code, HTTPStatus.OK) # Get the version from the body, and whether we've consented version, consented = channel.result["body"].decode("ascii").split(",") @@ -107,7 +112,7 @@ def test_accept_consent(self): access_token=access_token, shorthand=False, ) - self.assertEqual(channel.code, 200) + self.assertEqual(channel.code, HTTPStatus.OK) # Fetch the consent page, to get the consent version -- it should have # changed @@ -119,7 +124,7 @@ def test_accept_consent(self): access_token=access_token, shorthand=False, ) - self.assertEqual(channel.code, 200) + self.assertEqual(channel.code, HTTPStatus.OK) # Get the version from the body, and check that it's the version we # agreed to, and that we've consented to it. diff --git a/tests/rest/client/test_device_lists.py b/tests/rest/client/test_device_lists.py index 16070cf0270f..a8af4e2435ac 100644 --- a/tests/rest/client/test_device_lists.py +++ b/tests/rest/client/test_device_lists.py @@ -11,6 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from http import HTTPStatus + from synapse.rest import admin, devices, room, sync from synapse.rest.client import account, login, register @@ -30,7 +32,7 @@ class DeviceListsTestCase(unittest.HomeserverTestCase): devices.register_servlets, ] - def test_receiving_local_device_list_changes(self): + def test_receiving_local_device_list_changes(self) -> None: """Tests that a local users that share a room receive each other's device list changes. 
""" @@ -84,7 +86,7 @@ def test_receiving_local_device_list_changes(self): }, access_token=alice_access_token, ) - self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) # Check that bob's incremental sync contains the updated device list. # If not, the client would only receive the device list update on the @@ -97,7 +99,7 @@ def test_receiving_local_device_list_changes(self): ) self.assertIn(alice_user_id, changed_device_lists, bob_sync_channel.json_body) - def test_not_receiving_local_device_list_changes(self): + def test_not_receiving_local_device_list_changes(self) -> None: """Tests a local users DO NOT receive device updates from each other if they do not share a room. """ @@ -119,7 +121,7 @@ def test_not_receiving_local_device_list_changes(self): "/sync", access_token=bob_access_token, ) - self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) next_batch_token = channel.json_body["next_batch"] # ...and then an incremental sync. This should block until the sync stream is woken up, @@ -141,11 +143,13 @@ def test_not_receiving_local_device_list_changes(self): }, access_token=alice_access_token, ) - self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) # Check that bob's incremental sync does not contain the updated device list. bob_sync_channel.await_result() - self.assertEqual(bob_sync_channel.code, 200, bob_sync_channel.json_body) + self.assertEqual( + bob_sync_channel.code, HTTPStatus.OK, bob_sync_channel.json_body + ) changed_device_lists = bob_sync_channel.json_body.get("device_lists", {}).get( "changed", [] diff --git a/tests/rest/client/test_ephemeral_message.py b/tests/rest/client/test_ephemeral_message.py index 3d7aa8ec8683..9fa1f82dfee0 100644 --- a/tests/rest/client/test_ephemeral_message.py +++ b/tests/rest/client/test_ephemeral_message.py @@ -11,9 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from http import HTTPStatus + +from twisted.test.proto_helpers import MemoryReactor + from synapse.api.constants import EventContentFields, EventTypes from synapse.rest import admin from synapse.rest.client import room +from synapse.server import HomeServer +from synapse.types import JsonDict +from synapse.util import Clock from tests import unittest @@ -27,7 +34,7 @@ class EphemeralMessageTestCase(unittest.HomeserverTestCase): room.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() config["enable_ephemeral_messages"] = True @@ -35,10 +42,10 @@ def make_homeserver(self, reactor, clock): self.hs = self.setup_test_homeserver(config=config) return self.hs - def prepare(self, reactor, clock, homeserver): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.room_id = self.helper.create_room_as(self.user_id) - def test_message_expiry_no_delay(self): + def test_message_expiry_no_delay(self) -> None: """Tests that sending a message sent with a m.self_destruct_after field set to the past results in that event being deleted right away. 
""" @@ -61,7 +68,7 @@ def test_message_expiry_no_delay(self): event_content = self.get_event(self.room_id, event_id)["content"] self.assertFalse(bool(event_content), event_content) - def test_message_expiry_delay(self): + def test_message_expiry_delay(self) -> None: """Tests that sending a message with a m.self_destruct_after field set to the future results in that event not being deleted right away, but advancing the clock to after that expiry timestamp causes the event to be deleted. @@ -89,7 +96,9 @@ def test_message_expiry_delay(self): event_content = self.get_event(self.room_id, event_id)["content"] self.assertFalse(bool(event_content), event_content) - def get_event(self, room_id, event_id, expected_code=200): + def get_event( + self, room_id: str, event_id: str, expected_code: int = HTTPStatus.OK + ) -> JsonDict: url = "/_matrix/client/r0/rooms/%s/event/%s" % (room_id, event_id) channel = self.make_request("GET", url) diff --git a/tests/rest/client/test_identity.py b/tests/rest/client/test_identity.py index becb4e8dccf5..299b9d21e24b 100644 --- a/tests/rest/client/test_identity.py +++ b/tests/rest/client/test_identity.py @@ -13,9 +13,14 @@ # limitations under the License. import json +from http import HTTPStatus + +from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin from synapse.rest.client import login, room +from synapse.server import HomeServer +from synapse.util import Clock from tests import unittest @@ -28,7 +33,7 @@ class IdentityTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() config["enable_3pid_lookup"] = False @@ -36,14 +41,14 @@ def make_homeserver(self, reactor, clock): return self.hs - def test_3pid_lookup_disabled(self): + def test_3pid_lookup_disabled(self) -> None: self.hs.config.registration.enable_3pid_lookup = False self.register_user("kermit", "monkey") tok = self.login("kermit", "monkey") channel = self.make_request(b"POST", "/createRoom", b"{}", access_token=tok) - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) room_id = channel.json_body["room_id"] params = { @@ -56,4 +61,4 @@ def test_3pid_lookup_disabled(self): channel = self.make_request( b"POST", request_url, request_data, access_token=tok ) - self.assertEquals(channel.result["code"], b"403", channel.result) + self.assertEqual(channel.code, HTTPStatus.FORBIDDEN, channel.result) diff --git a/tests/rest/client/test_keys.py b/tests/rest/client/test_keys.py index d7fa635eae1d..bbc8e7424351 100644 --- a/tests/rest/client/test_keys.py +++ b/tests/rest/client/test_keys.py @@ -28,7 +28,7 @@ class KeyQueryTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def test_rejects_device_id_ice_key_outside_of_list(self): + def test_rejects_device_id_ice_key_outside_of_list(self) -> None: self.register_user("alice", "wonderland") alice_token = self.login("alice", "wonderland") bob = self.register_user("bob", "uncle") @@ -49,7 +49,7 @@ def test_rejects_device_id_ice_key_outside_of_list(self): channel.result, ) - def test_rejects_device_key_given_as_map_to_bool(self): + def test_rejects_device_key_given_as_map_to_bool(self) -> None: self.register_user("alice", "wonderland") alice_token = self.login("alice", "wonderland") bob = self.register_user("bob", "uncle") @@ -73,7 +73,7 @@ def 
test_rejects_device_key_given_as_map_to_bool(self): channel.result, ) - def test_requires_device_key(self): + def test_requires_device_key(self) -> None: """`device_keys` is required. We should complain if it's missing.""" self.register_user("alice", "wonderland") alice_token = self.login("alice", "wonderland") diff --git a/tests/rest/client/test_password_policy.py b/tests/rest/client/test_password_policy.py index 3cf5871899e1..3a74d2e96c49 100644 --- a/tests/rest/client/test_password_policy.py +++ b/tests/rest/client/test_password_policy.py @@ -13,11 +13,16 @@ # limitations under the License. import json +from http import HTTPStatus + +from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import LoginType from synapse.api.errors import Codes from synapse.rest import admin from synapse.rest.client import account, login, password_policy, register +from synapse.server import HomeServer +from synapse.util import Clock from tests import unittest @@ -46,7 +51,7 @@ class PasswordPolicyTestCase(unittest.HomeserverTestCase): account.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.register_url = "/_matrix/client/r0/register" self.policy = { "enabled": True, @@ -65,12 +70,12 @@ def make_homeserver(self, reactor, clock): hs = self.setup_test_homeserver(config=config) return hs - def test_get_policy(self): + def test_get_policy(self) -> None: """Tests if the /password_policy endpoint returns the configured policy.""" channel = self.make_request("GET", "/_matrix/client/r0/password_policy") - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self.assertEqual( channel.json_body, { @@ -83,70 +88,70 @@ def test_get_policy(self): channel.result, ) - def test_password_too_short(self): + def test_password_too_short(self) -> None: request_data = json.dumps({"username": "kermit", "password": "shorty"}) channel = self.make_request("POST", self.register_url, request_data) - self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) self.assertEqual( channel.json_body["errcode"], Codes.PASSWORD_TOO_SHORT, channel.result, ) - def test_password_no_digit(self): + def test_password_no_digit(self) -> None: request_data = json.dumps({"username": "kermit", "password": "longerpassword"}) channel = self.make_request("POST", self.register_url, request_data) - self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) self.assertEqual( channel.json_body["errcode"], Codes.PASSWORD_NO_DIGIT, channel.result, ) - def test_password_no_symbol(self): + def test_password_no_symbol(self) -> None: request_data = json.dumps({"username": "kermit", "password": "l0ngerpassword"}) channel = self.make_request("POST", self.register_url, request_data) - self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) self.assertEqual( channel.json_body["errcode"], Codes.PASSWORD_NO_SYMBOL, channel.result, ) - def test_password_no_uppercase(self): + def test_password_no_uppercase(self) -> None: request_data = json.dumps({"username": "kermit", "password": "l0ngerpassword!"}) channel = self.make_request("POST", self.register_url, request_data) - self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, 
channel.result) self.assertEqual( channel.json_body["errcode"], Codes.PASSWORD_NO_UPPERCASE, channel.result, ) - def test_password_no_lowercase(self): + def test_password_no_lowercase(self) -> None: request_data = json.dumps({"username": "kermit", "password": "L0NGERPASSWORD!"}) channel = self.make_request("POST", self.register_url, request_data) - self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) self.assertEqual( channel.json_body["errcode"], Codes.PASSWORD_NO_LOWERCASE, channel.result, ) - def test_password_compliant(self): + def test_password_compliant(self) -> None: request_data = json.dumps({"username": "kermit", "password": "L0ngerpassword!"}) channel = self.make_request("POST", self.register_url, request_data) # Getting a 401 here means the password has passed validation and the server has # responded with a list of registration flows. - self.assertEqual(channel.code, 401, channel.result) + self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result) - def test_password_change(self): + def test_password_change(self) -> None: """This doesn't test every possible use case, only that hitting /account/password triggers the password validation code. """ @@ -173,5 +178,5 @@ def test_password_change(self): access_token=tok, ) - self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) self.assertEqual(channel.json_body["errcode"], Codes.PASSWORD_NO_DIGIT) diff --git a/tests/rest/client/test_power_levels.py b/tests/rest/client/test_power_levels.py index c0de4c93a806..27dcfc83d204 100644 --- a/tests/rest/client/test_power_levels.py +++ b/tests/rest/client/test_power_levels.py @@ -11,11 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from http import HTTPStatus + +from twisted.test.proto_helpers import MemoryReactor from synapse.api.errors import Codes from synapse.events.utils import CANONICALJSON_MAX_INT, CANONICALJSON_MIN_INT from synapse.rest import admin from synapse.rest.client import login, room, sync +from synapse.server import HomeServer +from synapse.util import Clock from tests.unittest import HomeserverTestCase @@ -30,12 +35,12 @@ class PowerLevelsTestCase(HomeserverTestCase): sync.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() return self.setup_test_homeserver(config=config) - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: # register a room admin, moderator and regular user self.admin_user_id = self.register_user("admin", "pass") self.admin_access_token = self.login("admin", "pass") @@ -88,7 +93,7 @@ def prepare(self, reactor, clock, hs): tok=self.admin_access_token, ) - def test_non_admins_cannot_enable_room_encryption(self): + def test_non_admins_cannot_enable_room_encryption(self) -> None: # have the mod try to enable room encryption self.helper.send_state( self.room_id, @@ -104,10 +109,10 @@ def test_non_admins_cannot_enable_room_encryption(self): "m.room.encryption", {"algorithm": "m.megolm.v1.aes-sha2"}, tok=self.user_access_token, - expect_code=403, # expect failure + expect_code=HTTPStatus.FORBIDDEN, # expect failure ) - def test_non_admins_cannot_send_server_acl(self): + def test_non_admins_cannot_send_server_acl(self) -> None: # have the mod try to send a server ACL self.helper.send_state( self.room_id, @@ -118,7 +123,7 @@ def test_non_admins_cannot_send_server_acl(self): "deny": ["*.evil.com", "evil.com"], }, tok=self.mod_access_token, - expect_code=403, # expect failure + expect_code=HTTPStatus.FORBIDDEN, # expect failure ) # have the user try to send a server ACL @@ -131,10 +136,10 @@ def test_non_admins_cannot_send_server_acl(self): "deny": ["*.evil.com", "evil.com"], }, tok=self.user_access_token, - expect_code=403, # expect failure + expect_code=HTTPStatus.FORBIDDEN, # expect failure ) - def test_non_admins_cannot_tombstone_room(self): + def test_non_admins_cannot_tombstone_room(self) -> None: # Create another room that will serve as our "upgraded room" self.upgraded_room_id = self.helper.create_room_as( self.admin_user_id, tok=self.admin_access_token @@ -149,7 +154,7 @@ def test_non_admins_cannot_tombstone_room(self): "replacement_room": self.upgraded_room_id, }, tok=self.mod_access_token, - expect_code=403, # expect failure + expect_code=HTTPStatus.FORBIDDEN, # expect failure ) # have the user try to send a tombstone event @@ -164,17 +169,17 @@ def test_non_admins_cannot_tombstone_room(self): expect_code=403, # expect failure ) - def test_admins_can_enable_room_encryption(self): + def test_admins_can_enable_room_encryption(self) -> None: # have the admin try to enable room encryption self.helper.send_state( self.room_id, "m.room.encryption", {"algorithm": "m.megolm.v1.aes-sha2"}, tok=self.admin_access_token, - expect_code=200, # expect success + expect_code=HTTPStatus.OK, # expect success ) - def test_admins_can_send_server_acl(self): + def test_admins_can_send_server_acl(self) -> None: # have the admin try to send a server ACL self.helper.send_state( self.room_id, @@ -185,10 +190,10 @@ def test_admins_can_send_server_acl(self): "deny": ["*.evil.com", "evil.com"], }, 
tok=self.admin_access_token, - expect_code=200, # expect success + expect_code=HTTPStatus.OK, # expect success ) - def test_admins_can_tombstone_room(self): + def test_admins_can_tombstone_room(self) -> None: # Create another room that will serve as our "upgraded room" self.upgraded_room_id = self.helper.create_room_as( self.admin_user_id, tok=self.admin_access_token @@ -203,10 +208,10 @@ def test_admins_can_tombstone_room(self): "replacement_room": self.upgraded_room_id, }, tok=self.admin_access_token, - expect_code=200, # expect success + expect_code=HTTPStatus.OK, # expect success ) - def test_cannot_set_string_power_levels(self): + def test_cannot_set_string_power_levels(self) -> None: room_power_levels = self.helper.get_state( self.room_id, "m.room.power_levels", @@ -221,7 +226,7 @@ def test_cannot_set_string_power_levels(self): "m.room.power_levels", room_power_levels, tok=self.admin_access_token, - expect_code=400, # expect failure + expect_code=HTTPStatus.BAD_REQUEST, # expect failure ) self.assertEqual( @@ -230,7 +235,7 @@ def test_cannot_set_string_power_levels(self): body, ) - def test_cannot_set_unsafe_large_power_levels(self): + def test_cannot_set_unsafe_large_power_levels(self) -> None: room_power_levels = self.helper.get_state( self.room_id, "m.room.power_levels", @@ -247,7 +252,7 @@ def test_cannot_set_unsafe_large_power_levels(self): "m.room.power_levels", room_power_levels, tok=self.admin_access_token, - expect_code=400, # expect failure + expect_code=HTTPStatus.BAD_REQUEST, # expect failure ) self.assertEqual( @@ -256,7 +261,7 @@ def test_cannot_set_unsafe_large_power_levels(self): body, ) - def test_cannot_set_unsafe_small_power_levels(self): + def test_cannot_set_unsafe_small_power_levels(self) -> None: room_power_levels = self.helper.get_state( self.room_id, "m.room.power_levels", @@ -273,7 +278,7 @@ def test_cannot_set_unsafe_small_power_levels(self): "m.room.power_levels", room_power_levels, tok=self.admin_access_token, - expect_code=400, # expect failure + expect_code=HTTPStatus.BAD_REQUEST, # expect failure ) self.assertEqual( diff --git a/tests/rest/client/test_presence.py b/tests/rest/client/test_presence.py index 56fe1a3d0133..0abe378fe4d6 100644 --- a/tests/rest/client/test_presence.py +++ b/tests/rest/client/test_presence.py @@ -11,14 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +from http import HTTPStatus from unittest.mock import Mock from twisted.internet import defer +from twisted.test.proto_helpers import MemoryReactor from synapse.handlers.presence import PresenceHandler from synapse.rest.client import presence +from synapse.server import HomeServer from synapse.types import UserID +from synapse.util import Clock from tests import unittest @@ -31,7 +34,7 @@ class PresenceTestCase(unittest.HomeserverTestCase): user = UserID.from_string(user_id) servlets = [presence.register_servlets] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: presence_handler = Mock(spec=PresenceHandler) presence_handler.set_state.return_value = defer.succeed(None) @@ -45,7 +48,7 @@ def make_homeserver(self, reactor, clock): return hs - def test_put_presence(self): + def test_put_presence(self) -> None: """ PUT to the status endpoint with use_presence enabled will call set_state on the presence handler. 
@@ -57,11 +60,11 @@ def test_put_presence(self): "PUT", "/presence/%s/status" % (self.user_id,), body ) - self.assertEqual(channel.code, 200) + self.assertEqual(channel.code, HTTPStatus.OK) self.assertEqual(self.hs.get_presence_handler().set_state.call_count, 1) @unittest.override_config({"use_presence": False}) - def test_put_presence_disabled(self): + def test_put_presence_disabled(self) -> None: """ PUT to the status endpoint with use_presence disabled will NOT call set_state on the presence handler. @@ -72,5 +75,5 @@ def test_put_presence_disabled(self): "PUT", "/presence/%s/status" % (self.user_id,), body ) - self.assertEqual(channel.code, 200) + self.assertEqual(channel.code, HTTPStatus.OK) self.assertEqual(self.hs.get_presence_handler().set_state.call_count, 0) diff --git a/tests/rest/client/test_room_batch.py b/tests/rest/client/test_room_batch.py index e9f870403536..44f333a0ee77 100644 --- a/tests/rest/client/test_room_batch.py +++ b/tests/rest/client/test_room_batch.py @@ -134,7 +134,7 @@ def _create_test_room(self) -> Tuple[str, str, str, str]: return room_id, event_id_a, event_id_b, event_id_c @unittest.override_config({"experimental_features": {"msc2716_enabled": True}}) - def test_same_state_groups_for_whole_historical_batch(self): + def test_same_state_groups_for_whole_historical_batch(self) -> None: """Make sure that when using the `/batch_send` endpoint to import a bunch of historical messages, it re-uses the same `state_group` across the whole batch. This is an easy optimization to make sure we're getting diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py index 2b3fdadffa41..46cd5f70a8ed 100644 --- a/tests/rest/client/utils.py +++ b/tests/rest/client/utils.py @@ -19,6 +19,7 @@ import re import time import urllib.parse +from http import HTTPStatus from typing import ( Any, AnyStr, @@ -89,7 +90,7 @@ def create_room_as( is_public: Optional[bool] = None, room_version: Optional[str] = None, tok: Optional[str] = None, - expect_code: int = 200, + expect_code: int = HTTPStatus.OK, extra_content: Optional[Dict] = None, custom_headers: Optional[Iterable[Tuple[AnyStr, AnyStr]]] = None, ) -> Optional[str]: @@ -137,12 +138,19 @@ def create_room_as( assert channel.result["code"] == b"%d" % expect_code, channel.result self.auth_user_id = temp_id - if expect_code == 200: + if expect_code == HTTPStatus.OK: return channel.json_body["room_id"] else: return None - def invite(self, room=None, src=None, targ=None, expect_code=200, tok=None): + def invite( + self, + room: Optional[str] = None, + src: Optional[str] = None, + targ: Optional[str] = None, + expect_code: int = HTTPStatus.OK, + tok: Optional[str] = None, + ) -> None: self.change_membership( room=room, src=src, @@ -156,7 +164,7 @@ def join( self, room: str, user: Optional[str] = None, - expect_code: int = 200, + expect_code: int = HTTPStatus.OK, tok: Optional[str] = None, appservice_user_id: Optional[str] = None, ) -> None: @@ -170,7 +178,14 @@ def join( expect_code=expect_code, ) - def knock(self, room=None, user=None, reason=None, expect_code=200, tok=None): + def knock( + self, + room: Optional[str] = None, + user: Optional[str] = None, + reason: Optional[str] = None, + expect_code: int = HTTPStatus.OK, + tok: Optional[str] = None, + ) -> None: temp_id = self.auth_user_id self.auth_user_id = user path = "/knock/%s" % room @@ -199,7 +214,13 @@ def knock(self, room=None, user=None, reason=None, expect_code=200, tok=None): self.auth_user_id = temp_id - def leave(self, room=None, user=None, expect_code=200, tok=None): 
+ def leave( + self, + room: Optional[str] = None, + user: Optional[str] = None, + expect_code: int = HTTPStatus.OK, + tok: Optional[str] = None, + ) -> None: self.change_membership( room=room, src=user, @@ -209,7 +230,7 @@ def leave(self, room=None, user=None, expect_code=200, tok=None): expect_code=expect_code, ) - def ban(self, room: str, src: str, targ: str, **kwargs: object): + def ban(self, room: str, src: str, targ: str, **kwargs: object) -> None: """A convenience helper: `change_membership` with `membership` preset to "ban".""" self.change_membership( room=room, @@ -228,7 +249,7 @@ def change_membership( extra_data: Optional[dict] = None, tok: Optional[str] = None, appservice_user_id: Optional[str] = None, - expect_code: int = 200, + expect_code: int = HTTPStatus.OK, expect_errcode: Optional[str] = None, ) -> None: """ @@ -294,13 +315,13 @@ def change_membership( def send( self, - room_id, - body=None, - txn_id=None, - tok=None, - expect_code=200, + room_id: str, + body: Optional[str] = None, + txn_id: Optional[str] = None, + tok: Optional[str] = None, + expect_code: int = HTTPStatus.OK, custom_headers: Optional[Iterable[Tuple[AnyStr, AnyStr]]] = None, - ): + ) -> JsonDict: if body is None: body = "body_text_here" @@ -318,14 +339,14 @@ def send( def send_event( self, - room_id, - type, + room_id: str, + type: str, content: Optional[dict] = None, - txn_id=None, - tok=None, - expect_code=200, + txn_id: Optional[str] = None, + tok: Optional[str] = None, + expect_code: int = HTTPStatus.OK, custom_headers: Optional[Iterable[Tuple[AnyStr, AnyStr]]] = None, - ): + ) -> JsonDict: if txn_id is None: txn_id = "m%s" % (str(time.time())) @@ -358,10 +379,10 @@ def _read_write_state( event_type: str, body: Optional[Dict[str, Any]], tok: str, - expect_code: int = 200, + expect_code: int = HTTPStatus.OK, state_key: str = "", method: str = "GET", - ) -> Dict: + ) -> JsonDict: """Read or write some state from a given room Args: @@ -410,9 +431,9 @@ def get_state( room_id: str, event_type: str, tok: str, - expect_code: int = 200, + expect_code: int = HTTPStatus.OK, state_key: str = "", - ): + ) -> JsonDict: """Gets some state from a room Args: @@ -438,9 +459,9 @@ def send_state( event_type: str, body: Dict[str, Any], tok: str, - expect_code: int = 200, + expect_code: int = HTTPStatus.OK, state_key: str = "", - ): + ) -> JsonDict: """Set some state in a room Args: @@ -467,8 +488,8 @@ def upload_media( image_data: bytes, tok: str, filename: str = "test.png", - expect_code: int = 200, - ) -> dict: + expect_code: int = HTTPStatus.OK, + ) -> JsonDict: """Upload a piece of test media to the media repo Args: resource: The resource that will handle the upload request @@ -513,7 +534,7 @@ def login_via_oidc(self, remote_user_id: str) -> JsonDict: channel = self.auth_via_oidc({"sub": remote_user_id}, client_redirect_url) # expect a confirmation page - assert channel.code == 200, channel.result + assert channel.code == HTTPStatus.OK, channel.result # fish the matrix login token out of the body of the confirmation page m = re.search( @@ -532,7 +553,7 @@ def login_via_oidc(self, remote_user_id: str) -> JsonDict: "/login", content={"type": "m.login.token", "token": login_token}, ) - assert channel.code == 200 + assert channel.code == HTTPStatus.OK return channel.json_body def auth_via_oidc( @@ -641,7 +662,7 @@ async def mock_req(method: str, uri: str, data=None, headers=None): (expected_uri, resp_obj) = expected_requests.pop(0) assert uri == expected_uri resp = FakeResponse( - code=200, + code=HTTPStatus.OK, 
phrase=b"OK", body=json.dumps(resp_obj).encode("utf-8"), ) @@ -739,7 +760,7 @@ def initiate_sso_ui_auth( self.hs.get_reactor(), self.site, "GET", sso_redirect_endpoint ) # that should serve a confirmation page - assert channel.code == 200, channel.text_body + assert channel.code == HTTPStatus.OK, channel.text_body channel.extract_cookies(cookies) # parse the confirmation page to fish out the link. From f3fd8558cdb5d91d0e54ca35b55a3dba2610b215 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Fri, 25 Feb 2022 10:19:49 +0000 Subject: [PATCH 301/474] Minor typing fixes for `synapse/storage/persist_events.py` (#12069) Signed-off-by: Sean Quah --- changelog.d/12069.misc | 1 + synapse/storage/databases/main/events.py | 23 ++++++++++++---------- synapse/storage/persist_events.py | 25 ++++++++++++------------ 3 files changed, 26 insertions(+), 23 deletions(-) create mode 100644 changelog.d/12069.misc diff --git a/changelog.d/12069.misc b/changelog.d/12069.misc new file mode 100644 index 000000000000..8374a63220d1 --- /dev/null +++ b/changelog.d/12069.misc @@ -0,0 +1 @@ +Minor typing fixes. diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index e53e84054ad0..23fa089bca98 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -130,7 +130,7 @@ async def _persist_events_and_state_updates( *, current_state_for_room: Dict[str, StateMap[str]], state_delta_for_room: Dict[str, DeltaState], - new_forward_extremeties: Dict[str, List[str]], + new_forward_extremities: Dict[str, Set[str]], use_negative_stream_ordering: bool = False, inhibit_local_membership_updates: bool = False, ) -> None: @@ -143,7 +143,7 @@ async def _persist_events_and_state_updates( the room based on forward extremities state_delta_for_room: Map from room_id to the delta to apply to room state - new_forward_extremities: Map from room_id to list of event IDs + new_forward_extremities: Map from room_id to set of event IDs that are the new forward extremities of the room. use_negative_stream_ordering: Whether to start stream_ordering on the negative side and decrement. This should be set as True @@ -193,7 +193,7 @@ async def _persist_events_and_state_updates( events_and_contexts=events_and_contexts, inhibit_local_membership_updates=inhibit_local_membership_updates, state_delta_for_room=state_delta_for_room, - new_forward_extremeties=new_forward_extremeties, + new_forward_extremities=new_forward_extremities, ) persist_event_counter.inc(len(events_and_contexts)) @@ -220,7 +220,7 @@ async def _persist_events_and_state_updates( for room_id, new_state in current_state_for_room.items(): self.store.get_current_state_ids.prefill((room_id,), new_state) - for room_id, latest_event_ids in new_forward_extremeties.items(): + for room_id, latest_event_ids in new_forward_extremities.items(): self.store.get_latest_event_ids_in_room.prefill( (room_id,), list(latest_event_ids) ) @@ -334,8 +334,8 @@ def _persist_events_txn( events_and_contexts: List[Tuple[EventBase, EventContext]], inhibit_local_membership_updates: bool = False, state_delta_for_room: Optional[Dict[str, DeltaState]] = None, - new_forward_extremeties: Optional[Dict[str, List[str]]] = None, - ): + new_forward_extremities: Optional[Dict[str, Set[str]]] = None, + ) -> None: """Insert some number of room events into the necessary database tables. 
Rejected events are only inserted into the events table, the events_json table, @@ -353,13 +353,13 @@ def _persist_events_txn( from the database. This is useful when retrying due to IntegrityError. state_delta_for_room: The current-state delta for each room. - new_forward_extremetie: The new forward extremities for each room. + new_forward_extremities: The new forward extremities for each room. For each room, a list of the event ids which are the forward extremities. """ state_delta_for_room = state_delta_for_room or {} - new_forward_extremeties = new_forward_extremeties or {} + new_forward_extremities = new_forward_extremities or {} all_events_and_contexts = events_and_contexts @@ -372,7 +372,7 @@ def _persist_events_txn( self._update_forward_extremities_txn( txn, - new_forward_extremities=new_forward_extremeties, + new_forward_extremities=new_forward_extremities, max_stream_order=max_stream_order, ) @@ -1158,7 +1158,10 @@ def _upsert_room_version_txn(self, txn: LoggingTransaction, room_id: str): ) def _update_forward_extremities_txn( - self, txn, new_forward_extremities, max_stream_order + self, + txn: LoggingTransaction, + new_forward_extremities: Dict[str, Set[str]], + max_stream_order: int, ): for room_id in new_forward_extremities.keys(): self.db_pool.simple_delete_txn( diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py index 428d66a617b1..7d543fdbe08a 100644 --- a/synapse/storage/persist_events.py +++ b/synapse/storage/persist_events.py @@ -427,21 +427,21 @@ async def _persist_event_batch( # NB: Assumes that we are only persisting events for one room # at a time. - # map room_id->list[event_ids] giving the new forward + # map room_id->set[event_ids] giving the new forward # extremities in each room - new_forward_extremeties = {} + new_forward_extremities: Dict[str, Set[str]] = {} # map room_id->(type,state_key)->event_id tracking the full # state in each room after adding these events. # This is simply used to prefill the get_current_state_ids # cache - current_state_for_room = {} + current_state_for_room: Dict[str, StateMap[str]] = {} # map room_id->(to_delete, to_insert) where to_delete is a list # of type/state keys to remove from current state, and to_insert # is a map (type,key)->event_id giving the state delta in each # room - state_delta_for_room = {} + state_delta_for_room: Dict[str, DeltaState] = {} # Set of remote users which were in rooms the server has left. We # should check if we still share any rooms and if not we mark their @@ -460,14 +460,13 @@ async def _persist_event_batch( ) for room_id, ev_ctx_rm in events_by_room.items(): - latest_event_ids = ( + latest_event_ids = set( await self.main_store.get_latest_event_ids_in_room(room_id) ) new_latest_event_ids = await self._calculate_new_extremities( room_id, ev_ctx_rm, latest_event_ids ) - latest_event_ids = set(latest_event_ids) if new_latest_event_ids == latest_event_ids: # No change in extremities, so no change in state continue @@ -478,7 +477,7 @@ async def _persist_event_batch( # extremities, so we'll `continue` above and skip this bit.) assert new_latest_event_ids, "No forward extremities left!" - new_forward_extremeties[room_id] = new_latest_event_ids + new_forward_extremities[room_id] = new_latest_event_ids len_1 = ( len(latest_event_ids) == 1 @@ -533,7 +532,7 @@ async def _persist_event_batch( # extremities, so we'll `continue` above and skip this bit.) assert new_latest_event_ids, "No forward extremities left!" 
- new_forward_extremeties[room_id] = new_latest_event_ids + new_forward_extremities[room_id] = new_latest_event_ids # If either are not None then there has been a change, # and we need to work out the delta (or use that @@ -567,7 +566,7 @@ async def _persist_event_batch( ) if not is_still_joined: logger.info("Server no longer in room %s", room_id) - latest_event_ids = [] + latest_event_ids = set() current_state = {} delta.no_longer_in_room = True @@ -582,7 +581,7 @@ async def _persist_event_batch( chunk, current_state_for_room=current_state_for_room, state_delta_for_room=state_delta_for_room, - new_forward_extremeties=new_forward_extremeties, + new_forward_extremities=new_forward_extremities, use_negative_stream_ordering=backfilled, inhibit_local_membership_updates=backfilled, ) @@ -596,7 +595,7 @@ async def _calculate_new_extremities( room_id: str, event_contexts: List[Tuple[EventBase, EventContext]], latest_event_ids: Collection[str], - ): + ) -> Set[str]: """Calculates the new forward extremities for a room given events to persist. @@ -906,9 +905,9 @@ async def _prune_extremities( # Ideally we'd figure out a way of still being able to drop old # dummy events that reference local events, but this is good enough # as a first cut. - events_to_check = [event] + events_to_check: Collection[EventBase] = [event] while events_to_check: - new_events = set() + new_events: Set[str] = set() for event_to_check in events_to_check: if self.is_mine_id(event_to_check.sender): if event_to_check.type != EventTypes.Dummy: From b43c3ef8e2306829074d847bed50575d5e7c7ea3 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Fri, 25 Feb 2022 10:20:40 +0000 Subject: [PATCH 302/474] Ensure that `get_datastores().main` is typed (#12070) Signed-off-by: Sean Quah --- changelog.d/12070.misc | 1 + synapse/storage/databases/__init__.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12070.misc diff --git a/changelog.d/12070.misc b/changelog.d/12070.misc new file mode 100644 index 000000000000..d4bedc6b97cb --- /dev/null +++ b/changelog.d/12070.misc @@ -0,0 +1 @@ +Remove legacy `HomeServer.get_datastore()`. 
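The hunk below uses a common typing pattern: the concrete class is imported only under `TYPE_CHECKING`, so nothing extra is loaded at runtime and no import cycle is created, while the annotation refers to the class as a string forward reference. A minimal standalone sketch of that pattern follows — the module and class names here are simplified stand-ins for illustration, not Synapse's real layout:

    from typing import TYPE_CHECKING, Generic, TypeVar

    if TYPE_CHECKING:
        # Evaluated only by type checkers such as mypy, never at runtime,
        # so importing the (stand-in) store class cannot create a cycle.
        from myapp.main_store import DataStore

    DataStoreT = TypeVar("DataStoreT")

    class Databases(Generic[DataStoreT]):
        # At runtime this annotation is just a string in __annotations__;
        # the type checker resolves it lazily to the class imported above.
        main: "DataStore"

With the annotation in place, attribute access on `get_datastores().main` is checked against the real class rather than an unconstrained type variable.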
diff --git a/synapse/storage/databases/__init__.py b/synapse/storage/databases/__init__.py index cfe887b7f73d..ce3d1d4e942e 100644 --- a/synapse/storage/databases/__init__.py +++ b/synapse/storage/databases/__init__.py @@ -24,6 +24,7 @@ if TYPE_CHECKING: from synapse.server import HomeServer + from synapse.storage.databases.main import DataStore logger = logging.getLogger(__name__) @@ -44,7 +45,7 @@ class Databases(Generic[DataStoreT]): """ databases: List[DatabasePool] - main: DataStoreT + main: "DataStore" # FIXME: #11165: actually an instance of `main_store_class` state: StateGroupDataStore persist_events: Optional[PersistEventsStore] From ab3ef49059e465198754a3d818d1f3b21771f5ef Mon Sep 17 00:00:00 2001 From: lukasdenk <63459921+lukasdenk@users.noreply.github.com> Date: Mon, 28 Feb 2022 12:42:13 +0100 Subject: [PATCH 303/474] synctl: print warning if synctl_cache_factor is set in config (#11865) Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> --- changelog.d/11865.removal | 1 + synctl | 8 ++++++++ 2 files changed, 9 insertions(+) create mode 100644 changelog.d/11865.removal diff --git a/changelog.d/11865.removal b/changelog.d/11865.removal new file mode 100644 index 000000000000..9fcabfc7200e --- /dev/null +++ b/changelog.d/11865.removal @@ -0,0 +1 @@ +Deprecate using `synctl` with the config option `synctl_cache_factor` and print a warning if a user still uses this option. diff --git a/synctl b/synctl index 0e54f4847bbd..1ab36949c741 100755 --- a/synctl +++ b/synctl @@ -37,6 +37,13 @@ YELLOW = "\x1b[1;33m" RED = "\x1b[1;31m" NORMAL = "\x1b[m" +SYNCTL_CACHE_FACTOR_WARNING = """\ +Setting 'synctl_cache_factor' in the config is deprecated. Instead, please do +one of the following: + - Either set the environment variable 'SYNAPSE_CACHE_FACTOR' + - or set 'caches.global_factor' in the homeserver config. +--------------------------------------------------------------------------------""" + def pid_running(pid): try: @@ -228,6 +235,7 @@ def main(): start_stop_synapse = True if cache_factor: + write(SYNCTL_CACHE_FACTOR_WARNING) os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor) cache_factors = config.get("synctl_cache_factors", {}) From 02d708568b476f2f7716000b35c0adfa4cbd31b3 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 28 Feb 2022 07:12:29 -0500 Subject: [PATCH 304/474] Replace assertEquals and friends with non-deprecated versions. 
(#12092)

---
 changelog.d/12092.misc                        |   1 +
 tests/api/test_auth.py                        |  36 +--
 tests/api/test_filtering.py                   |  20 +-
 tests/api/test_ratelimiting.py                |  28 +-
 tests/appservice/test_scheduler.py            |  62 ++---
 tests/crypto/test_event_signing.py            |   8 +-
 tests/crypto/test_keyring.py                  |   6 +-
 tests/events/test_utils.py                    |  16 +-
 tests/federation/test_complexity.py           |   4 +-
 tests/federation/test_federation_server.py    |  12 +-
 tests/federation/transport/test_knocking.py   |  16 +-
 tests/federation/transport/test_server.py     |   4 +-
 tests/handlers/test_appservice.py             |  18 +-
 tests/handlers/test_directory.py              |  28 +-
 tests/handlers/test_presence.py               |  78 +++---
 tests/handlers/test_profile.py                |  20 +-
 tests/handlers/test_receipts.py               |   2 +-
 tests/handlers/test_register.py               |   4 +-
 tests/handlers/test_sync.py                   |   6 +-
 tests/handlers/test_typing.py                 |  40 +--
 tests/handlers/test_user_directory.py         |   4 +-
 tests/http/federation/test_srv_resolver.py    |  24 +-
 .../replication/slave/storage/test_events.py  |   2 +-
 tests/rest/admin/test_room.py                 |  14 +-
 tests/rest/client/test_account.py             |  22 +-
 tests/rest/client/test_events.py              |  10 +-
 tests/rest/client/test_filter.py              |  10 +-
 tests/rest/client/test_groups.py              |  12 +-
 tests/rest/client/test_login.py               |  78 +++---
 tests/rest/client/test_profile.py             |   2 +-
 tests/rest/client/test_register.py            | 166 ++++++------
 tests/rest/client/test_relations.py           | 248 +++++++++---------
 tests/rest/client/test_rooms.py               | 192 +++++++-------
 tests/rest/client/test_shadow_banned.py       |  20 +-
 tests/rest/client/test_shared_rooms.py        |  24 +-
 tests/rest/client/test_sync.py                |  16 +-
 tests/rest/client/test_third_party_rules.py   |  14 +-
 tests/rest/client/test_typing.py              |  18 +-
 tests/rest/client/test_upgrade_room.py        |  18 +-
 tests/rest/media/v1/test_media_storage.py     |   2 +-
 .../databases/main/test_events_worker.py      |  12 +-
 tests/storage/test_appservice.py              |  86 +++---
 tests/storage/test_base.py                    |   6 +-
 tests/storage/test_directory.py               |   2 +-
 tests/storage/test_event_push_actions.py      |   2 +-
 tests/storage/test_main.py                    |   8 +-
 tests/storage/test_profile.py                 |   4 +-
 tests/storage/test_registration.py            |   6 +-
 tests/storage/test_room.py                    |   4 +-
 tests/storage/test_room_search.py             |   6 +-
 tests/storage/test_roommember.py              |   2 +-
 tests/test_distributor.py                     |   2 +-
 tests/test_terms_auth.py                      |   6 +-
 tests/test_test_utils.py                      |   2 +-
 tests/test_types.py                           |  16 +-
 tests/unittest.py                             |   6 +-
 tests/util/caches/test_deferred_cache.py      |   2 +-
 tests/util/caches/test_descriptors.py         |  70 ++---
 tests/util/test_expiring_cache.py             |  40 +--
 tests/util/test_logcontext.py                 |   2 +-
 tests/util/test_lrucache.py                   | 140 +++++-----
 tests/util/test_treecache.py                  |  48 ++--
 62 files changed, 888 insertions(+), 889 deletions(-)
 create mode 100644 changelog.d/12092.misc

diff --git a/changelog.d/12092.misc b/changelog.d/12092.misc
new file mode 100644
index 000000000000..62653d6f8d9e
--- /dev/null
+++ b/changelog.d/12092.misc
@@ -0,0 +1 @@
+Use `assertEqual` instead of the deprecated `assertEquals` in test code.
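For context on the sweep below: `assertEquals` and `assertRegexpMatches` are deprecated aliases in the standard library's `unittest`. They delegate to `assertEqual` and `assertRegex` respectively but emit a `DeprecationWarning`, so the rename is behaviour-preserving. A minimal sketch:

    import unittest

    class AliasDemo(unittest.TestCase):
        def test_equal(self) -> None:
            # Preferred, non-deprecated spelling.
            self.assertEqual(1 + 1, 2)
            # The deprecated alias below behaves identically but warns;
            # running tests with `python -W error::DeprecationWarning`
            # would turn it into an error:
            # self.assertEquals(1 + 1, 2)

    if __name__ == "__main__":
        unittest.main()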
diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index 686d17c0de50..3e057899236d 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -71,7 +71,7 @@ def test_get_user_by_req_user_valid_token(self): request.args[b"access_token"] = [self.test_token] request.requestHeaders.getRawHeaders = mock_getRawHeaders() requester = self.get_success(self.auth.get_user_by_req(request)) - self.assertEquals(requester.user.to_string(), self.test_user) + self.assertEqual(requester.user.to_string(), self.test_user) def test_get_user_by_req_user_bad_token(self): self.store.get_user_by_access_token = simple_async_mock(None) @@ -109,7 +109,7 @@ def test_get_user_by_req_appservice_valid_token(self): request.args[b"access_token"] = [self.test_token] request.requestHeaders.getRawHeaders = mock_getRawHeaders() requester = self.get_success(self.auth.get_user_by_req(request)) - self.assertEquals(requester.user.to_string(), self.test_user) + self.assertEqual(requester.user.to_string(), self.test_user) def test_get_user_by_req_appservice_valid_token_good_ip(self): from netaddr import IPSet @@ -128,7 +128,7 @@ def test_get_user_by_req_appservice_valid_token_good_ip(self): request.args[b"access_token"] = [self.test_token] request.requestHeaders.getRawHeaders = mock_getRawHeaders() requester = self.get_success(self.auth.get_user_by_req(request)) - self.assertEquals(requester.user.to_string(), self.test_user) + self.assertEqual(requester.user.to_string(), self.test_user) def test_get_user_by_req_appservice_valid_token_bad_ip(self): from netaddr import IPSet @@ -195,7 +195,7 @@ def test_get_user_by_req_appservice_valid_token_valid_user_id(self): request.args[b"user_id"] = [masquerading_user_id] request.requestHeaders.getRawHeaders = mock_getRawHeaders() requester = self.get_success(self.auth.get_user_by_req(request)) - self.assertEquals( + self.assertEqual( requester.user.to_string(), masquerading_user_id.decode("utf8") ) @@ -242,10 +242,10 @@ def test_get_user_by_req_appservice_valid_token_valid_device_id(self): request.args[b"org.matrix.msc3202.device_id"] = [masquerading_device_id] request.requestHeaders.getRawHeaders = mock_getRawHeaders() requester = self.get_success(self.auth.get_user_by_req(request)) - self.assertEquals( + self.assertEqual( requester.user.to_string(), masquerading_user_id.decode("utf8") ) - self.assertEquals(requester.device_id, masquerading_device_id.decode("utf8")) + self.assertEqual(requester.device_id, masquerading_device_id.decode("utf8")) @override_config({"experimental_features": {"msc3202_device_masquerading": True}}) def test_get_user_by_req_appservice_valid_token_invalid_device_id(self): @@ -275,8 +275,8 @@ def test_get_user_by_req_appservice_valid_token_invalid_device_id(self): request.requestHeaders.getRawHeaders = mock_getRawHeaders() failure = self.get_failure(self.auth.get_user_by_req(request), AuthError) - self.assertEquals(failure.value.code, 400) - self.assertEquals(failure.value.errcode, Codes.EXCLUSIVE) + self.assertEqual(failure.value.code, 400) + self.assertEqual(failure.value.errcode, Codes.EXCLUSIVE) def test_get_user_by_req__puppeted_token__not_tracking_puppeted_mau(self): self.store.get_user_by_access_token = simple_async_mock( @@ -309,7 +309,7 @@ def test_get_user_by_req__puppeted_token__tracking_puppeted_mau(self): request.args[b"access_token"] = [self.test_token] request.requestHeaders.getRawHeaders = mock_getRawHeaders() self.get_success(self.auth.get_user_by_req(request)) - self.assertEquals(self.store.insert_client_ip.call_count, 2) + 
self.assertEqual(self.store.insert_client_ip.call_count, 2) def test_get_user_from_macaroon(self): self.store.get_user_by_access_token = simple_async_mock( @@ -369,9 +369,9 @@ def test_blocking_mau(self): self.store.get_monthly_active_count = simple_async_mock(lots_of_users) e = self.get_failure(self.auth.check_auth_blocking(), ResourceLimitError) - self.assertEquals(e.value.admin_contact, self.hs.config.server.admin_contact) - self.assertEquals(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) - self.assertEquals(e.value.code, 403) + self.assertEqual(e.value.admin_contact, self.hs.config.server.admin_contact) + self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) + self.assertEqual(e.value.code, 403) # Ensure does not throw an error self.store.get_monthly_active_count = simple_async_mock(small_number_of_users) @@ -473,9 +473,9 @@ def test_hs_disabled(self): self.auth_blocking._hs_disabled = True self.auth_blocking._hs_disabled_message = "Reason for being disabled" e = self.get_failure(self.auth.check_auth_blocking(), ResourceLimitError) - self.assertEquals(e.value.admin_contact, self.hs.config.server.admin_contact) - self.assertEquals(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) - self.assertEquals(e.value.code, 403) + self.assertEqual(e.value.admin_contact, self.hs.config.server.admin_contact) + self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) + self.assertEqual(e.value.code, 403) def test_hs_disabled_no_server_notices_user(self): """Check that 'hs_disabled_message' works correctly when there is no @@ -488,9 +488,9 @@ def test_hs_disabled_no_server_notices_user(self): self.auth_blocking._hs_disabled = True self.auth_blocking._hs_disabled_message = "Reason for being disabled" e = self.get_failure(self.auth.check_auth_blocking(), ResourceLimitError) - self.assertEquals(e.value.admin_contact, self.hs.config.server.admin_contact) - self.assertEquals(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) - self.assertEquals(e.value.code, 403) + self.assertEqual(e.value.admin_contact, self.hs.config.server.admin_contact) + self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) + self.assertEqual(e.value.code, 403) def test_server_notices_mxid_special_cased(self): self.auth_blocking._hs_disabled = True diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py index 973f0f7fa176..2525018e9515 100644 --- a/tests/api/test_filtering.py +++ b/tests/api/test_filtering.py @@ -364,7 +364,7 @@ def test_filter_presence_match(self): ) results = self.get_success(user_filter.filter_presence(events=events)) - self.assertEquals(events, results) + self.assertEqual(events, results) def test_filter_presence_no_match(self): user_filter_json = {"presence": {"types": ["m.*"]}} @@ -388,7 +388,7 @@ def test_filter_presence_no_match(self): ) results = self.get_success(user_filter.filter_presence(events=events)) - self.assertEquals([], results) + self.assertEqual([], results) def test_filter_room_state_match(self): user_filter_json = {"room": {"state": {"types": ["m.*"]}}} @@ -407,7 +407,7 @@ def test_filter_room_state_match(self): ) results = self.get_success(user_filter.filter_room_state(events=events)) - self.assertEquals(events, results) + self.assertEqual(events, results) def test_filter_room_state_no_match(self): user_filter_json = {"room": {"state": {"types": ["m.*"]}}} @@ -428,7 +428,7 @@ def test_filter_room_state_no_match(self): ) results = self.get_success(user_filter.filter_room_state(events)) - self.assertEquals([], results) + self.assertEqual([], results) def 
test_filter_rooms(self): definition = { @@ -444,7 +444,7 @@ def test_filter_rooms(self): filtered_room_ids = list(Filter(self.hs, definition).filter_rooms(room_ids)) - self.assertEquals(filtered_room_ids, ["!allowed:example.com"]) + self.assertEqual(filtered_room_ids, ["!allowed:example.com"]) @unittest.override_config({"experimental_features": {"msc3440_enabled": True}}) def test_filter_relations(self): @@ -486,7 +486,7 @@ async def events_have_relations(*args, **kwargs): Filter(self.hs, definition)._check_event_relations(events) ) ) - self.assertEquals(filtered_events, events[1:]) + self.assertEqual(filtered_events, events[1:]) def test_add_filter(self): user_filter_json = {"room": {"state": {"types": ["m.*"]}}} @@ -497,8 +497,8 @@ def test_add_filter(self): ) ) - self.assertEquals(filter_id, 0) - self.assertEquals( + self.assertEqual(filter_id, 0) + self.assertEqual( user_filter_json, ( self.get_success( @@ -524,6 +524,6 @@ def test_get_filter(self): ) ) - self.assertEquals(filter.get_filter_json(), user_filter_json) + self.assertEqual(filter.get_filter_json(), user_filter_json) - self.assertRegexpMatches(repr(filter), r"") + self.assertRegex(repr(filter), r"") diff --git a/tests/api/test_ratelimiting.py b/tests/api/test_ratelimiting.py index 4ef754a186c1..483d5463add2 100644 --- a/tests/api/test_ratelimiting.py +++ b/tests/api/test_ratelimiting.py @@ -14,19 +14,19 @@ def test_allowed_via_can_do_action(self): limiter.can_do_action(None, key="test_id", _time_now_s=0) ) self.assertTrue(allowed) - self.assertEquals(10.0, time_allowed) + self.assertEqual(10.0, time_allowed) allowed, time_allowed = self.get_success_or_raise( limiter.can_do_action(None, key="test_id", _time_now_s=5) ) self.assertFalse(allowed) - self.assertEquals(10.0, time_allowed) + self.assertEqual(10.0, time_allowed) allowed, time_allowed = self.get_success_or_raise( limiter.can_do_action(None, key="test_id", _time_now_s=10) ) self.assertTrue(allowed) - self.assertEquals(20.0, time_allowed) + self.assertEqual(20.0, time_allowed) def test_allowed_appservice_ratelimited_via_can_requester_do_action(self): appservice = ApplicationService( @@ -45,19 +45,19 @@ def test_allowed_appservice_ratelimited_via_can_requester_do_action(self): limiter.can_do_action(as_requester, _time_now_s=0) ) self.assertTrue(allowed) - self.assertEquals(10.0, time_allowed) + self.assertEqual(10.0, time_allowed) allowed, time_allowed = self.get_success_or_raise( limiter.can_do_action(as_requester, _time_now_s=5) ) self.assertFalse(allowed) - self.assertEquals(10.0, time_allowed) + self.assertEqual(10.0, time_allowed) allowed, time_allowed = self.get_success_or_raise( limiter.can_do_action(as_requester, _time_now_s=10) ) self.assertTrue(allowed) - self.assertEquals(20.0, time_allowed) + self.assertEqual(20.0, time_allowed) def test_allowed_appservice_via_can_requester_do_action(self): appservice = ApplicationService( @@ -76,19 +76,19 @@ def test_allowed_appservice_via_can_requester_do_action(self): limiter.can_do_action(as_requester, _time_now_s=0) ) self.assertTrue(allowed) - self.assertEquals(-1, time_allowed) + self.assertEqual(-1, time_allowed) allowed, time_allowed = self.get_success_or_raise( limiter.can_do_action(as_requester, _time_now_s=5) ) self.assertTrue(allowed) - self.assertEquals(-1, time_allowed) + self.assertEqual(-1, time_allowed) allowed, time_allowed = self.get_success_or_raise( limiter.can_do_action(as_requester, _time_now_s=10) ) self.assertTrue(allowed) - self.assertEquals(-1, time_allowed) + self.assertEqual(-1, time_allowed) def 
test_allowed_via_ratelimit(self): limiter = Ratelimiter( @@ -246,7 +246,7 @@ def test_multiple_actions(self): limiter.can_do_action(None, key="test_id", n_actions=3, _time_now_s=0) ) self.assertTrue(allowed) - self.assertEquals(10.0, time_allowed) + self.assertEqual(10.0, time_allowed) # Test that, after doing these 3 actions, we can't do any more action without # waiting. @@ -254,7 +254,7 @@ def test_multiple_actions(self): limiter.can_do_action(None, key="test_id", n_actions=1, _time_now_s=0) ) self.assertFalse(allowed) - self.assertEquals(10.0, time_allowed) + self.assertEqual(10.0, time_allowed) # Test that after waiting we can do only 1 action. allowed, time_allowed = self.get_success_or_raise( @@ -269,7 +269,7 @@ def test_multiple_actions(self): self.assertTrue(allowed) # The time allowed is the current time because we could still repeat the action # once. - self.assertEquals(10.0, time_allowed) + self.assertEqual(10.0, time_allowed) allowed, time_allowed = self.get_success_or_raise( limiter.can_do_action(None, key="test_id", n_actions=2, _time_now_s=10) @@ -277,7 +277,7 @@ def test_multiple_actions(self): self.assertFalse(allowed) # The time allowed doesn't change despite allowed being False because, while we # don't allow 2 actions, we could still do 1. - self.assertEquals(10.0, time_allowed) + self.assertEqual(10.0, time_allowed) # Test that after waiting a bit more we can do 2 actions. allowed, time_allowed = self.get_success_or_raise( @@ -286,4 +286,4 @@ def test_multiple_actions(self): self.assertTrue(allowed) # The time allowed is the current time because we could still repeat the action # once. - self.assertEquals(20.0, time_allowed) + self.assertEqual(20.0, time_allowed) diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py index b9dc4dfe1b1d..1cbb059357fa 100644 --- a/tests/appservice/test_scheduler.py +++ b/tests/appservice/test_scheduler.py @@ -71,7 +71,7 @@ def test_single_service_up_txn_sent(self): one_time_key_counts={}, unused_fallback_keys={}, ) - self.assertEquals(0, len(self.txnctrl.recoverers)) # no recoverer made + self.assertEqual(0, len(self.txnctrl.recoverers)) # no recoverer made txn.complete.assert_called_once_with(self.store) # txn completed def test_single_service_down(self): @@ -97,8 +97,8 @@ def test_single_service_down(self): one_time_key_counts={}, unused_fallback_keys={}, ) - self.assertEquals(0, txn.send.call_count) # txn not sent though - self.assertEquals(0, txn.complete.call_count) # or completed + self.assertEqual(0, txn.send.call_count) # txn not sent though + self.assertEqual(0, txn.complete.call_count) # or completed def test_single_service_up_txn_not_sent(self): # Test: The AS is up and the txn is not sent. 
A Recoverer is made and @@ -125,10 +125,10 @@ def test_single_service_up_txn_not_sent(self): one_time_key_counts={}, unused_fallback_keys={}, ) - self.assertEquals(1, self.recoverer_fn.call_count) # recoverer made - self.assertEquals(1, self.recoverer.recover.call_count) # and invoked - self.assertEquals(1, len(self.txnctrl.recoverers)) # and stored - self.assertEquals(0, txn.complete.call_count) # txn not completed + self.assertEqual(1, self.recoverer_fn.call_count) # recoverer made + self.assertEqual(1, self.recoverer.recover.call_count) # and invoked + self.assertEqual(1, len(self.txnctrl.recoverers)) # and stored + self.assertEqual(0, txn.complete.call_count) # txn not completed self.store.set_appservice_state.assert_called_once_with( service, ApplicationServiceState.DOWN # service marked as down ) @@ -161,17 +161,17 @@ def take_txn(*args, **kwargs): self.recoverer.recover() # shouldn't have called anything prior to waiting for exp backoff - self.assertEquals(0, self.store.get_oldest_unsent_txn.call_count) + self.assertEqual(0, self.store.get_oldest_unsent_txn.call_count) txn.send = simple_async_mock(True) txn.complete = simple_async_mock(None) # wait for exp backoff self.clock.advance_time(2) - self.assertEquals(1, txn.send.call_count) - self.assertEquals(1, txn.complete.call_count) + self.assertEqual(1, txn.send.call_count) + self.assertEqual(1, txn.complete.call_count) # 2 because it needs to get None to know there are no more txns - self.assertEquals(2, self.store.get_oldest_unsent_txn.call_count) + self.assertEqual(2, self.store.get_oldest_unsent_txn.call_count) self.callback.assert_called_once_with(self.recoverer) - self.assertEquals(self.recoverer.service, self.service) + self.assertEqual(self.recoverer.service, self.service) def test_recover_retry_txn(self): txn = Mock() @@ -187,26 +187,26 @@ def take_txn(*args, **kwargs): self.store.get_oldest_unsent_txn = Mock(side_effect=take_txn) self.recoverer.recover() - self.assertEquals(0, self.store.get_oldest_unsent_txn.call_count) + self.assertEqual(0, self.store.get_oldest_unsent_txn.call_count) txn.send = simple_async_mock(False) txn.complete = simple_async_mock(None) self.clock.advance_time(2) - self.assertEquals(1, txn.send.call_count) - self.assertEquals(0, txn.complete.call_count) - self.assertEquals(0, self.callback.call_count) + self.assertEqual(1, txn.send.call_count) + self.assertEqual(0, txn.complete.call_count) + self.assertEqual(0, self.callback.call_count) self.clock.advance_time(4) - self.assertEquals(2, txn.send.call_count) - self.assertEquals(0, txn.complete.call_count) - self.assertEquals(0, self.callback.call_count) + self.assertEqual(2, txn.send.call_count) + self.assertEqual(0, txn.complete.call_count) + self.assertEqual(0, self.callback.call_count) self.clock.advance_time(8) - self.assertEquals(3, txn.send.call_count) - self.assertEquals(0, txn.complete.call_count) - self.assertEquals(0, self.callback.call_count) + self.assertEqual(3, txn.send.call_count) + self.assertEqual(0, txn.complete.call_count) + self.assertEqual(0, self.callback.call_count) txn.send = simple_async_mock(True) # successfully send the txn pop_txn = True # returns the txn the first time, then no more. 
self.clock.advance_time(16) - self.assertEquals(1, txn.send.call_count) # new mock reset call count - self.assertEquals(1, txn.complete.call_count) + self.assertEqual(1, txn.send.call_count) # new mock reset call count + self.assertEqual(1, txn.complete.call_count) self.callback.assert_called_once_with(self.recoverer) @@ -241,13 +241,13 @@ def test_send_single_event_with_queue(self): self.scheduler.enqueue_for_appservice(service, events=[event2]) self.scheduler.enqueue_for_appservice(service, events=[event3]) self.txn_ctrl.send.assert_called_with(service, [event], [], [], None, None) - self.assertEquals(1, self.txn_ctrl.send.call_count) + self.assertEqual(1, self.txn_ctrl.send.call_count) # Resolve the send event: expect the queued events to be sent d.callback(service) self.txn_ctrl.send.assert_called_with( service, [event2, event3], [], [], None, None ) - self.assertEquals(2, self.txn_ctrl.send.call_count) + self.assertEqual(2, self.txn_ctrl.send.call_count) def test_multiple_service_queues(self): # Tests that each service has its own queue, and that they don't block @@ -281,7 +281,7 @@ def do_send(*args, **kwargs): # service srv_2_defer.callback(srv2) self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event2], [], [], None, None) - self.assertEquals(3, self.txn_ctrl.send.call_count) + self.assertEqual(3, self.txn_ctrl.send.call_count) def test_send_large_txns(self): srv_1_defer = defer.Deferred() @@ -312,7 +312,7 @@ def do_send(*args, **kwargs): self.txn_ctrl.send.assert_called_with( service, event_list[101:], [], [], None, None ) - self.assertEquals(3, self.txn_ctrl.send.call_count) + self.assertEqual(3, self.txn_ctrl.send.call_count) def test_send_single_ephemeral_no_queue(self): # Expect the event to be sent immediately. @@ -346,14 +346,14 @@ def test_send_single_ephemeral_with_queue(self): self.scheduler.enqueue_for_appservice(service, ephemeral=event_list_2) self.scheduler.enqueue_for_appservice(service, ephemeral=event_list_3) self.txn_ctrl.send.assert_called_with(service, [], event_list_1, [], None, None) - self.assertEquals(1, self.txn_ctrl.send.call_count) + self.assertEqual(1, self.txn_ctrl.send.call_count) # Resolve txn_ctrl.send d.callback(service) # Expect the queued events to be sent self.txn_ctrl.send.assert_called_with( service, [], event_list_2 + event_list_3, [], None, None ) - self.assertEquals(2, self.txn_ctrl.send.call_count) + self.assertEqual(2, self.txn_ctrl.send.call_count) def test_send_large_txns_ephemeral(self): d = defer.Deferred() @@ -369,4 +369,4 @@ def test_send_large_txns_ephemeral(self): ) d.callback(service) self.txn_ctrl.send.assert_called_with(service, [], second_chunk, [], None, None) - self.assertEquals(2, self.txn_ctrl.send.call_count) + self.assertEqual(2, self.txn_ctrl.send.call_count) diff --git a/tests/crypto/test_event_signing.py b/tests/crypto/test_event_signing.py index a72a0103d3b8..694020fbef85 100644 --- a/tests/crypto/test_event_signing.py +++ b/tests/crypto/test_event_signing.py @@ -63,14 +63,14 @@ def test_sign_minimal(self): self.assertTrue(hasattr(event, "hashes")) self.assertIn("sha256", event.hashes) - self.assertEquals( + self.assertEqual( event.hashes["sha256"], "6tJjLpXtggfke8UxFhAKg82QVkJzvKOVOOSjUDK4ZSI" ) self.assertTrue(hasattr(event, "signatures")) self.assertIn(HOSTNAME, event.signatures) self.assertIn(KEY_NAME, event.signatures["domain"]) - self.assertEquals( + self.assertEqual( event.signatures[HOSTNAME][KEY_NAME], "2Wptgo4CwmLo/Y8B8qinxApKaCkBG2fjTWB7AbP5Uy+" "aIbygsSdLOFzvdDjww8zUVKCmI02eP9xtyJxc/cLiBA", @@ -97,14 
+97,14 @@ def test_sign_message(self): self.assertTrue(hasattr(event, "hashes")) self.assertIn("sha256", event.hashes) - self.assertEquals( + self.assertEqual( event.hashes["sha256"], "onLKD1bGljeBWQhWZ1kaP9SorVmRQNdN5aM2JYU2n/g" ) self.assertTrue(hasattr(event, "signatures")) self.assertIn(HOSTNAME, event.signatures) self.assertIn(KEY_NAME, event.signatures["domain"]) - self.assertEquals( + self.assertEqual( event.signatures[HOSTNAME][KEY_NAME], "Wm+VzmOUOz08Ds+0NTWb1d4CZrVsJSikkeRxh6aCcUw" "u6pNC78FunoD7KNWzqFn241eYHYMGCA5McEiVPdhzBA", diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 3a4d5027195a..d00ef24ca806 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -76,7 +76,7 @@ class FakeRequest: @logcontext_clean class KeyringTestCase(unittest.HomeserverTestCase): def check_context(self, val, expected): - self.assertEquals(getattr(current_context(), "request", None), expected) + self.assertEqual(getattr(current_context(), "request", None), expected) return val def test_verify_json_objects_for_server_awaits_previous_requests(self): @@ -96,7 +96,7 @@ def test_verify_json_objects_for_server_awaits_previous_requests(self): async def first_lookup_fetch( server_name: str, key_ids: List[str], minimum_valid_until_ts: int ) -> Dict[str, FetchKeyResult]: - # self.assertEquals(current_context().request.id, "context_11") + # self.assertEqual(current_context().request.id, "context_11") self.assertEqual(server_name, "server10") self.assertEqual(key_ids, [get_key_id(key1)]) self.assertEqual(minimum_valid_until_ts, 0) @@ -137,7 +137,7 @@ async def first_lookup(): async def second_lookup_fetch( server_name: str, key_ids: List[str], minimum_valid_until_ts: int ) -> Dict[str, FetchKeyResult]: - # self.assertEquals(current_context().request.id, "context_12") + # self.assertEqual(current_context().request.id, "context_12") return {get_key_id(key1): FetchKeyResult(get_verify_key(key1), 100)} mock_fetcher.get_keys.reset_mock() diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py index 1dea09e4800d..45e3395b3361 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py @@ -395,7 +395,7 @@ def serialize(self, ev, fields): return serialize_event(ev, 1479807801915, only_event_fields=fields) def test_event_fields_works_with_keys(self): - self.assertEquals( + self.assertEqual( self.serialize( MockEvent(sender="@alice:localhost", room_id="!foo:bar"), ["room_id"] ), @@ -403,7 +403,7 @@ def test_event_fields_works_with_keys(self): ) def test_event_fields_works_with_nested_keys(self): - self.assertEquals( + self.assertEqual( self.serialize( MockEvent( sender="@alice:localhost", @@ -416,7 +416,7 @@ def test_event_fields_works_with_nested_keys(self): ) def test_event_fields_works_with_dot_keys(self): - self.assertEquals( + self.assertEqual( self.serialize( MockEvent( sender="@alice:localhost", @@ -429,7 +429,7 @@ def test_event_fields_works_with_dot_keys(self): ) def test_event_fields_works_with_nested_dot_keys(self): - self.assertEquals( + self.assertEqual( self.serialize( MockEvent( sender="@alice:localhost", @@ -445,7 +445,7 @@ def test_event_fields_works_with_nested_dot_keys(self): ) def test_event_fields_nops_with_unknown_keys(self): - self.assertEquals( + self.assertEqual( self.serialize( MockEvent( sender="@alice:localhost", @@ -458,7 +458,7 @@ def test_event_fields_nops_with_unknown_keys(self): ) def test_event_fields_nops_with_non_dict_keys(self): - self.assertEquals( + self.assertEqual( self.serialize( MockEvent( 
sender="@alice:localhost", @@ -471,7 +471,7 @@ def test_event_fields_nops_with_non_dict_keys(self): ) def test_event_fields_nops_with_array_keys(self): - self.assertEquals( + self.assertEqual( self.serialize( MockEvent( sender="@alice:localhost", @@ -484,7 +484,7 @@ def test_event_fields_nops_with_array_keys(self): ) def test_event_fields_all_fields_if_empty(self): - self.assertEquals( + self.assertEqual( self.serialize( MockEvent( type="foo", diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py index 9336181c96a5..9f1115dd23b8 100644 --- a/tests/federation/test_complexity.py +++ b/tests/federation/test_complexity.py @@ -50,7 +50,7 @@ def test_complexity_simple(self): channel = self.make_signed_federation_request( "GET", "/_matrix/federation/unstable/rooms/%s/complexity" % (room_1,) ) - self.assertEquals(200, channel.code) + self.assertEqual(200, channel.code) complexity = channel.json_body["v1"] self.assertTrue(complexity > 0, complexity) @@ -62,7 +62,7 @@ def test_complexity_simple(self): channel = self.make_signed_federation_request( "GET", "/_matrix/federation/unstable/rooms/%s/complexity" % (room_1,) ) - self.assertEquals(200, channel.code) + self.assertEqual(200, channel.code) complexity = channel.json_body["v1"] self.assertEqual(complexity, 1.23) diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py index d084919ef7d7..30e7e5093a17 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py @@ -59,7 +59,7 @@ def test_bad_request(self, query_content): "/_matrix/federation/v1/get_missing_events/%s" % (room_1,), query_content, ) - self.assertEquals(400, channel.code, channel.result) + self.assertEqual(400, channel.code, channel.result) self.assertEqual(channel.json_body["errcode"], "M_NOT_JSON") @@ -125,7 +125,7 @@ def test_without_event_id(self): channel = self.make_signed_federation_request( "GET", "/_matrix/federation/v1/state/%s" % (room_1,) ) - self.assertEquals(200, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) self.assertEqual( channel.json_body["room_version"], @@ -157,7 +157,7 @@ def test_needs_to_be_in_room(self): channel = self.make_signed_federation_request( "GET", "/_matrix/federation/v1/state/%s" % (room_1,) ) - self.assertEquals(403, channel.code, channel.result) + self.assertEqual(403, channel.code, channel.result) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") @@ -189,7 +189,7 @@ def _make_join(self, user_id) -> JsonDict: f"/_matrix/federation/v1/make_join/{self._room_id}/{user_id}" f"?ver={DEFAULT_ROOM_VERSION}", ) - self.assertEquals(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, 200, channel.json_body) return channel.json_body def test_send_join(self): @@ -209,7 +209,7 @@ def test_send_join(self): f"/_matrix/federation/v2/send_join/{self._room_id}/x", content=join_event_dict, ) - self.assertEquals(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, 200, channel.json_body) # we should get complete room state back returned_state = [ @@ -266,7 +266,7 @@ def test_send_join_partial_state(self): f"/_matrix/federation/v2/send_join/{self._room_id}/x?org.matrix.msc3706.partial_state=true", content=join_event_dict, ) - self.assertEquals(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, 200, channel.json_body) # expect a reduced room state returned_state = [ diff --git a/tests/federation/transport/test_knocking.py 
b/tests/federation/transport/test_knocking.py index adf0535d97e3..648a01618e8b 100644 --- a/tests/federation/transport/test_knocking.py +++ b/tests/federation/transport/test_knocking.py @@ -169,7 +169,7 @@ def check_knock_room_state_against_room_state( self.assertIn(event_type, expected_room_state) # Check the state content matches - self.assertEquals( + self.assertEqual( expected_room_state[event_type]["content"], event["content"] ) @@ -256,7 +256,7 @@ def test_room_state_returned_when_knocking(self): RoomVersions.V7.identifier, ), ) - self.assertEquals(200, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) # Note: We don't expect the knock membership event to be sent over federation as # part of the stripped room state, as the knocking homeserver already has that @@ -266,11 +266,11 @@ def test_room_state_returned_when_knocking(self): knock_event = channel.json_body["event"] # Check that the event has things we expect in it - self.assertEquals(knock_event["room_id"], room_id) - self.assertEquals(knock_event["sender"], fake_knocking_user_id) - self.assertEquals(knock_event["state_key"], fake_knocking_user_id) - self.assertEquals(knock_event["type"], EventTypes.Member) - self.assertEquals(knock_event["content"]["membership"], Membership.KNOCK) + self.assertEqual(knock_event["room_id"], room_id) + self.assertEqual(knock_event["sender"], fake_knocking_user_id) + self.assertEqual(knock_event["state_key"], fake_knocking_user_id) + self.assertEqual(knock_event["type"], EventTypes.Member) + self.assertEqual(knock_event["content"]["membership"], Membership.KNOCK) # Turn the event json dict into a proper event. # We won't sign it properly, but that's OK as we stub out event auth in `prepare` @@ -294,7 +294,7 @@ def test_room_state_returned_when_knocking(self): % (room_id, signed_knock_event.event_id), signed_knock_event_json, ) - self.assertEquals(200, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) # Check that we got the stripped room state in return room_state_events = channel.json_body["knock_state_events"] diff --git a/tests/federation/transport/test_server.py b/tests/federation/transport/test_server.py index eb62addda8c6..ce49d094d727 100644 --- a/tests/federation/transport/test_server.py +++ b/tests/federation/transport/test_server.py @@ -26,7 +26,7 @@ def test_blocked_public_room_list_over_federation(self): "GET", "/_matrix/federation/v1/publicRooms", ) - self.assertEquals(403, channel.code) + self.assertEqual(403, channel.code) @override_config({"allow_public_rooms_over_federation": True}) def test_open_public_room_list_over_federation(self): @@ -37,4 +37,4 @@ def test_open_public_room_list_over_federation(self): "GET", "/_matrix/federation/v1/publicRooms", ) - self.assertEquals(200, channel.code) + self.assertEqual(200, channel.code) diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index 6e0ec379630b..072e6bbcdd6e 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -147,8 +147,8 @@ def test_query_room_alias_exists(self): self.mock_as_api.query_alias.assert_called_once_with( interested_service, room_alias_str ) - self.assertEquals(result.room_id, room_id) - self.assertEquals(result.servers, servers) + self.assertEqual(result.room_id, room_id) + self.assertEqual(result.servers, servers) def test_get_3pe_protocols_no_appservices(self): self.mock_store.get_app_services.return_value = [] @@ -156,7 +156,7 @@ def 
test_get_3pe_protocols_no_appservices(self): defer.ensureDeferred(self.handler.get_3pe_protocols("my-protocol")) ) self.mock_as_api.get_3pe_protocol.assert_not_called() - self.assertEquals(response, {}) + self.assertEqual(response, {}) def test_get_3pe_protocols_no_protocols(self): service = self._mkservice(False, []) @@ -165,7 +165,7 @@ def test_get_3pe_protocols_no_protocols(self): defer.ensureDeferred(self.handler.get_3pe_protocols()) ) self.mock_as_api.get_3pe_protocol.assert_not_called() - self.assertEquals(response, {}) + self.assertEqual(response, {}) def test_get_3pe_protocols_protocol_no_response(self): service = self._mkservice(False, ["my-protocol"]) @@ -177,7 +177,7 @@ def test_get_3pe_protocols_protocol_no_response(self): self.mock_as_api.get_3pe_protocol.assert_called_once_with( service, "my-protocol" ) - self.assertEquals(response, {}) + self.assertEqual(response, {}) def test_get_3pe_protocols_select_one_protocol(self): service = self._mkservice(False, ["my-protocol"]) @@ -191,7 +191,7 @@ def test_get_3pe_protocols_select_one_protocol(self): self.mock_as_api.get_3pe_protocol.assert_called_once_with( service, "my-protocol" ) - self.assertEquals( + self.assertEqual( response, {"my-protocol": {"x-protocol-data": 42, "instances": []}} ) @@ -207,7 +207,7 @@ def test_get_3pe_protocols_one_protocol(self): self.mock_as_api.get_3pe_protocol.assert_called_once_with( service, "my-protocol" ) - self.assertEquals( + self.assertEqual( response, {"my-protocol": {"x-protocol-data": 42, "instances": []}} ) @@ -222,7 +222,7 @@ def test_get_3pe_protocols_multiple_protocol(self): defer.ensureDeferred(self.handler.get_3pe_protocols()) ) self.mock_as_api.get_3pe_protocol.assert_called() - self.assertEquals( + self.assertEqual( response, { "my-protocol": {"x-protocol-data": 42, "instances": []}, @@ -254,7 +254,7 @@ async def get_3pe_protocol(service, unusedProtocol): defer.ensureDeferred(self.handler.get_3pe_protocols()) ) # It's expected that the second service's data doesn't appear in the response - self.assertEquals( + self.assertEqual( response, { "my-protocol": { diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py index 65ab107d0e37..6e403a87c5d0 100644 --- a/tests/handlers/test_directory.py +++ b/tests/handlers/test_directory.py @@ -63,7 +63,7 @@ def test_get_local_association(self): result = self.get_success(self.handler.get_association(self.my_room)) - self.assertEquals({"room_id": "!8765qwer:test", "servers": ["test"]}, result) + self.assertEqual({"room_id": "!8765qwer:test", "servers": ["test"]}, result) def test_get_remote_association(self): self.mock_federation.make_query.return_value = make_awaitable( @@ -72,7 +72,7 @@ def test_get_remote_association(self): result = self.get_success(self.handler.get_association(self.remote_room)) - self.assertEquals( + self.assertEqual( {"room_id": "!8765qwer:test", "servers": ["test", "remote"]}, result ) self.mock_federation.make_query.assert_called_with( @@ -94,7 +94,7 @@ def test_incoming_fed_query(self): self.handler.on_directory_query({"room_alias": "#your-room:test"}) ) - self.assertEquals({"room_id": "!8765asdf:test", "servers": ["test"]}, response) + self.assertEqual({"room_id": "!8765asdf:test", "servers": ["test"]}, response) class TestCreateAlias(unittest.HomeserverTestCase): @@ -224,7 +224,7 @@ def test_delete_alias_creator(self): create_requester(self.test_user), self.room_alias ) ) - self.assertEquals(self.room_id, result) + self.assertEqual(self.room_id, result) # Confirm the alias is gone. 
self.get_failure( @@ -243,7 +243,7 @@ def test_delete_alias_admin(self): create_requester(self.admin_user), self.room_alias ) ) - self.assertEquals(self.room_id, result) + self.assertEqual(self.room_id, result) # Confirm the alias is gone. self.get_failure( @@ -269,7 +269,7 @@ def test_delete_alias_sufficient_power(self): create_requester(self.test_user), self.room_alias ) ) - self.assertEquals(self.room_id, result) + self.assertEqual(self.room_id, result) # Confirm the alias is gone. self.get_failure( @@ -411,7 +411,7 @@ def test_denied(self): b"directory/room/%23test%3Atest", {"room_id": room_id}, ) - self.assertEquals(403, channel.code, channel.result) + self.assertEqual(403, channel.code, channel.result) def test_allowed(self): room_id = self.helper.create_room_as(self.user_id) @@ -421,7 +421,7 @@ def test_allowed(self): b"directory/room/%23unofficial_test%3Atest", {"room_id": room_id}, ) - self.assertEquals(200, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) def test_denied_during_creation(self): """A room alias that is not allowed should be rejected during creation.""" @@ -443,8 +443,8 @@ def test_allowed_during_creation(self): "GET", b"directory/room/%23unofficial_test%3Atest", ) - self.assertEquals(200, channel.code, channel.result) - self.assertEquals(channel.json_body["room_id"], room_id) + self.assertEqual(200, channel.code, channel.result) + self.assertEqual(channel.json_body["room_id"], room_id) class TestCreatePublishedRoomACL(unittest.HomeserverTestCase): @@ -572,7 +572,7 @@ def prepare(self, reactor, clock, hs): channel = self.make_request( "PUT", b"directory/list/room/%s" % (room_id.encode("ascii"),), b"{}" ) - self.assertEquals(200, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) self.room_list_handler = hs.get_room_list_handler() self.directory_handler = hs.get_directory_handler() @@ -585,7 +585,7 @@ def test_disabling_room_list(self): # Room list is enabled so we should get some results channel = self.make_request("GET", b"publicRooms") - self.assertEquals(200, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) self.assertTrue(len(channel.json_body["chunk"]) > 0) self.room_list_handler.enable_room_list_search = False @@ -593,7 +593,7 @@ def test_disabling_room_list(self): # Room list disabled so we should get no results channel = self.make_request("GET", b"publicRooms") - self.assertEquals(200, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) self.assertTrue(len(channel.json_body["chunk"]) == 0) # Room list disabled so we shouldn't be allowed to publish rooms @@ -601,4 +601,4 @@ def test_disabling_room_list(self): channel = self.make_request( "PUT", b"directory/list/room/%s" % (room_id.encode("ascii"),), b"{}" ) - self.assertEquals(403, channel.code, channel.result) + self.assertEqual(403, channel.code, channel.result) diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index 61d28603ae30..6ddec9ecf1fe 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -61,11 +61,11 @@ def test_offline_to_online(self): self.assertTrue(persist_and_notify) self.assertTrue(state.currently_active) - self.assertEquals(new_state.state, state.state) - self.assertEquals(new_state.status_msg, state.status_msg) - self.assertEquals(state.last_federation_update_ts, now) + self.assertEqual(new_state.state, state.state) + self.assertEqual(new_state.status_msg, state.status_msg) + 
self.assertEqual(state.last_federation_update_ts, now) - self.assertEquals(wheel_timer.insert.call_count, 3) + self.assertEqual(wheel_timer.insert.call_count, 3) wheel_timer.insert.assert_has_calls( [ call(now=now, obj=user_id, then=new_state.last_active_ts + IDLE_TIMER), @@ -104,11 +104,11 @@ def test_online_to_online(self): self.assertFalse(persist_and_notify) self.assertTrue(federation_ping) self.assertTrue(state.currently_active) - self.assertEquals(new_state.state, state.state) - self.assertEquals(new_state.status_msg, state.status_msg) - self.assertEquals(state.last_federation_update_ts, now) + self.assertEqual(new_state.state, state.state) + self.assertEqual(new_state.status_msg, state.status_msg) + self.assertEqual(state.last_federation_update_ts, now) - self.assertEquals(wheel_timer.insert.call_count, 3) + self.assertEqual(wheel_timer.insert.call_count, 3) wheel_timer.insert.assert_has_calls( [ call(now=now, obj=user_id, then=new_state.last_active_ts + IDLE_TIMER), @@ -149,11 +149,11 @@ def test_online_to_online_last_active_noop(self): self.assertFalse(persist_and_notify) self.assertTrue(federation_ping) self.assertTrue(state.currently_active) - self.assertEquals(new_state.state, state.state) - self.assertEquals(new_state.status_msg, state.status_msg) - self.assertEquals(state.last_federation_update_ts, now) + self.assertEqual(new_state.state, state.state) + self.assertEqual(new_state.status_msg, state.status_msg) + self.assertEqual(state.last_federation_update_ts, now) - self.assertEquals(wheel_timer.insert.call_count, 3) + self.assertEqual(wheel_timer.insert.call_count, 3) wheel_timer.insert.assert_has_calls( [ call(now=now, obj=user_id, then=new_state.last_active_ts + IDLE_TIMER), @@ -191,11 +191,11 @@ def test_online_to_online_last_active(self): self.assertTrue(persist_and_notify) self.assertFalse(state.currently_active) - self.assertEquals(new_state.state, state.state) - self.assertEquals(new_state.status_msg, state.status_msg) - self.assertEquals(state.last_federation_update_ts, now) + self.assertEqual(new_state.state, state.state) + self.assertEqual(new_state.status_msg, state.status_msg) + self.assertEqual(state.last_federation_update_ts, now) - self.assertEquals(wheel_timer.insert.call_count, 2) + self.assertEqual(wheel_timer.insert.call_count, 2) wheel_timer.insert.assert_has_calls( [ call(now=now, obj=user_id, then=new_state.last_active_ts + IDLE_TIMER), @@ -227,10 +227,10 @@ def test_remote_ping_timer(self): self.assertFalse(persist_and_notify) self.assertFalse(federation_ping) self.assertFalse(state.currently_active) - self.assertEquals(new_state.state, state.state) - self.assertEquals(new_state.status_msg, state.status_msg) + self.assertEqual(new_state.state, state.state) + self.assertEqual(new_state.status_msg, state.status_msg) - self.assertEquals(wheel_timer.insert.call_count, 1) + self.assertEqual(wheel_timer.insert.call_count, 1) wheel_timer.insert.assert_has_calls( [ call( @@ -259,10 +259,10 @@ def test_online_to_offline(self): ) self.assertTrue(persist_and_notify) - self.assertEquals(new_state.state, state.state) - self.assertEquals(state.last_federation_update_ts, now) + self.assertEqual(new_state.state, state.state) + self.assertEqual(state.last_federation_update_ts, now) - self.assertEquals(wheel_timer.insert.call_count, 0) + self.assertEqual(wheel_timer.insert.call_count, 0) def test_online_to_idle(self): wheel_timer = Mock() @@ -281,12 +281,12 @@ def test_online_to_idle(self): ) self.assertTrue(persist_and_notify) - self.assertEquals(new_state.state, 
state.state) - self.assertEquals(state.last_federation_update_ts, now) - self.assertEquals(new_state.state, state.state) - self.assertEquals(new_state.status_msg, state.status_msg) + self.assertEqual(new_state.state, state.state) + self.assertEqual(state.last_federation_update_ts, now) + self.assertEqual(new_state.state, state.state) + self.assertEqual(new_state.status_msg, state.status_msg) - self.assertEquals(wheel_timer.insert.call_count, 1) + self.assertEqual(wheel_timer.insert.call_count, 1) wheel_timer.insert.assert_has_calls( [ call( @@ -357,8 +357,8 @@ def test_idle_timer(self): new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now) self.assertIsNotNone(new_state) - self.assertEquals(new_state.state, PresenceState.UNAVAILABLE) - self.assertEquals(new_state.status_msg, status_msg) + self.assertEqual(new_state.state, PresenceState.UNAVAILABLE) + self.assertEqual(new_state.status_msg, status_msg) def test_busy_no_idle(self): """ @@ -380,8 +380,8 @@ def test_busy_no_idle(self): new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now) self.assertIsNotNone(new_state) - self.assertEquals(new_state.state, PresenceState.BUSY) - self.assertEquals(new_state.status_msg, status_msg) + self.assertEqual(new_state.state, PresenceState.BUSY) + self.assertEqual(new_state.status_msg, status_msg) def test_sync_timeout(self): user_id = "@foo:bar" @@ -399,8 +399,8 @@ def test_sync_timeout(self): new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now) self.assertIsNotNone(new_state) - self.assertEquals(new_state.state, PresenceState.OFFLINE) - self.assertEquals(new_state.status_msg, status_msg) + self.assertEqual(new_state.state, PresenceState.OFFLINE) + self.assertEqual(new_state.status_msg, status_msg) def test_sync_online(self): user_id = "@foo:bar" @@ -420,8 +420,8 @@ def test_sync_online(self): ) self.assertIsNotNone(new_state) - self.assertEquals(new_state.state, PresenceState.ONLINE) - self.assertEquals(new_state.status_msg, status_msg) + self.assertEqual(new_state.state, PresenceState.ONLINE) + self.assertEqual(new_state.status_msg, status_msg) def test_federation_ping(self): user_id = "@foo:bar" @@ -440,7 +440,7 @@ def test_federation_ping(self): new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now) self.assertIsNotNone(new_state) - self.assertEquals(state, new_state) + self.assertEqual(state, new_state) def test_no_timeout(self): user_id = "@foo:bar" @@ -477,8 +477,8 @@ def test_federation_timeout(self): ) self.assertIsNotNone(new_state) - self.assertEquals(new_state.state, PresenceState.OFFLINE) - self.assertEquals(new_state.status_msg, status_msg) + self.assertEqual(new_state.state, PresenceState.OFFLINE) + self.assertEqual(new_state.status_msg, status_msg) def test_last_active(self): user_id = "@foo:bar" @@ -497,7 +497,7 @@ def test_last_active(self): new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now) self.assertIsNotNone(new_state) - self.assertEquals(state, new_state) + self.assertEqual(state, new_state) class PresenceHandlerTestCase(unittest.HomeserverTestCase): diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index 69e299fc1772..972cbac6e496 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py @@ -65,7 +65,7 @@ def test_get_my_name(self): displayname = self.get_success(self.handler.get_displayname(self.frank)) - self.assertEquals("Frank", displayname) + self.assertEqual("Frank", displayname) def 
test_set_my_name(self): self.get_success( @@ -74,7 +74,7 @@ def test_set_my_name(self): ) ) - self.assertEquals( + self.assertEqual( ( self.get_success( self.store.get_profile_displayname(self.frank.localpart) @@ -90,7 +90,7 @@ def test_set_my_name(self): ) ) - self.assertEquals( + self.assertEqual( ( self.get_success( self.store.get_profile_displayname(self.frank.localpart) @@ -118,7 +118,7 @@ def test_set_my_name_if_disabled(self): self.store.set_profile_displayname(self.frank.localpart, "Frank") ) - self.assertEquals( + self.assertEqual( ( self.get_success( self.store.get_profile_displayname(self.frank.localpart) @@ -150,7 +150,7 @@ def test_get_other_name(self): displayname = self.get_success(self.handler.get_displayname(self.alice)) - self.assertEquals(displayname, "Alice") + self.assertEqual(displayname, "Alice") self.mock_federation.make_query.assert_called_with( destination="remote", query_type="profile", @@ -172,7 +172,7 @@ def test_incoming_fed_query(self): ) ) - self.assertEquals({"displayname": "Caroline"}, response) + self.assertEqual({"displayname": "Caroline"}, response) def test_get_my_avatar(self): self.get_success( @@ -182,7 +182,7 @@ def test_get_my_avatar(self): ) avatar_url = self.get_success(self.handler.get_avatar_url(self.frank)) - self.assertEquals("http://my.server/me.png", avatar_url) + self.assertEqual("http://my.server/me.png", avatar_url) def test_set_my_avatar(self): self.get_success( @@ -193,7 +193,7 @@ def test_set_my_avatar(self): ) ) - self.assertEquals( + self.assertEqual( (self.get_success(self.store.get_profile_avatar_url(self.frank.localpart))), "http://my.server/pic.gif", ) @@ -207,7 +207,7 @@ def test_set_my_avatar(self): ) ) - self.assertEquals( + self.assertEqual( (self.get_success(self.store.get_profile_avatar_url(self.frank.localpart))), "http://my.server/me.png", ) @@ -235,7 +235,7 @@ def test_set_my_avatar_if_disabled(self): ) ) - self.assertEquals( + self.assertEqual( (self.get_success(self.store.get_profile_avatar_url(self.frank.localpart))), "http://my.server/me.png", ) diff --git a/tests/handlers/test_receipts.py b/tests/handlers/test_receipts.py index 5de89c873b94..5081b97573a0 100644 --- a/tests/handlers/test_receipts.py +++ b/tests/handlers/test_receipts.py @@ -314,4 +314,4 @@ def _test_filters_hidden( ): """Tests that the _filter_out_hidden returns the expected output""" filtered_events = self.event_source.filter_out_hidden(events, "@me:server.org") - self.assertEquals(filtered_events, expected_output) + self.assertEqual(filtered_events, expected_output) diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 51ee667ab459..45fd30cf4369 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -167,7 +167,7 @@ def test_user_is_created_and_logged_in_if_doesnt_exist(self): result_user_id, result_token = self.get_success( self.get_or_create_user(requester, frank.localpart, "Frankie") ) - self.assertEquals(result_user_id, user_id) + self.assertEqual(result_user_id, user_id) self.assertIsInstance(result_token, str) self.assertGreater(len(result_token), 20) @@ -183,7 +183,7 @@ def test_if_user_exists(self): result_user_id, result_token = self.get_success( self.get_or_create_user(requester, local_part, None) ) - self.assertEquals(result_user_id, user_id) + self.assertEqual(result_user_id, user_id) self.assertTrue(result_token is not None) @override_config({"limit_usage_by_mau": False}) diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py index 66b0bd4d1ae8..3aedc0767b0a 
100644 --- a/tests/handlers/test_sync.py +++ b/tests/handlers/test_sync.py @@ -69,7 +69,7 @@ def test_wait_for_sync_for_user_auth_blocking(self): self.sync_handler.wait_for_sync_for_user(requester, sync_config), ResourceLimitError, ) - self.assertEquals(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) + self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) self.auth_blocking._hs_disabled = False @@ -80,7 +80,7 @@ def test_wait_for_sync_for_user_auth_blocking(self): self.sync_handler.wait_for_sync_for_user(requester, sync_config), ResourceLimitError, ) - self.assertEquals(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) + self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) def test_unknown_room_version(self): """ @@ -122,7 +122,7 @@ def test_unknown_room_version(self): b"{}", tok, ) - self.assertEquals(200, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) # The rooms should appear in the sync response. result = self.get_success( diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index e461e03599e8..f91a80b9fa57 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -156,7 +156,7 @@ async def get_users_in_room(room_id): def test_started_typing_local(self): self.room_members = [U_APPLE, U_BANANA] - self.assertEquals(self.event_source.get_current_key(), 0) + self.assertEqual(self.event_source.get_current_key(), 0) self.get_success( self.handler.started_typing( @@ -169,13 +169,13 @@ def test_started_typing_local(self): self.on_new_event.assert_has_calls([call("typing_key", 1, rooms=[ROOM_ID])]) - self.assertEquals(self.event_source.get_current_key(), 1) + self.assertEqual(self.event_source.get_current_key(), 1) events = self.get_success( self.event_source.get_new_events( user=U_APPLE, from_key=0, limit=None, room_ids=[ROOM_ID], is_guest=False ) ) - self.assertEquals( + self.assertEqual( events[0], [ { @@ -220,7 +220,7 @@ def test_started_typing_remote_send(self): def test_started_typing_remote_recv(self): self.room_members = [U_APPLE, U_ONION] - self.assertEquals(self.event_source.get_current_key(), 0) + self.assertEqual(self.event_source.get_current_key(), 0) channel = self.make_request( "PUT", @@ -239,13 +239,13 @@ def test_started_typing_remote_recv(self): self.on_new_event.assert_has_calls([call("typing_key", 1, rooms=[ROOM_ID])]) - self.assertEquals(self.event_source.get_current_key(), 1) + self.assertEqual(self.event_source.get_current_key(), 1) events = self.get_success( self.event_source.get_new_events( user=U_APPLE, from_key=0, limit=None, room_ids=[ROOM_ID], is_guest=False ) ) - self.assertEquals( + self.assertEqual( events[0], [ { @@ -259,7 +259,7 @@ def test_started_typing_remote_recv(self): def test_started_typing_remote_recv_not_in_room(self): self.room_members = [U_APPLE, U_ONION] - self.assertEquals(self.event_source.get_current_key(), 0) + self.assertEqual(self.event_source.get_current_key(), 0) channel = self.make_request( "PUT", @@ -278,7 +278,7 @@ def test_started_typing_remote_recv_not_in_room(self): self.on_new_event.assert_not_called() - self.assertEquals(self.event_source.get_current_key(), 0) + self.assertEqual(self.event_source.get_current_key(), 0) events = self.get_success( self.event_source.get_new_events( user=U_APPLE, @@ -288,8 +288,8 @@ def test_started_typing_remote_recv_not_in_room(self): is_guest=False, ) ) - self.assertEquals(events[0], []) - self.assertEquals(events[1], 0) + self.assertEqual(events[0], []) + self.assertEqual(events[1], 0) 
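For reference, the motivation behind this bulk rename: `assertEquals` has been a deprecated alias of `assertEqual` since Python 3.2 and emits a `DeprecationWarning` whenever it is called. A minimal, self-contained sketch (not part of the patch itself) demonstrating the warning:

import unittest
import warnings


class AliasDemo(unittest.TestCase):
    """Shows that the deprecated assertEquals alias warns at call time."""

    def test_alias_warns(self):
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always")
            self.assertEquals(1, 1)  # deprecated alias of assertEqual
        self.assertTrue(
            any(issubclass(w.category, DeprecationWarning) for w in caught)
        )


if __name__ == "__main__":
    unittest.main()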
@override_config({"send_federation": True}) def test_stopped_typing(self): @@ -302,7 +302,7 @@ def test_stopped_typing(self): self.handler._member_typing_until[member] = 1002000 self.handler._room_typing[ROOM_ID] = {U_APPLE.to_string()} - self.assertEquals(self.event_source.get_current_key(), 0) + self.assertEqual(self.event_source.get_current_key(), 0) self.get_success( self.handler.stopped_typing( @@ -332,13 +332,13 @@ def test_stopped_typing(self): try_trailing_slash_on_400=True, ) - self.assertEquals(self.event_source.get_current_key(), 1) + self.assertEqual(self.event_source.get_current_key(), 1) events = self.get_success( self.event_source.get_new_events( user=U_APPLE, from_key=0, limit=None, room_ids=[ROOM_ID], is_guest=False ) ) - self.assertEquals( + self.assertEqual( events[0], [{"type": "m.typing", "room_id": ROOM_ID, "content": {"user_ids": []}}], ) @@ -346,7 +346,7 @@ def test_stopped_typing(self): def test_typing_timeout(self): self.room_members = [U_APPLE, U_BANANA] - self.assertEquals(self.event_source.get_current_key(), 0) + self.assertEqual(self.event_source.get_current_key(), 0) self.get_success( self.handler.started_typing( @@ -360,7 +360,7 @@ def test_typing_timeout(self): self.on_new_event.assert_has_calls([call("typing_key", 1, rooms=[ROOM_ID])]) self.on_new_event.reset_mock() - self.assertEquals(self.event_source.get_current_key(), 1) + self.assertEqual(self.event_source.get_current_key(), 1) events = self.get_success( self.event_source.get_new_events( user=U_APPLE, @@ -370,7 +370,7 @@ def test_typing_timeout(self): is_guest=False, ) ) - self.assertEquals( + self.assertEqual( events[0], [ { @@ -385,7 +385,7 @@ def test_typing_timeout(self): self.on_new_event.assert_has_calls([call("typing_key", 2, rooms=[ROOM_ID])]) - self.assertEquals(self.event_source.get_current_key(), 2) + self.assertEqual(self.event_source.get_current_key(), 2) events = self.get_success( self.event_source.get_new_events( user=U_APPLE, @@ -395,7 +395,7 @@ def test_typing_timeout(self): is_guest=False, ) ) - self.assertEquals( + self.assertEqual( events[0], [{"type": "m.typing", "room_id": ROOM_ID, "content": {"user_ids": []}}], ) @@ -414,7 +414,7 @@ def test_typing_timeout(self): self.on_new_event.assert_has_calls([call("typing_key", 3, rooms=[ROOM_ID])]) self.on_new_event.reset_mock() - self.assertEquals(self.event_source.get_current_key(), 3) + self.assertEqual(self.event_source.get_current_key(), 3) events = self.get_success( self.event_source.get_new_events( user=U_APPLE, @@ -424,7 +424,7 @@ def test_typing_timeout(self): is_guest=False, ) ) - self.assertEquals( + self.assertEqual( events[0], [ { diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index e159169e22d6..92012cd6f7e6 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -1042,7 +1042,7 @@ def test_disabling_room_list(self) -> None: b'{"search_term":"user2"}', access_token=u1_token, ) - self.assertEquals(200, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) self.assertTrue(len(channel.json_body["results"]) > 0) # Disable user directory and check search returns nothing @@ -1053,5 +1053,5 @@ def test_disabling_room_list(self) -> None: b'{"search_term":"user2"}', access_token=u1_token, ) - self.assertEquals(200, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) self.assertTrue(len(channel.json_body["results"]) == 0) diff --git a/tests/http/federation/test_srv_resolver.py 
b/tests/http/federation/test_srv_resolver.py index c49be33b9f7c..77ce8432ac53 100644 --- a/tests/http/federation/test_srv_resolver.py +++ b/tests/http/federation/test_srv_resolver.py @@ -65,9 +65,9 @@ def do_lookup(): servers = self.successResultOf(test_d) - self.assertEquals(len(servers), 1) - self.assertEquals(servers, cache[service_name]) - self.assertEquals(servers[0].host, host_name) + self.assertEqual(len(servers), 1) + self.assertEqual(servers, cache[service_name]) + self.assertEqual(servers[0].host, host_name) @defer.inlineCallbacks def test_from_cache_expired_and_dns_fail(self): @@ -88,8 +88,8 @@ def test_from_cache_expired_and_dns_fail(self): dns_client_mock.lookupService.assert_called_once_with(service_name) - self.assertEquals(len(servers), 1) - self.assertEquals(servers, cache[service_name]) + self.assertEqual(len(servers), 1) + self.assertEqual(servers, cache[service_name]) @defer.inlineCallbacks def test_from_cache(self): @@ -114,8 +114,8 @@ def test_from_cache(self): self.assertFalse(dns_client_mock.lookupService.called) - self.assertEquals(len(servers), 1) - self.assertEquals(servers, cache[service_name]) + self.assertEqual(len(servers), 1) + self.assertEqual(servers, cache[service_name]) @defer.inlineCallbacks def test_empty_cache(self): @@ -144,8 +144,8 @@ def test_name_error(self): servers = yield defer.ensureDeferred(resolver.resolve_service(service_name)) - self.assertEquals(len(servers), 0) - self.assertEquals(len(cache), 0) + self.assertEqual(len(servers), 0) + self.assertEqual(len(cache), 0) def test_disabled_service(self): """ @@ -201,6 +201,6 @@ def test_non_srv_answer(self): servers = self.successResultOf(resolve_d) - self.assertEquals(len(servers), 1) - self.assertEquals(servers, cache[service_name]) - self.assertEquals(servers[0].host, b"host") + self.assertEqual(len(servers), 1) + self.assertEqual(servers, cache[service_name]) + self.assertEqual(servers[0].host, b"host") diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py index eca6a443af78..17dc42fd37f0 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/slave/storage/test_events.py @@ -59,7 +59,7 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): def setUp(self): # Patch up the equality operator for events so that we can check - # whether lists of events match using assertEquals + # whether lists of events match using assertEqual self.unpatches = [patch__eq__(_EventInternalMetadata), patch__eq__(FrozenEvent)] return super().setUp() diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index 09c48e85c798..95282f078e77 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -1909,7 +1909,7 @@ def test_join_public_room(self) -> None: "/_matrix/client/r0/joined_rooms", access_token=self.second_tok, ) - self.assertEquals(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertEqual(self.public_room_id, channel.json_body["joined_rooms"][0]) def test_join_private_room_if_not_member(self) -> None: @@ -1957,7 +1957,7 @@ def test_join_private_room_if_member(self) -> None: "/_matrix/client/r0/joined_rooms", access_token=self.admin_user_tok, ) - self.assertEquals(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertEqual(private_room_id, channel.json_body["joined_rooms"][0]) # Join user to room. 
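The setUp comment in the slaved-event-store hunk above names a `patch__eq__` helper: equality is temporarily patched onto classes that lack value semantics so that `assertEqual` can compare lists of events. A rough sketch of that pattern, assuming a helper of the same shape (the real implementation may differ):

def patch__eq__(cls):
    # Remember any existing __eq__ defined on the class itself, so it
    # can be restored afterwards.
    orig_eq = cls.__dict__.get("__eq__")

    def attribute_eq(self, other):
        # Compare by attribute dict rather than by identity.
        return isinstance(other, cls) and self.__dict__ == other.__dict__

    cls.__eq__ = attribute_eq

    def unpatch():
        if orig_eq is None:
            del cls.__eq__
        else:
            cls.__eq__ = orig_eq

    return unpatch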
@@ -1980,7 +1980,7 @@ def test_join_private_room_if_member(self) -> None: "/_matrix/client/r0/joined_rooms", access_token=self.second_tok, ) - self.assertEquals(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertEqual(private_room_id, channel.json_body["joined_rooms"][0]) def test_join_private_room_if_owner(self) -> None: @@ -2010,7 +2010,7 @@ def test_join_private_room_if_owner(self) -> None: "/_matrix/client/r0/joined_rooms", access_token=self.second_tok, ) - self.assertEquals(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertEqual(private_room_id, channel.json_body["joined_rooms"][0]) def test_context_as_non_admin(self) -> None: @@ -2044,7 +2044,7 @@ def test_context_as_non_admin(self) -> None: % (room_id, events[midway]["event_id"]), access_token=tok, ) - self.assertEquals(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) def test_context_as_admin(self) -> None: @@ -2074,8 +2074,8 @@ def test_context_as_admin(self) -> None: % (room_id, events[midway]["event_id"]), access_token=self.admin_user_tok, ) - self.assertEquals(HTTPStatus.OK, channel.code, msg=channel.json_body) - self.assertEquals( + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) + self.assertEqual( channel.json_body["event"]["event_id"], events[midway]["event_id"] ) diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py index 008d635b705c..6c4462e74a53 100644 --- a/tests/rest/client/test_account.py +++ b/tests/rest/client/test_account.py @@ -104,7 +104,7 @@ def test_basic_password_reset(self): client_secret = "foobar" session_id = self._request_token(email, client_secret) - self.assertEquals(len(self.email_attempts), 1) + self.assertEqual(len(self.email_attempts), 1) link = self._get_link_from_email() self._validate_token(link) @@ -143,7 +143,7 @@ def reset(ip): client_secret = "foobar" session_id = self._request_token(email, client_secret, ip) - self.assertEquals(len(self.email_attempts), 1) + self.assertEqual(len(self.email_attempts), 1) link = self._get_link_from_email() self._validate_token(link) @@ -193,7 +193,7 @@ def test_basic_password_reset_canonicalise_email(self): client_secret = "foobar" session_id = self._request_token(email_passwort_reset, client_secret) - self.assertEquals(len(self.email_attempts), 1) + self.assertEqual(len(self.email_attempts), 1) link = self._get_link_from_email() self._validate_token(link) @@ -230,7 +230,7 @@ def test_cant_reset_password_without_clicking_link(self): client_secret = "foobar" session_id = self._request_token(email, client_secret) - self.assertEquals(len(self.email_attempts), 1) + self.assertEqual(len(self.email_attempts), 1) # Attempt to reset password without clicking the link self._reset_password(new_password, session_id, client_secret, expected_code=401) @@ -322,7 +322,7 @@ def _validate_token(self, link): shorthand=False, ) - self.assertEquals(200, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) # Now POST to the same endpoint, mimicking the same behaviour as clicking the # password reset confirm button @@ -337,7 +337,7 @@ def _validate_token(self, link): shorthand=False, content_is_form=True, ) - self.assertEquals(200, channel.code, channel.result) + self.assertEqual(200, 
channel.code, channel.result) def _get_link_from_email(self): assert self.email_attempts, "No emails have been sent" @@ -376,7 +376,7 @@ def _reset_password( }, }, ) - self.assertEquals(expected_code, channel.code, channel.result) + self.assertEqual(expected_code, channel.code, channel.result) class DeactivateTestCase(unittest.HomeserverTestCase): @@ -676,7 +676,7 @@ def test_add_email_if_disabled(self): client_secret = "foobar" session_id = self._request_token(self.email, client_secret) - self.assertEquals(len(self.email_attempts), 1) + self.assertEqual(len(self.email_attempts), 1) link = self._get_link_from_email() self._validate_token(link) @@ -780,7 +780,7 @@ def test_cant_add_email_without_clicking_link(self): client_secret = "foobar" session_id = self._request_token(self.email, client_secret) - self.assertEquals(len(self.email_attempts), 1) + self.assertEqual(len(self.email_attempts), 1) # Attempt to add email without clicking the link channel = self.make_request( @@ -981,7 +981,7 @@ def _validate_token(self, link): path = link.replace("https://example.com", "") channel = self.make_request("GET", path, shorthand=False) - self.assertEquals(200, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) def _get_link_from_email(self): assert self.email_attempts, "No emails have been sent" @@ -1010,7 +1010,7 @@ def _add_email(self, request_email, expected_email): client_secret = "foobar" session_id = self._request_token(request_email, client_secret) - self.assertEquals(len(self.email_attempts) - previous_email_attempts, 1) + self.assertEqual(len(self.email_attempts) - previous_email_attempts, 1) link = self._get_link_from_email() self._validate_token(link) diff --git a/tests/rest/client/test_events.py b/tests/rest/client/test_events.py index a90294003eac..145f247836a0 100644 --- a/tests/rest/client/test_events.py +++ b/tests/rest/client/test_events.py @@ -65,13 +65,13 @@ def test_stream_basic_permissions(self): channel = self.make_request( "GET", "/events?access_token=%s" % ("invalid" + self.token,) ) - self.assertEquals(channel.code, 401, msg=channel.result) + self.assertEqual(channel.code, 401, msg=channel.result) # valid token, expect content channel = self.make_request( "GET", "/events?access_token=%s&timeout=0" % (self.token,) ) - self.assertEquals(channel.code, 200, msg=channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) self.assertTrue("chunk" in channel.json_body) self.assertTrue("start" in channel.json_body) self.assertTrue("end" in channel.json_body) @@ -89,10 +89,10 @@ def test_stream_room_permissions(self): channel = self.make_request( "GET", "/events?access_token=%s&timeout=0" % (self.token,) ) - self.assertEquals(channel.code, 200, msg=channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) # We may get a presence event for ourselves down - self.assertEquals( + self.assertEqual( 0, len( [ @@ -153,4 +153,4 @@ def test_get_event_via_events(self): "/events/" + event_id, access_token=self.token, ) - self.assertEquals(channel.code, 200, msg=channel.result) + self.assertEqual(channel.code, 200, msg=channel.result) diff --git a/tests/rest/client/test_filter.py b/tests/rest/client/test_filter.py index a573cc3c2ee7..5c31a54421df 100644 --- a/tests/rest/client/test_filter.py +++ b/tests/rest/client/test_filter.py @@ -45,7 +45,7 @@ def test_add_filter(self): self.assertEqual(channel.json_body, {"filter_id": "0"}) filter = self.store.get_user_filter(user_localpart="apple", filter_id=0) self.pump() - 
self.assertEquals(filter.result, self.EXAMPLE_FILTER) + self.assertEqual(filter.result, self.EXAMPLE_FILTER) def test_add_filter_for_other_user(self): channel = self.make_request( @@ -55,7 +55,7 @@ def test_add_filter_for_other_user(self): ) self.assertEqual(channel.result["code"], b"403") - self.assertEquals(channel.json_body["errcode"], Codes.FORBIDDEN) + self.assertEqual(channel.json_body["errcode"], Codes.FORBIDDEN) def test_add_filter_non_local_user(self): _is_mine = self.hs.is_mine @@ -68,7 +68,7 @@ def test_add_filter_non_local_user(self): self.hs.is_mine = _is_mine self.assertEqual(channel.result["code"], b"403") - self.assertEquals(channel.json_body["errcode"], Codes.FORBIDDEN) + self.assertEqual(channel.json_body["errcode"], Codes.FORBIDDEN) def test_get_filter(self): filter_id = defer.ensureDeferred( @@ -83,7 +83,7 @@ def test_get_filter(self): ) self.assertEqual(channel.result["code"], b"200") - self.assertEquals(channel.json_body, self.EXAMPLE_FILTER) + self.assertEqual(channel.json_body, self.EXAMPLE_FILTER) def test_get_filter_non_existant(self): channel = self.make_request( @@ -91,7 +91,7 @@ def test_get_filter_non_existant(self): ) self.assertEqual(channel.result["code"], b"404") - self.assertEquals(channel.json_body["errcode"], Codes.NOT_FOUND) + self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) # Currently invalid params do not have an appropriate errcode # in errors.py diff --git a/tests/rest/client/test_groups.py b/tests/rest/client/test_groups.py index ad0425ae6591..c99f54cf4f7c 100644 --- a/tests/rest/client/test_groups.py +++ b/tests/rest/client/test_groups.py @@ -30,8 +30,8 @@ def test_rooms_limited_by_visibility(self): # Alice creates a group channel = self.make_request("POST", "/create_group", {"localpart": "spqr"}) - self.assertEquals(channel.code, 200, msg=channel.text_body) - self.assertEquals(channel.json_body, {"group_id": group_id}) + self.assertEqual(channel.code, 200, msg=channel.text_body) + self.assertEqual(channel.json_body, {"group_id": group_id}) # Bob creates a private room room_id = self.helper.create_room_as(self.room_creator_user_id, is_public=False) @@ -45,12 +45,12 @@ def test_rooms_limited_by_visibility(self): channel = self.make_request( "PUT", f"/groups/{group_id}/admin/rooms/{room_id}", {} ) - self.assertEquals(channel.code, 200, msg=channel.text_body) - self.assertEquals(channel.json_body, {}) + self.assertEqual(channel.code, 200, msg=channel.text_body) + self.assertEqual(channel.json_body, {}) # Alice now tries to retrieve the room list of the space. 
channel = self.make_request("GET", f"/groups/{group_id}/rooms") - self.assertEquals(channel.code, 200, msg=channel.text_body) - self.assertEquals( + self.assertEqual(channel.code, 200, msg=channel.text_body) + self.assertEqual( channel.json_body, {"chunk": [], "total_room_count_estimate": 0} ) diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py index d48defda6386..090d2d0a2927 100644 --- a/tests/rest/client/test_login.py +++ b/tests/rest/client/test_login.py @@ -136,10 +136,10 @@ def test_POST_ratelimiting_per_address(self) -> None: channel = self.make_request(b"POST", LOGIN_URL, params) if i == 5: - self.assertEquals(channel.result["code"], b"429", channel.result) + self.assertEqual(channel.result["code"], b"429", channel.result) retry_after_ms = int(channel.json_body["retry_after_ms"]) else: - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) # Since we're ratelimiting at 1 request/min, retry_after_ms should be lower # than 1min. @@ -154,7 +154,7 @@ def test_POST_ratelimiting_per_address(self) -> None: } channel = self.make_request(b"POST", LOGIN_URL, params) - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) @override_config( { @@ -181,10 +181,10 @@ def test_POST_ratelimiting_per_account(self) -> None: channel = self.make_request(b"POST", LOGIN_URL, params) if i == 5: - self.assertEquals(channel.result["code"], b"429", channel.result) + self.assertEqual(channel.result["code"], b"429", channel.result) retry_after_ms = int(channel.json_body["retry_after_ms"]) else: - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) # Since we're ratelimiting at 1 request/min, retry_after_ms should be lower # than 1min. @@ -199,7 +199,7 @@ def test_POST_ratelimiting_per_account(self) -> None: } channel = self.make_request(b"POST", LOGIN_URL, params) - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) @override_config( { @@ -226,10 +226,10 @@ def test_POST_ratelimiting_per_account_failed_attempts(self) -> None: channel = self.make_request(b"POST", LOGIN_URL, params) if i == 5: - self.assertEquals(channel.result["code"], b"429", channel.result) + self.assertEqual(channel.result["code"], b"429", channel.result) retry_after_ms = int(channel.json_body["retry_after_ms"]) else: - self.assertEquals(channel.result["code"], b"403", channel.result) + self.assertEqual(channel.result["code"], b"403", channel.result) # Since we're ratelimiting at 1 request/min, retry_after_ms should be lower # than 1min. 
@@ -244,7 +244,7 @@ def test_POST_ratelimiting_per_account_failed_attempts(self) -> None: } channel = self.make_request(b"POST", LOGIN_URL, params) - self.assertEquals(channel.result["code"], b"403", channel.result) + self.assertEqual(channel.result["code"], b"403", channel.result) @override_config({"session_lifetime": "24h"}) def test_soft_logout(self) -> None: @@ -252,8 +252,8 @@ def test_soft_logout(self) -> None: # we shouldn't be able to make requests without an access token channel = self.make_request(b"GET", TEST_URL) - self.assertEquals(channel.result["code"], b"401", channel.result) - self.assertEquals(channel.json_body["errcode"], "M_MISSING_TOKEN") + self.assertEqual(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.json_body["errcode"], "M_MISSING_TOKEN") # log in as normal params = { @@ -263,22 +263,22 @@ def test_soft_logout(self) -> None: } channel = self.make_request(b"POST", LOGIN_URL, params) - self.assertEquals(channel.code, 200, channel.result) + self.assertEqual(channel.code, 200, channel.result) access_token = channel.json_body["access_token"] device_id = channel.json_body["device_id"] # we should now be able to make requests with the access token channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEquals(channel.code, 200, channel.result) + self.assertEqual(channel.code, 200, channel.result) # time passes self.reactor.advance(24 * 3600) # ... and we should be soft-logouted channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEquals(channel.code, 401, channel.result) - self.assertEquals(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") - self.assertEquals(channel.json_body["soft_logout"], True) + self.assertEqual(channel.code, 401, channel.result) + self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") + self.assertEqual(channel.json_body["soft_logout"], True) # # test behaviour after deleting the expired device @@ -290,17 +290,17 @@ def test_soft_logout(self) -> None: # more requests with the expired token should still return a soft-logout self.reactor.advance(3600) channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEquals(channel.code, 401, channel.result) - self.assertEquals(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") - self.assertEquals(channel.json_body["soft_logout"], True) + self.assertEqual(channel.code, 401, channel.result) + self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") + self.assertEqual(channel.json_body["soft_logout"], True) # ... 
but if we delete that device, it will be a proper logout self._delete_device(access_token_2, "kermit", "monkey", device_id) channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEquals(channel.code, 401, channel.result) - self.assertEquals(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") - self.assertEquals(channel.json_body["soft_logout"], False) + self.assertEqual(channel.code, 401, channel.result) + self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") + self.assertEqual(channel.json_body["soft_logout"], False) def _delete_device( self, access_token: str, user_id: str, password: str, device_id: str @@ -309,7 +309,7 @@ def _delete_device( channel = self.make_request( b"DELETE", "devices/" + device_id, access_token=access_token ) - self.assertEquals(channel.code, 401, channel.result) + self.assertEqual(channel.code, 401, channel.result) # check it's a UI-Auth fail self.assertEqual( set(channel.json_body.keys()), @@ -332,7 +332,7 @@ def _delete_device( access_token=access_token, content={"auth": auth}, ) - self.assertEquals(channel.code, 200, channel.result) + self.assertEqual(channel.code, 200, channel.result) @override_config({"session_lifetime": "24h"}) def test_session_can_hard_logout_after_being_soft_logged_out(self) -> None: @@ -343,20 +343,20 @@ def test_session_can_hard_logout_after_being_soft_logged_out(self) -> None: # we should now be able to make requests with the access token channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEquals(channel.code, 200, channel.result) + self.assertEqual(channel.code, 200, channel.result) # time passes self.reactor.advance(24 * 3600) # ... and we should be soft-logouted channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEquals(channel.code, 401, channel.result) - self.assertEquals(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") - self.assertEquals(channel.json_body["soft_logout"], True) + self.assertEqual(channel.code, 401, channel.result) + self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") + self.assertEqual(channel.json_body["soft_logout"], True) # Now try to hard logout this session channel = self.make_request(b"POST", "/logout", access_token=access_token) - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) @override_config({"session_lifetime": "24h"}) def test_session_can_hard_logout_all_sessions_after_being_soft_logged_out( @@ -369,20 +369,20 @@ def test_session_can_hard_logout_all_sessions_after_being_soft_logged_out( # we should now be able to make requests with the access token channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEquals(channel.code, 200, channel.result) + self.assertEqual(channel.code, 200, channel.result) # time passes self.reactor.advance(24 * 3600) # ... 
and we should be soft-logouted channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEquals(channel.code, 401, channel.result) - self.assertEquals(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") - self.assertEquals(channel.json_body["soft_logout"], True) + self.assertEqual(channel.code, 401, channel.result) + self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") + self.assertEqual(channel.json_body["soft_logout"], True) # Now try to hard log out all of the user's sessions channel = self.make_request(b"POST", "/logout/all", access_token=access_token) - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) @skip_unless(has_saml2 and HAS_OIDC, "Requires SAML2 and OIDC") @@ -1129,7 +1129,7 @@ def test_login_appservice_user(self) -> None: b"POST", LOGIN_URL, params, access_token=self.service.token ) - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) def test_login_appservice_user_bot(self) -> None: """Test that the appservice bot can use /login""" @@ -1143,7 +1143,7 @@ def test_login_appservice_user_bot(self) -> None: b"POST", LOGIN_URL, params, access_token=self.service.token ) - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) def test_login_appservice_wrong_user(self) -> None: """Test that non-as users cannot login with the as token""" @@ -1157,7 +1157,7 @@ def test_login_appservice_wrong_user(self) -> None: b"POST", LOGIN_URL, params, access_token=self.service.token ) - self.assertEquals(channel.result["code"], b"403", channel.result) + self.assertEqual(channel.result["code"], b"403", channel.result) def test_login_appservice_wrong_as(self) -> None: """Test that as users cannot login with wrong as token""" @@ -1171,7 +1171,7 @@ def test_login_appservice_wrong_as(self) -> None: b"POST", LOGIN_URL, params, access_token=self.another_service.token ) - self.assertEquals(channel.result["code"], b"403", channel.result) + self.assertEqual(channel.result["code"], b"403", channel.result) def test_login_appservice_no_token(self) -> None: """Test that users must provide a token when using the appservice @@ -1185,7 +1185,7 @@ def test_login_appservice_no_token(self) -> None: } channel = self.make_request(b"POST", LOGIN_URL, params) - self.assertEquals(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.result["code"], b"401", channel.result) @skip_unless(HAS_OIDC, "requires OIDC") diff --git a/tests/rest/client/test_profile.py b/tests/rest/client/test_profile.py index b9647d5bd8cf..4239e1e61075 100644 --- a/tests/rest/client/test_profile.py +++ b/tests/rest/client/test_profile.py @@ -80,7 +80,7 @@ def test_set_displayname_too_long(self): def test_get_displayname_other(self): res = self._get_displayname(self.other) - self.assertEquals(res, "Bob") + self.assertEqual(res, "Bob") def test_set_displayname_other(self): channel = self.make_request( diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py index 2835d86e5b7c..4b95b8541c11 100644 --- a/tests/rest/client/test_register.py +++ b/tests/rest/client/test_register.py @@ -65,7 +65,7 @@ def test_POST_appservice_registration_valid(self): b"POST", self.url + b"?access_token=i_am_an_app_service", request_data ) - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], 
b"200", channel.result) det_data = {"user_id": user_id, "home_server": self.hs.hostname} self.assertDictContainsSubset(det_data, channel.json_body) @@ -87,7 +87,7 @@ def test_POST_appservice_registration_no_type(self): b"POST", self.url + b"?access_token=i_am_an_app_service", request_data ) - self.assertEquals(channel.result["code"], b"400", channel.result) + self.assertEqual(channel.result["code"], b"400", channel.result) def test_POST_appservice_registration_invalid(self): self.appservice = None # no application service exists @@ -98,21 +98,21 @@ def test_POST_appservice_registration_invalid(self): b"POST", self.url + b"?access_token=i_am_an_app_service", request_data ) - self.assertEquals(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.result["code"], b"401", channel.result) def test_POST_bad_password(self): request_data = json.dumps({"username": "kermit", "password": 666}) channel = self.make_request(b"POST", self.url, request_data) - self.assertEquals(channel.result["code"], b"400", channel.result) - self.assertEquals(channel.json_body["error"], "Invalid password") + self.assertEqual(channel.result["code"], b"400", channel.result) + self.assertEqual(channel.json_body["error"], "Invalid password") def test_POST_bad_username(self): request_data = json.dumps({"username": 777, "password": "monkey"}) channel = self.make_request(b"POST", self.url, request_data) - self.assertEquals(channel.result["code"], b"400", channel.result) - self.assertEquals(channel.json_body["error"], "Invalid username") + self.assertEqual(channel.result["code"], b"400", channel.result) + self.assertEqual(channel.json_body["error"], "Invalid username") def test_POST_user_valid(self): user_id = "@kermit:test" @@ -131,7 +131,7 @@ def test_POST_user_valid(self): "home_server": self.hs.hostname, "device_id": device_id, } - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) self.assertDictContainsSubset(det_data, channel.json_body) @override_config({"enable_registration": False}) @@ -141,9 +141,9 @@ def test_POST_disabled_registration(self): channel = self.make_request(b"POST", self.url, request_data) - self.assertEquals(channel.result["code"], b"403", channel.result) - self.assertEquals(channel.json_body["error"], "Registration has been disabled") - self.assertEquals(channel.json_body["errcode"], "M_FORBIDDEN") + self.assertEqual(channel.result["code"], b"403", channel.result) + self.assertEqual(channel.json_body["error"], "Registration has been disabled") + self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") def test_POST_guest_registration(self): self.hs.config.key.macaroon_secret_key = "test" @@ -152,7 +152,7 @@ def test_POST_guest_registration(self): channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}") det_data = {"home_server": self.hs.hostname, "device_id": "guest_device"} - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) self.assertDictContainsSubset(det_data, channel.json_body) def test_POST_disabled_guest_registration(self): @@ -160,8 +160,8 @@ def test_POST_disabled_guest_registration(self): channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}") - self.assertEquals(channel.result["code"], b"403", channel.result) - self.assertEquals(channel.json_body["error"], "Guest access is disabled") + self.assertEqual(channel.result["code"], b"403", channel.result) + 
self.assertEqual(channel.json_body["error"], "Guest access is disabled") @override_config({"rc_registration": {"per_second": 0.17, "burst_count": 5}}) def test_POST_ratelimiting_guest(self): @@ -170,16 +170,16 @@ def test_POST_ratelimiting_guest(self): channel = self.make_request(b"POST", url, b"{}") if i == 5: - self.assertEquals(channel.result["code"], b"429", channel.result) + self.assertEqual(channel.result["code"], b"429", channel.result) retry_after_ms = int(channel.json_body["retry_after_ms"]) else: - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) self.reactor.advance(retry_after_ms / 1000.0 + 1.0) channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}") - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) @override_config({"rc_registration": {"per_second": 0.17, "burst_count": 5}}) def test_POST_ratelimiting(self): @@ -194,16 +194,16 @@ def test_POST_ratelimiting(self): channel = self.make_request(b"POST", self.url, request_data) if i == 5: - self.assertEquals(channel.result["code"], b"429", channel.result) + self.assertEqual(channel.result["code"], b"429", channel.result) retry_after_ms = int(channel.json_body["retry_after_ms"]) else: - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) self.reactor.advance(retry_after_ms / 1000.0 + 1.0) channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}") - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) @override_config({"registration_requires_token": True}) def test_POST_registration_requires_token(self): @@ -231,7 +231,7 @@ def test_POST_registration_requires_token(self): # Request without auth to get flows and session channel = self.make_request(b"POST", self.url, json.dumps(params)) - self.assertEquals(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.result["code"], b"401", channel.result) flows = channel.json_body["flows"] # Synapse adds a dummy stage to differentiate flows where otherwise one # flow would be a subset of another flow. 
@@ -249,7 +249,7 @@ def test_POST_registration_requires_token(self): } request_data = json.dumps(params) channel = self.make_request(b"POST", self.url, request_data) - self.assertEquals(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.result["code"], b"401", channel.result) completed = channel.json_body["completed"] self.assertCountEqual([LoginType.REGISTRATION_TOKEN], completed) @@ -265,7 +265,7 @@ def test_POST_registration_requires_token(self): "home_server": self.hs.hostname, "device_id": device_id, } - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) self.assertDictContainsSubset(det_data, channel.json_body) # Check the `completed` counter has been incremented and pending is 0 @@ -276,8 +276,8 @@ def test_POST_registration_requires_token(self): retcols=["pending", "completed"], ) ) - self.assertEquals(res["completed"], 1) - self.assertEquals(res["pending"], 0) + self.assertEqual(res["completed"], 1) + self.assertEqual(res["pending"], 0) @override_config({"registration_requires_token": True}) def test_POST_registration_token_invalid(self): @@ -295,23 +295,23 @@ def test_POST_registration_token_invalid(self): "session": session, } channel = self.make_request(b"POST", self.url, json.dumps(params)) - self.assertEquals(channel.result["code"], b"401", channel.result) - self.assertEquals(channel.json_body["errcode"], Codes.MISSING_PARAM) - self.assertEquals(channel.json_body["completed"], []) + self.assertEqual(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.json_body["errcode"], Codes.MISSING_PARAM) + self.assertEqual(channel.json_body["completed"], []) # Test with non-string (invalid) params["auth"]["token"] = 1234 channel = self.make_request(b"POST", self.url, json.dumps(params)) - self.assertEquals(channel.result["code"], b"401", channel.result) - self.assertEquals(channel.json_body["errcode"], Codes.INVALID_PARAM) - self.assertEquals(channel.json_body["completed"], []) + self.assertEqual(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM) + self.assertEqual(channel.json_body["completed"], []) # Test with unknown token (invalid) params["auth"]["token"] = "1234" channel = self.make_request(b"POST", self.url, json.dumps(params)) - self.assertEquals(channel.result["code"], b"401", channel.result) - self.assertEquals(channel.json_body["errcode"], Codes.UNAUTHORIZED) - self.assertEquals(channel.json_body["completed"], []) + self.assertEqual(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.json_body["errcode"], Codes.UNAUTHORIZED) + self.assertEqual(channel.json_body["completed"], []) @override_config({"registration_requires_token": True}) def test_POST_registration_token_limit_uses(self): @@ -354,7 +354,7 @@ def test_POST_registration_token_limit_uses(self): retcol="pending", ) ) - self.assertEquals(pending, 1) + self.assertEqual(pending, 1) # Check auth fails when using token with session2 params2["auth"] = { @@ -363,9 +363,9 @@ def test_POST_registration_token_limit_uses(self): "session": session2, } channel = self.make_request(b"POST", self.url, json.dumps(params2)) - self.assertEquals(channel.result["code"], b"401", channel.result) - self.assertEquals(channel.json_body["errcode"], Codes.UNAUTHORIZED) - self.assertEquals(channel.json_body["completed"], []) + self.assertEqual(channel.result["code"], b"401", channel.result) + 
self.assertEqual(channel.json_body["errcode"], Codes.UNAUTHORIZED) + self.assertEqual(channel.json_body["completed"], []) # Complete registration with session1 params1["auth"]["type"] = LoginType.DUMMY @@ -378,14 +378,14 @@ def test_POST_registration_token_limit_uses(self): retcols=["pending", "completed"], ) ) - self.assertEquals(res["pending"], 0) - self.assertEquals(res["completed"], 1) + self.assertEqual(res["pending"], 0) + self.assertEqual(res["completed"], 1) # Check auth still fails when using token with session2 channel = self.make_request(b"POST", self.url, json.dumps(params2)) - self.assertEquals(channel.result["code"], b"401", channel.result) - self.assertEquals(channel.json_body["errcode"], Codes.UNAUTHORIZED) - self.assertEquals(channel.json_body["completed"], []) + self.assertEqual(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.json_body["errcode"], Codes.UNAUTHORIZED) + self.assertEqual(channel.json_body["completed"], []) @override_config({"registration_requires_token": True}) def test_POST_registration_token_expiry(self): @@ -417,9 +417,9 @@ def test_POST_registration_token_expiry(self): "session": session, } channel = self.make_request(b"POST", self.url, json.dumps(params)) - self.assertEquals(channel.result["code"], b"401", channel.result) - self.assertEquals(channel.json_body["errcode"], Codes.UNAUTHORIZED) - self.assertEquals(channel.json_body["completed"], []) + self.assertEqual(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.json_body["errcode"], Codes.UNAUTHORIZED) + self.assertEqual(channel.json_body["completed"], []) # Update token so it expires tomorrow self.get_success( @@ -504,7 +504,7 @@ def test_POST_registration_token_session_expiry(self): retcol="result", ) ) - self.assertEquals(db_to_json(result2), token) + self.assertEqual(db_to_json(result2), token) # Delete both sessions (mimics expiry) self.get_success( @@ -519,7 +519,7 @@ def test_POST_registration_token_session_expiry(self): retcol="pending", ) ) - self.assertEquals(pending, 0) + self.assertEqual(pending, 0) @override_config({"registration_requires_token": True}) def test_POST_registration_token_session_expiry_deleted_token(self): @@ -572,7 +572,7 @@ def test_POST_registration_token_session_expiry_deleted_token(self): def test_advertised_flows(self): channel = self.make_request(b"POST", self.url, b"{}") - self.assertEquals(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.result["code"], b"401", channel.result) flows = channel.json_body["flows"] # with the stock config, we only expect the dummy flow @@ -595,7 +595,7 @@ def test_advertised_flows(self): ) def test_advertised_flows_captcha_and_terms_and_3pids(self): channel = self.make_request(b"POST", self.url, b"{}") - self.assertEquals(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.result["code"], b"401", channel.result) flows = channel.json_body["flows"] self.assertCountEqual( @@ -627,7 +627,7 @@ def test_advertised_flows_captcha_and_terms_and_3pids(self): ) def test_advertised_flows_no_msisdn_email_required(self): channel = self.make_request(b"POST", self.url, b"{}") - self.assertEquals(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.result["code"], b"401", channel.result) flows = channel.json_body["flows"] # with the stock config, we expect all four combinations of 3pid @@ -671,7 +671,7 @@ def test_request_token_existing_email_inhibit_error(self): b"register/email/requestToken", {"client_secret": "foobar", "email": 
email, "send_attempt": 1}, ) - self.assertEquals(200, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) self.assertIsNotNone(channel.json_body.get("sid")) @@ -694,9 +694,9 @@ def test_reject_invalid_email(self): b"register/email/requestToken", {"client_secret": "foobar", "email": "email@@email", "send_attempt": 1}, ) - self.assertEquals(400, channel.code, channel.result) + self.assertEqual(400, channel.code, channel.result) # Check error to ensure that we're not erroring due to a bug in the test. - self.assertEquals( + self.assertEqual( channel.json_body, {"errcode": "M_UNKNOWN", "error": "Unable to parse email address"}, ) @@ -707,8 +707,8 @@ def test_reject_invalid_email(self): b"register/email/requestToken", {"client_secret": "foobar", "email": "email", "send_attempt": 1}, ) - self.assertEquals(400, channel.code, channel.result) - self.assertEquals( + self.assertEqual(400, channel.code, channel.result) + self.assertEqual( channel.json_body, {"errcode": "M_UNKNOWN", "error": "Unable to parse email address"}, ) @@ -720,8 +720,8 @@ def test_reject_invalid_email(self): b"register/email/requestToken", {"client_secret": "foobar", "email": email, "send_attempt": 1}, ) - self.assertEquals(400, channel.code, channel.result) - self.assertEquals( + self.assertEqual(400, channel.code, channel.result) + self.assertEqual( channel.json_body, {"errcode": "M_UNKNOWN", "error": "Unable to parse email address"}, ) @@ -745,7 +745,7 @@ def test_inhibit_user_in_use_error(self): # Check that /available correctly ignores the username provided despite the # username being already registered. channel = self.make_request("GET", "register/available?username=" + username) - self.assertEquals(200, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) # Test that when starting a UIA registration flow the request doesn't fail because # of a conflicting username @@ -799,14 +799,14 @@ def test_validity_period(self): # endpoint. channel = self.make_request(b"GET", "/sync", access_token=tok) - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) self.reactor.advance(datetime.timedelta(weeks=1).total_seconds()) channel = self.make_request(b"GET", "/sync", access_token=tok) - self.assertEquals(channel.result["code"], b"403", channel.result) - self.assertEquals( + self.assertEqual(channel.result["code"], b"403", channel.result) + self.assertEqual( channel.json_body["errcode"], Codes.EXPIRED_ACCOUNT, channel.result ) @@ -826,12 +826,12 @@ def test_manual_renewal(self): params = {"user_id": user_id} request_data = json.dumps(params) channel = self.make_request(b"POST", url, request_data, access_token=admin_tok) - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) # The specific endpoint doesn't matter, all we need is an authenticated # endpoint. 
channel = self.make_request(b"GET", "/sync", access_token=tok) - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) def test_manual_expire(self): user_id = self.register_user("kermit", "monkey") @@ -848,13 +848,13 @@ def test_manual_expire(self): } request_data = json.dumps(params) channel = self.make_request(b"POST", url, request_data, access_token=admin_tok) - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) # The specific endpoint doesn't matter, all we need is an authenticated # endpoint. channel = self.make_request(b"GET", "/sync", access_token=tok) - self.assertEquals(channel.result["code"], b"403", channel.result) - self.assertEquals( + self.assertEqual(channel.result["code"], b"403", channel.result) + self.assertEqual( channel.json_body["errcode"], Codes.EXPIRED_ACCOUNT, channel.result ) @@ -873,18 +873,18 @@ def test_logging_out_expired_user(self): } request_data = json.dumps(params) channel = self.make_request(b"POST", url, request_data, access_token=admin_tok) - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) # Try to log the user out channel = self.make_request(b"POST", "/logout", access_token=tok) - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) # Log the user in again (allowed for expired accounts) tok = self.login("kermit", "monkey") # Try to log out all of the user's sessions channel = self.make_request(b"POST", "/logout/all", access_token=tok) - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): @@ -959,7 +959,7 @@ def test_renewal_email(self): renewal_token = self.get_success(self.store.get_renewal_token_for_user(user_id)) url = "/_matrix/client/unstable/account_validity/renew?token=%s" % renewal_token channel = self.make_request(b"GET", url) - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) # Check that we're getting HTML back. content_type = channel.headers.getRawHeaders(b"Content-Type") @@ -977,7 +977,7 @@ def test_renewal_email(self): # Move 1 day forward. Try to renew with the same token again. url = "/_matrix/client/unstable/account_validity/renew?token=%s" % renewal_token channel = self.make_request(b"GET", url) - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) # Check that we're getting HTML back. content_type = channel.headers.getRawHeaders(b"Content-Type") @@ -997,14 +997,14 @@ def test_renewal_email(self): # succeed. self.reactor.advance(datetime.timedelta(days=3).total_seconds()) channel = self.make_request(b"GET", "/sync", access_token=tok) - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) def test_renewal_invalid_token(self): # Hit the renewal endpoint with an invalid token and check that it behaves as # expected, i.e. that it responds with 404 Not Found and the correct HTML. 
url = "/_matrix/client/unstable/account_validity/renew?token=123" channel = self.make_request(b"GET", url) - self.assertEquals(channel.result["code"], b"404", channel.result) + self.assertEqual(channel.result["code"], b"404", channel.result) # Check that we're getting HTML back. content_type = channel.headers.getRawHeaders(b"Content-Type") @@ -1028,7 +1028,7 @@ def test_manual_email_send(self): "/_matrix/client/unstable/account_validity/send_mail", access_token=tok, ) - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) self.assertEqual(len(self.email_attempts), 1) @@ -1103,7 +1103,7 @@ def test_manual_email_send_expired_account(self): "/_matrix/client/unstable/account_validity/send_mail", access_token=tok, ) - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) self.assertEqual(len(self.email_attempts), 1) @@ -1183,8 +1183,8 @@ def test_GET_token_valid(self): b"GET", f"{self.url}?token={token}", ) - self.assertEquals(channel.result["code"], b"200", channel.result) - self.assertEquals(channel.json_body["valid"], True) + self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.json_body["valid"], True) def test_GET_token_invalid(self): token = "1234" @@ -1192,8 +1192,8 @@ def test_GET_token_invalid(self): b"GET", f"{self.url}?token={token}", ) - self.assertEquals(channel.result["code"], b"200", channel.result) - self.assertEquals(channel.json_body["valid"], False) + self.assertEqual(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.json_body["valid"], False) @override_config( {"rc_registration_token_validity": {"per_second": 0.1, "burst_count": 5}} @@ -1208,10 +1208,10 @@ def test_GET_ratelimiting(self): ) if i == 5: - self.assertEquals(channel.result["code"], b"429", channel.result) + self.assertEqual(channel.result["code"], b"429", channel.result) retry_after_ms = int(channel.json_body["retry_after_ms"]) else: - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) self.reactor.advance(retry_after_ms / 1000.0 + 1.0) @@ -1219,4 +1219,4 @@ def test_GET_ratelimiting(self): b"GET", f"{self.url}?token={token}", ) - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index 5687dea48dca..8f7181103b2f 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -69,7 +69,7 @@ def test_send_relation(self): """ channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) event_id = channel.json_body["event_id"] @@ -78,7 +78,7 @@ def test_send_relation(self): "/rooms/%s/event/%s" % (self.room, event_id), access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) self.assert_dict( { @@ -103,7 +103,7 @@ def test_deny_invalid_event(self): parent_id="foo", content={"body": "foo", "msgtype": "m.text"}, ) - self.assertEquals(400, channel.code, channel.json_body) + self.assertEqual(400, channel.code, channel.json_body) # Unless that event is referenced from another event! 
self.get_success(
@@ -123,7 +123,7 @@ def test_deny_invalid_event(self):
parent_id="foo",
content={"body": "foo", "msgtype": "m.text"},
)
- self.assertEquals(200, channel.code, channel.json_body)
+ self.assertEqual(200, channel.code, channel.json_body)
def test_deny_invalid_room(self):
"""Test that we deny relations on non-existent events"""
@@ -136,15 +136,15 @@ def test_deny_invalid_room(self):
channel = self._send_relation(
RelationTypes.ANNOTATION, "m.reaction", parent_id=parent_id, key="A"
)
- self.assertEquals(400, channel.code, channel.json_body)
+ self.assertEqual(400, channel.code, channel.json_body)
def test_deny_double_react(self):
"""Test that we deny double (duplicate) reactions"""
channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a")
- self.assertEquals(200, channel.code, channel.json_body)
+ self.assertEqual(200, channel.code, channel.json_body)
channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
- self.assertEquals(400, channel.code, channel.json_body)
+ self.assertEqual(400, channel.code, channel.json_body)
def test_deny_forked_thread(self):
"""It is invalid to start a thread off a thread."""
@@ -154,7 +154,7 @@ def test_deny_forked_thread(self):
content={"msgtype": "m.text", "body": "foo"},
parent_id=self.parent_id,
)
- self.assertEquals(200, channel.code, channel.json_body)
+ self.assertEqual(200, channel.code, channel.json_body)
parent_id = channel.json_body["event_id"]
channel = self._send_relation(
@@ -163,16 +163,16 @@ def test_deny_forked_thread(self):
content={"msgtype": "m.text", "body": "foo"},
parent_id=parent_id,
)
- self.assertEquals(400, channel.code, channel.json_body)
+ self.assertEqual(400, channel.code, channel.json_body)
def test_basic_paginate_relations(self):
"""Tests that calling the pagination API correctly returns the latest relations."""
channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
- self.assertEquals(200, channel.code, channel.json_body)
+ self.assertEqual(200, channel.code, channel.json_body)
first_annotation_id = channel.json_body["event_id"]
channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b")
- self.assertEquals(200, channel.code, channel.json_body)
+ self.assertEqual(200, channel.code, channel.json_body)
second_annotation_id = channel.json_body["event_id"]
channel = self.make_request(
@@ -180,11 +180,11 @@ def test_basic_paginate_relations(self):
f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1",
access_token=self.user_token,
)
- self.assertEquals(200, channel.code, channel.json_body)
+ self.assertEqual(200, channel.code, channel.json_body)
# We expect to get back a single pagination result, which is the latest
# full relation event we sent above.
- self.assertEquals(len(channel.json_body["chunk"]), 1, channel.json_body) + self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) self.assert_dict( { "event_id": second_annotation_id, @@ -195,7 +195,7 @@ def test_basic_paginate_relations(self): ) # We also expect to get the original event (the id of which is self.parent_id) - self.assertEquals( + self.assertEqual( channel.json_body["original_event"]["event_id"], self.parent_id ) @@ -212,11 +212,11 @@ def test_basic_paginate_relations(self): f"/{self.parent_id}?limit=1&org.matrix.msc3715.dir=f", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) # We expect to get back a single pagination result, which is the earliest # full relation event we sent above. - self.assertEquals(len(channel.json_body["chunk"]), 1, channel.json_body) + self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) self.assert_dict( { "event_id": first_annotation_id, @@ -245,7 +245,7 @@ def test_repeated_paginate_relations(self): channel = self._send_relation( RelationTypes.ANNOTATION, "m.reaction", chr(ord("a") + idx) ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) expected_event_ids.append(channel.json_body["event_id"]) prev_token = "" @@ -260,12 +260,12 @@ def test_repeated_paginate_relations(self): f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1{from_token}", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"]) next_batch = channel.json_body.get("next_batch") - self.assertNotEquals(prev_token, next_batch) + self.assertNotEqual(prev_token, next_batch) prev_token = next_batch if not prev_token: @@ -273,7 +273,7 @@ def test_repeated_paginate_relations(self): # We paginated backwards, so reverse found_event_ids.reverse() - self.assertEquals(found_event_ids, expected_event_ids) + self.assertEqual(found_event_ids, expected_event_ids) # Reset and try again, but convert the tokens to the legacy format. prev_token = "" @@ -288,12 +288,12 @@ def test_repeated_paginate_relations(self): f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1{from_token}", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"]) next_batch = channel.json_body.get("next_batch") - self.assertNotEquals(prev_token, next_batch) + self.assertNotEqual(prev_token, next_batch) prev_token = next_batch if not prev_token: @@ -301,12 +301,12 @@ def test_repeated_paginate_relations(self): # We paginated backwards, so reverse found_event_ids.reverse() - self.assertEquals(found_event_ids, expected_event_ids) + self.assertEqual(found_event_ids, expected_event_ids) def test_pagination_from_sync_and_messages(self): """Pagination tokens from /sync and /messages can be used to paginate /relations.""" channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "A") - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) annotation_id = channel.json_body["event_id"] # Send an event after the relation events. 
self.helper.send(self.room, body="Latest event", tok=self.user_token) @@ -319,7 +319,7 @@ def test_pagination_from_sync_and_messages(self): channel = self.make_request( "GET", f"/sync?filter={filter}", access_token=self.user_token ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] sync_prev_batch = room_timeline["prev_batch"] self.assertIsNotNone(sync_prev_batch) @@ -335,7 +335,7 @@ def test_pagination_from_sync_and_messages(self): f"/rooms/{self.room}/messages?dir=b&limit=1", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) messages_end = channel.json_body["end"] self.assertIsNotNone(messages_end) # Ensure the relation event is not in the chunk returned from /messages. @@ -355,7 +355,7 @@ def test_pagination_from_sync_and_messages(self): f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?from={from_token}", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) # The relation should be in the returned chunk. self.assertIn( @@ -386,7 +386,7 @@ def test_aggregation_pagination_groups(self): key=key, access_token=access_tokens[idx], ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) idx += 1 idx %= len(access_tokens) @@ -404,7 +404,7 @@ def test_aggregation_pagination_groups(self): % (self.room, self.parent_id, from_token), access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) @@ -419,13 +419,13 @@ def test_aggregation_pagination_groups(self): next_batch = channel.json_body.get("next_batch") - self.assertNotEquals(prev_token, next_batch) + self.assertNotEqual(prev_token, next_batch) prev_token = next_batch if not prev_token: break - self.assertEquals(sent_groups, found_groups) + self.assertEqual(sent_groups, found_groups) def test_aggregation_pagination_within_group(self): """Test that we can paginate within an annotation group.""" @@ -449,14 +449,14 @@ def test_aggregation_pagination_within_group(self): key="👍", access_token=access_tokens[idx], ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) expected_event_ids.append(channel.json_body["event_id"]) idx += 1 # Also send a different type of reaction so that we test we don't see it channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a") - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) prev_token = "" found_event_ids: List[str] = [] @@ -473,7 +473,7 @@ def test_aggregation_pagination_within_group(self): f"/m.reaction/{encoded_key}?limit=1{from_token}", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) @@ -481,7 +481,7 @@ def test_aggregation_pagination_within_group(self): next_batch = channel.json_body.get("next_batch") - self.assertNotEquals(prev_token, next_batch) + self.assertNotEqual(prev_token, next_batch) prev_token = next_batch if not prev_token: 
@@ -489,7 +489,7 @@ def test_aggregation_pagination_within_group(self):
# We paginated backwards, so reverse
found_event_ids.reverse()
- self.assertEquals(found_event_ids, expected_event_ids)
+ self.assertEqual(found_event_ids, expected_event_ids)
# Reset and try again, but convert the tokens to the legacy format.
prev_token = ""
@@ -506,7 +506,7 @@ def test_aggregation_pagination_within_group(self):
f"/m.reaction/{encoded_key}?limit=1{from_token}",
access_token=self.user_token,
)
- self.assertEquals(200, channel.code, channel.json_body)
+ self.assertEqual(200, channel.code, channel.json_body)
self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body)
@@ -514,7 +514,7 @@ def test_aggregation_pagination_within_group(self):
next_batch = channel.json_body.get("next_batch")
- self.assertNotEquals(prev_token, next_batch)
+ self.assertNotEqual(prev_token, next_batch)
prev_token = next_batch
if not prev_token:
@@ -522,21 +522,21 @@ def test_aggregation_pagination_within_group(self):
# We paginated backwards, so reverse
found_event_ids.reverse()
- self.assertEquals(found_event_ids, expected_event_ids)
+ self.assertEqual(found_event_ids, expected_event_ids)
def test_aggregation(self):
"""Test that annotations get correctly aggregated."""
channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
- self.assertEquals(200, channel.code, channel.json_body)
+ self.assertEqual(200, channel.code, channel.json_body)
channel = self._send_relation(
RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token
)
- self.assertEquals(200, channel.code, channel.json_body)
+ self.assertEqual(200, channel.code, channel.json_body)
channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b")
- self.assertEquals(200, channel.code, channel.json_body)
+ self.assertEqual(200, channel.code, channel.json_body)
channel = self.make_request(
"GET",
"/_matrix/client/unstable/rooms/%s/aggregations/%s"
% (self.room, self.parent_id),
access_token=self.user_token,
)
- self.assertEquals(200, channel.code, channel.json_body)
+ self.assertEqual(200, channel.code, channel.json_body)
- self.assertEquals(
+ self.assertEqual(
channel.json_body,
{
"chunk": [
@@ -560,13 +560,13 @@ def test_aggregation_redactions(self):
"""Test that annotations get correctly aggregated after a redaction."""
channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
- self.assertEquals(200, channel.code, channel.json_body)
+ self.assertEqual(200, channel.code, channel.json_body)
to_redact_event_id = channel.json_body["event_id"]
channel = self._send_relation(
RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token
)
- self.assertEquals(200, channel.code, channel.json_body)
+ self.assertEqual(200, channel.code, channel.json_body)
# Now let's redact one of the 'a' reactions
channel = self.make_request(
"POST",
"/_matrix/client/r0/rooms/%s/redact/%s" % (self.room, to_redact_event_id),
access_token=self.user_token,
content={},
)
- self.assertEquals(200, channel.code, channel.json_body)
+ self.assertEqual(200, channel.code, channel.json_body)
channel = self.make_request(
"GET",
"/_matrix/client/unstable/rooms/%s/aggregations/%s"
% (self.room, self.parent_id),
access_token=self.user_token,
)
- self.assertEquals(200, channel.code, channel.json_body)
+ self.assertEqual(200, channel.code, channel.json_body)
- self.assertEquals(
+ self.assertEqual(
channel.json_body,
{"chunk": [{"type": "m.reaction", "key": "a", "count": 1}]},
)
@@ -599,7 +599,7 @@ def test_aggregation_must_be_annotation(self):
%
(self.room, self.parent_id, RelationTypes.REPLACE), access_token=self.user_token, ) - self.assertEquals(400, channel.code, channel.json_body) + self.assertEqual(400, channel.code, channel.json_body) @unittest.override_config( {"experimental_features": {"msc3440_enabled": True, "msc3666_enabled": True}} @@ -615,29 +615,29 @@ def test_bundled_aggregations(self): """ # Setup by sending a variety of relations. channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) channel = self._send_relation( RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b") - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test") - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) reply_1 = channel.json_body["event_id"] channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test") - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) reply_2 = channel.json_body["event_id"] channel = self._send_relation(RelationTypes.THREAD, "m.room.test") - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) channel = self._send_relation(RelationTypes.THREAD, "m.room.test") - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) thread_2 = channel.json_body["event_id"] def assert_bundle(event_json: JsonDict) -> None: @@ -655,7 +655,7 @@ def assert_bundle(event_json: JsonDict) -> None: ) # Check the values of each field. - self.assertEquals( + self.assertEqual( { "chunk": [ {"type": "m.reaction", "key": "a", "count": 2}, @@ -665,12 +665,12 @@ def assert_bundle(event_json: JsonDict) -> None: relations_dict[RelationTypes.ANNOTATION], ) - self.assertEquals( + self.assertEqual( {"chunk": [{"event_id": reply_1}, {"event_id": reply_2}]}, relations_dict[RelationTypes.REFERENCE], ) - self.assertEquals( + self.assertEqual( 2, relations_dict[RelationTypes.THREAD].get("count"), ) @@ -701,7 +701,7 @@ def assert_bundle(event_json: JsonDict) -> None: f"/rooms/{self.room}/event/{self.parent_id}", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) assert_bundle(channel.json_body) # Request the room messages. @@ -710,7 +710,7 @@ def assert_bundle(event_json: JsonDict) -> None: f"/rooms/{self.room}/messages?dir=b", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) assert_bundle(self._find_event_in_chunk(channel.json_body["chunk"])) # Request the room context. @@ -719,12 +719,12 @@ def assert_bundle(event_json: JsonDict) -> None: f"/rooms/{self.room}/context/{self.parent_id}", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) assert_bundle(channel.json_body["event"]) # Request sync. 
channel = self.make_request("GET", "/sync", access_token=self.user_token) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] self.assertTrue(room_timeline["limited"]) assert_bundle(self._find_event_in_chunk(room_timeline["events"])) @@ -737,7 +737,7 @@ def assert_bundle(event_json: JsonDict) -> None: content={"search_categories": {"room_events": {"search_term": "Hi"}}}, access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) chunk = [ result["result"] for result in channel.json_body["search_categories"]["room_events"][ @@ -751,42 +751,42 @@ def test_aggregation_get_event_for_annotation(self): when directly requested. """ channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) annotation_id = channel.json_body["event_id"] # Annotate the annotation. channel = self._send_relation( RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=annotation_id ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) channel = self.make_request( "GET", f"/rooms/{self.room}/event/{annotation_id}", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) self.assertIsNone(channel.json_body["unsigned"].get("m.relations")) def test_aggregation_get_event_for_thread(self): """Test that threads get bundled aggregations included when directly requested.""" channel = self._send_relation(RelationTypes.THREAD, "m.room.test") - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) thread_id = channel.json_body["event_id"] # Annotate the annotation. channel = self._send_relation( RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=thread_id ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) channel = self.make_request( "GET", f"/rooms/{self.room}/event/{thread_id}", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) - self.assertEquals( + self.assertEqual(200, channel.code, channel.json_body) + self.assertEqual( channel.json_body["unsigned"].get("m.relations"), { RelationTypes.ANNOTATION: { @@ -801,11 +801,11 @@ def test_aggregation_get_event_for_thread(self): f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) self.assertEqual(len(channel.json_body["chunk"]), 1) thread_message = channel.json_body["chunk"][0] - self.assertEquals( + self.assertEqual( thread_message["unsigned"].get("m.relations"), { RelationTypes.ANNOTATION: { @@ -905,7 +905,7 @@ def test_ignore_invalid_room(self): f"/_matrix/client/unstable/rooms/{room2}/relations/{parent_id}", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) self.assertEqual(channel.json_body["chunk"], []) # And when fetching aggregations. 
@@ -914,7 +914,7 @@ def test_ignore_invalid_room(self): f"/_matrix/client/unstable/rooms/{room2}/aggregations/{parent_id}", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) self.assertEqual(channel.json_body["chunk"], []) # And for bundled aggregations. @@ -923,7 +923,7 @@ def test_ignore_invalid_room(self): f"/rooms/{room2}/event/{parent_id}", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) self.assertNotIn("m.relations", channel.json_body["unsigned"]) @unittest.override_config({"experimental_features": {"msc3666_enabled": True}}) @@ -936,7 +936,7 @@ def test_edit(self): "m.room.message", content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body}, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) edit_event_id = channel.json_body["event_id"] @@ -958,8 +958,8 @@ def assert_bundle(event_json: JsonDict) -> None: f"/rooms/{self.room}/event/{self.parent_id}", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) - self.assertEquals(channel.json_body["content"], new_body) + self.assertEqual(200, channel.code, channel.json_body) + self.assertEqual(channel.json_body["content"], new_body) assert_bundle(channel.json_body) # Request the room messages. @@ -968,7 +968,7 @@ def assert_bundle(event_json: JsonDict) -> None: f"/rooms/{self.room}/messages?dir=b", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) assert_bundle(self._find_event_in_chunk(channel.json_body["chunk"])) # Request the room context. 
@@ -977,7 +977,7 @@ def assert_bundle(event_json: JsonDict) -> None:
f"/rooms/{self.room}/context/{self.parent_id}",
access_token=self.user_token,
)
- self.assertEquals(200, channel.code, channel.json_body)
+ self.assertEqual(200, channel.code, channel.json_body)
assert_bundle(channel.json_body["event"])
# Request sync, but limit the timeline so it becomes limited (and includes
@@ -988,7 +988,7 @@ def assert_bundle(event_json: JsonDict) -> None:
channel = self.make_request(
"GET", f"/sync?filter={filter}", access_token=self.user_token
)
- self.assertEquals(200, channel.code, channel.json_body)
+ self.assertEqual(200, channel.code, channel.json_body)
room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"]
self.assertTrue(room_timeline["limited"])
assert_bundle(self._find_event_in_chunk(room_timeline["events"]))
@@ -1001,7 +1001,7 @@ def assert_bundle(event_json: JsonDict) -> None:
content={"search_categories": {"room_events": {"search_term": "Hi"}}},
access_token=self.user_token,
)
- self.assertEquals(200, channel.code, channel.json_body)
+ self.assertEqual(200, channel.code, channel.json_body)
chunk = [
result["result"]
for result in channel.json_body["search_categories"]["room_events"][
@@ -1024,7 +1024,7 @@ def test_multi_edit(self):
"m.new_content": {"msgtype": "m.text", "body": "First edit"},
},
)
- self.assertEquals(200, channel.code, channel.json_body)
+ self.assertEqual(200, channel.code, channel.json_body)
new_body = {"msgtype": "m.text", "body": "I've been edited!"}
channel = self._send_relation(
@@ -1032,7 +1032,7 @@ def test_multi_edit(self):
"m.room.message",
content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body},
)
- self.assertEquals(200, channel.code, channel.json_body)
+ self.assertEqual(200, channel.code, channel.json_body)
edit_event_id = channel.json_body["event_id"]
@@ -1045,16 +1045,16 @@ def test_multi_edit(self):
"m.new_content": {"msgtype": "m.text", "body": "Edit, but wrong type"},
},
)
- self.assertEquals(200, channel.code, channel.json_body)
+ self.assertEqual(200, channel.code, channel.json_body)
channel = self.make_request(
"GET",
"/rooms/%s/event/%s" % (self.room, self.parent_id),
access_token=self.user_token,
)
- self.assertEquals(200, channel.code, channel.json_body)
+ self.assertEqual(200, channel.code, channel.json_body)
- self.assertEquals(channel.json_body["content"], new_body)
+ self.assertEqual(channel.json_body["content"], new_body)
relations_dict = channel.json_body["unsigned"].get("m.relations")
self.assertIn(RelationTypes.REPLACE, relations_dict)
@@ -1076,7 +1076,7 @@ def test_edit_reply(self):
"m.room.message",
content={"msgtype": "m.text", "body": "A reply!"},
)
- self.assertEquals(200, channel.code, channel.json_body)
+ self.assertEqual(200, channel.code, channel.json_body)
reply = channel.json_body["event_id"]
new_body = {"msgtype": "m.text", "body": "I've been edited!"}
channel = self._send_relation(
@@ -1086,7 +1086,7 @@ def test_edit_reply(self):
content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body},
parent_id=reply,
)
- self.assertEquals(200, channel.code, channel.json_body)
+ self.assertEqual(200, channel.code, channel.json_body)
edit_event_id = channel.json_body["event_id"]
@@ -1095,7 +1095,7 @@ def test_edit_reply(self):
"/rooms/%s/event/%s" % (self.room, reply),
access_token=self.user_token,
)
- self.assertEquals(200, channel.code, channel.json_body)
+ self.assertEqual(200, channel.code, channel.json_body)
# We expect to see the new body in the dict, as well as the reference
# metadata still intact.
@@ -1133,7 +1133,7 @@ def test_edit_thread(self): "m.room.message", content={"msgtype": "m.text", "body": "A threaded reply!"}, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) threaded_event_id = channel.json_body["event_id"] new_body = {"msgtype": "m.text", "body": "I've been edited!"} @@ -1143,7 +1143,7 @@ def test_edit_thread(self): content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body}, parent_id=threaded_event_id, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) # Fetch the thread root, to get the bundled aggregation for the thread. channel = self.make_request( @@ -1151,7 +1151,7 @@ def test_edit_thread(self): f"/rooms/{self.room}/event/{self.parent_id}", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) # We expect that the edit message appears in the thread summary in the # unsigned relations section. @@ -1161,9 +1161,7 @@ def test_edit_thread(self): thread_summary = relations_dict[RelationTypes.THREAD] self.assertIn("latest_event", thread_summary) latest_event_in_thread = thread_summary["latest_event"] - self.assertEquals( - latest_event_in_thread["content"]["body"], "I've been edited!" - ) + self.assertEqual(latest_event_in_thread["content"]["body"], "I've been edited!") def test_edit_edit(self): """Test that an edit cannot be edited.""" @@ -1177,7 +1175,7 @@ def test_edit_edit(self): "m.new_content": new_body, }, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) edit_event_id = channel.json_body["event_id"] # Edit the edit event. @@ -1191,7 +1189,7 @@ def test_edit_edit(self): }, parent_id=edit_event_id, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) # Request the original event. channel = self.make_request( @@ -1199,9 +1197,9 @@ def test_edit_edit(self): "/rooms/%s/event/%s" % (self.room, self.parent_id), access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) # The edit to the edit should be ignored. - self.assertEquals(channel.json_body["content"], new_body) + self.assertEqual(channel.json_body["content"], new_body) # The relations information should not include the edit to the edit. 
relations_dict = channel.json_body["unsigned"].get("m.relations") @@ -1234,7 +1232,7 @@ def test_relations_redaction_redacts_edits(self): "m.new_content": {"msgtype": "m.text", "body": "First edit"}, }, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) # Check the relation is returned channel = self.make_request( @@ -1243,10 +1241,10 @@ def test_relations_redaction_redacts_edits(self): % (self.room, original_event_id), access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) self.assertIn("chunk", channel.json_body) - self.assertEquals(len(channel.json_body["chunk"]), 1) + self.assertEqual(len(channel.json_body["chunk"]), 1) # Redact the original event channel = self.make_request( @@ -1256,7 +1254,7 @@ def test_relations_redaction_redacts_edits(self): access_token=self.user_token, content="{}", ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) # Try to check for remaining m.replace relations channel = self.make_request( @@ -1265,11 +1263,11 @@ def test_relations_redaction_redacts_edits(self): % (self.room, original_event_id), access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) # Check that no relations are returned self.assertIn("chunk", channel.json_body) - self.assertEquals(channel.json_body["chunk"], []) + self.assertEqual(channel.json_body["chunk"], []) def test_aggregations_redaction_prevents_access_to_aggregations(self): """Test that annotations of an event are redacted when the original event @@ -1283,7 +1281,7 @@ def test_aggregations_redaction_prevents_access_to_aggregations(self): channel = self._send_relation( RelationTypes.ANNOTATION, "m.reaction", key="👍", parent_id=original_event_id ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) # Redact the original channel = self.make_request( @@ -1297,7 +1295,7 @@ def test_aggregations_redaction_prevents_access_to_aggregations(self): access_token=self.user_token, content="{}", ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) # Check that aggregations returns zero channel = self.make_request( @@ -1306,15 +1304,15 @@ def test_aggregations_redaction_prevents_access_to_aggregations(self): % (self.room, original_event_id), access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) self.assertIn("chunk", channel.json_body) - self.assertEquals(channel.json_body["chunk"], []) + self.assertEqual(channel.json_body["chunk"], []) def test_unknown_relations(self): """Unknown relations should be accepted.""" channel = self._send_relation("m.relation.test", "m.room.test") - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) event_id = channel.json_body["event_id"] channel = self.make_request( @@ -1323,18 +1321,18 @@ def test_unknown_relations(self): % (self.room, self.parent_id), access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) # We expect to get back a single pagination result, which is the full # relation event we sent above. 
- self.assertEquals(len(channel.json_body["chunk"]), 1, channel.json_body) + self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) self.assert_dict( {"event_id": event_id, "sender": self.user_id, "type": "m.room.test"}, channel.json_body["chunk"][0], ) # We also expect to get the original event (the id of which is self.parent_id) - self.assertEquals( + self.assertEqual( channel.json_body["original_event"]["event_id"], self.parent_id ) @@ -1344,7 +1342,7 @@ def test_unknown_relations(self): "/rooms/%s/event/%s" % (self.room, self.parent_id), access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) self.assertNotIn("m.relations", channel.json_body["unsigned"]) # But unknown relations can be directly queried. @@ -1354,8 +1352,8 @@ def test_unknown_relations(self): % (self.room, self.parent_id), access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) - self.assertEquals(channel.json_body["chunk"], []) + self.assertEqual(200, channel.code, channel.json_body) + self.assertEqual(channel.json_body["chunk"], []) def _find_event_in_chunk(self, events: List[JsonDict]) -> JsonDict: """ @@ -1422,15 +1420,15 @@ def _create_user(self, localpart: str) -> Tuple[str, str]: def test_background_update(self): """Test the event_arbitrary_relations background update.""" channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) annotation_event_id_good = channel.json_body["event_id"] channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="A") - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) annotation_event_id_bad = channel.json_body["event_id"] channel = self._send_relation(RelationTypes.THREAD, "m.room.test") - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) thread_event_id = channel.json_body["event_id"] # Clean-up the table as if the inserts did not happen during event creation. 
@@ -1450,8 +1448,8 @@ def test_background_update(self): f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=10", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) - self.assertEquals( + self.assertEqual(200, channel.code, channel.json_body) + self.assertEqual( [ev["event_id"] for ev in channel.json_body["chunk"]], [annotation_event_id_good], ) @@ -1475,7 +1473,7 @@ def test_background_update(self): f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=10", access_token=self.user_token, ) - self.assertEquals(200, channel.code, channel.json_body) + self.assertEqual(200, channel.code, channel.json_body) self.assertCountEqual( [ev["event_id"] for ev in channel.json_body["chunk"]], [annotation_event_id_good, thread_event_id], diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index 1afd96b8f5d8..e0b11e726433 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -95,7 +95,7 @@ def prepare(self, reactor, clock, hs): channel = self.make_request( "PUT", self.created_rmid_msg_path, b'{"msgtype":"m.text","body":"test msg"}' ) - self.assertEquals(200, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) # set topic for public room channel = self.make_request( @@ -103,7 +103,7 @@ def prepare(self, reactor, clock, hs): ("rooms/%s/state/m.room.topic" % self.created_public_rmid).encode("ascii"), b'{"topic":"Public Room Topic"}', ) - self.assertEquals(200, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) # auth as user_id now self.helper.auth_user_id = self.user_id @@ -125,28 +125,28 @@ def send_msg_path(): "/rooms/%s/send/m.room.message/mid2" % (self.uncreated_rmid,), msg_content, ) - self.assertEquals(403, channel.code, msg=channel.result["body"]) + self.assertEqual(403, channel.code, msg=channel.result["body"]) # send message in created room not joined (no state), expect 403 channel = self.make_request("PUT", send_msg_path(), msg_content) - self.assertEquals(403, channel.code, msg=channel.result["body"]) + self.assertEqual(403, channel.code, msg=channel.result["body"]) # send message in created room and invited, expect 403 self.helper.invite( room=self.created_rmid, src=self.rmcreator_id, targ=self.user_id ) channel = self.make_request("PUT", send_msg_path(), msg_content) - self.assertEquals(403, channel.code, msg=channel.result["body"]) + self.assertEqual(403, channel.code, msg=channel.result["body"]) # send message in created room and joined, expect 200 self.helper.join(room=self.created_rmid, user=self.user_id) channel = self.make_request("PUT", send_msg_path(), msg_content) - self.assertEquals(200, channel.code, msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) # send message in created room and left, expect 403 self.helper.leave(room=self.created_rmid, user=self.user_id) channel = self.make_request("PUT", send_msg_path(), msg_content) - self.assertEquals(403, channel.code, msg=channel.result["body"]) + self.assertEqual(403, channel.code, msg=channel.result["body"]) def test_topic_perms(self): topic_content = b'{"topic":"My Topic Name"}' @@ -156,28 +156,28 @@ def test_topic_perms(self): channel = self.make_request( "PUT", "/rooms/%s/state/m.room.topic" % self.uncreated_rmid, topic_content ) - self.assertEquals(403, channel.code, msg=channel.result["body"]) + self.assertEqual(403, channel.code, msg=channel.result["body"]) channel = 
self.make_request( "GET", "/rooms/%s/state/m.room.topic" % self.uncreated_rmid ) - self.assertEquals(403, channel.code, msg=channel.result["body"]) + self.assertEqual(403, channel.code, msg=channel.result["body"]) # set/get topic in created PRIVATE room not joined, expect 403 channel = self.make_request("PUT", topic_path, topic_content) - self.assertEquals(403, channel.code, msg=channel.result["body"]) + self.assertEqual(403, channel.code, msg=channel.result["body"]) channel = self.make_request("GET", topic_path) - self.assertEquals(403, channel.code, msg=channel.result["body"]) + self.assertEqual(403, channel.code, msg=channel.result["body"]) # set topic in created PRIVATE room and invited, expect 403 self.helper.invite( room=self.created_rmid, src=self.rmcreator_id, targ=self.user_id ) channel = self.make_request("PUT", topic_path, topic_content) - self.assertEquals(403, channel.code, msg=channel.result["body"]) + self.assertEqual(403, channel.code, msg=channel.result["body"]) # get topic in created PRIVATE room and invited, expect 403 channel = self.make_request("GET", topic_path) - self.assertEquals(403, channel.code, msg=channel.result["body"]) + self.assertEqual(403, channel.code, msg=channel.result["body"]) # set/get topic in created PRIVATE room and joined, expect 200 self.helper.join(room=self.created_rmid, user=self.user_id) @@ -185,25 +185,25 @@ def test_topic_perms(self): # Only room ops can set topic by default self.helper.auth_user_id = self.rmcreator_id channel = self.make_request("PUT", topic_path, topic_content) - self.assertEquals(200, channel.code, msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) self.helper.auth_user_id = self.user_id channel = self.make_request("GET", topic_path) - self.assertEquals(200, channel.code, msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) self.assert_dict(json.loads(topic_content.decode("utf8")), channel.json_body) # set/get topic in created PRIVATE room and left, expect 403 self.helper.leave(room=self.created_rmid, user=self.user_id) channel = self.make_request("PUT", topic_path, topic_content) - self.assertEquals(403, channel.code, msg=channel.result["body"]) + self.assertEqual(403, channel.code, msg=channel.result["body"]) channel = self.make_request("GET", topic_path) - self.assertEquals(200, channel.code, msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) # get topic in PUBLIC room, not joined, expect 403 channel = self.make_request( "GET", "/rooms/%s/state/m.room.topic" % self.created_public_rmid ) - self.assertEquals(403, channel.code, msg=channel.result["body"]) + self.assertEqual(403, channel.code, msg=channel.result["body"]) # set topic in PUBLIC room, not joined, expect 403 channel = self.make_request( @@ -211,7 +211,7 @@ def test_topic_perms(self): "/rooms/%s/state/m.room.topic" % self.created_public_rmid, topic_content, ) - self.assertEquals(403, channel.code, msg=channel.result["body"]) + self.assertEqual(403, channel.code, msg=channel.result["body"]) def _test_get_membership( self, room=None, members: Iterable = frozenset(), expect_code=None @@ -219,7 +219,7 @@ def _test_get_membership( for member in members: path = "/rooms/%s/state/m.room.member/%s" % (room, member) channel = self.make_request("GET", path) - self.assertEquals(expect_code, channel.code) + self.assertEqual(expect_code, channel.code) def test_membership_basic_room_perms(self): # === room does not exist === @@ -478,16 +478,16 @@ 
class RoomsMemberListTestCase(RoomBase): def test_get_member_list(self): room_id = self.helper.create_room_as(self.user_id) channel = self.make_request("GET", "/rooms/%s/members" % room_id) - self.assertEquals(200, channel.code, msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) def test_get_member_list_no_room(self): channel = self.make_request("GET", "/rooms/roomdoesnotexist/members") - self.assertEquals(403, channel.code, msg=channel.result["body"]) + self.assertEqual(403, channel.code, msg=channel.result["body"]) def test_get_member_list_no_permission(self): room_id = self.helper.create_room_as("@some_other_guy:red") channel = self.make_request("GET", "/rooms/%s/members" % room_id) - self.assertEquals(403, channel.code, msg=channel.result["body"]) + self.assertEqual(403, channel.code, msg=channel.result["body"]) def test_get_member_list_no_permission_with_at_token(self): """ @@ -498,7 +498,7 @@ def test_get_member_list_no_permission_with_at_token(self): # first sync to get an at token channel = self.make_request("GET", "/sync") - self.assertEquals(200, channel.code) + self.assertEqual(200, channel.code) sync_token = channel.json_body["next_batch"] # check that permission is denied for @sid1:red to get the @@ -507,7 +507,7 @@ def test_get_member_list_no_permission_with_at_token(self): "GET", f"/rooms/{room_id}/members?at={sync_token}", ) - self.assertEquals(403, channel.code, msg=channel.result["body"]) + self.assertEqual(403, channel.code, msg=channel.result["body"]) def test_get_member_list_no_permission_former_member(self): """ @@ -520,14 +520,14 @@ def test_get_member_list_no_permission_former_member(self): # check that the user can see the member list to start with channel = self.make_request("GET", "/rooms/%s/members" % room_id) - self.assertEquals(200, channel.code, msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) # ban the user self.helper.change_membership(room_id, "@alice:red", self.user_id, "ban") # check the user can no longer see the member list channel = self.make_request("GET", "/rooms/%s/members" % room_id) - self.assertEquals(403, channel.code, msg=channel.result["body"]) + self.assertEqual(403, channel.code, msg=channel.result["body"]) def test_get_member_list_no_permission_former_member_with_at_token(self): """ @@ -541,14 +541,14 @@ def test_get_member_list_no_permission_former_member_with_at_token(self): # sync to get an at token channel = self.make_request("GET", "/sync") - self.assertEquals(200, channel.code) + self.assertEqual(200, channel.code) sync_token = channel.json_body["next_batch"] # check that the user can see the member list to start with channel = self.make_request( "GET", "/rooms/%s/members?at=%s" % (room_id, sync_token) ) - self.assertEquals(200, channel.code, msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) # ban the user (Note: the user is actually allowed to see this event and # state so that they know they're banned!) 
@@ -560,14 +560,14 @@ def test_get_member_list_no_permission_former_member_with_at_token(self): # now, with the original user, sync again to get a new at token channel = self.make_request("GET", "/sync") - self.assertEquals(200, channel.code) + self.assertEqual(200, channel.code) sync_token = channel.json_body["next_batch"] # check the user can no longer see the updated member list channel = self.make_request( "GET", "/rooms/%s/members?at=%s" % (room_id, sync_token) ) - self.assertEquals(403, channel.code, msg=channel.result["body"]) + self.assertEqual(403, channel.code, msg=channel.result["body"]) def test_get_member_list_mixed_memberships(self): room_creator = "@some_other_guy:red" @@ -576,17 +576,17 @@ def test_get_member_list_mixed_memberships(self): self.helper.invite(room=room_id, src=room_creator, targ=self.user_id) # can't see list if you're just invited. channel = self.make_request("GET", room_path) - self.assertEquals(403, channel.code, msg=channel.result["body"]) + self.assertEqual(403, channel.code, msg=channel.result["body"]) self.helper.join(room=room_id, user=self.user_id) # can see list now joined channel = self.make_request("GET", room_path) - self.assertEquals(200, channel.code, msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) self.helper.leave(room=room_id, user=self.user_id) # can see old list once left channel = self.make_request("GET", room_path) - self.assertEquals(200, channel.code, msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) class RoomsCreateTestCase(RoomBase): @@ -598,19 +598,19 @@ def test_post_room_no_keys(self): # POST with no config keys, expect new room id channel = self.make_request("POST", "/createRoom", "{}") - self.assertEquals(200, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) self.assertTrue("room_id" in channel.json_body) def test_post_room_visibility_key(self): # POST with visibility config key, expect new room id channel = self.make_request("POST", "/createRoom", b'{"visibility":"private"}') - self.assertEquals(200, channel.code) + self.assertEqual(200, channel.code) self.assertTrue("room_id" in channel.json_body) def test_post_room_custom_key(self): # POST with custom config keys, expect new room id channel = self.make_request("POST", "/createRoom", b'{"custom":"stuff"}') - self.assertEquals(200, channel.code) + self.assertEqual(200, channel.code) self.assertTrue("room_id" in channel.json_body) def test_post_room_known_and_unknown_keys(self): @@ -618,16 +618,16 @@ def test_post_room_known_and_unknown_keys(self): channel = self.make_request( "POST", "/createRoom", b'{"visibility":"private","custom":"things"}' ) - self.assertEquals(200, channel.code) + self.assertEqual(200, channel.code) self.assertTrue("room_id" in channel.json_body) def test_post_room_invalid_content(self): # POST with invalid content / paths, expect 400 channel = self.make_request("POST", "/createRoom", b'{"visibili') - self.assertEquals(400, channel.code) + self.assertEqual(400, channel.code) channel = self.make_request("POST", "/createRoom", b'["hello"]') - self.assertEquals(400, channel.code) + self.assertEqual(400, channel.code) def test_post_room_invitees_invalid_mxid(self): # POST with invalid invitee, see https://github.com/matrix-org/synapse/issues/4088 @@ -635,7 +635,7 @@ def test_post_room_invitees_invalid_mxid(self): channel = self.make_request( "POST", "/createRoom", b'{"invite":["@alice:example.com "]}' ) - self.assertEquals(400, 
channel.code) + self.assertEqual(400, channel.code) @unittest.override_config({"rc_invites": {"per_room": {"burst_count": 3}}}) def test_post_room_invitees_ratelimit(self): @@ -694,9 +694,9 @@ async def user_may_join_room( "/createRoom", {}, ) - self.assertEquals(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, 200, channel.json_body) - self.assertEquals(join_mock.call_count, 0) + self.assertEqual(join_mock.call_count, 0) class RoomTopicTestCase(RoomBase): @@ -712,54 +712,54 @@ def prepare(self, reactor, clock, hs): def test_invalid_puts(self): # missing keys or invalid json channel = self.make_request("PUT", self.path, "{}") - self.assertEquals(400, channel.code, msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) channel = self.make_request("PUT", self.path, '{"_name":"bo"}') - self.assertEquals(400, channel.code, msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) channel = self.make_request("PUT", self.path, '{"nao') - self.assertEquals(400, channel.code, msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) channel = self.make_request( "PUT", self.path, '[{"_name":"bo"},{"_name":"jill"}]' ) - self.assertEquals(400, channel.code, msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) channel = self.make_request("PUT", self.path, "text only") - self.assertEquals(400, channel.code, msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) channel = self.make_request("PUT", self.path, "") - self.assertEquals(400, channel.code, msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) # valid key, wrong type content = '{"topic":["Topic name"]}' channel = self.make_request("PUT", self.path, content) - self.assertEquals(400, channel.code, msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) def test_rooms_topic(self): # nothing should be there channel = self.make_request("GET", self.path) - self.assertEquals(404, channel.code, msg=channel.result["body"]) + self.assertEqual(404, channel.code, msg=channel.result["body"]) # valid put content = '{"topic":"Topic name"}' channel = self.make_request("PUT", self.path, content) - self.assertEquals(200, channel.code, msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) # valid get channel = self.make_request("GET", self.path) - self.assertEquals(200, channel.code, msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) self.assert_dict(json.loads(content), channel.json_body) def test_rooms_topic_with_extra_keys(self): # valid put with extra keys content = '{"topic":"Seasons","subtopic":"Summer"}' channel = self.make_request("PUT", self.path, content) - self.assertEquals(200, channel.code, msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) # valid get channel = self.make_request("GET", self.path) - self.assertEquals(200, channel.code, msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) self.assert_dict(json.loads(content), channel.json_body) @@ -775,22 +775,22 @@ def test_invalid_puts(self): path = "/rooms/%s/state/m.room.member/%s" % (self.room_id, self.user_id) # missing keys or invalid json channel = self.make_request("PUT", path, "{}") - self.assertEquals(400, channel.code, 
msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) channel = self.make_request("PUT", path, '{"_name":"bo"}') - self.assertEquals(400, channel.code, msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) channel = self.make_request("PUT", path, '{"nao') - self.assertEquals(400, channel.code, msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) channel = self.make_request("PUT", path, b'[{"_name":"bo"},{"_name":"jill"}]') - self.assertEquals(400, channel.code, msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) channel = self.make_request("PUT", path, "text only") - self.assertEquals(400, channel.code, msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) channel = self.make_request("PUT", path, "") - self.assertEquals(400, channel.code, msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) # valid keys, wrong types content = '{"membership":["%s","%s","%s"]}' % ( @@ -799,7 +799,7 @@ def test_invalid_puts(self): Membership.LEAVE, ) channel = self.make_request("PUT", path, content.encode("ascii")) - self.assertEquals(400, channel.code, msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) def test_rooms_members_self(self): path = "/rooms/%s/state/m.room.member/%s" % ( @@ -810,13 +810,13 @@ def test_rooms_members_self(self): # valid join message (NOOP since we made the room) content = '{"membership":"%s"}' % Membership.JOIN channel = self.make_request("PUT", path, content.encode("ascii")) - self.assertEquals(200, channel.code, msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) channel = self.make_request("GET", path, None) - self.assertEquals(200, channel.code, msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) expected_response = {"membership": Membership.JOIN} - self.assertEquals(expected_response, channel.json_body) + self.assertEqual(expected_response, channel.json_body) def test_rooms_members_other(self): self.other_id = "@zzsid1:red" @@ -828,11 +828,11 @@ def test_rooms_members_other(self): # valid invite message content = '{"membership":"%s"}' % Membership.INVITE channel = self.make_request("PUT", path, content) - self.assertEquals(200, channel.code, msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) channel = self.make_request("GET", path, None) - self.assertEquals(200, channel.code, msg=channel.result["body"]) - self.assertEquals(json.loads(content), channel.json_body) + self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(json.loads(content), channel.json_body) def test_rooms_members_other_custom_keys(self): self.other_id = "@zzsid1:red" @@ -847,11 +847,11 @@ def test_rooms_members_other_custom_keys(self): "Join us!", ) channel = self.make_request("PUT", path, content) - self.assertEquals(200, channel.code, msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) channel = self.make_request("GET", path, None) - self.assertEquals(200, channel.code, msg=channel.result["body"]) - self.assertEquals(json.loads(content), channel.json_body) + self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(json.loads(content), channel.json_body) class RoomInviteRatelimitTestCase(RoomBase): @@ -937,7 
+937,7 @@ async def user_may_join_room( False, ), ) - self.assertEquals( + self.assertEqual( callback_mock.call_args, expected_call_args, callback_mock.call_args, @@ -955,7 +955,7 @@ async def user_may_join_room( True, ), ) - self.assertEquals( + self.assertEqual( callback_mock.call_args, expected_call_args, callback_mock.call_args, @@ -1013,7 +1013,7 @@ def test_join_local_ratelimit_profile_change(self): # Update the display name for the user. path = "/_matrix/client/r0/profile/%s/displayname" % self.user_id channel = self.make_request("PUT", path, {"displayname": "John Doe"}) - self.assertEquals(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, 200, channel.json_body) # Check that all the rooms have been sent a profile update into. for room_id in room_ids: @@ -1023,10 +1023,10 @@ def test_join_local_ratelimit_profile_change(self): ) channel = self.make_request("GET", path) - self.assertEquals(channel.code, 200) + self.assertEqual(channel.code, 200) self.assertIn("displayname", channel.json_body) - self.assertEquals(channel.json_body["displayname"], "John Doe") + self.assertEqual(channel.json_body["displayname"], "John Doe") @unittest.override_config( {"rc_joins": {"local": {"per_second": 0.5, "burst_count": 3}}} @@ -1047,7 +1047,7 @@ def test_join_local_ratelimit_idempotent(self): # if all of these requests ended up joining the user to a room. for _ in range(4): channel = self.make_request("POST", path % room_id, {}) - self.assertEquals(channel.code, 200) + self.assertEqual(channel.code, 200) @unittest.override_config( { @@ -1078,40 +1078,40 @@ def test_invalid_puts(self): path = "/rooms/%s/send/m.room.message/mid1" % (urlparse.quote(self.room_id)) # missing keys or invalid json channel = self.make_request("PUT", path, b"{}") - self.assertEquals(400, channel.code, msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) channel = self.make_request("PUT", path, b'{"_name":"bo"}') - self.assertEquals(400, channel.code, msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) channel = self.make_request("PUT", path, b'{"nao') - self.assertEquals(400, channel.code, msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) channel = self.make_request("PUT", path, b'[{"_name":"bo"},{"_name":"jill"}]') - self.assertEquals(400, channel.code, msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) channel = self.make_request("PUT", path, b"text only") - self.assertEquals(400, channel.code, msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) channel = self.make_request("PUT", path, b"") - self.assertEquals(400, channel.code, msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) def test_rooms_messages_sent(self): path = "/rooms/%s/send/m.room.message/mid1" % (urlparse.quote(self.room_id)) content = b'{"body":"test","msgtype":{"type":"a"}}' channel = self.make_request("PUT", path, content) - self.assertEquals(400, channel.code, msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) # custom message types content = b'{"body":"test","msgtype":"test.custom.text"}' channel = self.make_request("PUT", path, content) - self.assertEquals(200, channel.code, msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) # m.text message type path = "/rooms/%s/send/m.room.message/mid2" % 
(urlparse.quote(self.room_id)) content = b'{"body":"test2","msgtype":"m.text"}' channel = self.make_request("PUT", path, content) - self.assertEquals(200, channel.code, msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) class RoomInitialSyncTestCase(RoomBase): @@ -1125,10 +1125,10 @@ def prepare(self, reactor, clock, hs): def test_initial_sync(self): channel = self.make_request("GET", "/rooms/%s/initialSync" % self.room_id) - self.assertEquals(200, channel.code) + self.assertEqual(200, channel.code) - self.assertEquals(self.room_id, channel.json_body["room_id"]) - self.assertEquals("join", channel.json_body["membership"]) + self.assertEqual(self.room_id, channel.json_body["room_id"]) + self.assertEqual("join", channel.json_body["membership"]) # Room state is easier to assert on if we unpack it into a dict state = {} @@ -1152,7 +1152,7 @@ def test_initial_sync(self): e["content"]["user_id"]: e for e in channel.json_body["presence"] } self.assertTrue(self.user_id in presence_by_user) - self.assertEquals("m.presence", presence_by_user[self.user_id]["type"]) + self.assertEqual("m.presence", presence_by_user[self.user_id]["type"]) class RoomMessageListTestCase(RoomBase): @@ -1168,9 +1168,9 @@ def test_topo_token_is_accepted(self): channel = self.make_request( "GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token) ) - self.assertEquals(200, channel.code) + self.assertEqual(200, channel.code) self.assertTrue("start" in channel.json_body) - self.assertEquals(token, channel.json_body["start"]) + self.assertEqual(token, channel.json_body["start"]) self.assertTrue("chunk" in channel.json_body) self.assertTrue("end" in channel.json_body) @@ -1179,9 +1179,9 @@ def test_stream_token_is_accepted_for_fwd_pagianation(self): channel = self.make_request( "GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token) ) - self.assertEquals(200, channel.code) + self.assertEqual(200, channel.code) self.assertTrue("start" in channel.json_body) - self.assertEquals(token, channel.json_body["start"]) + self.assertEqual(token, channel.json_body["start"]) self.assertTrue("chunk" in channel.json_body) self.assertTrue("end" in channel.json_body) @@ -2614,7 +2614,7 @@ def test_threepid_invite_spamcheck(self): }, access_token=self.tok, ) - self.assertEquals(channel.code, 200) + self.assertEqual(channel.code, 200) # Check that the callback was called with the right params. mock.assert_called_with(self.user_id, "email", email_to_invite, self.room_id) @@ -2636,7 +2636,7 @@ def test_threepid_invite_spamcheck(self): }, access_token=self.tok, ) - self.assertEquals(channel.code, 403) + self.assertEqual(channel.code, 403) # Also check that it stopped before calling _make_and_store_3pid_invite. make_invite_mock.assert_called_once() diff --git a/tests/rest/client/test_shadow_banned.py b/tests/rest/client/test_shadow_banned.py index 7d0e66b53412..2634c98dde40 100644 --- a/tests/rest/client/test_shadow_banned.py +++ b/tests/rest/client/test_shadow_banned.py @@ -96,7 +96,7 @@ def test_invite_3pid(self): {"id_server": "test", "medium": "email", "address": "test@test.test"}, access_token=self.banned_access_token, ) - self.assertEquals(200, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) # This should have raised an error earlier, but double check this wasn't called. 
identity_handler.lookup_3pid.assert_not_called() @@ -110,7 +110,7 @@ def test_create_room(self): {"visibility": "public", "invite": [self.other_user_id]}, access_token=self.banned_access_token, ) - self.assertEquals(200, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) room_id = channel.json_body["room_id"] # But the user wasn't actually invited. @@ -165,7 +165,7 @@ def test_upgrade(self): {"new_version": "6"}, access_token=self.banned_access_token, ) - self.assertEquals(200, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) # A new room_id should be returned. self.assertIn("replacement_room", channel.json_body) @@ -190,11 +190,11 @@ def test_typing(self): {"typing": True, "timeout": 30000}, access_token=self.banned_access_token, ) - self.assertEquals(200, channel.code) + self.assertEqual(200, channel.code) # There should be no typing events. event_source = self.hs.get_event_sources().sources.typing - self.assertEquals(event_source.get_current_key(), 0) + self.assertEqual(event_source.get_current_key(), 0) # The other user can join and send typing events. self.helper.join(room_id, self.other_user_id, tok=self.other_access_token) @@ -205,10 +205,10 @@ def test_typing(self): {"typing": True, "timeout": 30000}, access_token=self.other_access_token, ) - self.assertEquals(200, channel.code) + self.assertEqual(200, channel.code) # These appear in the room. - self.assertEquals(event_source.get_current_key(), 1) + self.assertEqual(event_source.get_current_key(), 1) events = self.get_success( event_source.get_new_events( user=UserID.from_string(self.other_user_id), @@ -218,7 +218,7 @@ def test_typing(self): is_guest=False, ) ) - self.assertEquals( + self.assertEqual( events[0], [ { @@ -257,7 +257,7 @@ def test_displayname(self): {"displayname": new_display_name}, access_token=self.banned_access_token, ) - self.assertEquals(200, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) self.assertEqual(channel.json_body, {}) # The user's display name should be updated. @@ -299,7 +299,7 @@ def test_room_displayname(self): {"membership": "join", "displayname": new_display_name}, access_token=self.banned_access_token, ) - self.assertEquals(200, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) self.assertIn("event_id", channel.json_body) # The display name in the room should not be changed. diff --git a/tests/rest/client/test_shared_rooms.py b/tests/rest/client/test_shared_rooms.py index c42c8aff6c2c..294f46fb95b9 100644 --- a/tests/rest/client/test_shared_rooms.py +++ b/tests/rest/client/test_shared_rooms.py @@ -91,9 +91,9 @@ def _check_shared_rooms_with( # Check shared rooms from user1's perspective. # We should see the one room in common channel = self._get_shared_rooms(u1_token, u2) - self.assertEquals(200, channel.code, channel.result) - self.assertEquals(len(channel.json_body["joined"]), 1) - self.assertEquals(channel.json_body["joined"][0], room_id_one) + self.assertEqual(200, channel.code, channel.result) + self.assertEqual(len(channel.json_body["joined"]), 1) + self.assertEqual(channel.json_body["joined"][0], room_id_one) # Create another room and invite user2 to it room_id_two = self.helper.create_room_as( @@ -104,8 +104,8 @@ def _check_shared_rooms_with( # Check shared rooms again. We should now see both rooms. 
channel = self._get_shared_rooms(u1_token, u2) - self.assertEquals(200, channel.code, channel.result) - self.assertEquals(len(channel.json_body["joined"]), 2) + self.assertEqual(200, channel.code, channel.result) + self.assertEqual(len(channel.json_body["joined"]), 2) for room_id_id in channel.json_body["joined"]: self.assertIn(room_id_id, [room_id_one, room_id_two]) @@ -125,18 +125,18 @@ def test_shared_room_list_after_leave(self): # Assert user directory is not empty channel = self._get_shared_rooms(u1_token, u2) - self.assertEquals(200, channel.code, channel.result) - self.assertEquals(len(channel.json_body["joined"]), 1) - self.assertEquals(channel.json_body["joined"][0], room) + self.assertEqual(200, channel.code, channel.result) + self.assertEqual(len(channel.json_body["joined"]), 1) + self.assertEqual(channel.json_body["joined"][0], room) self.helper.leave(room, user=u1, tok=u1_token) # Check user1's view of shared rooms with user2 channel = self._get_shared_rooms(u1_token, u2) - self.assertEquals(200, channel.code, channel.result) - self.assertEquals(len(channel.json_body["joined"]), 0) + self.assertEqual(200, channel.code, channel.result) + self.assertEqual(len(channel.json_body["joined"]), 0) # Check user2's view of shared rooms with user1 channel = self._get_shared_rooms(u2_token, u1) - self.assertEquals(200, channel.code, channel.result) - self.assertEquals(len(channel.json_body["joined"]), 0) + self.assertEqual(200, channel.code, channel.result) + self.assertEqual(len(channel.json_body["joined"]), 0) diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index 69b4ef537843..435101395204 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -237,10 +237,10 @@ def test_sync_backwards_typing(self) -> None: typing_url % (room, other_user_id, other_access_token), b'{"typing": true, "timeout": 30000}', ) - self.assertEquals(200, channel.code) + self.assertEqual(200, channel.code) channel = self.make_request("GET", "/sync?access_token=%s" % (access_token,)) - self.assertEquals(200, channel.code) + self.assertEqual(200, channel.code) next_batch = channel.json_body["next_batch"] # Stop typing. @@ -249,7 +249,7 @@ def test_sync_backwards_typing(self) -> None: typing_url % (room, other_user_id, other_access_token), b'{"typing": false}', ) - self.assertEquals(200, channel.code) + self.assertEqual(200, channel.code) # Start typing. channel = self.make_request( @@ -257,11 +257,11 @@ def test_sync_backwards_typing(self) -> None: typing_url % (room, other_user_id, other_access_token), b'{"typing": true, "timeout": 30000}', ) - self.assertEquals(200, channel.code) + self.assertEqual(200, channel.code) # Should return immediately channel = self.make_request("GET", sync_url % (access_token, next_batch)) - self.assertEquals(200, channel.code) + self.assertEqual(200, channel.code) next_batch = channel.json_body["next_batch"] # Reset typing serial back to 0, as if the master had. @@ -273,7 +273,7 @@ def test_sync_backwards_typing(self) -> None: self.helper.send(room, body="There!", tok=other_access_token) channel = self.make_request("GET", sync_url % (access_token, next_batch)) - self.assertEquals(200, channel.code) + self.assertEqual(200, channel.code) next_batch = channel.json_body["next_batch"] # This should time out! But it does not, because our stream token is @@ -281,7 +281,7 @@ def test_sync_backwards_typing(self) -> None: # already seen) is new, since it's got a token above our new, now-reset # stream token. 
channel = self.make_request("GET", sync_url % (access_token, next_batch)) - self.assertEquals(200, channel.code) + self.assertEqual(200, channel.code) next_batch = channel.json_body["next_batch"] # Clear the typing information, so that it doesn't think everything is @@ -351,7 +351,7 @@ def test_knock_room_state(self) -> None: b"{}", self.knocker_tok, ) - self.assertEquals(200, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) # We expect to see the knock event in the stripped room state later self.expected_room_state[EventTypes.Member] = { diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index ac6b86ff6b02..9cca9edd3026 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -139,7 +139,7 @@ async def check(ev, state): {}, access_token=self.tok, ) - self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) callback.assert_called_once() @@ -157,7 +157,7 @@ async def check(ev, state): {}, access_token=self.tok, ) - self.assertEquals(channel.result["code"], b"403", channel.result) + self.assertEqual(channel.result["code"], b"403", channel.result) def test_third_party_rules_workaround_synapse_errors_pass_through(self): """ @@ -193,7 +193,7 @@ async def check(ev, state) -> Tuple[bool, Optional[JsonDict]]: access_token=self.tok, ) # Check the error code - self.assertEquals(channel.result["code"], b"429", channel.result) + self.assertEqual(channel.result["code"], b"429", channel.result) # Check the JSON body has had the `nasty` key injected self.assertEqual( channel.json_body, @@ -329,10 +329,10 @@ def test_send_event(self): self.hs.get_module_api().create_and_send_event_into_room(event_dict) ) - self.assertEquals(event.sender, self.user_id) - self.assertEquals(event.room_id, self.room_id) - self.assertEquals(event.type, "m.room.message") - self.assertEquals(event.content, content) + self.assertEqual(event.sender, self.user_id) + self.assertEqual(event.room_id, self.room_id) + self.assertEqual(event.type, "m.room.message") + self.assertEqual(event.content, content) @unittest.override_config( { diff --git a/tests/rest/client/test_typing.py b/tests/rest/client/test_typing.py index de312cb63c34..8b2da88e8a58 100644 --- a/tests/rest/client/test_typing.py +++ b/tests/rest/client/test_typing.py @@ -72,9 +72,9 @@ def test_set_typing(self): "/rooms/%s/typing/%s" % (self.room_id, self.user_id), b'{"typing": true, "timeout": 30000}', ) - self.assertEquals(200, channel.code) + self.assertEqual(200, channel.code) - self.assertEquals(self.event_source.get_current_key(), 1) + self.assertEqual(self.event_source.get_current_key(), 1) events = self.get_success( self.event_source.get_new_events( user=UserID.from_string(self.user_id), @@ -84,7 +84,7 @@ def test_set_typing(self): is_guest=False, ) ) - self.assertEquals( + self.assertEqual( events[0], [ { @@ -101,7 +101,7 @@ def test_set_not_typing(self): "/rooms/%s/typing/%s" % (self.room_id, self.user_id), b'{"typing": false}', ) - self.assertEquals(200, channel.code) + self.assertEqual(200, channel.code) def test_typing_timeout(self): channel = self.make_request( @@ -109,19 +109,19 @@ def test_typing_timeout(self): "/rooms/%s/typing/%s" % (self.room_id, self.user_id), b'{"typing": true, "timeout": 30000}', ) - self.assertEquals(200, channel.code) + self.assertEqual(200, channel.code) - self.assertEquals(self.event_source.get_current_key(), 1) + 
self.assertEqual(self.event_source.get_current_key(), 1) self.reactor.advance(36) - self.assertEquals(self.event_source.get_current_key(), 2) + self.assertEqual(self.event_source.get_current_key(), 2) channel = self.make_request( "PUT", "/rooms/%s/typing/%s" % (self.room_id, self.user_id), b'{"typing": true, "timeout": 30000}', ) - self.assertEquals(200, channel.code) + self.assertEqual(200, channel.code) - self.assertEquals(self.event_source.get_current_key(), 3) + self.assertEqual(self.event_source.get_current_key(), 3) diff --git a/tests/rest/client/test_upgrade_room.py b/tests/rest/client/test_upgrade_room.py index 7f79336abcad..658c21b2a19f 100644 --- a/tests/rest/client/test_upgrade_room.py +++ b/tests/rest/client/test_upgrade_room.py @@ -65,7 +65,7 @@ def test_upgrade(self): Upgrading a room should work fine. """ channel = self._upgrade_room() - self.assertEquals(200, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) self.assertIn("replacement_room", channel.json_body) def test_not_in_room(self): @@ -77,7 +77,7 @@ def test_not_in_room(self): roomless_token = self.login(roomless, "pass") channel = self._upgrade_room(roomless_token) - self.assertEquals(403, channel.code, channel.result) + self.assertEqual(403, channel.code, channel.result) def test_power_levels(self): """ @@ -85,7 +85,7 @@ def test_power_levels(self): """ # The other user doesn't have the proper power level. channel = self._upgrade_room(self.other_token) - self.assertEquals(403, channel.code, channel.result) + self.assertEqual(403, channel.code, channel.result) # Increase the power levels so that this user can upgrade. power_levels = self.helper.get_state( @@ -103,7 +103,7 @@ def test_power_levels(self): # The upgrade should succeed! channel = self._upgrade_room(self.other_token) - self.assertEquals(200, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) def test_power_levels_user_default(self): """ @@ -111,7 +111,7 @@ def test_power_levels_user_default(self): """ # The other user doesn't have the proper power level. channel = self._upgrade_room(self.other_token) - self.assertEquals(403, channel.code, channel.result) + self.assertEqual(403, channel.code, channel.result) # Increase the power levels so that this user can upgrade. power_levels = self.helper.get_state( @@ -129,7 +129,7 @@ def test_power_levels_user_default(self): # The upgrade should succeed! channel = self._upgrade_room(self.other_token) - self.assertEquals(200, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) def test_power_levels_tombstone(self): """ @@ -137,7 +137,7 @@ def test_power_levels_tombstone(self): """ # The other user doesn't have the proper power level. channel = self._upgrade_room(self.other_token) - self.assertEquals(403, channel.code, channel.result) + self.assertEqual(403, channel.code, channel.result) # Increase the power levels so that this user can upgrade. power_levels = self.helper.get_state( @@ -155,7 +155,7 @@ def test_power_levels_tombstone(self): # The upgrade should succeed! channel = self._upgrade_room(self.other_token) - self.assertEquals(200, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) power_levels = self.helper.get_state( self.room_id, @@ -197,7 +197,7 @@ def test_space(self): # Upgrade the room! 
channel = self._upgrade_room(room_id=space_id) - self.assertEquals(200, channel.code, channel.result) + self.assertEqual(200, channel.code, channel.result) self.assertIn("replacement_room", channel.json_body) new_space_id = channel.json_body["replacement_room"] diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py index 6878ccddbf19..cba9be17c4ca 100644 --- a/tests/rest/media/v1/test_media_storage.py +++ b/tests/rest/media/v1/test_media_storage.py @@ -94,7 +94,7 @@ def test_ensure_media_is_in_local_cache(self): self.assertTrue(os.path.exists(local_path)) # Asserts the file is under the expected local cache directory - self.assertEquals( + self.assertEqual( os.path.commonprefix([self.primary_base_path, local_path]), self.primary_base_path, ) diff --git a/tests/storage/databases/main/test_events_worker.py b/tests/storage/databases/main/test_events_worker.py index 59def6e59cdf..1f6a9eb07bfc 100644 --- a/tests/storage/databases/main/test_events_worker.py +++ b/tests/storage/databases/main/test_events_worker.py @@ -88,18 +88,18 @@ def test_simple(self): res = self.get_success( self.store.have_seen_events("room1", ["event10", "event19"]) ) - self.assertEquals(res, {"event10"}) + self.assertEqual(res, {"event10"}) # that should result in a single db query - self.assertEquals(ctx.get_resource_usage().db_txn_count, 1) + self.assertEqual(ctx.get_resource_usage().db_txn_count, 1) # a second lookup of the same events should cause no queries with LoggingContext(name="test") as ctx: res = self.get_success( self.store.have_seen_events("room1", ["event10", "event19"]) ) - self.assertEquals(res, {"event10"}) - self.assertEquals(ctx.get_resource_usage().db_txn_count, 0) + self.assertEqual(res, {"event10"}) + self.assertEqual(ctx.get_resource_usage().db_txn_count, 0) def test_query_via_event_cache(self): # fetch an event into the event cache @@ -108,8 +108,8 @@ def test_query_via_event_cache(self): # looking it up should now cause no db hits with LoggingContext(name="test") as ctx: res = self.get_success(self.store.have_seen_events("room1", ["event10"])) - self.assertEquals(res, {"event10"}) - self.assertEquals(ctx.get_resource_usage().db_txn_count, 0) + self.assertEqual(res, {"event10"}) + self.assertEqual(ctx.get_resource_usage().db_txn_count, 0) class EventCacheTestCase(unittest.HomeserverTestCase): diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index d2f654214ede..ee599f433667 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -88,21 +88,21 @@ def _add_appservice(self, as_token, id, url, hs_token, sender) -> None: def test_retrieve_unknown_service_token(self) -> None: service = self.store.get_app_service_by_token("invalid_token") - self.assertEquals(service, None) + self.assertEqual(service, None) def test_retrieval_of_service(self) -> None: stored_service = self.store.get_app_service_by_token(self.as_token) assert stored_service is not None - self.assertEquals(stored_service.token, self.as_token) - self.assertEquals(stored_service.id, self.as_id) - self.assertEquals(stored_service.url, self.as_url) - self.assertEquals(stored_service.namespaces[ApplicationService.NS_ALIASES], []) - self.assertEquals(stored_service.namespaces[ApplicationService.NS_ROOMS], []) - self.assertEquals(stored_service.namespaces[ApplicationService.NS_USERS], []) + self.assertEqual(stored_service.token, self.as_token) + self.assertEqual(stored_service.id, self.as_id) + self.assertEqual(stored_service.url, 
self.as_url) + self.assertEqual(stored_service.namespaces[ApplicationService.NS_ALIASES], []) + self.assertEqual(stored_service.namespaces[ApplicationService.NS_ROOMS], []) + self.assertEqual(stored_service.namespaces[ApplicationService.NS_USERS], []) def test_retrieval_of_all_services(self) -> None: services = self.store.get_app_services() - self.assertEquals(len(services), 3) + self.assertEqual(len(services), 3) class ApplicationServiceTransactionStoreTestCase(unittest.HomeserverTestCase): @@ -182,7 +182,7 @@ def test_get_appservice_state_none( ) -> None: service = Mock(id="999") state = self.get_success(self.store.get_appservice_state(service)) - self.assertEquals(None, state) + self.assertEqual(None, state) def test_get_appservice_state_up( self, @@ -194,7 +194,7 @@ def test_get_appservice_state_up( state = self.get_success( defer.ensureDeferred(self.store.get_appservice_state(service)) ) - self.assertEquals(ApplicationServiceState.UP, state) + self.assertEqual(ApplicationServiceState.UP, state) def test_get_appservice_state_down( self, @@ -210,7 +210,7 @@ def test_get_appservice_state_down( ) service = Mock(id=self.as_list[1]["id"]) state = self.get_success(self.store.get_appservice_state(service)) - self.assertEquals(ApplicationServiceState.DOWN, state) + self.assertEqual(ApplicationServiceState.DOWN, state) def test_get_appservices_by_state_none( self, @@ -218,7 +218,7 @@ def test_get_appservices_by_state_none( services = self.get_success( self.store.get_appservices_by_state(ApplicationServiceState.DOWN) ) - self.assertEquals(0, len(services)) + self.assertEqual(0, len(services)) def test_set_appservices_state_down( self, @@ -235,7 +235,7 @@ def test_set_appservices_state_down( (ApplicationServiceState.DOWN.value,), ) ) - self.assertEquals(service.id, rows[0][0]) + self.assertEqual(service.id, rows[0][0]) def test_set_appservices_state_multiple_up( self, @@ -258,7 +258,7 @@ def test_set_appservices_state_multiple_up( (ApplicationServiceState.UP.value,), ) ) - self.assertEquals(service.id, rows[0][0]) + self.assertEqual(service.id, rows[0][0]) def test_create_appservice_txn_first( self, @@ -270,9 +270,9 @@ def test_create_appservice_txn_first( self.store.create_appservice_txn(service, events, [], [], {}, {}) ) ) - self.assertEquals(txn.id, 1) - self.assertEquals(txn.events, events) - self.assertEquals(txn.service, service) + self.assertEqual(txn.id, 1) + self.assertEqual(txn.events, events) + self.assertEqual(txn.service, service) def test_create_appservice_txn_older_last_txn( self, @@ -285,9 +285,9 @@ def test_create_appservice_txn_older_last_txn( txn = self.get_success( self.store.create_appservice_txn(service, events, [], [], {}, {}) ) - self.assertEquals(txn.id, 9646) - self.assertEquals(txn.events, events) - self.assertEquals(txn.service, service) + self.assertEqual(txn.id, 9646) + self.assertEqual(txn.events, events) + self.assertEqual(txn.service, service) def test_create_appservice_txn_up_to_date_last_txn( self, @@ -298,9 +298,9 @@ def test_create_appservice_txn_up_to_date_last_txn( txn = self.get_success( self.store.create_appservice_txn(service, events, [], [], {}, {}) ) - self.assertEquals(txn.id, 9644) - self.assertEquals(txn.events, events) - self.assertEquals(txn.service, service) + self.assertEqual(txn.id, 9644) + self.assertEqual(txn.events, events) + self.assertEqual(txn.service, service) def test_create_appservice_txn_up_fuzzing( self, @@ -322,9 +322,9 @@ def test_create_appservice_txn_up_fuzzing( txn = self.get_success( self.store.create_appservice_txn(service, 
events, [], [], {}, {}) ) - self.assertEquals(txn.id, 9644) - self.assertEquals(txn.events, events) - self.assertEquals(txn.service, service) + self.assertEqual(txn.id, 9644) + self.assertEqual(txn.events, events) + self.assertEqual(txn.service, service) def test_complete_appservice_txn_first_txn( self, @@ -346,8 +346,8 @@ def test_complete_appservice_txn_first_txn( (service.id,), ) ) - self.assertEquals(1, len(res)) - self.assertEquals(txn_id, res[0][0]) + self.assertEqual(1, len(res)) + self.assertEqual(txn_id, res[0][0]) res = self.get_success( self.db_pool.runQuery( @@ -357,7 +357,7 @@ def test_complete_appservice_txn_first_txn( (txn_id,), ) ) - self.assertEquals(0, len(res)) + self.assertEqual(0, len(res)) def test_complete_appservice_txn_existing_in_state_table( self, @@ -379,9 +379,9 @@ def test_complete_appservice_txn_existing_in_state_table( (service.id,), ) ) - self.assertEquals(1, len(res)) - self.assertEquals(txn_id, res[0][0]) - self.assertEquals(ApplicationServiceState.UP.value, res[0][1]) + self.assertEqual(1, len(res)) + self.assertEqual(txn_id, res[0][0]) + self.assertEqual(ApplicationServiceState.UP.value, res[0][1]) res = self.get_success( self.db_pool.runQuery( @@ -391,7 +391,7 @@ def test_complete_appservice_txn_existing_in_state_table( (txn_id,), ) ) - self.assertEquals(0, len(res)) + self.assertEqual(0, len(res)) def test_get_oldest_unsent_txn_none( self, @@ -399,7 +399,7 @@ def test_get_oldest_unsent_txn_none( service = Mock(id=self.as_list[0]["id"]) txn = self.get_success(self.store.get_oldest_unsent_txn(service)) - self.assertEquals(None, txn) + self.assertEqual(None, txn) def test_get_oldest_unsent_txn(self) -> None: service = Mock(id=self.as_list[0]["id"]) @@ -416,9 +416,9 @@ def test_get_oldest_unsent_txn(self) -> None: self.get_success(self._insert_txn(service.id, 12, other_events)) txn = self.get_success(self.store.get_oldest_unsent_txn(service)) - self.assertEquals(service, txn.service) - self.assertEquals(10, txn.id) - self.assertEquals(events, txn.events) + self.assertEqual(service, txn.service) + self.assertEqual(10, txn.id) + self.assertEqual(events, txn.events) def test_get_appservices_by_state_single( self, @@ -433,8 +433,8 @@ def test_get_appservices_by_state_single( services = self.get_success( self.store.get_appservices_by_state(ApplicationServiceState.DOWN) ) - self.assertEquals(1, len(services)) - self.assertEquals(self.as_list[0]["id"], services[0].id) + self.assertEqual(1, len(services)) + self.assertEqual(self.as_list[0]["id"], services[0].id) def test_get_appservices_by_state_multiple( self, @@ -455,8 +455,8 @@ def test_get_appservices_by_state_multiple( services = self.get_success( self.store.get_appservices_by_state(ApplicationServiceState.DOWN) ) - self.assertEquals(2, len(services)) - self.assertEquals( + self.assertEqual(2, len(services)) + self.assertEqual( {self.as_list[2]["id"], self.as_list[0]["id"]}, {services[0].id, services[1].id}, ) @@ -476,12 +476,12 @@ def test_get_type_stream_id_for_appservice_no_value(self) -> None: value = self.get_success( self.store.get_type_stream_id_for_appservice(self.service, "read_receipt") ) - self.assertEquals(value, 0) + self.assertEqual(value, 0) value = self.get_success( self.store.get_type_stream_id_for_appservice(self.service, "presence") ) - self.assertEquals(value, 0) + self.assertEqual(value, 0) def test_get_type_stream_id_for_appservice_invalid_type(self) -> None: self.get_failure( diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py index 3e4f0579c9cb..a8ffb52c0503 100644 
--- a/tests/storage/test_base.py +++ b/tests/storage/test_base.py @@ -103,7 +103,7 @@ def test_select_one_1col(self): ) ) - self.assertEquals("Value", value) + self.assertEqual("Value", value) self.mock_txn.execute.assert_called_with( "SELECT retcol FROM tablename WHERE keycol = ?", ["TheKey"] ) @@ -121,7 +121,7 @@ def test_select_one_3col(self): ) ) - self.assertEquals({"colA": 1, "colB": 2, "colC": 3}, ret) + self.assertEqual({"colA": 1, "colB": 2, "colC": 3}, ret) self.mock_txn.execute.assert_called_with( "SELECT colA, colB, colC FROM tablename WHERE keycol = ?", ["TheKey"] ) @@ -154,7 +154,7 @@ def test_select_list(self): ) ) - self.assertEquals([{"colA": 1}, {"colA": 2}, {"colA": 3}], ret) + self.assertEqual([{"colA": 1}, {"colA": 2}, {"colA": 3}], ret) self.mock_txn.execute.assert_called_with( "SELECT colA FROM tablename WHERE keycol = ?", ["A set"] ) diff --git a/tests/storage/test_directory.py b/tests/storage/test_directory.py index 7b72a92424b7..20bf3ca17b9a 100644 --- a/tests/storage/test_directory.py +++ b/tests/storage/test_directory.py @@ -31,7 +31,7 @@ def test_room_to_alias(self): ) ) - self.assertEquals( + self.assertEqual( ["#my-room:test"], (self.get_success(self.store.get_aliases_for_room(self.room.to_string()))), ) diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py index c9e3b9fa7994..0f9add48417a 100644 --- a/tests/storage/test_event_push_actions.py +++ b/tests/storage/test_event_push_actions.py @@ -57,7 +57,7 @@ def _assert_counts(noitf_count, highlight_count): "", self.store._get_unread_counts_by_pos_txn, room_id, user_id, 0 ) ) - self.assertEquals( + self.assertEqual( counts, NotifCounts( notify_count=noitf_count, diff --git a/tests/storage/test_main.py b/tests/storage/test_main.py index 4ca212fd1105..5806cb0e4bb7 100644 --- a/tests/storage/test_main.py +++ b/tests/storage/test_main.py @@ -38,12 +38,12 @@ def test_get_users_paginate(self) -> None: self.store.get_users_paginate(0, 10, name="bc", guests=False) ) - self.assertEquals(1, total) - self.assertEquals(self.displayname, users.pop()["displayname"]) + self.assertEqual(1, total) + self.assertEqual(self.displayname, users.pop()["displayname"]) users, total = self.get_success( self.store.get_users_paginate(0, 10, name="BC", guests=False) ) - self.assertEquals(1, total) - self.assertEquals(self.displayname, users.pop()["displayname"]) + self.assertEqual(1, total) + self.assertEqual(self.displayname, users.pop()["displayname"]) diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py index b6f99af2f1b8..a019d06e09c5 100644 --- a/tests/storage/test_profile.py +++ b/tests/storage/test_profile.py @@ -33,7 +33,7 @@ def test_displayname(self) -> None: self.store.set_profile_displayname(self.u_frank.localpart, "Frank") ) - self.assertEquals( + self.assertEqual( "Frank", ( self.get_success( @@ -60,7 +60,7 @@ def test_avatar_url(self) -> None: ) ) - self.assertEquals( + self.assertEqual( "http://my.site/here", ( self.get_success( diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py index 1fa495f7787d..a49ac1525ec7 100644 --- a/tests/storage/test_registration.py +++ b/tests/storage/test_registration.py @@ -30,7 +30,7 @@ def prepare(self, reactor, clock, hs): def test_register(self): self.get_success(self.store.register_user(self.user_id, self.pwhash)) - self.assertEquals( + self.assertEqual( { # TODO(paul): Surely this field should be 'user_id', not 'name' "name": self.user_id, @@ -131,7 +131,7 @@ def 
test_3pid_inhibit_invalid_validation_session_error(self): ), ThreepidValidationError, ) - self.assertEquals(e.value.msg, "Unknown session_id", e) + self.assertEqual(e.value.msg, "Unknown session_id", e) # Set the config setting to true. self.store._ignore_unknown_session_error = True @@ -146,4 +146,4 @@ def test_3pid_inhibit_invalid_validation_session_error(self): ), ThreepidValidationError, ) - self.assertEquals(e.value.msg, "Validation token not found or has expired", e) + self.assertEqual(e.value.msg, "Validation token not found or has expired", e) diff --git a/tests/storage/test_room.py b/tests/storage/test_room.py index 42bfca2a8366..5b011e18cd69 100644 --- a/tests/storage/test_room.py +++ b/tests/storage/test_room.py @@ -104,7 +104,7 @@ def STALE_test_room_name(self): self.store.get_current_state(room_id=self.room.to_string()) ) - self.assertEquals(1, len(state)) + self.assertEqual(1, len(state)) self.assertObjectHasAttributes( {"type": "m.room.name", "room_id": self.room.to_string(), "name": name}, state[0], @@ -121,7 +121,7 @@ def STALE_test_room_topic(self): self.store.get_current_state(room_id=self.room.to_string()) ) - self.assertEquals(1, len(state)) + self.assertEqual(1, len(state)) self.assertObjectHasAttributes( {"type": "m.room.topic", "room_id": self.room.to_string(), "topic": topic}, state[0], diff --git a/tests/storage/test_room_search.py b/tests/storage/test_room_search.py index d62e01726cba..8dfc1e1db903 100644 --- a/tests/storage/test_room_search.py +++ b/tests/storage/test_room_search.py @@ -53,7 +53,7 @@ def test_null_byte(self): result = self.get_success( store.search_msgs([room_id], "hi bob", ["content.body"]) ) - self.assertEquals(result.get("count"), 1) + self.assertEqual(result.get("count"), 1) if isinstance(store.database_engine, PostgresEngine): self.assertIn("hi", result.get("highlights")) self.assertIn("bob", result.get("highlights")) @@ -62,14 +62,14 @@ def test_null_byte(self): result = self.get_success( store.search_msgs([room_id], "another", ["content.body"]) ) - self.assertEquals(result.get("count"), 1) + self.assertEqual(result.get("count"), 1) if isinstance(store.database_engine, PostgresEngine): self.assertIn("another", result.get("highlights")) # Check that search works for a search term that overlaps with the message # containing a null byte and an unrelated message. 
result = self.get_success(store.search_msgs([room_id], "hi", ["content.body"])) - self.assertEquals(result.get("count"), 2) + self.assertEqual(result.get("count"), 2) result = self.get_success( store.search_msgs([room_id], "hi alice", ["content.body"]) ) diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index 7028f0dfb000..b8f09a8ee081 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -55,7 +55,7 @@ def test_one_member(self): ) ) - self.assertEquals([self.room], [m.room_id for m in rooms_for_user]) + self.assertEqual([self.room], [m.room_id for m in rooms_for_user]) def test_count_known_servers(self): """ diff --git a/tests/test_distributor.py b/tests/test_distributor.py index f8341041ee6e..31546ea52bb0 100644 --- a/tests/test_distributor.py +++ b/tests/test_distributor.py @@ -48,7 +48,7 @@ def test_signal_catch(self): observers[0].assert_called_once_with("Go") observers[1].assert_called_once_with("Go") - self.assertEquals(mock_logger.warning.call_count, 1) + self.assertEqual(mock_logger.warning.call_count, 1) self.assertIsInstance(mock_logger.warning.call_args[0][0], str) def test_signal_prereg(self): diff --git a/tests/test_terms_auth.py b/tests/test_terms_auth.py index 67dcf567cdb8..37fada5c5369 100644 --- a/tests/test_terms_auth.py +++ b/tests/test_terms_auth.py @@ -54,7 +54,7 @@ def test_ui_auth(self): request_data = json.dumps({"username": "kermit", "password": "monkey"}) channel = self.make_request(b"POST", self.url, request_data) - self.assertEquals(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.result["code"], b"401", channel.result) self.assertTrue(channel.json_body is not None) self.assertIsInstance(channel.json_body["session"], str) @@ -99,7 +99,7 @@ def test_ui_auth(self): # We don't bother checking that the response is correct - we'll leave that to # other tests. We just want to make sure we're on the right path. - self.assertEquals(channel.result["code"], b"401", channel.result) + self.assertEqual(channel.result["code"], b"401", channel.result) # Finish the UI auth for terms request_data = json.dumps( @@ -117,7 +117,7 @@ def test_ui_auth(self): # We're interested in getting a response that looks like a successful # registration, not so much that the details are exactly what we want. 
- self.assertEquals(channel.result["code"], b"200", channel.result) + self.assertEqual(channel.result["code"], b"200", channel.result) self.assertTrue(channel.json_body is not None) self.assertIsInstance(channel.json_body["user_id"], str) diff --git a/tests/test_test_utils.py b/tests/test_test_utils.py index f2ef1c6051eb..d04bcae0fabe 100644 --- a/tests/test_test_utils.py +++ b/tests/test_test_utils.py @@ -25,7 +25,7 @@ def test_advance_time(self): self.clock.advance_time(20) - self.assertEquals(20, self.clock.time() - start_time) + self.assertEqual(20, self.clock.time() - start_time) def test_later(self): invoked = [0, 0] diff --git a/tests/test_types.py b/tests/test_types.py index 0d0c00d97a06..80888a744d1b 100644 --- a/tests/test_types.py +++ b/tests/test_types.py @@ -22,9 +22,9 @@ class UserIDTestCase(unittest.HomeserverTestCase): def test_parse(self): user = UserID.from_string("@1234abcd:test") - self.assertEquals("1234abcd", user.localpart) - self.assertEquals("test", user.domain) - self.assertEquals(True, self.hs.is_mine(user)) + self.assertEqual("1234abcd", user.localpart) + self.assertEqual("test", user.domain) + self.assertEqual(True, self.hs.is_mine(user)) def test_pase_empty(self): with self.assertRaises(SynapseError): @@ -33,7 +33,7 @@ def test_pase_empty(self): def test_build(self): user = UserID("5678efgh", "my.domain") - self.assertEquals(user.to_string(), "@5678efgh:my.domain") + self.assertEqual(user.to_string(), "@5678efgh:my.domain") def test_compare(self): userA = UserID.from_string("@userA:my.domain") @@ -48,14 +48,14 @@ class RoomAliasTestCase(unittest.HomeserverTestCase): def test_parse(self): room = RoomAlias.from_string("#channel:test") - self.assertEquals("channel", room.localpart) - self.assertEquals("test", room.domain) - self.assertEquals(True, self.hs.is_mine(room)) + self.assertEqual("channel", room.localpart) + self.assertEqual("test", room.domain) + self.assertEqual(True, self.hs.is_mine(room)) def test_build(self): room = RoomAlias("channel", "my.domain") - self.assertEquals(room.to_string(), "#channel:my.domain") + self.assertEqual(room.to_string(), "#channel:my.domain") def test_validate(self): id_string = "#test:domain,test" diff --git a/tests/unittest.py b/tests/unittest.py index 0caa8e7a45fe..326895f4c9f5 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -152,12 +152,12 @@ def tearDown(orig): def assertObjectHasAttributes(self, attrs, obj): """Asserts that the given object has each of the attributes given, and - that the value of each matches according to assertEquals.""" + that the value of each matches according to assertEqual.""" for key in attrs.keys(): if not hasattr(obj, key): raise AssertionError("Expected obj to have a '.%s'" % key) try: - self.assertEquals(attrs[key], getattr(obj, key)) + self.assertEqual(attrs[key], getattr(obj, key)) except AssertionError as e: raise (type(e))(f"Assert error for '.{key}':") from e @@ -169,7 +169,7 @@ def assert_dict(self, required, actual): actual (dict): The test result. Extra keys will not be checked. """ for key in required: - self.assertEquals( + self.assertEqual( required[key], actual[key], msg="%s mismatch. 
%s" % (key, actual) ) diff --git a/tests/util/caches/test_deferred_cache.py b/tests/util/caches/test_deferred_cache.py index c613ce3f1055..02b99b466a26 100644 --- a/tests/util/caches/test_deferred_cache.py +++ b/tests/util/caches/test_deferred_cache.py @@ -31,7 +31,7 @@ def test_hit(self): cache = DeferredCache("test") cache.prefill("foo", 123) - self.assertEquals(self.successResultOf(cache.get("foo")), 123) + self.assertEqual(self.successResultOf(cache.get("foo")), 123) def test_hit_deferred(self): cache = DeferredCache("test") diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index ced3efd93f8b..b92d3f0c1b51 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -434,8 +434,8 @@ def func(self, key): a = A() - self.assertEquals((yield a.func("foo")), "foo") - self.assertEquals((yield a.func("bar")), "bar") + self.assertEqual((yield a.func("foo")), "foo") + self.assertEqual((yield a.func("bar")), "bar") @defer.inlineCallbacks def test_hit(self): @@ -450,10 +450,10 @@ def func(self, key): a = A() yield a.func("foo") - self.assertEquals(callcount[0], 1) + self.assertEqual(callcount[0], 1) - self.assertEquals((yield a.func("foo")), "foo") - self.assertEquals(callcount[0], 1) + self.assertEqual((yield a.func("foo")), "foo") + self.assertEqual(callcount[0], 1) @defer.inlineCallbacks def test_invalidate(self): @@ -468,13 +468,13 @@ def func(self, key): a = A() yield a.func("foo") - self.assertEquals(callcount[0], 1) + self.assertEqual(callcount[0], 1) a.func.invalidate(("foo",)) yield a.func("foo") - self.assertEquals(callcount[0], 2) + self.assertEqual(callcount[0], 2) def test_invalidate_missing(self): class A: @@ -499,7 +499,7 @@ def func(self, key): for k in range(0, 12): yield a.func(k) - self.assertEquals(callcount[0], 12) + self.assertEqual(callcount[0], 12) # There must have been at least 2 evictions, meaning if we calculate # all 12 values again, we must get called at least 2 more times @@ -525,8 +525,8 @@ def func(self, key): a.func.prefill(("foo",), 456) - self.assertEquals(a.func("foo").result, 456) - self.assertEquals(callcount[0], 0) + self.assertEqual(a.func("foo").result, 456) + self.assertEqual(callcount[0], 0) @defer.inlineCallbacks def test_invalidate_context(self): @@ -547,19 +547,19 @@ def func2(self, key, cache_context): a = A() yield a.func2("foo") - self.assertEquals(callcount[0], 1) - self.assertEquals(callcount2[0], 1) + self.assertEqual(callcount[0], 1) + self.assertEqual(callcount2[0], 1) a.func.invalidate(("foo",)) yield a.func("foo") - self.assertEquals(callcount[0], 2) - self.assertEquals(callcount2[0], 1) + self.assertEqual(callcount[0], 2) + self.assertEqual(callcount2[0], 1) yield a.func2("foo") - self.assertEquals(callcount[0], 2) - self.assertEquals(callcount2[0], 2) + self.assertEqual(callcount[0], 2) + self.assertEqual(callcount2[0], 2) @defer.inlineCallbacks def test_eviction_context(self): @@ -581,22 +581,22 @@ def func2(self, key, cache_context): yield a.func2("foo") yield a.func2("foo2") - self.assertEquals(callcount[0], 2) - self.assertEquals(callcount2[0], 2) + self.assertEqual(callcount[0], 2) + self.assertEqual(callcount2[0], 2) yield a.func2("foo") - self.assertEquals(callcount[0], 2) - self.assertEquals(callcount2[0], 2) + self.assertEqual(callcount[0], 2) + self.assertEqual(callcount2[0], 2) yield a.func("foo3") - self.assertEquals(callcount[0], 3) - self.assertEquals(callcount2[0], 2) + self.assertEqual(callcount[0], 3) + self.assertEqual(callcount2[0], 2) yield 
a.func2("foo") - self.assertEquals(callcount[0], 4) - self.assertEquals(callcount2[0], 3) + self.assertEqual(callcount[0], 4) + self.assertEqual(callcount2[0], 3) @defer.inlineCallbacks def test_double_get(self): @@ -619,30 +619,30 @@ def func2(self, key, cache_context): yield a.func2("foo") - self.assertEquals(callcount[0], 1) - self.assertEquals(callcount2[0], 1) + self.assertEqual(callcount[0], 1) + self.assertEqual(callcount2[0], 1) a.func2.invalidate(("foo",)) - self.assertEquals(a.func2.cache.cache.del_multi.call_count, 1) + self.assertEqual(a.func2.cache.cache.del_multi.call_count, 1) yield a.func2("foo") a.func2.invalidate(("foo",)) - self.assertEquals(a.func2.cache.cache.del_multi.call_count, 2) + self.assertEqual(a.func2.cache.cache.del_multi.call_count, 2) - self.assertEquals(callcount[0], 1) - self.assertEquals(callcount2[0], 2) + self.assertEqual(callcount[0], 1) + self.assertEqual(callcount2[0], 2) a.func.invalidate(("foo",)) - self.assertEquals(a.func2.cache.cache.del_multi.call_count, 3) + self.assertEqual(a.func2.cache.cache.del_multi.call_count, 3) yield a.func("foo") - self.assertEquals(callcount[0], 2) - self.assertEquals(callcount2[0], 2) + self.assertEqual(callcount[0], 2) + self.assertEqual(callcount2[0], 2) yield a.func2("foo") - self.assertEquals(callcount[0], 2) - self.assertEquals(callcount2[0], 3) + self.assertEqual(callcount[0], 2) + self.assertEqual(callcount2[0], 3) class CachedListDescriptorTestCase(unittest.TestCase): diff --git a/tests/util/test_expiring_cache.py b/tests/util/test_expiring_cache.py index e6e13ba06cc8..7f60aae5ba0d 100644 --- a/tests/util/test_expiring_cache.py +++ b/tests/util/test_expiring_cache.py @@ -26,8 +26,8 @@ def test_get_set(self): cache = ExpiringCache("test", clock, max_len=1) cache["key"] = "value" - self.assertEquals(cache.get("key"), "value") - self.assertEquals(cache["key"], "value") + self.assertEqual(cache.get("key"), "value") + self.assertEqual(cache["key"], "value") def test_eviction(self): clock = MockClock() @@ -35,13 +35,13 @@ def test_eviction(self): cache["key"] = "value" cache["key2"] = "value2" - self.assertEquals(cache.get("key"), "value") - self.assertEquals(cache.get("key2"), "value2") + self.assertEqual(cache.get("key"), "value") + self.assertEqual(cache.get("key2"), "value2") cache["key3"] = "value3" - self.assertEquals(cache.get("key"), None) - self.assertEquals(cache.get("key2"), "value2") - self.assertEquals(cache.get("key3"), "value3") + self.assertEqual(cache.get("key"), None) + self.assertEqual(cache.get("key2"), "value2") + self.assertEqual(cache.get("key3"), "value3") def test_iterable_eviction(self): clock = MockClock() @@ -51,15 +51,15 @@ def test_iterable_eviction(self): cache["key2"] = [2, 3] cache["key3"] = [4, 5] - self.assertEquals(cache.get("key"), [1]) - self.assertEquals(cache.get("key2"), [2, 3]) - self.assertEquals(cache.get("key3"), [4, 5]) + self.assertEqual(cache.get("key"), [1]) + self.assertEqual(cache.get("key2"), [2, 3]) + self.assertEqual(cache.get("key3"), [4, 5]) cache["key4"] = [6, 7] - self.assertEquals(cache.get("key"), None) - self.assertEquals(cache.get("key2"), None) - self.assertEquals(cache.get("key3"), [4, 5]) - self.assertEquals(cache.get("key4"), [6, 7]) + self.assertEqual(cache.get("key"), None) + self.assertEqual(cache.get("key2"), None) + self.assertEqual(cache.get("key3"), [4, 5]) + self.assertEqual(cache.get("key4"), [6, 7]) def test_time_eviction(self): clock = MockClock() @@ -69,13 +69,13 @@ def test_time_eviction(self): clock.advance_time(0.5) cache["key2"] = 2 
- self.assertEquals(cache.get("key"), 1) - self.assertEquals(cache.get("key2"), 2) + self.assertEqual(cache.get("key"), 1) + self.assertEqual(cache.get("key2"), 2) clock.advance_time(0.9) - self.assertEquals(cache.get("key"), None) - self.assertEquals(cache.get("key2"), 2) + self.assertEqual(cache.get("key"), None) + self.assertEqual(cache.get("key2"), 2) clock.advance_time(1) - self.assertEquals(cache.get("key"), None) - self.assertEquals(cache.get("key2"), None) + self.assertEqual(cache.get("key"), None) + self.assertEqual(cache.get("key2"), None) diff --git a/tests/util/test_logcontext.py b/tests/util/test_logcontext.py index 621b0f9fcdf0..2ad321e1847c 100644 --- a/tests/util/test_logcontext.py +++ b/tests/util/test_logcontext.py @@ -17,7 +17,7 @@ class LoggingContextTestCase(unittest.TestCase): def _check_test_key(self, value): - self.assertEquals(current_context().name, value) + self.assertEqual(current_context().name, value) def test_with_context(self): with LoggingContext("test"): diff --git a/tests/util/test_lrucache.py b/tests/util/test_lrucache.py index 291644eb7dd0..321fc1776f8c 100644 --- a/tests/util/test_lrucache.py +++ b/tests/util/test_lrucache.py @@ -27,37 +27,37 @@ class LruCacheTestCase(unittest.HomeserverTestCase): def test_get_set(self): cache = LruCache(1) cache["key"] = "value" - self.assertEquals(cache.get("key"), "value") - self.assertEquals(cache["key"], "value") + self.assertEqual(cache.get("key"), "value") + self.assertEqual(cache["key"], "value") def test_eviction(self): cache = LruCache(2) cache[1] = 1 cache[2] = 2 - self.assertEquals(cache.get(1), 1) - self.assertEquals(cache.get(2), 2) + self.assertEqual(cache.get(1), 1) + self.assertEqual(cache.get(2), 2) cache[3] = 3 - self.assertEquals(cache.get(1), None) - self.assertEquals(cache.get(2), 2) - self.assertEquals(cache.get(3), 3) + self.assertEqual(cache.get(1), None) + self.assertEqual(cache.get(2), 2) + self.assertEqual(cache.get(3), 3) def test_setdefault(self): cache = LruCache(1) - self.assertEquals(cache.setdefault("key", 1), 1) - self.assertEquals(cache.get("key"), 1) - self.assertEquals(cache.setdefault("key", 2), 1) - self.assertEquals(cache.get("key"), 1) + self.assertEqual(cache.setdefault("key", 1), 1) + self.assertEqual(cache.get("key"), 1) + self.assertEqual(cache.setdefault("key", 2), 1) + self.assertEqual(cache.get("key"), 1) cache["key"] = 2 # Make sure overriding works. 
- self.assertEquals(cache.get("key"), 2) + self.assertEqual(cache.get("key"), 2) def test_pop(self): cache = LruCache(1) cache["key"] = 1 - self.assertEquals(cache.pop("key"), 1) - self.assertEquals(cache.pop("key"), None) + self.assertEqual(cache.pop("key"), 1) + self.assertEqual(cache.pop("key"), None) def test_del_multi(self): cache = LruCache(4, cache_type=TreeCache) @@ -66,23 +66,23 @@ def test_del_multi(self): cache[("vehicles", "car")] = "vroom" cache[("vehicles", "train")] = "chuff" - self.assertEquals(len(cache), 4) + self.assertEqual(len(cache), 4) - self.assertEquals(cache.get(("animal", "cat")), "mew") - self.assertEquals(cache.get(("vehicles", "car")), "vroom") + self.assertEqual(cache.get(("animal", "cat")), "mew") + self.assertEqual(cache.get(("vehicles", "car")), "vroom") cache.del_multi(("animal",)) - self.assertEquals(len(cache), 2) - self.assertEquals(cache.get(("animal", "cat")), None) - self.assertEquals(cache.get(("animal", "dog")), None) - self.assertEquals(cache.get(("vehicles", "car")), "vroom") - self.assertEquals(cache.get(("vehicles", "train")), "chuff") + self.assertEqual(len(cache), 2) + self.assertEqual(cache.get(("animal", "cat")), None) + self.assertEqual(cache.get(("animal", "dog")), None) + self.assertEqual(cache.get(("vehicles", "car")), "vroom") + self.assertEqual(cache.get(("vehicles", "train")), "chuff") # Man from del_multi say "Yes". def test_clear(self): cache = LruCache(1) cache["key"] = 1 cache.clear() - self.assertEquals(len(cache), 0) + self.assertEqual(len(cache), 0) @override_config({"caches": {"per_cache_factors": {"mycache": 10}}}) def test_special_size(self): @@ -105,10 +105,10 @@ def test_get(self): self.assertFalse(m.called) cache.set("key", "value2") - self.assertEquals(m.call_count, 1) + self.assertEqual(m.call_count, 1) cache.set("key", "value") - self.assertEquals(m.call_count, 1) + self.assertEqual(m.call_count, 1) def test_multi_get(self): m = Mock() @@ -124,10 +124,10 @@ def test_multi_get(self): self.assertFalse(m.called) cache.set("key", "value2") - self.assertEquals(m.call_count, 1) + self.assertEqual(m.call_count, 1) cache.set("key", "value") - self.assertEquals(m.call_count, 1) + self.assertEqual(m.call_count, 1) def test_set(self): m = Mock() @@ -140,10 +140,10 @@ def test_set(self): self.assertFalse(m.called) cache.set("key", "value2") - self.assertEquals(m.call_count, 1) + self.assertEqual(m.call_count, 1) cache.set("key", "value") - self.assertEquals(m.call_count, 1) + self.assertEqual(m.call_count, 1) def test_pop(self): m = Mock() @@ -153,13 +153,13 @@ def test_pop(self): self.assertFalse(m.called) cache.pop("key") - self.assertEquals(m.call_count, 1) + self.assertEqual(m.call_count, 1) cache.set("key", "value") - self.assertEquals(m.call_count, 1) + self.assertEqual(m.call_count, 1) cache.pop("key") - self.assertEquals(m.call_count, 1) + self.assertEqual(m.call_count, 1) def test_del_multi(self): m1 = Mock() @@ -173,17 +173,17 @@ def test_del_multi(self): cache.set(("b", "1"), "value", callbacks=[m3]) cache.set(("b", "2"), "value", callbacks=[m4]) - self.assertEquals(m1.call_count, 0) - self.assertEquals(m2.call_count, 0) - self.assertEquals(m3.call_count, 0) - self.assertEquals(m4.call_count, 0) + self.assertEqual(m1.call_count, 0) + self.assertEqual(m2.call_count, 0) + self.assertEqual(m3.call_count, 0) + self.assertEqual(m4.call_count, 0) cache.del_multi(("a",)) - self.assertEquals(m1.call_count, 1) - self.assertEquals(m2.call_count, 1) - self.assertEquals(m3.call_count, 0) - self.assertEquals(m4.call_count, 0) + 
self.assertEqual(m1.call_count, 1) + self.assertEqual(m2.call_count, 1) + self.assertEqual(m3.call_count, 0) + self.assertEqual(m4.call_count, 0) def test_clear(self): m1 = Mock() @@ -193,13 +193,13 @@ def test_clear(self): cache.set("key1", "value", callbacks=[m1]) cache.set("key2", "value", callbacks=[m2]) - self.assertEquals(m1.call_count, 0) - self.assertEquals(m2.call_count, 0) + self.assertEqual(m1.call_count, 0) + self.assertEqual(m2.call_count, 0) cache.clear() - self.assertEquals(m1.call_count, 1) - self.assertEquals(m2.call_count, 1) + self.assertEqual(m1.call_count, 1) + self.assertEqual(m2.call_count, 1) def test_eviction(self): m1 = Mock(name="m1") @@ -210,33 +210,33 @@ def test_eviction(self): cache.set("key1", "value", callbacks=[m1]) cache.set("key2", "value", callbacks=[m2]) - self.assertEquals(m1.call_count, 0) - self.assertEquals(m2.call_count, 0) - self.assertEquals(m3.call_count, 0) + self.assertEqual(m1.call_count, 0) + self.assertEqual(m2.call_count, 0) + self.assertEqual(m3.call_count, 0) cache.set("key3", "value", callbacks=[m3]) - self.assertEquals(m1.call_count, 1) - self.assertEquals(m2.call_count, 0) - self.assertEquals(m3.call_count, 0) + self.assertEqual(m1.call_count, 1) + self.assertEqual(m2.call_count, 0) + self.assertEqual(m3.call_count, 0) cache.set("key3", "value") - self.assertEquals(m1.call_count, 1) - self.assertEquals(m2.call_count, 0) - self.assertEquals(m3.call_count, 0) + self.assertEqual(m1.call_count, 1) + self.assertEqual(m2.call_count, 0) + self.assertEqual(m3.call_count, 0) cache.get("key2") - self.assertEquals(m1.call_count, 1) - self.assertEquals(m2.call_count, 0) - self.assertEquals(m3.call_count, 0) + self.assertEqual(m1.call_count, 1) + self.assertEqual(m2.call_count, 0) + self.assertEqual(m3.call_count, 0) cache.set("key1", "value", callbacks=[m1]) - self.assertEquals(m1.call_count, 1) - self.assertEquals(m2.call_count, 0) - self.assertEquals(m3.call_count, 1) + self.assertEqual(m1.call_count, 1) + self.assertEqual(m2.call_count, 0) + self.assertEqual(m3.call_count, 1) class LruCacheSizedTestCase(unittest.HomeserverTestCase): @@ -247,20 +247,20 @@ def test_evict(self): cache["key3"] = [3] cache["key4"] = [4] - self.assertEquals(cache["key1"], [0]) - self.assertEquals(cache["key2"], [1, 2]) - self.assertEquals(cache["key3"], [3]) - self.assertEquals(cache["key4"], [4]) - self.assertEquals(len(cache), 5) + self.assertEqual(cache["key1"], [0]) + self.assertEqual(cache["key2"], [1, 2]) + self.assertEqual(cache["key3"], [3]) + self.assertEqual(cache["key4"], [4]) + self.assertEqual(len(cache), 5) cache["key5"] = [5, 6] - self.assertEquals(len(cache), 4) - self.assertEquals(cache.get("key1"), None) - self.assertEquals(cache.get("key2"), None) - self.assertEquals(cache["key3"], [3]) - self.assertEquals(cache["key4"], [4]) - self.assertEquals(cache["key5"], [5, 6]) + self.assertEqual(len(cache), 4) + self.assertEqual(cache.get("key1"), None) + self.assertEqual(cache.get("key2"), None) + self.assertEqual(cache["key3"], [3]) + self.assertEqual(cache["key4"], [4]) + self.assertEqual(cache["key5"], [5, 6]) def test_zero_size_drop_from_cache(self) -> None: """Test that `drop_from_cache` works correctly with 0-sized entries.""" diff --git a/tests/util/test_treecache.py b/tests/util/test_treecache.py index 60663720536d..567cb184684e 100644 --- a/tests/util/test_treecache.py +++ b/tests/util/test_treecache.py @@ -23,61 +23,61 @@ def test_get_set_onelevel(self): cache = TreeCache() cache[("a",)] = "A" cache[("b",)] = "B" - 
self.assertEquals(cache.get(("a",)), "A") - self.assertEquals(cache.get(("b",)), "B") - self.assertEquals(len(cache), 2) + self.assertEqual(cache.get(("a",)), "A") + self.assertEqual(cache.get(("b",)), "B") + self.assertEqual(len(cache), 2) def test_pop_onelevel(self): cache = TreeCache() cache[("a",)] = "A" cache[("b",)] = "B" - self.assertEquals(cache.pop(("a",)), "A") - self.assertEquals(cache.pop(("a",)), None) - self.assertEquals(cache.get(("b",)), "B") - self.assertEquals(len(cache), 1) + self.assertEqual(cache.pop(("a",)), "A") + self.assertEqual(cache.pop(("a",)), None) + self.assertEqual(cache.get(("b",)), "B") + self.assertEqual(len(cache), 1) def test_get_set_twolevel(self): cache = TreeCache() cache[("a", "a")] = "AA" cache[("a", "b")] = "AB" cache[("b", "a")] = "BA" - self.assertEquals(cache.get(("a", "a")), "AA") - self.assertEquals(cache.get(("a", "b")), "AB") - self.assertEquals(cache.get(("b", "a")), "BA") - self.assertEquals(len(cache), 3) + self.assertEqual(cache.get(("a", "a")), "AA") + self.assertEqual(cache.get(("a", "b")), "AB") + self.assertEqual(cache.get(("b", "a")), "BA") + self.assertEqual(len(cache), 3) def test_pop_twolevel(self): cache = TreeCache() cache[("a", "a")] = "AA" cache[("a", "b")] = "AB" cache[("b", "a")] = "BA" - self.assertEquals(cache.pop(("a", "a")), "AA") - self.assertEquals(cache.get(("a", "a")), None) - self.assertEquals(cache.get(("a", "b")), "AB") - self.assertEquals(cache.pop(("b", "a")), "BA") - self.assertEquals(cache.pop(("b", "a")), None) - self.assertEquals(len(cache), 1) + self.assertEqual(cache.pop(("a", "a")), "AA") + self.assertEqual(cache.get(("a", "a")), None) + self.assertEqual(cache.get(("a", "b")), "AB") + self.assertEqual(cache.pop(("b", "a")), "BA") + self.assertEqual(cache.pop(("b", "a")), None) + self.assertEqual(len(cache), 1) def test_pop_mixedlevel(self): cache = TreeCache() cache[("a", "a")] = "AA" cache[("a", "b")] = "AB" cache[("b", "a")] = "BA" - self.assertEquals(cache.get(("a", "a")), "AA") + self.assertEqual(cache.get(("a", "a")), "AA") popped = cache.pop(("a",)) - self.assertEquals(cache.get(("a", "a")), None) - self.assertEquals(cache.get(("a", "b")), None) - self.assertEquals(cache.get(("b", "a")), "BA") - self.assertEquals(len(cache), 1) + self.assertEqual(cache.get(("a", "a")), None) + self.assertEqual(cache.get(("a", "b")), None) + self.assertEqual(cache.get(("b", "a")), "BA") + self.assertEqual(len(cache), 1) - self.assertEquals({"AA", "AB"}, set(iterate_tree_cache_entry(popped))) + self.assertEqual({"AA", "AB"}, set(iterate_tree_cache_entry(popped))) def test_clear(self): cache = TreeCache() cache[("a",)] = "A" cache[("b",)] = "B" cache.clear() - self.assertEquals(len(cache), 0) + self.assertEqual(len(cache), 0) def test_contains(self): cache = TreeCache() From 9e83521af860cb33a7459dbe74188ce5ef39f446 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 28 Feb 2022 07:52:44 -0500 Subject: [PATCH 305/474] Properly failover for unknown endpoints from Conduit/Dendrite. (#12077) Before this fix, a legitimate 404 from a federation endpoint (e.g. due to an unknown room) would be treated as an unknown endpoint. This could cause unnecessary federation traffic. 
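
For reference, the detection logic that `_is_unknown_endpoint` gains in the
diff below can be read in isolation as the following sketch (the standalone
function and its name are for illustration only; `e` is the
`HttpResponseException` and `synapse_error` its converted form):

    from synapse.api.errors import Codes, HttpResponseException, SynapseError

    def looks_like_unknown_endpoint(
        e: HttpResponseException, synapse_error: SynapseError
    ) -> bool:
        # Dendrite returns a 404 with a body of "404 page not found";
        # Conduit returns a 404 with an empty body.
        if e.code == 404 and (not e.response or e.response == b"404 page not found"):
            return True
        # Synapse returns a 400 with an M_UNRECOGNISED error code.
        return e.code == 400 and synapse_error.errcode == Codes.UNRECOGNIZED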
--- changelog.d/12077.bugfix | 1 + synapse/federation/federation_client.py | 22 +++++++++++++--------- 2 files changed, 14 insertions(+), 9 deletions(-) create mode 100644 changelog.d/12077.bugfix diff --git a/changelog.d/12077.bugfix b/changelog.d/12077.bugfix new file mode 100644 index 000000000000..1bce82082dad --- /dev/null +++ b/changelog.d/12077.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where Synapse would make additional failing requests over federation for missing data. diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 2121e92e3a9f..a4bae3c4c85f 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -615,11 +615,15 @@ def _is_unknown_endpoint( synapse_error = e.to_synapse_error() # There is no good way to detect an "unknown" endpoint. # - # Dendrite returns a 404 (with no body); synapse returns a 400 + # Dendrite returns a 404 (with a body of "404 page not found"); + # Conduit returns a 404 (with no body); and Synapse returns a 400 # with M_UNRECOGNISED. - return e.code == 404 or ( - e.code == 400 and synapse_error.errcode == Codes.UNRECOGNIZED - ) + # + # This needs to be rather specific as some endpoints truly do return 404 + # errors. + return ( + e.code == 404 and (not e.response or e.response == b"404 page not found") + ) or (e.code == 400 and synapse_error.errcode == Codes.UNRECOGNIZED) async def _try_destination_list( self, @@ -1002,7 +1006,7 @@ async def _do_send_join( ) except HttpResponseException as e: # If an error is received that is due to an unrecognised endpoint, - # fallback to the v1 endpoint. Otherwise consider it a legitmate error + # fallback to the v1 endpoint. Otherwise, consider it a legitimate error # and raise. if not self._is_unknown_endpoint(e): raise @@ -1071,7 +1075,7 @@ async def _do_send_invite( except HttpResponseException as e: # If an error is received that is due to an unrecognised endpoint, # fallback to the v1 endpoint if the room uses old-style event IDs. - # Otherwise consider it a legitmate error and raise. + # Otherwise, consider it a legitimate error and raise. err = e.to_synapse_error() if self._is_unknown_endpoint(e, err): if room_version.event_format != EventFormatVersions.V1: @@ -1132,7 +1136,7 @@ async def _do_send_leave(self, destination: str, pdu: EventBase) -> JsonDict: ) except HttpResponseException as e: # If an error is received that is due to an unrecognised endpoint, - # fallback to the v1 endpoint. Otherwise consider it a legitmate error + # fallback to the v1 endpoint. Otherwise, consider it a legitimate error # and raise. if not self._is_unknown_endpoint(e): raise @@ -1458,8 +1462,8 @@ async def send_request( ) except HttpResponseException as e: # If an error is received that is due to an unrecognised endpoint, - # fallback to the unstable endpoint. Otherwise consider it a - # legitmate error and raise. + # fallback to the unstable endpoint. Otherwise, consider it a + # legitimate error and raise. 
if not self._is_unknown_endpoint(e): raise From 5565f454e1b323b637dd418549f70fadac0f44b4 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 28 Feb 2022 14:10:36 +0000 Subject: [PATCH 306/474] Actually fix bad debug logging rejecting device list & signing key transactions (#12098) --- changelog.d/12098.bugfix | 1 + .../federation/transport/server/federation.py | 2 +- tests/federation/transport/test_server.py | 20 ++++++++++++++++++- 3 files changed, 21 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12098.bugfix diff --git a/changelog.d/12098.bugfix b/changelog.d/12098.bugfix new file mode 100644 index 000000000000..6b696692e332 --- /dev/null +++ b/changelog.d/12098.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.51.0rc1 where incoming federation transactions containing at least one EDU would be dropped if debug logging was enabled for `synapse.8631_debug`. \ No newline at end of file diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py index 9cc9a7339d19..23ce34305705 100644 --- a/synapse/federation/transport/server/federation.py +++ b/synapse/federation/transport/server/federation.py @@ -110,7 +110,7 @@ async def on_PUT( if issue_8631_logger.isEnabledFor(logging.DEBUG): DEVICE_UPDATE_EDUS = ["m.device_list_update", "m.signing_key_update"] device_list_updates = [ - edu.content + edu.get("content", {}) for edu in transaction_data.get("edus", []) if edu.get("edu_type") in DEVICE_UPDATE_EDUS ] diff --git a/tests/federation/transport/test_server.py b/tests/federation/transport/test_server.py index ce49d094d727..5f001c33b05e 100644 --- a/tests/federation/transport/test_server.py +++ b/tests/federation/transport/test_server.py @@ -13,7 +13,7 @@ # limitations under the License. from tests import unittest -from tests.unittest import override_config +from tests.unittest import DEBUG, override_config class RoomDirectoryFederationTests(unittest.FederatingHomeserverTestCase): @@ -38,3 +38,21 @@ def test_open_public_room_list_over_federation(self): "/_matrix/federation/v1/publicRooms", ) self.assertEqual(200, channel.code) + + @DEBUG + def test_edu_debugging_doesnt_explode(self): + """Sanity check incoming federation succeeds with `synapse.debug_8631` enabled. + + Remove this when we strip out issue_8631_logger. + """ + channel = self.make_signed_federation_request( + "PUT", + "/_matrix/federation/v1/send/txn_id_1234/", + content={ + "edus": [ + {"edu_type": "m.device_list_update", "content": {"foo": "bar"}} + ], + "pdus": [], + }, + ) + self.assertEqual(200, channel.code) From 6c0b44a3d73f73dc5913f081418347645dc84d6f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 28 Feb 2022 17:40:24 +0000 Subject: [PATCH 307/474] Fix `PushRuleEvaluator` and `Filter` to work on frozendicts (#12100) * Fix `PushRuleEvaluator` to work on frozendicts frozendicts do not (necessarily) inherit from dict, so this needs to handle them correctly. * Fix event filtering for frozen events Looks like this one was introduced by #11194. 
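(For context, a short demonstration of the underlying type issue, assuming the
`frozendict` package that backs `use_frozen_dicts` is installed:)

from collections.abc import Mapping

from frozendict import frozendict

content = frozendict({"msgtype": "m.text", "body": "hello"})

# frozendict implements the Mapping protocol but does not (necessarily)
# subclass dict, so the old `isinstance(value, dict)` checks silently
# skipped frozen event content:
print(isinstance(content, dict))     # may be False
print(isinstance(content, Mapping))  # True - what the fixed checks test for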
--- changelog.d/12100.bugfix | 1 + synapse/api/filtering.py | 5 +++-- synapse/push/push_rule_evaluator.py | 8 ++++---- tests/api/test_filtering.py | 10 ++++++++++ tests/push/test_push_rule_evaluator.py | 9 +++++++++ 5 files changed, 27 insertions(+), 6 deletions(-) create mode 100644 changelog.d/12100.bugfix diff --git a/changelog.d/12100.bugfix b/changelog.d/12100.bugfix new file mode 100644 index 000000000000..181095ad9929 --- /dev/null +++ b/changelog.d/12100.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug which could cause push notifications to malfunction if `use_frozen_dicts` was set in the configuration. diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index fe4cc2e8eec1..cb532d723828 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -22,6 +22,7 @@ Dict, Iterable, List, + Mapping, Optional, Set, TypeVar, @@ -361,10 +362,10 @@ def _check(self, event: FilterEvent) -> bool: return self._check_fields(field_matchers) else: content = event.get("content") - # Content is assumed to be a dict below, so ensure it is. This should + # Content is assumed to be a mapping below, so ensure it is. This should # always be true for events, but account_data has been allowed to # have non-dict content. - if not isinstance(content, dict): + if not isinstance(content, Mapping): content = {} sender = event.get("sender", None) diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py index 659a53805df1..f617c759e6cf 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -15,12 +15,12 @@ import logging import re -from typing import Any, Dict, List, Optional, Pattern, Tuple, Union +from typing import Any, Dict, List, Mapping, Optional, Pattern, Tuple, Union from matrix_common.regex import glob_to_regex, to_word_pattern from synapse.events import EventBase -from synapse.types import JsonDict, UserID +from synapse.types import UserID from synapse.util.caches.lrucache import LruCache logger = logging.getLogger(__name__) @@ -223,7 +223,7 @@ def _glob_matches(glob: str, value: str, word_boundary: bool = False) -> bool: def _flatten_dict( - d: Union[EventBase, JsonDict], + d: Union[EventBase, Mapping[str, Any]], prefix: Optional[List[str]] = None, result: Optional[Dict[str, str]] = None, ) -> Dict[str, str]: @@ -234,7 +234,7 @@ def _flatten_dict( for key, value in d.items(): if isinstance(value, str): result[".".join(prefix + [key])] = value.lower() - elif isinstance(value, dict): + elif isinstance(value, Mapping): _flatten_dict(value, prefix=(prefix + [key]), result=result) return result diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py index 2525018e9515..8c3354ce3c79 100644 --- a/tests/api/test_filtering.py +++ b/tests/api/test_filtering.py @@ -18,6 +18,7 @@ from unittest.mock import patch import jsonschema +from frozendict import frozendict from synapse.api.constants import EventContentFields from synapse.api.errors import SynapseError @@ -327,6 +328,15 @@ def test_filter_labels(self): self.assertFalse(Filter(self.hs, definition)._check(event)) + # check it works with frozendicts too + event = MockEvent( + sender="@foo:bar", + type="m.room.message", + room_id="!secretbase:unknown", + content=frozendict({EventContentFields.LABELS: ["#fun"]}), + ) + self.assertTrue(Filter(self.hs, definition)._check(event)) + def test_filter_not_labels(self): definition = {"org.matrix.not_labels": ["#fun"]} event = MockEvent( diff --git a/tests/push/test_push_rule_evaluator.py 
b/tests/push/test_push_rule_evaluator.py index a52e89e4074b..3849beb9d6d6 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -14,6 +14,8 @@ from typing import Any, Dict +import frozendict + from synapse.api.room_versions import RoomVersions from synapse.events import FrozenEvent from synapse.push import push_rule_evaluator @@ -191,6 +193,13 @@ def test_event_match_non_body(self): "pattern should only match at the start/end of the value", ) + # it should work on frozendicts too + self._assert_matches( + condition, + frozendict.frozendict({"value": "FoobaZ"}), + "patterns should match on frozendicts", + ) + # wildcards should match condition = { "kind": "event_match", From 1901cb1d4a8b7d9af64493fbd336e9aa2561c20c Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Mon, 28 Feb 2022 18:47:37 +0100 Subject: [PATCH 308/474] Add type hints to `tests/rest/client` (#12084) --- changelog.d/12084.misc | 1 + mypy.ini | 3 +- tests/rest/client/test_profile.py | 78 +++++++++++++---------- tests/rest/client/test_push_rule_attrs.py | 26 ++++---- tests/rest/client/test_redactions.py | 26 +++++--- tests/rest/client/test_relations.py | 58 +++++++++-------- tests/rest/client/test_retention.py | 41 ++++++++---- tests/rest/client/test_sendtodevice.py | 8 +-- tests/rest/client/test_shadow_banned.py | 22 ++++--- tests/rest/client/test_shared_rooms.py | 20 +++--- tests/rest/client/test_upgrade_room.py | 17 +++-- tests/rest/client/utils.py | 36 +++++++---- 12 files changed, 198 insertions(+), 138 deletions(-) create mode 100644 changelog.d/12084.misc diff --git a/changelog.d/12084.misc b/changelog.d/12084.misc new file mode 100644 index 000000000000..0360dbd61edc --- /dev/null +++ b/changelog.d/12084.misc @@ -0,0 +1 @@ +Add type hints to `tests/rest/client`. diff --git a/mypy.ini b/mypy.ini index 610660b9b721..bd75905c8d3c 100644 --- a/mypy.ini +++ b/mypy.ini @@ -84,7 +84,6 @@ exclude = (?x) |tests/rest/client/test_third_party_rules.py |tests/rest/client/test_transactions.py |tests/rest/client/test_typing.py - |tests/rest/client/utils.py |tests/rest/key/v2/test_remote_key_resource.py |tests/rest/media/v1/test_base.py |tests/rest/media/v1/test_media_storage.py @@ -253,7 +252,7 @@ disallow_untyped_defs = True [mypy-tests.rest.admin.*] disallow_untyped_defs = True -[mypy-tests.rest.client.test_directory] +[mypy-tests.rest.client.*] disallow_untyped_defs = True [mypy-tests.federation.transport.test_client] diff --git a/tests/rest/client/test_profile.py b/tests/rest/client/test_profile.py index 4239e1e61075..77c3ced42ec1 100644 --- a/tests/rest/client/test_profile.py +++ b/tests/rest/client/test_profile.py @@ -13,12 +13,16 @@ # limitations under the License. 
"""Tests REST events for /profile paths.""" -from typing import Any, Dict +from typing import Any, Dict, Optional + +from twisted.test.proto_helpers import MemoryReactor from synapse.api.errors import Codes from synapse.rest import admin from synapse.rest.client import login, profile, room +from synapse.server import HomeServer from synapse.types import UserID +from synapse.util import Clock from tests import unittest @@ -32,20 +36,20 @@ class ProfileTestCase(unittest.HomeserverTestCase): room.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.hs = self.setup_test_homeserver() return self.hs - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.owner = self.register_user("owner", "pass") self.owner_tok = self.login("owner", "pass") self.other = self.register_user("other", "pass", displayname="Bob") - def test_get_displayname(self): + def test_get_displayname(self) -> None: res = self._get_displayname() self.assertEqual(res, "owner") - def test_set_displayname(self): + def test_set_displayname(self) -> None: channel = self.make_request( "PUT", "/profile/%s/displayname" % (self.owner,), @@ -57,7 +61,7 @@ def test_set_displayname(self): res = self._get_displayname() self.assertEqual(res, "test") - def test_set_displayname_noauth(self): + def test_set_displayname_noauth(self) -> None: channel = self.make_request( "PUT", "/profile/%s/displayname" % (self.owner,), @@ -65,7 +69,7 @@ def test_set_displayname_noauth(self): ) self.assertEqual(channel.code, 401, channel.result) - def test_set_displayname_too_long(self): + def test_set_displayname_too_long(self) -> None: """Attempts to set a stupid displayname should get a 400""" channel = self.make_request( "PUT", @@ -78,11 +82,11 @@ def test_set_displayname_too_long(self): res = self._get_displayname() self.assertEqual(res, "owner") - def test_get_displayname_other(self): + def test_get_displayname_other(self) -> None: res = self._get_displayname(self.other) self.assertEqual(res, "Bob") - def test_set_displayname_other(self): + def test_set_displayname_other(self) -> None: channel = self.make_request( "PUT", "/profile/%s/displayname" % (self.other,), @@ -91,11 +95,11 @@ def test_set_displayname_other(self): ) self.assertEqual(channel.code, 400, channel.result) - def test_get_avatar_url(self): + def test_get_avatar_url(self) -> None: res = self._get_avatar_url() self.assertIsNone(res) - def test_set_avatar_url(self): + def test_set_avatar_url(self) -> None: channel = self.make_request( "PUT", "/profile/%s/avatar_url" % (self.owner,), @@ -107,7 +111,7 @@ def test_set_avatar_url(self): res = self._get_avatar_url() self.assertEqual(res, "http://my.server/pic.gif") - def test_set_avatar_url_noauth(self): + def test_set_avatar_url_noauth(self) -> None: channel = self.make_request( "PUT", "/profile/%s/avatar_url" % (self.owner,), @@ -115,7 +119,7 @@ def test_set_avatar_url_noauth(self): ) self.assertEqual(channel.code, 401, channel.result) - def test_set_avatar_url_too_long(self): + def test_set_avatar_url_too_long(self) -> None: """Attempts to set a stupid avatar_url should get a 400""" channel = self.make_request( "PUT", @@ -128,11 +132,11 @@ def test_set_avatar_url_too_long(self): res = self._get_avatar_url() self.assertIsNone(res) - def test_get_avatar_url_other(self): + def test_get_avatar_url_other(self) -> None: res = self._get_avatar_url(self.other) self.assertIsNone(res) - 
def test_set_avatar_url_other(self): + def test_set_avatar_url_other(self) -> None: channel = self.make_request( "PUT", "/profile/%s/avatar_url" % (self.other,), @@ -141,14 +145,14 @@ def test_set_avatar_url_other(self): ) self.assertEqual(channel.code, 400, channel.result) - def _get_displayname(self, name=None): + def _get_displayname(self, name: Optional[str] = None) -> str: channel = self.make_request( "GET", "/profile/%s/displayname" % (name or self.owner,) ) self.assertEqual(channel.code, 200, channel.result) return channel.json_body["displayname"] - def _get_avatar_url(self, name=None): + def _get_avatar_url(self, name: Optional[str] = None) -> str: channel = self.make_request( "GET", "/profile/%s/avatar_url" % (name or self.owner,) ) @@ -156,7 +160,7 @@ def _get_avatar_url(self, name=None): return channel.json_body.get("avatar_url") @unittest.override_config({"max_avatar_size": 50}) - def test_avatar_size_limit_global(self): + def test_avatar_size_limit_global(self) -> None: """Tests that the maximum size limit for avatars is enforced when updating a global profile. """ @@ -187,7 +191,7 @@ def test_avatar_size_limit_global(self): self.assertEqual(channel.code, 200, channel.result) @unittest.override_config({"max_avatar_size": 50}) - def test_avatar_size_limit_per_room(self): + def test_avatar_size_limit_per_room(self) -> None: """Tests that the maximum size limit for avatars is enforced when updating a per-room profile. """ @@ -220,7 +224,7 @@ def test_avatar_size_limit_per_room(self): self.assertEqual(channel.code, 200, channel.result) @unittest.override_config({"allowed_avatar_mimetypes": ["image/png"]}) - def test_avatar_allowed_mime_type_global(self): + def test_avatar_allowed_mime_type_global(self) -> None: """Tests that the MIME type whitelist for avatars is enforced when updating a global profile. """ @@ -251,7 +255,7 @@ def test_avatar_allowed_mime_type_global(self): self.assertEqual(channel.code, 200, channel.result) @unittest.override_config({"allowed_avatar_mimetypes": ["image/png"]}) - def test_avatar_allowed_mime_type_per_room(self): + def test_avatar_allowed_mime_type_per_room(self) -> None: """Tests that the MIME type whitelist for avatars is enforced when updating a per-room profile. """ @@ -283,7 +287,7 @@ def test_avatar_allowed_mime_type_per_room(self): ) self.assertEqual(channel.code, 200, channel.result) - def _setup_local_files(self, names_and_props: Dict[str, Dict[str, Any]]): + def _setup_local_files(self, names_and_props: Dict[str, Dict[str, Any]]) -> None: """Stores metadata about files in the database. Args: @@ -316,8 +320,7 @@ class ProfilesRestrictedTestCase(unittest.HomeserverTestCase): room.register_servlets, ] - def make_homeserver(self, reactor, clock): - + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() config["require_auth_for_profile_requests"] = True config["limit_profile_requests_to_users_who_share_rooms"] = True @@ -325,7 +328,7 @@ def make_homeserver(self, reactor, clock): return self.hs - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: # User owning the requested profile. 
self.owner = self.register_user("owner", "pass") self.owner_tok = self.login("owner", "pass") @@ -337,22 +340,24 @@ def prepare(self, reactor, clock, hs): self.room_id = self.helper.create_room_as(self.owner, tok=self.owner_tok) - def test_no_auth(self): + def test_no_auth(self) -> None: self.try_fetch_profile(401) - def test_not_in_shared_room(self): + def test_not_in_shared_room(self) -> None: self.ensure_requester_left_room() self.try_fetch_profile(403, access_token=self.requester_tok) - def test_in_shared_room(self): + def test_in_shared_room(self) -> None: self.ensure_requester_left_room() self.helper.join(room=self.room_id, user=self.requester, tok=self.requester_tok) self.try_fetch_profile(200, self.requester_tok) - def try_fetch_profile(self, expected_code, access_token=None): + def try_fetch_profile( + self, expected_code: int, access_token: Optional[str] = None + ) -> None: self.request_profile(expected_code, access_token=access_token) self.request_profile( @@ -363,13 +368,18 @@ def try_fetch_profile(self, expected_code, access_token=None): expected_code, url_suffix="/avatar_url", access_token=access_token ) - def request_profile(self, expected_code, url_suffix="", access_token=None): + def request_profile( + self, + expected_code: int, + url_suffix: str = "", + access_token: Optional[str] = None, + ) -> None: channel = self.make_request( "GET", self.profile_url + url_suffix, access_token=access_token ) self.assertEqual(channel.code, expected_code, channel.result) - def ensure_requester_left_room(self): + def ensure_requester_left_room(self) -> None: try: self.helper.leave( room=self.room_id, user=self.requester, tok=self.requester_tok @@ -389,7 +399,7 @@ class OwnProfileUnrestrictedTestCase(unittest.HomeserverTestCase): profile.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() config["require_auth_for_profile_requests"] = True config["limit_profile_requests_to_users_who_share_rooms"] = True @@ -397,12 +407,12 @@ def make_homeserver(self, reactor, clock): return self.hs - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: # User requesting the profile. self.requester = self.register_user("requester", "pass") self.requester_tok = self.login("requester", "pass") - def test_can_lookup_own_profile(self): + def test_can_lookup_own_profile(self) -> None: """Tests that a user can lookup their own profile without having to be in a room if 'require_auth_for_profile_requests' is set to true in the server's config. """ diff --git a/tests/rest/client/test_push_rule_attrs.py b/tests/rest/client/test_push_rule_attrs.py index d0ce91ccd95c..4f875b92898e 100644 --- a/tests/rest/client/test_push_rule_attrs.py +++ b/tests/rest/client/test_push_rule_attrs.py @@ -27,7 +27,7 @@ class PushRuleAttributesTestCase(HomeserverTestCase): ] hijack_auth = False - def test_enabled_on_creation(self): + def test_enabled_on_creation(self) -> None: """ Tests the GET and PUT of push rules' `enabled` endpoints. Tests that a rule is enabled upon creation, even though a rule with that @@ -56,7 +56,7 @@ def test_enabled_on_creation(self): self.assertEqual(channel.code, 200) self.assertEqual(channel.json_body["enabled"], True) - def test_enabled_on_recreation(self): + def test_enabled_on_recreation(self) -> None: """ Tests the GET and PUT of push rules' `enabled` endpoints. 
Tests that a rule is enabled upon creation, even if a rule with that @@ -113,7 +113,7 @@ def test_enabled_on_recreation(self): self.assertEqual(channel.code, 200) self.assertEqual(channel.json_body["enabled"], True) - def test_enabled_disable(self): + def test_enabled_disable(self) -> None: """ Tests the GET and PUT of push rules' `enabled` endpoints. Tests that a rule is disabled and enabled when we ask for it. @@ -166,7 +166,7 @@ def test_enabled_disable(self): self.assertEqual(channel.code, 200) self.assertEqual(channel.json_body["enabled"], True) - def test_enabled_404_when_get_non_existent(self): + def test_enabled_404_when_get_non_existent(self) -> None: """ Tests that `enabled` gives 404 when the rule doesn't exist. """ @@ -212,7 +212,7 @@ def test_enabled_404_when_get_non_existent(self): self.assertEqual(channel.code, 404) self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) - def test_enabled_404_when_get_non_existent_server_rule(self): + def test_enabled_404_when_get_non_existent_server_rule(self) -> None: """ Tests that `enabled` gives 404 when the server-default rule doesn't exist. """ @@ -226,7 +226,7 @@ def test_enabled_404_when_get_non_existent_server_rule(self): self.assertEqual(channel.code, 404) self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) - def test_enabled_404_when_put_non_existent_rule(self): + def test_enabled_404_when_put_non_existent_rule(self) -> None: """ Tests that `enabled` gives 404 when we put to a rule that doesn't exist. """ @@ -243,7 +243,7 @@ def test_enabled_404_when_put_non_existent_rule(self): self.assertEqual(channel.code, 404) self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) - def test_enabled_404_when_put_non_existent_server_rule(self): + def test_enabled_404_when_put_non_existent_server_rule(self) -> None: """ Tests that `enabled` gives 404 when we put to a server-default rule that doesn't exist. """ @@ -260,7 +260,7 @@ def test_enabled_404_when_put_non_existent_server_rule(self): self.assertEqual(channel.code, 404) self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) - def test_actions_get(self): + def test_actions_get(self) -> None: """ Tests that `actions` gives you what you expect on a fresh rule. """ @@ -289,7 +289,7 @@ def test_actions_get(self): channel.json_body["actions"], ["notify", {"set_tweak": "highlight"}] ) - def test_actions_put(self): + def test_actions_put(self) -> None: """ Tests that PUT on actions updates the value you'd get from GET. """ @@ -325,7 +325,7 @@ def test_actions_put(self): self.assertEqual(channel.code, 200) self.assertEqual(channel.json_body["actions"], ["dont_notify"]) - def test_actions_404_when_get_non_existent(self): + def test_actions_404_when_get_non_existent(self) -> None: """ Tests that `actions` gives 404 when the rule doesn't exist. """ @@ -365,7 +365,7 @@ def test_actions_404_when_get_non_existent(self): self.assertEqual(channel.code, 404) self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) - def test_actions_404_when_get_non_existent_server_rule(self): + def test_actions_404_when_get_non_existent_server_rule(self) -> None: """ Tests that `actions` gives 404 when the server-default rule doesn't exist. 
""" @@ -379,7 +379,7 @@ def test_actions_404_when_get_non_existent_server_rule(self): self.assertEqual(channel.code, 404) self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) - def test_actions_404_when_put_non_existent_rule(self): + def test_actions_404_when_put_non_existent_rule(self) -> None: """ Tests that `actions` gives 404 when putting to a rule that doesn't exist. """ @@ -396,7 +396,7 @@ def test_actions_404_when_put_non_existent_rule(self): self.assertEqual(channel.code, 404) self.assertEqual(channel.json_body["errcode"], Codes.NOT_FOUND) - def test_actions_404_when_put_non_existent_server_rule(self): + def test_actions_404_when_put_non_existent_server_rule(self) -> None: """ Tests that `actions` gives 404 when putting to a server-default rule that doesn't exist. """ diff --git a/tests/rest/client/test_redactions.py b/tests/rest/client/test_redactions.py index 433d715f695d..7401b5e0c0fa 100644 --- a/tests/rest/client/test_redactions.py +++ b/tests/rest/client/test_redactions.py @@ -11,9 +11,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from typing import List + +from twisted.test.proto_helpers import MemoryReactor from synapse.rest import admin from synapse.rest.client import login, room, sync +from synapse.server import HomeServer +from synapse.types import JsonDict +from synapse.util import Clock from tests.unittest import HomeserverTestCase @@ -28,7 +34,7 @@ class RedactionsTestCase(HomeserverTestCase): sync.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() config["rc_message"] = {"per_second": 0.2, "burst_count": 10} @@ -36,7 +42,7 @@ def make_homeserver(self, reactor, clock): return self.setup_test_homeserver(config=config) - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: # register a couple of users self.mod_user_id = self.register_user("user1", "pass") self.mod_access_token = self.login("user1", "pass") @@ -60,7 +66,9 @@ def prepare(self, reactor, clock, hs): room=self.room_id, user=self.other_user_id, tok=self.other_access_token ) - def _redact_event(self, access_token, room_id, event_id, expect_code=200): + def _redact_event( + self, access_token: str, room_id: str, event_id: str, expect_code: int = 200 + ) -> JsonDict: """Helper function to send a redaction event. Returns the json body. 
@@ -71,13 +79,13 @@ def _redact_event(self, access_token, room_id, event_id, expect_code=200): self.assertEqual(int(channel.result["code"]), expect_code) return channel.json_body - def _sync_room_timeline(self, access_token, room_id): + def _sync_room_timeline(self, access_token: str, room_id: str) -> List[JsonDict]: channel = self.make_request("GET", "sync", access_token=self.mod_access_token) self.assertEqual(channel.result["code"], b"200") room_sync = channel.json_body["rooms"]["join"][room_id] return room_sync["timeline"]["events"] - def test_redact_event_as_moderator(self): + def test_redact_event_as_moderator(self) -> None: # as a regular user, send a message to redact b = self.helper.send(room_id=self.room_id, tok=self.other_access_token) msg_id = b["event_id"] @@ -98,7 +106,7 @@ def test_redact_event_as_moderator(self): self.assertEqual(timeline[-2]["unsigned"]["redacted_by"], redaction_id) self.assertEqual(timeline[-2]["content"], {}) - def test_redact_event_as_normal(self): + def test_redact_event_as_normal(self) -> None: # as a regular user, send a message to redact b = self.helper.send(room_id=self.room_id, tok=self.other_access_token) normal_msg_id = b["event_id"] @@ -133,7 +141,7 @@ def test_redact_event_as_normal(self): self.assertEqual(timeline[-3]["unsigned"]["redacted_by"], redaction_id) self.assertEqual(timeline[-3]["content"], {}) - def test_redact_nonexistent_event(self): + def test_redact_nonexistent_event(self) -> None: # control case: an existing event b = self.helper.send(room_id=self.room_id, tok=self.other_access_token) msg_id = b["event_id"] @@ -158,7 +166,7 @@ def test_redact_nonexistent_event(self): self.assertEqual(timeline[-2]["unsigned"]["redacted_by"], redaction_id) self.assertEqual(timeline[-2]["content"], {}) - def test_redact_create_event(self): + def test_redact_create_event(self) -> None: # control case: an existing event b = self.helper.send(room_id=self.room_id, tok=self.mod_access_token) msg_id = b["event_id"] @@ -178,7 +186,7 @@ def test_redact_create_event(self): self.other_access_token, self.room_id, create_event_id, expect_code=403 ) - def test_redact_event_as_moderator_ratelimit(self): + def test_redact_event_as_moderator_ratelimit(self) -> None: """Tests that the correct ratelimiting is applied to redactions""" message_ids = [] diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index 8f7181103b2f..c8db45719e07 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -18,11 +18,15 @@ from typing import Dict, List, Optional, Tuple from unittest.mock import patch +from twisted.test.proto_helpers import MemoryReactor + from synapse.api.constants import EventTypes, RelationTypes from synapse.rest import admin from synapse.rest.client import login, register, relations, room, sync +from synapse.server import HomeServer from synapse.storage.relations import RelationPaginationToken from synapse.types import JsonDict, StreamToken +from synapse.util import Clock from tests import unittest from tests.server import FakeChannel @@ -52,7 +56,7 @@ def default_config(self) -> dict: return config - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self.user_id, self.user_token = self._create_user("alice") @@ -63,7 +67,7 @@ def prepare(self, reactor, clock, hs): res = self.helper.send(self.room, body="Hi!", tok=self.user_token) self.parent_id = res["event_id"] - def 
test_send_relation(self): + def test_send_relation(self) -> None: """Tests that sending a relation using the new /send_relation works creates the right shape of event. """ @@ -95,7 +99,7 @@ def test_send_relation(self): channel.json_body, ) - def test_deny_invalid_event(self): + def test_deny_invalid_event(self) -> None: """Test that we deny relations on non-existant events""" channel = self._send_relation( RelationTypes.ANNOTATION, @@ -125,7 +129,7 @@ def test_deny_invalid_event(self): ) self.assertEqual(200, channel.code, channel.json_body) - def test_deny_invalid_room(self): + def test_deny_invalid_room(self) -> None: """Test that we deny relations on non-existant events""" # Create another room and send a message in it. room2 = self.helper.create_room_as(self.user_id, tok=self.user_token) @@ -138,7 +142,7 @@ def test_deny_invalid_room(self): ) self.assertEqual(400, channel.code, channel.json_body) - def test_deny_double_react(self): + def test_deny_double_react(self) -> None: """Test that we deny relations on membership events""" channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a") self.assertEqual(200, channel.code, channel.json_body) @@ -146,7 +150,7 @@ def test_deny_double_react(self): channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") self.assertEqual(400, channel.code, channel.json_body) - def test_deny_forked_thread(self): + def test_deny_forked_thread(self) -> None: """It is invalid to start a thread off a thread.""" channel = self._send_relation( RelationTypes.THREAD, @@ -165,7 +169,7 @@ def test_deny_forked_thread(self): ) self.assertEqual(400, channel.code, channel.json_body) - def test_basic_paginate_relations(self): + def test_basic_paginate_relations(self) -> None: """Tests that calling pagination API correctly the latest relations.""" channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") self.assertEqual(200, channel.code, channel.json_body) @@ -235,7 +239,7 @@ def _stream_token_to_relation_token(self, token: str) -> str: ).to_string(self.store) ) - def test_repeated_paginate_relations(self): + def test_repeated_paginate_relations(self) -> None: """Test that if we paginate using a limit and tokens then we get the expected events. """ @@ -303,7 +307,7 @@ def test_repeated_paginate_relations(self): found_event_ids.reverse() self.assertEqual(found_event_ids, expected_event_ids) - def test_pagination_from_sync_and_messages(self): + def test_pagination_from_sync_and_messages(self) -> None: """Pagination tokens from /sync and /messages can be used to paginate /relations.""" channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "A") self.assertEqual(200, channel.code, channel.json_body) @@ -362,7 +366,7 @@ def test_pagination_from_sync_and_messages(self): annotation_id, [ev["event_id"] for ev in channel.json_body["chunk"]] ) - def test_aggregation_pagination_groups(self): + def test_aggregation_pagination_groups(self) -> None: """Test that we can paginate annotation groups correctly.""" # We need to create ten separate users to send each reaction. @@ -427,7 +431,7 @@ def test_aggregation_pagination_groups(self): self.assertEqual(sent_groups, found_groups) - def test_aggregation_pagination_within_group(self): + def test_aggregation_pagination_within_group(self) -> None: """Test that we can paginate within an annotation group.""" # We need to create ten separate users to send each reaction. 
@@ -524,7 +528,7 @@ def test_aggregation_pagination_within_group(self): found_event_ids.reverse() self.assertEqual(found_event_ids, expected_event_ids) - def test_aggregation(self): + def test_aggregation(self) -> None: """Test that annotations get correctly aggregated.""" channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") @@ -556,7 +560,7 @@ def test_aggregation(self): }, ) - def test_aggregation_redactions(self): + def test_aggregation_redactions(self) -> None: """Test that annotations get correctly aggregated after a redaction.""" channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") @@ -590,7 +594,7 @@ def test_aggregation_redactions(self): {"chunk": [{"type": "m.reaction", "key": "a", "count": 1}]}, ) - def test_aggregation_must_be_annotation(self): + def test_aggregation_must_be_annotation(self) -> None: """Test that aggregations must be annotations.""" channel = self.make_request( @@ -604,7 +608,7 @@ def test_aggregation_must_be_annotation(self): @unittest.override_config( {"experimental_features": {"msc3440_enabled": True, "msc3666_enabled": True}} ) - def test_bundled_aggregations(self): + def test_bundled_aggregations(self) -> None: """ Test that annotations, references, and threads get correctly bundled. @@ -746,7 +750,7 @@ def assert_bundle(event_json: JsonDict) -> None: ] assert_bundle(self._find_event_in_chunk(chunk)) - def test_aggregation_get_event_for_annotation(self): + def test_aggregation_get_event_for_annotation(self) -> None: """Test that annotations do not get bundled aggregations included when directly requested. """ @@ -768,7 +772,7 @@ def test_aggregation_get_event_for_annotation(self): self.assertEqual(200, channel.code, channel.json_body) self.assertIsNone(channel.json_body["unsigned"].get("m.relations")) - def test_aggregation_get_event_for_thread(self): + def test_aggregation_get_event_for_thread(self) -> None: """Test that threads get bundled aggregations included when directly requested.""" channel = self._send_relation(RelationTypes.THREAD, "m.room.test") self.assertEqual(200, channel.code, channel.json_body) @@ -815,7 +819,7 @@ def test_aggregation_get_event_for_thread(self): ) @unittest.override_config({"experimental_features": {"msc3440_enabled": True}}) - def test_ignore_invalid_room(self): + def test_ignore_invalid_room(self) -> None: """Test that we ignore invalid relations over federation.""" # Create another room and send a message in it. room2 = self.helper.create_room_as(self.user_id, tok=self.user_token) @@ -927,7 +931,7 @@ def test_ignore_invalid_room(self): self.assertNotIn("m.relations", channel.json_body["unsigned"]) @unittest.override_config({"experimental_features": {"msc3666_enabled": True}}) - def test_edit(self): + def test_edit(self) -> None: """Test that a simple edit works.""" new_body = {"msgtype": "m.text", "body": "I've been edited!"} @@ -1010,7 +1014,7 @@ def assert_bundle(event_json: JsonDict) -> None: ] assert_bundle(self._find_event_in_chunk(chunk)) - def test_multi_edit(self): + def test_multi_edit(self) -> None: """Test that multiple edits, including attempts by people who shouldn't be allowed, are correctly handled. """ @@ -1067,7 +1071,7 @@ def test_multi_edit(self): {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict ) - def test_edit_reply(self): + def test_edit_reply(self) -> None: """Test that editing a reply works.""" # Create a reply to edit. 
@@ -1124,7 +1128,7 @@ def test_edit_reply(self): ) @unittest.override_config({"experimental_features": {"msc3440_enabled": True}}) - def test_edit_thread(self): + def test_edit_thread(self) -> None: """Test that editing a thread works.""" # Create a thread and edit the last event. @@ -1163,7 +1167,7 @@ def test_edit_thread(self): latest_event_in_thread = thread_summary["latest_event"] self.assertEqual(latest_event_in_thread["content"]["body"], "I've been edited!") - def test_edit_edit(self): + def test_edit_edit(self) -> None: """Test that an edit cannot be edited.""" new_body = {"msgtype": "m.text", "body": "Initial edit"} channel = self._send_relation( @@ -1213,7 +1217,7 @@ def test_edit_edit(self): {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict ) - def test_relations_redaction_redacts_edits(self): + def test_relations_redaction_redacts_edits(self) -> None: """Test that edits of an event are redacted when the original event is redacted. """ @@ -1269,7 +1273,7 @@ def test_relations_redaction_redacts_edits(self): self.assertIn("chunk", channel.json_body) self.assertEqual(channel.json_body["chunk"], []) - def test_aggregations_redaction_prevents_access_to_aggregations(self): + def test_aggregations_redaction_prevents_access_to_aggregations(self) -> None: """Test that annotations of an event are redacted when the original event is redacted. """ @@ -1309,7 +1313,7 @@ def test_aggregations_redaction_prevents_access_to_aggregations(self): self.assertIn("chunk", channel.json_body) self.assertEqual(channel.json_body["chunk"], []) - def test_unknown_relations(self): + def test_unknown_relations(self) -> None: """Unknown relations should be accepted.""" channel = self._send_relation("m.relation.test", "m.room.test") self.assertEqual(200, channel.code, channel.json_body) @@ -1417,7 +1421,7 @@ def _create_user(self, localpart: str) -> Tuple[str, str]: return user_id, access_token - def test_background_update(self): + def test_background_update(self) -> None: """Test the event_arbitrary_relations background update.""" channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") self.assertEqual(200, channel.code, channel.json_body) diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py index c41a1c14a138..f3bf8d0934e6 100644 --- a/tests/rest/client/test_retention.py +++ b/tests/rest/client/test_retention.py @@ -13,9 +13,14 @@ # limitations under the License. 
from unittest.mock import Mock +from twisted.test.proto_helpers import MemoryReactor + from synapse.api.constants import EventTypes from synapse.rest import admin from synapse.rest.client import login, room +from synapse.server import HomeServer +from synapse.types import JsonDict +from synapse.util import Clock from synapse.visibility import filter_events_for_client from tests import unittest @@ -31,7 +36,7 @@ class RetentionTestCase(unittest.HomeserverTestCase): room.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() config["retention"] = { "enabled": True, @@ -47,7 +52,7 @@ def make_homeserver(self, reactor, clock): return self.hs - def prepare(self, reactor, clock, homeserver): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.user_id = self.register_user("user", "password") self.token = self.login("user", "password") @@ -55,7 +60,7 @@ def prepare(self, reactor, clock, homeserver): self.serializer = self.hs.get_event_client_serializer() self.clock = self.hs.get_clock() - def test_retention_event_purged_with_state_event(self): + def test_retention_event_purged_with_state_event(self) -> None: """Tests that expired events are correctly purged when the room's retention policy is defined by a state event. """ @@ -72,7 +77,7 @@ def test_retention_event_purged_with_state_event(self): self._test_retention_event_purged(room_id, one_day_ms * 1.5) - def test_retention_event_purged_with_state_event_outside_allowed(self): + def test_retention_event_purged_with_state_event_outside_allowed(self) -> None: """Tests that the server configuration can override the policy for a room when running the purge jobs. """ @@ -102,7 +107,7 @@ def test_retention_event_purged_with_state_event_outside_allowed(self): # instead of the one specified in the room's policy. self._test_retention_event_purged(room_id, one_day_ms * 0.5) - def test_retention_event_purged_without_state_event(self): + def test_retention_event_purged_without_state_event(self) -> None: """Tests that expired events are correctly purged when the room's retention policy is defined by the server's configuration's default retention policy. """ @@ -110,7 +115,7 @@ def test_retention_event_purged_without_state_event(self): self._test_retention_event_purged(room_id, one_day_ms * 2) - def test_visibility(self): + def test_visibility(self) -> None: """Tests that synapse.visibility.filter_events_for_client correctly filters out outdated events """ @@ -152,7 +157,7 @@ def test_visibility(self): # That event should be the second, not outdated event. self.assertEqual(filtered_events[0].event_id, valid_event_id, filtered_events) - def _test_retention_event_purged(self, room_id: str, increment: float): + def _test_retention_event_purged(self, room_id: str, increment: float) -> None: """Run the following test scenario to test the message retention policy support: 1. Send event 1 @@ -186,6 +191,7 @@ def _test_retention_event_purged(self, room_id: str, increment: float): resp = self.helper.send(room_id=room_id, body="1", tok=self.token) expired_event_id = resp.get("event_id") + assert expired_event_id is not None # Check that we can retrieve the event. 
expired_event = self.get_event(expired_event_id) @@ -201,6 +207,7 @@ def _test_retention_event_purged(self, room_id: str, increment: float): resp = self.helper.send(room_id=room_id, body="2", tok=self.token) valid_event_id = resp.get("event_id") + assert valid_event_id is not None # Advance the time again. Now our first event should have expired but our second # one should still be kept. @@ -218,7 +225,7 @@ def _test_retention_event_purged(self, room_id: str, increment: float): # has been purged. self.get_event(room_id, create_event.event_id) - def get_event(self, event_id, expect_none=False): + def get_event(self, event_id: str, expect_none: bool = False) -> JsonDict: event = self.get_success(self.store.get_event(event_id, allow_none=True)) if expect_none: @@ -240,7 +247,7 @@ class RetentionNoDefaultPolicyTestCase(unittest.HomeserverTestCase): room.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() config["retention"] = { "enabled": True, @@ -254,11 +261,11 @@ def make_homeserver(self, reactor, clock): ) return self.hs - def prepare(self, reactor, clock, homeserver): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.user_id = self.register_user("user", "password") self.token = self.login("user", "password") - def test_no_default_policy(self): + def test_no_default_policy(self) -> None: """Tests that an event doesn't get expired if there is neither a default retention policy nor a policy specific to the room. """ @@ -266,7 +273,7 @@ def test_no_default_policy(self): self._test_retention(room_id) - def test_state_policy(self): + def test_state_policy(self) -> None: """Tests that an event gets correctly expired if there is no default retention policy but there's a policy specific to the room. """ @@ -283,12 +290,15 @@ def test_state_policy(self): self._test_retention(room_id, expected_code_for_first_event=404) - def _test_retention(self, room_id, expected_code_for_first_event=200): + def _test_retention( + self, room_id: str, expected_code_for_first_event: int = 200 + ) -> None: # Send a first event to the room. This is the event we'll want to be purged at the # end of the test. resp = self.helper.send(room_id=room_id, body="1", tok=self.token) first_event_id = resp.get("event_id") + assert first_event_id is not None # Check that we can retrieve the event. expired_event = self.get_event(room_id, first_event_id) @@ -304,6 +314,7 @@ def _test_retention(self, room_id, expected_code_for_first_event=200): resp = self.helper.send(room_id=room_id, body="2", tok=self.token) second_event_id = resp.get("event_id") + assert second_event_id is not None # Advance the time by another month. 
self.reactor.advance(one_day_ms * 30 / 1000) @@ -322,7 +333,9 @@ def _test_retention(self, room_id, expected_code_for_first_event=200): second_event = self.get_event(room_id, second_event_id) self.assertEqual(second_event.get("content", {}).get("body"), "2", second_event) - def get_event(self, room_id, event_id, expected_code=200): + def get_event( + self, room_id: str, event_id: str, expected_code: int = 200 + ) -> JsonDict: url = "/_matrix/client/r0/rooms/%s/event/%s" % (room_id, event_id) channel = self.make_request("GET", url, access_token=self.token) diff --git a/tests/rest/client/test_sendtodevice.py b/tests/rest/client/test_sendtodevice.py index e2ed14457fc0..c3942889e113 100644 --- a/tests/rest/client/test_sendtodevice.py +++ b/tests/rest/client/test_sendtodevice.py @@ -26,7 +26,7 @@ class SendToDeviceTestCase(HomeserverTestCase): sync.register_servlets, ] - def test_user_to_user(self): + def test_user_to_user(self) -> None: """A to-device message from one user to another should get delivered""" user1 = self.register_user("u1", "pass") @@ -73,7 +73,7 @@ def test_user_to_user(self): self.assertEqual(channel.json_body.get("to_device", {}).get("events", []), []) @override_config({"rc_key_requests": {"per_second": 10, "burst_count": 2}}) - def test_local_room_key_request(self): + def test_local_room_key_request(self) -> None: """m.room_key_request has special-casing; test from local user""" user1 = self.register_user("u1", "pass") user1_tok = self.login("u1", "pass", "d1") @@ -128,7 +128,7 @@ def test_local_room_key_request(self): ) @override_config({"rc_key_requests": {"per_second": 10, "burst_count": 2}}) - def test_remote_room_key_request(self): + def test_remote_room_key_request(self) -> None: """m.room_key_request has special-casing; test from remote user""" user2 = self.register_user("u2", "pass") user2_tok = self.login("u2", "pass", "d2") @@ -199,7 +199,7 @@ def test_remote_room_key_request(self): }, ) - def test_limited_sync(self): + def test_limited_sync(self) -> None: """If a limited sync for to-devices happens the next /sync should respond immediately.""" self.register_user("u1", "pass") diff --git a/tests/rest/client/test_shadow_banned.py b/tests/rest/client/test_shadow_banned.py index 2634c98dde40..ae5ada3be7ef 100644 --- a/tests/rest/client/test_shadow_banned.py +++ b/tests/rest/client/test_shadow_banned.py @@ -14,6 +14,8 @@ from unittest.mock import Mock, patch +from twisted.test.proto_helpers import MemoryReactor + import synapse.rest.admin from synapse.api.constants import EventTypes from synapse.rest.client import ( @@ -23,13 +25,15 @@ room, room_upgrade_rest_servlet, ) +from synapse.server import HomeServer from synapse.types import UserID +from synapse.util import Clock from tests import unittest class _ShadowBannedBase(unittest.HomeserverTestCase): - def prepare(self, reactor, clock, homeserver): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: # Create two users, one of which is shadow-banned. self.banned_user_id = self.register_user("banned", "test") self.banned_access_token = self.login("banned", "test") @@ -55,7 +59,7 @@ class RoomTestCase(_ShadowBannedBase): room_upgrade_rest_servlet.register_servlets, ] - def test_invite(self): + def test_invite(self) -> None: """Invites from shadow-banned users don't actually get sent.""" # The create works fine. 
@@ -77,7 +81,7 @@ def test_invite(self): ) self.assertEqual(invited_rooms, []) - def test_invite_3pid(self): + def test_invite_3pid(self) -> None: """Ensure that a 3PID invite does not attempt to contact the identity server.""" identity_handler = self.hs.get_identity_handler() identity_handler.lookup_3pid = Mock( @@ -101,7 +105,7 @@ def test_invite_3pid(self): # This should have raised an error earlier, but double check this wasn't called. identity_handler.lookup_3pid.assert_not_called() - def test_create_room(self): + def test_create_room(self) -> None: """Invitations during a room creation should be discarded, but the room still gets created.""" # The room creation is successful. channel = self.make_request( @@ -126,7 +130,7 @@ def test_create_room(self): users = self.get_success(self.store.get_users_in_room(room_id)) self.assertCountEqual(users, ["@banned:test", "@otheruser:test"]) - def test_message(self): + def test_message(self) -> None: """Messages from shadow-banned users don't actually get sent.""" room_id = self.helper.create_room_as( @@ -151,7 +155,7 @@ def test_message(self): ) self.assertNotIn(event_id, latest_events) - def test_upgrade(self): + def test_upgrade(self) -> None: """A room upgrade should fail, but look like it succeeded.""" # The create works fine. @@ -177,7 +181,7 @@ def test_upgrade(self): # The summary should be empty since the room doesn't exist. self.assertEqual(summary, {}) - def test_typing(self): + def test_typing(self) -> None: """Typing notifications should not be propagated into the room.""" # The create works fine. room_id = self.helper.create_room_as( @@ -240,7 +244,7 @@ class ProfileTestCase(_ShadowBannedBase): room.register_servlets, ] - def test_displayname(self): + def test_displayname(self) -> None: """Profile changes should succeed, but don't end up in a room.""" original_display_name = "banned" new_display_name = "new name" @@ -281,7 +285,7 @@ def test_displayname(self): event.content, {"membership": "join", "displayname": original_display_name} ) - def test_room_displayname(self): + def test_room_displayname(self) -> None: """Changes to state events for a room should be processed, but not end up in the room.""" original_display_name = "banned" new_display_name = "new name" diff --git a/tests/rest/client/test_shared_rooms.py b/tests/rest/client/test_shared_rooms.py index 294f46fb95b9..3818b7b14bf3 100644 --- a/tests/rest/client/test_shared_rooms.py +++ b/tests/rest/client/test_shared_rooms.py @@ -11,8 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from twisted.test.proto_helpers import MemoryReactor + import synapse.rest.admin from synapse.rest.client import login, room, shared_rooms +from synapse.server import HomeServer +from synapse.util import Clock from tests import unittest from tests.server import FakeChannel @@ -30,16 +34,16 @@ class UserSharedRoomsTest(unittest.HomeserverTestCase): shared_rooms.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() config["update_user_directory"] = True return self.setup_test_homeserver(config=config) - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self.handler = hs.get_user_directory_handler() - def _get_shared_rooms(self, token, other_user) -> FakeChannel: + def _get_shared_rooms(self, token: str, other_user: str) -> FakeChannel: return self.make_request( "GET", "/_matrix/client/unstable/uk.half-shot.msc2666/user/shared_rooms/%s" @@ -47,14 +51,14 @@ def _get_shared_rooms(self, token, other_user) -> FakeChannel: access_token=token, ) - def test_shared_room_list_public(self): + def test_shared_room_list_public(self) -> None: """ A room should show up in the shared list of rooms between two users if it is public. """ self._check_shared_rooms_with(room_one_is_public=True, room_two_is_public=True) - def test_shared_room_list_private(self): + def test_shared_room_list_private(self) -> None: """ A room should show up in the shared list of rooms between two users if it is private. @@ -63,7 +67,7 @@ def test_shared_room_list_private(self): room_one_is_public=False, room_two_is_public=False ) - def test_shared_room_list_mixed(self): + def test_shared_room_list_mixed(self) -> None: """ The shared room list between two users should contain both public and private rooms. @@ -72,7 +76,7 @@ def test_shared_room_list_mixed(self): def _check_shared_rooms_with( self, room_one_is_public: bool, room_two_is_public: bool - ): + ) -> None: """Checks that shared public or private rooms between two users appear in their shared room lists """ @@ -109,7 +113,7 @@ def _check_shared_rooms_with( for room_id_id in channel.json_body["joined"]: self.assertIn(room_id_id, [room_id_one, room_id_two]) - def test_shared_room_list_after_leave(self): + def test_shared_room_list_after_leave(self) -> None: """ A room should no longer be considered shared if the other user has left it. diff --git a/tests/rest/client/test_upgrade_room.py b/tests/rest/client/test_upgrade_room.py index 658c21b2a19f..b7d0f42daf5f 100644 --- a/tests/rest/client/test_upgrade_room.py +++ b/tests/rest/client/test_upgrade_room.py @@ -13,11 +13,14 @@ # limitations under the License. 
from typing import Optional +from twisted.test.proto_helpers import MemoryReactor + from synapse.api.constants import EventContentFields, EventTypes, RoomTypes from synapse.config.server import DEFAULT_ROOM_VERSION from synapse.rest import admin from synapse.rest.client import login, room, room_upgrade_rest_servlet from synapse.server import HomeServer +from synapse.util import Clock from tests import unittest from tests.server import FakeChannel @@ -31,7 +34,7 @@ class UpgradeRoomTest(unittest.HomeserverTestCase): room_upgrade_rest_servlet.register_servlets, ] - def prepare(self, reactor, clock, hs: "HomeServer"): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self.creator = self.register_user("creator", "pass") @@ -60,7 +63,7 @@ def _upgrade_room( access_token=token or self.creator_token, ) - def test_upgrade(self): + def test_upgrade(self) -> None: """ Upgrading a room should work fine. """ @@ -68,7 +71,7 @@ def test_upgrade(self): self.assertEqual(200, channel.code, channel.result) self.assertIn("replacement_room", channel.json_body) - def test_not_in_room(self): + def test_not_in_room(self) -> None: """ Upgrading a room should work fine. """ @@ -79,7 +82,7 @@ def test_not_in_room(self): channel = self._upgrade_room(roomless_token) self.assertEqual(403, channel.code, channel.result) - def test_power_levels(self): + def test_power_levels(self) -> None: """ Another user can upgrade the room if their power level is increased. """ @@ -105,7 +108,7 @@ def test_power_levels(self): channel = self._upgrade_room(self.other_token) self.assertEqual(200, channel.code, channel.result) - def test_power_levels_user_default(self): + def test_power_levels_user_default(self) -> None: """ Another user can upgrade the room if the default power level for users is increased. """ @@ -131,7 +134,7 @@ def test_power_levels_user_default(self): channel = self._upgrade_room(self.other_token) self.assertEqual(200, channel.code, channel.result) - def test_power_levels_tombstone(self): + def test_power_levels_tombstone(self) -> None: """ Another user can upgrade the room if they can send the tombstone event. """ @@ -164,7 +167,7 @@ def test_power_levels_tombstone(self): ) self.assertNotIn(self.other, power_levels["users"]) - def test_space(self): + def test_space(self) -> None: """Test upgrading a space.""" # Create a space. diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py index 46cd5f70a8ed..28663826fcbc 100644 --- a/tests/rest/client/utils.py +++ b/tests/rest/client/utils.py @@ -41,6 +41,7 @@ from twisted.web.server import Site from synapse.api.constants import Membership +from synapse.server import HomeServer from synapse.types import JsonDict from tests.server import FakeChannel, FakeSite, make_request @@ -48,15 +49,15 @@ from tests.test_utils.html_parsers import TestHtmlParser -@attr.s +@attr.s(auto_attribs=True) class RestHelper: """Contains extra helper functions to quickly and clearly perform a given REST action, which isn't the focus of the test. 
""" - hs = attr.ib() - site = attr.ib(type=Site) - auth_user_id = attr.ib() + hs: HomeServer + site: Site + auth_user_id: Optional[str] @overload def create_room_as( @@ -145,7 +146,7 @@ def create_room_as( def invite( self, - room: Optional[str] = None, + room: str, src: Optional[str] = None, targ: Optional[str] = None, expect_code: int = HTTPStatus.OK, @@ -216,7 +217,7 @@ def knock( def leave( self, - room: Optional[str] = None, + room: str, user: Optional[str] = None, expect_code: int = HTTPStatus.OK, tok: Optional[str] = None, @@ -230,14 +231,22 @@ def leave( expect_code=expect_code, ) - def ban(self, room: str, src: str, targ: str, **kwargs: object) -> None: + def ban( + self, + room: str, + src: str, + targ: str, + expect_code: int = HTTPStatus.OK, + tok: Optional[str] = None, + ) -> None: """A convenience helper: `change_membership` with `membership` preset to "ban".""" self.change_membership( room=room, src=src, targ=targ, + tok=tok, membership=Membership.BAN, - **kwargs, + expect_code=expect_code, ) def change_membership( @@ -378,7 +387,7 @@ def _read_write_state( room_id: str, event_type: str, body: Optional[Dict[str, Any]], - tok: str, + tok: Optional[str], expect_code: int = HTTPStatus.OK, state_key: str = "", method: str = "GET", @@ -458,7 +467,7 @@ def send_state( room_id: str, event_type: str, body: Dict[str, Any], - tok: str, + tok: Optional[str], expect_code: int = HTTPStatus.OK, state_key: str = "", ) -> JsonDict: @@ -658,7 +667,12 @@ def complete_oidc_auth( (TEST_OIDC_USERINFO_ENDPOINT, user_info_dict), ] - async def mock_req(method: str, uri: str, data=None, headers=None): + async def mock_req( + method: str, + uri: str, + data: Optional[dict] = None, + headers: Optional[Iterable[Tuple[AnyStr, AnyStr]]] = None, + ): (expected_uri, resp_obj) = expected_requests.pop(0) assert uri == expected_uri resp = FakeResponse( From 1866fb39d7ffc86d7374a9aed916f70a91ec65fa Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 28 Feb 2022 13:29:09 -0500 Subject: [PATCH 309/474] Move experimental support for MSC3440 to /versions. (#12099) Instead of being part of /capabilities, this matches a change to MSC3440 to properly use these endpoints. --- changelog.d/12099.misc | 1 + synapse/rest/client/capabilities.py | 3 --- synapse/rest/client/versions.py | 2 ++ 3 files changed, 3 insertions(+), 3 deletions(-) create mode 100644 changelog.d/12099.misc diff --git a/changelog.d/12099.misc b/changelog.d/12099.misc new file mode 100644 index 000000000000..0553825dbce4 --- /dev/null +++ b/changelog.d/12099.misc @@ -0,0 +1 @@ +Move experimental support for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440) to /versions. 
diff --git a/synapse/rest/client/capabilities.py b/synapse/rest/client/capabilities.py index b80fdd3712fa..4237071c61bd 100644 --- a/synapse/rest/client/capabilities.py +++ b/synapse/rest/client/capabilities.py @@ -72,9 +72,6 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: "org.matrix.msc3244.room_capabilities" ] = MSC3244_CAPABILITIES - if self.config.experimental.msc3440_enabled: - response["capabilities"]["io.element.thread"] = {"enabled": True} - if self.config.experimental.msc3720_enabled: response["capabilities"]["org.matrix.msc3720.account_status"] = { "enabled": True, diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 00f29344a8a2..2e5d0e4e2258 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -99,6 +99,8 @@ def on_GET(self, request: Request) -> Tuple[int, JsonDict]: "org.matrix.msc2716": self.config.experimental.msc2716_enabled, # Adds support for jump to date endpoints (/timestamp_to_event) as per MSC3030 "org.matrix.msc3030": self.config.experimental.msc3030_enabled, + # Adds support for thread relations, per MSC3440. + "org.matrix.msc3440": self.config.experimental.msc3440_enabled, }, }, ) From 7754af24ab163a3666bc04c7df409e59ace0d763 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 28 Feb 2022 13:33:00 -0500 Subject: [PATCH 310/474] Remove the unstable `/spaces` endpoint. (#12073) ...and various code supporting it. The /spaces endpoint was from an old version of MSC2946 and included both a Client-Server and Server-Server API. Note that the unstable /hierarchy endpoint (from the final version of MSC2946) is not yet removed. --- changelog.d/12073.removal | 1 + docs/workers.md | 2 - synapse/federation/federation_client.py | 226 ++---------- synapse/federation/transport/client.py | 33 -- .../federation/transport/server/federation.py | 76 ----- synapse/handlers/room_summary.py | 323 +----------------- synapse/rest/client/room.py | 68 ---- tests/handlers/test_room_summary.py | 119 +------ 8 files changed, 46 insertions(+), 802 deletions(-) create mode 100644 changelog.d/12073.removal diff --git a/changelog.d/12073.removal b/changelog.d/12073.removal new file mode 100644 index 000000000000..1f3979271270 --- /dev/null +++ b/changelog.d/12073.removal @@ -0,0 +1 @@ +Remove the unstable `/spaces` endpoint from [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946). diff --git a/docs/workers.md b/docs/workers.md index b82a6900acf4..b0f8599ef062 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -212,7 +212,6 @@ information. ^/_matrix/federation/v1/user/devices/ ^/_matrix/federation/v1/get_groups_publicised$ ^/_matrix/key/v2/query - ^/_matrix/federation/unstable/org.matrix.msc2946/spaces/ ^/_matrix/federation/(v1|unstable/org.matrix.msc2946)/hierarchy/ # Inbound federation transaction request @@ -225,7 +224,6 @@ information. 
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/context/.*$ ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$ ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$ - ^/_matrix/client/unstable/org.matrix.msc2946/rooms/.*/spaces$ ^/_matrix/client/(v1|unstable/org.matrix.msc2946)/rooms/.*/hierarchy$ ^/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$ ^/_matrix/client/(r0|v3|unstable)/account/3pid$ diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index a4bae3c4c85f..64e595e748f4 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -1362,61 +1362,6 @@ async def get_room_complexity( # server doesn't give it to us. return None - async def get_space_summary( - self, - destinations: Iterable[str], - room_id: str, - suggested_only: bool, - max_rooms_per_space: Optional[int], - exclude_rooms: List[str], - ) -> "FederationSpaceSummaryResult": - """ - Call other servers to get a summary of the given space - - - Args: - destinations: The remote servers. We will try them in turn, omitting any - that have been blacklisted. - - room_id: ID of the space to be queried - - suggested_only: If true, ask the remote server to only return children - with the "suggested" flag set - - max_rooms_per_space: A limit on the number of children to return for each - space - - exclude_rooms: A list of room IDs to tell the remote server to skip - - Returns: - a parsed FederationSpaceSummaryResult - - Raises: - SynapseError if we were unable to get a valid summary from any of the - remote servers - """ - - async def send_request(destination: str) -> FederationSpaceSummaryResult: - res = await self.transport_layer.get_space_summary( - destination=destination, - room_id=room_id, - suggested_only=suggested_only, - max_rooms_per_space=max_rooms_per_space, - exclude_rooms=exclude_rooms, - ) - - try: - return FederationSpaceSummaryResult.from_json_dict(res) - except ValueError as e: - raise InvalidResponseError(str(e)) - - return await self._try_destination_list( - "fetch space summary", - destinations, - send_request, - failover_on_unknown_endpoint=True, - ) - async def get_room_hierarchy( self, destinations: Iterable[str], @@ -1488,10 +1433,8 @@ async def send_request( if any(not isinstance(e, dict) for e in children_state): raise InvalidResponseError("Invalid event in 'children_state' list") try: - [ - FederationSpaceSummaryEventResult.from_json_dict(e) - for e in children_state - ] + for child_state in children_state: + _validate_hierarchy_event(child_state) except ValueError as e: raise InvalidResponseError(str(e)) @@ -1513,62 +1456,12 @@ async def send_request( return room, children_state, children, inaccessible_children - try: - result = await self._try_destination_list( - "fetch room hierarchy", - destinations, - send_request, - failover_on_unknown_endpoint=True, - ) - except SynapseError as e: - # If an unexpected error occurred, re-raise it. - if e.code != 502: - raise - - logger.debug( - "Couldn't fetch room hierarchy, falling back to the spaces API" - ) - - # Fallback to the old federation API and translate the results if - # no servers implement the new API. - # - # The algorithm below is a bit inefficient as it only attempts to - # parse information for the requested room, but the legacy API may - # return additional layers. 
- legacy_result = await self.get_space_summary( - destinations, - room_id, - suggested_only, - max_rooms_per_space=None, - exclude_rooms=[], - ) - - # Find the requested room in the response (and remove it). - for _i, room in enumerate(legacy_result.rooms): - if room.get("room_id") == room_id: - break - else: - # The requested room was not returned, nothing we can do. - raise - requested_room = legacy_result.rooms.pop(_i) - - # Find any children events of the requested room. - children_events = [] - children_room_ids = set() - for event in legacy_result.events: - if event.room_id == room_id: - children_events.append(event.data) - children_room_ids.add(event.state_key) - - # Find the children rooms. - children = [] - for room in legacy_result.rooms: - if room.get("room_id") in children_room_ids: - children.append(room) - - # It isn't clear from the response whether some of the rooms are - # not accessible. - result = (requested_room, children_events, children, ()) + result = await self._try_destination_list( + "fetch room hierarchy", + destinations, + send_request, + failover_on_unknown_endpoint=True, + ) # Cache the result to avoid fetching data over federation every time. self._get_room_hierarchy_cache[(room_id, suggested_only)] = result @@ -1710,89 +1603,34 @@ def from_json_dict(cls, d: JsonDict) -> "TimestampToEventResponse": return cls(event_id, origin_server_ts, d) -@attr.s(frozen=True, slots=True, auto_attribs=True) -class FederationSpaceSummaryEventResult: - """Represents a single event in the result of a successful get_space_summary call. - - It's essentially just a serialised event object, but we do a bit of parsing and - validation in `from_json_dict` and store some of the validated properties in - object attributes. - """ - - event_type: str - room_id: str - state_key: str - via: Sequence[str] - - # the raw data, including the above keys - data: JsonDict - - @classmethod - def from_json_dict(cls, d: JsonDict) -> "FederationSpaceSummaryEventResult": - """Parse an event within the result of a /spaces/ request - - Args: - d: json object to be parsed - - Raises: - ValueError if d is not a valid event - """ - - event_type = d.get("type") - if not isinstance(event_type, str): - raise ValueError("Invalid event: 'event_type' must be a str") - - room_id = d.get("room_id") - if not isinstance(room_id, str): - raise ValueError("Invalid event: 'room_id' must be a str") - - state_key = d.get("state_key") - if not isinstance(state_key, str): - raise ValueError("Invalid event: 'state_key' must be a str") +def _validate_hierarchy_event(d: JsonDict) -> None: + """Validate an event within the result of a /hierarchy request - content = d.get("content") - if not isinstance(content, dict): - raise ValueError("Invalid event: 'content' must be a dict") + Args: + d: json object to be parsed - via = content.get("via") - if not isinstance(via, Sequence): - raise ValueError("Invalid event: 'via' must be a list") - if any(not isinstance(v, str) for v in via): - raise ValueError("Invalid event: 'via' must be a list of strings") - - return cls(event_type, room_id, state_key, via, d) - - -@attr.s(frozen=True, slots=True, auto_attribs=True) -class FederationSpaceSummaryResult: - """Represents the data returned by a successful get_space_summary call.""" + Raises: + ValueError if d is not a valid event + """ - rooms: List[JsonDict] - events: Sequence[FederationSpaceSummaryEventResult] + event_type = d.get("type") + if not isinstance(event_type, str): + raise ValueError("Invalid event: 'event_type' must be a 
str") - @classmethod - def from_json_dict(cls, d: JsonDict) -> "FederationSpaceSummaryResult": - """Parse the result of a /spaces/ request + room_id = d.get("room_id") + if not isinstance(room_id, str): + raise ValueError("Invalid event: 'room_id' must be a str") - Args: - d: json object to be parsed + state_key = d.get("state_key") + if not isinstance(state_key, str): + raise ValueError("Invalid event: 'state_key' must be a str") - Raises: - ValueError if d is not a valid /spaces/ response - """ - rooms = d.get("rooms") - if not isinstance(rooms, List): - raise ValueError("'rooms' must be a list") - if any(not isinstance(r, dict) for r in rooms): - raise ValueError("Invalid room in 'rooms' list") - - events = d.get("events") - if not isinstance(events, Sequence): - raise ValueError("'events' must be a list") - if any(not isinstance(e, dict) for e in events): - raise ValueError("Invalid event in 'events' list") - parsed_events = [ - FederationSpaceSummaryEventResult.from_json_dict(e) for e in events - ] + content = d.get("content") + if not isinstance(content, dict): + raise ValueError("Invalid event: 'content' must be a dict") - return cls(rooms, parsed_events) + via = content.get("via") + if not isinstance(via, Sequence): + raise ValueError("Invalid event: 'via' must be a list") + if any(not isinstance(v, str) for v in via): + raise ValueError("Invalid event: 'via' must be a list of strings") diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 69998de52068..de6e5f44fe85 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -1179,39 +1179,6 @@ async def get_room_complexity(self, destination: str, room_id: str) -> JsonDict: return await self.client.get_json(destination=destination, path=path) - async def get_space_summary( - self, - destination: str, - room_id: str, - suggested_only: bool, - max_rooms_per_space: Optional[int], - exclude_rooms: List[str], - ) -> JsonDict: - """ - Args: - destination: The remote server - room_id: The room ID to ask about. - suggested_only: if True, only suggested rooms will be returned - max_rooms_per_space: an optional limit to the number of children to be - returned per space - exclude_rooms: a list of any rooms we can skip - """ - # TODO When switching to the stable endpoint, use GET instead of POST. 
- path = _create_path( - FEDERATION_UNSTABLE_PREFIX, "/org.matrix.msc2946/spaces/%s", room_id - ) - - params = { - "suggested_only": suggested_only, - "exclude_rooms": exclude_rooms, - } - if max_rooms_per_space is not None: - params["max_rooms_per_space"] = max_rooms_per_space - - return await self.client.post_json( - destination=destination, path=path, data=params - ) - async def get_room_hierarchy( self, destination: str, room_id: str, suggested_only: bool ) -> JsonDict: diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py index 23ce34305705..aed3d5069ca1 100644 --- a/synapse/federation/transport/server/federation.py +++ b/synapse/federation/transport/server/federation.py @@ -624,81 +624,6 @@ async def on_GET( ) -class FederationSpaceSummaryServlet(BaseFederationServlet): - PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc2946" - PATH = "/spaces/(?P[^/]*)" - - def __init__( - self, - hs: "HomeServer", - authenticator: Authenticator, - ratelimiter: FederationRateLimiter, - server_name: str, - ): - super().__init__(hs, authenticator, ratelimiter, server_name) - self.handler = hs.get_room_summary_handler() - - async def on_GET( - self, - origin: str, - content: Literal[None], - query: Mapping[bytes, Sequence[bytes]], - room_id: str, - ) -> Tuple[int, JsonDict]: - suggested_only = parse_boolean_from_args(query, "suggested_only", default=False) - - max_rooms_per_space = parse_integer_from_args(query, "max_rooms_per_space") - if max_rooms_per_space is not None and max_rooms_per_space < 0: - raise SynapseError( - 400, - "Value for 'max_rooms_per_space' must be a non-negative integer", - Codes.BAD_JSON, - ) - - exclude_rooms = parse_strings_from_args(query, "exclude_rooms", default=[]) - - return 200, await self.handler.federation_space_summary( - origin, room_id, suggested_only, max_rooms_per_space, exclude_rooms - ) - - # TODO When switching to the stable endpoint, remove the POST handler. 
- async def on_POST( - self, - origin: str, - content: JsonDict, - query: Mapping[bytes, Sequence[bytes]], - room_id: str, - ) -> Tuple[int, JsonDict]: - suggested_only = content.get("suggested_only", False) - if not isinstance(suggested_only, bool): - raise SynapseError( - 400, "'suggested_only' must be a boolean", Codes.BAD_JSON - ) - - exclude_rooms = content.get("exclude_rooms", []) - if not isinstance(exclude_rooms, list) or any( - not isinstance(x, str) for x in exclude_rooms - ): - raise SynapseError(400, "bad value for 'exclude_rooms'", Codes.BAD_JSON) - - max_rooms_per_space = content.get("max_rooms_per_space") - if max_rooms_per_space is not None: - if not isinstance(max_rooms_per_space, int): - raise SynapseError( - 400, "bad value for 'max_rooms_per_space'", Codes.BAD_JSON - ) - if max_rooms_per_space < 0: - raise SynapseError( - 400, - "Value for 'max_rooms_per_space' must be a non-negative integer", - Codes.BAD_JSON, - ) - - return 200, await self.handler.federation_space_summary( - origin, room_id, suggested_only, max_rooms_per_space, exclude_rooms - ) - - class FederationRoomHierarchyServlet(BaseFederationServlet): PATH = "/hierarchy/(?P[^/]*)" @@ -826,7 +751,6 @@ async def on_POST( On3pidBindServlet, FederationVersionServlet, RoomComplexityServlet, - FederationSpaceSummaryServlet, FederationRoomHierarchyServlet, FederationRoomHierarchyUnstableServlet, FederationV1SendKnockServlet, diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py index 2e61d1cbe92d..55c2cbdba8bc 100644 --- a/synapse/handlers/room_summary.py +++ b/synapse/handlers/room_summary.py @@ -15,7 +15,6 @@ import itertools import logging import re -from collections import deque from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Sequence, Set, Tuple import attr @@ -107,153 +106,6 @@ def __init__(self, hs: "HomeServer"): "get_room_hierarchy", ) - async def get_space_summary( - self, - requester: str, - room_id: str, - suggested_only: bool = False, - max_rooms_per_space: Optional[int] = None, - ) -> JsonDict: - """ - Implementation of the space summary C-S API - - Args: - requester: user id of the user making this request - - room_id: room id to start the summary at - - suggested_only: whether we should only return children with the "suggested" - flag set. - - max_rooms_per_space: an optional limit on the number of child rooms we will - return. This does not apply to the root room (ie, room_id), and - is overridden by MAX_ROOMS_PER_SPACE. - - Returns: - summary dict to return - """ - # First of all, check that the room is accessible. - if not await self._is_local_room_accessible(room_id, requester): - raise AuthError( - 403, - "User %s not in room %s, and room previews are disabled" - % (requester, room_id), - ) - - # the queue of rooms to process - room_queue = deque((_RoomQueueEntry(room_id, ()),)) - - # rooms we have already processed - processed_rooms: Set[str] = set() - - # events we have already processed. 
We don't necessarily have their event ids, - # so instead we key on (room id, state key) - processed_events: Set[Tuple[str, str]] = set() - - rooms_result: List[JsonDict] = [] - events_result: List[JsonDict] = [] - - if max_rooms_per_space is None or max_rooms_per_space > MAX_ROOMS_PER_SPACE: - max_rooms_per_space = MAX_ROOMS_PER_SPACE - - while room_queue and len(rooms_result) < MAX_ROOMS: - queue_entry = room_queue.popleft() - room_id = queue_entry.room_id - if room_id in processed_rooms: - # already done this room - continue - - logger.debug("Processing room %s", room_id) - - is_in_room = await self._store.is_host_joined(room_id, self._server_name) - - # The client-specified max_rooms_per_space limit doesn't apply to the - # room_id specified in the request, so we ignore it if this is the - # first room we are processing. - max_children = max_rooms_per_space if processed_rooms else MAX_ROOMS - - if is_in_room: - room_entry = await self._summarize_local_room( - requester, None, room_id, suggested_only, max_children - ) - - events: Sequence[JsonDict] = [] - if room_entry: - rooms_result.append(room_entry.room) - events = room_entry.children_state_events - - logger.debug( - "Query of local room %s returned events %s", - room_id, - ["%s->%s" % (ev["room_id"], ev["state_key"]) for ev in events], - ) - else: - fed_rooms = await self._summarize_remote_room( - queue_entry, - suggested_only, - max_children, - exclude_rooms=processed_rooms, - ) - - # The results over federation might include rooms that the we, - # as the requesting server, are allowed to see, but the requesting - # user is not permitted see. - # - # Filter the returned results to only what is accessible to the user. - events = [] - for room_entry in fed_rooms: - room = room_entry.room - fed_room_id = room_entry.room_id - - # The user can see the room, include it! - if await self._is_remote_room_accessible( - requester, fed_room_id, room - ): - # Before returning to the client, remove the allowed_room_ids - # and allowed_spaces keys. - room.pop("allowed_room_ids", None) - room.pop("allowed_spaces", None) # historical - - rooms_result.append(room) - events.extend(room_entry.children_state_events) - - # All rooms returned don't need visiting again (even if the user - # didn't have access to them). - processed_rooms.add(fed_room_id) - - logger.debug( - "Query of %s returned rooms %s, events %s", - room_id, - [room_entry.room.get("room_id") for room_entry in fed_rooms], - ["%s->%s" % (ev["room_id"], ev["state_key"]) for ev in events], - ) - - # the room we queried may or may not have been returned, but don't process - # it again, anyway. - processed_rooms.add(room_id) - - # XXX: is it ok that we blindly iterate through any events returned by - # a remote server, whether or not they actually link to any rooms in our - # tree? - for ev in events: - # remote servers might return events we have already processed - # (eg, Dendrite returns inward pointers as well as outward ones), so - # we need to filter them out, to avoid returning duplicate links to the - # client. - ev_key = (ev["room_id"], ev["state_key"]) - if ev_key in processed_events: - continue - events_result.append(ev) - - # add the child to the queue. we have already validated - # that the vias are a list of server names. 
- room_queue.append( - _RoomQueueEntry(ev["state_key"], ev["content"]["via"]) - ) - processed_events.add(ev_key) - - return {"rooms": rooms_result, "events": events_result} - async def get_room_hierarchy( self, requester: Requester, @@ -398,8 +250,6 @@ async def _get_room_hierarchy( None, room_id, suggested_only, - # Do not limit the maximum children. - max_children=None, ) # Otherwise, attempt to use information for federation. @@ -488,74 +338,6 @@ async def _get_room_hierarchy( return result - async def federation_space_summary( - self, - origin: str, - room_id: str, - suggested_only: bool, - max_rooms_per_space: Optional[int], - exclude_rooms: Iterable[str], - ) -> JsonDict: - """ - Implementation of the space summary Federation API - - Args: - origin: The server requesting the spaces summary. - - room_id: room id to start the summary at - - suggested_only: whether we should only return children with the "suggested" - flag set. - - max_rooms_per_space: an optional limit on the number of child rooms we will - return. Unlike the C-S API, this applies to the root room (room_id). - It is clipped to MAX_ROOMS_PER_SPACE. - - exclude_rooms: a list of rooms to skip over (presumably because the - calling server has already seen them). - - Returns: - summary dict to return - """ - # the queue of rooms to process - room_queue = deque((room_id,)) - - # the set of rooms that we should not walk further. Initialise it with the - # excluded-rooms list; we will add other rooms as we process them so that - # we do not loop. - processed_rooms: Set[str] = set(exclude_rooms) - - rooms_result: List[JsonDict] = [] - events_result: List[JsonDict] = [] - - # Set a limit on the number of rooms to return. - if max_rooms_per_space is None or max_rooms_per_space > MAX_ROOMS_PER_SPACE: - max_rooms_per_space = MAX_ROOMS_PER_SPACE - - while room_queue and len(rooms_result) < MAX_ROOMS: - room_id = room_queue.popleft() - if room_id in processed_rooms: - # already done this room - continue - - room_entry = await self._summarize_local_room( - None, origin, room_id, suggested_only, max_rooms_per_space - ) - - processed_rooms.add(room_id) - - if room_entry: - rooms_result.append(room_entry.room) - events_result.extend(room_entry.children_state_events) - - # add any children to the queue - room_queue.extend( - edge_event["state_key"] - for edge_event in room_entry.children_state_events - ) - - return {"rooms": rooms_result, "events": events_result} - async def get_federation_hierarchy( self, origin: str, @@ -579,7 +361,7 @@ async def get_federation_hierarchy( The JSON hierarchy dictionary. """ root_room_entry = await self._summarize_local_room( - None, origin, requested_room_id, suggested_only, max_children=None + None, origin, requested_room_id, suggested_only ) if root_room_entry is None: # Room is inaccessible to the requesting server. @@ -600,7 +382,7 @@ async def get_federation_hierarchy( continue room_entry = await self._summarize_local_room( - None, origin, room_id, suggested_only, max_children=0 + None, origin, room_id, suggested_only, include_children=False ) # If the room is accessible, include it in the results. # @@ -626,7 +408,7 @@ async def _summarize_local_room( origin: Optional[str], room_id: str, suggested_only: bool, - max_children: Optional[int], + include_children: bool = True, ) -> Optional["_RoomEntry"]: """ Generate a room entry and a list of event entries for a given room. @@ -641,9 +423,8 @@ async def _summarize_local_room( room_id: The room ID to summarize. 
suggested_only: True if only suggested children should be returned. Otherwise, all children are returned. - max_children: - The maximum number of children rooms to include. A value of None - means no limit. + include_children: + Whether to include the events of any children. Returns: A room entry if the room should be returned. None, otherwise. @@ -653,9 +434,8 @@ async def _summarize_local_room( room_entry = await self._build_room_entry(room_id, for_federation=bool(origin)) - # If the room is not a space or the children don't matter, return just - # the room information. - if room_entry.get("room_type") != RoomTypes.SPACE or max_children == 0: + # If the room is not a space return just the room information. + if room_entry.get("room_type") != RoomTypes.SPACE or not include_children: return _RoomEntry(room_id, room_entry) # Otherwise, look for child rooms/spaces. @@ -665,14 +445,6 @@ async def _summarize_local_room( # we only care about suggested children child_events = filter(_is_suggested_child_event, child_events) - # TODO max_children is legacy code for the /spaces endpoint. - if max_children is not None: - child_iter: Iterable[EventBase] = itertools.islice( - child_events, max_children - ) - else: - child_iter = child_events - stripped_events: List[JsonDict] = [ { "type": e.type, @@ -682,80 +454,10 @@ async def _summarize_local_room( "sender": e.sender, "origin_server_ts": e.origin_server_ts, } - for e in child_iter + for e in child_events ] return _RoomEntry(room_id, room_entry, stripped_events) - async def _summarize_remote_room( - self, - room: "_RoomQueueEntry", - suggested_only: bool, - max_children: Optional[int], - exclude_rooms: Iterable[str], - ) -> Iterable["_RoomEntry"]: - """ - Request room entries and a list of event entries for a given room by querying a remote server. - - Args: - room: The room to summarize. - suggested_only: True if only suggested children should be returned. - Otherwise, all children are returned. - max_children: - The maximum number of children rooms to include. This is capped - to a server-set limit. - exclude_rooms: - Rooms IDs which do not need to be summarized. - - Returns: - An iterable of room entries. - """ - room_id = room.room_id - logger.info("Requesting summary for %s via %s", room_id, room.via) - - # we need to make the exclusion list json-serialisable - exclude_rooms = list(exclude_rooms) - - via = itertools.islice(room.via, MAX_SERVERS_PER_SPACE) - try: - res = await self._federation_client.get_space_summary( - via, - room_id, - suggested_only=suggested_only, - max_rooms_per_space=max_children, - exclude_rooms=exclude_rooms, - ) - except Exception as e: - logger.warning( - "Unable to get summary of %s via federation: %s", - room_id, - e, - exc_info=logger.isEnabledFor(logging.DEBUG), - ) - return () - - # Group the events by their room. - children_by_room: Dict[str, List[JsonDict]] = {} - for ev in res.events: - if ev.event_type == EventTypes.SpaceChild: - children_by_room.setdefault(ev.room_id, []).append(ev.data) - - # Generate the final results. 
- results = [] - for fed_room in res.rooms: - fed_room_id = fed_room.get("room_id") - if not fed_room_id or not isinstance(fed_room_id, str): - continue - - results.append( - _RoomEntry( - fed_room_id, - fed_room, - children_by_room.get(fed_room_id, []), - ) - ) - - return results - async def _summarize_remote_room_hierarchy( self, room: "_RoomQueueEntry", suggested_only: bool ) -> Tuple[Optional["_RoomEntry"], Dict[str, JsonDict], Set[str]]: @@ -958,9 +660,8 @@ async def _is_remote_room_accessible( ): return True - # Check if the user is a member of any of the allowed spaces - # from the response. - allowed_rooms = room.get("allowed_room_ids") or room.get("allowed_spaces") + # Check if the user is a member of any of the allowed rooms from the response. + allowed_rooms = room.get("allowed_room_ids") if allowed_rooms and isinstance(allowed_rooms, list): if await self._event_auth_handler.is_user_in_rooms( allowed_rooms, requester @@ -1028,8 +729,6 @@ async def _build_room_entry(self, room_id: str, for_federation: bool) -> JsonDic ) if allowed_rooms: entry["allowed_room_ids"] = allowed_rooms - # TODO Remove this key once the API is stable. - entry["allowed_spaces"] = allowed_rooms # Filter out Nones – rather omit the field altogether room_entry = {k: v for k, v in entry.items() if v is not None} @@ -1094,7 +793,7 @@ async def get_room_summary( room_id, # Suggested-only doesn't matter since no children are requested. suggested_only=False, - max_children=0, + include_children=False, ) if not room_entry: diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 5ccfe5a92f06..8a06ab8c5f05 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -1141,73 +1141,6 @@ async def on_GET( } -class RoomSpaceSummaryRestServlet(RestServlet): - PATTERNS = ( - re.compile( - "^/_matrix/client/unstable/org.matrix.msc2946" - "/rooms/(?P[^/]*)/spaces$" - ), - ) - - def __init__(self, hs: "HomeServer"): - super().__init__() - self._auth = hs.get_auth() - self._room_summary_handler = hs.get_room_summary_handler() - - async def on_GET( - self, request: SynapseRequest, room_id: str - ) -> Tuple[int, JsonDict]: - requester = await self._auth.get_user_by_req(request, allow_guest=True) - - max_rooms_per_space = parse_integer(request, "max_rooms_per_space") - if max_rooms_per_space is not None and max_rooms_per_space < 0: - raise SynapseError( - 400, - "Value for 'max_rooms_per_space' must be a non-negative integer", - Codes.BAD_JSON, - ) - - return 200, await self._room_summary_handler.get_space_summary( - requester.user.to_string(), - room_id, - suggested_only=parse_boolean(request, "suggested_only", default=False), - max_rooms_per_space=max_rooms_per_space, - ) - - # TODO When switching to the stable endpoint, remove the POST handler. 
- async def on_POST( - self, request: SynapseRequest, room_id: str - ) -> Tuple[int, JsonDict]: - requester = await self._auth.get_user_by_req(request, allow_guest=True) - content = parse_json_object_from_request(request) - - suggested_only = content.get("suggested_only", False) - if not isinstance(suggested_only, bool): - raise SynapseError( - 400, "'suggested_only' must be a boolean", Codes.BAD_JSON - ) - - max_rooms_per_space = content.get("max_rooms_per_space") - if max_rooms_per_space is not None: - if not isinstance(max_rooms_per_space, int): - raise SynapseError( - 400, "'max_rooms_per_space' must be an integer", Codes.BAD_JSON - ) - if max_rooms_per_space < 0: - raise SynapseError( - 400, - "Value for 'max_rooms_per_space' must be a non-negative integer", - Codes.BAD_JSON, - ) - - return 200, await self._room_summary_handler.get_space_summary( - requester.user.to_string(), - room_id, - suggested_only=suggested_only, - max_rooms_per_space=max_rooms_per_space, - ) - - class RoomHierarchyRestServlet(RestServlet): PATTERNS = ( re.compile( @@ -1301,7 +1234,6 @@ def register_servlets( RoomRedactEventRestServlet(hs).register(http_server) RoomTypingRestServlet(hs).register(http_server) RoomEventContextServlet(hs).register(http_server) - RoomSpaceSummaryRestServlet(hs).register(http_server) RoomHierarchyRestServlet(hs).register(http_server) if hs.config.experimental.msc3266_enabled: RoomSummaryRestServlet(hs).register(http_server) diff --git a/tests/handlers/test_room_summary.py b/tests/handlers/test_room_summary.py index 51b22d299812..b33ff94a3999 100644 --- a/tests/handlers/test_room_summary.py +++ b/tests/handlers/test_room_summary.py @@ -157,35 +157,6 @@ def _add_child( state_key=room_id, ) - def _assert_rooms( - self, result: JsonDict, rooms_and_children: Iterable[Tuple[str, Iterable[str]]] - ) -> None: - """ - Assert that the expected room IDs and events are in the response. - - Args: - result: The result from the API call. - rooms_and_children: An iterable of tuples where each tuple is: - The expected room ID. - The expected IDs of any children rooms. - """ - room_ids = [] - children_ids = [] - for room_id, children in rooms_and_children: - room_ids.append(room_id) - if children: - children_ids.extend([(room_id, child_id) for child_id in children]) - self.assertCountEqual( - [room.get("room_id") for room in result["rooms"]], room_ids - ) - self.assertCountEqual( - [ - (event.get("room_id"), event.get("state_key")) - for event in result["events"] - ], - children_ids, - ) - def _assert_hierarchy( self, result: JsonDict, rooms_and_children: Iterable[Tuple[str, Iterable[str]]] ) -> None: @@ -251,11 +222,9 @@ def _poke_fed_invite(self, room_id: str, from_user: str) -> None: def test_simple_space(self): """Test a simple space with a single room.""" - result = self.get_success(self.handler.get_space_summary(self.user, self.space)) # The result should have the space and the room in it, along with a link # from space -> room. expected = [(self.space, [self.room]), (self.room, ())] - self._assert_rooms(result, expected) result = self.get_success( self.handler.get_room_hierarchy(create_requester(self.user), self.space) @@ -271,12 +240,6 @@ def test_large_space(self): self._add_child(self.space, room, self.token) rooms.append(room) - result = self.get_success(self.handler.get_space_summary(self.user, self.space)) - # The spaces result should have the space and the first 50 rooms in it, - # along with the links from space -> room for those 50 rooms. 
- expected = [(self.space, rooms[:50])] + [(room, []) for room in rooms[:49]] - self._assert_rooms(result, expected) - # The result should have the space and the rooms in it, along with the links # from space -> room. expected = [(self.space, rooms)] + [(room, []) for room in rooms] @@ -300,10 +263,7 @@ def test_visibility(self): token2 = self.login("user2", "pass") # The user can see the space since it is publicly joinable. - result = self.get_success(self.handler.get_space_summary(user2, self.space)) expected = [(self.space, [self.room]), (self.room, ())] - self._assert_rooms(result, expected) - result = self.get_success( self.handler.get_room_hierarchy(create_requester(user2), self.space) ) @@ -316,7 +276,6 @@ def test_visibility(self): body={"join_rule": JoinRules.INVITE}, tok=self.token, ) - self.get_failure(self.handler.get_space_summary(user2, self.space), AuthError) self.get_failure( self.handler.get_room_hierarchy(create_requester(user2), self.space), AuthError, @@ -329,9 +288,6 @@ def test_visibility(self): body={"history_visibility": HistoryVisibility.WORLD_READABLE}, tok=self.token, ) - result = self.get_success(self.handler.get_space_summary(user2, self.space)) - self._assert_rooms(result, expected) - result = self.get_success( self.handler.get_room_hierarchy(create_requester(user2), self.space) ) @@ -344,7 +300,6 @@ def test_visibility(self): body={"history_visibility": HistoryVisibility.JOINED}, tok=self.token, ) - self.get_failure(self.handler.get_space_summary(user2, self.space), AuthError) self.get_failure( self.handler.get_room_hierarchy(create_requester(user2), self.space), AuthError, @@ -353,19 +308,12 @@ def test_visibility(self): # Join the space and results should be returned. self.helper.invite(self.space, targ=user2, tok=self.token) self.helper.join(self.space, user2, tok=token2) - result = self.get_success(self.handler.get_space_summary(user2, self.space)) - self._assert_rooms(result, expected) - result = self.get_success( self.handler.get_room_hierarchy(create_requester(user2), self.space) ) self._assert_hierarchy(result, expected) # Attempting to view an unknown room returns the same error. - self.get_failure( - self.handler.get_space_summary(user2, "#not-a-space:" + self.hs.hostname), - AuthError, - ) self.get_failure( self.handler.get_room_hierarchy( create_requester(user2), "#not-a-space:" + self.hs.hostname @@ -496,7 +444,6 @@ def test_filtering(self): # Join the space. self.helper.join(self.space, user2, tok=token2) - result = self.get_success(self.handler.get_space_summary(user2, self.space)) expected = [ ( self.space, @@ -520,7 +467,6 @@ def test_filtering(self): (world_readable_room, ()), (joined_room, ()), ] - self._assert_rooms(result, expected) result = self.get_success( self.handler.get_room_hierarchy(create_requester(user2), self.space) @@ -554,8 +500,6 @@ def test_complex_space(self): self._add_child(subspace, self.room, token=self.token) self._add_child(subspace, room2, self.token) - result = self.get_success(self.handler.get_space_summary(self.user, self.space)) - # The result should include each room a single time and each link. 
expected = [ (self.space, [self.room, room2, subspace]), @@ -563,7 +507,6 @@ def test_complex_space(self): (subspace, [subroom, self.room, room2]), (subroom, ()), ] - self._assert_rooms(result, expected) result = self.get_success( self.handler.get_room_hierarchy(create_requester(self.user), self.space) @@ -728,10 +671,8 @@ def test_unknown_room_version(self): ) ) - result = self.get_success(self.handler.get_space_summary(self.user, self.space)) # The result should have only the space, along with a link from space -> room. expected = [(self.space, [self.room])] - self._assert_rooms(result, expected) result = self.get_success( self.handler.get_room_hierarchy(create_requester(self.user), self.space) @@ -775,41 +716,18 @@ def test_fed_complex(self): "world_readable": True, } - async def summarize_remote_room( - _self, room, suggested_only, max_children, exclude_rooms - ): - return [ - requested_room_entry, - _RoomEntry( - subroom, - { - "room_id": subroom, - "world_readable": True, - }, - ), - ] - async def summarize_remote_room_hierarchy(_self, room, suggested_only): return requested_room_entry, {subroom: child_room}, set() # Add a room to the space which is on another server. self._add_child(self.space, subspace, self.token) - with mock.patch( - "synapse.handlers.room_summary.RoomSummaryHandler._summarize_remote_room", - new=summarize_remote_room, - ): - result = self.get_success( - self.handler.get_space_summary(self.user, self.space) - ) - expected = [ (self.space, [self.room, subspace]), (self.room, ()), (subspace, [subroom]), (subroom, ()), ] - self._assert_rooms(result, expected) with mock.patch( "synapse.handlers.room_summary.RoomSummaryHandler._summarize_remote_room_hierarchy", @@ -881,7 +799,7 @@ def test_fed_filtering(self): "room_id": restricted_room, "world_readable": False, "join_rules": JoinRules.RESTRICTED, - "allowed_spaces": [], + "allowed_room_ids": [], }, ), ( @@ -890,7 +808,7 @@ def test_fed_filtering(self): "room_id": restricted_accessible_room, "world_readable": False, "join_rules": JoinRules.RESTRICTED, - "allowed_spaces": [self.room], + "allowed_room_ids": [self.room], }, ), ( @@ -929,30 +847,12 @@ def test_fed_filtering(self): ], ) - async def summarize_remote_room( - _self, room, suggested_only, max_children, exclude_rooms - ): - return [subspace_room_entry] + [ - # A copy is made of the room data since the allowed_spaces key - # is removed. - _RoomEntry(child_room[0], dict(child_room[1])) - for child_room in children_rooms - ] - async def summarize_remote_room_hierarchy(_self, room, suggested_only): return subspace_room_entry, dict(children_rooms), set() # Add a room to the space which is on another server. 
self._add_child(self.space, subspace, self.token) - with mock.patch( - "synapse.handlers.room_summary.RoomSummaryHandler._summarize_remote_room", - new=summarize_remote_room, - ): - result = self.get_success( - self.handler.get_space_summary(self.user, self.space) - ) - expected = [ (self.space, [self.room, subspace]), (self.room, ()), @@ -976,7 +876,6 @@ async def summarize_remote_room_hierarchy(_self, room, suggested_only): (world_readable_room, ()), (joined_room, ()), ] - self._assert_rooms(result, expected) with mock.patch( "synapse.handlers.room_summary.RoomSummaryHandler._summarize_remote_room_hierarchy", @@ -1010,31 +909,17 @@ def test_fed_invited(self): }, ) - async def summarize_remote_room( - _self, room, suggested_only, max_children, exclude_rooms - ): - return [fed_room_entry] - async def summarize_remote_room_hierarchy(_self, room, suggested_only): return fed_room_entry, {}, set() # Add a room to the space which is on another server. self._add_child(self.space, fed_room, self.token) - with mock.patch( - "synapse.handlers.room_summary.RoomSummaryHandler._summarize_remote_room", - new=summarize_remote_room, - ): - result = self.get_success( - self.handler.get_space_summary(self.user, self.space) - ) - expected = [ (self.space, [self.room, fed_room]), (self.room, ()), (fed_room, ()), ] - self._assert_rooms(result, expected) with mock.patch( "synapse.handlers.room_summary.RoomSummaryHandler._summarize_remote_room_hierarchy", From 952efd0bca967bc2fcabe5c3f1f58e14ddc41686 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Mon, 28 Feb 2022 19:59:00 +0100 Subject: [PATCH 311/474] Add type hints to `tests/rest/client` (#12094) * Add type hints to `tests/rest/client` * update `mypy.ini` * newsfile * add `test_register.py` --- changelog.d/12094.misc | 1 + mypy.ini | 3 - tests/rest/client/test_events.py | 20 +++--- tests/rest/client/test_groups.py | 2 +- tests/rest/client/test_register.py | 110 +++++++++++++++-------------- 5 files changed, 72 insertions(+), 64 deletions(-) create mode 100644 changelog.d/12094.misc diff --git a/changelog.d/12094.misc b/changelog.d/12094.misc new file mode 100644 index 000000000000..0360dbd61edc --- /dev/null +++ b/changelog.d/12094.misc @@ -0,0 +1 @@ +Add type hints to `tests/rest/client`. 
diff --git a/mypy.ini b/mypy.ini index bd75905c8d3c..38ff78760931 100644 --- a/mypy.ini +++ b/mypy.ini @@ -75,10 +75,7 @@ exclude = (?x) |tests/push/test_presentable_names.py |tests/push/test_push_rule_evaluator.py |tests/rest/client/test_account.py - |tests/rest/client/test_events.py |tests/rest/client/test_filter.py - |tests/rest/client/test_groups.py - |tests/rest/client/test_register.py |tests/rest/client/test_report_event.py |tests/rest/client/test_rooms.py |tests/rest/client/test_third_party_rules.py diff --git a/tests/rest/client/test_events.py b/tests/rest/client/test_events.py index 145f247836a0..1b1392fa2fdd 100644 --- a/tests/rest/client/test_events.py +++ b/tests/rest/client/test_events.py @@ -16,8 +16,12 @@ from unittest.mock import Mock +from twisted.test.proto_helpers import MemoryReactor + import synapse.rest.admin from synapse.rest.client import events, login, room +from synapse.server import HomeServer +from synapse.util import Clock from tests import unittest @@ -32,7 +36,7 @@ class EventStreamPermissionsTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() config["enable_registration_captcha"] = False @@ -41,11 +45,11 @@ def make_homeserver(self, reactor, clock): hs = self.setup_test_homeserver(config=config) - hs.get_federation_handler = Mock() + hs.get_federation_handler = Mock() # type: ignore[assignment] return hs - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: # register an account self.user_id = self.register_user("sid1", "pass") @@ -55,7 +59,7 @@ def prepare(self, reactor, clock, hs): self.other_user = self.register_user("other2", "pass") self.other_token = self.login(self.other_user, "pass") - def test_stream_basic_permissions(self): + def test_stream_basic_permissions(self) -> None: # invalid token, expect 401 # note: this is in violation of the original v1 spec, which expected # 403. 
However, since the v1 spec no longer exists and the v1 @@ -76,7 +80,7 @@ def test_stream_basic_permissions(self): self.assertTrue("start" in channel.json_body) self.assertTrue("end" in channel.json_body) - def test_stream_room_permissions(self): + def test_stream_room_permissions(self) -> None: room_id = self.helper.create_room_as(self.other_user, tok=self.other_token) self.helper.send(room_id, tok=self.other_token) @@ -111,7 +115,7 @@ def test_stream_room_permissions(self): # left to room (expect no content for room) - def TODO_test_stream_items(self): + def TODO_test_stream_items(self) -> None: # new user, no content # join room, expect 1 item (join) @@ -136,7 +140,7 @@ class GetEventsTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def prepare(self, hs, reactor, clock): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: # register an account self.user_id = self.register_user("sid1", "pass") @@ -144,7 +148,7 @@ def prepare(self, hs, reactor, clock): self.room_id = self.helper.create_room_as(self.user_id, tok=self.token) - def test_get_event_via_events(self): + def test_get_event_via_events(self) -> None: resp = self.helper.send(self.room_id, tok=self.token) event_id = resp["event_id"] diff --git a/tests/rest/client/test_groups.py b/tests/rest/client/test_groups.py index c99f54cf4f7c..e067cf825c62 100644 --- a/tests/rest/client/test_groups.py +++ b/tests/rest/client/test_groups.py @@ -25,7 +25,7 @@ class GroupsTestCase(unittest.HomeserverTestCase): servlets = [room.register_servlets, groups.register_servlets] @override_config({"enable_group_creation": True}) - def test_rooms_limited_by_visibility(self): + def test_rooms_limited_by_visibility(self) -> None: group_id = "+spqr:test" # Alice creates a group diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py index 4b95b8541c11..9aebf1735a96 100644 --- a/tests/rest/client/test_register.py +++ b/tests/rest/client/test_register.py @@ -16,15 +16,21 @@ import datetime import json import os +from typing import Any, Dict, List, Tuple import pkg_resources +from twisted.test.proto_helpers import MemoryReactor + import synapse.rest.admin from synapse.api.constants import APP_SERVICE_REGISTRATION_TYPE, LoginType from synapse.api.errors import Codes from synapse.appservice import ApplicationService from synapse.rest.client import account, account_validity, login, logout, register, sync +from synapse.server import HomeServer from synapse.storage._base import db_to_json +from synapse.types import JsonDict +from synapse.util import Clock from tests import unittest from tests.unittest import override_config @@ -39,12 +45,12 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): ] url = b"/_matrix/client/r0/register" - def default_config(self): + def default_config(self) -> Dict[str, Any]: config = super().default_config() config["allow_guest_access"] = True return config - def test_POST_appservice_registration_valid(self): + def test_POST_appservice_registration_valid(self) -> None: user_id = "@as_user_kermit:test" as_token = "i_am_an_app_service" @@ -69,7 +75,7 @@ def test_POST_appservice_registration_valid(self): det_data = {"user_id": user_id, "home_server": self.hs.hostname} self.assertDictContainsSubset(det_data, channel.json_body) - def test_POST_appservice_registration_no_type(self): + def test_POST_appservice_registration_no_type(self) -> None: as_token = "i_am_an_app_service" appservice = ApplicationService( @@ -89,7 +95,7 @@ def 
test_POST_appservice_registration_no_type(self): self.assertEqual(channel.result["code"], b"400", channel.result) - def test_POST_appservice_registration_invalid(self): + def test_POST_appservice_registration_invalid(self) -> None: self.appservice = None # no application service exists request_data = json.dumps( {"username": "kermit", "type": APP_SERVICE_REGISTRATION_TYPE} @@ -100,21 +106,21 @@ def test_POST_appservice_registration_invalid(self): self.assertEqual(channel.result["code"], b"401", channel.result) - def test_POST_bad_password(self): + def test_POST_bad_password(self) -> None: request_data = json.dumps({"username": "kermit", "password": 666}) channel = self.make_request(b"POST", self.url, request_data) self.assertEqual(channel.result["code"], b"400", channel.result) self.assertEqual(channel.json_body["error"], "Invalid password") - def test_POST_bad_username(self): + def test_POST_bad_username(self) -> None: request_data = json.dumps({"username": 777, "password": "monkey"}) channel = self.make_request(b"POST", self.url, request_data) self.assertEqual(channel.result["code"], b"400", channel.result) self.assertEqual(channel.json_body["error"], "Invalid username") - def test_POST_user_valid(self): + def test_POST_user_valid(self) -> None: user_id = "@kermit:test" device_id = "frogfone" params = { @@ -135,7 +141,7 @@ def test_POST_user_valid(self): self.assertDictContainsSubset(det_data, channel.json_body) @override_config({"enable_registration": False}) - def test_POST_disabled_registration(self): + def test_POST_disabled_registration(self) -> None: request_data = json.dumps({"username": "kermit", "password": "monkey"}) self.auth_result = (None, {"username": "kermit", "password": "monkey"}, None) @@ -145,7 +151,7 @@ def test_POST_disabled_registration(self): self.assertEqual(channel.json_body["error"], "Registration has been disabled") self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") - def test_POST_guest_registration(self): + def test_POST_guest_registration(self) -> None: self.hs.config.key.macaroon_secret_key = "test" self.hs.config.registration.allow_guest_access = True @@ -155,7 +161,7 @@ def test_POST_guest_registration(self): self.assertEqual(channel.result["code"], b"200", channel.result) self.assertDictContainsSubset(det_data, channel.json_body) - def test_POST_disabled_guest_registration(self): + def test_POST_disabled_guest_registration(self) -> None: self.hs.config.registration.allow_guest_access = False channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}") @@ -164,7 +170,7 @@ def test_POST_disabled_guest_registration(self): self.assertEqual(channel.json_body["error"], "Guest access is disabled") @override_config({"rc_registration": {"per_second": 0.17, "burst_count": 5}}) - def test_POST_ratelimiting_guest(self): + def test_POST_ratelimiting_guest(self) -> None: for i in range(0, 6): url = self.url + b"?kind=guest" channel = self.make_request(b"POST", url, b"{}") @@ -182,7 +188,7 @@ def test_POST_ratelimiting_guest(self): self.assertEqual(channel.result["code"], b"200", channel.result) @override_config({"rc_registration": {"per_second": 0.17, "burst_count": 5}}) - def test_POST_ratelimiting(self): + def test_POST_ratelimiting(self) -> None: for i in range(0, 6): params = { "username": "kermit" + str(i), @@ -206,7 +212,7 @@ def test_POST_ratelimiting(self): self.assertEqual(channel.result["code"], b"200", channel.result) @override_config({"registration_requires_token": True}) - def test_POST_registration_requires_token(self): + def 
test_POST_registration_requires_token(self) -> None: username = "kermit" device_id = "frogfone" token = "abcd" @@ -223,7 +229,7 @@ def test_POST_registration_requires_token(self): }, ) ) - params = { + params: JsonDict = { "username": username, "password": "monkey", "device_id": device_id, @@ -280,8 +286,8 @@ def test_POST_registration_requires_token(self): self.assertEqual(res["pending"], 0) @override_config({"registration_requires_token": True}) - def test_POST_registration_token_invalid(self): - params = { + def test_POST_registration_token_invalid(self) -> None: + params: JsonDict = { "username": "kermit", "password": "monkey", } @@ -314,7 +320,7 @@ def test_POST_registration_token_invalid(self): self.assertEqual(channel.json_body["completed"], []) @override_config({"registration_requires_token": True}) - def test_POST_registration_token_limit_uses(self): + def test_POST_registration_token_limit_uses(self) -> None: token = "abcd" store = self.hs.get_datastores().main # Create token that can be used once @@ -330,8 +336,8 @@ def test_POST_registration_token_limit_uses(self): }, ) ) - params1 = {"username": "bert", "password": "monkey"} - params2 = {"username": "ernie", "password": "monkey"} + params1: JsonDict = {"username": "bert", "password": "monkey"} + params2: JsonDict = {"username": "ernie", "password": "monkey"} # Do 2 requests without auth to get two session IDs channel1 = self.make_request(b"POST", self.url, json.dumps(params1)) session1 = channel1.json_body["session"] @@ -388,7 +394,7 @@ def test_POST_registration_token_limit_uses(self): self.assertEqual(channel.json_body["completed"], []) @override_config({"registration_requires_token": True}) - def test_POST_registration_token_expiry(self): + def test_POST_registration_token_expiry(self) -> None: token = "abcd" now = self.hs.get_clock().time_msec() store = self.hs.get_datastores().main @@ -405,7 +411,7 @@ def test_POST_registration_token_expiry(self): }, ) ) - params = {"username": "kermit", "password": "monkey"} + params: JsonDict = {"username": "kermit", "password": "monkey"} # Request without auth to get session channel = self.make_request(b"POST", self.url, json.dumps(params)) session = channel.json_body["session"] @@ -436,7 +442,7 @@ def test_POST_registration_token_expiry(self): self.assertCountEqual([LoginType.REGISTRATION_TOKEN], completed) @override_config({"registration_requires_token": True}) - def test_POST_registration_token_session_expiry(self): + def test_POST_registration_token_session_expiry(self) -> None: """Test `pending` is decremented when an uncompleted session expires.""" token = "abcd" store = self.hs.get_datastores().main @@ -454,8 +460,8 @@ def test_POST_registration_token_session_expiry(self): ) # Do 2 requests without auth to get two session IDs - params1 = {"username": "bert", "password": "monkey"} - params2 = {"username": "ernie", "password": "monkey"} + params1: JsonDict = {"username": "bert", "password": "monkey"} + params2: JsonDict = {"username": "ernie", "password": "monkey"} channel1 = self.make_request(b"POST", self.url, json.dumps(params1)) session1 = channel1.json_body["session"] channel2 = self.make_request(b"POST", self.url, json.dumps(params2)) @@ -522,7 +528,7 @@ def test_POST_registration_token_session_expiry(self): self.assertEqual(pending, 0) @override_config({"registration_requires_token": True}) - def test_POST_registration_token_session_expiry_deleted_token(self): + def test_POST_registration_token_session_expiry_deleted_token(self) -> None: """Test session expiry doesn't break 
when the token is deleted. 1. Start but don't complete UIA with a registration token @@ -545,7 +551,7 @@ def test_POST_registration_token_session_expiry_deleted_token(self): ) # Do request without auth to get a session ID - params = {"username": "kermit", "password": "monkey"} + params: JsonDict = {"username": "kermit", "password": "monkey"} channel = self.make_request(b"POST", self.url, json.dumps(params)) session = channel.json_body["session"] @@ -570,7 +576,7 @@ def test_POST_registration_token_session_expiry_deleted_token(self): store.delete_old_ui_auth_sessions(self.hs.get_clock().time_msec()) ) - def test_advertised_flows(self): + def test_advertised_flows(self) -> None: channel = self.make_request(b"POST", self.url, b"{}") self.assertEqual(channel.result["code"], b"401", channel.result) flows = channel.json_body["flows"] @@ -593,7 +599,7 @@ def test_advertised_flows(self): }, } ) - def test_advertised_flows_captcha_and_terms_and_3pids(self): + def test_advertised_flows_captcha_and_terms_and_3pids(self) -> None: channel = self.make_request(b"POST", self.url, b"{}") self.assertEqual(channel.result["code"], b"401", channel.result) flows = channel.json_body["flows"] @@ -625,7 +631,7 @@ def test_advertised_flows_captcha_and_terms_and_3pids(self): }, } ) - def test_advertised_flows_no_msisdn_email_required(self): + def test_advertised_flows_no_msisdn_email_required(self) -> None: channel = self.make_request(b"POST", self.url, b"{}") self.assertEqual(channel.result["code"], b"401", channel.result) flows = channel.json_body["flows"] @@ -646,7 +652,7 @@ def test_advertised_flows_no_msisdn_email_required(self): }, } ) - def test_request_token_existing_email_inhibit_error(self): + def test_request_token_existing_email_inhibit_error(self) -> None: """Test that requesting a token via this endpoint doesn't leak existing associations if configured that way. """ @@ -685,7 +691,7 @@ def test_request_token_existing_email_inhibit_error(self): }, } ) - def test_reject_invalid_email(self): + def test_reject_invalid_email(self) -> None: """Check that bad emails are rejected""" # Test for email with multiple @ @@ -731,7 +737,7 @@ def test_reject_invalid_email(self): "inhibit_user_in_use_error": True, } ) - def test_inhibit_user_in_use_error(self): + def test_inhibit_user_in_use_error(self) -> None: """Tests that the 'inhibit_user_in_use_error' configuration flag behaves correctly. """ @@ -779,7 +785,7 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): account_validity.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() # Test for account expiring after a week. 
config["enable_registration"] = True @@ -791,7 +797,7 @@ def make_homeserver(self, reactor, clock): return self.hs - def test_validity_period(self): + def test_validity_period(self) -> None: self.register_user("kermit", "monkey") tok = self.login("kermit", "monkey") @@ -810,7 +816,7 @@ def test_validity_period(self): channel.json_body["errcode"], Codes.EXPIRED_ACCOUNT, channel.result ) - def test_manual_renewal(self): + def test_manual_renewal(self) -> None: user_id = self.register_user("kermit", "monkey") tok = self.login("kermit", "monkey") @@ -833,7 +839,7 @@ def test_manual_renewal(self): channel = self.make_request(b"GET", "/sync", access_token=tok) self.assertEqual(channel.result["code"], b"200", channel.result) - def test_manual_expire(self): + def test_manual_expire(self) -> None: user_id = self.register_user("kermit", "monkey") tok = self.login("kermit", "monkey") @@ -858,7 +864,7 @@ def test_manual_expire(self): channel.json_body["errcode"], Codes.EXPIRED_ACCOUNT, channel.result ) - def test_logging_out_expired_user(self): + def test_logging_out_expired_user(self) -> None: user_id = self.register_user("kermit", "monkey") tok = self.login("kermit", "monkey") @@ -898,7 +904,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): account.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() # Test for account expiring after a week and renewal emails being sent 2 @@ -935,17 +941,17 @@ def make_homeserver(self, reactor, clock): self.hs = self.setup_test_homeserver(config=config) - async def sendmail(*args, **kwargs): + async def sendmail(*args: Any, **kwargs: Any) -> None: self.email_attempts.append((args, kwargs)) - self.email_attempts = [] + self.email_attempts: List[Tuple[Any, Any]] = [] self.hs.get_send_email_handler()._sendmail = sendmail self.store = self.hs.get_datastores().main return self.hs - def test_renewal_email(self): + def test_renewal_email(self) -> None: self.email_attempts = [] (user_id, tok) = self.create_user() @@ -999,7 +1005,7 @@ def test_renewal_email(self): channel = self.make_request(b"GET", "/sync", access_token=tok) self.assertEqual(channel.result["code"], b"200", channel.result) - def test_renewal_invalid_token(self): + def test_renewal_invalid_token(self) -> None: # Hit the renewal endpoint with an invalid token and check that it behaves as # expected, i.e. that it responds with 404 Not Found and the correct HTML. 
url = "/_matrix/client/unstable/account_validity/renew?token=123" @@ -1019,7 +1025,7 @@ def test_renewal_invalid_token(self): channel.result["body"], expected_html.encode("utf8"), channel.result ) - def test_manual_email_send(self): + def test_manual_email_send(self) -> None: self.email_attempts = [] (user_id, tok) = self.create_user() @@ -1032,7 +1038,7 @@ def test_manual_email_send(self): self.assertEqual(len(self.email_attempts), 1) - def test_deactivated_user(self): + def test_deactivated_user(self) -> None: self.email_attempts = [] (user_id, tok) = self.create_user() @@ -1056,7 +1062,7 @@ def test_deactivated_user(self): self.assertEqual(len(self.email_attempts), 0) - def create_user(self): + def create_user(self) -> Tuple[str, str]: user_id = self.register_user("kermit", "monkey") tok = self.login("kermit", "monkey") # We need to manually add an email address otherwise the handler will do @@ -1073,7 +1079,7 @@ def create_user(self): ) return user_id, tok - def test_manual_email_send_expired_account(self): + def test_manual_email_send_expired_account(self) -> None: user_id = self.register_user("kermit", "monkey") tok = self.login("kermit", "monkey") @@ -1112,7 +1118,7 @@ class AccountValidityBackgroundJobTestCase(unittest.HomeserverTestCase): servlets = [synapse.rest.admin.register_servlets_for_client_rest_resource] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.validity_period = 10 self.max_delta = self.validity_period * 10.0 / 100.0 @@ -1135,7 +1141,7 @@ def make_homeserver(self, reactor, clock): return self.hs - def test_background_job(self): + def test_background_job(self) -> None: """ Tests the same thing as test_background_job, except that it sets the startup_job_max_delta parameter and checks that the expiration date is within the @@ -1158,12 +1164,12 @@ class RegistrationTokenValidityRestServletTestCase(unittest.HomeserverTestCase): servlets = [register.register_servlets] url = "/_matrix/client/v1/register/m.login.registration_token/validity" - def default_config(self): + def default_config(self) -> Dict[str, Any]: config = super().default_config() config["registration_requires_token"] = True return config - def test_GET_token_valid(self): + def test_GET_token_valid(self) -> None: token = "abcd" store = self.hs.get_datastores().main self.get_success( @@ -1186,7 +1192,7 @@ def test_GET_token_valid(self): self.assertEqual(channel.result["code"], b"200", channel.result) self.assertEqual(channel.json_body["valid"], True) - def test_GET_token_invalid(self): + def test_GET_token_invalid(self) -> None: token = "1234" channel = self.make_request( b"GET", @@ -1198,7 +1204,7 @@ def test_GET_token_invalid(self): @override_config( {"rc_registration_token_validity": {"per_second": 0.1, "burst_count": 5}} ) - def test_GET_ratelimiting(self): + def test_GET_ratelimiting(self) -> None: token = "1234" for i in range(0, 6): From 9d11fee8f223787c04c6574b8a30967e2b73cc35 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 1 Mar 2022 09:34:30 +0000 Subject: [PATCH 312/474] Improve exception handling for concurrent execution (#12109) * fix incorrect unwrapFirstError import this was being imported from the wrong place * Refactor `concurrently_execute` to use `yieldable_gather_results` * Improve exception handling in `yieldable_gather_results` Try to avoid swallowing so many stack traces. 
* mark unwrapFirstError deprecated * changelog --- changelog.d/12109.misc | 1 + synapse/handlers/message.py | 4 +- synapse/util/__init__.py | 4 +- synapse/util/async_helpers.py | 54 +++++++++------ tests/util/test_async_helpers.py | 115 ++++++++++++++++++++++++++++++- 5 files changed, 151 insertions(+), 27 deletions(-) create mode 100644 changelog.d/12109.misc diff --git a/changelog.d/12109.misc b/changelog.d/12109.misc new file mode 100644 index 000000000000..3295e49f43b9 --- /dev/null +++ b/changelog.d/12109.misc @@ -0,0 +1 @@ +Improve exception handling for concurrent execution. diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index a9c964cd7533..ce1fa3c78ea3 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -55,8 +55,8 @@ from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.storage.state import StateFilter from synapse.types import Requester, RoomAlias, StreamToken, UserID, create_requester -from synapse.util import json_decoder, json_encoder, log_failure -from synapse.util.async_helpers import Linearizer, gather_results, unwrapFirstError +from synapse.util import json_decoder, json_encoder, log_failure, unwrapFirstError +from synapse.util.async_helpers import Linearizer, gather_results from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.metrics import measure_func from synapse.visibility import filter_events_for_client diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 511f52534b3c..58b4220ff355 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -81,7 +81,9 @@ def _handle_frozendict(obj: Any) -> Dict[Any, Any]: def unwrapFirstError(failure: Failure) -> Failure: - # defer.gatherResults and DeferredLists wrap failures. + # Deprecated: you probably just want to catch defer.FirstError and reraise + # the subFailure's value, which will do a better job of preserving stacktraces. + # (actually, you probably want to use yieldable_gather_results anyway) failure.trap(defer.FirstError) return failure.value.subFailure # type: ignore[union-attr] # Issue in Twisted's annotations diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 3f7299aff7eb..a83296a2292b 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -29,6 +29,7 @@ Hashable, Iterable, Iterator, + List, Optional, Set, Tuple, @@ -51,7 +52,7 @@ make_deferred_yieldable, run_in_background, ) -from synapse.util import Clock, unwrapFirstError +from synapse.util import Clock logger = logging.getLogger(__name__) @@ -193,9 +194,9 @@ def __repr__(self) -> str: T = TypeVar("T") -def concurrently_execute( +async def concurrently_execute( func: Callable[[T], Any], args: Iterable[T], limit: int -) -> defer.Deferred: +) -> None: """Executes the function with each argument concurrently while limiting the number of concurrent executions. @@ -221,20 +222,14 @@ async def _concurrently_execute_inner(value: T) -> None: # We use `itertools.islice` to handle the case where the number of args is # less than the limit, avoiding needlessly spawning unnecessary background # tasks. 
- return make_deferred_yieldable( - defer.gatherResults( - [ - run_in_background(_concurrently_execute_inner, value) - for value in itertools.islice(it, limit) - ], - consumeErrors=True, - ) - ).addErrback(unwrapFirstError) + await yieldable_gather_results( + _concurrently_execute_inner, (value for value in itertools.islice(it, limit)) + ) -def yieldable_gather_results( - func: Callable, iter: Iterable, *args: Any, **kwargs: Any -) -> defer.Deferred: +async def yieldable_gather_results( + func: Callable[..., Awaitable[T]], iter: Iterable, *args: Any, **kwargs: Any +) -> List[T]: """Executes the function with each argument concurrently. Args: @@ -245,15 +240,30 @@ def yieldable_gather_results( **kwargs: Keyword arguments to be passed to each call to func Returns - Deferred[list]: Resolved when all functions have been invoked, or errors if - one of the function calls fails. + A list containing the results of the function """ - return make_deferred_yieldable( - defer.gatherResults( - [run_in_background(func, item, *args, **kwargs) for item in iter], - consumeErrors=True, + try: + return await make_deferred_yieldable( + defer.gatherResults( + [run_in_background(func, item, *args, **kwargs) for item in iter], + consumeErrors=True, + ) ) - ).addErrback(unwrapFirstError) + except defer.FirstError as dfe: + # unwrap the error from defer.gatherResults. + + # The raised exception's traceback only includes func() etc if + # the 'await' happens before the exception is thrown - ie if the failure + # happens *asynchronously* - otherwise Twisted throws away the traceback as it + # could be large. + # + # We could maybe reconstruct a fake traceback from Failure.frames. Or maybe + # we could throw Twisted into the fires of Mordor. + + # suppress exception chaining, because the FirstError doesn't tell us anything + # very interesting. + assert isinstance(dfe.subFailure.value, BaseException) + raise dfe.subFailure.value from None T1 = TypeVar("T1") diff --git a/tests/util/test_async_helpers.py b/tests/util/test_async_helpers.py index ab89cab81256..cce8d595fc7e 100644 --- a/tests/util/test_async_helpers.py +++ b/tests/util/test_async_helpers.py @@ -11,9 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import traceback + from twisted.internet import defer -from twisted.internet.defer import CancelledError, Deferred +from twisted.internet.defer import CancelledError, Deferred, ensureDeferred from twisted.internet.task import Clock +from twisted.python.failure import Failure from synapse.logging.context import ( SENTINEL_CONTEXT, @@ -21,7 +24,11 @@ PreserveLoggingContext, current_context, ) -from synapse.util.async_helpers import ObservableDeferred, timeout_deferred +from synapse.util.async_helpers import ( + ObservableDeferred, + concurrently_execute, + timeout_deferred, +) from tests.unittest import TestCase @@ -171,3 +178,107 @@ def errback(res, deferred_name): ) self.failureResultOf(timing_out_d, defer.TimeoutError) self.assertIs(current_context(), context_one) + + +class _TestException(Exception): + pass + + +class ConcurrentlyExecuteTest(TestCase): + def test_limits_runners(self): + """If we have more tasks than runners, we should get the limit of runners""" + started = 0 + waiters = [] + processed = [] + + async def callback(v): + # when we first enter, bump the start count + nonlocal started + started += 1 + + # record the fact we got an item + processed.append(v) + + # wait for the goahead before returning + d2 = Deferred() + waiters.append(d2) + await d2 + + # set it going + d2 = ensureDeferred(concurrently_execute(callback, [1, 2, 3, 4, 5], 3)) + + # check we got exactly 3 processes + self.assertEqual(started, 3) + self.assertEqual(len(waiters), 3) + + # let one finish + waiters.pop().callback(0) + + # ... which should start another + self.assertEqual(started, 4) + self.assertEqual(len(waiters), 3) + + # we still shouldn't be done + self.assertNoResult(d2) + + # finish the job + while waiters: + waiters.pop().callback(0) + + # check everything got done + self.assertEqual(started, 5) + self.assertCountEqual(processed, [1, 2, 3, 4, 5]) + self.successResultOf(d2) + + def test_preserves_stacktraces(self): + """Test that the stacktrace from an exception thrown in the callback is preserved""" + d1 = Deferred() + + async def callback(v): + # alas, this doesn't work at all without an await here + await d1 + raise _TestException("bah") + + async def caller(): + try: + await concurrently_execute(callback, [1], 2) + except _TestException as e: + tb = traceback.extract_tb(e.__traceback__) + # we expect to see "caller", "concurrently_execute" and "callback". + self.assertEqual(tb[0].name, "caller") + self.assertEqual(tb[1].name, "concurrently_execute") + self.assertEqual(tb[-1].name, "callback") + else: + self.fail("No exception thrown") + + d2 = ensureDeferred(caller()) + d1.callback(0) + self.successResultOf(d2) + + def test_preserves_stacktraces_on_preformed_failure(self): + """Test that the stacktrace on a Failure returned by the callback is preserved""" + d1 = Deferred() + f = Failure(_TestException("bah")) + + async def callback(v): + # alas, this doesn't work at all without an await here + await d1 + await defer.fail(f) + + async def caller(): + try: + await concurrently_execute(callback, [1], 2) + except _TestException as e: + tb = traceback.extract_tb(e.__traceback__) + # we expect to see "caller", "concurrently_execute", "callback", + # and some magic from inside ensureDeferred that happens when .fail + # is called. 
+ self.assertEqual(tb[0].name, "caller") + self.assertEqual(tb[1].name, "concurrently_execute") + self.assertEqual(tb[-2].name, "callback") + else: + self.fail("No exception thrown") + + d2 = ensureDeferred(caller()) + d1.callback(0) + self.successResultOf(d2) From 5458eb8551be676fea7ff21e2b0d3c3762c871a7 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 1 Mar 2022 09:51:38 +0000 Subject: [PATCH 313/474] Fix 'Unhandled error in Deferred' (#12089) * Fix 'Unhandled error in Deferred' Fixes a CRITICAL "Unhandled error in Deferred" log message which happened when a function wrapped with `@cachedList` failed * Minor optimisation to cachedListDescriptor we can avoid re-using `missing`, which saves looking up entries in `deferreds_map`, and means we don't need to copy it. * Improve type annotation on CachedListDescriptor --- changelog.d/12089.bugfix | 1 + synapse/util/caches/descriptors.py | 64 +++++++++++++-------------- tests/util/caches/test_descriptors.py | 10 ++--- 3 files changed, 38 insertions(+), 37 deletions(-) create mode 100644 changelog.d/12089.bugfix diff --git a/changelog.d/12089.bugfix b/changelog.d/12089.bugfix new file mode 100644 index 000000000000..27172c482803 --- /dev/null +++ b/changelog.d/12089.bugfix @@ -0,0 +1 @@ +Fix occasional 'Unhandled error in Deferred' error message. diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index df4fb156c2d0..1cdead02f14b 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -18,6 +18,7 @@ import logging from typing import ( Any, + Awaitable, Callable, Dict, Generic, @@ -346,15 +347,15 @@ class DeferredCacheListDescriptor(_CacheDescriptorBase): """Wraps an existing cache to support bulk fetching of keys. Given an iterable of keys it looks in the cache to find any hits, then passes - the tuple of missing keys to the wrapped function. + the set of missing keys to the wrapped function. - Once wrapped, the function returns a Deferred which resolves to the list - of results. + Once wrapped, the function returns a Deferred which resolves to a Dict mapping from + input key to output value. """ def __init__( self, - orig: Callable[..., Any], + orig: Callable[..., Awaitable[Dict]], cached_method_name: str, list_name: str, num_args: Optional[int] = None, @@ -385,13 +386,13 @@ def __init__( def __get__( self, obj: Optional[Any], objtype: Optional[Type] = None - ) -> Callable[..., Any]: + ) -> Callable[..., "defer.Deferred[Dict[Hashable, Any]]"]: cached_method = getattr(obj, self.cached_method_name) cache: DeferredCache[CacheKey, Any] = cached_method.cache num_args = cached_method.num_args @functools.wraps(self.orig) - def wrapped(*args: Any, **kwargs: Any) -> Any: + def wrapped(*args: Any, **kwargs: Any) -> "defer.Deferred[Dict]": # If we're passed a cache_context then we'll want to call its # invalidate() whenever we are invalidated invalidate_callback = kwargs.pop("on_invalidate", None) @@ -444,39 +445,38 @@ def arg_to_cache_key(arg: Hashable) -> Hashable: deferred: "defer.Deferred[Any]" = defer.Deferred() deferreds_map[arg] = deferred key = arg_to_cache_key(arg) - cache.set(key, deferred, callback=invalidate_callback) + cached_defers.append( + cache.set(key, deferred, callback=invalidate_callback) + ) def complete_all(res: Dict[Hashable, Any]) -> None: - # the wrapped function has completed. It returns a - # a dict. We can now resolve the observable deferreds in - # the cache and update our own result map. 
- for e in missing: + # the wrapped function has completed. It returns a dict. + # We can now update our own result map, and then resolve the + # observable deferreds in the cache. + for e, d1 in deferreds_map.items(): val = res.get(e, None) - deferreds_map[e].callback(val) + # make sure we update the results map before running the + # deferreds, because as soon as we run the last deferred, the + # gatherResults() below will complete and return the result + # dict to our caller. results[e] = val + d1.callback(val) - def errback(f: Failure) -> Failure: - # the wrapped function has failed. Invalidate any cache - # entries we're supposed to be populating, and fail - # their deferreds. - for e in missing: - key = arg_to_cache_key(e) - cache.invalidate(key) - deferreds_map[e].errback(f) - - # return the failure, to propagate to our caller. - return f + def errback_all(f: Failure) -> None: + # the wrapped function has failed. Propagate the failure into + # the cache, which will invalidate the entry, and cause the + # relevant cached_deferreds to fail, which will propagate the + # failure to our caller. + for d1 in deferreds_map.values(): + d1.errback(f) args_to_call = dict(arg_dict) - # copy the missing set before sending it to the callee, to guard against - # modification. - args_to_call[self.list_name] = tuple(missing) - - cached_defers.append( - defer.maybeDeferred( - preserve_fn(self.orig), **args_to_call - ).addCallbacks(complete_all, errback) - ) + args_to_call[self.list_name] = missing + + # dispatch the call, and attach the two handlers + defer.maybeDeferred( + preserve_fn(self.orig), **args_to_call + ).addCallbacks(complete_all, errback_all) if cached_defers: d = defer.gatherResults(cached_defers, consumeErrors=True).addCallbacks( diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index b92d3f0c1b51..19741ffcdaf1 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -673,14 +673,14 @@ async def list_fn(self, args1, arg2): self.assertEqual(current_context(), SENTINEL_CONTEXT) r = yield d1 self.assertEqual(current_context(), c1) - obj.mock.assert_called_once_with((10, 20), 2) + obj.mock.assert_called_once_with({10, 20}, 2) self.assertEqual(r, {10: "fish", 20: "chips"}) obj.mock.reset_mock() # a call with different params should call the mock again obj.mock.return_value = {30: "peas"} r = yield obj.list_fn([20, 30], 2) - obj.mock.assert_called_once_with((30,), 2) + obj.mock.assert_called_once_with({30}, 2) self.assertEqual(r, {20: "chips", 30: "peas"}) obj.mock.reset_mock() @@ -701,7 +701,7 @@ async def list_fn(self, args1, arg2): obj.mock.return_value = {40: "gravy"} iterable = (x for x in [10, 40, 40]) r = yield obj.list_fn(iterable, 2) - obj.mock.assert_called_once_with((40,), 2) + obj.mock.assert_called_once_with({40}, 2) self.assertEqual(r, {10: "fish", 40: "gravy"}) def test_concurrent_lookups(self): @@ -729,7 +729,7 @@ def list_fn(self, args1) -> "Deferred[dict]": d3 = obj.list_fn([10]) # the mock should have been called exactly once - obj.mock.assert_called_once_with((10,)) + obj.mock.assert_called_once_with({10}) obj.mock.reset_mock() # ... 
and none of the calls should yet be complete @@ -771,7 +771,7 @@ async def list_fn(self, args1, arg2): # cache miss obj.mock.return_value = {10: "fish", 20: "chips"} r1 = yield obj.list_fn([10, 20], 2, on_invalidate=invalidate0) - obj.mock.assert_called_once_with((10, 20), 2) + obj.mock.assert_called_once_with({10, 20}, 2) self.assertEqual(r1, {10: "fish", 20: "chips"}) obj.mock.reset_mock() From 4ccc2d09aae71da0be725ac177a9d4aced9a53c9 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 1 Mar 2022 12:35:32 +0000 Subject: [PATCH 314/474] Advertise Python 3.10 support in setup.py (#12111) --- changelog.d/12111.misc | 1 + setup.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/12111.misc diff --git a/changelog.d/12111.misc b/changelog.d/12111.misc new file mode 100644 index 000000000000..be84789c9dfc --- /dev/null +++ b/changelog.d/12111.misc @@ -0,0 +1 @@ +Advertise support for Python 3.10 in packaging files. \ No newline at end of file diff --git a/setup.py b/setup.py index c80cb6f20767..26f4650348d5 100755 --- a/setup.py +++ b/setup.py @@ -165,6 +165,7 @@ def exec_file(path_segments): "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", ], scripts=["synctl"] + glob.glob("scripts/*"), cmdclass={"test": TestCommand}, From e2e1d90a5e4030616a3de242cde26c0cfff4a6b5 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 1 Mar 2022 12:49:54 +0000 Subject: [PATCH 315/474] Faster joins: persist to database (#12012) When we get a partial_state response from send_join, store information in the database about it: * store a record about the room as a whole having partial state, and stash the list of member servers too. * flag the join event itself as having partial state * also, for any new events whose prev-events are partial-stated, note that they will *also* be partial-stated. We don't yet make any attempt to interpret this data, so API calls (and a bunch of other things) are just going to get incorrect data. --- changelog.d/12012.misc | 1 + synapse/events/snapshot.py | 9 +++ synapse/handlers/federation.py | 11 ++- synapse/handlers/federation_event.py | 13 +++- synapse/handlers/message.py | 2 + synapse/state/__init__.py | 31 +++++++- synapse/storage/databases/main/events.py | 25 +++++++ .../storage/databases/main/events_worker.py | 28 ++++++++ synapse/storage/databases/main/room.py | 37 ++++++++++ .../main/delta/68/04partial_state_rooms.sql | 41 +++++++++++ .../68/05partial_state_rooms_triggers.py | 72 +++++++++++++++++++ tests/test_state.py | 59 ++++++++------- 12 files changed, 297 insertions(+), 32 deletions(-) create mode 100644 changelog.d/12012.misc create mode 100644 synapse/storage/schema/main/delta/68/04partial_state_rooms.sql create mode 100644 synapse/storage/schema/main/delta/68/05partial_state_rooms_triggers.py diff --git a/changelog.d/12012.misc b/changelog.d/12012.misc new file mode 100644 index 000000000000..a473f41e7856 --- /dev/null +++ b/changelog.d/12012.misc @@ -0,0 +1 @@ +Preparation for faster-room-join work: persist information on which events and rooms have partial state to the database. 
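(Illustrative sketch, not part of the patch: the propagation rule described in the commit message above, that a new event is itself partially-stated whenever any of its prev-events is, reduces to a check like the following. `get_partial_state_events` is the storage method this patch adds; the free-standing function is a simplification of the logic that lands in `compute_event_context` in the diff below.)

```python
# Simplified sketch of the prev-event propagation rule from this patch.
# `store` is assumed to be the main datastore, which gains
# `get_partial_state_events` in the diff below.
async def inherits_partial_state(store, event) -> bool:
    # Maps each prev_event_id to whether that event has partial state.
    incomplete_prev_events = await store.get_partial_state_events(
        event.prev_event_ids()
    )
    # If any prev-event is partially-stated, this event must be treated as
    # partially-stated too.
    return any(incomplete_prev_events.values())
```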
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index 5833fee25fce..46042b2bf7af 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -101,6 +101,9 @@ class EventContext: As with _current_state_ids, this is a private attribute. It should be accessed via get_prev_state_ids. + + partial_state: if True, we may be storing this event with a temporary, + incomplete state. """ rejected: Union[bool, str] = False @@ -113,12 +116,15 @@ class EventContext: _current_state_ids: Optional[StateMap[str]] = None _prev_state_ids: Optional[StateMap[str]] = None + partial_state: bool = False + @staticmethod def with_state( state_group: Optional[int], state_group_before_event: Optional[int], current_state_ids: Optional[StateMap[str]], prev_state_ids: Optional[StateMap[str]], + partial_state: bool, prev_group: Optional[int] = None, delta_ids: Optional[StateMap[str]] = None, ) -> "EventContext": @@ -129,6 +135,7 @@ def with_state( state_group_before_event=state_group_before_event, prev_group=prev_group, delta_ids=delta_ids, + partial_state=partial_state, ) @staticmethod @@ -170,6 +177,7 @@ async def serialize(self, event: EventBase, store: "DataStore") -> JsonDict: "prev_group": self.prev_group, "delta_ids": _encode_state_dict(self.delta_ids), "app_service_id": self.app_service.id if self.app_service else None, + "partial_state": self.partial_state, } @staticmethod @@ -196,6 +204,7 @@ def deserialize(storage: "Storage", input: JsonDict) -> "EventContext": prev_group=input["prev_group"], delta_ids=_decode_state_dict(input["delta_ids"]), rejected=input["rejected"], + partial_state=input.get("partial_state", False), ) app_service_id = input["app_service_id"] diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index c055c26ecaa7..eb03a5accbac 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -519,8 +519,17 @@ async def do_invite_join( state_events=state, ) + if ret.partial_state: + await self.store.store_partial_state_room(room_id, ret.servers_in_room) + max_stream_id = await self._federation_event_handler.process_remote_join( - origin, room_id, auth_chain, state, event, room_version_obj + origin, + room_id, + auth_chain, + state, + event, + room_version_obj, + partial_state=ret.partial_state, ) # We wait here until this instance has seen the events come down diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 09d0de1ead82..4bd87709f373 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -397,6 +397,7 @@ async def process_remote_join( state: List[EventBase], event: EventBase, room_version: RoomVersion, + partial_state: bool, ) -> int: """Persists the events returned by a send_join @@ -412,6 +413,7 @@ async def process_remote_join( event room_version: The room version we expect this room to have, and will raise if it doesn't match the version in the create event. + partial_state: True if the state omits non-critical membership events Returns: The stream ID after which all events have been persisted. @@ -453,10 +455,14 @@ async def process_remote_join( ) # and now persist the join event itself. 
- logger.info("Peristing join-via-remote %s", event) + logger.info( + "Peristing join-via-remote %s (partial_state: %s)", event, partial_state + ) with nested_logging_context(suffix=event.event_id): context = await self._state_handler.compute_event_context( - event, old_state=state + event, + old_state=state, + partial_state=partial_state, ) context = await self._check_event_auth(origin, event, context) @@ -698,6 +704,8 @@ async def _process_pulled_event( try: state = await self._resolve_state_at_missing_prevs(origin, event) + # TODO(faster_joins): make sure that _resolve_state_at_missing_prevs does + # not return partial state await self._process_received_pdu( origin, event, state=state, backfilled=backfilled ) @@ -1791,6 +1799,7 @@ async def _update_context_for_auth_events( prev_state_ids=prev_state_ids, prev_group=prev_group, delta_ids=state_updates, + partial_state=context.partial_state, ) async def _run_push_actions_and_persist_event( diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index ce1fa3c78ea3..61cb133ef265 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -992,6 +992,8 @@ async def create_new_client_event( and full_state_ids_at_event and builder.internal_metadata.is_historical() ): + # TODO(faster_joins): figure out how this works, and make sure that the + # old state is complete. old_state = await self.store.get_events_as_list(full_state_ids_at_event) context = await self.state.compute_event_context(event, old_state=old_state) else: diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index fcc24ad1296a..6babd5963cc1 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -258,7 +258,10 @@ async def get_hosts_in_room_at_events( return await self.store.get_joined_hosts(room_id, entry) async def compute_event_context( - self, event: EventBase, old_state: Optional[Iterable[EventBase]] = None + self, + event: EventBase, + old_state: Optional[Iterable[EventBase]] = None, + partial_state: bool = False, ) -> EventContext: """Build an EventContext structure for a non-outlier event. @@ -273,6 +276,8 @@ async def compute_event_context( calculated from existing events. This is normally only specified when receiving an event from federation where we don't have the prev events for, e.g. when backfilling. + partial_state: True if `old_state` is partial and omits non-critical + membership events Returns: The event context. """ @@ -295,8 +300,28 @@ async def compute_event_context( else: # otherwise, we'll need to resolve the state across the prev_events. - logger.debug("calling resolve_state_groups from compute_event_context") + # partial_state should not be set explicitly in this case: + # we work it out dynamically + assert not partial_state + + # if any of the prev-events have partial state, so do we. + # (This is slightly racy - the prev-events might get fixed up before we use + # their states - but I don't think that really matters; it just means we + # might redundantly recalculate the state for this event later.) 
+ prev_event_ids = event.prev_event_ids() + incomplete_prev_events = await self.store.get_partial_state_events( + prev_event_ids + ) + if any(incomplete_prev_events.values()): + logger.debug( + "New/incoming event %s refers to prev_events %s with partial state", + event.event_id, + [k for (k, v) in incomplete_prev_events.items() if v], + ) + partial_state = True + + logger.debug("calling resolve_state_groups from compute_event_context") entry = await self.resolve_state_groups_for_events( event.room_id, event.prev_event_ids() ) @@ -342,6 +367,7 @@ async def compute_event_context( prev_state_ids=state_ids_before_event, prev_group=state_group_before_event_prev_group, delta_ids=deltas_to_state_group_before_event, + partial_state=partial_state, ) # @@ -373,6 +399,7 @@ async def compute_event_context( prev_state_ids=state_ids_before_event, prev_group=state_group_before_event, delta_ids=delta_ids, + partial_state=partial_state, ) @measure_func() diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 23fa089bca98..ca2a9ba9d116 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -2145,6 +2145,14 @@ def _store_event_state_mappings_txn( state_groups = {} for event, context in events_and_contexts: if event.internal_metadata.is_outlier(): + # double-check that we don't have any events that claim to be outliers + # *and* have partial state (which is meaningless: we should have no + # state at all for an outlier) + if context.partial_state: + raise ValueError( + "Outlier event %s claims to have partial state", event.event_id + ) + continue # if the event was rejected, just give it the same state as its @@ -2155,6 +2163,23 @@ def _store_event_state_mappings_txn( state_groups[event.event_id] = context.state_group + # if we have partial state for these events, record the fact. (This happens + # here rather than in _store_event_txn because it also needs to happen when + # we de-outlier an event.) 
+ self.db_pool.simple_insert_many_txn( + txn, + table="partial_state_events", + keys=("room_id", "event_id"), + values=[ + ( + event.room_id, + event.event_id, + ) + for event, ctx in events_and_contexts + if ctx.partial_state + ], + ) + self.db_pool.simple_upsert_many_txn( txn, table="event_to_state_groups", diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 2a255d103101..26784f755e40 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -1953,3 +1953,31 @@ def get_event_id_for_timestamp_txn(txn: LoggingTransaction) -> Optional[str]: "get_event_id_for_timestamp_txn", get_event_id_for_timestamp_txn, ) + + @cachedList("is_partial_state_event", list_name="event_ids") + async def get_partial_state_events( + self, event_ids: Collection[str] + ) -> Dict[str, bool]: + """Checks which of the given events have partial state""" + result = await self.db_pool.simple_select_many_batch( + table="partial_state_events", + column="event_id", + iterable=event_ids, + retcols=["event_id"], + desc="get_partial_state_events", + ) + # convert the result to a dict, to make @cachedList work + partial = {r["event_id"] for r in result} + return {e_id: e_id in partial for e_id in event_ids} + + @cached() + async def is_partial_state_event(self, event_id: str) -> bool: + """Checks if the given event has partial state""" + result = await self.db_pool.simple_select_one_onecol( + table="partial_state_events", + keyvalues={"event_id": event_id}, + retcol="1", + allow_none=True, + desc="is_partial_state_event", + ) + return result is not None diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 0416df64ce8d..94068940b90d 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -20,6 +20,7 @@ TYPE_CHECKING, Any, Awaitable, + Collection, Dict, List, Optional, @@ -1543,6 +1544,42 @@ async def upsert_room_on_join( lock=False, ) + async def store_partial_state_room( + self, + room_id: str, + servers: Collection[str], + ) -> None: + """Mark the given room as containing events with partial state + + Args: + room_id: the ID of the room + servers: other servers known to be in the room + """ + await self.db_pool.runInteraction( + "store_partial_state_room", + self._store_partial_state_room_txn, + room_id, + servers, + ) + + @staticmethod + def _store_partial_state_room_txn( + txn: LoggingTransaction, room_id: str, servers: Collection[str] + ) -> None: + DatabasePool.simple_insert_txn( + txn, + table="partial_state_rooms", + values={ + "room_id": room_id, + }, + ) + DatabasePool.simple_insert_many_txn( + txn, + table="partial_state_rooms_servers", + keys=("room_id", "server_name"), + values=((room_id, s) for s in servers), + ) + async def maybe_store_room_on_outlier_membership( self, room_id: str, room_version: RoomVersion ) -> None: diff --git a/synapse/storage/schema/main/delta/68/04partial_state_rooms.sql b/synapse/storage/schema/main/delta/68/04partial_state_rooms.sql new file mode 100644 index 000000000000..815c0cc390b6 --- /dev/null +++ b/synapse/storage/schema/main/delta/68/04partial_state_rooms.sql @@ -0,0 +1,41 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- rooms which we have done a partial-state-style join to +CREATE TABLE IF NOT EXISTS partial_state_rooms ( + room_id TEXT PRIMARY KEY, + FOREIGN KEY(room_id) REFERENCES rooms(room_id) +); + +-- a list of remote servers we believe are in the room +CREATE TABLE IF NOT EXISTS partial_state_rooms_servers ( + room_id TEXT NOT NULL REFERENCES partial_state_rooms(room_id), + server_name TEXT NOT NULL, + UNIQUE(room_id, server_name) +); + +-- a list of events with partial state. We can't store this in the `events` table +-- itself, because `events` is meant to be append-only. +CREATE TABLE IF NOT EXISTS partial_state_events ( + -- the room_id is denormalised for efficient indexing (the canonical source is `events`) + room_id TEXT NOT NULL REFERENCES partial_state_rooms(room_id), + event_id TEXT NOT NULL REFERENCES events(event_id), + UNIQUE(event_id) +); + +CREATE INDEX IF NOT EXISTS partial_state_events_room_id_idx + ON partial_state_events (room_id); + + diff --git a/synapse/storage/schema/main/delta/68/05partial_state_rooms_triggers.py b/synapse/storage/schema/main/delta/68/05partial_state_rooms_triggers.py new file mode 100644 index 000000000000..a2ec4fc26edb --- /dev/null +++ b/synapse/storage/schema/main/delta/68/05partial_state_rooms_triggers.py @@ -0,0 +1,72 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +""" +This migration adds triggers to the partial_state_events tables to enforce uniqueness + +Triggers cannot be expressed in .sql files, so we have to use a separate file. +""" +from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine +from synapse.storage.types import Cursor + + +def run_create(cur: Cursor, database_engine: BaseDatabaseEngine, *args, **kwargs): + # complain if the room_id in partial_state_events doesn't match + # that in `events`. We already have a fk constraint which ensures that the event + # exists in `events`, so all we have to do is raise if there is a row with a + # matching event_id but not a matching room_id.
+ if isinstance(database_engine, Sqlite3Engine): + cur.execute( + """ + CREATE TRIGGER IF NOT EXISTS partial_state_events_bad_room_id + BEFORE INSERT ON partial_state_events + FOR EACH ROW + BEGIN + SELECT RAISE(ABORT, 'Incorrect room_id in partial_state_events') + WHERE EXISTS ( + SELECT 1 FROM events + WHERE events.event_id = NEW.event_id + AND events.room_id != NEW.room_id + ); + END; + """ + ) + elif isinstance(database_engine, PostgresEngine): + cur.execute( + """ + CREATE OR REPLACE FUNCTION check_partial_state_events() RETURNS trigger AS $BODY$ + BEGIN + IF EXISTS ( + SELECT 1 FROM events + WHERE events.event_id = NEW.event_id + AND events.room_id != NEW.room_id + ) THEN + RAISE EXCEPTION 'Incorrect room_id in partial_state_events'; + END IF; + RETURN NEW; + END; + $BODY$ LANGUAGE plpgsql; + """ + ) + + cur.execute( + """ + CREATE TRIGGER check_partial_state_events BEFORE INSERT OR UPDATE ON partial_state_events + FOR EACH ROW + EXECUTE PROCEDURE check_partial_state_events() + """ + ) + else: + raise NotImplementedError("Unknown database engine") diff --git a/tests/test_state.py b/tests/test_state.py index 90800421fb86..e4baa6913746 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import List, Optional +from typing import Collection, Dict, List, Optional from unittest.mock import Mock from twisted.internet import defer @@ -70,7 +70,7 @@ def create_event( return event -class StateGroupStore: +class _DummyStore: def __init__(self): self._event_to_state_group = {} self._group_to_state = {} @@ -105,6 +105,11 @@ async def get_events(self, event_ids, **kwargs): if e_id in self._event_id_to_event } + async def get_partial_state_events( + self, event_ids: Collection[str] + ) -> Dict[str, bool]: + return {e: False for e in event_ids} + async def get_state_group_delta(self, name): return None, None @@ -157,8 +162,8 @@ def get_leaves(self): class StateTestCase(unittest.TestCase): def setUp(self): - self.store = StateGroupStore() - storage = Mock(main=self.store, state=self.store) + self.dummy_store = _DummyStore() + storage = Mock(main=self.dummy_store, state=self.dummy_store) hs = Mock( spec_set=[ "config", @@ -173,7 +178,7 @@ def setUp(self): ] ) hs.config = default_config("tesths", True) - hs.get_datastores.return_value = Mock(main=self.store) + hs.get_datastores.return_value = Mock(main=self.dummy_store) hs.get_state_handler.return_value = None hs.get_clock.return_value = MockClock() hs.get_auth.return_value = Auth(hs) @@ -198,7 +203,7 @@ def test_branch_no_conflict(self): edges={"A": ["START"], "B": ["A"], "C": ["A"], "D": ["B", "C"]}, ) - self.store.register_events(graph.walk()) + self.dummy_store.register_events(graph.walk()) context_store: dict[str, EventContext] = {} @@ -206,7 +211,7 @@ def test_branch_no_conflict(self): context = yield defer.ensureDeferred( self.state.compute_event_context(event) ) - self.store.register_event_context(event, context) + self.dummy_store.register_event_context(event, context) context_store[event.event_id] = context ctx_c = context_store["C"] @@ -242,7 +247,7 @@ def test_branch_basic_conflict(self): edges={"A": ["START"], "B": ["A"], "C": ["A"], "D": ["B", "C"]}, ) - self.store.register_events(graph.walk()) + self.dummy_store.register_events(graph.walk()) context_store = {} @@ -250,7 +255,7 @@ def test_branch_basic_conflict(self): context 
= yield defer.ensureDeferred( self.state.compute_event_context(event) ) - self.store.register_event_context(event, context) + self.dummy_store.register_event_context(event, context) context_store[event.event_id] = context # C ends up winning the resolution between B and C @@ -300,7 +305,7 @@ def test_branch_have_banned_conflict(self): edges={"A": ["START"], "B": ["A"], "C": ["B"], "D": ["B"], "E": ["C", "D"]}, ) - self.store.register_events(graph.walk()) + self.dummy_store.register_events(graph.walk()) context_store = {} @@ -308,7 +313,7 @@ def test_branch_have_banned_conflict(self): context = yield defer.ensureDeferred( self.state.compute_event_context(event) ) - self.store.register_event_context(event, context) + self.dummy_store.register_event_context(event, context) context_store[event.event_id] = context # C ends up winning the resolution between C and D because bans win over other @@ -375,7 +380,7 @@ def test_branch_have_perms_conflict(self): self._add_depths(nodes, edges) graph = Graph(nodes, edges) - self.store.register_events(graph.walk()) + self.dummy_store.register_events(graph.walk()) context_store = {} @@ -383,7 +388,7 @@ def test_branch_have_perms_conflict(self): context = yield defer.ensureDeferred( self.state.compute_event_context(event) ) - self.store.register_event_context(event, context) + self.dummy_store.register_event_context(event, context) context_store[event.event_id] = context # B ends up winning the resolution between B and C because power levels @@ -476,7 +481,7 @@ def test_trivial_annotate_message(self): ] group_name = yield defer.ensureDeferred( - self.store.store_state_group( + self.dummy_store.store_state_group( prev_event_id, event.room_id, None, @@ -484,7 +489,7 @@ def test_trivial_annotate_message(self): {(e.type, e.state_key): e.event_id for e in old_state}, ) ) - self.store.register_event_id_state_group(prev_event_id, group_name) + self.dummy_store.register_event_id_state_group(prev_event_id, group_name) context = yield defer.ensureDeferred(self.state.compute_event_context(event)) @@ -510,7 +515,7 @@ def test_trivial_annotate_state(self): ] group_name = yield defer.ensureDeferred( - self.store.store_state_group( + self.dummy_store.store_state_group( prev_event_id, event.room_id, None, @@ -518,7 +523,7 @@ def test_trivial_annotate_state(self): {(e.type, e.state_key): e.event_id for e in old_state}, ) ) - self.store.register_event_id_state_group(prev_event_id, group_name) + self.dummy_store.register_event_id_state_group(prev_event_id, group_name) context = yield defer.ensureDeferred(self.state.compute_event_context(event)) @@ -554,8 +559,8 @@ def test_resolve_message_conflict(self): create_event(type="test4", state_key=""), ] - self.store.register_events(old_state_1) - self.store.register_events(old_state_2) + self.dummy_store.register_events(old_state_1) + self.dummy_store.register_events(old_state_2) context = yield self._get_context( event, prev_event_id1, old_state_1, prev_event_id2, old_state_2 @@ -594,10 +599,10 @@ def test_resolve_state_conflict(self): create_event(type="test4", state_key=""), ] - store = StateGroupStore() + store = _DummyStore() store.register_events(old_state_1) store.register_events(old_state_2) - self.store.get_events = store.get_events + self.dummy_store.get_events = store.get_events context = yield self._get_context( event, prev_event_id1, old_state_1, prev_event_id2, old_state_2 @@ -649,10 +654,10 @@ def test_standard_depth_conflict(self): create_event(type="test1", state_key="1", depth=2), ] - store = StateGroupStore() + 
store = _DummyStore() store.register_events(old_state_1) store.register_events(old_state_2) - self.store.get_events = store.get_events + self.dummy_store.get_events = store.get_events context = yield self._get_context( event, prev_event_id1, old_state_1, prev_event_id2, old_state_2 @@ -695,7 +700,7 @@ def _get_context( self, event, prev_event_id_1, old_state_1, prev_event_id_2, old_state_2 ): sg1 = yield defer.ensureDeferred( - self.store.store_state_group( + self.dummy_store.store_state_group( prev_event_id_1, event.room_id, None, @@ -703,10 +708,10 @@ def _get_context( {(e.type, e.state_key): e.event_id for e in old_state_1}, ) ) - self.store.register_event_id_state_group(prev_event_id_1, sg1) + self.dummy_store.register_event_id_state_group(prev_event_id_1, sg1) sg2 = yield defer.ensureDeferred( - self.store.store_state_group( + self.dummy_store.store_state_group( prev_event_id_2, event.room_id, None, @@ -714,7 +719,7 @@ def _get_context( {(e.type, e.state_key): e.event_id for e in old_state_2}, ) ) - self.store.register_event_id_state_group(prev_event_id_2, sg2) + self.dummy_store.register_event_id_state_group(prev_event_id_2, sg2) result = yield defer.ensureDeferred(self.state.compute_event_context(event)) return result From c893632319f9bcd76d105573008e8cb0ec2fe7ce Mon Sep 17 00:00:00 2001 From: reivilibre Date: Tue, 1 Mar 2022 13:41:57 +0000 Subject: [PATCH 316/474] Order in-flight state group queries in biggest-first order (#11610) Co-authored-by: Patrick Cloke --- changelog.d/11610.misc | 1 + synapse/storage/databases/state/store.py | 30 +++++- tests/storage/databases/test_state_store.py | 104 +++++++++++++++++++- 3 files changed, 131 insertions(+), 4 deletions(-) create mode 100644 changelog.d/11610.misc diff --git a/changelog.d/11610.misc b/changelog.d/11610.misc new file mode 100644 index 000000000000..3af049b96961 --- /dev/null +++ b/changelog.d/11610.misc @@ -0,0 +1 @@ +Deduplicate in-flight requests in `_get_state_for_groups`. diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index b8016f679a7f..dadf3d1e3ab3 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -25,6 +25,7 @@ ) import attr +from sortedcontainers import SortedDict from twisted.internet import defer @@ -72,6 +73,24 @@ def __len__(self) -> int: return len(self.delta_ids) if self.delta_ids else 0 +def state_filter_rough_priority_comparator( + state_filter: StateFilter, +) -> Tuple[int, int]: + """ + Returns a comparable value that roughly indicates the relative size of this + state filter compared to others. + 'Larger' state filters should sort first when using ascending order, so + this is essentially the opposite of 'size'. + It should be treated as a rough guide only and should not be interpreted to + have any particular meaning. 
The representation may also change + + The current implementation returns a tuple of the form: + * -1 for include_others, 0 otherwise + * -(number of entries in state_filter.types) + """ + return -int(state_filter.include_others), -len(state_filter.types) + + class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): """A data store for fetching/storing state groups.""" @@ -127,7 +146,7 @@ def __init__( # Current ongoing get_state_for_groups in-flight requests # {group ID -> {StateFilter -> ObservableDeferred}} self._state_group_inflight_requests: Dict[ - int, Dict[StateFilter, AbstractObservableDeferred[StateMap[str]]] + int, SortedDict[StateFilter, AbstractObservableDeferred[StateMap[str]]] ] = {} def get_max_state_group_txn(txn: Cursor) -> int: @@ -279,7 +298,10 @@ def _get_state_for_group_gather_inflight_requests( # The list of ongoing requests which will help narrow the current request. reusable_requests = [] - for (request_state_filter, request_deferred) in inflight_requests.items(): + + # Iterate over existing requests in roughly biggest-first order. + for request_state_filter in inflight_requests: + request_deferred = inflight_requests[request_state_filter] new_state_filter_left_over = state_filter_left_over.approx_difference( request_state_filter ) @@ -358,7 +380,9 @@ async def _the_request() -> StateMap[str]: observable_deferred = ObservableDeferred(request_deferred, consumeErrors=True) # Insert the ObservableDeferred into the cache - group_request_dict = self._state_group_inflight_requests.setdefault(group, {}) + group_request_dict = self._state_group_inflight_requests.setdefault( + group, SortedDict(state_filter_rough_priority_comparator) + ) group_request_dict[db_state_filter] = observable_deferred return await make_deferred_yieldable(observable_deferred.observe()) diff --git a/tests/storage/databases/test_state_store.py b/tests/storage/databases/test_state_store.py index 076b66080946..2b484c95a935 100644 --- a/tests/storage/databases/test_state_store.py +++ b/tests/storage/databases/test_state_store.py @@ -15,11 +15,16 @@ from typing import Dict, List, Sequence, Tuple from unittest.mock import patch +from parameterized import parameterized + from twisted.internet.defer import Deferred, ensureDeferred from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import EventTypes -from synapse.storage.databases.state.store import MAX_INFLIGHT_REQUESTS_PER_GROUP +from synapse.storage.databases.state.store import ( + MAX_INFLIGHT_REQUESTS_PER_GROUP, + state_filter_rough_priority_comparator, +) from synapse.storage.state import StateFilter from synapse.types import StateMap from synapse.util import Clock @@ -350,3 +355,100 @@ def test_inflight_requests_capped(self) -> None: self._complete_request_fake(groups, sf, d) self.assertTrue(reqs[CAP_COUNT].called) self.assertTrue(reqs[CAP_COUNT + 1].called) + + @parameterized.expand([(False,), (True,)]) + def test_ordering_of_request_reuse(self, reverse: bool) -> None: + """ + Tests that 'larger' in-flight requests are ordered first. + + This is mostly a design decision in order to prevent a request from + hanging on to multiple queries when it would have been sufficient to + hang on to only one bigger query. + + The 'size' of a state filter is a rough heuristic. 
+ + - requests two pieces of state, one 'larger' than the other, but each + spawning a query + - requests a third piece of state + - completes the larger of the first two queries + - checks that the third request gets completed (and doesn't needlessly + wait for the other query) + + Parameters: + reverse: whether to reverse the order of the initial requests, to ensure + that the effect doesn't depend on the order of request submission. + """ + + # We add in an extra state type to make sure that both requests spawn + # queries which are not optimised out. + state_filters = [ + StateFilter.freeze( + {"state.type": {"A"}, "other.state.type": {"a"}}, include_others=False + ), + StateFilter.freeze( + { + "state.type": None, + "other.state.type": {"b"}, + # The current rough size comparator uses the number of state types + # as an indicator of size. + # To influence it to make this state filter bigger than the previous one, + # we add another dummy state type. + "extra.state.type": {"c"}, + }, + include_others=False, + ), + ] + + if reverse: + # For fairness, we perform one test run with the list reversed. + state_filters.reverse() + smallest_state_filter_idx = 1 + biggest_state_filter_idx = 0 + else: + smallest_state_filter_idx = 0 + biggest_state_filter_idx = 1 + + # This assertion is for our own sanity more than anything else. + self.assertLess( + state_filter_rough_priority_comparator( + state_filters[biggest_state_filter_idx] + ), + state_filter_rough_priority_comparator( + state_filters[smallest_state_filter_idx] + ), + "Test invalid: bigger state filter is not actually bigger.", + ) + + # Spawn the initial two requests + for state_filter in state_filters: + ensureDeferred( + self.state_datastore._get_state_for_group_using_inflight_cache( + 42, + state_filter, + ) + ) + + # Spawn a third request + req = ensureDeferred( + self.state_datastore._get_state_for_group_using_inflight_cache( + 42, + StateFilter.freeze( + { + "state.type": {"A"}, + }, + include_others=False, + ), + ) + ) + self.pump(by=0.1) + + self.assertFalse(req.called) + + # Complete the largest request's query to make sure that the final request + # only waits for that one (and doesn't needlessly wait for both queries) + self._complete_request_fake( + *self.get_state_group_calls[biggest_state_filter_idx] + ) + + # That should have been sufficient to complete the third request + self.assertTrue(req.called) From 91bc15c772d22fbe814170ab2e0fdbfa50f9c372 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Tue, 1 Mar 2022 13:51:03 +0000 Subject: [PATCH 317/474] Add `stop_cancellation` utility function (#12106) --- changelog.d/12106.misc | 1 + synapse/util/async_helpers.py | 19 ++++++++++++++ tests/util/test_async_helpers.py | 45 ++++++++++++++++++++++++++++++++ 3 files changed, 65 insertions(+) create mode 100644 changelog.d/12106.misc diff --git a/changelog.d/12106.misc b/changelog.d/12106.misc new file mode 100644 index 000000000000..d918e9e3b16d --- /dev/null +++ b/changelog.d/12106.misc @@ -0,0 +1 @@ +Add `stop_cancellation` utility function to stop `Deferred`s from being cancelled. 
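(A minimal usage sketch, distilled from the new unit tests in the diff below rather than an authoritative recipe: cancelling the wrapper fails only the wrapper, while the wrapped `Deferred` keeps running.)

```python
from twisted.internet.defer import CancelledError, Deferred

from synapse.util.async_helpers import stop_cancellation

deferred: "Deferred[str]" = Deferred()
wrapper = stop_cancellation(deferred)

# Cancelling the wrapper fails it with CancelledError...
wrapper.cancel()
wrapper.addErrback(lambda f: f.trap(CancelledError))  # consume the failure

# ...but the original Deferred is untouched and still pending, so the
# underlying work is not interrupted.
assert not deferred.called
```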
diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index a83296a2292b..81320b89721e 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -665,3 +665,22 @@ def maybe_awaitable(value: Union[Awaitable[R], R]) -> Awaitable[R]: return value return DoneAwaitable(value) + + +def stop_cancellation(deferred: "defer.Deferred[T]") -> "defer.Deferred[T]": + """Prevent a `Deferred` from being cancelled by wrapping it in another `Deferred`. + + Args: + deferred: The `Deferred` to protect against cancellation. Must not follow the + Synapse logcontext rules. + + Returns: + A new `Deferred`, which will contain the result of the original `Deferred`, + but will not propagate cancellation through to the original. When cancelled, + the new `Deferred` will fail with a `CancelledError` and will not follow the + Synapse logcontext rules. `make_deferred_yieldable` should be used to wrap + the new `Deferred`. + """ + new_deferred: defer.Deferred[T] = defer.Deferred() + deferred.chainDeferred(new_deferred) + return new_deferred diff --git a/tests/util/test_async_helpers.py b/tests/util/test_async_helpers.py index cce8d595fc7e..362014f4cb6f 100644 --- a/tests/util/test_async_helpers.py +++ b/tests/util/test_async_helpers.py @@ -27,6 +27,7 @@ from synapse.util.async_helpers import ( ObservableDeferred, concurrently_execute, + stop_cancellation, timeout_deferred, ) @@ -282,3 +283,47 @@ async def caller(): d2 = ensureDeferred(caller()) d1.callback(0) self.successResultOf(d2) + + +class StopCancellationTests(TestCase): + """Tests for the `stop_cancellation` function.""" + + def test_succeed(self): + """Test that the new `Deferred` receives the result.""" + deferred: "Deferred[str]" = Deferred() + wrapper_deferred = stop_cancellation(deferred) + + # Success should propagate through. + deferred.callback("success") + self.assertTrue(wrapper_deferred.called) + self.assertEqual("success", self.successResultOf(wrapper_deferred)) + + def test_failure(self): + """Test that the new `Deferred` receives the `Failure`.""" + deferred: "Deferred[str]" = Deferred() + wrapper_deferred = stop_cancellation(deferred) + + # Failure should propagate through. + deferred.errback(ValueError("abc")) + self.assertTrue(wrapper_deferred.called) + self.failureResultOf(wrapper_deferred, ValueError) + self.assertIsNone(deferred.result, "`Failure` was not consumed") + + def test_cancellation(self): + """Test that cancellation of the new `Deferred` leaves the original running.""" + deferred: "Deferred[str]" = Deferred() + wrapper_deferred = stop_cancellation(deferred) + + # Cancel the new `Deferred`. + wrapper_deferred.cancel() + self.assertTrue(wrapper_deferred.called) + self.failureResultOf(wrapper_deferred, CancelledError) + self.assertFalse( + deferred.called, "Original `Deferred` was unexpectedly cancelled." + ) + + # Now make the inner `Deferred` fail. + # The `Failure` must be consumed, otherwise unwanted tracebacks will be printed + # in logs. 
+ deferred.errback(ValueError("abc")) + self.assertIsNone(deferred.result, "`Failure` was not consumed") From f26e390a40288be2801b3b9b3a99269b3f3ff81f Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 1 Mar 2022 13:55:18 +0000 Subject: [PATCH 318/474] Use Python 3.9 in Synapse dockerfiles by default (#12112) --- changelog.d/12112.docker | 1 + docker/Dockerfile | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12112.docker diff --git a/changelog.d/12112.docker b/changelog.d/12112.docker new file mode 100644 index 000000000000..b9e630653d24 --- /dev/null +++ b/changelog.d/12112.docker @@ -0,0 +1 @@ +Use Python 3.9 in Docker images by default. \ No newline at end of file diff --git a/docker/Dockerfile b/docker/Dockerfile index e4c1c19b86a7..a8bb9b0e7f7f 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -11,10 +11,10 @@ # There is an optional PYTHON_VERSION build argument which sets the # version of python to build against: for example: # -# DOCKER_BUILDKIT=1 docker build -f docker/Dockerfile --build-arg PYTHON_VERSION=3.9 . +# DOCKER_BUILDKIT=1 docker build -f docker/Dockerfile --build-arg PYTHON_VERSION=3.10 . # -ARG PYTHON_VERSION=3.8 +ARG PYTHON_VERSION=3.9 ### ### Stage 0: builder From 300ed0b8a6050b5187a2a524a82cf87baad3ca73 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 1 Mar 2022 15:00:03 +0000 Subject: [PATCH 319/474] Add module callbacks called for reacting to deactivation status change and profile update (#12062) --- changelog.d/12062.feature | 1 + docs/modules/third_party_rules_callbacks.md | 56 +++++ synapse/events/third_party_rules.py | 56 ++++- synapse/handlers/deactivate_account.py | 20 +- synapse/handlers/profile.py | 14 ++ synapse/module_api/__init__.py | 1 + tests/rest/client/test_third_party_rules.py | 219 +++++++++++++++++++- 7 files changed, 360 insertions(+), 7 deletions(-) create mode 100644 changelog.d/12062.feature diff --git a/changelog.d/12062.feature b/changelog.d/12062.feature new file mode 100644 index 000000000000..46a606709da8 --- /dev/null +++ b/changelog.d/12062.feature @@ -0,0 +1 @@ +Add module callbacks to react to user deactivation status changes (i.e. deactivations and reactivations) and profile updates. diff --git a/docs/modules/third_party_rules_callbacks.md b/docs/modules/third_party_rules_callbacks.md index a3a17096a8f5..09ac838107b8 100644 --- a/docs/modules/third_party_rules_callbacks.md +++ b/docs/modules/third_party_rules_callbacks.md @@ -148,6 +148,62 @@ deny an incoming event, see [`check_event_for_spam`](spam_checker_callbacks.md#c If multiple modules implement this callback, Synapse runs them all in order. +### `on_profile_update` + +_First introduced in Synapse v1.54.0_ + +```python +async def on_profile_update( + user_id: str, + new_profile: "synapse.module_api.ProfileInfo", + by_admin: bool, + deactivation: bool, +) -> None: +``` + +Called after updating a local user's profile. The update can be triggered either by the +user themselves or a server admin. The update can also be triggered by a user being +deactivated (in which case their display name is set to an empty string (`""`) and the +avatar URL is set to `None`). 
The module is passed the Matrix ID of the user whose profile
+has been updated, their new profile, as well as a `by_admin` boolean that is `True` if the
+update was triggered by a server admin (and `False` otherwise), and a `deactivation`
+boolean that is `True` if the update is a result of the user being deactivated.
+
+Note that the `by_admin` boolean is also `True` if the profile change happens as a result
+of the user logging in through Single Sign-On, or if a server admin updates their own
+profile.
+
+Per-room profile changes do not trigger this callback. Server administrators who wish
+this callback to be called on every profile change are encouraged to disable
+per-room profiles globally using the `allow_per_room_profiles` configuration setting in
+Synapse's configuration file.
+
+This callback is not called when registering a user, even when setting it through the
+[`get_displayname_for_registration`](https://matrix-org.github.io/synapse/latest/modules/password_auth_provider_callbacks.html#get_displayname_for_registration)
+module callback.
+
+If multiple modules implement this callback, Synapse runs them all in order.
+
+### `on_user_deactivation_status_changed`
+
+_First introduced in Synapse v1.54.0_
+
+```python
+async def on_user_deactivation_status_changed(
+    user_id: str, deactivated: bool, by_admin: bool
+) -> None:
+```
+
+Called after deactivating a local user, or reactivating them through the admin API. The
+deactivation can be triggered either by the user themselves or a server admin. The module
+is passed the Matrix ID of the user whose status is changed, as well as a `deactivated`
+boolean that is `True` if the user is being deactivated and `False` if they're being
+reactivated, and a `by_admin` boolean that is `True` if the deactivation was triggered by
+a server admin (and `False` otherwise). This latter `by_admin` boolean is always `True`
+if the user is being reactivated, as this operation can only be performed through the
+admin API.
+
+If multiple modules implement this callback, Synapse runs them all in order.
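As a rough illustration of the two new callbacks together: a module might register and implement them as sketched here. The module class, config handling, and logging are assumptions, and this relies on the module API forwarding the new `on_profile_update` and `on_deactivation` registration arguments (the `ThirdPartyEventRules` plumbing for those arguments appears further below in this patch):

```python
import logging

import synapse.module_api

logger = logging.getLogger(__name__)


class ExampleProfileAuditModule:
    def __init__(self, config: dict, api: "synapse.module_api.ModuleApi"):
        # Register both new callbacks added by this patch.
        api.register_third_party_rules_callbacks(
            on_profile_update=self.on_profile_update,
            on_deactivation=self.on_user_deactivation_status_changed,
        )

    async def on_profile_update(
        self,
        user_id: str,
        new_profile: "synapse.module_api.ProfileInfo",
        by_admin: bool,
        deactivation: bool,
    ) -> None:
        if not deactivation:
            logger.info(
                "%s changed display name to %r", user_id, new_profile.display_name
            )

    async def on_user_deactivation_status_changed(
        self, user_id: str, deactivated: bool, by_admin: bool
    ) -> None:
        logger.info("%s deactivated=%s by_admin=%s", user_id, deactivated, by_admin)
```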
+ ## Example The example below is a module that implements the third-party rules callback diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index 71ec100a7fb1..dd3104faf3ac 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -17,6 +17,7 @@ from synapse.api.errors import ModuleFailedException, SynapseError from synapse.events import EventBase from synapse.events.snapshot import EventContext +from synapse.storage.roommember import ProfileInfo from synapse.types import Requester, StateMap from synapse.util.async_helpers import maybe_awaitable @@ -37,6 +38,8 @@ [str, StateMap[EventBase], str], Awaitable[bool] ] ON_NEW_EVENT_CALLBACK = Callable[[EventBase, StateMap[EventBase]], Awaitable] +ON_PROFILE_UPDATE_CALLBACK = Callable[[str, ProfileInfo, bool, bool], Awaitable] +ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK = Callable[[str, bool, bool], Awaitable] def load_legacy_third_party_event_rules(hs: "HomeServer") -> None: @@ -154,6 +157,10 @@ def __init__(self, hs: "HomeServer"): CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK ] = [] self._on_new_event_callbacks: List[ON_NEW_EVENT_CALLBACK] = [] + self._on_profile_update_callbacks: List[ON_PROFILE_UPDATE_CALLBACK] = [] + self._on_user_deactivation_status_changed_callbacks: List[ + ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK + ] = [] def register_third_party_rules_callbacks( self, @@ -166,6 +173,8 @@ def register_third_party_rules_callbacks( CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK ] = None, on_new_event: Optional[ON_NEW_EVENT_CALLBACK] = None, + on_profile_update: Optional[ON_PROFILE_UPDATE_CALLBACK] = None, + on_deactivation: Optional[ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK] = None, ) -> None: """Register callbacks from modules for each hook.""" if check_event_allowed is not None: @@ -187,6 +196,12 @@ def register_third_party_rules_callbacks( if on_new_event is not None: self._on_new_event_callbacks.append(on_new_event) + if on_profile_update is not None: + self._on_profile_update_callbacks.append(on_profile_update) + + if on_deactivation is not None: + self._on_user_deactivation_status_changed_callbacks.append(on_deactivation) + async def check_event_allowed( self, event: EventBase, context: EventContext ) -> Tuple[bool, Optional[dict]]: @@ -334,9 +349,6 @@ async def on_new_event(self, event_id: str) -> None: Args: event_id: The ID of the event. - - Raises: - ModuleFailureError if a callback raised any exception. """ # Bail out early without hitting the store if we don't have any callbacks if len(self._on_new_event_callbacks) == 0: @@ -370,3 +382,41 @@ async def _get_state_map_for_room(self, room_id: str) -> StateMap[EventBase]: state_events[key] = room_state_events[event_id] return state_events + + async def on_profile_update( + self, user_id: str, new_profile: ProfileInfo, by_admin: bool, deactivation: bool + ) -> None: + """Called after the global profile of a user has been updated. Does not include + per-room profile changes. + + Args: + user_id: The user whose profile was changed. + new_profile: The updated profile for the user. + by_admin: Whether the profile update was performed by a server admin. + deactivation: Whether this change was made while deactivating the user. 
+ """ + for callback in self._on_profile_update_callbacks: + try: + await callback(user_id, new_profile, by_admin, deactivation) + except Exception as e: + logger.exception( + "Failed to run module API callback %s: %s", callback, e + ) + + async def on_user_deactivation_status_changed( + self, user_id: str, deactivated: bool, by_admin: bool + ) -> None: + """Called after a user has been deactivated or reactivated. + + Args: + user_id: The deactivated user. + deactivated: Whether the user is now deactivated. + by_admin: Whether the deactivation was performed by a server admin. + """ + for callback in self._on_user_deactivation_status_changed_callbacks: + try: + await callback(user_id, deactivated, by_admin) + except Exception as e: + logger.exception( + "Failed to run module API callback %s: %s", callback, e + ) diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index e4eae0305666..76ae768e6ef5 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -38,6 +38,7 @@ def __init__(self, hs: "HomeServer"): self._profile_handler = hs.get_profile_handler() self.user_directory_handler = hs.get_user_directory_handler() self._server_name = hs.hostname + self._third_party_rules = hs.get_third_party_event_rules() # Flag that indicates whether the process to part users from rooms is running self._user_parter_running = False @@ -135,9 +136,13 @@ async def deactivate_account( if erase_data: user = UserID.from_string(user_id) # Remove avatar URL from this user - await self._profile_handler.set_avatar_url(user, requester, "", by_admin) + await self._profile_handler.set_avatar_url( + user, requester, "", by_admin, deactivation=True + ) # Remove displayname from this user - await self._profile_handler.set_displayname(user, requester, "", by_admin) + await self._profile_handler.set_displayname( + user, requester, "", by_admin, deactivation=True + ) logger.info("Marking %s as erased", user_id) await self.store.mark_user_erased(user_id) @@ -160,6 +165,13 @@ async def deactivate_account( # Remove account data (including ignored users and push rules). await self.store.purge_account_data_for_user(user_id) + # Let modules know the user has been deactivated. + await self._third_party_rules.on_user_deactivation_status_changed( + user_id, + True, + by_admin, + ) + return identity_server_supports_unbinding async def _reject_pending_invites_for_user(self, user_id: str) -> None: @@ -264,6 +276,10 @@ async def activate_account(self, user_id: str) -> None: # Mark the user as active. await self.store.set_user_deactivated_status(user_id, False) + await self._third_party_rules.on_user_deactivation_status_changed( + user_id, False, True + ) + # Add the user to the directory, if necessary. Note that # this must be done after the user is re-activated, because # deactivated users are excluded from the user directory. 
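A note on ordering: when a deactivation erases data, the handler above wipes the avatar URL and then the display name (each firing `on_profile_update` with `deactivation=True`) before `on_user_deactivation_status_changed` fires. A rough sketch of a module relying on that order follows; the bookkeeping dict and function bodies are illustrative assumptions, not code from this patch:

```python
from typing import Dict, List

from synapse.module_api import ProfileInfo

# Hypothetical module-level bookkeeping, keyed by user ID.
_pending_erasures: Dict[str, List[ProfileInfo]] = {}


async def on_profile_update(
    user_id: str, new_profile: ProfileInfo, by_admin: bool, deactivation: bool
) -> None:
    if deactivation:
        # During an erasing deactivation this fires twice (avatar URL wipe,
        # then display name wipe) before the status-change callback below.
        _pending_erasures.setdefault(user_id, []).append(new_profile)


async def on_user_deactivation_status_changed(
    user_id: str, deactivated: bool, by_admin: bool
) -> None:
    if deactivated:
        # Collect the profile wipes recorded above (empty if no erasure).
        wiped = _pending_erasures.pop(user_id, [])
        print(f"{user_id} deactivated; saw {len(wiped)} profile wipes")
```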
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index dd27f0accc70..6554c0d3c209 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -71,6 +71,8 @@ def __init__(self, hs: "HomeServer"): self.server_name = hs.config.server.server_name + self._third_party_rules = hs.get_third_party_event_rules() + if hs.config.worker.run_background_tasks: self.clock.looping_call( self._update_remote_profile_cache, self.PROFILE_UPDATE_MS @@ -171,6 +173,7 @@ async def set_displayname( requester: Requester, new_displayname: str, by_admin: bool = False, + deactivation: bool = False, ) -> None: """Set the displayname of a user @@ -179,6 +182,7 @@ async def set_displayname( requester: The user attempting to make this change. new_displayname: The displayname to give this user. by_admin: Whether this change was made by an administrator. + deactivation: Whether this change was made while deactivating the user. """ if not self.hs.is_mine(target_user): raise SynapseError(400, "User is not hosted on this homeserver") @@ -227,6 +231,10 @@ async def set_displayname( target_user.to_string(), profile ) + await self._third_party_rules.on_profile_update( + target_user.to_string(), profile, by_admin, deactivation + ) + await self._update_join_states(requester, target_user) async def get_avatar_url(self, target_user: UserID) -> Optional[str]: @@ -261,6 +269,7 @@ async def set_avatar_url( requester: Requester, new_avatar_url: str, by_admin: bool = False, + deactivation: bool = False, ) -> None: """Set a new avatar URL for a user. @@ -269,6 +278,7 @@ async def set_avatar_url( requester: The user attempting to make this change. new_avatar_url: The avatar URL to give this user. by_admin: Whether this change was made by an administrator. + deactivation: Whether this change was made while deactivating the user. 
""" if not self.hs.is_mine(target_user): raise SynapseError(400, "User is not hosted on this homeserver") @@ -315,6 +325,10 @@ async def set_avatar_url( target_user.to_string(), profile ) + await self._third_party_rules.on_profile_update( + target_user.to_string(), profile, by_admin, deactivation + ) + await self._update_join_states(requester, target_user) @cached() diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 902916d80056..7e469318695e 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -145,6 +145,7 @@ "JsonDict", "EventBase", "StateMap", + "ProfileInfo", ] logger = logging.getLogger(__name__) diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index 9cca9edd3026..bfc04785b7b2 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -15,12 +15,12 @@ from typing import TYPE_CHECKING, Dict, Optional, Tuple from unittest.mock import Mock -from synapse.api.constants import EventTypes, Membership +from synapse.api.constants import EventTypes, LoginType, Membership from synapse.api.errors import SynapseError from synapse.events import EventBase from synapse.events.third_party_rules import load_legacy_third_party_event_rules from synapse.rest import admin -from synapse.rest.client import login, room +from synapse.rest.client import account, login, profile, room from synapse.types import JsonDict, Requester, StateMap from synapse.util.frozenutils import unfreeze @@ -80,6 +80,8 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): admin.register_servlets, login.register_servlets, room.register_servlets, + profile.register_servlets, + account.register_servlets, ] def make_homeserver(self, reactor, clock): @@ -530,3 +532,216 @@ def _update_power_levels(self, event_default: int = 0): }, tok=self.tok, ) + + def test_on_profile_update(self): + """Tests that the on_profile_update module callback is correctly called on + profile updates. + """ + displayname = "Foo" + avatar_url = "mxc://matrix.org/oWQDvfewxmlRaRCkVbfetyEo" + + # Register a mock callback. + m = Mock(return_value=make_awaitable(None)) + self.hs.get_third_party_event_rules()._on_profile_update_callbacks.append(m) + + # Change the display name. + channel = self.make_request( + "PUT", + "/_matrix/client/v3/profile/%s/displayname" % self.user_id, + {"displayname": displayname}, + access_token=self.tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Check that the callback has been called once for our user. + m.assert_called_once() + args = m.call_args[0] + self.assertEqual(args[0], self.user_id) + + # Test that by_admin is False. + self.assertFalse(args[2]) + # Test that deactivation is False. + self.assertFalse(args[3]) + + # Check that we've got the right profile data. + profile_info = args[1] + self.assertEqual(profile_info.display_name, displayname) + self.assertIsNone(profile_info.avatar_url) + + # Change the avatar. + channel = self.make_request( + "PUT", + "/_matrix/client/v3/profile/%s/avatar_url" % self.user_id, + {"avatar_url": avatar_url}, + access_token=self.tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Check that the callback has been called once for our user. + self.assertEqual(m.call_count, 2) + args = m.call_args[0] + self.assertEqual(args[0], self.user_id) + + # Test that by_admin is False. + self.assertFalse(args[2]) + # Test that deactivation is False. 
+ self.assertFalse(args[3]) + + # Check that we've got the right profile data. + profile_info = args[1] + self.assertEqual(profile_info.display_name, displayname) + self.assertEqual(profile_info.avatar_url, avatar_url) + + def test_on_profile_update_admin(self): + """Tests that the on_profile_update module callback is correctly called on + profile updates triggered by a server admin. + """ + displayname = "Foo" + avatar_url = "mxc://matrix.org/oWQDvfewxmlRaRCkVbfetyEo" + + # Register a mock callback. + m = Mock(return_value=make_awaitable(None)) + self.hs.get_third_party_event_rules()._on_profile_update_callbacks.append(m) + + # Register an admin user. + self.register_user("admin", "password", admin=True) + admin_tok = self.login("admin", "password") + + # Change a user's profile. + channel = self.make_request( + "PUT", + "/_synapse/admin/v2/users/%s" % self.user_id, + {"displayname": displayname, "avatar_url": avatar_url}, + access_token=admin_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Check that the callback has been called twice (since we update the display name + # and avatar separately). + self.assertEqual(m.call_count, 2) + + # Get the arguments for the last call and check it's about the right user. + args = m.call_args[0] + self.assertEqual(args[0], self.user_id) + + # Check that by_admin is True. + self.assertTrue(args[2]) + # Test that deactivation is False. + self.assertFalse(args[3]) + + # Check that we've got the right profile data. + profile_info = args[1] + self.assertEqual(profile_info.display_name, displayname) + self.assertEqual(profile_info.avatar_url, avatar_url) + + def test_on_user_deactivation_status_changed(self): + """Tests that the on_user_deactivation_status_changed module callback is called + correctly when processing a user's deactivation. + """ + # Register a mocked callback. + deactivation_mock = Mock(return_value=make_awaitable(None)) + third_party_rules = self.hs.get_third_party_event_rules() + third_party_rules._on_user_deactivation_status_changed_callbacks.append( + deactivation_mock, + ) + # Also register a mocked callback for profile updates, to check that the + # deactivation code calls it in a way that let modules know the user is being + # deactivated. + profile_mock = Mock(return_value=make_awaitable(None)) + self.hs.get_third_party_event_rules()._on_profile_update_callbacks.append( + profile_mock, + ) + + # Register a user that we'll deactivate. + user_id = self.register_user("altan", "password") + tok = self.login("altan", "password") + + # Deactivate that user. + channel = self.make_request( + "POST", + "/_matrix/client/v3/account/deactivate", + { + "auth": { + "type": LoginType.PASSWORD, + "password": "password", + "identifier": { + "type": "m.id.user", + "user": user_id, + }, + }, + "erase": True, + }, + access_token=tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Check that the mock was called once. + deactivation_mock.assert_called_once() + args = deactivation_mock.call_args[0] + + # Check that the mock was called with the right user ID, and with a True + # deactivated flag and a False by_admin flag. + self.assertEqual(args[0], user_id) + self.assertTrue(args[1]) + self.assertFalse(args[2]) + + # Check that the profile update callback was called twice (once for the display + # name and once for the avatar URL), and that the "deactivation" boolean is true. 
+ self.assertEqual(profile_mock.call_count, 2) + args = profile_mock.call_args[0] + self.assertTrue(args[3]) + + def test_on_user_deactivation_status_changed_admin(self): + """Tests that the on_user_deactivation_status_changed module callback is called + correctly when processing a user's deactivation triggered by a server admin as + well as a reactivation. + """ + # Register a mock callback. + m = Mock(return_value=make_awaitable(None)) + third_party_rules = self.hs.get_third_party_event_rules() + third_party_rules._on_user_deactivation_status_changed_callbacks.append(m) + + # Register an admin user. + self.register_user("admin", "password", admin=True) + admin_tok = self.login("admin", "password") + + # Register a user that we'll deactivate. + user_id = self.register_user("altan", "password") + + # Deactivate the user. + channel = self.make_request( + "PUT", + "/_synapse/admin/v2/users/%s" % user_id, + {"deactivated": True}, + access_token=admin_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Check that the mock was called once. + m.assert_called_once() + args = m.call_args[0] + + # Check that the mock was called with the right user ID, and with True deactivated + # and by_admin flags. + self.assertEqual(args[0], user_id) + self.assertTrue(args[1]) + self.assertTrue(args[2]) + + # Reactivate the user. + channel = self.make_request( + "PUT", + "/_synapse/admin/v2/users/%s" % user_id, + {"deactivated": False, "password": "hackme"}, + access_token=admin_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Check that the mock was called once. + self.assertEqual(m.call_count, 2) + args = m.call_args[0] + + # Check that the mock was called with the right user ID, and with a False + # deactivated flag and a True by_admin flag. + self.assertEqual(args[0], user_id) + self.assertFalse(args[1]) + self.assertTrue(args[2]) From 4d6b6c17c860a6ef258e513d841dbda6ea151cbd Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Tue, 1 Mar 2022 15:27:15 +0000 Subject: [PATCH 320/474] Fix rare error in `ReadWriteLock` when writers complete immediately (#12105) Signed-off-by: Sean Quah --- changelog.d/12105.bugfix | 1 + synapse/util/async_helpers.py | 5 ++++- tests/util/test_rwlock.py | 30 ++++++++++++++++++++++++++++++ 3 files changed, 35 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12105.bugfix diff --git a/changelog.d/12105.bugfix b/changelog.d/12105.bugfix new file mode 100644 index 000000000000..f42e63e01fb7 --- /dev/null +++ b/changelog.d/12105.bugfix @@ -0,0 +1 @@ +Fix an extremely rare, long-standing bug in `ReadWriteLock` that would cause an error when a newly unblocked writer completes instantly. diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 81320b89721e..60c03a66fd16 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -555,7 +555,10 @@ def _ctx_manager() -> Iterator[None]: finally: with PreserveLoggingContext(): new_defer.callback(None) - if self.key_to_current_writer[key] == new_defer: + # `self.key_to_current_writer[key]` may be missing if there was another + # writer waiting for us and it completed entirely within the + # `new_defer.callback()` call above. 
+            if self.key_to_current_writer.get(key) == new_defer:
                 self.key_to_current_writer.pop(key)
 
         return _ctx_manager()
diff --git a/tests/util/test_rwlock.py b/tests/util/test_rwlock.py
index a10071c70fc9..0774625b8551 100644
--- a/tests/util/test_rwlock.py
+++ b/tests/util/test_rwlock.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 
 from twisted.internet import defer
+from twisted.internet.defer import Deferred
 
 from synapse.util.async_helpers import ReadWriteLock
 
@@ -83,3 +84,32 @@ def test_rwlock(self):
         self.assertTrue(d.called)
         with d.result:
             pass
+
+    def test_lock_handoff_to_nonblocking_writer(self):
+        """Test a writer handing the lock to another writer that completes instantly."""
+        rwlock = ReadWriteLock()
+        key = "key"
+
+        unblock: "Deferred[None]" = Deferred()
+
+        async def blocking_write():
+            with await rwlock.write(key):
+                await unblock
+
+        async def nonblocking_write():
+            with await rwlock.write(key):
+                pass
+
+        d1 = defer.ensureDeferred(blocking_write())
+        d2 = defer.ensureDeferred(nonblocking_write())
+        self.assertFalse(d1.called)
+        self.assertFalse(d2.called)
+
+        # Unblock the first writer. The second writer will complete without blocking.
+        unblock.callback(None)
+        self.assertTrue(d1.called)
+        self.assertTrue(d2.called)
+
+        # The `ReadWriteLock` should operate as normal.
+        d3 = defer.ensureDeferred(nonblocking_write())
+        self.assertTrue(d3.called)
From 313581e4e9bc2ec3d59ccff86e3a0c02661f71c4 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Tue, 1 Mar 2022 17:44:41 +0000
Subject: [PATCH 321/474] Use importlib.metadata to read requirements (#12088)

* Pull runtime dep checks into their own module

* Reimplement `check_requirements` using `importlib`

I've tried to make this clearer. We start by working out which of
Synapse's requirements we need to be installed here and now. I was
surprised that there wasn't an easier way to see which packages were
installed by a given extra.

I've pulled out the error messages into functions that deal with
"is this for an extra or not". And I've rearranged the loop over two
different sets of requirements into one loop with a "must be installed"
flag.

I hope you agree that this is clearer.

* Test cases
---
 changelog.d/12088.misc                |   1 +
 synapse/app/__init__.py               |   6 +-
 synapse/app/homeserver.py             |   2 +-
 synapse/config/cache.py               |   2 +-
 synapse/config/metrics.py             |   2 +-
 synapse/config/oidc.py                |   2 +-
 synapse/config/redis.py               |   2 +-
 synapse/config/repository.py          |   2 +-
 synapse/config/saml2.py               |   2 +-
 synapse/config/tracer.py              |   2 +-
 synapse/python_dependencies.py        | 107 +---------------------
 synapse/util/check_dependencies.py    | 127 ++++++++++++++++++++++++++
 tests/util/test_check_dependencies.py |  95 +++++++++++++++++++
 13 files changed, 237 insertions(+), 115 deletions(-)
 create mode 100644 changelog.d/12088.misc
 create mode 100644 synapse/util/check_dependencies.py
 create mode 100644 tests/util/test_check_dependencies.py

diff --git a/changelog.d/12088.misc b/changelog.d/12088.misc
new file mode 100644
index 000000000000..ce4213650c5e
--- /dev/null
+++ b/changelog.d/12088.misc
@@ -0,0 +1 @@
+Inspect application dependencies using `importlib.metadata` or its backport.
\ No newline at end of file diff --git a/synapse/app/__init__.py b/synapse/app/__init__.py index ee51480a9e64..334c3d2c178d 100644 --- a/synapse/app/__init__.py +++ b/synapse/app/__init__.py @@ -15,13 +15,13 @@ import sys from typing import Container -from synapse import python_dependencies # noqa: E402 +from synapse.util import check_dependencies logger = logging.getLogger(__name__) try: - python_dependencies.check_requirements() -except python_dependencies.DependencyException as e: + check_dependencies.check_requirements() +except check_dependencies.DependencyException as e: sys.stderr.writelines( e.message # noqa: B306, DependencyException.message is a property ) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index b9931001c25e..a6789a840ede 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -59,7 +59,6 @@ from synapse.http.site import SynapseSite from synapse.logging.context import LoggingContext from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy -from synapse.python_dependencies import check_requirements from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory from synapse.rest import ClientRestResource @@ -70,6 +69,7 @@ from synapse.rest.well_known import well_known_resource from synapse.server import HomeServer from synapse.storage import DataStore +from synapse.util.check_dependencies import check_requirements from synapse.util.httpresourcetree import create_resource_tree from synapse.util.module_loader import load_module diff --git a/synapse/config/cache.py b/synapse/config/cache.py index 387ac6d115e3..9a68da9c336a 100644 --- a/synapse/config/cache.py +++ b/synapse/config/cache.py @@ -20,7 +20,7 @@ import attr -from synapse.python_dependencies import DependencyException, check_requirements +from synapse.util.check_dependencies import DependencyException, check_requirements from ._base import Config, ConfigError diff --git a/synapse/config/metrics.py b/synapse/config/metrics.py index 1cc26e757812..f62292ecf6cb 100644 --- a/synapse/config/metrics.py +++ b/synapse/config/metrics.py @@ -15,7 +15,7 @@ import attr -from synapse.python_dependencies import DependencyException, check_requirements +from synapse.util.check_dependencies import DependencyException, check_requirements from ._base import Config, ConfigError diff --git a/synapse/config/oidc.py b/synapse/config/oidc.py index e783b1131501..f7e4f9ef22b1 100644 --- a/synapse/config/oidc.py +++ b/synapse/config/oidc.py @@ -20,11 +20,11 @@ from synapse.config._util import validate_config from synapse.config.sso import SsoAttributeRequirement -from synapse.python_dependencies import DependencyException, check_requirements from synapse.types import JsonDict from synapse.util.module_loader import load_module from synapse.util.stringutils import parse_and_validate_mxc_uri +from ..util.check_dependencies import DependencyException, check_requirements from ._base import Config, ConfigError, read_file DEFAULT_USER_MAPPING_PROVIDER = "synapse.handlers.oidc.JinjaOidcMappingProvider" diff --git a/synapse/config/redis.py b/synapse/config/redis.py index 33104af73472..bdb1aac3a218 100644 --- a/synapse/config/redis.py +++ b/synapse/config/redis.py @@ -13,7 +13,7 @@ # limitations under the License. 
from synapse.config._base import Config -from synapse.python_dependencies import check_requirements +from synapse.util.check_dependencies import check_requirements class RedisConfig(Config): diff --git a/synapse/config/repository.py b/synapse/config/repository.py index 1980351e7711..0a0d901bfb65 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -20,8 +20,8 @@ import attr from synapse.config.server import DEFAULT_IP_RANGE_BLACKLIST, generate_ip_set -from synapse.python_dependencies import DependencyException, check_requirements from synapse.types import JsonDict +from synapse.util.check_dependencies import DependencyException, check_requirements from synapse.util.module_loader import load_module from ._base import Config, ConfigError diff --git a/synapse/config/saml2.py b/synapse/config/saml2.py index ec9d9f65e7b5..43c456d5c675 100644 --- a/synapse/config/saml2.py +++ b/synapse/config/saml2.py @@ -17,8 +17,8 @@ from typing import Any, List, Set from synapse.config.sso import SsoAttributeRequirement -from synapse.python_dependencies import DependencyException, check_requirements from synapse.types import JsonDict +from synapse.util.check_dependencies import DependencyException, check_requirements from synapse.util.module_loader import load_module, load_python_module from ._base import Config, ConfigError diff --git a/synapse/config/tracer.py b/synapse/config/tracer.py index 21b9a88353a7..7aff618ea660 100644 --- a/synapse/config/tracer.py +++ b/synapse/config/tracer.py @@ -14,7 +14,7 @@ from typing import Set -from synapse.python_dependencies import DependencyException, check_requirements +from synapse.util.check_dependencies import DependencyException, check_requirements from ._base import Config, ConfigError diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index f43fbb5842fa..8f48a33936c1 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -17,14 +17,7 @@ import itertools import logging -from typing import List, Set - -from pkg_resources import ( - DistributionNotFound, - Requirement, - VersionConflict, - get_provider, -) +from typing import Set logger = logging.getLogger(__name__) @@ -90,6 +83,8 @@ # ijson 3.1.4 fixes a bug with "." 
in property names "ijson>=3.1.4", "matrix-common~=1.1.0", + # For runtime introspection of our dependencies + "packaging~=21.3", ] CONDITIONAL_REQUIREMENTS = { @@ -144,102 +139,6 @@ def list_requirements(): return list(set(REQUIREMENTS) | ALL_OPTIONAL_REQUIREMENTS) -class DependencyException(Exception): - @property - def message(self): - return "\n".join( - [ - "Missing Requirements: %s" % (", ".join(self.dependencies),), - "To install run:", - " pip install --upgrade --force %s" % (" ".join(self.dependencies),), - "", - ] - ) - - @property - def dependencies(self): - for i in self.args[0]: - yield '"' + i + '"' - - -def check_requirements(for_feature=None): - deps_needed = [] - errors = [] - - if for_feature: - reqs = CONDITIONAL_REQUIREMENTS[for_feature] - else: - reqs = REQUIREMENTS - - for dependency in reqs: - try: - _check_requirement(dependency) - except VersionConflict as e: - deps_needed.append(dependency) - errors.append( - "Needed %s, got %s==%s" - % ( - dependency, - e.dist.project_name, # type: ignore[attr-defined] # noqa - e.dist.version, # type: ignore[attr-defined] # noqa - ) - ) - except DistributionNotFound: - deps_needed.append(dependency) - if for_feature: - errors.append( - "Needed %s for the '%s' feature but it was not installed" - % (dependency, for_feature) - ) - else: - errors.append("Needed %s but it was not installed" % (dependency,)) - - if not for_feature: - # Check the optional dependencies are up to date. We allow them to not be - # installed. - OPTS: List[str] = sum(CONDITIONAL_REQUIREMENTS.values(), []) - - for dependency in OPTS: - try: - _check_requirement(dependency) - except VersionConflict as e: - deps_needed.append(dependency) - errors.append( - "Needed optional %s, got %s==%s" - % ( - dependency, - e.dist.project_name, # type: ignore[attr-defined] # noqa - e.dist.version, # type: ignore[attr-defined] # noqa - ) - ) - except DistributionNotFound: - # If it's not found, we don't care - pass - - if deps_needed: - for err in errors: - logging.error(err) - - raise DependencyException(deps_needed) - - -def _check_requirement(dependency_string): - """Parses a dependency string, and checks if the specified requirement is installed - - Raises: - VersionConflict if the requirement is installed, but with the the wrong version - DistributionNotFound if nothing is found to provide the requirement - """ - req = Requirement.parse(dependency_string) - - # first check if the markers specify that this requirement needs installing - if req.marker is not None and not req.marker.evaluate(): - # not required for this environment - return - - get_provider(req) - - if __name__ == "__main__": import sys diff --git a/synapse/util/check_dependencies.py b/synapse/util/check_dependencies.py new file mode 100644 index 000000000000..3a1f6b3c752b --- /dev/null +++ b/synapse/util/check_dependencies.py @@ -0,0 +1,127 @@ +import logging +from typing import Iterable, NamedTuple, Optional + +from packaging.requirements import Requirement + +DISTRIBUTION_NAME = "matrix-synapse" + +try: + from importlib import metadata +except ImportError: + import importlib_metadata as metadata # type: ignore[no-redef] + + +class DependencyException(Exception): + @property + def message(self) -> str: + return "\n".join( + [ + "Missing Requirements: %s" % (", ".join(self.dependencies),), + "To install run:", + " pip install --upgrade --force %s" % (" ".join(self.dependencies),), + "", + ] + ) + + @property + def dependencies(self) -> Iterable[str]: + for i in self.args[0]: + yield '"' + i + '"' + + 
+EXTRAS = set(metadata.metadata(DISTRIBUTION_NAME).get_all("Provides-Extra"))
+
+
+class Dependency(NamedTuple):
+    requirement: Requirement
+    must_be_installed: bool
+
+
+def _generic_dependencies() -> Iterable[Dependency]:
+    """Yield pairs (requirement, must_be_installed)."""
+    requirements = metadata.requires(DISTRIBUTION_NAME)
+    assert requirements is not None
+    for raw_requirement in requirements:
+        req = Requirement(raw_requirement)
+        # https://packaging.pypa.io/en/latest/markers.html#usage notes that
+        # > Evaluating an extra marker with no environment is an error
+        # so we pass in a dummy empty extra value here.
+        must_be_installed = req.marker is None or req.marker.evaluate({"extra": ""})
+        yield Dependency(req, must_be_installed)
+
+
+def _dependencies_for_extra(extra: str) -> Iterable[Dependency]:
+    """Yield additional dependencies needed for a given `extra`."""
+    requirements = metadata.requires(DISTRIBUTION_NAME)
+    assert requirements is not None
+    for raw_requirement in requirements:
+        req = Requirement(raw_requirement)
+        # Exclude mandatory deps by only selecting deps needed with this extra.
+        if (
+            req.marker is not None
+            and req.marker.evaluate({"extra": extra})
+            and not req.marker.evaluate({"extra": ""})
+        ):
+            yield Dependency(req, True)
+
+
+def _not_installed(requirement: Requirement, extra: Optional[str] = None) -> str:
+    if extra:
+        return f"Need {requirement.name} for {extra}, but it is not installed"
+    else:
+        return f"Need {requirement.name}, but it is not installed"
+
+
+def _incorrect_version(
+    requirement: Requirement, got: str, extra: Optional[str] = None
+) -> str:
+    if extra:
+        return f"Need {requirement} for {extra}, but got {requirement.name}=={got}"
+    else:
+        return f"Need {requirement}, but got {requirement.name}=={got}"
+
+
+def check_requirements(extra: Optional[str] = None) -> None:
+    """Check Synapse's dependencies are present and correctly versioned.
+
+    If provided, `extra` must be the name of a packaging extra (e.g. "saml2" in
+    `pip install matrix-synapse[saml2]`).
+
+    If `extra` is None, this function checks that
+    - all mandatory dependencies are installed and correctly versioned, and
+    - each optional dependency that's installed is correctly versioned.
+
+    If `extra` is not None, this function checks that
+    - the dependencies needed for that extra are installed and correctly versioned.
+
+    :raises DependencyException: if a dependency is missing or incorrectly versioned.
+    :raises ValueError: if this extra does not exist.
+    """
+    # First work out which dependencies are required, and which are optional.
+ if extra is None: + dependencies = _generic_dependencies() + elif extra in EXTRAS: + dependencies = _dependencies_for_extra(extra) + else: + raise ValueError(f"Synapse does not provide the feature '{extra}'") + + deps_unfulfilled = [] + errors = [] + + for (requirement, must_be_installed) in dependencies: + try: + dist: metadata.Distribution = metadata.distribution(requirement.name) + except metadata.PackageNotFoundError: + if must_be_installed: + deps_unfulfilled.append(requirement.name) + errors.append(_not_installed(requirement, extra)) + else: + if not requirement.specifier.contains(dist.version): + deps_unfulfilled.append(requirement.name) + errors.append(_incorrect_version(requirement, dist.version, extra)) + + if deps_unfulfilled: + for err in errors: + logging.error(err) + + raise DependencyException(deps_unfulfilled) diff --git a/tests/util/test_check_dependencies.py b/tests/util/test_check_dependencies.py new file mode 100644 index 000000000000..3c072522525a --- /dev/null +++ b/tests/util/test_check_dependencies.py @@ -0,0 +1,95 @@ +from contextlib import contextmanager +from typing import Generator, Optional +from unittest.mock import patch + +from synapse.util.check_dependencies import ( + DependencyException, + check_requirements, + metadata, +) + +from tests.unittest import TestCase + + +class DummyDistribution(metadata.Distribution): + def __init__(self, version: str): + self._version = version + + @property + def version(self): + return self._version + + def locate_file(self, path): + raise NotImplementedError() + + def read_text(self, filename): + raise NotImplementedError() + + +old = DummyDistribution("0.1.2") +new = DummyDistribution("1.2.3") + +# could probably use stdlib TestCase --- no need for twisted here + + +class TestDependencyChecker(TestCase): + @contextmanager + def mock_installed_package( + self, distribution: Optional[DummyDistribution] + ) -> Generator[None, None, None]: + """Pretend that looking up any distribution yields the given `distribution`.""" + + def mock_distribution(name: str): + if distribution is None: + raise metadata.PackageNotFoundError + else: + return distribution + + with patch( + "synapse.util.check_dependencies.metadata.distribution", + mock_distribution, + ): + yield + + def test_mandatory_dependency(self) -> None: + """Complain if a required package is missing or old.""" + with patch( + "synapse.util.check_dependencies.metadata.requires", + return_value=["dummypkg >= 1"], + ): + with self.mock_installed_package(None): + self.assertRaises(DependencyException, check_requirements) + with self.mock_installed_package(old): + self.assertRaises(DependencyException, check_requirements) + with self.mock_installed_package(new): + # should not raise + check_requirements() + + def test_generic_check_of_optional_dependency(self) -> None: + """Complain if an optional package is old.""" + with patch( + "synapse.util.check_dependencies.metadata.requires", + return_value=["dummypkg >= 1; extra == 'cool-extra'"], + ): + with self.mock_installed_package(None): + # should not raise + check_requirements() + with self.mock_installed_package(old): + self.assertRaises(DependencyException, check_requirements) + with self.mock_installed_package(new): + # should not raise + check_requirements() + + def test_check_for_extra_dependencies(self) -> None: + """Complain if a package required for an extra is missing or old.""" + with patch( + "synapse.util.check_dependencies.metadata.requires", + return_value=["dummypkg >= 1; extra == 'cool-extra'"], + ), 
patch("synapse.util.check_dependencies.EXTRAS", {"cool-extra"}): + with self.mock_installed_package(None): + self.assertRaises(DependencyException, check_requirements, "cool-extra") + with self.mock_installed_package(old): + self.assertRaises(DependencyException, check_requirements, "cool-extra") + with self.mock_installed_package(new): + # should not raise + check_requirements() From 5f62a094de10b4c4382908231128dace833a1195 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 1 Mar 2022 19:47:02 +0000 Subject: [PATCH 322/474] Detox, part 1 of N (#12119) * Don't use `tox` for `check-sampleconfig` * Don't use `tox` for check-newsfragment --- .github/workflows/tests.yml | 13 ++++++++++--- changelog.d/12119.misc | 1 + scripts-dev/check-newsfragment | 2 +- tox.ini | 10 ---------- 4 files changed, 12 insertions(+), 14 deletions(-) create mode 100644 changelog.d/12119.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index bbf1033bdd2c..e9e427732239 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -10,12 +10,19 @@ concurrency: cancel-in-progress: true jobs: + check-sampleconfig: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + - run: pip install -e . + - run: scripts-dev/generate_sample_config --check + lint: runs-on: ubuntu-latest strategy: matrix: toxenv: - - "check-sampleconfig" - "check_codestyle" - "check_isort" - "mypy" @@ -43,7 +50,7 @@ jobs: ref: ${{ github.event.pull_request.head.sha }} fetch-depth: 0 - uses: actions/setup-python@v2 - - run: pip install tox + - run: "pip install 'towncrier>=18.6.0rc1'" - run: scripts-dev/check-newsfragment env: PULL_REQUEST_NUMBER: ${{ github.event.number }} @@ -51,7 +58,7 @@ jobs: # Dummy step to gate other tests on without repeating the whole list linting-done: if: ${{ !cancelled() }} # Run this even if prior jobs were skipped - needs: [lint, lint-crlf, lint-newsfile] + needs: [lint, lint-crlf, lint-newsfile, check-sampleconfig] runs-on: ubuntu-latest steps: - run: "true" diff --git a/changelog.d/12119.misc b/changelog.d/12119.misc new file mode 100644 index 000000000000..f02d140f3871 --- /dev/null +++ b/changelog.d/12119.misc @@ -0,0 +1 @@ +Move CI checks out of tox, to facilitate a move to using poetry. \ No newline at end of file diff --git a/scripts-dev/check-newsfragment b/scripts-dev/check-newsfragment index c764011d6adb..493558ad651b 100755 --- a/scripts-dev/check-newsfragment +++ b/scripts-dev/check-newsfragment @@ -35,7 +35,7 @@ CONTRIBUTING_GUIDE_TEXT="!! 
Please see the contributing guide for help writing y https://github.com/matrix-org/synapse/blob/develop/CONTRIBUTING.md#changelog" # If check-newsfragment returns a non-zero exit code, print the contributing guide and exit -tox -qe check-newsfragment || (echo -e "$CONTRIBUTING_GUIDE_TEXT" >&2 && exit 1) +python -m towncrier.check --compare-with=origin/develop || (echo -e "$CONTRIBUTING_GUIDE_TEXT" >&2 && exit 1) echo echo "--------------------------" diff --git a/tox.ini b/tox.ini index 436ecf7552f7..04b972e2c58c 100644 --- a/tox.ini +++ b/tox.ini @@ -168,16 +168,6 @@ commands = extras = lint commands = isort -c --df {[base]lint_targets} -[testenv:check-newsfragment] -skip_install = true -usedevelop = false -deps = towncrier>=18.6.0rc1 -commands = - python -m towncrier.check --compare-with=origin/develop - -[testenv:check-sampleconfig] -commands = {toxinidir}/scripts-dev/generate_sample_config --check - [testenv:combine] skip_install = true usedevelop = false From 8e56a1b73c9819ea4bddbe6a4734966e70b3b92c Mon Sep 17 00:00:00 2001 From: lukasdenk <63459921+lukasdenk@users.noreply.github.com> Date: Wed, 2 Mar 2022 11:35:34 +0100 Subject: [PATCH 323/474] Make get_room_version use cached get_room_version_id. (#11808) --- changelog.d/11808.misc | 1 + synapse/storage/databases/main/state.py | 27 ++++++++++++------------- tests/handlers/test_room_summary.py | 5 ++++- 3 files changed, 18 insertions(+), 15 deletions(-) create mode 100644 changelog.d/11808.misc diff --git a/changelog.d/11808.misc b/changelog.d/11808.misc new file mode 100644 index 000000000000..cdc5fc75b75b --- /dev/null +++ b/changelog.d/11808.misc @@ -0,0 +1 @@ +Make method `get_room_version` use cached `get_room_version_id`. diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index 2fb3e65192f5..417aef1dbcf3 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -42,6 +42,16 @@ MAX_STATE_DELTA_HOPS = 100 +def _retrieve_and_check_room_version(room_id: str, room_version_id: str) -> RoomVersion: + v = KNOWN_ROOM_VERSIONS.get(room_version_id) + if not v: + raise UnsupportedRoomVersionError( + "Room %s uses a room version %s which is no longer supported" + % (room_id, room_version_id) + ) + return v + + # this inherits from EventsWorkerStore because it calls self.get_events class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): """The parts of StateGroupStore that can be called from workers.""" @@ -62,11 +72,8 @@ async def get_room_version(self, room_id: str) -> RoomVersion: Typically this happens if support for the room's version has been removed from Synapse. """ - return await self.db_pool.runInteraction( - "get_room_version_txn", - self.get_room_version_txn, - room_id, - ) + room_version_id = await self.get_room_version_id(room_id) + return _retrieve_and_check_room_version(room_id, room_version_id) def get_room_version_txn( self, txn: LoggingTransaction, room_id: str @@ -82,15 +89,7 @@ def get_room_version_txn( removed from Synapse. 
""" room_version_id = self.get_room_version_id_txn(txn, room_id) - v = KNOWN_ROOM_VERSIONS.get(room_version_id) - - if not v: - raise UnsupportedRoomVersionError( - "Room %s uses a room version %s which is no longer supported" - % (room_id, room_version_id) - ) - - return v + return _retrieve_and_check_room_version(room_id, room_version_id) @cached(max_entries=10000) async def get_room_version_id(self, room_id: str) -> str: diff --git a/tests/handlers/test_room_summary.py b/tests/handlers/test_room_summary.py index b33ff94a3999..cff07a8973b3 100644 --- a/tests/handlers/test_room_summary.py +++ b/tests/handlers/test_room_summary.py @@ -658,7 +658,7 @@ def test_max_depth(self): def test_unknown_room_version(self): """ - If an room with an unknown room version is encountered it should not cause + If a room with an unknown room version is encountered it should not cause the entire summary to skip. """ # Poke the database and update the room version to an unknown one. @@ -670,6 +670,9 @@ def test_unknown_room_version(self): desc="updated-room-version", ) ) + # Invalidate method so that it returns the currently updated version + # instead of the cached version. + self.hs.get_datastores().main.get_room_version_id.invalidate((self.room,)) # The result should have only the space, along with a link from space -> room. expected = [(self.space, [self.room])] From c7b2f1ccdc412c4f5f07f4fe630d2c2040caf93d Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 2 Mar 2022 10:37:04 +0000 Subject: [PATCH 324/474] Back out in-flight state caching changes. (#12126) --- changelog.d/10870.misc | 1 - changelog.d/11608.misc | 1 - changelog.d/11610.misc | 1 - changelog.d/12033.misc | 1 - changelog.d/12126.removal | 1 + synapse/storage/databases/state/store.py | 243 ++--------- tests/storage/databases/test_state_store.py | 454 -------------------- 7 files changed, 26 insertions(+), 676 deletions(-) delete mode 100644 changelog.d/10870.misc delete mode 100644 changelog.d/11608.misc delete mode 100644 changelog.d/11610.misc delete mode 100644 changelog.d/12033.misc create mode 100644 changelog.d/12126.removal delete mode 100644 tests/storage/databases/test_state_store.py diff --git a/changelog.d/10870.misc b/changelog.d/10870.misc deleted file mode 100644 index 3af049b96961..000000000000 --- a/changelog.d/10870.misc +++ /dev/null @@ -1 +0,0 @@ -Deduplicate in-flight requests in `_get_state_for_groups`. diff --git a/changelog.d/11608.misc b/changelog.d/11608.misc deleted file mode 100644 index 3af049b96961..000000000000 --- a/changelog.d/11608.misc +++ /dev/null @@ -1 +0,0 @@ -Deduplicate in-flight requests in `_get_state_for_groups`. diff --git a/changelog.d/11610.misc b/changelog.d/11610.misc deleted file mode 100644 index 3af049b96961..000000000000 --- a/changelog.d/11610.misc +++ /dev/null @@ -1 +0,0 @@ -Deduplicate in-flight requests in `_get_state_for_groups`. diff --git a/changelog.d/12033.misc b/changelog.d/12033.misc deleted file mode 100644 index 3af049b96961..000000000000 --- a/changelog.d/12033.misc +++ /dev/null @@ -1 +0,0 @@ -Deduplicate in-flight requests in `_get_state_for_groups`. diff --git a/changelog.d/12126.removal b/changelog.d/12126.removal new file mode 100644 index 000000000000..8c8bf6ee7ede --- /dev/null +++ b/changelog.d/12126.removal @@ -0,0 +1 @@ +Back out in-flight state caching changes. 
\ No newline at end of file diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index dadf3d1e3ab3..7614d76ac646 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -13,24 +13,11 @@ # limitations under the License. import logging -from typing import ( - TYPE_CHECKING, - Collection, - Dict, - Iterable, - Optional, - Sequence, - Set, - Tuple, -) +from typing import TYPE_CHECKING, Collection, Dict, Iterable, List, Optional, Set, Tuple import attr -from sortedcontainers import SortedDict - -from twisted.internet import defer from synapse.api.constants import EventTypes -from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( DatabasePool, @@ -42,12 +29,6 @@ from synapse.storage.types import Cursor from synapse.storage.util.sequence import build_sequence_generator from synapse.types import MutableStateMap, StateKey, StateMap -from synapse.util import unwrapFirstError -from synapse.util.async_helpers import ( - AbstractObservableDeferred, - ObservableDeferred, - yieldable_gather_results, -) from synapse.util.caches.descriptors import cached from synapse.util.caches.dictionary_cache import DictionaryCache @@ -56,8 +37,8 @@ logger = logging.getLogger(__name__) + MAX_STATE_DELTA_HOPS = 100 -MAX_INFLIGHT_REQUESTS_PER_GROUP = 5 @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -73,24 +54,6 @@ def __len__(self) -> int: return len(self.delta_ids) if self.delta_ids else 0 -def state_filter_rough_priority_comparator( - state_filter: StateFilter, -) -> Tuple[int, int]: - """ - Returns a comparable value that roughly indicates the relative size of this - state filter compared to others. - 'Larger' state filters should sort first when using ascending order, so - this is essentially the opposite of 'size'. - It should be treated as a rough guide only and should not be interpreted to - have any particular meaning. The representation may also change - - The current implementation returns a tuple of the form: - * -1 for include_others, 0 otherwise - * -(number of entries in state_filter.types) - """ - return -int(state_filter.include_others), -len(state_filter.types) - - class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): """A data store for fetching/storing state groups.""" @@ -143,12 +106,6 @@ def __init__( 500000, ) - # Current ongoing get_state_for_groups in-flight requests - # {group ID -> {StateFilter -> ObservableDeferred}} - self._state_group_inflight_requests: Dict[ - int, SortedDict[StateFilter, AbstractObservableDeferred[StateMap[str]]] - ] = {} - def get_max_state_group_txn(txn: Cursor) -> int: txn.execute("SELECT COALESCE(max(id), 0) FROM state_groups") return txn.fetchone()[0] # type: ignore @@ -200,7 +157,7 @@ def _get_state_group_delta_txn(txn: LoggingTransaction) -> _GetStateGroupDelta: ) async def _get_state_groups_from_groups( - self, groups: Sequence[int], state_filter: StateFilter + self, groups: List[int], state_filter: StateFilter ) -> Dict[int, StateMap[str]]: """Returns the state groups for a given set of groups from the database, filtering on types of state events. 
@@ -271,170 +228,6 @@ def _get_state_for_group_using_cache( return state_filter.filter_state(state_dict_ids), not missing_types - def _get_state_for_group_gather_inflight_requests( - self, group: int, state_filter_left_over: StateFilter - ) -> Tuple[Sequence[AbstractObservableDeferred[StateMap[str]]], StateFilter]: - """ - Attempts to gather in-flight requests and re-use them to retrieve state - for the given state group, filtered with the given state filter. - - If there are more than MAX_INFLIGHT_REQUESTS_PER_GROUP in-flight requests, - and there *still* isn't enough information to complete the request by solely - reusing others, a full state filter will be requested to ensure that subsequent - requests can reuse this request. - - Used as part of _get_state_for_group_using_inflight_cache. - - Returns: - Tuple of two values: - A sequence of ObservableDeferreds to observe - A StateFilter representing what else needs to be requested to fulfill the request - """ - - inflight_requests = self._state_group_inflight_requests.get(group) - if inflight_requests is None: - # no requests for this group, need to retrieve it all ourselves - return (), state_filter_left_over - - # The list of ongoing requests which will help narrow the current request. - reusable_requests = [] - - # Iterate over existing requests in roughly biggest-first order. - for request_state_filter in inflight_requests: - request_deferred = inflight_requests[request_state_filter] - new_state_filter_left_over = state_filter_left_over.approx_difference( - request_state_filter - ) - if new_state_filter_left_over == state_filter_left_over: - # Reusing this request would not gain us anything, so don't bother. - continue - - reusable_requests.append(request_deferred) - state_filter_left_over = new_state_filter_left_over - if state_filter_left_over == StateFilter.none(): - # we have managed to collect enough of the in-flight requests - # to cover our StateFilter and give us the state we need. - break - - if ( - state_filter_left_over != StateFilter.none() - and len(inflight_requests) >= MAX_INFLIGHT_REQUESTS_PER_GROUP - ): - # There are too many requests for this group. - # To prevent even more from building up, we request the whole - # state filter to guarantee that we can be reused by any subsequent - # requests for this state group. - return (), StateFilter.all() - - return reusable_requests, state_filter_left_over - - async def _get_state_for_group_fire_request( - self, group: int, state_filter: StateFilter - ) -> StateMap[str]: - """ - Fires off a request to get the state at a state group, - potentially filtering by type and/or state key. - - This request will be tracked in the in-flight request cache and automatically - removed when it is finished. - - Used as part of _get_state_for_group_using_inflight_cache. 
- - Args: - group: ID of the state group for which we want to get state - state_filter: the state filter used to fetch state from the database - """ - cache_sequence_nm = self._state_group_cache.sequence - cache_sequence_m = self._state_group_members_cache.sequence - - # Help the cache hit ratio by expanding the filter a bit - db_state_filter = state_filter.return_expanded() - - async def _the_request() -> StateMap[str]: - group_to_state_dict = await self._get_state_groups_from_groups( - (group,), state_filter=db_state_filter - ) - - # Now let's update the caches - self._insert_into_cache( - group_to_state_dict, - db_state_filter, - cache_seq_num_members=cache_sequence_m, - cache_seq_num_non_members=cache_sequence_nm, - ) - - # Remove ourselves from the in-flight cache - group_request_dict = self._state_group_inflight_requests[group] - del group_request_dict[db_state_filter] - if not group_request_dict: - # If there are no more requests in-flight for this group, - # clean up the cache by removing the empty dictionary - del self._state_group_inflight_requests[group] - - return group_to_state_dict[group] - - # We don't immediately await the result, so must use run_in_background - # But we DO await the result before the current log context (request) - # finishes, so don't need to run it as a background process. - request_deferred = run_in_background(_the_request) - observable_deferred = ObservableDeferred(request_deferred, consumeErrors=True) - - # Insert the ObservableDeferred into the cache - group_request_dict = self._state_group_inflight_requests.setdefault( - group, SortedDict(state_filter_rough_priority_comparator) - ) - group_request_dict[db_state_filter] = observable_deferred - - return await make_deferred_yieldable(observable_deferred.observe()) - - async def _get_state_for_group_using_inflight_cache( - self, group: int, state_filter: StateFilter - ) -> MutableStateMap[str]: - """ - Gets the state at a state group, potentially filtering by type and/or - state key. - - 1. Calls _get_state_for_group_gather_inflight_requests to gather any - ongoing requests which might overlap with the current request. - 2. Fires a new request, using _get_state_for_group_fire_request, - for any state which cannot be gathered from ongoing requests. - - Args: - group: ID of the state group for which we want to get state - state_filter: the state filter used to fetch state from the database - Returns: - state map - """ - - # first, figure out whether we can re-use any in-flight requests - # (and if so, what would be left over) - ( - reusable_requests, - state_filter_left_over, - ) = self._get_state_for_group_gather_inflight_requests(group, state_filter) - - if state_filter_left_over != StateFilter.none(): - # Fetch remaining state - remaining = await self._get_state_for_group_fire_request( - group, state_filter_left_over - ) - assembled_state: MutableStateMap[str] = dict(remaining) - else: - assembled_state = {} - - gathered = await make_deferred_yieldable( - defer.gatherResults( - (r.observe() for r in reusable_requests), consumeErrors=True - ) - ).addErrback(unwrapFirstError) - - # assemble our result. - for result_piece in gathered: - assembled_state.update(result_piece) - - # Filter out any state that may be more than what we asked for. 
- return state_filter.filter_state(assembled_state) - async def _get_state_for_groups( self, groups: Iterable[int], state_filter: Optional[StateFilter] = None ) -> Dict[int, MutableStateMap[str]]: @@ -476,17 +269,31 @@ async def _get_state_for_groups( if not incomplete_groups: return state - async def get_from_cache(group: int, state_filter: StateFilter) -> None: - state[group] = await self._get_state_for_group_using_inflight_cache( - group, state_filter - ) + cache_sequence_nm = self._state_group_cache.sequence + cache_sequence_m = self._state_group_members_cache.sequence - await yieldable_gather_results( - get_from_cache, - incomplete_groups, - state_filter, + # Help the cache hit ratio by expanding the filter a bit + db_state_filter = state_filter.return_expanded() + + group_to_state_dict = await self._get_state_groups_from_groups( + list(incomplete_groups), state_filter=db_state_filter ) + # Now lets update the caches + self._insert_into_cache( + group_to_state_dict, + db_state_filter, + cache_seq_num_members=cache_sequence_m, + cache_seq_num_non_members=cache_sequence_nm, + ) + + # And finally update the result dict, by filtering out any extra + # stuff we pulled out of the database. + for group, group_state_dict in group_to_state_dict.items(): + # We just replace any existing entries, as we will have loaded + # everything we need from the database anyway. + state[group] = state_filter.filter_state(group_state_dict) + return state def _get_state_for_groups_using_cache( diff --git a/tests/storage/databases/test_state_store.py b/tests/storage/databases/test_state_store.py deleted file mode 100644 index 2b484c95a935..000000000000 --- a/tests/storage/databases/test_state_store.py +++ /dev/null @@ -1,454 +0,0 @@ -# Copyright 2022 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
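With the deduplication layer backed out, the restored `_get_state_for_groups` path above reduces to: widen the requested filter with `return_expanded()`, make one database query for all incomplete groups, populate the caches with the wide result, then narrow back down to what the caller asked for. Below is a toy sketch of that expand-then-filter flow; the helper names, the hard-coded widening, and the dict-based cache are hypothetical stand-ins for the real store and `StateFilter` methods:

    from typing import Dict, Set, Tuple

    StateKey = Tuple[str, str]      # (event type, state key)
    StateMap = Dict[StateKey, str]  # state key -> event id

    DATABASE: Dict[int, StateMap] = {
        42: {
            ("m.room.name", ""): "$name_event",
            ("m.room.member", "@alice:test"): "$join_event",
        }
    }
    CACHE: Dict[int, StateMap] = {}

    def fetch_groups(groups, wanted_types: Set[str]) -> Dict[int, StateMap]:
        # Stands in for _get_state_groups_from_groups: one query, filtered
        # by the (already widened) set of wanted event types.
        return {
            g: {k: v for k, v in DATABASE[g].items() if k[0] in wanted_types}
            for g in groups
        }

    def get_state(group: int, requested: Set[str]) -> StateMap:
        # Pretend return_expanded() widens the filter for cache hit ratio.
        expanded = requested | {"m.room.name"}
        result = fetch_groups([group], expanded)
        CACHE.update(result)  # stands in for _insert_into_cache
        # Narrow the wide result back down before returning it.
        return {k: v for k, v in result[group].items() if k[0] in requested}

    assert get_state(42, {"m.room.member"}) == {
        ("m.room.member", "@alice:test"): "$join_event"
    }
    assert ("m.room.name", "") in CACHE[42]  # the wider slice was cached

After this patch, concurrent callers for the same group each take this path independently (subject to the ordinary state-group caches) rather than observing one another's in-flight database queries, which is the behaviour the deleted tests below were exercising.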
-import typing -from typing import Dict, List, Sequence, Tuple -from unittest.mock import patch - -from parameterized import parameterized - -from twisted.internet.defer import Deferred, ensureDeferred -from twisted.test.proto_helpers import MemoryReactor - -from synapse.api.constants import EventTypes -from synapse.storage.databases.state.store import ( - MAX_INFLIGHT_REQUESTS_PER_GROUP, - state_filter_rough_priority_comparator, -) -from synapse.storage.state import StateFilter -from synapse.types import StateMap -from synapse.util import Clock - -from tests.unittest import HomeserverTestCase - -if typing.TYPE_CHECKING: - from synapse.server import HomeServer - -# StateFilter for ALL non-m.room.member state events -ALL_NON_MEMBERS_STATE_FILTER = StateFilter.freeze( - types={EventTypes.Member: set()}, - include_others=True, -) - -FAKE_STATE = { - (EventTypes.Member, "@alice:test"): "join", - (EventTypes.Member, "@bob:test"): "leave", - (EventTypes.Member, "@charlie:test"): "invite", - ("test.type", "a"): "AAA", - ("test.type", "b"): "BBB", - ("other.event.type", "state.key"): "123", -} - - -class StateGroupInflightCachingTestCase(HomeserverTestCase): - def prepare( - self, reactor: MemoryReactor, clock: Clock, homeserver: "HomeServer" - ) -> None: - self.state_storage = homeserver.get_storage().state - self.state_datastore = homeserver.get_datastores().state - # Patch out the `_get_state_groups_from_groups`. - # This is useful because it lets us pretend we have a slow database. - get_state_groups_patch = patch.object( - self.state_datastore, - "_get_state_groups_from_groups", - self._fake_get_state_groups_from_groups, - ) - get_state_groups_patch.start() - - self.addCleanup(get_state_groups_patch.stop) - self.get_state_group_calls: List[ - Tuple[Tuple[int, ...], StateFilter, Deferred[Dict[int, StateMap[str]]]] - ] = [] - - def _fake_get_state_groups_from_groups( - self, groups: Sequence[int], state_filter: StateFilter - ) -> "Deferred[Dict[int, StateMap[str]]]": - d: Deferred[Dict[int, StateMap[str]]] = Deferred() - self.get_state_group_calls.append((tuple(groups), state_filter, d)) - return d - - def _complete_request_fake( - self, - groups: Tuple[int, ...], - state_filter: StateFilter, - d: "Deferred[Dict[int, StateMap[str]]]", - ) -> None: - """ - Assemble a fake database response and complete the database request. - """ - - # Return a filtered copy of the fake state - d.callback({group: state_filter.filter_state(FAKE_STATE) for group in groups}) - - def test_duplicate_requests_deduplicated(self) -> None: - """ - Tests that duplicate requests for state are deduplicated. 
- - This test: - - requests some state (state group 42, 'all' state filter) - - requests it again, before the first request finishes - - checks to see that only one database query was made - - completes the database query - - checks that both requests see the same retrieved state - """ - req1 = ensureDeferred( - self.state_datastore._get_state_for_group_using_inflight_cache( - 42, StateFilter.all() - ) - ) - self.pump(by=0.1) - - # This should have gone to the database - self.assertEqual(len(self.get_state_group_calls), 1) - self.assertFalse(req1.called) - - req2 = ensureDeferred( - self.state_datastore._get_state_for_group_using_inflight_cache( - 42, StateFilter.all() - ) - ) - self.pump(by=0.1) - - # No more calls should have gone to the database - self.assertEqual(len(self.get_state_group_calls), 1) - self.assertFalse(req1.called) - self.assertFalse(req2.called) - - groups, sf, d = self.get_state_group_calls[0] - self.assertEqual(groups, (42,)) - self.assertEqual(sf, StateFilter.all()) - - # Now we can complete the request - self._complete_request_fake(groups, sf, d) - - self.assertEqual(self.get_success(req1), FAKE_STATE) - self.assertEqual(self.get_success(req2), FAKE_STATE) - - def test_smaller_request_deduplicated(self) -> None: - """ - Tests that duplicate requests for state are deduplicated. - - This test: - - requests some state (state group 42, 'all' state filter) - - requests a subset of that state, before the first request finishes - - checks to see that only one database query was made - - completes the database query - - checks that both requests see the correct retrieved state - """ - req1 = ensureDeferred( - self.state_datastore._get_state_for_group_using_inflight_cache( - 42, StateFilter.from_types((("test.type", None),)) - ) - ) - self.pump(by=0.1) - - # This should have gone to the database - self.assertEqual(len(self.get_state_group_calls), 1) - self.assertFalse(req1.called) - - req2 = ensureDeferred( - self.state_datastore._get_state_for_group_using_inflight_cache( - 42, StateFilter.from_types((("test.type", "b"),)) - ) - ) - self.pump(by=0.1) - - # No more calls should have gone to the database, because the second - # request was already in the in-flight cache! - self.assertEqual(len(self.get_state_group_calls), 1) - self.assertFalse(req1.called) - self.assertFalse(req2.called) - - groups, sf, d = self.get_state_group_calls[0] - self.assertEqual(groups, (42,)) - # The state filter is expanded internally for increased cache hit rate, - # so we the database sees a wider state filter than requested. - self.assertEqual(sf, ALL_NON_MEMBERS_STATE_FILTER) - - # Now we can complete the request - self._complete_request_fake(groups, sf, d) - - self.assertEqual( - self.get_success(req1), - {("test.type", "a"): "AAA", ("test.type", "b"): "BBB"}, - ) - self.assertEqual(self.get_success(req2), {("test.type", "b"): "BBB"}) - - def test_partially_overlapping_request_deduplicated(self) -> None: - """ - Tests that partially-overlapping requests are partially deduplicated. - - This test: - - requests a single type of wildcard state - (This is internally expanded to be all non-member state) - - requests the entire state in parallel - - checks to see that two database queries were made, but that the second - one is only for member state. - - completes the database queries - - checks that both requests have the correct result. 
- """ - - req1 = ensureDeferred( - self.state_datastore._get_state_for_group_using_inflight_cache( - 42, StateFilter.from_types((("test.type", None),)) - ) - ) - self.pump(by=0.1) - - # This should have gone to the database - self.assertEqual(len(self.get_state_group_calls), 1) - self.assertFalse(req1.called) - - req2 = ensureDeferred( - self.state_datastore._get_state_for_group_using_inflight_cache( - 42, StateFilter.all() - ) - ) - self.pump(by=0.1) - - # Because it only partially overlaps, this also went to the database - self.assertEqual(len(self.get_state_group_calls), 2) - self.assertFalse(req1.called) - self.assertFalse(req2.called) - - # First request: - groups, sf, d = self.get_state_group_calls[0] - self.assertEqual(groups, (42,)) - # The state filter is expanded internally for increased cache hit rate, - # so we the database sees a wider state filter than requested. - self.assertEqual(sf, ALL_NON_MEMBERS_STATE_FILTER) - self._complete_request_fake(groups, sf, d) - - # Second request: - groups, sf, d = self.get_state_group_calls[1] - self.assertEqual(groups, (42,)) - # The state filter is narrowed to only request membership state, because - # the remainder of the state is already being queried in the first request! - self.assertEqual( - sf, StateFilter.freeze({EventTypes.Member: None}, include_others=False) - ) - self._complete_request_fake(groups, sf, d) - - # Check the results are correct - self.assertEqual( - self.get_success(req1), - {("test.type", "a"): "AAA", ("test.type", "b"): "BBB"}, - ) - self.assertEqual(self.get_success(req2), FAKE_STATE) - - def test_in_flight_requests_stop_being_in_flight(self) -> None: - """ - Tests that in-flight request deduplication doesn't somehow 'hold on' - to completed requests: once they're done, they're taken out of the - in-flight cache. - """ - req1 = ensureDeferred( - self.state_datastore._get_state_for_group_using_inflight_cache( - 42, StateFilter.all() - ) - ) - self.pump(by=0.1) - - # This should have gone to the database - self.assertEqual(len(self.get_state_group_calls), 1) - self.assertFalse(req1.called) - - # Complete the request right away. - self._complete_request_fake(*self.get_state_group_calls[0]) - self.assertTrue(req1.called) - - # Send off another request - req2 = ensureDeferred( - self.state_datastore._get_state_for_group_using_inflight_cache( - 42, StateFilter.all() - ) - ) - self.pump(by=0.1) - - # It should have gone to the database again, because the previous request - # isn't in-flight and therefore isn't available for deduplication. - self.assertEqual(len(self.get_state_group_calls), 2) - self.assertFalse(req2.called) - - # Complete the request right away. - self._complete_request_fake(*self.get_state_group_calls[1]) - self.assertTrue(req2.called) - groups, sf, d = self.get_state_group_calls[0] - - self.assertEqual(self.get_success(req1), FAKE_STATE) - self.assertEqual(self.get_success(req2), FAKE_STATE) - - def test_inflight_requests_capped(self) -> None: - """ - Tests that the number of in-flight requests is capped to 5. - - - requests several pieces of state separately - (5 to hit the limit, 1 to 'shunt out', another that comes after the - group has been 'shunted out') - - checks to see that the torrent of requests is shunted out by - rewriting one of the filters as the 'all' state filter - - requests after that one do not cause any additional queries - """ - # 5 at the time of writing. - CAP_COUNT = MAX_INFLIGHT_REQUESTS_PER_GROUP - - reqs = [] - - # Request 7 different keys (1 to 7) of the `some.state` type. 
- for req_id in range(CAP_COUNT + 2): - reqs.append( - ensureDeferred( - self.state_datastore._get_state_for_group_using_inflight_cache( - 42, - StateFilter.freeze( - {"some.state": {str(req_id + 1)}}, include_others=False - ), - ) - ) - ) - self.pump(by=0.1) - - # There should only be 6 calls to the database, not 7. - self.assertEqual(len(self.get_state_group_calls), CAP_COUNT + 1) - - # Assert that the first 5 are exact requests for the individual pieces - # wanted - for req_id in range(CAP_COUNT): - groups, sf, d = self.get_state_group_calls[req_id] - self.assertEqual( - sf, - StateFilter.freeze( - {"some.state": {str(req_id + 1)}}, include_others=False - ), - ) - - # The 6th request should be the 'all' state filter - groups, sf, d = self.get_state_group_calls[CAP_COUNT] - self.assertEqual(sf, StateFilter.all()) - - # Complete the queries and check which requests complete as a result - for req_id in range(CAP_COUNT): - # This request should not have been completed yet - self.assertFalse(reqs[req_id].called) - - groups, sf, d = self.get_state_group_calls[req_id] - self._complete_request_fake(groups, sf, d) - - # This should have only completed this one request - self.assertTrue(reqs[req_id].called) - - # Now complete the final query; the last 2 requests should complete - # as a result - self.assertFalse(reqs[CAP_COUNT].called) - self.assertFalse(reqs[CAP_COUNT + 1].called) - groups, sf, d = self.get_state_group_calls[CAP_COUNT] - self._complete_request_fake(groups, sf, d) - self.assertTrue(reqs[CAP_COUNT].called) - self.assertTrue(reqs[CAP_COUNT + 1].called) - - @parameterized.expand([(False,), (True,)]) - def test_ordering_of_request_reuse(self, reverse: bool) -> None: - """ - Tests that 'larger' in-flight requests are ordered first. - - This is mostly a design decision in order to prevent a request from - hanging on to multiple queries when it would have been sufficient to - hang on to only one bigger query. - - The 'size' of a state filter is a rough heuristic. - - - requests two pieces of state, one 'larger' than the other, but each - spawning a query - - requests a third piece of state - - completes the larger of the first two queries - - checks that the third request gets completed (and doesn't needlessly - wait for the other query) - - Parameters: - reverse: whether to reverse the order of the initial requests, to ensure - that the effect doesn't depend on the order of request submission. - """ - - # We add in an extra state type to make sure that both requests spawn - # queries which are not optimised out. - state_filters = [ - StateFilter.freeze( - {"state.type": {"A"}, "other.state.type": {"a"}}, include_others=False - ), - StateFilter.freeze( - { - "state.type": None, - "other.state.type": {"b"}, - # The current rough size comparator uses the number of state types - # as an indicator of size. - # To influence it to make this state filter bigger than the previous one, - # we add another dummy state type. - "extra.state.type": {"c"}, - }, - include_others=False, - ), - ] - - if reverse: - # For fairness, we perform one test run with the list reversed. - state_filters.reverse() - smallest_state_filter_idx = 1 - biggest_state_filter_idx = 0 - else: - smallest_state_filter_idx = 0 - biggest_state_filter_idx = 1 - - # This assertion is for our own sanity more than anything else. 
- self.assertLess( - state_filter_rough_priority_comparator( - state_filters[biggest_state_filter_idx] - ), - state_filter_rough_priority_comparator( - state_filters[smallest_state_filter_idx] - ), - "Test invalid: bigger state filter is not actually bigger.", - ) - - # Spawn the initial two requests - for state_filter in state_filters: - ensureDeferred( - self.state_datastore._get_state_for_group_using_inflight_cache( - 42, - state_filter, - ) - ) - - # Spawn a third request - req = ensureDeferred( - self.state_datastore._get_state_for_group_using_inflight_cache( - 42, - StateFilter.freeze( - { - "state.type": {"A"}, - }, - include_others=False, - ), - ) - ) - self.pump(by=0.1) - - self.assertFalse(req.called) - - # Complete the largest request's query to make sure that the final request - # only waits for that one (and doesn't needlessly wait for both queries) - self._complete_request_fake( - *self.get_state_group_calls[biggest_state_filter_idx] - ) - - # That should have been sufficient to complete the third request - self.assertTrue(req.called) From a43a5ea5bfefcb25d90209958fb014a3b5e0ead0 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Wed, 2 Mar 2022 10:38:10 +0000 Subject: [PATCH 325/474] Remove misleading newsfile from #12126 which backs out an unreleased change. --- changelog.d/12126.removal | 1 - 1 file changed, 1 deletion(-) delete mode 100644 changelog.d/12126.removal diff --git a/changelog.d/12126.removal b/changelog.d/12126.removal deleted file mode 100644 index 8c8bf6ee7ede..000000000000 --- a/changelog.d/12126.removal +++ /dev/null @@ -1 +0,0 @@ -Back out in-flight state caching changes. \ No newline at end of file From 879e4a7bd7a90cda4c8ea908aede53d8e038ca7c Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Wed, 2 Mar 2022 10:45:16 +0000 Subject: [PATCH 326/474] 1.54.0rc1 --- CHANGES.md | 96 +++++++++++++++++++++++++++++++++++++++ changelog.d/11599.doc | 1 - changelog.d/11617.feature | 1 - changelog.d/11808.misc | 1 - changelog.d/11835.feature | 1 - changelog.d/11865.removal | 1 - changelog.d/11900.misc | 1 - changelog.d/11972.misc | 1 - changelog.d/11974.misc | 1 - changelog.d/11984.misc | 1 - changelog.d/11985.feature | 1 - changelog.d/11991.misc | 1 - changelog.d/11992.bugfix | 1 - changelog.d/11994.misc | 1 - changelog.d/11996.misc | 1 - changelog.d/11997.docker | 1 - changelog.d/11999.bugfix | 1 - changelog.d/12000.feature | 1 - changelog.d/12001.feature | 1 - changelog.d/12003.doc | 1 - changelog.d/12004.doc | 1 - changelog.d/12005.misc | 1 - changelog.d/12008.removal | 1 - changelog.d/12009.feature | 1 - changelog.d/12011.misc | 1 - changelog.d/12012.misc | 1 - changelog.d/12013.misc | 1 - changelog.d/12015.misc | 1 - changelog.d/12016.misc | 1 - changelog.d/12018.removal | 1 - changelog.d/12019.misc | 1 - changelog.d/12020.feature | 1 - changelog.d/12021.feature | 1 - changelog.d/12022.feature | 1 - changelog.d/12024.bugfix | 1 - changelog.d/12025.misc | 1 - changelog.d/12030.misc | 1 - changelog.d/12031.misc | 1 - changelog.d/12034.misc | 1 - changelog.d/12037.bugfix | 1 - changelog.d/12039.misc | 1 - changelog.d/12041.misc | 1 - changelog.d/12051.misc | 1 - changelog.d/12052.misc | 1 - changelog.d/12056.bugfix | 1 - changelog.d/12058.feature | 1 - changelog.d/12059.misc | 1 - changelog.d/12060.misc | 1 - changelog.d/12062.feature | 1 - changelog.d/12063.misc | 1 - changelog.d/12066.misc | 1 - changelog.d/12067.feature | 1 - changelog.d/12068.misc | 1 - changelog.d/12069.misc | 1 - changelog.d/12070.misc | 1 - 
changelog.d/12072.misc | 1 - changelog.d/12073.removal | 1 - changelog.d/12077.bugfix | 1 - changelog.d/12084.misc | 1 - changelog.d/12088.misc | 1 - changelog.d/12089.bugfix | 1 - changelog.d/12092.misc | 1 - changelog.d/12094.misc | 1 - changelog.d/12098.bugfix | 1 - changelog.d/12099.misc | 1 - changelog.d/12100.bugfix | 1 - changelog.d/12105.bugfix | 1 - changelog.d/12106.misc | 1 - changelog.d/12109.misc | 1 - changelog.d/12111.misc | 1 - changelog.d/12112.docker | 1 - changelog.d/12119.misc | 1 - debian/changelog | 6 +++ synapse/__init__.py | 2 +- 74 files changed, 103 insertions(+), 72 deletions(-) delete mode 100644 changelog.d/11599.doc delete mode 100644 changelog.d/11617.feature delete mode 100644 changelog.d/11808.misc delete mode 100644 changelog.d/11835.feature delete mode 100644 changelog.d/11865.removal delete mode 100644 changelog.d/11900.misc delete mode 100644 changelog.d/11972.misc delete mode 100644 changelog.d/11974.misc delete mode 100644 changelog.d/11984.misc delete mode 100644 changelog.d/11985.feature delete mode 100644 changelog.d/11991.misc delete mode 100644 changelog.d/11992.bugfix delete mode 100644 changelog.d/11994.misc delete mode 100644 changelog.d/11996.misc delete mode 100644 changelog.d/11997.docker delete mode 100644 changelog.d/11999.bugfix delete mode 100644 changelog.d/12000.feature delete mode 100644 changelog.d/12001.feature delete mode 100644 changelog.d/12003.doc delete mode 100644 changelog.d/12004.doc delete mode 100644 changelog.d/12005.misc delete mode 100644 changelog.d/12008.removal delete mode 100644 changelog.d/12009.feature delete mode 100644 changelog.d/12011.misc delete mode 100644 changelog.d/12012.misc delete mode 100644 changelog.d/12013.misc delete mode 100644 changelog.d/12015.misc delete mode 100644 changelog.d/12016.misc delete mode 100644 changelog.d/12018.removal delete mode 100644 changelog.d/12019.misc delete mode 100644 changelog.d/12020.feature delete mode 100644 changelog.d/12021.feature delete mode 100644 changelog.d/12022.feature delete mode 100644 changelog.d/12024.bugfix delete mode 100644 changelog.d/12025.misc delete mode 100644 changelog.d/12030.misc delete mode 100644 changelog.d/12031.misc delete mode 100644 changelog.d/12034.misc delete mode 100644 changelog.d/12037.bugfix delete mode 100644 changelog.d/12039.misc delete mode 100644 changelog.d/12041.misc delete mode 100644 changelog.d/12051.misc delete mode 100644 changelog.d/12052.misc delete mode 100644 changelog.d/12056.bugfix delete mode 100644 changelog.d/12058.feature delete mode 100644 changelog.d/12059.misc delete mode 100644 changelog.d/12060.misc delete mode 100644 changelog.d/12062.feature delete mode 100644 changelog.d/12063.misc delete mode 100644 changelog.d/12066.misc delete mode 100644 changelog.d/12067.feature delete mode 100644 changelog.d/12068.misc delete mode 100644 changelog.d/12069.misc delete mode 100644 changelog.d/12070.misc delete mode 100644 changelog.d/12072.misc delete mode 100644 changelog.d/12073.removal delete mode 100644 changelog.d/12077.bugfix delete mode 100644 changelog.d/12084.misc delete mode 100644 changelog.d/12088.misc delete mode 100644 changelog.d/12089.bugfix delete mode 100644 changelog.d/12092.misc delete mode 100644 changelog.d/12094.misc delete mode 100644 changelog.d/12098.bugfix delete mode 100644 changelog.d/12099.misc delete mode 100644 changelog.d/12100.bugfix delete mode 100644 changelog.d/12105.bugfix delete mode 100644 changelog.d/12106.misc delete mode 100644 changelog.d/12109.misc delete mode 100644 
changelog.d/12111.misc delete mode 100644 changelog.d/12112.docker delete mode 100644 changelog.d/12119.misc diff --git a/CHANGES.md b/CHANGES.md index 81333097aeb1..4f0318970efa 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,99 @@ +Synapse 1.54.0rc1 (2022-03-02) +============================== + +Features +-------- + +- Add support for MSC3202: sending one-time key counts and fallback key usage states to Application Services. ([\#11617](https://github.com/matrix-org/synapse/issues/11617)) +- Make a `POST` to `/rooms//receipt/m.read/` only trigger a push notification if the count of unread messages is different to the one in the last successfully sent push. ([\#11835](https://github.com/matrix-org/synapse/issues/11835)) +- Fetch images when previewing Twitter URLs. Contributed by @AndrewRyanChama. ([\#11985](https://github.com/matrix-org/synapse/issues/11985)) +- Track cache invalidations in Prometheus metrics, as already happens for cache eviction based on size or time. ([\#12000](https://github.com/matrix-org/synapse/issues/12000)) +- Implement experimental support for [MSC3720](https://github.com/matrix-org/matrix-doc/pull/3720) (account status endpoints). ([\#12001](https://github.com/matrix-org/synapse/issues/12001), [\#12067](https://github.com/matrix-org/synapse/issues/12067)) +- Enable modules to set a custom display name when registering a user. ([\#12009](https://github.com/matrix-org/synapse/issues/12009)) +- Advertise Matrix 1.1 support on `/_matrix/client/versions`. ([\#12020](https://github.com/matrix-org/synapse/issues/12020)) +- Support only the stable identifier for [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069)'s `is_guest` on `/_matrix/client/v3/account/whoami`. ([\#12021](https://github.com/matrix-org/synapse/issues/12021)) +- Advertise Matrix 1.2 support on `/_matrix/client/versions`. ([\#12022](https://github.com/matrix-org/synapse/issues/12022)) +- Use room version 9 as the default room version (per [MSC3589](https://github.com/matrix-org/matrix-doc/pull/3589)). ([\#12058](https://github.com/matrix-org/synapse/issues/12058)) +- Add module callbacks to react to user deactivation status changes (i.e. deactivations and reactivations) and profile updates. ([\#12062](https://github.com/matrix-org/synapse/issues/12062)) + + +Bugfixes +-------- + +- Fix a bug introduced in Synapse v1.48.0 where an edit of the latest event in a thread would not be properly applied to the thread summary. ([\#11992](https://github.com/matrix-org/synapse/issues/11992)) +- Fix long standing bug where `get_rooms_for_user` was not correctly invalidated for remote users when the server left a room. ([\#11999](https://github.com/matrix-org/synapse/issues/11999)) +- Fix 500 error with Postgres when looking backwards with the [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) `/timestamp_to_event?dir=b` endpoint. ([\#12024](https://github.com/matrix-org/synapse/issues/12024)) +- Properly fix a long-standing bug where wrong data could be inserted in the `event_search` table when using sqlite. This could block running `synapse_port_db` with an "argument of type 'int' is not iterable" error. This bug was partially fixed by a change in Synapse 1.44.0. ([\#12037](https://github.com/matrix-org/synapse/issues/12037)) +- Fix slow performance of `/logout` in some cases where refresh tokens are in use. The slowness existed since the initial implementation of refresh tokens. 
([\#12056](https://github.com/matrix-org/synapse/issues/12056)) +- Fix a long-standing bug where Synapse would make additional failing requests over federation for missing data. ([\#12077](https://github.com/matrix-org/synapse/issues/12077)) +- Fix occasional 'Unhandled error in Deferred' error message. ([\#12089](https://github.com/matrix-org/synapse/issues/12089)) +- Fix a bug introduced in Synapse 1.51.0rc1 where incoming federation transactions containing at least one EDU would be dropped if debug logging was enabled for `synapse.8631_debug`. ([\#12098](https://github.com/matrix-org/synapse/issues/12098)) +- Fix a long-standing bug which could cause push notifications to malfunction if `use_frozen_dicts` was set in the configuration. ([\#12100](https://github.com/matrix-org/synapse/issues/12100)) +- Fix an extremely rare, long-standing bug in `ReadWriteLock` that would cause an error when a newly unblocked writer completes instantly. ([\#12105](https://github.com/matrix-org/synapse/issues/12105)) + + +Updates to the Docker image +--------------------------- + +- The docker image no longer automatically creates a temporary volume at `/data`. This is not expected to affect normal usage. ([\#11997](https://github.com/matrix-org/synapse/issues/11997)) +- Use Python 3.9 in Docker images by default. ([\#12112](https://github.com/matrix-org/synapse/issues/12112)) + + +Improved Documentation +---------------------- + +- Document support for the `to_device`, `account_data`, `receipts`, and `presence` stream writers for workers. ([\#11599](https://github.com/matrix-org/synapse/issues/11599)) +- Explain the meaning of spam checker callbacks' return values. ([\#12003](https://github.com/matrix-org/synapse/issues/12003)) +- Clarify information about external Identity Provider IDs. ([\#12004](https://github.com/matrix-org/synapse/issues/12004)) + + +Deprecations and Removals +------------------------- + +- Deprecate using `synctl` with the config option `synctl_cache_factor` and print a warning if a user still uses this option. ([\#11865](https://github.com/matrix-org/synapse/issues/11865)) +- Remove support for the legacy structured logging configuration (please see the the [upgrade notes](https://matrix-org.github.io/synapse/develop/upgrade#legacy-structured-logging-configuration-removal) if you are using `structured: true` in the Synapse configuration). ([\#12008](https://github.com/matrix-org/synapse/issues/12008)) +- Drop support for [MSC3283](https://github.com/matrix-org/matrix-doc/pull/3283) unstable flags now that the stable flags are supported. ([\#12018](https://github.com/matrix-org/synapse/issues/12018)) +- Remove the unstable `/spaces` endpoint from [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946). ([\#12073](https://github.com/matrix-org/synapse/issues/12073)) + + +Internal Changes +---------------- + +- Make method `get_room_version` use cached `get_room_version_id`. ([\#11808](https://github.com/matrix-org/synapse/issues/11808)) +- Remove unnecessary condition on knock->leave auth rule check. ([\#11900](https://github.com/matrix-org/synapse/issues/11900)) +- Add tests for device list changes between local users. ([\#11972](https://github.com/matrix-org/synapse/issues/11972)) +- Optimise calculating device_list changes in `/sync`. ([\#11974](https://github.com/matrix-org/synapse/issues/11974)) +- Add missing type hints to storage classes. ([\#11984](https://github.com/matrix-org/synapse/issues/11984)) +- Refactor the search code for improved readability. 
([\#11991](https://github.com/matrix-org/synapse/issues/11991)) +- Move common deduplication code down into `_auth_and_persist_outliers`. ([\#11994](https://github.com/matrix-org/synapse/issues/11994)) +- Limit concurrent joins from applications services. ([\#11996](https://github.com/matrix-org/synapse/issues/11996)) +- Preparation for faster-room-join work: when parsing the `send_join` response, get the `m.room.create` event from `state`, not `auth_chain`. ([\#12005](https://github.com/matrix-org/synapse/issues/12005), [\#12039](https://github.com/matrix-org/synapse/issues/12039)) +- Preparation for faster-room-join work: parse msc3706 fields in send_join response. ([\#12011](https://github.com/matrix-org/synapse/issues/12011)) +- Preparation for faster-room-join work: persist information on which events and rooms have partial state to the database. ([\#12012](https://github.com/matrix-org/synapse/issues/12012)) +- Preparation for faster-room-join work: Support for calling `/federation/v1/state` on a remote server. ([\#12013](https://github.com/matrix-org/synapse/issues/12013)) +- Configure `tox` to use `venv` rather than `virtualenv`. ([\#12015](https://github.com/matrix-org/synapse/issues/12015)) +- Fix bug in `StateFilter.return_expanded()` and add some tests. ([\#12016](https://github.com/matrix-org/synapse/issues/12016)) +- Use Matrix v1.1 endpoints (`/_matrix/client/v3/auth/...`) in fallback auth HTML forms. ([\#12019](https://github.com/matrix-org/synapse/issues/12019)) +- Update the `olddeps` CI job to use an old version of `markupsafe`. ([\#12025](https://github.com/matrix-org/synapse/issues/12025)) +- Upgrade mypy to version 0.931. ([\#12030](https://github.com/matrix-org/synapse/issues/12030)) +- Remove legacy `HomeServer.get_datastore()`. ([\#12031](https://github.com/matrix-org/synapse/issues/12031), [\#12070](https://github.com/matrix-org/synapse/issues/12070)) +- Minor typing fixes. ([\#12034](https://github.com/matrix-org/synapse/issues/12034), [\#12069](https://github.com/matrix-org/synapse/issues/12069)) +- After joining a room, create a dedicated logcontext to process the queued events. ([\#12041](https://github.com/matrix-org/synapse/issues/12041)) +- Tidy up GitHub Actions config which builds distributions for PyPI. ([\#12051](https://github.com/matrix-org/synapse/issues/12051)) +- Move configuration out of `setup.cfg`. ([\#12052](https://github.com/matrix-org/synapse/issues/12052), [\#12059](https://github.com/matrix-org/synapse/issues/12059)) +- Fix error message when a worker process fails to talk to another worker process. ([\#12060](https://github.com/matrix-org/synapse/issues/12060)) +- Fix using the complement.sh script without specifying a dir or a branch. Contributed by Nico on behalf of Famedly. ([\#12063](https://github.com/matrix-org/synapse/issues/12063)) +- Add type hints to `tests/rest/client`. ([\#12066](https://github.com/matrix-org/synapse/issues/12066), [\#12072](https://github.com/matrix-org/synapse/issues/12072), [\#12084](https://github.com/matrix-org/synapse/issues/12084), [\#12094](https://github.com/matrix-org/synapse/issues/12094)) +- Add some logging to `/sync` to try and track down #11916. ([\#12068](https://github.com/matrix-org/synapse/issues/12068)) +- Inspect application dependencies using `importlib.metadata` or its backport. ([\#12088](https://github.com/matrix-org/synapse/issues/12088)) +- User `assertEqual` instead of the deprecated `assertEquals` in test code. 
([\#12092](https://github.com/matrix-org/synapse/issues/12092)) +- Move experimental support for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440) to /versions. ([\#12099](https://github.com/matrix-org/synapse/issues/12099)) +- Add `stop_cancellation` utility function to stop `Deferred`s from being cancelled. ([\#12106](https://github.com/matrix-org/synapse/issues/12106)) +- Improve exception handling for concurrent execution. ([\#12109](https://github.com/matrix-org/synapse/issues/12109)) +- Advertise support for Python 3.10 in packaging files. ([\#12111](https://github.com/matrix-org/synapse/issues/12111)) +- Move CI checks out of tox, to facilitate a move to using poetry. ([\#12119](https://github.com/matrix-org/synapse/issues/12119)) + + Synapse 1.53.0 (2022-02-22) =========================== diff --git a/changelog.d/11599.doc b/changelog.d/11599.doc deleted file mode 100644 index f07cfbef4e4c..000000000000 --- a/changelog.d/11599.doc +++ /dev/null @@ -1 +0,0 @@ -Document support for the `to_device`, `account_data`, `receipts`, and `presence` stream writers for workers. diff --git a/changelog.d/11617.feature b/changelog.d/11617.feature deleted file mode 100644 index cf03f00e7c4d..000000000000 --- a/changelog.d/11617.feature +++ /dev/null @@ -1 +0,0 @@ -Add support for MSC3202: sending one-time key counts and fallback key usage states to Application Services. \ No newline at end of file diff --git a/changelog.d/11808.misc b/changelog.d/11808.misc deleted file mode 100644 index cdc5fc75b75b..000000000000 --- a/changelog.d/11808.misc +++ /dev/null @@ -1 +0,0 @@ -Make method `get_room_version` use cached `get_room_version_id`. diff --git a/changelog.d/11835.feature b/changelog.d/11835.feature deleted file mode 100644 index 7cee39b08c71..000000000000 --- a/changelog.d/11835.feature +++ /dev/null @@ -1 +0,0 @@ -Make a `POST` to `/rooms//receipt/m.read/` only trigger a push notification if the count of unread messages is different to the one in the last successfully sent push. diff --git a/changelog.d/11865.removal b/changelog.d/11865.removal deleted file mode 100644 index 9fcabfc7200e..000000000000 --- a/changelog.d/11865.removal +++ /dev/null @@ -1 +0,0 @@ -Deprecate using `synctl` with the config option `synctl_cache_factor` and print a warning if a user still uses this option. diff --git a/changelog.d/11900.misc b/changelog.d/11900.misc deleted file mode 100644 index edd2852fd49f..000000000000 --- a/changelog.d/11900.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unnecessary condition on knock->leave auth rule check. \ No newline at end of file diff --git a/changelog.d/11972.misc b/changelog.d/11972.misc deleted file mode 100644 index 29c38bfd8255..000000000000 --- a/changelog.d/11972.misc +++ /dev/null @@ -1 +0,0 @@ -Add tests for device list changes between local users. \ No newline at end of file diff --git a/changelog.d/11974.misc b/changelog.d/11974.misc deleted file mode 100644 index 1debad2361ee..000000000000 --- a/changelog.d/11974.misc +++ /dev/null @@ -1 +0,0 @@ -Optimise calculating device_list changes in `/sync`. diff --git a/changelog.d/11984.misc b/changelog.d/11984.misc deleted file mode 100644 index 8e405b922674..000000000000 --- a/changelog.d/11984.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints to storage classes. 
\ No newline at end of file diff --git a/changelog.d/11985.feature b/changelog.d/11985.feature deleted file mode 100644 index 120d888a4914..000000000000 --- a/changelog.d/11985.feature +++ /dev/null @@ -1 +0,0 @@ -Fetch images when previewing Twitter URLs. Contributed by @AndrewRyanChama. diff --git a/changelog.d/11991.misc b/changelog.d/11991.misc deleted file mode 100644 index 34a3b3a6b9df..000000000000 --- a/changelog.d/11991.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor the search code for improved readability. diff --git a/changelog.d/11992.bugfix b/changelog.d/11992.bugfix deleted file mode 100644 index f73c86bb2598..000000000000 --- a/changelog.d/11992.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse v1.48.0 where an edit of the latest event in a thread would not be properly applied to the thread summary. diff --git a/changelog.d/11994.misc b/changelog.d/11994.misc deleted file mode 100644 index d64297dd78f0..000000000000 --- a/changelog.d/11994.misc +++ /dev/null @@ -1 +0,0 @@ -Move common deduplication code down into `_auth_and_persist_outliers`. diff --git a/changelog.d/11996.misc b/changelog.d/11996.misc deleted file mode 100644 index 6c675fd1931c..000000000000 --- a/changelog.d/11996.misc +++ /dev/null @@ -1 +0,0 @@ -Limit concurrent joins from applications services. \ No newline at end of file diff --git a/changelog.d/11997.docker b/changelog.d/11997.docker deleted file mode 100644 index 1b3271457eee..000000000000 --- a/changelog.d/11997.docker +++ /dev/null @@ -1 +0,0 @@ -The docker image no longer automatically creates a temporary volume at `/data`. This is not expected to affect normal usage. diff --git a/changelog.d/11999.bugfix b/changelog.d/11999.bugfix deleted file mode 100644 index fd8409590002..000000000000 --- a/changelog.d/11999.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix long standing bug where `get_rooms_for_user` was not correctly invalidated for remote users when the server left a room. diff --git a/changelog.d/12000.feature b/changelog.d/12000.feature deleted file mode 100644 index 246cc87f0bde..000000000000 --- a/changelog.d/12000.feature +++ /dev/null @@ -1 +0,0 @@ -Track cache invalidations in Prometheus metrics, as already happens for cache eviction based on size or time. diff --git a/changelog.d/12001.feature b/changelog.d/12001.feature deleted file mode 100644 index dc1153c49ef4..000000000000 --- a/changelog.d/12001.feature +++ /dev/null @@ -1 +0,0 @@ -Implement experimental support for [MSC3720](https://github.com/matrix-org/matrix-doc/pull/3720) (account status endpoints). diff --git a/changelog.d/12003.doc b/changelog.d/12003.doc deleted file mode 100644 index 1ac8163559f2..000000000000 --- a/changelog.d/12003.doc +++ /dev/null @@ -1 +0,0 @@ -Explain the meaning of spam checker callbacks' return values. diff --git a/changelog.d/12004.doc b/changelog.d/12004.doc deleted file mode 100644 index 0b4baef21035..000000000000 --- a/changelog.d/12004.doc +++ /dev/null @@ -1 +0,0 @@ -Clarify information about external Identity Provider IDs. diff --git a/changelog.d/12005.misc b/changelog.d/12005.misc deleted file mode 100644 index 45e21dbe5953..000000000000 --- a/changelog.d/12005.misc +++ /dev/null @@ -1 +0,0 @@ -Preparation for faster-room-join work: when parsing the `send_join` response, get the `m.room.create` event from `state`, not `auth_chain`. 
diff --git a/changelog.d/12008.removal b/changelog.d/12008.removal deleted file mode 100644 index 57599d9ee955..000000000000 --- a/changelog.d/12008.removal +++ /dev/null @@ -1 +0,0 @@ -Remove support for the legacy structured logging configuration (please see the the [upgrade notes](https://matrix-org.github.io/synapse/develop/upgrade#legacy-structured-logging-configuration-removal) if you are using `structured: true` in the Synapse configuration). diff --git a/changelog.d/12009.feature b/changelog.d/12009.feature deleted file mode 100644 index c8a531481ec3..000000000000 --- a/changelog.d/12009.feature +++ /dev/null @@ -1 +0,0 @@ -Enable modules to set a custom display name when registering a user. diff --git a/changelog.d/12011.misc b/changelog.d/12011.misc deleted file mode 100644 index 258b0e389f6c..000000000000 --- a/changelog.d/12011.misc +++ /dev/null @@ -1 +0,0 @@ -Preparation for faster-room-join work: parse msc3706 fields in send_join response. diff --git a/changelog.d/12012.misc b/changelog.d/12012.misc deleted file mode 100644 index a473f41e7856..000000000000 --- a/changelog.d/12012.misc +++ /dev/null @@ -1 +0,0 @@ -Preparation for faster-room-join work: persist information on which events and rooms have partial state to the database. diff --git a/changelog.d/12013.misc b/changelog.d/12013.misc deleted file mode 100644 index c0fca8dccbae..000000000000 --- a/changelog.d/12013.misc +++ /dev/null @@ -1 +0,0 @@ -Preparation for faster-room-join work: Support for calling `/federation/v1/state` on a remote server. diff --git a/changelog.d/12015.misc b/changelog.d/12015.misc deleted file mode 100644 index 3aa32ab4cf78..000000000000 --- a/changelog.d/12015.misc +++ /dev/null @@ -1 +0,0 @@ -Configure `tox` to use `venv` rather than `virtualenv`. diff --git a/changelog.d/12016.misc b/changelog.d/12016.misc deleted file mode 100644 index 8856ef46a913..000000000000 --- a/changelog.d/12016.misc +++ /dev/null @@ -1 +0,0 @@ -Fix bug in `StateFilter.return_expanded()` and add some tests. \ No newline at end of file diff --git a/changelog.d/12018.removal b/changelog.d/12018.removal deleted file mode 100644 index e940b62228ac..000000000000 --- a/changelog.d/12018.removal +++ /dev/null @@ -1 +0,0 @@ -Drop support for [MSC3283](https://github.com/matrix-org/matrix-doc/pull/3283) unstable flags now that the stable flags are supported. diff --git a/changelog.d/12019.misc b/changelog.d/12019.misc deleted file mode 100644 index b2186320ead4..000000000000 --- a/changelog.d/12019.misc +++ /dev/null @@ -1 +0,0 @@ -Use Matrix v1.1 endpoints (`/_matrix/client/v3/auth/...`) in fallback auth HTML forms. \ No newline at end of file diff --git a/changelog.d/12020.feature b/changelog.d/12020.feature deleted file mode 100644 index 1ac9d2060e2a..000000000000 --- a/changelog.d/12020.feature +++ /dev/null @@ -1 +0,0 @@ -Advertise Matrix 1.1 support on `/_matrix/client/versions`. \ No newline at end of file diff --git a/changelog.d/12021.feature b/changelog.d/12021.feature deleted file mode 100644 index 01378df8ca62..000000000000 --- a/changelog.d/12021.feature +++ /dev/null @@ -1 +0,0 @@ -Support only the stable identifier for [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069)'s `is_guest` on `/_matrix/client/v3/account/whoami`. \ No newline at end of file diff --git a/changelog.d/12022.feature b/changelog.d/12022.feature deleted file mode 100644 index 188fb125705a..000000000000 --- a/changelog.d/12022.feature +++ /dev/null @@ -1 +0,0 @@ -Advertise Matrix 1.2 support on `/_matrix/client/versions`. 
\ No newline at end of file diff --git a/changelog.d/12024.bugfix b/changelog.d/12024.bugfix deleted file mode 100644 index 59bcdb93a57f..000000000000 --- a/changelog.d/12024.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix 500 error with Postgres when looking backwards with the [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) `/timestamp_to_event?dir=b` endpoint. diff --git a/changelog.d/12025.misc b/changelog.d/12025.misc deleted file mode 100644 index d9475a771881..000000000000 --- a/changelog.d/12025.misc +++ /dev/null @@ -1 +0,0 @@ -Update the `olddeps` CI job to use an old version of `markupsafe`. diff --git a/changelog.d/12030.misc b/changelog.d/12030.misc deleted file mode 100644 index 607ee97ce60e..000000000000 --- a/changelog.d/12030.misc +++ /dev/null @@ -1 +0,0 @@ -Upgrade mypy to version 0.931. diff --git a/changelog.d/12031.misc b/changelog.d/12031.misc deleted file mode 100644 index d4bedc6b97cb..000000000000 --- a/changelog.d/12031.misc +++ /dev/null @@ -1 +0,0 @@ -Remove legacy `HomeServer.get_datastore()`. diff --git a/changelog.d/12034.misc b/changelog.d/12034.misc deleted file mode 100644 index 8374a63220d1..000000000000 --- a/changelog.d/12034.misc +++ /dev/null @@ -1 +0,0 @@ -Minor typing fixes. diff --git a/changelog.d/12037.bugfix b/changelog.d/12037.bugfix deleted file mode 100644 index 9295cb4dc0d9..000000000000 --- a/changelog.d/12037.bugfix +++ /dev/null @@ -1 +0,0 @@ -Properly fix a long-standing bug where wrong data could be inserted in the `event_search` table when using sqlite. This could block running `synapse_port_db` with an "argument of type 'int' is not iterable" error. This bug was partially fixed by a change in Synapse 1.44.0. diff --git a/changelog.d/12039.misc b/changelog.d/12039.misc deleted file mode 100644 index 45e21dbe5953..000000000000 --- a/changelog.d/12039.misc +++ /dev/null @@ -1 +0,0 @@ -Preparation for faster-room-join work: when parsing the `send_join` response, get the `m.room.create` event from `state`, not `auth_chain`. diff --git a/changelog.d/12041.misc b/changelog.d/12041.misc deleted file mode 100644 index e56dc093def6..000000000000 --- a/changelog.d/12041.misc +++ /dev/null @@ -1 +0,0 @@ -After joining a room, create a dedicated logcontext to process the queued events. diff --git a/changelog.d/12051.misc b/changelog.d/12051.misc deleted file mode 100644 index 9959191352ad..000000000000 --- a/changelog.d/12051.misc +++ /dev/null @@ -1 +0,0 @@ -Tidy up GitHub Actions config which builds distributions for PyPI. \ No newline at end of file diff --git a/changelog.d/12052.misc b/changelog.d/12052.misc deleted file mode 100644 index 11755ae61b9a..000000000000 --- a/changelog.d/12052.misc +++ /dev/null @@ -1 +0,0 @@ -Move configuration out of `setup.cfg`. diff --git a/changelog.d/12056.bugfix b/changelog.d/12056.bugfix deleted file mode 100644 index 210e30c63fed..000000000000 --- a/changelog.d/12056.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix slow performance of `/logout` in some cases where refresh tokens are in use. The slowness existed since the initial implementation of refresh tokens. \ No newline at end of file diff --git a/changelog.d/12058.feature b/changelog.d/12058.feature deleted file mode 100644 index 7b716922299a..000000000000 --- a/changelog.d/12058.feature +++ /dev/null @@ -1 +0,0 @@ -Use room version 9 as the default room version (per [MSC3589](https://github.com/matrix-org/matrix-doc/pull/3589)). 
diff --git a/changelog.d/12059.misc b/changelog.d/12059.misc deleted file mode 100644 index 9ba4759d99de..000000000000 --- a/changelog.d/12059.misc +++ /dev/null @@ -1 +0,0 @@ -Move configuration out of `setup.cfg`. \ No newline at end of file diff --git a/changelog.d/12060.misc b/changelog.d/12060.misc deleted file mode 100644 index d771e6a1b393..000000000000 --- a/changelog.d/12060.misc +++ /dev/null @@ -1 +0,0 @@ -Fix error message when a worker process fails to talk to another worker process. diff --git a/changelog.d/12062.feature b/changelog.d/12062.feature deleted file mode 100644 index 46a606709da8..000000000000 --- a/changelog.d/12062.feature +++ /dev/null @@ -1 +0,0 @@ -Add module callbacks to react to user deactivation status changes (i.e. deactivations and reactivations) and profile updates. diff --git a/changelog.d/12063.misc b/changelog.d/12063.misc deleted file mode 100644 index e48c5dd08bd7..000000000000 --- a/changelog.d/12063.misc +++ /dev/null @@ -1 +0,0 @@ -Fix using the complement.sh script without specifying a dir or a branch. Contributed by Nico on behalf of Famedly. diff --git a/changelog.d/12066.misc b/changelog.d/12066.misc deleted file mode 100644 index 0360dbd61edc..000000000000 --- a/changelog.d/12066.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to `tests/rest/client`. diff --git a/changelog.d/12067.feature b/changelog.d/12067.feature deleted file mode 100644 index dc1153c49ef4..000000000000 --- a/changelog.d/12067.feature +++ /dev/null @@ -1 +0,0 @@ -Implement experimental support for [MSC3720](https://github.com/matrix-org/matrix-doc/pull/3720) (account status endpoints). diff --git a/changelog.d/12068.misc b/changelog.d/12068.misc deleted file mode 100644 index 72b211e4f55f..000000000000 --- a/changelog.d/12068.misc +++ /dev/null @@ -1 +0,0 @@ -Add some logging to `/sync` to try and track down #11916. diff --git a/changelog.d/12069.misc b/changelog.d/12069.misc deleted file mode 100644 index 8374a63220d1..000000000000 --- a/changelog.d/12069.misc +++ /dev/null @@ -1 +0,0 @@ -Minor typing fixes. diff --git a/changelog.d/12070.misc b/changelog.d/12070.misc deleted file mode 100644 index d4bedc6b97cb..000000000000 --- a/changelog.d/12070.misc +++ /dev/null @@ -1 +0,0 @@ -Remove legacy `HomeServer.get_datastore()`. diff --git a/changelog.d/12072.misc b/changelog.d/12072.misc deleted file mode 100644 index 0360dbd61edc..000000000000 --- a/changelog.d/12072.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to `tests/rest/client`. diff --git a/changelog.d/12073.removal b/changelog.d/12073.removal deleted file mode 100644 index 1f3979271270..000000000000 --- a/changelog.d/12073.removal +++ /dev/null @@ -1 +0,0 @@ -Remove the unstable `/spaces` endpoint from [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946). diff --git a/changelog.d/12077.bugfix b/changelog.d/12077.bugfix deleted file mode 100644 index 1bce82082dad..000000000000 --- a/changelog.d/12077.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where Synapse would make additional failing requests over federation for missing data. diff --git a/changelog.d/12084.misc b/changelog.d/12084.misc deleted file mode 100644 index 0360dbd61edc..000000000000 --- a/changelog.d/12084.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to `tests/rest/client`. 
diff --git a/changelog.d/12088.misc b/changelog.d/12088.misc deleted file mode 100644 index ce4213650c5e..000000000000 --- a/changelog.d/12088.misc +++ /dev/null @@ -1 +0,0 @@ -Inspect application dependencies using `importlib.metadata` or its backport. \ No newline at end of file diff --git a/changelog.d/12089.bugfix b/changelog.d/12089.bugfix deleted file mode 100644 index 27172c482803..000000000000 --- a/changelog.d/12089.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix occasional 'Unhandled error in Deferred' error message. diff --git a/changelog.d/12092.misc b/changelog.d/12092.misc deleted file mode 100644 index 62653d6f8d9e..000000000000 --- a/changelog.d/12092.misc +++ /dev/null @@ -1 +0,0 @@ -User `assertEqual` instead of the deprecated `assertEquals` in test code. diff --git a/changelog.d/12094.misc b/changelog.d/12094.misc deleted file mode 100644 index 0360dbd61edc..000000000000 --- a/changelog.d/12094.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to `tests/rest/client`. diff --git a/changelog.d/12098.bugfix b/changelog.d/12098.bugfix deleted file mode 100644 index 6b696692e332..000000000000 --- a/changelog.d/12098.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.51.0rc1 where incoming federation transactions containing at least one EDU would be dropped if debug logging was enabled for `synapse.8631_debug`. \ No newline at end of file diff --git a/changelog.d/12099.misc b/changelog.d/12099.misc deleted file mode 100644 index 0553825dbce4..000000000000 --- a/changelog.d/12099.misc +++ /dev/null @@ -1 +0,0 @@ -Move experimental support for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440) to /versions. diff --git a/changelog.d/12100.bugfix b/changelog.d/12100.bugfix deleted file mode 100644 index 181095ad9929..000000000000 --- a/changelog.d/12100.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug which could cause push notifications to malfunction if `use_frozen_dicts` was set in the configuration. diff --git a/changelog.d/12105.bugfix b/changelog.d/12105.bugfix deleted file mode 100644 index f42e63e01fb7..000000000000 --- a/changelog.d/12105.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix an extremely rare, long-standing bug in `ReadWriteLock` that would cause an error when a newly unblocked writer completes instantly. diff --git a/changelog.d/12106.misc b/changelog.d/12106.misc deleted file mode 100644 index d918e9e3b16d..000000000000 --- a/changelog.d/12106.misc +++ /dev/null @@ -1 +0,0 @@ -Add `stop_cancellation` utility function to stop `Deferred`s from being cancelled. diff --git a/changelog.d/12109.misc b/changelog.d/12109.misc deleted file mode 100644 index 3295e49f43b9..000000000000 --- a/changelog.d/12109.misc +++ /dev/null @@ -1 +0,0 @@ -Improve exception handling for concurrent execution. diff --git a/changelog.d/12111.misc b/changelog.d/12111.misc deleted file mode 100644 index be84789c9dfc..000000000000 --- a/changelog.d/12111.misc +++ /dev/null @@ -1 +0,0 @@ -Advertise support for Python 3.10 in packaging files. \ No newline at end of file diff --git a/changelog.d/12112.docker b/changelog.d/12112.docker deleted file mode 100644 index b9e630653d24..000000000000 --- a/changelog.d/12112.docker +++ /dev/null @@ -1 +0,0 @@ -Use Python 3.9 in Docker images by default. \ No newline at end of file diff --git a/changelog.d/12119.misc b/changelog.d/12119.misc deleted file mode 100644 index f02d140f3871..000000000000 --- a/changelog.d/12119.misc +++ /dev/null @@ -1 +0,0 @@ -Move CI checks out of tox, to facilitate a move to using poetry. 
\ No newline at end of file diff --git a/debian/changelog b/debian/changelog index 574930c085cd..df3db85b8e77 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.54.0~rc1) stable; urgency=medium + + * New synapse release 1.54.0~rc1. + + -- Synapse Packaging team Wed, 02 Mar 2022 10:43:22 +0000 + matrix-synapse-py3 (1.53.0) stable; urgency=medium * New synapse release 1.53.0. diff --git a/synapse/__init__.py b/synapse/__init__.py index 903f2e815dd9..b21e1ed0f342 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.53.0" +__version__ = "1.54.0rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From d800108bb4e1272235aa6f5f80b2732cee9aa5bf Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Wed, 2 Mar 2022 10:54:52 +0000 Subject: [PATCH 327/474] Reword changelog --- CHANGES.md | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 4f0318970efa..5485e8d47ef1 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,10 +1,14 @@ Synapse 1.54.0rc1 (2022-03-02) ============================== +Please note that this will be the last release of Synapse that is compatible with Mjolnir 1.3.1 and earlier. +Administrators of servers which have the Mjolnir module installed are advised to upgrade Mjolnir to version 1.3.2 or later. + + Features -------- -- Add support for MSC3202: sending one-time key counts and fallback key usage states to Application Services. ([\#11617](https://github.com/matrix-org/synapse/issues/11617)) +- Add support for [MSC3202](https://github.com/matrix-org/matrix-doc/pull/3202): sending one-time key counts and fallback key usage states to Application Services. ([\#11617](https://github.com/matrix-org/synapse/issues/11617)) - Make a `POST` to `/rooms//receipt/m.read/` only trigger a push notification if the count of unread messages is different to the one in the last successfully sent push. ([\#11835](https://github.com/matrix-org/synapse/issues/11835)) - Fetch images when previewing Twitter URLs. Contributed by @AndrewRyanChama. ([\#11985](https://github.com/matrix-org/synapse/issues/11985)) - Track cache invalidations in Prometheus metrics, as already happens for cache eviction based on size or time. ([\#12000](https://github.com/matrix-org/synapse/issues/12000)) @@ -22,8 +26,8 @@ Bugfixes - Fix a bug introduced in Synapse v1.48.0 where an edit of the latest event in a thread would not be properly applied to the thread summary. ([\#11992](https://github.com/matrix-org/synapse/issues/11992)) - Fix long standing bug where `get_rooms_for_user` was not correctly invalidated for remote users when the server left a room. ([\#11999](https://github.com/matrix-org/synapse/issues/11999)) -- Fix 500 error with Postgres when looking backwards with the [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) `/timestamp_to_event?dir=b` endpoint. ([\#12024](https://github.com/matrix-org/synapse/issues/12024)) -- Properly fix a long-standing bug where wrong data could be inserted in the `event_search` table when using sqlite. This could block running `synapse_port_db` with an "argument of type 'int' is not iterable" error. This bug was partially fixed by a change in Synapse 1.44.0. 
([\#12037](https://github.com/matrix-org/synapse/issues/12037)) +- Fix a 500 error with Postgres when looking backwards with the [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) `/timestamp_to_event?dir=b` endpoint. ([\#12024](https://github.com/matrix-org/synapse/issues/12024)) +- Properly fix a long-standing bug where wrong data could be inserted into the `event_search` table when using SQLite. This could block running `synapse_port_db` with an "argument of type 'int' is not iterable" error. This bug was partially fixed by a change in Synapse 1.44.0. ([\#12037](https://github.com/matrix-org/synapse/issues/12037)) - Fix slow performance of `/logout` in some cases where refresh tokens are in use. The slowness existed since the initial implementation of refresh tokens. ([\#12056](https://github.com/matrix-org/synapse/issues/12056)) - Fix a long-standing bug where Synapse would make additional failing requests over federation for missing data. ([\#12077](https://github.com/matrix-org/synapse/issues/12077)) - Fix occasional 'Unhandled error in Deferred' error message. ([\#12089](https://github.com/matrix-org/synapse/issues/12089)) @@ -35,7 +39,7 @@ Bugfixes Updates to the Docker image --------------------------- -- The docker image no longer automatically creates a temporary volume at `/data`. This is not expected to affect normal usage. ([\#11997](https://github.com/matrix-org/synapse/issues/11997)) +- The Docker image no longer automatically creates a temporary volume at `/data`. This is not expected to affect normal usage. ([\#11997](https://github.com/matrix-org/synapse/issues/11997)) - Use Python 3.9 in Docker images by default. ([\#12112](https://github.com/matrix-org/synapse/issues/12112)) @@ -59,35 +63,35 @@ Deprecations and Removals Internal Changes ---------------- -- Make method `get_room_version` use cached `get_room_version_id`. ([\#11808](https://github.com/matrix-org/synapse/issues/11808)) -- Remove unnecessary condition on knock->leave auth rule check. ([\#11900](https://github.com/matrix-org/synapse/issues/11900)) +- Make the `get_room_version` method use `get_room_version_id` to benefit from caching. ([\#11808](https://github.com/matrix-org/synapse/issues/11808)) +- Remove unnecessary condition on knock -> leave auth rule check. ([\#11900](https://github.com/matrix-org/synapse/issues/11900)) - Add tests for device list changes between local users. ([\#11972](https://github.com/matrix-org/synapse/issues/11972)) -- Optimise calculating device_list changes in `/sync`. ([\#11974](https://github.com/matrix-org/synapse/issues/11974)) +- Optimise calculating `device_list` changes in `/sync`. ([\#11974](https://github.com/matrix-org/synapse/issues/11974)) - Add missing type hints to storage classes. ([\#11984](https://github.com/matrix-org/synapse/issues/11984)) - Refactor the search code for improved readability. ([\#11991](https://github.com/matrix-org/synapse/issues/11991)) - Move common deduplication code down into `_auth_and_persist_outliers`. ([\#11994](https://github.com/matrix-org/synapse/issues/11994)) - Limit concurrent joins from applications services. ([\#11996](https://github.com/matrix-org/synapse/issues/11996)) - Preparation for faster-room-join work: when parsing the `send_join` response, get the `m.room.create` event from `state`, not `auth_chain`. 
([\#12005](https://github.com/matrix-org/synapse/issues/12005), [\#12039](https://github.com/matrix-org/synapse/issues/12039)) -- Preparation for faster-room-join work: parse msc3706 fields in send_join response. ([\#12011](https://github.com/matrix-org/synapse/issues/12011)) +- Preparation for faster-room-join work: parse MSC3706 fields in send_join response. ([\#12011](https://github.com/matrix-org/synapse/issues/12011)) - Preparation for faster-room-join work: persist information on which events and rooms have partial state to the database. ([\#12012](https://github.com/matrix-org/synapse/issues/12012)) - Preparation for faster-room-join work: Support for calling `/federation/v1/state` on a remote server. ([\#12013](https://github.com/matrix-org/synapse/issues/12013)) - Configure `tox` to use `venv` rather than `virtualenv`. ([\#12015](https://github.com/matrix-org/synapse/issues/12015)) - Fix bug in `StateFilter.return_expanded()` and add some tests. ([\#12016](https://github.com/matrix-org/synapse/issues/12016)) - Use Matrix v1.1 endpoints (`/_matrix/client/v3/auth/...`) in fallback auth HTML forms. ([\#12019](https://github.com/matrix-org/synapse/issues/12019)) - Update the `olddeps` CI job to use an old version of `markupsafe`. ([\#12025](https://github.com/matrix-org/synapse/issues/12025)) -- Upgrade mypy to version 0.931. ([\#12030](https://github.com/matrix-org/synapse/issues/12030)) +- Upgrade Mypy to version 0.931. ([\#12030](https://github.com/matrix-org/synapse/issues/12030)) - Remove legacy `HomeServer.get_datastore()`. ([\#12031](https://github.com/matrix-org/synapse/issues/12031), [\#12070](https://github.com/matrix-org/synapse/issues/12070)) - Minor typing fixes. ([\#12034](https://github.com/matrix-org/synapse/issues/12034), [\#12069](https://github.com/matrix-org/synapse/issues/12069)) - After joining a room, create a dedicated logcontext to process the queued events. ([\#12041](https://github.com/matrix-org/synapse/issues/12041)) - Tidy up GitHub Actions config which builds distributions for PyPI. ([\#12051](https://github.com/matrix-org/synapse/issues/12051)) - Move configuration out of `setup.cfg`. ([\#12052](https://github.com/matrix-org/synapse/issues/12052), [\#12059](https://github.com/matrix-org/synapse/issues/12059)) - Fix error message when a worker process fails to talk to another worker process. ([\#12060](https://github.com/matrix-org/synapse/issues/12060)) -- Fix using the complement.sh script without specifying a dir or a branch. Contributed by Nico on behalf of Famedly. ([\#12063](https://github.com/matrix-org/synapse/issues/12063)) +- Fix using the `complement.sh` script without specifying a directory or a branch. Contributed by Nico on behalf of Famedly. ([\#12063](https://github.com/matrix-org/synapse/issues/12063)) - Add type hints to `tests/rest/client`. ([\#12066](https://github.com/matrix-org/synapse/issues/12066), [\#12072](https://github.com/matrix-org/synapse/issues/12072), [\#12084](https://github.com/matrix-org/synapse/issues/12084), [\#12094](https://github.com/matrix-org/synapse/issues/12094)) - Add some logging to `/sync` to try and track down #11916. ([\#12068](https://github.com/matrix-org/synapse/issues/12068)) - Inspect application dependencies using `importlib.metadata` or its backport. ([\#12088](https://github.com/matrix-org/synapse/issues/12088)) -- User `assertEqual` instead of the deprecated `assertEquals` in test code. 
([\#12092](https://github.com/matrix-org/synapse/issues/12092)) -- Move experimental support for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440) to /versions. ([\#12099](https://github.com/matrix-org/synapse/issues/12099)) +- Use `assertEqual` instead of the deprecated `assertEquals` in test code. ([\#12092](https://github.com/matrix-org/synapse/issues/12092)) +- Move experimental support for [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440) to `/versions`. ([\#12099](https://github.com/matrix-org/synapse/issues/12099)) - Add `stop_cancellation` utility function to stop `Deferred`s from being cancelled. ([\#12106](https://github.com/matrix-org/synapse/issues/12106)) - Improve exception handling for concurrent execution. ([\#12109](https://github.com/matrix-org/synapse/issues/12109)) - Advertise support for Python 3.10 in packaging files. ([\#12111](https://github.com/matrix-org/synapse/issues/12111)) From 010457011c83d4db96d84c43687ee5e291f7685f Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Wed, 2 Mar 2022 11:17:36 +0000 Subject: [PATCH 328/474] Apply suggestions to changelog --- CHANGES.md | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 5485e8d47ef1..a81d0a4b1443 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -9,14 +9,12 @@ Features -------- - Add support for [MSC3202](https://github.com/matrix-org/matrix-doc/pull/3202): sending one-time key counts and fallback key usage states to Application Services. ([\#11617](https://github.com/matrix-org/synapse/issues/11617)) -- Make a `POST` to `/rooms//receipt/m.read/` only trigger a push notification if the count of unread messages is different to the one in the last successfully sent push. ([\#11835](https://github.com/matrix-org/synapse/issues/11835)) -- Fetch images when previewing Twitter URLs. Contributed by @AndrewRyanChama. ([\#11985](https://github.com/matrix-org/synapse/issues/11985)) +- Improve the preview that is produced when generating URL previews for some web pages. Contributed by @AndrewRyanChama. ([\#11985](https://github.com/matrix-org/synapse/issues/11985)) - Track cache invalidations in Prometheus metrics, as already happens for cache eviction based on size or time. ([\#12000](https://github.com/matrix-org/synapse/issues/12000)) - Implement experimental support for [MSC3720](https://github.com/matrix-org/matrix-doc/pull/3720) (account status endpoints). ([\#12001](https://github.com/matrix-org/synapse/issues/12001), [\#12067](https://github.com/matrix-org/synapse/issues/12067)) - Enable modules to set a custom display name when registering a user. ([\#12009](https://github.com/matrix-org/synapse/issues/12009)) -- Advertise Matrix 1.1 support on `/_matrix/client/versions`. ([\#12020](https://github.com/matrix-org/synapse/issues/12020)) +- Advertise Matrix 1.1 and 1.2 support on `/_matrix/client/versions`. ([\#12020](https://github.com/matrix-org/synapse/issues/12020), ([\#12022](https://github.com/matrix-org/synapse/issues/12022)) - Support only the stable identifier for [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069)'s `is_guest` on `/_matrix/client/v3/account/whoami`. ([\#12021](https://github.com/matrix-org/synapse/issues/12021)) -- Advertise Matrix 1.2 support on `/_matrix/client/versions`. ([\#12022](https://github.com/matrix-org/synapse/issues/12022)) - Use room version 9 as the default room version (per [MSC3589](https://github.com/matrix-org/matrix-doc/pull/3589)). 
([\#12058](https://github.com/matrix-org/synapse/issues/12058)) - Add module callbacks to react to user deactivation status changes (i.e. deactivations and reactivations) and profile updates. ([\#12062](https://github.com/matrix-org/synapse/issues/12062)) @@ -24,16 +22,17 @@ Features Bugfixes -------- -- Fix a bug introduced in Synapse v1.48.0 where an edit of the latest event in a thread would not be properly applied to the thread summary. ([\#11992](https://github.com/matrix-org/synapse/issues/11992)) -- Fix long standing bug where `get_rooms_for_user` was not correctly invalidated for remote users when the server left a room. ([\#11999](https://github.com/matrix-org/synapse/issues/11999)) +- Fix a bug introduced in Synapse 1.48.0 where an edit of the latest event in a thread would not be properly applied to the thread summary. ([\#11992](https://github.com/matrix-org/synapse/issues/11992)) +- Fix long-standing bug where `get_rooms_for_user` was not correctly invalidated for remote users when the server left a room. ([\#11999](https://github.com/matrix-org/synapse/issues/11999)) - Fix a 500 error with Postgres when looking backwards with the [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) `/timestamp_to_event?dir=b` endpoint. ([\#12024](https://github.com/matrix-org/synapse/issues/12024)) -- Properly fix a long-standing bug where wrong data could be inserted into the `event_search` table when using SQLite. This could block running `synapse_port_db` with an "argument of type 'int' is not iterable" error. This bug was partially fixed by a change in Synapse 1.44.0. ([\#12037](https://github.com/matrix-org/synapse/issues/12037)) -- Fix slow performance of `/logout` in some cases where refresh tokens are in use. The slowness existed since the initial implementation of refresh tokens. ([\#12056](https://github.com/matrix-org/synapse/issues/12056)) +- Properly fix a long-standing bug where wrong data could be inserted into the `event_search` table when using SQLite. This could block running `synapse_port_db` with an `argument of type 'int' is not iterable` error. This bug was partially fixed by a change in Synapse 1.44.0. ([\#12037](https://github.com/matrix-org/synapse/issues/12037)) +- Fix slow performance of `/logout` in some cases where refresh tokens are in use. The slowness existed since the initial implementation of refresh tokens in version 1.38.0. ([\#12056](https://github.com/matrix-org/synapse/issues/12056)) - Fix a long-standing bug where Synapse would make additional failing requests over federation for missing data. ([\#12077](https://github.com/matrix-org/synapse/issues/12077)) -- Fix occasional 'Unhandled error in Deferred' error message. ([\#12089](https://github.com/matrix-org/synapse/issues/12089)) -- Fix a bug introduced in Synapse 1.51.0rc1 where incoming federation transactions containing at least one EDU would be dropped if debug logging was enabled for `synapse.8631_debug`. ([\#12098](https://github.com/matrix-org/synapse/issues/12098)) +- Fix occasional `Unhandled error in Deferred` error message. ([\#12089](https://github.com/matrix-org/synapse/issues/12089)) +- Fix a bug introduced in Synapse 1.51.0 where incoming federation transactions containing at least one EDU would be dropped if debug logging was enabled for `synapse.8631_debug`. ([\#12098](https://github.com/matrix-org/synapse/issues/12098)) - Fix a long-standing bug which could cause push notifications to malfunction if `use_frozen_dicts` was set in the configuration. 
([\#12100](https://github.com/matrix-org/synapse/issues/12100)) - Fix an extremely rare, long-standing bug in `ReadWriteLock` that would cause an error when a newly unblocked writer completes instantly. ([\#12105](https://github.com/matrix-org/synapse/issues/12105)) +- Make a `POST` to `/rooms//receipt/m.read/` only trigger a push notification if the count of unread messages is different to the one in the last successfully sent push. This reduces server load and load on the receiving device. ([\#11835](https://github.com/matrix-org/synapse/issues/11835)) Updates to the Docker image From 6adb89ff007500ea9c41fb5bd1a9e644cc6397cd Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 2 Mar 2022 06:56:16 -0500 Subject: [PATCH 329/474] Improve and refactor the tests for relations. (#12113) * Modernizes code (f-strings, etc.) * Fixes incorrect comments. * Splits the test case into two. * Factors out some duplicated code. --- changelog.d/12113.misc | 1 + tests/rest/client/test_relations.py | 386 +++++++++++++--------------- 2 files changed, 179 insertions(+), 208 deletions(-) create mode 100644 changelog.d/12113.misc diff --git a/changelog.d/12113.misc b/changelog.d/12113.misc new file mode 100644 index 000000000000..102e064053c2 --- /dev/null +++ b/changelog.d/12113.misc @@ -0,0 +1 @@ +Refactor the tests for event relations. diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index c8db45719e07..a087cd7b2149 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -34,7 +34,7 @@ from tests.test_utils.event_injection import inject_event -class RelationsTestCase(unittest.HomeserverTestCase): +class BaseRelationsTestCase(unittest.HomeserverTestCase): servlets = [ relations.register_servlets, room.register_servlets, @@ -48,7 +48,6 @@ class RelationsTestCase(unittest.HomeserverTestCase): def default_config(self) -> dict: # We need to enable msc1849 support for aggregations config = super().default_config() - config["experimental_msc1849_support_enabled"] = True # We enable frozen dicts as relations/edits change event contents, so we # want to test that we don't modify the events in the caches. @@ -67,10 +66,62 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: res = self.helper.send(self.room, body="Hi!", tok=self.user_token) self.parent_id = res["event_id"] - def test_send_relation(self) -> None: - """Tests that sending a relation using the new /send_relation works - creates the right shape of event. + def _create_user(self, localpart: str) -> Tuple[str, str]: + user_id = self.register_user(localpart, "abc123") + access_token = self.login(localpart, "abc123") + + return user_id, access_token + + def _send_relation( + self, + relation_type: str, + event_type: str, + key: Optional[str] = None, + content: Optional[dict] = None, + access_token: Optional[str] = None, + parent_id: Optional[str] = None, + ) -> FakeChannel: + """Helper function to send a relation pointing at `self.parent_id` + + Args: + relation_type: One of `RelationTypes` + event_type: The type of the event to create + key: The aggregation key used for m.annotation relation type. + content: The content of the created event. Will be modified to configure + the m.relates_to key based on the other provided parameters. + access_token: The access token used to send the relation, defaults + to `self.user_token` + parent_id: The event_id this relation relates to. 
If None, then self.parent_id + + Returns: + FakeChannel """ + if not access_token: + access_token = self.user_token + + original_id = parent_id if parent_id else self.parent_id + + if content is None: + content = {} + content["m.relates_to"] = { + "event_id": original_id, + "rel_type": relation_type, + } + if key is not None: + content["m.relates_to"]["key"] = key + + channel = self.make_request( + "POST", + f"/_matrix/client/v3/rooms/{self.room}/send/{event_type}", + content, + access_token=access_token, + ) + return channel + + +class RelationsTestCase(BaseRelationsTestCase): + def test_send_relation(self) -> None: + """Tests that sending a relation works.""" channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") self.assertEqual(200, channel.code, channel.json_body) @@ -79,7 +130,7 @@ def test_send_relation(self) -> None: channel = self.make_request( "GET", - "/rooms/%s/event/%s" % (self.room, event_id), + f"/rooms/{self.room}/event/{event_id}", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) @@ -317,9 +368,7 @@ def test_pagination_from_sync_and_messages(self) -> None: # Request /sync, limiting it such that only the latest event is returned # (and not the relation). - filter = urllib.parse.quote_plus( - '{"room": {"timeline": {"limit": 1}}}'.encode() - ) + filter = urllib.parse.quote_plus(b'{"room": {"timeline": {"limit": 1}}}') channel = self.make_request( "GET", f"/sync?filter={filter}", access_token=self.user_token ) @@ -404,8 +453,7 @@ def test_aggregation_pagination_groups(self) -> None: channel = self.make_request( "GET", - "/_matrix/client/unstable/rooms/%s/aggregations/%s?limit=1%s" - % (self.room, self.parent_id, from_token), + f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}?limit=1{from_token}", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) @@ -544,8 +592,7 @@ def test_aggregation(self) -> None: channel = self.make_request( "GET", - "/_matrix/client/unstable/rooms/%s/aggregations/%s" - % (self.room, self.parent_id), + f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) @@ -560,47 +607,13 @@ def test_aggregation(self) -> None: }, ) - def test_aggregation_redactions(self) -> None: - """Test that annotations get correctly aggregated after a redaction.""" - - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - self.assertEqual(200, channel.code, channel.json_body) - to_redact_event_id = channel.json_body["event_id"] - - channel = self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token - ) - self.assertEqual(200, channel.code, channel.json_body) - - # Now lets redact one of the 'a' reactions - channel = self.make_request( - "POST", - "/_matrix/client/r0/rooms/%s/redact/%s" % (self.room, to_redact_event_id), - access_token=self.user_token, - content={}, - ) - self.assertEqual(200, channel.code, channel.json_body) - - channel = self.make_request( - "GET", - "/_matrix/client/unstable/rooms/%s/aggregations/%s" - % (self.room, self.parent_id), - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - - self.assertEqual( - channel.json_body, - {"chunk": [{"type": "m.reaction", "key": "a", "count": 1}]}, - ) - def test_aggregation_must_be_annotation(self) -> None: """Test that aggregations must be annotations.""" channel = self.make_request( "GET", 
- "/_matrix/client/unstable/rooms/%s/aggregations/%s/%s?limit=1" - % (self.room, self.parent_id, RelationTypes.REPLACE), + f"/_matrix/client/unstable/rooms/{self.room}/aggregations" + f"/{self.parent_id}/{RelationTypes.REPLACE}?limit=1", access_token=self.user_token, ) self.assertEqual(400, channel.code, channel.json_body) @@ -986,9 +999,7 @@ def assert_bundle(event_json: JsonDict) -> None: # Request sync, but limit the timeline so it becomes limited (and includes # bundled aggregations). - filter = urllib.parse.quote_plus( - '{"room": {"timeline": {"limit": 2}}}'.encode() - ) + filter = urllib.parse.quote_plus(b'{"room": {"timeline": {"limit": 2}}}') channel = self.make_request( "GET", f"/sync?filter={filter}", access_token=self.user_token ) @@ -1053,7 +1064,7 @@ def test_multi_edit(self) -> None: channel = self.make_request( "GET", - "/rooms/%s/event/%s" % (self.room, self.parent_id), + f"/rooms/{self.room}/event/{self.parent_id}", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) @@ -1096,7 +1107,7 @@ def test_edit_reply(self) -> None: channel = self.make_request( "GET", - "/rooms/%s/event/%s" % (self.room, reply), + f"/rooms/{self.room}/event/{reply}", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) @@ -1198,7 +1209,7 @@ def test_edit_edit(self) -> None: # Request the original event. channel = self.make_request( "GET", - "/rooms/%s/event/%s" % (self.room, self.parent_id), + f"/rooms/{self.room}/event/{self.parent_id}", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) @@ -1217,102 +1228,6 @@ def test_edit_edit(self) -> None: {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict ) - def test_relations_redaction_redacts_edits(self) -> None: - """Test that edits of an event are redacted when the original event - is redacted. 
- """ - # Send a new event - res = self.helper.send(self.room, body="Heyo!", tok=self.user_token) - original_event_id = res["event_id"] - - # Add a relation - channel = self._send_relation( - RelationTypes.REPLACE, - "m.room.message", - parent_id=original_event_id, - content={ - "msgtype": "m.text", - "body": "Wibble", - "m.new_content": {"msgtype": "m.text", "body": "First edit"}, - }, - ) - self.assertEqual(200, channel.code, channel.json_body) - - # Check the relation is returned - channel = self.make_request( - "GET", - "/_matrix/client/unstable/rooms/%s/relations/%s/m.replace/m.room.message" - % (self.room, original_event_id), - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - - self.assertIn("chunk", channel.json_body) - self.assertEqual(len(channel.json_body["chunk"]), 1) - - # Redact the original event - channel = self.make_request( - "PUT", - "/rooms/%s/redact/%s/%s" - % (self.room, original_event_id, "test_relations_redaction_redacts_edits"), - access_token=self.user_token, - content="{}", - ) - self.assertEqual(200, channel.code, channel.json_body) - - # Try to check for remaining m.replace relations - channel = self.make_request( - "GET", - "/_matrix/client/unstable/rooms/%s/relations/%s/m.replace/m.room.message" - % (self.room, original_event_id), - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - - # Check that no relations are returned - self.assertIn("chunk", channel.json_body) - self.assertEqual(channel.json_body["chunk"], []) - - def test_aggregations_redaction_prevents_access_to_aggregations(self) -> None: - """Test that annotations of an event are redacted when the original event - is redacted. - """ - # Send a new event - res = self.helper.send(self.room, body="Hello!", tok=self.user_token) - original_event_id = res["event_id"] - - # Add a relation - channel = self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", key="👍", parent_id=original_event_id - ) - self.assertEqual(200, channel.code, channel.json_body) - - # Redact the original - channel = self.make_request( - "PUT", - "/rooms/%s/redact/%s/%s" - % ( - self.room, - original_event_id, - "test_aggregations_redaction_prevents_access_to_aggregations", - ), - access_token=self.user_token, - content="{}", - ) - self.assertEqual(200, channel.code, channel.json_body) - - # Check that aggregations returns zero - channel = self.make_request( - "GET", - "/_matrix/client/unstable/rooms/%s/aggregations/%s/m.annotation/m.reaction" - % (self.room, original_event_id), - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - - self.assertIn("chunk", channel.json_body) - self.assertEqual(channel.json_body["chunk"], []) - def test_unknown_relations(self) -> None: """Unknown relations should be accepted.""" channel = self._send_relation("m.relation.test", "m.room.test") @@ -1321,8 +1236,7 @@ def test_unknown_relations(self) -> None: channel = self.make_request( "GET", - "/_matrix/client/unstable/rooms/%s/relations/%s?limit=1" - % (self.room, self.parent_id), + f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) @@ -1343,7 +1257,7 @@ def test_unknown_relations(self) -> None: # When bundling the unknown relation is not included. 
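# Aside, an illustrative sketch rather than part of this patch: with the helper
# now living on BaseRelationsTestCase, further test cases can reuse it directly.
# The class and method names below are hypothetical examples only.
class ExampleRelationsTestCase(BaseRelationsTestCase):
    def test_annotation_is_accepted(self) -> None:
        # Send an annotation relating to self.parent_id via the shared helper.
        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍")
        # Check that the homeserver accepted the relation.
        self.assertEqual(200, channel.code, channel.json_body)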
channel = self.make_request( "GET", - "/rooms/%s/event/%s" % (self.room, self.parent_id), + f"/rooms/{self.room}/event/{self.parent_id}", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) @@ -1352,8 +1266,7 @@ def test_unknown_relations(self) -> None: # But unknown relations can be directly queried. channel = self.make_request( "GET", - "/_matrix/client/unstable/rooms/%s/aggregations/%s?limit=1" - % (self.room, self.parent_id), + f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}?limit=1", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) @@ -1369,58 +1282,6 @@ def _find_event_in_chunk(self, events: List[JsonDict]) -> JsonDict: raise AssertionError(f"Event {self.parent_id} not found in chunk") - def _send_relation( - self, - relation_type: str, - event_type: str, - key: Optional[str] = None, - content: Optional[dict] = None, - access_token: Optional[str] = None, - parent_id: Optional[str] = None, - ) -> FakeChannel: - """Helper function to send a relation pointing at `self.parent_id` - - Args: - relation_type: One of `RelationTypes` - event_type: The type of the event to create - key: The aggregation key used for m.annotation relation type. - content: The content of the created event. Will be modified to configure - the m.relates_to key based on the other provided parameters. - access_token: The access token used to send the relation, defaults - to `self.user_token` - parent_id: The event_id this relation relates to. If None, then self.parent_id - - Returns: - FakeChannel - """ - if not access_token: - access_token = self.user_token - - original_id = parent_id if parent_id else self.parent_id - - if content is None: - content = {} - content["m.relates_to"] = { - "event_id": original_id, - "rel_type": relation_type, - } - if key is not None: - content["m.relates_to"]["key"] = key - - channel = self.make_request( - "POST", - f"/_matrix/client/v3/rooms/{self.room}/send/{event_type}", - content, - access_token=access_token, - ) - return channel - - def _create_user(self, localpart: str) -> Tuple[str, str]: - user_id = self.register_user(localpart, "abc123") - access_token = self.login(localpart, "abc123") - - return user_id, access_token - def test_background_update(self) -> None: """Test the event_arbitrary_relations background update.""" channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") @@ -1482,3 +1343,112 @@ def test_background_update(self) -> None: [ev["event_id"] for ev in channel.json_body["chunk"]], [annotation_event_id_good, thread_event_id], ) + + +class RelationRedactionTestCase(BaseRelationsTestCase): + """Test the behaviour of relations when the parent or child event is redacted.""" + + def _redact(self, event_id: str) -> None: + channel = self.make_request( + "POST", + f"/_matrix/client/r0/rooms/{self.room}/redact/{event_id}", + access_token=self.user_token, + content={}, + ) + self.assertEqual(200, channel.code, channel.json_body) + + def test_redact_relation_annotation(self) -> None: + """Test that annotations of an event are properly handled after the + annotation is redacted. 
+ """ + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + self.assertEqual(200, channel.code, channel.json_body) + to_redact_event_id = channel.json_body["event_id"] + + channel = self._send_relation( + RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token + ) + self.assertEqual(200, channel.code, channel.json_body) + + # Redact one of the reactions. + self._redact(to_redact_event_id) + + # Ensure that the aggregations are correct. + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + + self.assertEqual( + channel.json_body, + {"chunk": [{"type": "m.reaction", "key": "a", "count": 1}]}, + ) + + def test_redact_relation_edit(self) -> None: + """Test that edits of an event are redacted when the original event + is redacted. + """ + # Add a relation + channel = self._send_relation( + RelationTypes.REPLACE, + "m.room.message", + parent_id=self.parent_id, + content={ + "msgtype": "m.text", + "body": "Wibble", + "m.new_content": {"msgtype": "m.text", "body": "First edit"}, + }, + ) + self.assertEqual(200, channel.code, channel.json_body) + + # Check the relation is returned + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/relations" + f"/{self.parent_id}/m.replace/m.room.message", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + + self.assertIn("chunk", channel.json_body) + self.assertEqual(len(channel.json_body["chunk"]), 1) + + # Redact the original event + self._redact(self.parent_id) + + # Try to check for remaining m.replace relations + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/relations" + f"/{self.parent_id}/m.replace/m.room.message", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + + # Check that no relations are returned + self.assertIn("chunk", channel.json_body) + self.assertEqual(channel.json_body["chunk"], []) + + def test_redact_parent(self) -> None: + """Test that annotations of an event are redacted when the original event + is redacted. + """ + # Add a relation + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") + self.assertEqual(200, channel.code, channel.json_body) + + # Redact the original event. + self._redact(self.parent_id) + + # Check that aggregations returns zero + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}/m.annotation/m.reaction", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + + self.assertIn("chunk", channel.json_body) + self.assertEqual(channel.json_body["chunk"], []) From 7317b0be82e31d7b41be64e2fea92aad428283d8 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Wed, 2 Mar 2022 11:59:53 +0000 Subject: [PATCH 330/474] Tweak changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index a81d0a4b1443..542390592fa8 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -23,7 +23,7 @@ Bugfixes -------- - Fix a bug introduced in Synapse 1.48.0 where an edit of the latest event in a thread would not be properly applied to the thread summary. 
([\#11992](https://github.com/matrix-org/synapse/issues/11992)) -- Fix long-standing bug where `get_rooms_for_user` was not correctly invalidated for remote users when the server left a room. ([\#11999](https://github.com/matrix-org/synapse/issues/11999)) +- Fix long-standing bug where the `get_rooms_for_user` cache was not correctly invalidated for remote users when the server left a room. ([\#11999](https://github.com/matrix-org/synapse/issues/11999)) - Fix a 500 error with Postgres when looking backwards with the [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) `/timestamp_to_event?dir=b` endpoint. ([\#12024](https://github.com/matrix-org/synapse/issues/12024)) - Properly fix a long-standing bug where wrong data could be inserted into the `event_search` table when using SQLite. This could block running `synapse_port_db` with an `argument of type 'int' is not iterable` error. This bug was partially fixed by a change in Synapse 1.44.0. ([\#12037](https://github.com/matrix-org/synapse/issues/12037)) - Fix slow performance of `/logout` in some cases where refresh tokens are in use. The slowness existed since the initial implementation of refresh tokens in version 1.38.0. ([\#12056](https://github.com/matrix-org/synapse/issues/12056)) From 3b9142f7f462f23eeb754eca6003f127bcc62271 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Wed, 2 Mar 2022 12:09:48 +0000 Subject: [PATCH 331/474] Reword changelog line about URL previews --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 542390592fa8..0a87f5cd42a7 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -9,7 +9,7 @@ Features -------- - Add support for [MSC3202](https://github.com/matrix-org/matrix-doc/pull/3202): sending one-time key counts and fallback key usage states to Application Services. ([\#11617](https://github.com/matrix-org/synapse/issues/11617)) -- Improve the preview that is produced when generating URL previews for some web pages. Contributed by @AndrewRyanChama. ([\#11985](https://github.com/matrix-org/synapse/issues/11985)) +- Improve the generated URL previews for some web pages. Contributed by @AndrewRyanChama. ([\#11985](https://github.com/matrix-org/synapse/issues/11985)) - Track cache invalidations in Prometheus metrics, as already happens for cache eviction based on size or time. ([\#12000](https://github.com/matrix-org/synapse/issues/12000)) - Implement experimental support for [MSC3720](https://github.com/matrix-org/matrix-doc/pull/3720) (account status endpoints). ([\#12001](https://github.com/matrix-org/synapse/issues/12001), [\#12067](https://github.com/matrix-org/synapse/issues/12067)) - Enable modules to set a custom display name when registering a user. 
([\#12009](https://github.com/matrix-org/synapse/issues/12009)) From f3f0ab10fe766c766dedf9d80e4ef198e3e45c09 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 2 Mar 2022 13:00:16 +0000 Subject: [PATCH 332/474] Move scripts directory inside synapse, exposing as setuptools entry_points (#12118) * Two scripts are basically entry_points already * Move and rename scripts/* to synapse/_scripts/*.py * Delete sync_room_to_group.pl * Expose entry points in setup.py * Update linter script and config * Fixup scripts & docs mentioning scripts that moved Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> --- .ci/scripts/test_export_data_command.sh | 4 +- .ci/scripts/test_synapse_port_db.sh | 12 ++--- .dockerignore | 1 - MANIFEST.in | 1 - changelog.d/12118.misc | 1 + docker/Dockerfile | 1 - docs/development/database_schema.md | 6 +-- docs/usage/administration/admin_api/README.md | 2 +- mypy.ini | 4 ++ scripts-dev/generate_sample_config | 10 ++--- scripts-dev/lint.sh | 7 --- scripts-dev/make_full_schema.sh | 6 +-- scripts/register_new_matrix_user | 19 -------- scripts/synapse_review_recent_signups | 19 -------- scripts/sync_room_to_group.pl | 45 ------------------- setup.py | 14 +++++- snap/snapcraft.yaml | 2 +- .../_scripts/export_signing_key.py | 7 ++- .../_scripts/generate_config.py | 7 ++- .../_scripts/generate_log_config.py | 7 ++- .../_scripts}/generate_signing_key.py | 7 ++- .../_scripts/hash_password.py | 12 +++-- .../move_remote_media_to_new_store.py | 2 +- .../_scripts/synapse_port_db.py | 6 ++- .../_scripts/update_synapse_database.py | 0 synapse/config/_base.py | 2 +- tox.ini | 8 ---- 27 files changed, 77 insertions(+), 135 deletions(-) create mode 100644 changelog.d/12118.misc delete mode 100755 scripts/register_new_matrix_user delete mode 100755 scripts/synapse_review_recent_signups delete mode 100755 scripts/sync_room_to_group.pl rename scripts/export_signing_key => synapse/_scripts/export_signing_key.py (99%) rename scripts/generate_config => synapse/_scripts/generate_config.py (98%) rename scripts/generate_log_config => synapse/_scripts/generate_log_config.py (98%) rename {scripts => synapse/_scripts}/generate_signing_key.py (97%) rename scripts/hash_password => synapse/_scripts/hash_password.py (96%) rename {scripts => synapse/_scripts}/move_remote_media_to_new_store.py (97%) rename scripts/synapse_port_db => synapse/_scripts/synapse_port_db.py (99%) rename scripts/update_synapse_database => synapse/_scripts/update_synapse_database.py (100%) diff --git a/.ci/scripts/test_export_data_command.sh b/.ci/scripts/test_export_data_command.sh index ab96387a0aef..224cae921658 100755 --- a/.ci/scripts/test_export_data_command.sh +++ b/.ci/scripts/test_export_data_command.sh @@ -21,7 +21,7 @@ python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml echo "--- Prepare test database" # Make sure the SQLite3 database is using the latest schema and has no pending background update. 
-scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates +update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates # Run the export-data command on the sqlite test database python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \ --output-directory /tmp/export_data @@ -41,7 +41,7 @@ fi # Port the SQLite database to postgres so we can check the command works against postgres echo "+++ Port SQLite3 database to postgres" -scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml +synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml # Run the export-data command on postgres database python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-data @anon-20191002_181700-832:localhost:8800 \ diff --git a/.ci/scripts/test_synapse_port_db.sh b/.ci/scripts/test_synapse_port_db.sh index 797904e64ca5..91bd966f32bd 100755 --- a/.ci/scripts/test_synapse_port_db.sh +++ b/.ci/scripts/test_synapse_port_db.sh @@ -25,17 +25,19 @@ python -m synapse.app.homeserver --generate-keys -c .ci/sqlite-config.yaml echo "--- Prepare test database" # Make sure the SQLite3 database is using the latest schema and has no pending background update. -scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates +update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates # Create the PostgreSQL database. .ci/scripts/postgres_exec.py "CREATE DATABASE synapse" echo "+++ Run synapse_port_db against test database" -coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml +# TODO: this invocation of synapse_port_db (and others below) used to be prepended with `coverage run`, +# but coverage seems unable to find the entrypoints installed by `pip install -e .`. +synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml # We should be able to run twice against the same database. echo "+++ Run synapse_port_db a second time" -coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml +synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml ##### echo "--- Prepare empty SQLite database" # we do this by deleting the sqlite db, and then doing the same again. rm .ci/test_db.db -scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates +update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates # re-create the PostgreSQL database.
.ci/scripts/postgres_exec.py \ @@ -54,4 +56,4 @@ scripts/update_synapse_database --database-config .ci/sqlite-config.yaml --run-b "CREATE DATABASE synapse" echo "+++ Run synapse_port_db against empty database" -coverage run scripts/synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml +synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml diff --git a/.dockerignore b/.dockerignore index f6c638b0a221..617f7015971b 100644 --- a/.dockerignore +++ b/.dockerignore @@ -3,7 +3,6 @@ # things to include !docker -!scripts !synapse !MANIFEST.in !README.rst diff --git a/MANIFEST.in b/MANIFEST.in index 76d14eb642b2..7e903518e152 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -17,7 +17,6 @@ recursive-include synapse/storage *.txt recursive-include synapse/storage *.md recursive-include docs * -recursive-include scripts * recursive-include scripts-dev * recursive-include synapse *.pyi recursive-include tests *.py diff --git a/changelog.d/12118.misc b/changelog.d/12118.misc new file mode 100644 index 000000000000..a2c397d90755 --- /dev/null +++ b/changelog.d/12118.misc @@ -0,0 +1 @@ +Move scripts to Synapse package and expose as setuptools entry points. diff --git a/docker/Dockerfile b/docker/Dockerfile index a8bb9b0e7f7f..327275a9cae6 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -46,7 +46,6 @@ RUN \ && rm -rf /var/lib/apt/lists/* # Copy just what we need to pip install -COPY scripts /synapse/scripts/ COPY MANIFEST.in README.rst setup.py synctl /synapse/ COPY synapse/__init__.py /synapse/synapse/__init__.py COPY synapse/python_dependencies.py /synapse/synapse/python_dependencies.py diff --git a/docs/development/database_schema.md b/docs/development/database_schema.md index a767d3af9fd3..d996a7caa2c6 100644 --- a/docs/development/database_schema.md +++ b/docs/development/database_schema.md @@ -158,9 +158,9 @@ same as integers. There are three separate aspects to this: * Any new boolean column must be added to the `BOOLEAN_COLUMNS` list in - `scripts/synapse_port_db`. This tells the port script to cast the integer - value from SQLite to a boolean before writing the value to the postgres - database. + `synapse/_scripts/synapse_port_db.py`. This tells the port script to cast + the integer value from SQLite to a boolean before writing the value to the + postgres database. * Before SQLite 3.23, `TRUE` and `FALSE` were not recognised as constants by SQLite, and the `IS [NOT] TRUE`/`IS [NOT] FALSE` operators were not diff --git a/docs/usage/administration/admin_api/README.md b/docs/usage/administration/admin_api/README.md index 2fca96f8be4e..3cbedc5dfa30 100644 --- a/docs/usage/administration/admin_api/README.md +++ b/docs/usage/administration/admin_api/README.md @@ -12,7 +12,7 @@ UPDATE users SET admin = 1 WHERE name = '@foo:bar.com'; ``` A new server admin user can also be created using the `register_new_matrix_user` -command. This is a script that is located in the `scripts/` directory, or possibly +command. This is a script that is distributed as part of synapse. It is possibly already on your `$PATH` depending on how Synapse was installed. Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings. 
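For readers unfamiliar with the mechanism this commit leans on: a `console_scripts` entry point maps a command name to a `module:callable` pair, and `pip install` generates a wrapper script on `$PATH` for each one. A minimal, self-contained sketch follows; the package and command names are illustrative, not Synapse's.

# setup.py for a hypothetical package exposing one console script.
from setuptools import setup

setup(
    name="example-package",
    version="0.1.0",
    packages=["example_package"],
    entry_points={
        "console_scripts": [
            # "<command name on $PATH> = <importable module>:<callable>"
            "example-tool = example_package.cli:main",
        ]
    },
)

After `pip install .`, running `example-tool` invokes `example_package.cli.main()`, which is why each script moved by this commit gains an explicit `main()` function.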
diff --git a/mypy.ini b/mypy.ini index 38ff78760931..6b1e995e64e8 100644 --- a/mypy.ini +++ b/mypy.ini @@ -23,6 +23,10 @@ files = # https://docs.python.org/3/library/re.html#re.X exclude = (?x) ^( + |synapse/_scripts/export_signing_key.py + |synapse/_scripts/move_remote_media_to_new_store.py + |synapse/_scripts/synapse_port_db.py + |synapse/_scripts/update_synapse_database.py |synapse/storage/databases/__init__.py |synapse/storage/databases/main/__init__.py |synapse/storage/databases/main/cache.py diff --git a/scripts-dev/generate_sample_config b/scripts-dev/generate_sample_config index 4cd1d1d5b829..185e277933e3 100755 --- a/scripts-dev/generate_sample_config +++ b/scripts-dev/generate_sample_config @@ -10,19 +10,19 @@ SAMPLE_CONFIG="docs/sample_config.yaml" SAMPLE_LOG_CONFIG="docs/sample_log_config.yaml" check() { - diff -u "$SAMPLE_LOG_CONFIG" <(./scripts/generate_log_config) >/dev/null || return 1 + diff -u "$SAMPLE_LOG_CONFIG" <(synapse/_scripts/generate_log_config.py) >/dev/null || return 1 } if [ "$1" == "--check" ]; then - diff -u "$SAMPLE_CONFIG" <(./scripts/generate_config --header-file docs/.sample_config_header.yaml) >/dev/null || { + diff -u "$SAMPLE_CONFIG" <(synapse/_scripts/generate_config.py --header-file docs/.sample_config_header.yaml) >/dev/null || { echo -e "\e[1m\e[31m$SAMPLE_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config\`.\e[0m" >&2 exit 1 } - diff -u "$SAMPLE_LOG_CONFIG" <(./scripts/generate_log_config) >/dev/null || { + diff -u "$SAMPLE_LOG_CONFIG" <(synapse/_scripts/generate_log_config.py) >/dev/null || { echo -e "\e[1m\e[31m$SAMPLE_LOG_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config\`.\e[0m" >&2 exit 1 } else - ./scripts/generate_config --header-file docs/.sample_config_header.yaml -o "$SAMPLE_CONFIG" - ./scripts/generate_log_config -o "$SAMPLE_LOG_CONFIG" + synapse/_scripts/generate_config.py --header-file docs/.sample_config_header.yaml -o "$SAMPLE_CONFIG" + synapse/_scripts/generate_log_config.py -o "$SAMPLE_LOG_CONFIG" fi diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh index b6554a73c115..df4d4934d06f 100755 --- a/scripts-dev/lint.sh +++ b/scripts-dev/lint.sh @@ -84,13 +84,6 @@ else files=( "synapse" "docker" "tests" # annoyingly, black doesn't find these so we have to list them - "scripts/export_signing_key" - "scripts/generate_config" - "scripts/generate_log_config" - "scripts/hash_password" - "scripts/register_new_matrix_user" - "scripts/synapse_port_db" - "scripts/update_synapse_database" "scripts-dev" "scripts-dev/build_debian_packages" "scripts-dev/sign_json" diff --git a/scripts-dev/make_full_schema.sh b/scripts-dev/make_full_schema.sh index c3c90f4ec637..f0e22d4ca25b 100755 --- a/scripts-dev/make_full_schema.sh +++ b/scripts-dev/make_full_schema.sh @@ -147,7 +147,7 @@ python -m synapse.app.homeserver --generate-keys -c "$SQLITE_CONFIG" # Make sure the SQLite3 database is using the latest schema and has no pending background update. echo "Running db background jobs..." -scripts/update_synapse_database --database-config --run-background-updates "$SQLITE_CONFIG" +synapse/_scripts/update_synapse_database.py --database-config --run-background-updates "$SQLITE_CONFIG" # Create the PostgreSQL database. echo "Creating postgres database..." @@ -156,10 +156,10 @@ createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_DB_NAME" echo "Copying data from SQLite3 to Postgres with synapse_port_db..." 
if [ -z "$COVERAGE" ]; then # No coverage needed - scripts/synapse_port_db --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG" + synapse/_scripts/synapse_port_db.py --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG" else # Coverage desired - coverage run scripts/synapse_port_db --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG" + coverage run synapse/_scripts/synapse_port_db.py --sqlite-database "$SQLITE_DB" --postgres-config "$POSTGRES_CONFIG" fi # Delete schema_version, applied_schema_deltas and applied_module_schemas tables diff --git a/scripts/register_new_matrix_user b/scripts/register_new_matrix_user deleted file mode 100755 index 00104b9d62cb..000000000000 --- a/scripts/register_new_matrix_user +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env python -# Copyright 2015, 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from synapse._scripts.register_new_matrix_user import main - -if __name__ == "__main__": - main() diff --git a/scripts/synapse_review_recent_signups b/scripts/synapse_review_recent_signups deleted file mode 100755 index a36d46e14cde..000000000000 --- a/scripts/synapse_review_recent_signups +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env python -# Copyright 2021 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from synapse._scripts.review_recent_signups import main - -if __name__ == "__main__": - main() diff --git a/scripts/sync_room_to_group.pl b/scripts/sync_room_to_group.pl deleted file mode 100755 index f0c2dfadfa11..000000000000 --- a/scripts/sync_room_to_group.pl +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env perl - -use strict; -use warnings; - -use JSON::XS; -use LWP::UserAgent; -use URI::Escape; - -if (@ARGV < 4) { - die "usage: $0 \n"; -} - -my ($hs, $access_token, $room_id, $group_id) = @ARGV; -my $ua = LWP::UserAgent->new(); -$ua->timeout(10); - -if ($room_id =~ /^#/) { - $room_id = uri_escape($room_id); - $room_id = decode_json($ua->get("${hs}/_matrix/client/r0/directory/room/${room_id}?access_token=${access_token}")->decoded_content)->{room_id}; -} - -my $room_users = [ keys %{decode_json($ua->get("${hs}/_matrix/client/r0/rooms/${room_id}/joined_members?access_token=${access_token}")->decoded_content)->{joined}} ]; -my $group_users = [ - (map { $_->{user_id} } @{decode_json($ua->get("${hs}/_matrix/client/unstable/groups/${group_id}/users?access_token=${access_token}" )->decoded_content)->{chunk}}), - (map { $_->{user_id} } @{decode_json($ua->get("${hs}/_matrix/client/unstable/groups/${group_id}/invited_users?access_token=${access_token}" )->decoded_content)->{chunk}}), -]; - -die "refusing to sync from empty room" unless (@$room_users); -die "refusing to sync to empty group" unless (@$group_users); - -my $diff = {}; -foreach my $user (@$room_users) { $diff->{$user}++ } -foreach my $user (@$group_users) { $diff->{$user}-- } - -foreach my $user (keys %$diff) { - if ($diff->{$user} == 1) { - warn "inviting $user"; - print STDERR $ua->put("${hs}/_matrix/client/unstable/groups/${group_id}/admin/users/invite/${user}?access_token=${access_token}", Content=>'{}')->status_line."\n"; - } - elsif ($diff->{$user} == -1) { - warn "removing $user"; - print STDERR $ua->put("${hs}/_matrix/client/unstable/groups/${group_id}/admin/users/remove/${user}?access_token=${access_token}", Content=>'{}')->status_line."\n"; - } -} diff --git a/setup.py b/setup.py index 26f4650348d5..318df16766ec 100755 --- a/setup.py +++ b/setup.py @@ -15,7 +15,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import glob import os from typing import Any, Dict @@ -153,8 +152,19 @@ def exec_file(path_segments): python_requires="~=3.7", entry_points={ "console_scripts": [ + # Application "synapse_homeserver = synapse.app.homeserver:main", "synapse_worker = synapse.app.generic_worker:main", + # Scripts + "export_signing_key = synapse._scripts.export_signing_key:main", + "generate_config = synapse._scripts.generate_config:main", + "generate_log_config = synapse._scripts.generate_log_config:main", + "generate_signing_key = synapse._scripts.generate_signing_key:main", + "hash_password = synapse._scripts.hash_password:main", + "register_new_matrix_user = synapse._scripts.register_new_matrix_user:main", + "synapse_port_db = synapse._scripts.synapse_port_db:main", + "synapse_review_recent_signups = synapse._scripts.review_recent_signups:main", + "update_synapse_database = synapse._scripts.update_synapse_database:main", ] }, classifiers=[ @@ -167,6 +177,6 @@ def exec_file(path_segments): "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", ], - scripts=["synctl"] + glob.glob("scripts/*"), + scripts=["synctl"], cmdclass={"test": TestCommand}, ) diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index 9a01152c156b..dd4c8478d59d 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -20,7 +20,7 @@ apps: generate-config: command: generate_config generate-signing-key: - command: generate_signing_key.py + command: generate_signing_key register-new-matrix-user: command: register_new_matrix_user plugs: [network] diff --git a/scripts/export_signing_key b/synapse/_scripts/export_signing_key.py similarity index 99% rename from scripts/export_signing_key rename to synapse/_scripts/export_signing_key.py index bf0139bd64b8..3d254348f165 100755 --- a/scripts/export_signing_key +++ b/synapse/_scripts/export_signing_key.py @@ -50,7 +50,7 @@ def format_for_config(public_key: nacl.signing.VerifyKey, expiry_ts: int): ) -if __name__ == "__main__": +def main(): parser = argparse.ArgumentParser() parser.add_argument( @@ -85,7 +85,6 @@ def format_for_config(public_key: nacl.signing.VerifyKey, expiry_ts: int): else format_plain ) - keys = [] for file in args.key_file: try: res = read_signing_keys(file) @@ -98,3 +97,7 @@ def format_for_config(public_key: nacl.signing.VerifyKey, expiry_ts: int): res = [] for key in res: formatter(get_verify_key(key)) + + +if __name__ == "__main__": + main() diff --git a/scripts/generate_config b/synapse/_scripts/generate_config.py similarity index 98% rename from scripts/generate_config rename to synapse/_scripts/generate_config.py index 931b40c045f3..75fce20b1234 100755 --- a/scripts/generate_config +++ b/synapse/_scripts/generate_config.py @@ -6,7 +6,8 @@ from synapse.config.homeserver import HomeServerConfig -if __name__ == "__main__": + +def main(): parser = argparse.ArgumentParser() parser.add_argument( "--config-dir", @@ -76,3 +77,7 @@ shutil.copyfileobj(args.header_file, args.output_file) args.output_file.write(conf) + + +if __name__ == "__main__": + main() diff --git a/scripts/generate_log_config b/synapse/_scripts/generate_log_config.py similarity index 98% rename from scripts/generate_log_config rename to synapse/_scripts/generate_log_config.py index e72a0dafb769..82fc7631401a 100755 --- a/scripts/generate_log_config +++ b/synapse/_scripts/generate_log_config.py @@ -19,7 +19,8 @@ from synapse.config.logger import DEFAULT_LOG_CONFIG -if __name__ == "__main__": + +def main(): parser = argparse.ArgumentParser() parser.add_argument( @@ -42,3 +43,7 @@ 
out = args.output_file out.write(DEFAULT_LOG_CONFIG.substitute(log_file=args.log_file)) out.flush() + + +if __name__ == "__main__": + main() diff --git a/scripts/generate_signing_key.py b/synapse/_scripts/generate_signing_key.py similarity index 97% rename from scripts/generate_signing_key.py rename to synapse/_scripts/generate_signing_key.py index 07df25a8099d..bc26d25bfd36 100755 --- a/scripts/generate_signing_key.py +++ b/synapse/_scripts/generate_signing_key.py @@ -19,7 +19,8 @@ from synapse.util.stringutils import random_string -if __name__ == "__main__": + +def main(): parser = argparse.ArgumentParser() parser.add_argument( @@ -34,3 +35,7 @@ key_id = "a_" + random_string(4) key = (generate_signing_key(key_id),) write_signing_keys(args.output_file, key) + + +if __name__ == "__main__": + main() diff --git a/scripts/hash_password b/synapse/_scripts/hash_password.py similarity index 96% rename from scripts/hash_password rename to synapse/_scripts/hash_password.py index 1d6fb0d70022..708640c7de8d 100755 --- a/scripts/hash_password +++ b/synapse/_scripts/hash_password.py @@ -8,9 +8,6 @@ import bcrypt import yaml -bcrypt_rounds = 12 -password_pepper = "" - def prompt_for_pass(): password = getpass.getpass("Password: ") @@ -26,7 +23,10 @@ def prompt_for_pass(): return password -if __name__ == "__main__": +def main(): + bcrypt_rounds = 12 + password_pepper = "" + parser = argparse.ArgumentParser( description=( "Calculate the hash of a new password, so that passwords can be reset" @@ -77,3 +77,7 @@ def prompt_for_pass(): ).decode("ascii") print(hashed) + + +if __name__ == "__main__": + main() diff --git a/scripts/move_remote_media_to_new_store.py b/synapse/_scripts/move_remote_media_to_new_store.py similarity index 97% rename from scripts/move_remote_media_to_new_store.py rename to synapse/_scripts/move_remote_media_to_new_store.py index 875aa4781f49..9667d95dfe44 100755 --- a/scripts/move_remote_media_to_new_store.py +++ b/synapse/_scripts/move_remote_media_to_new_store.py @@ -28,7 +28,7 @@ To use, pipe the above into:: - PYTHON_PATH=. ./scripts/move_remote_media_to_new_store.py + PYTHON_PATH=. synapse/_scripts/move_remote_media_to_new_store.py """ import argparse diff --git a/scripts/synapse_port_db b/synapse/_scripts/synapse_port_db.py similarity index 99% rename from scripts/synapse_port_db rename to synapse/_scripts/synapse_port_db.py index db354b3c8c5c..c38666da18e6 100755 --- a/scripts/synapse_port_db +++ b/synapse/_scripts/synapse_port_db.py @@ -1146,7 +1146,7 @@ def set_state(self, state): ############################################## -if __name__ == "__main__": +def main(): parser = argparse.ArgumentParser( description="A script to port an existing synapse SQLite database to" " a new PostgreSQL database." @@ -1251,3 +1251,7 @@ def run(): sys.stderr.write(end_error) sys.exit(5) + + +if __name__ == "__main__": + main() diff --git a/scripts/update_synapse_database b/synapse/_scripts/update_synapse_database.py similarity index 100% rename from scripts/update_synapse_database rename to synapse/_scripts/update_synapse_database.py diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 1265738dc12e..8e19e2fc2668 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -383,7 +383,7 @@ def generate_config( Build a default configuration file This is used when the user explicitly asks us to generate a config file - (eg with --generate_config). + (eg with --generate-config). Args: config_dir_path: The path where the config files are kept. 
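A companion note to the renames above: each moved script now ends with a `main()` function and an `if __name__ == "__main__"` guard because setuptools generates a thin wrapper for every entry point. The generated file varies between setuptools versions, but its shape is roughly the following sketch (shown for the `generate_config` entry point; simplified, not the literal generated code).

#!/usr/bin/env python
# Approximate shape of the wrapper pip places on $PATH for
# "generate_config = synapse._scripts.generate_config:main".
import sys

from synapse._scripts.generate_config import main

if __name__ == "__main__":
    sys.exit(main())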
diff --git a/scripts/hash_password b/synapse/_scripts/hash_password.py
similarity index 96%
rename from scripts/hash_password
rename to synapse/_scripts/hash_password.py
index 1d6fb0d70022..708640c7de8d 100755
--- a/scripts/hash_password
+++ b/synapse/_scripts/hash_password.py
@@ -8,9 +8,6 @@
 import bcrypt
 import yaml

-bcrypt_rounds = 12
-password_pepper = ""
-

 def prompt_for_pass():
     password = getpass.getpass("Password: ")
@@ -26,7 +23,10 @@ def prompt_for_pass():
     return password


-if __name__ == "__main__":
+def main():
+    bcrypt_rounds = 12
+    password_pepper = ""
+
     parser = argparse.ArgumentParser(
         description=(
             "Calculate the hash of a new password, so that passwords can be reset"
@@ -77,3 +77,7 @@ def prompt_for_pass():
     ).decode("ascii")

     print(hashed)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/scripts/move_remote_media_to_new_store.py b/synapse/_scripts/move_remote_media_to_new_store.py
similarity index 97%
rename from scripts/move_remote_media_to_new_store.py
rename to synapse/_scripts/move_remote_media_to_new_store.py
index 875aa4781f49..9667d95dfe44 100755
--- a/scripts/move_remote_media_to_new_store.py
+++ b/synapse/_scripts/move_remote_media_to_new_store.py
@@ -28,7 +28,7 @@
 To use, pipe the above into::

-    PYTHON_PATH=. ./scripts/move_remote_media_to_new_store.py
+    PYTHON_PATH=. synapse/_scripts/move_remote_media_to_new_store.py
 """

 import argparse
diff --git a/scripts/synapse_port_db b/synapse/_scripts/synapse_port_db.py
similarity index 99%
rename from scripts/synapse_port_db
rename to synapse/_scripts/synapse_port_db.py
index db354b3c8c5c..c38666da18e6 100755
--- a/scripts/synapse_port_db
+++ b/synapse/_scripts/synapse_port_db.py
@@ -1146,7 +1146,7 @@ def set_state(self, state):
 ##############################################


-if __name__ == "__main__":
+def main():
     parser = argparse.ArgumentParser(
         description="A script to port an existing synapse SQLite database to"
         " a new PostgreSQL database."
@@ -1251,3 +1251,7 @@ def run():
             sys.stderr.write(end_error)

             sys.exit(5)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/scripts/update_synapse_database b/synapse/_scripts/update_synapse_database.py
similarity index 100%
rename from scripts/update_synapse_database
rename to synapse/_scripts/update_synapse_database.py
diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index 1265738dc12e..8e19e2fc2668 100644
--- a/synapse/config/_base.py
+++ b/synapse/config/_base.py
@@ -383,7 +383,7 @@ def generate_config(
         Build a default configuration file

         This is used when the user explicitly asks us to generate a config file
-        (eg with --generate_config).
+        (eg with --generate-config).

         Args:
            config_dir_path: The path where the config files are kept. Used to
diff --git a/tox.ini b/tox.ini
index 04b972e2c58c..8d6aa7580bb8 100644
--- a/tox.ini
+++ b/tox.ini
@@ -38,15 +38,7 @@ lint_targets =
     setup.py
     synapse
    tests
-    scripts
     # annoyingly, black doesn't find these so we have to list them
-    scripts/export_signing_key
-    scripts/generate_config
-    scripts/generate_log_config
-    scripts/hash_password
-    scripts/register_new_matrix_user
-    scripts/synapse_port_db
-    scripts/update_synapse_database
     scripts-dev
     scripts-dev/build_debian_packages
     scripts-dev/sign_json
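With this patch complete, every tool is an importable `main()` under `synapse._scripts`, so it can also be driven programmatically rather than only from an installed console script. A sketch; the `-o`/`--output-file` flag name is an assumption, since the argparse hunks above elide the argument definitions:

```
import sys
from unittest.mock import patch

# Assumes a Synapse tree containing this patch, where the script lives
# at synapse._scripts.generate_log_config and exposes main().
from synapse._scripts.generate_log_config import main

# Hypothetical flag; substitute whatever the script's parser defines.
with patch.object(sys, "argv", ["generate_log_config", "-o", "log_config.yaml"]):
    main()  # writes the default log config to log_config.yaml
```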
From 1103c5fe8a795eafc4aeedc547faa1b68d5a12f5 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Wed, 2 Mar 2022 08:18:51 -0500
Subject: [PATCH 333/474] Check if instances are lists, not sequences. (#12128)

As a str is a sequence, the checks were not granular enough and would
allow lists or strings, when only lists were valid.
---
 changelog.d/12128.misc                  | 1 +
 synapse/federation/federation_client.py | 8 ++++----
 synapse/handlers/room_summary.py        | 2 +-
 3 files changed, 6 insertions(+), 5 deletions(-)
 create mode 100644 changelog.d/12128.misc

diff --git a/changelog.d/12128.misc b/changelog.d/12128.misc
new file mode 100644
index 000000000000..0570a8e3272f
--- /dev/null
+++ b/changelog.d/12128.misc
@@ -0,0 +1 @@
+Fix data validation to compare to lists, not sequences.
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 64e595e748f4..467275b98c3c 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -1428,7 +1428,7 @@ async def send_request(

         # Validate children_state of the room.
         children_state = room.pop("children_state", [])
-        if not isinstance(children_state, Sequence):
+        if not isinstance(children_state, list):
             raise InvalidResponseError("'room.children_state' must be a list")
         if any(not isinstance(e, dict) for e in children_state):
             raise InvalidResponseError("Invalid event in 'children_state' list")
@@ -1440,14 +1440,14 @@ async def send_request(

         # Validate the children rooms.
         children = res.get("children", [])
-        if not isinstance(children, Sequence):
+        if not isinstance(children, list):
             raise InvalidResponseError("'children' must be a list")
         if any(not isinstance(r, dict) for r in children):
             raise InvalidResponseError("Invalid room in 'children' list")

         # Validate the inaccessible children.
         inaccessible_children = res.get("inaccessible_children", [])
-        if not isinstance(inaccessible_children, Sequence):
+        if not isinstance(inaccessible_children, list):
             raise InvalidResponseError("'inaccessible_children' must be a list")
         if any(not isinstance(r, str) for r in inaccessible_children):
             raise InvalidResponseError(
@@ -1630,7 +1630,7 @@ def _validate_hierarchy_event(d: JsonDict) -> None:
         raise ValueError("Invalid event: 'content' must be a dict")

     via = content.get("via")
-    if not isinstance(via, Sequence):
+    if not isinstance(via, list):
         raise ValueError("Invalid event: 'via' must be a list")
     if any(not isinstance(v, str) for v in via):
         raise ValueError("Invalid event: 'via' must be a list of strings")
diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py
index 55c2cbdba8bc..3979cbba71bd 100644
--- a/synapse/handlers/room_summary.py
+++ b/synapse/handlers/room_summary.py
@@ -857,7 +857,7 @@ def as_json(self) -> JsonDict:

 def _has_valid_via(e: EventBase) -> bool:
     via = e.content.get("via")
-    if not via or not isinstance(via, Sequence):
+    if not via or not isinstance(via, list):
         return False
     for v in via:
         if not isinstance(v, str):
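The one-line summary undersells the subtlety: `str` is itself a `Sequence`, so the old checks accepted a bare string anywhere a JSON list was required. A quick demonstration of the distinction:

```
from typing import Sequence

via = "matrix.org"  # malformed value: the field must be a list

print(isinstance(via, Sequence))  # True  -- a str slips past the old check
print(isinstance(via, list))      # False -- the stricter check rejects it
print(isinstance(["matrix.org"], list))  # True -- real lists still pass
```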
From 6d282a9c89ae9fb55fe7ccc8d0ab16bf18b206ec Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 2 Mar 2022 14:28:18 +0000
Subject: [PATCH 334/474] Make release script write correct no-op changelog
 (#12127)

As we want to include the previous version in the "No new changes..."
string.
---
 changelog.d/12127.misc |  1 +
 scripts-dev/release.py | 30 ++++++++++++++++++++++++++++--
 2 files changed, 29 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/12127.misc

diff --git a/changelog.d/12127.misc b/changelog.d/12127.misc
new file mode 100644
index 000000000000..e42eca63a873
--- /dev/null
+++ b/changelog.d/12127.misc
@@ -0,0 +1 @@
+Update release script to insert the previous version when writing "No significant changes" line in the changelog.
diff --git a/scripts-dev/release.py b/scripts-dev/release.py
index 4e1f99fee4e1..046453e65ff8 100755
--- a/scripts-dev/release.py
+++ b/scripts-dev/release.py
@@ -17,6 +17,8 @@
 """An interactive script for doing a release. See `cli()` below.
 """

+import glob
+import os
 import re
 import subprocess
 import sys
@@ -209,8 +211,8 @@ def prepare():
     with open("synapse/__init__.py", "w") as f:
         f.write(parsed_synapse_ast.dumps())

-    # Generate changelogs
-    run_until_successful("python3 -m towncrier", shell=True)
+    # Generate changelogs.
+    generate_and_write_changelog(current_version)

     # Generate debian changelogs
     if parsed_new_version.pre is not None:
@@ -523,5 +525,29 @@ class VersionSection:
     return "\n".join(version_changelog)


+def generate_and_write_changelog(current_version: version.Version):
+    # We do this by getting a draft so that we can edit it before writing to the
+    # changelog.
+    result = run_until_successful(
+        "python3 -m towncrier --draft", shell=True, capture_output=True
+    )
+    new_changes = result.stdout.decode("utf-8")
+    new_changes = new_changes.replace(
+        "No significant changes.", f"No significant changes since {current_version}."
+    )
+
+    # Prepend changes to changelog
+    with open("CHANGES.md", "r+") as f:
+        existing_content = f.read()
+        f.seek(0, 0)
+        f.write(new_changes)
+        f.write("\n")
+        f.write(existing_content)
+
+    # Remove all the news fragments
+    for f in glob.iglob("changelog.d/*.*"):
+        os.remove(f)
+
+
 if __name__ == "__main__":
     cli()
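The new `generate_and_write_changelog` renders a draft rather than letting towncrier write the file itself, which creates the window to rewrite the no-op line before anything lands in CHANGES.md. The flow, reduced to a standalone sketch (the previous-version string here is a hypothetical example):

```
import subprocess

# Render the pending changelog entries to stdout without consuming them.
draft = subprocess.run(
    ["python3", "-m", "towncrier", "--draft"],
    check=True,
    capture_output=True,
    text=True,
).stdout

# "1.53.0" stands in for whatever the previous release actually was.
draft = draft.replace(
    "No significant changes.", "No significant changes since 1.53.0."
)

# Prepend the edited section, as the new helper does.
with open("CHANGES.md", "r+") as f:
    existing = f.read()
    f.seek(0)
    f.write(draft + "\n" + existing)
```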
 You can find join times and your most complex rooms like this:

From b4461e7d8ab6cfe150f39f62aa68f7f13ef97a24 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 2 Mar 2022 16:11:16 +0000
Subject: [PATCH 335/474] Enable complexity checking in complexity checking
 docs example (#11998)

---
 changelog.d/11998.doc                         |  1 +
 ...nning_synapse_on_single_board_computers.md | 19 ++++++++++---------
 2 files changed, 11 insertions(+), 9 deletions(-)
 create mode 100644 changelog.d/11998.doc

diff --git a/changelog.d/11998.doc b/changelog.d/11998.doc
new file mode 100644
index 000000000000..33ab7b7880be
--- /dev/null
+++ b/changelog.d/11998.doc
@@ -0,0 +1 @@
+Fix complexity checking config example in [Resource Constrained Devices](https://matrix-org.github.io/synapse/v1.54/other/running_synapse_on_single_board_computers.html) docs page.
\ No newline at end of file
diff --git a/docs/other/running_synapse_on_single_board_computers.md b/docs/other/running_synapse_on_single_board_computers.md
index ea14afa8b2df..dcf96f0056ba 100644
--- a/docs/other/running_synapse_on_single_board_computers.md
+++ b/docs/other/running_synapse_on_single_board_computers.md
@@ -31,28 +31,29 @@ Anything that requires modifying the device list [#7721](https://github.com/matr
 Put the below in a new file at /etc/matrix-synapse/conf.d/sbc.yaml to override the defaults in homeserver.yaml.

 ```
-# Set to false to disable presence tracking on this homeserver.
+# Disable presence tracking, which is currently fairly resource intensive
+# More info: https://github.com/matrix-org/synapse/issues/9478
 use_presence: false

-# When this is enabled, the room "complexity" will be checked before a user
-# joins a new remote room. If it is above the complexity limit, the server will
-# disallow joining, or will instantly leave.
+# Set a small complexity limit, preventing users from joining large rooms
+# which may be resource-intensive to remain a part of.
+#
+# Note that this will not prevent users from joining smaller rooms that
+# eventually become complex.
 limit_remote_rooms:
-  # Uncomment to enable room complexity checking.
-  #enabled: true
+  enabled: true
   complexity: 3.0

 # Database configuration
 database:
+  # Use postgres for the best performance
   name: psycopg2
   args:
     user: matrix-synapse
-    # Generate a long, secure one with a password manager
+    # Generate a long, secure password using a password manager
     password: hunter2
     database: matrix-synapse
     host: localhost
-    cp_min: 5
-    cp_max: 10
 ```

 Currently the complexity is measured by [current_state_events / 500](https://github.com/matrix-org/synapse/blob/v1.20.1/synapse/storage/databases/main/events_worker.py#L986).
 You can find join times and your most complex rooms like this:

From 2ffaf30803f93273a4d8a65c9e6c3110c8433488 Mon Sep 17 00:00:00 2001
From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com>
Date: Wed, 2 Mar 2022 17:34:14 +0100
Subject: [PATCH 336/474] Add type hints to `tests/rest/client` (#12108)

* Add type hints to `tests/rest/client`
* newsfile
* fix imports
* add `test_account.py`
* Remove one type hint in `test_report_event.py`
* change `on_create_room` to `async`
* update new functions in `test_third_party_rules.py`
* Add `test_filter.py`
* add `test_rooms.py`
* change to `assertEquals` to `assertEqual`
* lint
---
 changelog.d/12108.misc                      |   1 +
 mypy.ini                                    |   6 -
 tests/rest/client/test_account.py           | 290 +++++++++++---------
 tests/rest/client/test_filter.py            |  29 +-
 tests/rest/client/test_relations.py         |   4 +-
 tests/rest/client/test_report_event.py      |  25 +-
 tests/rest/client/test_rooms.py             | 271 +++++++++---------
 tests/rest/client/test_third_party_rules.py | 108 +++++---
 tests/rest/client/test_typing.py            |  41 +--
 9 files changed, 423 insertions(+), 352 deletions(-)
 create mode 100644 changelog.d/12108.misc

diff --git a/changelog.d/12108.misc b/changelog.d/12108.misc
new file mode 100644
index 000000000000..0360dbd61edc
--- /dev/null
+++ b/changelog.d/12108.misc
@@ -0,0 +1 @@
+Add type hints to `tests/rest/client`.
diff --git a/mypy.ini b/mypy.ini
index 6b1e995e64e8..23ca4eaa5a8b 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -78,13 +78,7 @@ exclude = (?x)
   |tests/push/test_http.py
   |tests/push/test_presentable_names.py
   |tests/push/test_push_rule_evaluator.py
-  |tests/rest/client/test_account.py
-  |tests/rest/client/test_filter.py
-  |tests/rest/client/test_report_event.py
-  |tests/rest/client/test_rooms.py
-  |tests/rest/client/test_third_party_rules.py
   |tests/rest/client/test_transactions.py
-  |tests/rest/client/test_typing.py
   |tests/rest/key/v2/test_remote_key_resource.py
   |tests/rest/media/v1/test_base.py
   |tests/rest/media/v1/test_media_storage.py
diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py
index 6c4462e74a53..def836054db1 100644
--- a/tests/rest/client/test_account.py
+++ b/tests/rest/client/test_account.py
@@ -15,11 +15,12 @@
 import os
 import re
 from email.parser import Parser
-from typing import Dict, List, Optional
+from typing import Any, Dict, List, Optional, Union
 from unittest.mock import Mock

 import pkg_resources

+from twisted.internet.interfaces import IReactorTCP
 from twisted.test.proto_helpers import MemoryReactor

 import synapse.rest.admin
@@ -30,6 +31,7 @@
 from synapse.rest.client import account, login, register, room
 from synapse.rest.synapse.client.password_reset import PasswordResetSubmitTokenResource
 from synapse.server import HomeServer
+from synapse.types import JsonDict
 from synapse.util import Clock

 from tests import unittest
@@ -46,7 +48,7 @@ class PasswordResetTestCase(unittest.HomeserverTestCase):
         login.register_servlets,
     ]

-    def make_homeserver(self, reactor, clock):
+    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
         config = self.default_config()

         # Email config.
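Shrinking mypy.ini's `exclude` list and annotating each test go hand in hand: by default mypy does not check the bodies of unannotated functions, so the repetitive `-> None` additions throughout this patch are what actually switch checking on. A small illustration (not Synapse code):

```
import unittest


class ExampleTestCase(unittest.TestCase):
    def test_unchecked(self):
        # No annotations: mypy skips this body by default
        # (unless --check-untyped-defs is enabled).
        result: int = "oops"  # not reported

    def test_checked(self) -> None:
        # Annotated signature: mypy now checks the body.
        result: int = "oops"  # error: incompatible types in assignment
```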
@@ -67,20 +69,27 @@ def make_homeserver(self, reactor, clock): hs = self.setup_test_homeserver(config=config) async def sendmail( - reactor, smtphost, smtpport, from_addr, to_addrs, msg, **kwargs - ): - self.email_attempts.append(msg) - - self.email_attempts = [] + reactor: IReactorTCP, + smtphost: str, + smtpport: int, + from_addr: str, + to_addr: str, + msg_bytes: bytes, + *args: Any, + **kwargs: Any, + ) -> None: + self.email_attempts.append(msg_bytes) + + self.email_attempts: List[bytes] = [] hs.get_send_email_handler()._sendmail = sendmail return hs - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self.submit_token_resource = PasswordResetSubmitTokenResource(hs) - def test_basic_password_reset(self): + def test_basic_password_reset(self) -> None: """Test basic password reset flow""" old_password = "monkey" new_password = "kangeroo" @@ -118,7 +127,7 @@ def test_basic_password_reset(self): self.attempt_wrong_password_login("kermit", old_password) @override_config({"rc_3pid_validation": {"burst_count": 3}}) - def test_ratelimit_by_email(self): + def test_ratelimit_by_email(self) -> None: """Test that we ratelimit /requestToken for the same email.""" old_password = "monkey" new_password = "kangeroo" @@ -139,7 +148,7 @@ def test_ratelimit_by_email(self): ) ) - def reset(ip): + def reset(ip: str) -> None: client_secret = "foobar" session_id = self._request_token(email, client_secret, ip) @@ -166,7 +175,7 @@ def reset(ip): self.assertEqual(cm.exception.code, 429) - def test_basic_password_reset_canonicalise_email(self): + def test_basic_password_reset_canonicalise_email(self) -> None: """Test basic password reset flow Request password reset with different spelling """ @@ -206,7 +215,7 @@ def test_basic_password_reset_canonicalise_email(self): # Assert we can't log in with the old password self.attempt_wrong_password_login("kermit", old_password) - def test_cant_reset_password_without_clicking_link(self): + def test_cant_reset_password_without_clicking_link(self) -> None: """Test that we do actually need to click the link in the email""" old_password = "monkey" new_password = "kangeroo" @@ -241,7 +250,7 @@ def test_cant_reset_password_without_clicking_link(self): # Assert we can't log in with the new password self.attempt_wrong_password_login("kermit", new_password) - def test_no_valid_token(self): + def test_no_valid_token(self) -> None: """Test that we do actually need to request a token and can't just make a session up. """ @@ -277,7 +286,7 @@ def test_no_valid_token(self): self.attempt_wrong_password_login("kermit", new_password) @unittest.override_config({"request_token_inhibit_3pid_errors": True}) - def test_password_reset_bad_email_inhibit_error(self): + def test_password_reset_bad_email_inhibit_error(self) -> None: """Test that triggering a password reset with an email address that isn't bound to an account doesn't leak the lack of binding for that address if configured that way. 
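The fully-typed `sendmail` stub above replaces an untyped closure while keeping the same record-don't-send behaviour. The same pattern as a standalone sketch (names simplified from the test, `reactor` loosened to `Any`):

```
from typing import Any, List

# Collected message bodies, mirroring self.email_attempts in the test.
email_attempts: List[bytes] = []


async def sendmail(
    reactor: Any,  # IReactorTCP in the real test
    smtphost: str,
    smtpport: int,
    from_addr: str,
    to_addr: str,
    msg_bytes: bytes,
    *args: Any,
    **kwargs: Any,
) -> None:
    # Record the message instead of talking to an SMTP server.
    email_attempts.append(msg_bytes)
```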
@@ -292,7 +301,12 @@ def test_password_reset_bad_email_inhibit_error(self): self.assertIsNotNone(session_id) - def _request_token(self, email, client_secret, ip="127.0.0.1"): + def _request_token( + self, + email: str, + client_secret: str, + ip: str = "127.0.0.1", + ) -> str: channel = self.make_request( "POST", b"account/password/email/requestToken", @@ -309,7 +323,7 @@ def _request_token(self, email, client_secret, ip="127.0.0.1"): return channel.json_body["sid"] - def _validate_token(self, link): + def _validate_token(self, link: str) -> None: # Remove the host path = link.replace("https://example.com", "") @@ -339,7 +353,7 @@ def _validate_token(self, link): ) self.assertEqual(200, channel.code, channel.result) - def _get_link_from_email(self): + def _get_link_from_email(self) -> str: assert self.email_attempts, "No emails have been sent" raw_msg = self.email_attempts[-1].decode("UTF-8") @@ -354,14 +368,19 @@ def _get_link_from_email(self): if not text: self.fail("Could not find text portion of email to parse") + assert text is not None match = re.search(r"https://example.com\S+", text) assert match, "Could not find link in email" return match.group(0) def _reset_password( - self, new_password, session_id, client_secret, expected_code=200 - ): + self, + new_password: str, + session_id: str, + client_secret: str, + expected_code: int = 200, + ) -> None: channel = self.make_request( "POST", b"account/password", @@ -388,11 +407,11 @@ class DeactivateTestCase(unittest.HomeserverTestCase): room.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.hs = self.setup_test_homeserver() return self.hs - def test_deactivate_account(self): + def test_deactivate_account(self) -> None: user_id = self.register_user("kermit", "test") tok = self.login("kermit", "test") @@ -407,7 +426,7 @@ def test_deactivate_account(self): channel = self.make_request("GET", "account/whoami", access_token=tok) self.assertEqual(channel.code, 401) - def test_pending_invites(self): + def test_pending_invites(self) -> None: """Tests that deactivating a user rejects every pending invite for them.""" store = self.hs.get_datastores().main @@ -448,7 +467,7 @@ def test_pending_invites(self): self.assertEqual(len(memberships), 1, memberships) self.assertEqual(memberships[0].room_id, room_id, memberships) - def deactivate(self, user_id, tok): + def deactivate(self, user_id: str, tok: str) -> None: request_data = json.dumps( { "auth": { @@ -474,12 +493,12 @@ class WhoamiTestCase(unittest.HomeserverTestCase): register.register_servlets, ] - def default_config(self): + def default_config(self) -> Dict[str, Any]: config = super().default_config() config["allow_guest_access"] = True return config - def test_GET_whoami(self): + def test_GET_whoami(self) -> None: device_id = "wouldgohere" user_id = self.register_user("kermit", "test") tok = self.login("kermit", "test", device_id=device_id) @@ -496,7 +515,7 @@ def test_GET_whoami(self): }, ) - def test_GET_whoami_guests(self): + def test_GET_whoami_guests(self) -> None: channel = self.make_request( b"POST", b"/_matrix/client/r0/register?kind=guest", b"{}" ) @@ -516,7 +535,7 @@ def test_GET_whoami_guests(self): }, ) - def test_GET_whoami_appservices(self): + def test_GET_whoami_appservices(self) -> None: user_id = "@as:test" as_token = "i_am_an_app_service" @@ -541,7 +560,7 @@ def test_GET_whoami_appservices(self): ) self.assertFalse(hasattr(whoami, "device_id")) - def _whoami(self, tok): + 
def _whoami(self, tok: str) -> JsonDict: channel = self.make_request("GET", "account/whoami", {}, access_token=tok) self.assertEqual(channel.code, 200) return channel.json_body @@ -555,7 +574,7 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): synapse.rest.admin.register_servlets_for_client_rest_resource, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() # Email config. @@ -576,16 +595,23 @@ def make_homeserver(self, reactor, clock): self.hs = self.setup_test_homeserver(config=config) async def sendmail( - reactor, smtphost, smtpport, from_addr, to_addrs, msg, **kwargs - ): - self.email_attempts.append(msg) - - self.email_attempts = [] + reactor: IReactorTCP, + smtphost: str, + smtpport: int, + from_addr: str, + to_addr: str, + msg_bytes: bytes, + *args: Any, + **kwargs: Any, + ) -> None: + self.email_attempts.append(msg_bytes) + + self.email_attempts: List[bytes] = [] self.hs.get_send_email_handler()._sendmail = sendmail return self.hs - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self.user_id = self.register_user("kermit", "test") @@ -593,83 +619,73 @@ def prepare(self, reactor, clock, hs): self.email = "test@example.com" self.url_3pid = b"account/3pid" - def test_add_valid_email(self): - self.get_success(self._add_email(self.email, self.email)) + def test_add_valid_email(self) -> None: + self._add_email(self.email, self.email) - def test_add_valid_email_second_time(self): - self.get_success(self._add_email(self.email, self.email)) - self.get_success( - self._request_token_invalid_email( - self.email, - expected_errcode=Codes.THREEPID_IN_USE, - expected_error="Email is already in use", - ) + def test_add_valid_email_second_time(self) -> None: + self._add_email(self.email, self.email) + self._request_token_invalid_email( + self.email, + expected_errcode=Codes.THREEPID_IN_USE, + expected_error="Email is already in use", ) - def test_add_valid_email_second_time_canonicalise(self): - self.get_success(self._add_email(self.email, self.email)) - self.get_success( - self._request_token_invalid_email( - "TEST@EXAMPLE.COM", - expected_errcode=Codes.THREEPID_IN_USE, - expected_error="Email is already in use", - ) + def test_add_valid_email_second_time_canonicalise(self) -> None: + self._add_email(self.email, self.email) + self._request_token_invalid_email( + "TEST@EXAMPLE.COM", + expected_errcode=Codes.THREEPID_IN_USE, + expected_error="Email is already in use", ) - def test_add_email_no_at(self): - self.get_success( - self._request_token_invalid_email( - "address-without-at.bar", - expected_errcode=Codes.UNKNOWN, - expected_error="Unable to parse email address", - ) + def test_add_email_no_at(self) -> None: + self._request_token_invalid_email( + "address-without-at.bar", + expected_errcode=Codes.UNKNOWN, + expected_error="Unable to parse email address", ) - def test_add_email_two_at(self): - self.get_success( - self._request_token_invalid_email( - "foo@foo@test.bar", - expected_errcode=Codes.UNKNOWN, - expected_error="Unable to parse email address", - ) + def test_add_email_two_at(self) -> None: + self._request_token_invalid_email( + "foo@foo@test.bar", + expected_errcode=Codes.UNKNOWN, + expected_error="Unable to parse email address", ) - def test_add_email_bad_format(self): - self.get_success( - self._request_token_invalid_email( - 
"user@bad.example.net@good.example.com", - expected_errcode=Codes.UNKNOWN, - expected_error="Unable to parse email address", - ) + def test_add_email_bad_format(self) -> None: + self._request_token_invalid_email( + "user@bad.example.net@good.example.com", + expected_errcode=Codes.UNKNOWN, + expected_error="Unable to parse email address", ) - def test_add_email_domain_to_lower(self): - self.get_success(self._add_email("foo@TEST.BAR", "foo@test.bar")) + def test_add_email_domain_to_lower(self) -> None: + self._add_email("foo@TEST.BAR", "foo@test.bar") - def test_add_email_domain_with_umlaut(self): - self.get_success(self._add_email("foo@Öumlaut.com", "foo@öumlaut.com")) + def test_add_email_domain_with_umlaut(self) -> None: + self._add_email("foo@Öumlaut.com", "foo@öumlaut.com") - def test_add_email_address_casefold(self): - self.get_success(self._add_email("Strauß@Example.com", "strauss@example.com")) + def test_add_email_address_casefold(self) -> None: + self._add_email("Strauß@Example.com", "strauss@example.com") - def test_address_trim(self): - self.get_success(self._add_email(" foo@test.bar ", "foo@test.bar")) + def test_address_trim(self) -> None: + self._add_email(" foo@test.bar ", "foo@test.bar") @override_config({"rc_3pid_validation": {"burst_count": 3}}) - def test_ratelimit_by_ip(self): + def test_ratelimit_by_ip(self) -> None: """Tests that adding emails is ratelimited by IP""" # We expect to be able to set three emails before getting ratelimited. - self.get_success(self._add_email("foo1@test.bar", "foo1@test.bar")) - self.get_success(self._add_email("foo2@test.bar", "foo2@test.bar")) - self.get_success(self._add_email("foo3@test.bar", "foo3@test.bar")) + self._add_email("foo1@test.bar", "foo1@test.bar") + self._add_email("foo2@test.bar", "foo2@test.bar") + self._add_email("foo3@test.bar", "foo3@test.bar") with self.assertRaises(HttpResponseException) as cm: - self.get_success(self._add_email("foo4@test.bar", "foo4@test.bar")) + self._add_email("foo4@test.bar", "foo4@test.bar") self.assertEqual(cm.exception.code, 429) - def test_add_email_if_disabled(self): + def test_add_email_if_disabled(self) -> None: """Test adding email to profile when doing so is disallowed""" self.hs.config.registration.enable_3pid_changes = False @@ -695,7 +711,7 @@ def test_add_email_if_disabled(self): }, access_token=self.user_id_tok, ) - self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) # Get user @@ -705,10 +721,10 @@ def test_add_email_if_disabled(self): access_token=self.user_id_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) self.assertFalse(channel.json_body["threepids"]) - def test_delete_email(self): + def test_delete_email(self) -> None: """Test deleting an email from profile""" # Add a threepid self.get_success( @@ -727,7 +743,7 @@ def test_delete_email(self): {"medium": "email", "address": self.email}, access_token=self.user_id_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) # Get user channel = self.make_request( @@ -736,10 +752,10 @@ def test_delete_email(self): access_token=self.user_id_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, 
msg=channel.result["body"]) self.assertFalse(channel.json_body["threepids"]) - def test_delete_email_if_disabled(self): + def test_delete_email_if_disabled(self) -> None: """Test deleting an email from profile when disallowed""" self.hs.config.registration.enable_3pid_changes = False @@ -761,7 +777,7 @@ def test_delete_email_if_disabled(self): access_token=self.user_id_tok, ) - self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) # Get user @@ -771,11 +787,11 @@ def test_delete_email_if_disabled(self): access_token=self.user_id_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) self.assertEqual(self.email, channel.json_body["threepids"][0]["address"]) - def test_cant_add_email_without_clicking_link(self): + def test_cant_add_email_without_clicking_link(self) -> None: """Test that we do actually need to click the link in the email""" client_secret = "foobar" session_id = self._request_token(self.email, client_secret) @@ -797,7 +813,7 @@ def test_cant_add_email_without_clicking_link(self): }, access_token=self.user_id_tok, ) - self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) self.assertEqual(Codes.THREEPID_AUTH_FAILED, channel.json_body["errcode"]) # Get user @@ -807,10 +823,10 @@ def test_cant_add_email_without_clicking_link(self): access_token=self.user_id_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) self.assertFalse(channel.json_body["threepids"]) - def test_no_valid_token(self): + def test_no_valid_token(self) -> None: """Test that we do actually need to request a token and can't just make a session up. """ @@ -832,7 +848,7 @@ def test_no_valid_token(self): }, access_token=self.user_id_tok, ) - self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) self.assertEqual(Codes.THREEPID_AUTH_FAILED, channel.json_body["errcode"]) # Get user @@ -842,11 +858,11 @@ def test_no_valid_token(self): access_token=self.user_id_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) self.assertFalse(channel.json_body["threepids"]) @override_config({"next_link_domain_whitelist": None}) - def test_next_link(self): + def test_next_link(self) -> None: """Tests a valid next_link parameter value with no whitelist (good case)""" self._request_token( "something@example.com", @@ -856,7 +872,7 @@ def test_next_link(self): ) @override_config({"next_link_domain_whitelist": None}) - def test_next_link_exotic_protocol(self): + def test_next_link_exotic_protocol(self) -> None: """Tests using a esoteric protocol as a next_link parameter value. Someone may be hosting a client on IPFS etc. 
""" @@ -868,7 +884,7 @@ def test_next_link_exotic_protocol(self): ) @override_config({"next_link_domain_whitelist": None}) - def test_next_link_file_uri(self): + def test_next_link_file_uri(self) -> None: """Tests next_link parameters cannot be file URI""" # Attempt to use a next_link value that points to the local disk self._request_token( @@ -879,7 +895,7 @@ def test_next_link_file_uri(self): ) @override_config({"next_link_domain_whitelist": ["example.com", "example.org"]}) - def test_next_link_domain_whitelist(self): + def test_next_link_domain_whitelist(self) -> None: """Tests next_link parameters must fit the whitelist if provided""" # Ensure not providing a next_link parameter still works @@ -912,7 +928,7 @@ def test_next_link_domain_whitelist(self): ) @override_config({"next_link_domain_whitelist": []}) - def test_empty_next_link_domain_whitelist(self): + def test_empty_next_link_domain_whitelist(self) -> None: """Tests an empty next_lint_domain_whitelist value, meaning next_link is essentially disallowed """ @@ -962,28 +978,28 @@ def _request_token( def _request_token_invalid_email( self, - email, - expected_errcode, - expected_error, - client_secret="foobar", - ): + email: str, + expected_errcode: str, + expected_error: str, + client_secret: str = "foobar", + ) -> None: channel = self.make_request( "POST", b"account/3pid/email/requestToken", {"client_secret": client_secret, "email": email, "send_attempt": 1}, ) - self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(400, channel.code, msg=channel.result["body"]) self.assertEqual(expected_errcode, channel.json_body["errcode"]) self.assertEqual(expected_error, channel.json_body["error"]) - def _validate_token(self, link): + def _validate_token(self, link: str) -> None: # Remove the host path = link.replace("https://example.com", "") channel = self.make_request("GET", path, shorthand=False) self.assertEqual(200, channel.code, channel.result) - def _get_link_from_email(self): + def _get_link_from_email(self) -> str: assert self.email_attempts, "No emails have been sent" raw_msg = self.email_attempts[-1].decode("UTF-8") @@ -998,12 +1014,13 @@ def _get_link_from_email(self): if not text: self.fail("Could not find text portion of email to parse") + assert text is not None match = re.search(r"https://example.com\S+", text) assert match, "Could not find link in email" return match.group(0) - def _add_email(self, request_email, expected_email): + def _add_email(self, request_email: str, expected_email: str) -> None: """Test adding an email to profile""" previous_email_attempts = len(self.email_attempts) @@ -1030,7 +1047,7 @@ def _add_email(self, request_email, expected_email): access_token=self.user_id_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) # Get user channel = self.make_request( @@ -1039,7 +1056,7 @@ def _add_email(self, request_email, expected_email): access_token=self.user_id_tok, ) - self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(200, channel.code, msg=channel.result["body"]) self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) threepids = {threepid["address"] for threepid in channel.json_body["threepids"]} @@ -1055,18 +1072,18 @@ class AccountStatusTestCase(unittest.HomeserverTestCase): url = "/_matrix/client/unstable/org.matrix.msc3720/account_status" - def make_homeserver(self, reactor, clock): + def 
make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() config["experimental_features"] = {"msc3720_enabled": True} return self.setup_test_homeserver(config=config) - def prepare(self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.requester = self.register_user("requester", "password") self.requester_tok = self.login("requester", "password") - self.server_name = homeserver.config.server.server_name + self.server_name = hs.config.server.server_name - def test_missing_mxid(self): + def test_missing_mxid(self) -> None: """Tests that not providing any MXID raises an error.""" self._test_status( users=None, @@ -1074,7 +1091,7 @@ def test_missing_mxid(self): expected_errcode=Codes.MISSING_PARAM, ) - def test_invalid_mxid(self): + def test_invalid_mxid(self) -> None: """Tests that providing an invalid MXID raises an error.""" self._test_status( users=["bad:test"], @@ -1082,7 +1099,7 @@ def test_invalid_mxid(self): expected_errcode=Codes.INVALID_PARAM, ) - def test_local_user_not_exists(self): + def test_local_user_not_exists(self) -> None: """Tests that the account status endpoints correctly reports that a user doesn't exist. """ @@ -1098,7 +1115,7 @@ def test_local_user_not_exists(self): expected_failures=[], ) - def test_local_user_exists(self): + def test_local_user_exists(self) -> None: """Tests that the account status endpoint correctly reports that a user doesn't exist. """ @@ -1115,7 +1132,7 @@ def test_local_user_exists(self): expected_failures=[], ) - def test_local_user_deactivated(self): + def test_local_user_deactivated(self) -> None: """Tests that the account status endpoint correctly reports a deactivated user.""" user = self.register_user("someuser", "password") self.get_success( @@ -1135,7 +1152,7 @@ def test_local_user_deactivated(self): expected_failures=[], ) - def test_mixed_local_and_remote_users(self): + def test_mixed_local_and_remote_users(self) -> None: """Tests that if some users are remote the account status endpoint correctly merges the remote responses with the local result. """ @@ -1150,7 +1167,13 @@ def test_mixed_local_and_remote_users(self): "@bad:badremote", ] - async def post_json(destination, path, data, *a, **kwa): + async def post_json( + destination: str, + path: str, + data: Optional[JsonDict] = None, + *a: Any, + **kwa: Any, + ) -> Union[JsonDict, list]: if destination == "remote": return { "account_statuses": { @@ -1160,9 +1183,7 @@ async def post_json(destination, path, data, *a, **kwa): }, } } - if destination == "otherremote": - return {} - if destination == "badremote": + elif destination == "badremote": # badremote tries to overwrite the status of a user that doesn't belong # to it (i.e. users[1]) with false data, which Synapse is expected to # ignore. @@ -1176,6 +1197,9 @@ async def post_json(destination, path, data, *a, **kwa): }, } } + # if destination == "otherremote" + else: + return {} # Register a mock that will return the expected result depending on the remote. self.hs.get_federation_http_client().post_json = Mock(side_effect=post_json) @@ -1205,7 +1229,7 @@ def _test_status( expected_statuses: Optional[Dict[str, Dict[str, bool]]] = None, expected_failures: Optional[List[str]] = None, expected_errcode: Optional[str] = None, - ): + ) -> None: """Send a request to the account status endpoint and check that the response matches with what's expected. 
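The account-status test above drives its federation mock with a typed coroutine passed as `side_effect`, so awaiting the mock behaves like awaiting the real client. A minimal reproduction of the pattern, with `JsonDict` stubbed as a plain dict:

```
import asyncio
from typing import Any, Optional
from unittest.mock import Mock

JsonDict = dict  # stand-in for synapse.types.JsonDict


async def post_json(
    destination: str, path: str, data: Optional[JsonDict] = None, **kwargs: Any
) -> JsonDict:
    # Return a canned per-destination response, as the test's mock does.
    return {"destination": destination}


client = Mock()
client.post_json = Mock(side_effect=post_json)

# Calling the mock invokes the typed coroutine; awaiting it yields the dict.
print(asyncio.run(client.post_json("remote", "/status")))
```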
diff --git a/tests/rest/client/test_filter.py b/tests/rest/client/test_filter.py index 5c31a54421df..823e8ab8c474 100644 --- a/tests/rest/client/test_filter.py +++ b/tests/rest/client/test_filter.py @@ -12,10 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -from twisted.internet import defer +from twisted.test.proto_helpers import MemoryReactor from synapse.api.errors import Codes from synapse.rest.client import filter +from synapse.server import HomeServer +from synapse.util import Clock from tests import unittest @@ -30,11 +32,11 @@ class FilterTestCase(unittest.HomeserverTestCase): EXAMPLE_FILTER_JSON = b'{"room": {"timeline": {"types": ["m.room.message"]}}}' servlets = [filter.register_servlets] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.filtering = hs.get_filtering() self.store = hs.get_datastores().main - def test_add_filter(self): + def test_add_filter(self) -> None: channel = self.make_request( "POST", "/_matrix/client/r0/user/%s/filter" % (self.user_id), @@ -43,11 +45,13 @@ def test_add_filter(self): self.assertEqual(channel.result["code"], b"200") self.assertEqual(channel.json_body, {"filter_id": "0"}) - filter = self.store.get_user_filter(user_localpart="apple", filter_id=0) + filter = self.get_success( + self.store.get_user_filter(user_localpart="apple", filter_id=0) + ) self.pump() - self.assertEqual(filter.result, self.EXAMPLE_FILTER) + self.assertEqual(filter, self.EXAMPLE_FILTER) - def test_add_filter_for_other_user(self): + def test_add_filter_for_other_user(self) -> None: channel = self.make_request( "POST", "/_matrix/client/r0/user/%s/filter" % ("@watermelon:test"), @@ -57,7 +61,7 @@ def test_add_filter_for_other_user(self): self.assertEqual(channel.result["code"], b"403") self.assertEqual(channel.json_body["errcode"], Codes.FORBIDDEN) - def test_add_filter_non_local_user(self): + def test_add_filter_non_local_user(self) -> None: _is_mine = self.hs.is_mine self.hs.is_mine = lambda target_user: False channel = self.make_request( @@ -70,14 +74,13 @@ def test_add_filter_non_local_user(self): self.assertEqual(channel.result["code"], b"403") self.assertEqual(channel.json_body["errcode"], Codes.FORBIDDEN) - def test_get_filter(self): - filter_id = defer.ensureDeferred( + def test_get_filter(self) -> None: + filter_id = self.get_success( self.filtering.add_user_filter( user_localpart="apple", user_filter=self.EXAMPLE_FILTER ) ) self.reactor.advance(1) - filter_id = filter_id.result channel = self.make_request( "GET", "/_matrix/client/r0/user/%s/filter/%s" % (self.user_id, filter_id) ) @@ -85,7 +88,7 @@ def test_get_filter(self): self.assertEqual(channel.result["code"], b"200") self.assertEqual(channel.json_body, self.EXAMPLE_FILTER) - def test_get_filter_non_existant(self): + def test_get_filter_non_existant(self) -> None: channel = self.make_request( "GET", "/_matrix/client/r0/user/%s/filter/12382148321" % (self.user_id) ) @@ -95,7 +98,7 @@ def test_get_filter_non_existant(self): # Currently invalid params do not have an appropriate errcode # in errors.py - def test_get_filter_invalid_id(self): + def test_get_filter_invalid_id(self) -> None: channel = self.make_request( "GET", "/_matrix/client/r0/user/%s/filter/foobar" % (self.user_id) ) @@ -103,7 +106,7 @@ def test_get_filter_invalid_id(self): self.assertEqual(channel.result["code"], b"400") # No ID also returns an invalid_id error - def test_get_filter_no_id(self): + def 
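The `test_filter.py` hunks replace `defer.ensureDeferred(...)` plus direct `.result` access with `get_success`, which waits for the Deferred and unwraps failures. Why `.result` was fragile, in a small Twisted sketch:

```
from twisted.internet import defer


async def compute() -> int:
    return 42


d = defer.ensureDeferred(compute())
# This works only because compute() finished synchronously; on a
# Deferred that has not fired yet, .result does not exist at all, and
# a failure would surface as a raw Failure object. get_success() waits
# for completion and unwraps both cases, so the rewritten tests cannot
# read a half-finished result by accident.
print(d.result)  # 42
```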
test_get_filter_no_id(self) -> None: channel = self.make_request( "GET", "/_matrix/client/r0/user/%s/filter/" % (self.user_id) ) diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index a087cd7b2149..709f851a3874 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -15,7 +15,7 @@ import itertools import urllib.parse -from typing import Dict, List, Optional, Tuple +from typing import Any, Dict, List, Optional, Tuple from unittest.mock import patch from twisted.test.proto_helpers import MemoryReactor @@ -45,7 +45,7 @@ class BaseRelationsTestCase(unittest.HomeserverTestCase): ] hijack_auth = False - def default_config(self) -> dict: + def default_config(self) -> Dict[str, Any]: # We need to enable msc1849 support for aggregations config = super().default_config() diff --git a/tests/rest/client/test_report_event.py b/tests/rest/client/test_report_event.py index ee6b0b9ebfb0..20a259fc4388 100644 --- a/tests/rest/client/test_report_event.py +++ b/tests/rest/client/test_report_event.py @@ -14,8 +14,13 @@ import json +from twisted.test.proto_helpers import MemoryReactor + import synapse.rest.admin from synapse.rest.client import login, report_event, room +from synapse.server import HomeServer +from synapse.types import JsonDict +from synapse.util import Clock from tests import unittest @@ -28,7 +33,7 @@ class ReportEventTestCase(unittest.HomeserverTestCase): report_event.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") self.other_user = self.register_user("user", "pass") @@ -42,35 +47,35 @@ def prepare(self, reactor, clock, hs): self.event_id = resp["event_id"] self.report_path = f"rooms/{self.room_id}/report/{self.event_id}" - def test_reason_str_and_score_int(self): + def test_reason_str_and_score_int(self) -> None: data = {"reason": "this makes me sad", "score": -100} self._assert_status(200, data) - def test_no_reason(self): + def test_no_reason(self) -> None: data = {"score": 0} self._assert_status(200, data) - def test_no_score(self): + def test_no_score(self) -> None: data = {"reason": "this makes me sad"} self._assert_status(200, data) - def test_no_reason_and_no_score(self): - data = {} + def test_no_reason_and_no_score(self) -> None: + data: JsonDict = {} self._assert_status(200, data) - def test_reason_int_and_score_str(self): + def test_reason_int_and_score_str(self) -> None: data = {"reason": 10, "score": "string"} self._assert_status(400, data) - def test_reason_zero_and_score_blank(self): + def test_reason_zero_and_score_blank(self) -> None: data = {"reason": 0, "score": ""} self._assert_status(400, data) - def test_reason_and_score_null(self): + def test_reason_and_score_null(self) -> None: data = {"reason": None, "score": None} self._assert_status(400, data) - def _assert_status(self, response_status, data): + def _assert_status(self, response_status: int, data: JsonDict) -> None: channel = self.make_request( "POST", self.report_path, diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index e0b11e726433..37866ee330f3 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -18,11 +18,12 @@ """Tests REST events for /rooms paths.""" import json -from typing import Iterable, List +from typing import Any, Dict, Iterable, List, Optional from 
unittest.mock import Mock, call from urllib import parse as urlparse from twisted.internet import defer +from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin from synapse.api.constants import ( @@ -35,7 +36,9 @@ from synapse.handlers.pagination import PurgeStatus from synapse.rest import admin from synapse.rest.client import account, directory, login, profile, room, sync +from synapse.server import HomeServer from synapse.types import JsonDict, RoomAlias, UserID, create_requester +from synapse.util import Clock from synapse.util.stringutils import random_string from tests import unittest @@ -45,11 +48,11 @@ class RoomBase(unittest.HomeserverTestCase): - rmcreator_id = None + rmcreator_id: Optional[str] = None servlets = [room.register_servlets, room.register_deprecated_servlets] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.hs = self.setup_test_homeserver( "red", @@ -57,15 +60,15 @@ def make_homeserver(self, reactor, clock): federation_client=Mock(), ) - self.hs.get_federation_handler = Mock() + self.hs.get_federation_handler = Mock() # type: ignore[assignment] self.hs.get_federation_handler.return_value.maybe_backfill = Mock( return_value=make_awaitable(None) ) - async def _insert_client_ip(*args, **kwargs): + async def _insert_client_ip(*args: Any, **kwargs: Any) -> None: return None - self.hs.get_datastores().main.insert_client_ip = _insert_client_ip + self.hs.get_datastores().main.insert_client_ip = _insert_client_ip # type: ignore[assignment] return self.hs @@ -76,7 +79,7 @@ class RoomPermissionsTestCase(RoomBase): user_id = "@sid1:red" rmcreator_id = "@notme:red" - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.helper.auth_user_id = self.rmcreator_id # create some rooms under the name rmcreator_id @@ -108,12 +111,12 @@ def prepare(self, reactor, clock, hs): # auth as user_id now self.helper.auth_user_id = self.user_id - def test_can_do_action(self): + def test_can_do_action(self) -> None: msg_content = b'{"msgtype":"m.text","body":"hello"}' seq = iter(range(100)) - def send_msg_path(): + def send_msg_path() -> str: return "/rooms/%s/send/m.room.message/mid%s" % ( self.created_rmid, str(next(seq)), @@ -148,7 +151,7 @@ def send_msg_path(): channel = self.make_request("PUT", send_msg_path(), msg_content) self.assertEqual(403, channel.code, msg=channel.result["body"]) - def test_topic_perms(self): + def test_topic_perms(self) -> None: topic_content = b'{"topic":"My Topic Name"}' topic_path = "/rooms/%s/state/m.room.topic" % self.created_rmid @@ -214,14 +217,14 @@ def test_topic_perms(self): self.assertEqual(403, channel.code, msg=channel.result["body"]) def _test_get_membership( - self, room=None, members: Iterable = frozenset(), expect_code=None - ): + self, room: str, members: Iterable = frozenset(), expect_code: int = 200 + ) -> None: for member in members: path = "/rooms/%s/state/m.room.member/%s" % (room, member) channel = self.make_request("GET", path) self.assertEqual(expect_code, channel.code) - def test_membership_basic_room_perms(self): + def test_membership_basic_room_perms(self) -> None: # === room does not exist === room = self.uncreated_rmid # get membership of self, get membership of other, uncreated room @@ -241,7 +244,7 @@ def test_membership_basic_room_perms(self): self.helper.join(room=room, user=usr, expect_code=404) self.helper.leave(room=room, user=usr, expect_code=404) - def 
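The `# type: ignore[assignment]` markers added to `RoomBase.make_homeserver` are the standard cost of monkeypatching typed methods with `Mock`: the assignment changes the attribute's declared type, which mypy rejects even though the runtime shadowing is fine. A minimal reproduction:

```
from unittest.mock import Mock


class Server:
    def get_handler(self) -> str:
        return "real handler"


server = Server()
# mypy flags a Mock being assigned over a bound method, hence the
# targeted ignore; at runtime the instance attribute simply shadows it.
server.get_handler = Mock(return_value="fake handler")  # type: ignore[assignment]
print(server.get_handler())  # -> "fake handler"
```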
test_membership_private_room_perms(self): + def test_membership_private_room_perms(self) -> None: room = self.created_rmid # get membership of self, get membership of other, private room + invite # expect all 403s @@ -264,7 +267,7 @@ def test_membership_private_room_perms(self): members=[self.user_id, self.rmcreator_id], room=room, expect_code=200 ) - def test_membership_public_room_perms(self): + def test_membership_public_room_perms(self) -> None: room = self.created_public_rmid # get membership of self, get membership of other, public room + invite # expect 403 @@ -287,7 +290,7 @@ def test_membership_public_room_perms(self): members=[self.user_id, self.rmcreator_id], room=room, expect_code=200 ) - def test_invited_permissions(self): + def test_invited_permissions(self) -> None: room = self.created_rmid self.helper.invite(room=room, src=self.rmcreator_id, targ=self.user_id) @@ -310,7 +313,7 @@ def test_invited_permissions(self): expect_code=403, ) - def test_joined_permissions(self): + def test_joined_permissions(self) -> None: room = self.created_rmid self.helper.invite(room=room, src=self.rmcreator_id, targ=self.user_id) self.helper.join(room=room, user=self.user_id) @@ -348,7 +351,7 @@ def test_joined_permissions(self): # set left of self, expect 200 self.helper.leave(room=room, user=self.user_id) - def test_leave_permissions(self): + def test_leave_permissions(self) -> None: room = self.created_rmid self.helper.invite(room=room, src=self.rmcreator_id, targ=self.user_id) self.helper.join(room=room, user=self.user_id) @@ -383,7 +386,7 @@ def test_leave_permissions(self): ) # tests the "from banned" line from the table in https://spec.matrix.org/unstable/client-server-api/#mroommember - def test_member_event_from_ban(self): + def test_member_event_from_ban(self) -> None: room = self.created_rmid self.helper.invite(room=room, src=self.rmcreator_id, targ=self.user_id) self.helper.join(room=room, user=self.user_id) @@ -475,21 +478,21 @@ class RoomsMemberListTestCase(RoomBase): user_id = "@sid1:red" - def test_get_member_list(self): + def test_get_member_list(self) -> None: room_id = self.helper.create_room_as(self.user_id) channel = self.make_request("GET", "/rooms/%s/members" % room_id) self.assertEqual(200, channel.code, msg=channel.result["body"]) - def test_get_member_list_no_room(self): + def test_get_member_list_no_room(self) -> None: channel = self.make_request("GET", "/rooms/roomdoesnotexist/members") self.assertEqual(403, channel.code, msg=channel.result["body"]) - def test_get_member_list_no_permission(self): + def test_get_member_list_no_permission(self) -> None: room_id = self.helper.create_room_as("@some_other_guy:red") channel = self.make_request("GET", "/rooms/%s/members" % room_id) self.assertEqual(403, channel.code, msg=channel.result["body"]) - def test_get_member_list_no_permission_with_at_token(self): + def test_get_member_list_no_permission_with_at_token(self) -> None: """ Tests that a stranger to the room cannot get the member list (in the case that they use an at token). @@ -509,7 +512,7 @@ def test_get_member_list_no_permission_with_at_token(self): ) self.assertEqual(403, channel.code, msg=channel.result["body"]) - def test_get_member_list_no_permission_former_member(self): + def test_get_member_list_no_permission_former_member(self) -> None: """ Tests that a former member of the room can not get the member list. 
""" @@ -529,7 +532,7 @@ def test_get_member_list_no_permission_former_member(self): channel = self.make_request("GET", "/rooms/%s/members" % room_id) self.assertEqual(403, channel.code, msg=channel.result["body"]) - def test_get_member_list_no_permission_former_member_with_at_token(self): + def test_get_member_list_no_permission_former_member_with_at_token(self) -> None: """ Tests that a former member of the room can not get the member list (in the case that they use an at token). @@ -569,7 +572,7 @@ def test_get_member_list_no_permission_former_member_with_at_token(self): ) self.assertEqual(403, channel.code, msg=channel.result["body"]) - def test_get_member_list_mixed_memberships(self): + def test_get_member_list_mixed_memberships(self) -> None: room_creator = "@some_other_guy:red" room_id = self.helper.create_room_as(room_creator) room_path = "/rooms/%s/members" % room_id @@ -594,26 +597,26 @@ class RoomsCreateTestCase(RoomBase): user_id = "@sid1:red" - def test_post_room_no_keys(self): + def test_post_room_no_keys(self) -> None: # POST with no config keys, expect new room id channel = self.make_request("POST", "/createRoom", "{}") self.assertEqual(200, channel.code, channel.result) self.assertTrue("room_id" in channel.json_body) - def test_post_room_visibility_key(self): + def test_post_room_visibility_key(self) -> None: # POST with visibility config key, expect new room id channel = self.make_request("POST", "/createRoom", b'{"visibility":"private"}') self.assertEqual(200, channel.code) self.assertTrue("room_id" in channel.json_body) - def test_post_room_custom_key(self): + def test_post_room_custom_key(self) -> None: # POST with custom config keys, expect new room id channel = self.make_request("POST", "/createRoom", b'{"custom":"stuff"}') self.assertEqual(200, channel.code) self.assertTrue("room_id" in channel.json_body) - def test_post_room_known_and_unknown_keys(self): + def test_post_room_known_and_unknown_keys(self) -> None: # POST with custom + known config keys, expect new room id channel = self.make_request( "POST", "/createRoom", b'{"visibility":"private","custom":"things"}' @@ -621,7 +624,7 @@ def test_post_room_known_and_unknown_keys(self): self.assertEqual(200, channel.code) self.assertTrue("room_id" in channel.json_body) - def test_post_room_invalid_content(self): + def test_post_room_invalid_content(self) -> None: # POST with invalid content / paths, expect 400 channel = self.make_request("POST", "/createRoom", b'{"visibili') self.assertEqual(400, channel.code) @@ -629,7 +632,7 @@ def test_post_room_invalid_content(self): channel = self.make_request("POST", "/createRoom", b'["hello"]') self.assertEqual(400, channel.code) - def test_post_room_invitees_invalid_mxid(self): + def test_post_room_invitees_invalid_mxid(self) -> None: # POST with invalid invitee, see https://github.com/matrix-org/synapse/issues/4088 # Note the trailing space in the MXID here! channel = self.make_request( @@ -638,7 +641,7 @@ def test_post_room_invitees_invalid_mxid(self): self.assertEqual(400, channel.code) @unittest.override_config({"rc_invites": {"per_room": {"burst_count": 3}}}) - def test_post_room_invitees_ratelimit(self): + def test_post_room_invitees_ratelimit(self) -> None: """Test that invites sent when creating a room are ratelimited by a RateLimiter, which ratelimits them correctly, including by not limiting when the requester is exempt from ratelimiting. 
@@ -674,7 +677,7 @@ def test_post_room_invitees_ratelimit(self): channel = self.make_request("POST", "/createRoom", content) self.assertEqual(200, channel.code) - def test_spam_checker_may_join_room(self): + def test_spam_checker_may_join_room(self) -> None: """Tests that the user_may_join_room spam checker callback is correctly bypassed when creating a new room. """ @@ -704,12 +707,12 @@ class RoomTopicTestCase(RoomBase): user_id = "@sid1:red" - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: # create the room self.room_id = self.helper.create_room_as(self.user_id) self.path = "/rooms/%s/state/m.room.topic" % (self.room_id,) - def test_invalid_puts(self): + def test_invalid_puts(self) -> None: # missing keys or invalid json channel = self.make_request("PUT", self.path, "{}") self.assertEqual(400, channel.code, msg=channel.result["body"]) @@ -736,7 +739,7 @@ def test_invalid_puts(self): channel = self.make_request("PUT", self.path, content) self.assertEqual(400, channel.code, msg=channel.result["body"]) - def test_rooms_topic(self): + def test_rooms_topic(self) -> None: # nothing should be there channel = self.make_request("GET", self.path) self.assertEqual(404, channel.code, msg=channel.result["body"]) @@ -751,7 +754,7 @@ def test_rooms_topic(self): self.assertEqual(200, channel.code, msg=channel.result["body"]) self.assert_dict(json.loads(content), channel.json_body) - def test_rooms_topic_with_extra_keys(self): + def test_rooms_topic_with_extra_keys(self) -> None: # valid put with extra keys content = '{"topic":"Seasons","subtopic":"Summer"}' channel = self.make_request("PUT", self.path, content) @@ -768,10 +771,10 @@ class RoomMemberStateTestCase(RoomBase): user_id = "@sid1:red" - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.room_id = self.helper.create_room_as(self.user_id) - def test_invalid_puts(self): + def test_invalid_puts(self) -> None: path = "/rooms/%s/state/m.room.member/%s" % (self.room_id, self.user_id) # missing keys or invalid json channel = self.make_request("PUT", path, "{}") @@ -801,7 +804,7 @@ def test_invalid_puts(self): channel = self.make_request("PUT", path, content.encode("ascii")) self.assertEqual(400, channel.code, msg=channel.result["body"]) - def test_rooms_members_self(self): + def test_rooms_members_self(self) -> None: path = "/rooms/%s/state/m.room.member/%s" % ( urlparse.quote(self.room_id), self.user_id, @@ -812,13 +815,13 @@ def test_rooms_members_self(self): channel = self.make_request("PUT", path, content.encode("ascii")) self.assertEqual(200, channel.code, msg=channel.result["body"]) - channel = self.make_request("GET", path, None) + channel = self.make_request("GET", path, content=b"") self.assertEqual(200, channel.code, msg=channel.result["body"]) expected_response = {"membership": Membership.JOIN} self.assertEqual(expected_response, channel.json_body) - def test_rooms_members_other(self): + def test_rooms_members_other(self) -> None: self.other_id = "@zzsid1:red" path = "/rooms/%s/state/m.room.member/%s" % ( urlparse.quote(self.room_id), @@ -830,11 +833,11 @@ def test_rooms_members_other(self): channel = self.make_request("PUT", path, content) self.assertEqual(200, channel.code, msg=channel.result["body"]) - channel = self.make_request("GET", path, None) + channel = self.make_request("GET", path, content=b"") self.assertEqual(200, channel.code, msg=channel.result["body"]) 
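The invite and join rate-limit tests modernised in the surrounding hunks all lean on Synapse's `override_config` decorator to tighten limits for a single test. Its general shape, sketched with the same config keys that appear in the diff (the test body here is illustrative only):

```
from tests import unittest


class InviteRatelimitSketch(unittest.HomeserverTestCase):
    # Allow a burst of three invites per room, refilling at one invite
    # every two seconds; the values mirror those used in the diff.
    @unittest.override_config(
        {"rc_invites": {"per_room": {"per_second": 0.5, "burst_count": 3}}}
    )
    def test_fourth_invite_is_limited(self) -> None:
        ...
```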
self.assertEqual(json.loads(content), channel.json_body) - def test_rooms_members_other_custom_keys(self): + def test_rooms_members_other_custom_keys(self) -> None: self.other_id = "@zzsid1:red" path = "/rooms/%s/state/m.room.member/%s" % ( urlparse.quote(self.room_id), @@ -849,7 +852,7 @@ def test_rooms_members_other_custom_keys(self): channel = self.make_request("PUT", path, content) self.assertEqual(200, channel.code, msg=channel.result["body"]) - channel = self.make_request("GET", path, None) + channel = self.make_request("GET", path, content=b"") self.assertEqual(200, channel.code, msg=channel.result["body"]) self.assertEqual(json.loads(content), channel.json_body) @@ -866,7 +869,7 @@ class RoomInviteRatelimitTestCase(RoomBase): @unittest.override_config( {"rc_invites": {"per_room": {"per_second": 0.5, "burst_count": 3}}} ) - def test_invites_by_rooms_ratelimit(self): + def test_invites_by_rooms_ratelimit(self) -> None: """Tests that invites in a room are actually rate-limited.""" room_id = self.helper.create_room_as(self.user_id) @@ -878,7 +881,7 @@ def test_invites_by_rooms_ratelimit(self): @unittest.override_config( {"rc_invites": {"per_user": {"per_second": 0.5, "burst_count": 3}}} ) - def test_invites_by_users_ratelimit(self): + def test_invites_by_users_ratelimit(self) -> None: """Tests that invites to a specific user are actually rate-limited.""" for _ in range(3): @@ -897,7 +900,7 @@ class RoomJoinTestCase(RoomBase): room.register_servlets, ] - def prepare(self, reactor, clock, homeserver): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.user1 = self.register_user("thomas", "hackme") self.tok1 = self.login("thomas", "hackme") @@ -908,7 +911,7 @@ def prepare(self, reactor, clock, homeserver): self.room2 = self.helper.create_room_as(room_creator=self.user1, tok=self.tok1) self.room3 = self.helper.create_room_as(room_creator=self.user1, tok=self.tok1) - def test_spam_checker_may_join_room(self): + def test_spam_checker_may_join_room(self) -> None: """Tests that the user_may_join_room spam checker callback is correctly called and blocks room joins when needed. 
""" @@ -975,8 +978,8 @@ class RoomJoinRatelimitTestCase(RoomBase): room.register_servlets, ] - def prepare(self, reactor, clock, homeserver): - super().prepare(reactor, clock, homeserver) + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + super().prepare(reactor, clock, hs) # profile changes expect that the user is actually registered user = UserID.from_string(self.user_id) self.get_success(self.register_user(user.localpart, "supersecretpassword")) @@ -984,7 +987,7 @@ def prepare(self, reactor, clock, homeserver): @unittest.override_config( {"rc_joins": {"local": {"per_second": 0.5, "burst_count": 3}}} ) - def test_join_local_ratelimit(self): + def test_join_local_ratelimit(self) -> None: """Tests that local joins are actually rate-limited.""" for _ in range(3): self.helper.create_room_as(self.user_id) @@ -994,7 +997,7 @@ def test_join_local_ratelimit(self): @unittest.override_config( {"rc_joins": {"local": {"per_second": 0.5, "burst_count": 3}}} ) - def test_join_local_ratelimit_profile_change(self): + def test_join_local_ratelimit_profile_change(self) -> None: """Tests that sending a profile update into all of the user's joined rooms isn't rate-limited by the rate-limiter on joins.""" @@ -1031,7 +1034,7 @@ def test_join_local_ratelimit_profile_change(self): @unittest.override_config( {"rc_joins": {"local": {"per_second": 0.5, "burst_count": 3}}} ) - def test_join_local_ratelimit_idempotent(self): + def test_join_local_ratelimit_idempotent(self) -> None: """Tests that the room join endpoints remain idempotent despite rate-limiting on room joins.""" room_id = self.helper.create_room_as(self.user_id) @@ -1056,7 +1059,7 @@ def test_join_local_ratelimit_idempotent(self): "autocreate_auto_join_rooms": True, }, ) - def test_autojoin_rooms(self): + def test_autojoin_rooms(self) -> None: user_id = self.register_user("testuser", "password") # Check that the new user successfully joined the four rooms @@ -1071,10 +1074,10 @@ class RoomMessagesTestCase(RoomBase): user_id = "@sid1:red" - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.room_id = self.helper.create_room_as(self.user_id) - def test_invalid_puts(self): + def test_invalid_puts(self) -> None: path = "/rooms/%s/send/m.room.message/mid1" % (urlparse.quote(self.room_id)) # missing keys or invalid json channel = self.make_request("PUT", path, b"{}") @@ -1095,7 +1098,7 @@ def test_invalid_puts(self): channel = self.make_request("PUT", path, b"") self.assertEqual(400, channel.code, msg=channel.result["body"]) - def test_rooms_messages_sent(self): + def test_rooms_messages_sent(self) -> None: path = "/rooms/%s/send/m.room.message/mid1" % (urlparse.quote(self.room_id)) content = b'{"body":"test","msgtype":{"type":"a"}}' @@ -1119,11 +1122,11 @@ class RoomInitialSyncTestCase(RoomBase): user_id = "@sid1:red" - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: # create the room self.room_id = self.helper.create_room_as(self.user_id) - def test_initial_sync(self): + def test_initial_sync(self) -> None: channel = self.make_request("GET", "/rooms/%s/initialSync" % self.room_id) self.assertEqual(200, channel.code) @@ -1131,7 +1134,7 @@ def test_initial_sync(self): self.assertEqual("join", channel.json_body["membership"]) # Room state is easier to assert on if we unpack it into a dict - state = {} + state: JsonDict = {} for event in channel.json_body["state"]: if "state_key" 
not in event: continue @@ -1160,10 +1163,10 @@ class RoomMessageListTestCase(RoomBase): user_id = "@sid1:red" - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.room_id = self.helper.create_room_as(self.user_id) - def test_topo_token_is_accepted(self): + def test_topo_token_is_accepted(self) -> None: token = "t1-0_0_0_0_0_0_0_0_0" channel = self.make_request( "GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token) @@ -1174,7 +1177,7 @@ def test_topo_token_is_accepted(self): self.assertTrue("chunk" in channel.json_body) self.assertTrue("end" in channel.json_body) - def test_stream_token_is_accepted_for_fwd_pagianation(self): + def test_stream_token_is_accepted_for_fwd_pagianation(self) -> None: token = "s0_0_0_0_0_0_0_0_0" channel = self.make_request( "GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token) @@ -1185,7 +1188,7 @@ def test_stream_token_is_accepted_for_fwd_pagianation(self): self.assertTrue("chunk" in channel.json_body) self.assertTrue("end" in channel.json_body) - def test_room_messages_purge(self): + def test_room_messages_purge(self) -> None: store = self.hs.get_datastores().main pagination_handler = self.hs.get_pagination_handler() @@ -1278,10 +1281,10 @@ class RoomSearchTestCase(unittest.HomeserverTestCase): user_id = True hijack_auth = False - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: # Register the user who does the searching - self.user_id = self.register_user("user", "pass") + self.user_id2 = self.register_user("user", "pass") self.access_token = self.login("user", "pass") # Register the user who sends the message @@ -1289,12 +1292,12 @@ def prepare(self, reactor, clock, hs): self.other_access_token = self.login("otheruser", "pass") # Create a room - self.room = self.helper.create_room_as(self.user_id, tok=self.access_token) + self.room = self.helper.create_room_as(self.user_id2, tok=self.access_token) # Invite the other person self.helper.invite( room=self.room, - src=self.user_id, + src=self.user_id2, tok=self.access_token, targ=self.other_user_id, ) @@ -1304,7 +1307,7 @@ def prepare(self, reactor, clock, hs): room=self.room, user=self.other_user_id, tok=self.other_access_token ) - def test_finds_message(self): + def test_finds_message(self) -> None: """ The search functionality will search for content in messages if asked to do so. @@ -1333,7 +1336,7 @@ def test_finds_message(self): # No context was requested, so we should get none. self.assertEqual(results["results"][0]["context"], {}) - def test_include_context(self): + def test_include_context(self) -> None: """ When event_context includes include_profile, profile information will be included in the search response. 
@@ -1379,7 +1382,7 @@ class PublicRoomsRestrictedTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.url = b"/_matrix/client/r0/publicRooms" @@ -1389,11 +1392,11 @@ def make_homeserver(self, reactor, clock): return self.hs - def test_restricted_no_auth(self): + def test_restricted_no_auth(self) -> None: channel = self.make_request("GET", self.url) self.assertEqual(channel.code, 401, channel.result) - def test_restricted_auth(self): + def test_restricted_auth(self) -> None: self.register_user("user", "pass") tok = self.login("user", "pass") @@ -1412,19 +1415,19 @@ class PublicRoomsTestRemoteSearchFallbackTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: return self.setup_test_homeserver(federation_client=Mock()) - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.register_user("user", "pass") self.token = self.login("user", "pass") self.federation_client = hs.get_federation_client() - def test_simple(self): + def test_simple(self) -> None: "Simple test for searching rooms over federation" - self.federation_client.get_public_rooms.side_effect = ( - lambda *a, **k: defer.succeed({}) + self.federation_client.get_public_rooms.side_effect = lambda *a, **k: defer.succeed( # type: ignore[attr-defined] + {} ) search_filter = {"generic_search_term": "foobar"} @@ -1437,7 +1440,7 @@ def test_simple(self): ) self.assertEqual(channel.code, 200, channel.result) - self.federation_client.get_public_rooms.assert_called_once_with( + self.federation_client.get_public_rooms.assert_called_once_with( # type: ignore[attr-defined] "testserv", limit=100, since_token=None, @@ -1446,12 +1449,12 @@ def test_simple(self): third_party_instance_id=None, ) - def test_fallback(self): + def test_fallback(self) -> None: "Test that searching public rooms over federation falls back if it gets a 404" # The `get_public_rooms` should be called again if the first call fails # with a 404, when using search filters. 
- self.federation_client.get_public_rooms.side_effect = ( + self.federation_client.get_public_rooms.side_effect = ( # type: ignore[attr-defined] HttpResponseException(404, "Not Found", b""), defer.succeed({}), ) @@ -1466,7 +1469,7 @@ def test_fallback(self): ) self.assertEqual(channel.code, 200, channel.result) - self.federation_client.get_public_rooms.assert_has_calls( + self.federation_client.get_public_rooms.assert_has_calls( # type: ignore[attr-defined] [ call( "testserv", @@ -1497,14 +1500,14 @@ class PerRoomProfilesForbiddenTestCase(unittest.HomeserverTestCase): profile.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() config["allow_per_room_profiles"] = False self.hs = self.setup_test_homeserver(config=config) return self.hs - def prepare(self, reactor, clock, homeserver): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.user_id = self.register_user("test", "test") self.tok = self.login("test", "test") @@ -1522,7 +1525,7 @@ def prepare(self, reactor, clock, homeserver): self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) - def test_per_room_profile_forbidden(self): + def test_per_room_profile_forbidden(self) -> None: data = {"membership": "join", "displayname": "other test user"} request_data = json.dumps(data) channel = self.make_request( @@ -1557,7 +1560,7 @@ class RoomMembershipReasonTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def prepare(self, reactor, clock, homeserver): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.creator = self.register_user("creator", "test") self.creator_tok = self.login("creator", "test") @@ -1566,7 +1569,7 @@ def prepare(self, reactor, clock, homeserver): self.room_id = self.helper.create_room_as(self.creator, tok=self.creator_tok) - def test_join_reason(self): + def test_join_reason(self) -> None: reason = "hello" channel = self.make_request( "POST", @@ -1578,7 +1581,7 @@ def test_join_reason(self): self._check_for_reason(reason) - def test_leave_reason(self): + def test_leave_reason(self) -> None: self.helper.join(self.room_id, user=self.second_user_id, tok=self.second_tok) reason = "hello" @@ -1592,7 +1595,7 @@ def test_leave_reason(self): self._check_for_reason(reason) - def test_kick_reason(self): + def test_kick_reason(self) -> None: self.helper.join(self.room_id, user=self.second_user_id, tok=self.second_tok) reason = "hello" @@ -1606,7 +1609,7 @@ def test_kick_reason(self): self._check_for_reason(reason) - def test_ban_reason(self): + def test_ban_reason(self) -> None: self.helper.join(self.room_id, user=self.second_user_id, tok=self.second_tok) reason = "hello" @@ -1620,7 +1623,7 @@ def test_ban_reason(self): self._check_for_reason(reason) - def test_unban_reason(self): + def test_unban_reason(self) -> None: reason = "hello" channel = self.make_request( "POST", @@ -1632,7 +1635,7 @@ def test_unban_reason(self): self._check_for_reason(reason) - def test_invite_reason(self): + def test_invite_reason(self) -> None: reason = "hello" channel = self.make_request( "POST", @@ -1644,7 +1647,7 @@ def test_invite_reason(self): self._check_for_reason(reason) - def test_reject_invite_reason(self): + def test_reject_invite_reason(self) -> None: self.helper.invite( self.room_id, src=self.creator, @@ -1663,7 +1666,7 @@ def test_reject_invite_reason(self): self._check_for_reason(reason) - def _check_for_reason(self, 
reason): + def _check_for_reason(self, reason: str) -> None: channel = self.make_request( "GET", "/_matrix/client/r0/rooms/{}/state/m.room.member/{}".format( @@ -1704,12 +1707,12 @@ class LabelsTestCase(unittest.HomeserverTestCase): "org.matrix.not_labels": ["#notfun"], } - def prepare(self, reactor, clock, homeserver): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.user_id = self.register_user("test", "test") self.tok = self.login("test", "test") self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) - def test_context_filter_labels(self): + def test_context_filter_labels(self) -> None: """Test that we can filter by a label on a /context request.""" event_id = self._send_labelled_messages_in_room() @@ -1739,7 +1742,7 @@ def test_context_filter_labels(self): events_after[0]["content"]["body"], "with right label", events_after[0] ) - def test_context_filter_not_labels(self): + def test_context_filter_not_labels(self) -> None: """Test that we can filter by the absence of a label on a /context request.""" event_id = self._send_labelled_messages_in_room() @@ -1772,7 +1775,7 @@ def test_context_filter_not_labels(self): events_after[1]["content"]["body"], "with two wrong labels", events_after[1] ) - def test_context_filter_labels_not_labels(self): + def test_context_filter_labels_not_labels(self) -> None: """Test that we can filter by both a label and the absence of another label on a /context request. """ @@ -1801,7 +1804,7 @@ def test_context_filter_labels_not_labels(self): events_after[0]["content"]["body"], "with wrong label", events_after[0] ) - def test_messages_filter_labels(self): + def test_messages_filter_labels(self) -> None: """Test that we can filter by a label on a /messages request.""" self._send_labelled_messages_in_room() @@ -1818,7 +1821,7 @@ def test_messages_filter_labels(self): self.assertEqual(events[0]["content"]["body"], "with right label", events[0]) self.assertEqual(events[1]["content"]["body"], "with right label", events[1]) - def test_messages_filter_not_labels(self): + def test_messages_filter_not_labels(self) -> None: """Test that we can filter by the absence of a label on a /messages request.""" self._send_labelled_messages_in_room() @@ -1839,7 +1842,7 @@ def test_messages_filter_not_labels(self): events[3]["content"]["body"], "with two wrong labels", events[3] ) - def test_messages_filter_labels_not_labels(self): + def test_messages_filter_labels_not_labels(self) -> None: """Test that we can filter by both a label and the absence of another label on a /messages request. 
""" @@ -1862,7 +1865,7 @@ def test_messages_filter_labels_not_labels(self): self.assertEqual(len(events), 1, [event["content"] for event in events]) self.assertEqual(events[0]["content"]["body"], "with wrong label", events[0]) - def test_search_filter_labels(self): + def test_search_filter_labels(self) -> None: """Test that we can filter by a label on a /search request.""" request_data = json.dumps( { @@ -1899,7 +1902,7 @@ def test_search_filter_labels(self): results[1]["result"]["content"]["body"], ) - def test_search_filter_not_labels(self): + def test_search_filter_not_labels(self) -> None: """Test that we can filter by the absence of a label on a /search request.""" request_data = json.dumps( { @@ -1946,7 +1949,7 @@ def test_search_filter_not_labels(self): results[3]["result"]["content"]["body"], ) - def test_search_filter_labels_not_labels(self): + def test_search_filter_labels_not_labels(self) -> None: """Test that we can filter by both a label and the absence of another label on a /search request. """ @@ -1980,7 +1983,7 @@ def test_search_filter_labels_not_labels(self): results[0]["result"]["content"]["body"], ) - def _send_labelled_messages_in_room(self): + def _send_labelled_messages_in_room(self) -> str: """Sends several messages to a room with different labels (or without any) to test filtering by label. Returns: @@ -2056,12 +2059,12 @@ class RelationsTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def default_config(self): + def default_config(self) -> Dict[str, Any]: config = super().default_config() config["experimental_features"] = {"msc3440_enabled": True} return config - def prepare(self, reactor, clock, homeserver): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.user_id = self.register_user("test", "test") self.tok = self.login("test", "test") self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) @@ -2136,7 +2139,7 @@ def _filter_messages(self, filter: JsonDict) -> List[JsonDict]: return channel.json_body["chunk"] - def test_filter_relation_senders(self): + def test_filter_relation_senders(self) -> None: # Messages which second user reacted to. filter = {"io.element.relation_senders": [self.second_user_id]} chunk = self._filter_messages(filter) @@ -2159,7 +2162,7 @@ def test_filter_relation_senders(self): [c["event_id"] for c in chunk], [self.event_id_1, self.event_id_2] ) - def test_filter_relation_type(self): + def test_filter_relation_type(self) -> None: # Messages which have annotations. filter = {"io.element.relation_types": [RelationTypes.ANNOTATION]} chunk = self._filter_messages(filter) @@ -2185,7 +2188,7 @@ def test_filter_relation_type(self): [c["event_id"] for c in chunk], [self.event_id_1, self.event_id_2] ) - def test_filter_relation_senders_and_type(self): + def test_filter_relation_senders_and_type(self) -> None: # Messages which second user reacted to. 
filter = { "io.element.relation_senders": [self.second_user_id], @@ -2205,7 +2208,7 @@ class ContextTestCase(unittest.HomeserverTestCase): account.register_servlets, ] - def prepare(self, reactor, clock, homeserver): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.user_id = self.register_user("user", "password") self.tok = self.login("user", "password") self.room_id = self.helper.create_room_as( @@ -2218,7 +2221,7 @@ def prepare(self, reactor, clock, homeserver): self.helper.invite(self.room_id, self.user_id, self.other_user_id, tok=self.tok) self.helper.join(self.room_id, self.other_user_id, tok=self.other_tok) - def test_erased_sender(self): + def test_erased_sender(self) -> None: """Test that an erasure request results in the requester's events being hidden from any new member of the room. """ @@ -2332,7 +2335,7 @@ class RoomAliasListTestCase(unittest.HomeserverTestCase): room.register_servlets, ] - def prepare(self, reactor, clock, homeserver): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.room_owner = self.register_user("room_owner", "test") self.room_owner_tok = self.login("room_owner", "test") @@ -2340,17 +2343,17 @@ def prepare(self, reactor, clock, homeserver): self.room_owner, tok=self.room_owner_tok ) - def test_no_aliases(self): + def test_no_aliases(self) -> None: res = self._get_aliases(self.room_owner_tok) self.assertEqual(res["aliases"], []) - def test_not_in_room(self): + def test_not_in_room(self) -> None: self.register_user("user", "test") user_tok = self.login("user", "test") res = self._get_aliases(user_tok, expected_code=403) self.assertEqual(res["errcode"], "M_FORBIDDEN") - def test_admin_user(self): + def test_admin_user(self) -> None: alias1 = self._random_alias() self._set_alias_via_directory(alias1) @@ -2360,7 +2363,7 @@ def test_admin_user(self): res = self._get_aliases(user_tok) self.assertEqual(res["aliases"], [alias1]) - def test_with_aliases(self): + def test_with_aliases(self) -> None: alias1 = self._random_alias() alias2 = self._random_alias() @@ -2370,7 +2373,7 @@ def test_with_aliases(self): res = self._get_aliases(self.room_owner_tok) self.assertEqual(set(res["aliases"]), {alias1, alias2}) - def test_peekable_room(self): + def test_peekable_room(self) -> None: alias1 = self._random_alias() self._set_alias_via_directory(alias1) @@ -2404,7 +2407,7 @@ def _get_aliases(self, access_token: str, expected_code: int = 200) -> JsonDict: def _random_alias(self) -> str: return RoomAlias(random_string(5), self.hs.hostname).to_string() - def _set_alias_via_directory(self, alias: str, expected_code: int = 200): + def _set_alias_via_directory(self, alias: str, expected_code: int = 200) -> None: url = "/_matrix/client/r0/directory/room/" + alias data = {"room_id": self.room_id} request_data = json.dumps(data) @@ -2423,7 +2426,7 @@ class RoomCanonicalAliasTestCase(unittest.HomeserverTestCase): room.register_servlets, ] - def prepare(self, reactor, clock, homeserver): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.room_owner = self.register_user("room_owner", "test") self.room_owner_tok = self.login("room_owner", "test") @@ -2434,7 +2437,7 @@ def prepare(self, reactor, clock, homeserver): self.alias = "#alias:test" self._set_alias_via_directory(self.alias) - def _set_alias_via_directory(self, alias: str, expected_code: int = 200): + def _set_alias_via_directory(self, alias: str, expected_code: int = 200) -> None: url = 
"/_matrix/client/r0/directory/room/" + alias data = {"room_id": self.room_id} request_data = json.dumps(data) @@ -2456,7 +2459,9 @@ def _get_canonical_alias(self, expected_code: int = 200) -> JsonDict: self.assertIsInstance(res, dict) return res - def _set_canonical_alias(self, content: str, expected_code: int = 200) -> JsonDict: + def _set_canonical_alias( + self, content: JsonDict, expected_code: int = 200 + ) -> JsonDict: """Calls the endpoint under test. returns the json response object.""" channel = self.make_request( "PUT", @@ -2469,7 +2474,7 @@ def _set_canonical_alias(self, content: str, expected_code: int = 200) -> JsonDi self.assertIsInstance(res, dict) return res - def test_canonical_alias(self): + def test_canonical_alias(self) -> None: """Test a basic alias message.""" # There is no canonical alias to start with. self._get_canonical_alias(expected_code=404) @@ -2488,7 +2493,7 @@ def test_canonical_alias(self): res = self._get_canonical_alias() self.assertEqual(res, {}) - def test_alt_aliases(self): + def test_alt_aliases(self) -> None: """Test a canonical alias message with alt_aliases.""" # Create an alias. self._set_canonical_alias({"alt_aliases": [self.alias]}) @@ -2504,7 +2509,7 @@ def test_alt_aliases(self): res = self._get_canonical_alias() self.assertEqual(res, {}) - def test_alias_alt_aliases(self): + def test_alias_alt_aliases(self) -> None: """Test a canonical alias message with an alias and alt_aliases.""" # Create an alias. self._set_canonical_alias({"alias": self.alias, "alt_aliases": [self.alias]}) @@ -2520,7 +2525,7 @@ def test_alias_alt_aliases(self): res = self._get_canonical_alias() self.assertEqual(res, {}) - def test_partial_modify(self): + def test_partial_modify(self) -> None: """Test removing only the alt_aliases.""" # Create an alias. self._set_canonical_alias({"alias": self.alias, "alt_aliases": [self.alias]}) @@ -2536,7 +2541,7 @@ def test_partial_modify(self): res = self._get_canonical_alias() self.assertEqual(res, {"alias": self.alias}) - def test_add_alias(self): + def test_add_alias(self) -> None: """Test removing only the alt_aliases.""" # Create an additional alias. 
second_alias = "#second:test" @@ -2556,7 +2561,7 @@ def test_add_alias(self): res, {"alias": self.alias, "alt_aliases": [self.alias, second_alias]} ) - def test_bad_data(self): + def test_bad_data(self) -> None: """Invalid data for alt_aliases should cause errors.""" self._set_canonical_alias({"alt_aliases": "@bad:test"}, expected_code=400) self._set_canonical_alias({"alt_aliases": None}, expected_code=400) @@ -2566,7 +2571,7 @@ def test_bad_data(self): self._set_canonical_alias({"alt_aliases": True}, expected_code=400) self._set_canonical_alias({"alt_aliases": {}}, expected_code=400) - def test_bad_alias(self): + def test_bad_alias(self) -> None: """An alias which does not point to the room raises a SynapseError.""" self._set_canonical_alias({"alias": "@unknown:test"}, expected_code=400) self._set_canonical_alias({"alt_aliases": ["@unknown:test"]}, expected_code=400) @@ -2580,13 +2585,13 @@ class ThreepidInviteTestCase(unittest.HomeserverTestCase): room.register_servlets, ] - def prepare(self, reactor, clock, homeserver): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.user_id = self.register_user("thomas", "hackme") self.tok = self.login("thomas", "hackme") self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) - def test_threepid_invite_spamcheck(self): + def test_threepid_invite_spamcheck(self) -> None: # Mock a few functions to prevent the test from failing due to failing to talk to # a remote IS. We keep the mock for _mock_make_and_store_3pid_invite around so we # can check its call_count later on during the test. diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index bfc04785b7b2..58f1ea11b7da 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -12,16 +12,22 @@ # See the License for the specific language governing permissions and # limitations under the License. import threading -from typing import TYPE_CHECKING, Dict, Optional, Tuple +from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union from unittest.mock import Mock +from twisted.test.proto_helpers import MemoryReactor + from synapse.api.constants import EventTypes, LoginType, Membership from synapse.api.errors import SynapseError +from synapse.api.room_versions import RoomVersion from synapse.events import EventBase +from synapse.events.snapshot import EventContext from synapse.events.third_party_rules import load_legacy_third_party_event_rules from synapse.rest import admin from synapse.rest.client import account, login, profile, room +from synapse.server import HomeServer from synapse.types import JsonDict, Requester, StateMap +from synapse.util import Clock from synapse.util.frozenutils import unfreeze from tests import unittest @@ -34,7 +40,7 @@ class LegacyThirdPartyRulesTestModule: - def __init__(self, config: Dict, module_api: "ModuleApi"): + def __init__(self, config: Dict, module_api: "ModuleApi") -> None: # keep a record of the "current" rules module, so that the test can patch # it if desired. 
thread_local.rules_module = self @@ -42,32 +48,36 @@ def __init__(self, config: Dict, module_api: "ModuleApi"): async def on_create_room( self, requester: Requester, config: dict, is_requester_admin: bool - ): + ) -> bool: return True - async def check_event_allowed(self, event: EventBase, state: StateMap[EventBase]): + async def check_event_allowed( + self, event: EventBase, state: StateMap[EventBase] + ) -> Union[bool, dict]: return True @staticmethod - def parse_config(config): + def parse_config(config: Dict[str, Any]) -> Dict[str, Any]: return config class LegacyDenyNewRooms(LegacyThirdPartyRulesTestModule): - def __init__(self, config: Dict, module_api: "ModuleApi"): + def __init__(self, config: Dict, module_api: "ModuleApi") -> None: super().__init__(config, module_api) - def on_create_room( + async def on_create_room( self, requester: Requester, config: dict, is_requester_admin: bool - ): + ) -> bool: return False class LegacyChangeEvents(LegacyThirdPartyRulesTestModule): - def __init__(self, config: Dict, module_api: "ModuleApi"): + def __init__(self, config: Dict, module_api: "ModuleApi") -> None: super().__init__(config, module_api) - async def check_event_allowed(self, event: EventBase, state: StateMap[EventBase]): + async def check_event_allowed( + self, event: EventBase, state: StateMap[EventBase] + ) -> JsonDict: d = event.get_dict() content = unfreeze(event.content) content["foo"] = "bar" @@ -84,7 +94,7 @@ class ThirdPartyRulesTestCase(unittest.FederatingHomeserverTestCase): account.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: hs = self.setup_test_homeserver() load_legacy_third_party_event_rules(hs) @@ -94,22 +104,30 @@ def make_homeserver(self, reactor, clock): # Note that these checks are not relevant to this test case. # Have this homeserver auto-approve all event signature checking. - async def approve_all_signature_checking(_, pdu): + async def approve_all_signature_checking( + _: RoomVersion, pdu: EventBase + ) -> EventBase: return pdu - hs.get_federation_server()._check_sigs_and_hash = approve_all_signature_checking + hs.get_federation_server()._check_sigs_and_hash = approve_all_signature_checking # type: ignore[assignment] # Have this homeserver skip event auth checks. This is necessary due to # event auth checks ensuring that events were signed by the sender's homeserver. - async def _check_event_auth(origin, event, context, *args, **kwargs): + async def _check_event_auth( + origin: str, + event: EventBase, + context: EventContext, + *args: Any, + **kwargs: Any, + ) -> EventContext: return context - hs.get_federation_event_handler()._check_event_auth = _check_event_auth + hs.get_federation_event_handler()._check_event_auth = _check_event_auth # type: ignore[assignment] return hs - def prepare(self, reactor, clock, homeserver): - super().prepare(reactor, clock, homeserver) + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + super().prepare(reactor, clock, hs) # Create some users and a room to play with during the tests self.user_id = self.register_user("kermit", "monkey") self.invitee = self.register_user("invitee", "hackme") @@ -121,13 +139,15 @@ def prepare(self, reactor, clock, homeserver): except Exception: pass - def test_third_party_rules(self): + def test_third_party_rules(self) -> None: """Tests that a forbidden event is forbidden from being sent, but an allowed one can be sent. 
""" # patch the rules module with a Mock which will return False for some event # types - async def check(ev, state): + async def check( + ev: EventBase, state: StateMap[EventBase] + ) -> Tuple[bool, Optional[JsonDict]]: return ev.type != "foo.bar.forbidden", None callback = Mock(spec=[], side_effect=check) @@ -161,7 +181,7 @@ async def check(ev, state): ) self.assertEqual(channel.result["code"], b"403", channel.result) - def test_third_party_rules_workaround_synapse_errors_pass_through(self): + def test_third_party_rules_workaround_synapse_errors_pass_through(self) -> None: """ Tests that the workaround introduced by https://github.com/matrix-org/synapse/pull/11042 is functional: that SynapseErrors are passed through from check_event_allowed @@ -172,7 +192,7 @@ def test_third_party_rules_workaround_synapse_errors_pass_through(self): """ class NastyHackException(SynapseError): - def error_dict(self): + def error_dict(self) -> JsonDict: """ This overrides SynapseError's `error_dict` to nastily inject JSON into the error response. @@ -182,7 +202,9 @@ def error_dict(self): return result # add a callback that will raise our hacky exception - async def check(ev, state) -> Tuple[bool, Optional[JsonDict]]: + async def check( + ev: EventBase, state: StateMap[EventBase] + ) -> Tuple[bool, Optional[JsonDict]]: raise NastyHackException(429, "message") self.hs.get_third_party_event_rules()._check_event_allowed_callbacks = [check] @@ -202,11 +224,13 @@ async def check(ev, state) -> Tuple[bool, Optional[JsonDict]]: {"errcode": "M_UNKNOWN", "error": "message", "nasty": "very"}, ) - def test_cannot_modify_event(self): + def test_cannot_modify_event(self) -> None: """cannot accidentally modify an event before it is persisted""" # first patch the event checker so that it will try to modify the event - async def check(ev: EventBase, state): + async def check( + ev: EventBase, state: StateMap[EventBase] + ) -> Tuple[bool, Optional[JsonDict]]: ev.content = {"x": "y"} return True, None @@ -223,10 +247,12 @@ async def check(ev: EventBase, state): # 500 Internal Server Error self.assertEqual(channel.code, 500, channel.result) - def test_modify_event(self): + def test_modify_event(self) -> None: """The module can return a modified version of the event""" # first patch the event checker so that it will modify the event - async def check(ev: EventBase, state): + async def check( + ev: EventBase, state: StateMap[EventBase] + ) -> Tuple[bool, Optional[JsonDict]]: d = ev.get_dict() d["content"] = {"x": "y"} return True, d @@ -253,10 +279,12 @@ async def check(ev: EventBase, state): ev = channel.json_body self.assertEqual(ev["content"]["x"], "y") - def test_message_edit(self): + def test_message_edit(self) -> None: """Ensure that the module doesn't cause issues with edited messages.""" # first patch the event checker so that it will modify the event - async def check(ev: EventBase, state): + async def check( + ev: EventBase, state: StateMap[EventBase] + ) -> Tuple[bool, Optional[JsonDict]]: d = ev.get_dict() d["content"] = { "msgtype": "m.text", @@ -315,7 +343,7 @@ async def check(ev: EventBase, state): ev = channel.json_body self.assertEqual(ev["content"]["body"], "EDITED BODY") - def test_send_event(self): + def test_send_event(self) -> None: """Tests that a module can send an event into a room via the module api""" content = { "msgtype": "m.text", @@ -344,7 +372,7 @@ def test_send_event(self): } } ) - def test_legacy_check_event_allowed(self): + def test_legacy_check_event_allowed(self) -> None: """Tests that the 
wrapper for legacy check_event_allowed callbacks works correctly. """ @@ -379,13 +407,13 @@ def test_legacy_check_event_allowed(self): } } ) - def test_legacy_on_create_room(self): + def test_legacy_on_create_room(self) -> None: """Tests that the wrapper for legacy on_create_room callbacks works correctly. """ self.helper.create_room_as(self.user_id, tok=self.tok, expect_code=403) - def test_sent_event_end_up_in_room_state(self): + def test_sent_event_end_up_in_room_state(self) -> None: """Tests that a state event sent by a module while processing another state event doesn't get dropped from the state of the room. This is to guard against a bug where Synapse has been observed doing so, see https://github.com/matrix-org/synapse/issues/10830 @@ -400,7 +428,9 @@ def test_sent_event_end_up_in_room_state(self): api = self.hs.get_module_api() # Define a callback that sends a custom event on power levels update. - async def test_fn(event: EventBase, state_events): + async def test_fn( + event: EventBase, state_events: StateMap[EventBase] + ) -> Tuple[bool, Optional[JsonDict]]: if event.is_state and event.type == EventTypes.PowerLevels: await api.create_and_send_event_into_room( { @@ -436,7 +466,7 @@ async def test_fn(event: EventBase, state_events): self.assertEqual(channel.code, 200) self.assertEqual(channel.json_body["i"], i) - def test_on_new_event(self): + def test_on_new_event(self) -> None: """Test that the on_new_event callback is called on new events""" on_new_event = Mock(make_awaitable(None)) self.hs.get_third_party_event_rules()._on_new_event_callbacks.append( @@ -501,7 +531,7 @@ def _send_event_over_federation(self) -> None: self.assertEqual(channel.code, 200, channel.result) - def _update_power_levels(self, event_default: int = 0): + def _update_power_levels(self, event_default: int = 0) -> None: """Updates the room's power levels. Args: @@ -533,7 +563,7 @@ def _update_power_levels(self, event_default: int = 0): tok=self.tok, ) - def test_on_profile_update(self): + def test_on_profile_update(self) -> None: """Tests that the on_profile_update module callback is correctly called on profile updates. """ @@ -592,7 +622,7 @@ def test_on_profile_update(self): self.assertEqual(profile_info.display_name, displayname) self.assertEqual(profile_info.avatar_url, avatar_url) - def test_on_profile_update_admin(self): + def test_on_profile_update_admin(self) -> None: """Tests that the on_profile_update module callback is correctly called on profile updates triggered by a server admin. """ @@ -634,7 +664,7 @@ def test_on_profile_update_admin(self): self.assertEqual(profile_info.display_name, displayname) self.assertEqual(profile_info.avatar_url, avatar_url) - def test_on_user_deactivation_status_changed(self): + def test_on_user_deactivation_status_changed(self) -> None: """Tests that the on_user_deactivation_status_changed module callback is called correctly when processing a user's deactivation. """ @@ -691,7 +721,7 @@ def test_on_user_deactivation_status_changed(self): args = profile_mock.call_args[0] self.assertTrue(args[3]) - def test_on_user_deactivation_status_changed_admin(self): + def test_on_user_deactivation_status_changed_admin(self) -> None: """Tests that the on_user_deactivation_status_changed module callback is called correctly when processing a user's deactivation triggered by a server admin as well as a reactivation. 
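
Taken together, the hunks above settle on one annotation pattern for `HomeserverTestCase` subclasses: `make_homeserver` is typed to return the `HomeServer` it builds, while `prepare` and every `test_*` method return `None`. A minimal sketch of the fully typed skeleton (the class, user ID and test below are illustrative, not part of the patch, but the hook signatures match the ones added throughout the diff):

```python
# Illustrative skeleton only -- the names are invented, but the hook
# signatures are the ones this patch applies across the test suite.
from twisted.test.proto_helpers import MemoryReactor

from synapse.server import HomeServer
from synapse.util import Clock

from tests import unittest


class ExampleTestCase(unittest.HomeserverTestCase):
    user_id = "@alice:test"

    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
        # Builder hooks return what they build, hence `-> HomeServer`.
        return self.setup_test_homeserver()

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        # Setup hooks and test methods only mutate state or assert: `-> None`.
        self.room_id = self.helper.create_room_as(self.user_id)

    def test_example(self) -> None:
        channel = self.make_request("GET", "/rooms/%s/initialSync" % self.room_id)
        self.assertEqual(200, channel.code)
```
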
diff --git a/tests/rest/client/test_typing.py b/tests/rest/client/test_typing.py index 8b2da88e8a58..43be711a64f2 100644 --- a/tests/rest/client/test_typing.py +++ b/tests/rest/client/test_typing.py @@ -14,11 +14,16 @@ # limitations under the License. """Tests REST events for /rooms paths.""" - +from typing import Any from unittest.mock import Mock +from twisted.test.proto_helpers import MemoryReactor + from synapse.rest.client import room +from synapse.server import HomeServer +from synapse.storage.databases.main.registration import TokenLookupResult from synapse.types import UserID +from synapse.util import Clock from tests import unittest @@ -33,7 +38,7 @@ class RoomTypingTestCase(unittest.HomeserverTestCase): user = UserID.from_string(user_id) servlets = [room.register_servlets] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: hs = self.setup_test_homeserver( "red", @@ -43,30 +48,34 @@ def make_homeserver(self, reactor, clock): self.event_source = hs.get_event_sources().sources.typing - hs.get_federation_handler = Mock() + hs.get_federation_handler = Mock() # type: ignore[assignment] - async def get_user_by_access_token(token=None, allow_guest=False): - return { - "user": UserID.from_string(self.auth_user_id), - "token_id": 1, - "is_guest": False, - } + async def get_user_by_access_token( + token: str, + rights: str = "access", + allow_expired: bool = False, + ) -> TokenLookupResult: + return TokenLookupResult( + user_id=self.user_id, + is_guest=False, + token_id=1, + ) - hs.get_auth().get_user_by_access_token = get_user_by_access_token + hs.get_auth().get_user_by_access_token = get_user_by_access_token # type: ignore[assignment] - async def _insert_client_ip(*args, **kwargs): + async def _insert_client_ip(*args: Any, **kwargs: Any) -> None: return None - hs.get_datastores().main.insert_client_ip = _insert_client_ip + hs.get_datastores().main.insert_client_ip = _insert_client_ip # type: ignore[assignment] return hs - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.room_id = self.helper.create_room_as(self.user_id) # Need another user to make notifications actually work self.helper.join(self.room_id, user="@jim:red") - def test_set_typing(self): + def test_set_typing(self) -> None: channel = self.make_request( "PUT", "/rooms/%s/typing/%s" % (self.room_id, self.user_id), @@ -95,7 +104,7 @@ def test_set_typing(self): ], ) - def test_set_not_typing(self): + def test_set_not_typing(self) -> None: channel = self.make_request( "PUT", "/rooms/%s/typing/%s" % (self.room_id, self.user_id), @@ -103,7 +112,7 @@ def test_set_not_typing(self): ) self.assertEqual(200, channel.code) - def test_typing_timeout(self): + def test_typing_timeout(self) -> None: channel = self.make_request( "PUT", "/rooms/%s/typing/%s" % (self.room_id, self.user_id), From 106959b3cf1a59ab5469db639223b6a5b84fb7d7 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 2 Mar 2022 17:24:52 +0000 Subject: [PATCH 337/474] Remove unused mocks from `test_typing` (#12136) * Remove unused mocks from `test_typing` It's not clear what these do. `get_user_by_access_token` has the wrong signature, including the return type. Tests all pass without these. I think we should nuke them. 
* Changelog * Fixup imports --- changelog.d/12136.misc | 1 + tests/rest/client/test_typing.py | 32 +------------------------------- 2 files changed, 2 insertions(+), 31 deletions(-) create mode 100644 changelog.d/12136.misc diff --git a/changelog.d/12136.misc b/changelog.d/12136.misc new file mode 100644 index 000000000000..98b1c1c9d8ac --- /dev/null +++ b/changelog.d/12136.misc @@ -0,0 +1 @@ +Remove unused mocks from `test_typing`. \ No newline at end of file diff --git a/tests/rest/client/test_typing.py b/tests/rest/client/test_typing.py index 43be711a64f2..d6da510773af 100644 --- a/tests/rest/client/test_typing.py +++ b/tests/rest/client/test_typing.py @@ -14,14 +14,11 @@ # limitations under the License. """Tests REST events for /rooms paths.""" -from typing import Any -from unittest.mock import Mock from twisted.test.proto_helpers import MemoryReactor from synapse.rest.client import room from synapse.server import HomeServer -from synapse.storage.databases.main.registration import TokenLookupResult from synapse.types import UserID from synapse.util import Clock @@ -39,35 +36,8 @@ class RoomTypingTestCase(unittest.HomeserverTestCase): servlets = [room.register_servlets] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - - hs = self.setup_test_homeserver( - "red", - federation_http_client=None, - federation_client=Mock(), - ) - + hs = self.setup_test_homeserver("red") self.event_source = hs.get_event_sources().sources.typing - - hs.get_federation_handler = Mock() # type: ignore[assignment] - - async def get_user_by_access_token( - token: str, - rights: str = "access", - allow_expired: bool = False, - ) -> TokenLookupResult: - return TokenLookupResult( - user_id=self.user_id, - is_guest=False, - token_id=1, - ) - - hs.get_auth().get_user_by_access_token = get_user_by_access_token # type: ignore[assignment] - - async def _insert_client_ip(*args: Any, **kwargs: Any) -> None: - return None - - hs.get_datastores().main.insert_client_ip = _insert_client_ip # type: ignore[assignment] - return hs def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: From 1fbe0316a991e77289d4577b16ff3fcd27c26dc8 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 2 Mar 2022 18:00:26 +0000 Subject: [PATCH 338/474] Add suffices to scripts in scripts-dev (#12137) * Rename scripts-dev to have suffices * Update references to `scripts-dev` * Changelog * These scripts don't pass mypy --- .github/workflows/release-artifacts.yml | 4 ++-- .github/workflows/tests.yml | 4 ++-- changelog.d/12137.misc | 1 + docs/code_style.md | 2 +- mypy.ini | 12 +++++++++++- ...uild_debian_packages => build_debian_packages.py} | 0 .../{check-newsfragment => check-newsfragment.sh} | 0 ...erate_sample_config => generate_sample_config.sh} | 4 ++-- scripts-dev/lint.sh | 2 -- scripts-dev/{sign_json => sign_json.py} | 0 tox.ini | 2 -- 11 files changed, 19 insertions(+), 12 deletions(-) create mode 100644 changelog.d/12137.misc rename scripts-dev/{build_debian_packages => build_debian_packages.py} (100%) rename scripts-dev/{check-newsfragment => check-newsfragment.sh} (100%) rename scripts-dev/{generate_sample_config => generate_sample_config.sh} (86%) rename scripts-dev/{sign_json => sign_json.py} (100%) diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index eee3633d5043..65ea761ad713 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -31,7 +31,7 @@ jobs: # if we're running from a tag, get 
the full list of distros; otherwise just use debian:sid dists='["debian:sid"]' if [[ $GITHUB_REF == refs/tags/* ]]; then - dists=$(scripts-dev/build_debian_packages --show-dists-json) + dists=$(scripts-dev/build_debian_packages.py --show-dists-json) fi echo "::set-output name=distros::$dists" # map the step outputs to job outputs @@ -74,7 +74,7 @@ jobs: # see https://github.com/docker/build-push-action/issues/252 # for the cache magic here run: | - ./src/scripts-dev/build_debian_packages \ + ./src/scripts-dev/build_debian_packages.py \ --docker-build-arg=--cache-from=type=local,src=/tmp/.buildx-cache \ --docker-build-arg=--cache-to=type=local,mode=max,dest=/tmp/.buildx-cache-new \ --docker-build-arg=--progress=plain \ diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index e9e427732239..3f4e44ca592d 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -16,7 +16,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 - run: pip install -e . - - run: scripts-dev/generate_sample_config --check + - run: scripts-dev/generate_sample_config.sh --check lint: runs-on: ubuntu-latest @@ -51,7 +51,7 @@ jobs: fetch-depth: 0 - uses: actions/setup-python@v2 - run: "pip install 'towncrier>=18.6.0rc1'" - - run: scripts-dev/check-newsfragment + - run: scripts-dev/check-newsfragment.sh env: PULL_REQUEST_NUMBER: ${{ github.event.number }} diff --git a/changelog.d/12137.misc b/changelog.d/12137.misc new file mode 100644 index 000000000000..118ff77a91c6 --- /dev/null +++ b/changelog.d/12137.misc @@ -0,0 +1 @@ +Give `scripts-dev` scripts suffixes for neater CI config. \ No newline at end of file diff --git a/docs/code_style.md b/docs/code_style.md index 4d8e7c973d05..e7c9cd1a5e4f 100644 --- a/docs/code_style.md +++ b/docs/code_style.md @@ -172,6 +172,6 @@ frobber: ``` Note that the sample configuration is generated from the synapse code -and is maintained by a script, `scripts-dev/generate_sample_config`. +and is maintained by a script, `scripts-dev/generate_sample_config.sh`. Making sure that the output from this script matches the desired format is left as an exercise for the reader! 
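
The `--check` mode that `generate_sample_config.sh` implements follows a simple generate-or-diff pattern: rebuild the artifact in memory, compare it byte-for-byte with the committed copy, and fail CI on any drift. A hypothetical Python equivalent, to make the pattern concrete (the file name and the `generate()` helper are invented for illustration):

```python
# Hypothetical sketch of the generate-or-check pattern; `generate()` stands
# in for the real generator (synapse/_scripts/generate_config.py).
import sys
from pathlib import Path

SAMPLE = Path("docs/sample_config.yaml")


def generate() -> str:
    return "# sample config\n"


def main() -> None:
    if "--check" in sys.argv:
        if SAMPLE.read_text() != generate():
            # Non-zero exit fails the CI job, prompting a regeneration commit.
            sys.exit(f"{SAMPLE} is out of date; regenerate it and commit the result")
    else:
        SAMPLE.write_text(generate())


if __name__ == "__main__":
    main()
```
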
diff --git a/mypy.ini b/mypy.ini index 23ca4eaa5a8b..10971b722514 100644 --- a/mypy.ini +++ b/mypy.ini @@ -11,7 +11,7 @@ local_partial_types = True no_implicit_optional = True files = - scripts-dev/sign_json, + scripts-dev/, setup.py, synapse/, tests/ @@ -23,10 +23,20 @@ files = # https://docs.python.org/3/library/re.html#re.X exclude = (?x) ^( + |scripts-dev/build_debian_packages.py + |scripts-dev/check_signature.py + |scripts-dev/definitions.py + |scripts-dev/federation_client.py + |scripts-dev/hash_history.py + |scripts-dev/list_url_patterns.py + |scripts-dev/release.py + |scripts-dev/tail-synapse.py + |synapse/_scripts/export_signing_key.py |synapse/_scripts/move_remote_media_to_new_store.py |synapse/_scripts/synapse_port_db.py |synapse/_scripts/update_synapse_database.py + |synapse/storage/databases/__init__.py |synapse/storage/databases/main/__init__.py |synapse/storage/databases/main/cache.py diff --git a/scripts-dev/build_debian_packages b/scripts-dev/build_debian_packages.py similarity index 100% rename from scripts-dev/build_debian_packages rename to scripts-dev/build_debian_packages.py diff --git a/scripts-dev/check-newsfragment b/scripts-dev/check-newsfragment.sh similarity index 100% rename from scripts-dev/check-newsfragment rename to scripts-dev/check-newsfragment.sh diff --git a/scripts-dev/generate_sample_config b/scripts-dev/generate_sample_config.sh similarity index 86% rename from scripts-dev/generate_sample_config rename to scripts-dev/generate_sample_config.sh index 185e277933e3..375897eacb67 100755 --- a/scripts-dev/generate_sample_config +++ b/scripts-dev/generate_sample_config.sh @@ -15,11 +15,11 @@ check() { if [ "$1" == "--check" ]; then diff -u "$SAMPLE_CONFIG" <(synapse/_scripts/generate_config.py --header-file docs/.sample_config_header.yaml) >/dev/null || { - echo -e "\e[1m\e[31m$SAMPLE_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config\`.\e[0m" >&2 + echo -e "\e[1m\e[31m$SAMPLE_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config.sh\`.\e[0m" >&2 exit 1 } diff -u "$SAMPLE_LOG_CONFIG" <(synapse/_scripts/generate_log_config.py) >/dev/null || { - echo -e "\e[1m\e[31m$SAMPLE_LOG_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config\`.\e[0m" >&2 + echo -e "\e[1m\e[31m$SAMPLE_LOG_CONFIG is not up-to-date. Regenerate it with \`scripts-dev/generate_sample_config.sh\`.\e[0m" >&2 exit 1 } else diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh index df4d4934d06f..2f5f2c356674 100755 --- a/scripts-dev/lint.sh +++ b/scripts-dev/lint.sh @@ -85,8 +85,6 @@ else "synapse" "docker" "tests" # annoyingly, black doesn't find these so we have to list them "scripts-dev" - "scripts-dev/build_debian_packages" - "scripts-dev/sign_json" "contrib" "synctl" "setup.py" "synmark" "stubs" ".ci" ) fi diff --git a/scripts-dev/sign_json b/scripts-dev/sign_json.py similarity index 100% rename from scripts-dev/sign_json rename to scripts-dev/sign_json.py diff --git a/tox.ini b/tox.ini index 8d6aa7580bb8..f4829200cca6 100644 --- a/tox.ini +++ b/tox.ini @@ -40,8 +40,6 @@ lint_targets = tests # annoyingly, black doesn't find these so we have to list them scripts-dev - scripts-dev/build_debian_packages - scripts-dev/sign_json stubs contrib synctl From 11282ade1d8deeafa042a639e2685472d6347e69 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 2 Mar 2022 19:22:44 +0000 Subject: [PATCH 339/474] Move the `snapcraft` configuration to `contrib`. (#12142) * Move the `snapcraft` configuration to `contrib`. 
We're happy for people to package this as a snap image if it's useful, but we don't support or maintain it. I'd like to move the config to `contrib` to reflect this state of affairs. * Changelog --- MANIFEST.in | 1 - changelog.d/12142.misc | 1 + {snap => contrib/snap}/snapcraft.yaml | 0 3 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 changelog.d/12142.misc rename {snap => contrib/snap}/snapcraft.yaml (100%) diff --git a/MANIFEST.in b/MANIFEST.in index 7e903518e152..f1e295e5837f 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -52,5 +52,4 @@ prune contrib prune debian prune demo/etc prune docker -prune snap prune stubs diff --git a/changelog.d/12142.misc b/changelog.d/12142.misc new file mode 100644 index 000000000000..5d09f90b5244 --- /dev/null +++ b/changelog.d/12142.misc @@ -0,0 +1 @@ +Move the snapcraft configuration file to `contrib`. \ No newline at end of file diff --git a/snap/snapcraft.yaml b/contrib/snap/snapcraft.yaml similarity index 100% rename from snap/snapcraft.yaml rename to contrib/snap/snapcraft.yaml From ae8a616b4926a005cd73db835a1ebcea4f5cbee0 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 3 Mar 2022 11:39:58 +0100 Subject: [PATCH 340/474] Correctly register deactivation and profile update module callbacks (#12141) --- changelog.d/12141.bugfix | 1 + synapse/events/third_party_rules.py | 10 +++++++--- synapse/module_api/__init__.py | 8 ++++++++ 3 files changed, 16 insertions(+), 3 deletions(-) create mode 100644 changelog.d/12141.bugfix diff --git a/changelog.d/12141.bugfix b/changelog.d/12141.bugfix new file mode 100644 index 000000000000..03a2507e2ce6 --- /dev/null +++ b/changelog.d/12141.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.54.0rc1 preventing the new module callbacks introduced in this release from being registered by modules. 
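
To see what this fixes from a module author's perspective: before this patch, `ModuleApi.register_third_party_rules_callbacks` did not accept the two new keyword arguments at all, and the internal registration method expected a differently named `on_deactivation` argument, so attempting to register either callback failed. A hypothetical module using the registration that the hunks below enable (the callback bodies are placeholders; consult the module API docs for the exact callback signatures):

```python
# Hypothetical module: the class and bodies are invented, but the keyword
# arguments are exactly the ones this patch teaches ModuleApi to accept.
from synapse.module_api import ModuleApi


class ExampleModule:
    def __init__(self, config: dict, api: ModuleApi) -> None:
        api.register_third_party_rules_callbacks(
            on_profile_update=self.on_profile_update,
            on_user_deactivation_status_changed=self.on_deactivation_change,
        )

    async def on_profile_update(self, user_id, new_profile, by_admin, deactivation) -> None:
        pass  # e.g. mirror profile changes to an external directory

    async def on_deactivation_change(self, user_id, deactivated, by_admin) -> None:
        pass  # e.g. disable the account in an external system
```
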
diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py
index dd3104faf3ac..ede72ee87631 100644
--- a/synapse/events/third_party_rules.py
+++ b/synapse/events/third_party_rules.py
@@ -174,7 +174,9 @@ def register_third_party_rules_callbacks(
         ] = None,
         on_new_event: Optional[ON_NEW_EVENT_CALLBACK] = None,
         on_profile_update: Optional[ON_PROFILE_UPDATE_CALLBACK] = None,
-        on_deactivation: Optional[ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK] = None,
+        on_user_deactivation_status_changed: Optional[
+            ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK
+        ] = None,
     ) -> None:
         """Register callbacks from modules for each hook."""
         if check_event_allowed is not None:
@@ -199,8 +201,10 @@ def register_third_party_rules_callbacks(
         if on_profile_update is not None:
             self._on_profile_update_callbacks.append(on_profile_update)
 
-        if on_deactivation is not None:
-            self._on_user_deactivation_status_changed_callbacks.append(on_deactivation)
+        if on_user_deactivation_status_changed is not None:
+            self._on_user_deactivation_status_changed_callbacks.append(
+                on_user_deactivation_status_changed,
+            )
 
     async def check_event_allowed(
         self, event: EventBase, context: EventContext
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index 7e469318695e..c42eeedd87ae 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -59,6 +59,8 @@
     CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK,
     ON_CREATE_ROOM_CALLBACK,
     ON_NEW_EVENT_CALLBACK,
+    ON_PROFILE_UPDATE_CALLBACK,
+    ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK,
 )
 from synapse.handlers.account_validity import (
     IS_USER_EXPIRED_CALLBACK,
@@ -281,6 +283,10 @@ def register_third_party_rules_callbacks(
             CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK
         ] = None,
         on_new_event: Optional[ON_NEW_EVENT_CALLBACK] = None,
+        on_profile_update: Optional[ON_PROFILE_UPDATE_CALLBACK] = None,
+        on_user_deactivation_status_changed: Optional[
+            ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK
+        ] = None,
     ) -> None:
         """Registers callbacks for third party event rules capabilities.
 
@@ -292,6 +298,8 @@ def register_third_party_rules_callbacks(
             check_threepid_can_be_invited=check_threepid_can_be_invited,
             check_visibility_can_be_modified=check_visibility_can_be_modified,
             on_new_event=on_new_event,
+            on_profile_update=on_profile_update,
+            on_user_deactivation_status_changed=on_user_deactivation_status_changed,
         )
 
     def register_presence_router_callbacks(

From 31b125ccec75e708b09f40205c8cfe692edfa6b4 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Thu, 3 Mar 2022 04:45:23 -0600
Subject: [PATCH 341/474] Enable MSC3030 Complement tests in Synapse (#12144)

The Complement tests for MSC3030 are now merged:
https://github.com/matrix-org/complement/pull/178

Synapse implementation: https://github.com/matrix-org/synapse/pull/9445
---
 .github/workflows/tests.yml | 2 +-
 changelog.d/12144.misc      | 1 +
 scripts-dev/complement.sh   | 2 +-
 3 files changed, 3 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/12144.misc

diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 3f4e44ca592d..fa9611d42b30 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -376,7 +376,7 @@ jobs:
       # Run Complement
       - run: |
           set -o pipefail
-          go test -v -json -p 1 -tags synapse_blacklist,msc2403 ./tests/... 2>&1 | gotestfmt
+          go test -v -json -p 1 -tags synapse_blacklist,msc2403,msc3030 ./tests/... 2>&1 | gotestfmt
        shell: bash
        name: Run Complement Tests
        env:
diff --git a/changelog.d/12144.misc b/changelog.d/12144.misc
new file mode 100644
index 000000000000..d8f71bb203eb
--- /dev/null
+++ b/changelog.d/12144.misc
@@ -0,0 +1 @@
+Enable [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) Complement tests in CI.
diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh
index 0aecb3daf158..e3d3e0f293ac 100755
--- a/scripts-dev/complement.sh
+++ b/scripts-dev/complement.sh
@@ -71,4 +71,4 @@ fi
 
 # Run the tests!
 echo "Images built; running complement"
-go test -v -tags synapse_blacklist,msc2403 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests/...
+go test -v -tags synapse_blacklist,msc2403,msc3030 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests/...

From 61fd2a8f591f20fe9d1cffe659336664bf44e742 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 3 Mar 2022 10:52:35 +0000
Subject: [PATCH 342/474] Limit the size of the aggregation_key (#12101)

There's no reason to let people use long keys.
---
 changelog.d/12101.misc      | 1 +
 synapse/handlers/message.py | 3 +++
 2 files changed, 4 insertions(+)
 create mode 100644 changelog.d/12101.misc

diff --git a/changelog.d/12101.misc b/changelog.d/12101.misc
new file mode 100644
index 000000000000..d165f73d13e8
--- /dev/null
+++ b/changelog.d/12101.misc
@@ -0,0 +1 @@
+Limit the size of `aggregation_key` on annotations.
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 61cb133ef265..0799ec9a84df 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -1069,6 +1069,9 @@ async def _validate_event_relation(self, event: EventBase) -> None:
         if relation_type == RelationTypes.ANNOTATION:
             aggregation_key = relation["key"]
 
+            if len(aggregation_key) > 500:
+                raise SynapseError(400, "Aggregation key is too long")
+
             already_exists = await self.store.has_user_annotated_event(
                 relates_to, event.type, aggregation_key, event.sender
             )

From a511a890d7c556ad357d27443e5665e6cc25e0b5 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Thu, 3 Mar 2022 05:19:20 -0600
Subject: [PATCH 343/474] Enable MSC2716 Complement tests in Synapse (#12145)

Co-authored-by: Brendan Abolivier
---
 .github/workflows/tests.yml | 2 +-
 changelog.d/12145.misc      | 1 +
 scripts-dev/complement.sh   | 2 +-
 3 files changed, 3 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/12145.misc

diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index fa9611d42b30..c89c50cd07e2 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -376,7 +376,7 @@ jobs:
       # Run Complement
       - run: |
           set -o pipefail
-          go test -v -json -p 1 -tags synapse_blacklist,msc2403,msc3030 ./tests/... 2>&1 | gotestfmt
+          go test -v -json -p 1 -tags synapse_blacklist,msc2403,msc2716,msc3030 ./tests/... 2>&1 | gotestfmt
        shell: bash
        name: Run Complement Tests
        env:
diff --git a/changelog.d/12145.misc b/changelog.d/12145.misc
new file mode 100644
index 000000000000..4092a2d66e45
--- /dev/null
+++ b/changelog.d/12145.misc
@@ -0,0 +1 @@
+Enable [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) Complement tests in CI.
diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh
index e3d3e0f293ac..0a79a4063f5a 100755
--- a/scripts-dev/complement.sh
+++ b/scripts-dev/complement.sh
@@ -71,4 +71,4 @@ fi
 
 # Run the tests!
 echo "Images built; running complement"
-go test -v -tags synapse_blacklist,msc2403,msc3030 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests/...
+go test -v -tags synapse_blacklist,msc2403,msc2716,msc3030 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests/...

From cea1b58c4a5850a59e14808324ce1d9f222f52e5 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Thu, 3 Mar 2022 12:47:55 +0000
Subject: [PATCH 344/474] Don't impose version checks on dev extras at runtime (#12129)

* Fix incorrect argument in test case

* Add copyright header

* Docstring and __all__

* Exclude dev dependencies

* Use changelog from #12088

* Include version in error messages

This will hopefully distinguish between the version of the source code
and the version of the distribution package that is installed.

* Linter script is your friend
---
 changelog.d/12129.misc                | 1 +
 synapse/util/check_dependencies.py    | 61 ++++++++++++++++++++++++---
 tests/util/test_check_dependencies.py | 21 ++++++++-
 3 files changed, 74 insertions(+), 9 deletions(-)
 create mode 100644 changelog.d/12129.misc

diff --git a/changelog.d/12129.misc b/changelog.d/12129.misc
new file mode 100644
index 000000000000..ce4213650c5e
--- /dev/null
+++ b/changelog.d/12129.misc
@@ -0,0 +1 @@
+Inspect application dependencies using `importlib.metadata` or its backport.
\ No newline at end of file
diff --git a/synapse/util/check_dependencies.py b/synapse/util/check_dependencies.py
index 3a1f6b3c752b..39b0a91db3e8 100644
--- a/synapse/util/check_dependencies.py
+++ b/synapse/util/check_dependencies.py
@@ -1,3 +1,25 @@
+# Copyright 2022 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+This module exposes a single function which checks synapse's dependencies are present
+and correctly versioned. It makes use of `importlib.metadata` to do so. The details
+are a bit murky: there's no easy way to get a map from "extras" to the packages they
+require. But this is probably just symptomatic of Python's package management.
+"""
+
 import logging
 from typing import Iterable, NamedTuple, Optional
 
@@ -10,6 +32,8 @@
 except ImportError:
     import importlib_metadata as metadata  # type: ignore[no-redef]
 
+__all__ = ["check_requirements"]
+
 
 class DependencyException(Exception):
     @property
@@ -29,7 +53,17 @@ def dependencies(self) -> Iterable[str]:
             yield '"' + i + '"'
 
 
-EXTRAS = set(metadata.metadata(DISTRIBUTION_NAME).get_all("Provides-Extra"))
+DEV_EXTRAS = {"lint", "mypy", "test", "dev"}
+RUNTIME_EXTRAS = (
+    set(metadata.metadata(DISTRIBUTION_NAME).get_all("Provides-Extra")) - DEV_EXTRAS
+)
+VERSION = metadata.version(DISTRIBUTION_NAME)
+
+
+def _is_dev_dependency(req: Requirement) -> bool:
+    return req.marker is not None and any(
+        req.marker.evaluate({"extra": e}) for e in DEV_EXTRAS
+    )
 
 
 class Dependency(NamedTuple):
@@ -43,6 +77,9 @@ def _generic_dependencies() -> Iterable[Dependency]:
     assert requirements is not None
     for raw_requirement in requirements:
        req = Requirement(raw_requirement)
+        if _is_dev_dependency(req):
+            continue
+
        # https://packaging.pypa.io/en/latest/markers.html#usage notes that
        # > Evaluating an extra marker with no environment is an error
        # so we pass in a dummy empty extra value here.
@@ -56,6 +93,8 @@ def _dependencies_for_extra(extra: str) -> Iterable[Dependency]:
     assert requirements is not None
     for raw_requirement in requirements:
         req = Requirement(raw_requirement)
+        if _is_dev_dependency(req):
+            continue
         # Exclude mandatory deps by only selecting deps needed with this extra.
         if (
             req.marker is not None
@@ -67,18 +106,26 @@ def _dependencies_for_extra(extra: str) -> Iterable[Dependency]:
 
 def _not_installed(requirement: Requirement, extra: Optional[str] = None) -> str:
     if extra:
-        return f"Need {requirement.name} for {extra}, but it is not installed"
+        return (
+            f"Synapse {VERSION} needs {requirement.name} for {extra}, "
+            f"but it is not installed"
+        )
     else:
-        return f"Need {requirement.name}, but it is not installed"
+        return f"Synapse {VERSION} needs {requirement.name}, but it is not installed"
 
 
 def _incorrect_version(
     requirement: Requirement, got: str, extra: Optional[str] = None
 ) -> str:
     if extra:
-        return f"Need {requirement} for {extra}, but got {requirement.name}=={got}"
+        return (
+            f"Synapse {VERSION} needs {requirement} for {extra}, "
+            f"but got {requirement.name}=={got}"
+        )
     else:
-        return f"Need {requirement}, but got {requirement.name}=={got}"
+        return (
+            f"Synapse {VERSION} needs {requirement}, but got {requirement.name}=={got}"
+        )
 
 
 def check_requirements(extra: Optional[str] = None) -> None:
@@ -100,10 +147,10 @@ def check_requirements(extra: Optional[str] = None) -> None:
     # First work out which dependencies are required, and which are optional.
     if extra is None:
         dependencies = _generic_dependencies()
-    elif extra in EXTRAS:
+    elif extra in RUNTIME_EXTRAS:
         dependencies = _dependencies_for_extra(extra)
     else:
-        raise ValueError(f"Synapse does not provide the feature '{extra}'")
+        raise ValueError(f"Synapse {VERSION} does not provide the feature '{extra}'")
 
     deps_unfulfilled = []
     errors = []
diff --git a/tests/util/test_check_dependencies.py b/tests/util/test_check_dependencies.py
index 3c072522525a..a91c33272f36 100644
--- a/tests/util/test_check_dependencies.py
+++ b/tests/util/test_check_dependencies.py
@@ -65,6 +65,23 @@ def test_mandatory_dependency(self) -> None:
             # should not raise
             check_requirements()
 
+    def test_checks_ignore_dev_dependencies(self) -> None:
+        """Both generic and per-extra checks should ignore dev dependencies."""
+        with patch(
+            "synapse.util.check_dependencies.metadata.requires",
+            return_value=["dummypkg >= 1; extra == 'mypy'"],
+        ), patch("synapse.util.check_dependencies.RUNTIME_EXTRAS", {"cool-extra"}):
+            # We're testing that none of these calls raise.
+            with self.mock_installed_package(None):
+                check_requirements()
+                check_requirements("cool-extra")
+            with self.mock_installed_package(old):
+                check_requirements()
+                check_requirements("cool-extra")
+            with self.mock_installed_package(new):
+                check_requirements()
+                check_requirements("cool-extra")
+
     def test_generic_check_of_optional_dependency(self) -> None:
         """Complain if an optional package is old."""
         with patch(
@@ -85,11 +102,11 @@ def test_check_for_extra_dependencies(self) -> None:
         with patch(
             "synapse.util.check_dependencies.metadata.requires",
             return_value=["dummypkg >= 1; extra == 'cool-extra'"],
-        ), patch("synapse.util.check_dependencies.EXTRAS", {"cool-extra"}):
+        ), patch("synapse.util.check_dependencies.RUNTIME_EXTRAS", {"cool-extra"}):
             with self.mock_installed_package(None):
                 self.assertRaises(DependencyException, check_requirements, "cool-extra")
             with self.mock_installed_package(old):
                 self.assertRaises(DependencyException, check_requirements, "cool-extra")
             with self.mock_installed_package(new):
                 # should not raise
-                check_requirements()
+                check_requirements("cool-extra")

From 1d11b452b70c768e4919bd9cf6bcaeda2050a3d4 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Thu, 3 Mar 2022 10:43:06 -0500
Subject: [PATCH 345/474] Use the proper serialization format when bundling aggregations. (#12090)

This ensures that the `latest_event` field of the bundled aggregation
for threads uses the same format as the other events in the response.
---
 changelog.d/12090.bugfix             |   1 +
 synapse/appservice/api.py            |  24 ++---
 synapse/events/utils.py              |  81 ++++++++++------
 synapse/handlers/events.py           |   3 +-
 synapse/handlers/initial_sync.py     |   9 +-
 synapse/handlers/pagination.py       |   7 +-
 synapse/rest/client/notifications.py |   9 +-
 synapse/rest/client/sync.py          | 132 ++++++++-------------------
 tests/events/test_utils.py           |   5 +-
 tests/rest/client/test_relations.py  |   2 -
 10 files changed, 130 insertions(+), 143 deletions(-)
 create mode 100644 changelog.d/12090.bugfix

diff --git a/changelog.d/12090.bugfix b/changelog.d/12090.bugfix
new file mode 100644
index 000000000000..087065dcb1cd
--- /dev/null
+++ b/changelog.d/12090.bugfix
@@ -0,0 +1 @@
+Use the proper serialization format for bundled thread aggregations. The bug has existed since Synapse v1.48.0.
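The fix is easiest to see from the call site: previously the thread's `latest_event` was serialized with the serializer's default options, ignoring the formatting options the caller had asked for; with this patch every caller passes a single `SerializeEventConfig` (introduced in `synapse/events/utils.py` below), so the bundled event is rendered the same way as the rest of the response. A minimal sketch of the new calling convention, with a hypothetical event and timestamp:

    from synapse.events.utils import SerializeEventConfig, serialize_event

    # `event` is assumed to be an EventBase already in scope; 1646300000000 is
    # an arbitrary "current time" in milliseconds.
    config = SerializeEventConfig(as_client_event=True)
    serialized = serialize_event(event, 1646300000000, config=config)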
diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py
index a0ea958af62a..98fe354014c4 100644
--- a/synapse/appservice/api.py
+++ b/synapse/appservice/api.py
@@ -25,7 +25,7 @@
     TransactionUnusedFallbackKeys,
 )
 from synapse.events import EventBase
-from synapse.events.utils import serialize_event
+from synapse.events.utils import SerializeEventConfig, serialize_event
 from synapse.http.client import SimpleHttpClient
 from synapse.types import JsonDict, ThirdPartyInstanceID
 from synapse.util.caches.response_cache import ResponseCache
@@ -321,16 +321,18 @@ def _serialize(
         serialize_event(
             e,
             time_now,
-            as_client_event=True,
-            # If this is an invite or a knock membership event, and we're interested
-            # in this user, then include any stripped state alongside the event.
-            include_stripped_room_state=(
-                e.type == EventTypes.Member
-                and (
-                    e.membership == Membership.INVITE
-                    or e.membership == Membership.KNOCK
-                )
-                and service.is_interested_in_user(e.state_key)
+            config=SerializeEventConfig(
+                as_client_event=True,
+                # If this is an invite or a knock membership event, and we're interested
+                # in this user, then include any stripped state alongside the event.
+                include_stripped_room_state=(
+                    e.type == EventTypes.Member
+                    and (
+                        e.membership == Membership.INVITE
+                        or e.membership == Membership.KNOCK
+                    )
+                    and service.is_interested_in_user(e.state_key)
+                ),
             ),
         )
         for e in events
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index 9386fa29ddd3..ee34cb46e437 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -26,6 +26,7 @@
     Union,
 )
 
+import attr
 from frozendict import frozendict
 
 from synapse.api.constants import EventContentFields, EventTypes, RelationTypes
@@ -303,29 +304,37 @@ def format_event_for_client_v2_without_room_id(d: JsonDict) -> JsonDict:
     return d
 
 
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class SerializeEventConfig:
+    as_client_event: bool = True
+    # Function to convert from federation format to client format
+    event_format: Callable[[JsonDict], JsonDict] = format_event_for_client_v1
+    # ID of the user's auth token - used for namespacing of transaction IDs
+    token_id: Optional[int] = None
+    # List of event fields to include. If empty, all fields will be returned.
+    only_event_fields: Optional[List[str]] = None
+    # Some events can have stripped room state stored in the `unsigned` field.
+    # This is required for invite and knock functionality. If this option is
+    # False, that state will be removed from the event before it is returned.
+    # Otherwise, it will be kept.
+    include_stripped_room_state: bool = False
+
+
+_DEFAULT_SERIALIZE_EVENT_CONFIG = SerializeEventConfig()
+
+
 def serialize_event(
     e: Union[JsonDict, EventBase],
     time_now_ms: int,
     *,
-    as_client_event: bool = True,
-    event_format: Callable[[JsonDict], JsonDict] = format_event_for_client_v1,
-    token_id: Optional[str] = None,
-    only_event_fields: Optional[List[str]] = None,
-    include_stripped_room_state: bool = False,
+    config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG,
 ) -> JsonDict:
     """Serialize event for clients
 
     Args:
         e
         time_now_ms
-        as_client_event
-        event_format
-        token_id
-        only_event_fields
-        include_stripped_room_state: Some events can have stripped room state
-            stored in the `unsigned` field. This is required for invite and knock
-            functionality. If this option is False, that state will be removed from the
-            event before it is returned. Otherwise, it will be kept.
+        config: Event serialization config
 
     Returns:
         The serialized event dictionary.
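A note on the pattern above: because `SerializeEventConfig` is declared with `frozen=True`, the module-level `_DEFAULT_SERIALIZE_EVENT_CONFIG` instance is immutable and therefore safe to share as a default argument. Callers that need a variation can derive one rather than mutating it; a hedged sketch using the standard `attrs` helper (the `token_id` value is hypothetical):

    import attr

    from synapse.events.utils import _DEFAULT_SERIALIZE_EVENT_CONFIG

    # attr.evolve copies the frozen config, overriding only the given field.
    with_txn_ids = attr.evolve(_DEFAULT_SERIALIZE_EVENT_CONFIG, token_id=42)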
@@ -348,11 +357,11 @@ def serialize_event(
 
     if "redacted_because" in e.unsigned:
         d["unsigned"]["redacted_because"] = serialize_event(
-            e.unsigned["redacted_because"], time_now_ms, event_format=event_format
+            e.unsigned["redacted_because"], time_now_ms, config=config
         )
 
-    if token_id is not None:
-        if token_id == getattr(e.internal_metadata, "token_id", None):
+    if config.token_id is not None:
+        if config.token_id == getattr(e.internal_metadata, "token_id", None):
             txn_id = getattr(e.internal_metadata, "txn_id", None)
             if txn_id is not None:
                 d["unsigned"]["transaction_id"] = txn_id
@@ -361,13 +370,14 @@ def serialize_event(
     # that are meant to provide metadata about a room to an invitee/knocker. They are
     # intended to only be included in specific circumstances, such as down sync, and
     # should not be included in any other case.
-    if not include_stripped_room_state:
+    if not config.include_stripped_room_state:
         d["unsigned"].pop("invite_room_state", None)
         d["unsigned"].pop("knock_room_state", None)
 
-    if as_client_event:
-        d = event_format(d)
+    if config.as_client_event:
+        d = config.event_format(d)
 
+    only_event_fields = config.only_event_fields
     if only_event_fields:
         if not isinstance(only_event_fields, list) or not all(
             isinstance(f, str) for f in only_event_fields
@@ -390,18 +400,18 @@ def serialize_event(
         event: Union[JsonDict, EventBase],
         time_now: int,
         *,
+        config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG,
         bundle_aggregations: Optional[Dict[str, "BundledAggregations"]] = None,
-        **kwargs: Any,
     ) -> JsonDict:
         """Serializes a single event.
 
         Args:
             event: The event being serialized.
             time_now: The current time in milliseconds
+            config: Event serialization config
             bundle_aggregations: Whether to include the bundled aggregations for this
                 event. Only applies to non-state events. (State events never include
                 bundled aggregations.)
-            **kwargs: Arguments to pass to `serialize_event`
 
         Returns:
             The serialized event
@@ -410,7 +420,7 @@ def serialize_event(
         if not isinstance(event, EventBase):
             return event
 
-        serialized_event = serialize_event(event, time_now, **kwargs)
+        serialized_event = serialize_event(event, time_now, config=config)
 
         # Check if there are any bundled aggregations to include with the event.
         if bundle_aggregations:
@@ -419,6 +429,7 @@ def serialize_event(
                 self._inject_bundled_aggregations(
                     event,
                     time_now,
+                    config,
                     bundle_aggregations[event.event_id],
                     serialized_event,
                 )
@@ -456,6 +467,7 @@ def _inject_bundled_aggregations(
         self,
         event: EventBase,
         time_now: int,
+        config: SerializeEventConfig,
         aggregations: "BundledAggregations",
         serialized_event: JsonDict,
     ) -> None:
@@ -466,6 +478,7 @@ def _inject_bundled_aggregations(
             time_now: The current time in milliseconds
             aggregations: The bundled aggregation to serialize.
             serialized_event: The serialized event which may be modified.
+            config: Event serialization config
         """
 
         serialized_aggregations = {}
@@ -493,8 +506,8 @@ def _inject_bundled_aggregations(
             thread = aggregations.thread
 
             # Don't bundle aggregations as this could recurse forever.
-            serialized_latest_event = self.serialize_event(
-                thread.latest_event, time_now, bundle_aggregations=None
+            serialized_latest_event = serialize_event(
+                thread.latest_event, time_now, config=config
             )
             # Manually apply an edit, if one exists.
             if thread.latest_edit:
@@ -515,20 +528,34 @@ def _inject_bundled_aggregations(
         )
 
     def serialize_events(
-        self, events: Iterable[Union[JsonDict, EventBase]], time_now: int, **kwargs: Any
+        self,
+        events: Iterable[Union[JsonDict, EventBase]],
+        time_now: int,
+        *,
+        config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG,
+        bundle_aggregations: Optional[Dict[str, "BundledAggregations"]] = None,
     ) -> List[JsonDict]:
         """Serializes multiple events.
 
         Args:
            event
            time_now: The current time in milliseconds
-            **kwargs: Arguments to pass to `serialize_event`
+            config: Event serialization config
+            bundle_aggregations: Whether to include the bundled aggregations for this
+                event. Only applies to non-state events. (State events never include
+                bundled aggregations.)
 
         Returns:
             The list of serialized events
         """
         return [
-            self.serialize_event(event, time_now=time_now, **kwargs) for event in events
+            self.serialize_event(
+                event,
+                time_now,
+                config=config,
+                bundle_aggregations=bundle_aggregations,
+            )
+            for event in events
         ]
diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py
index 97e75e60c32e..d2ccb5c5d311 100644
--- a/synapse/handlers/events.py
+++ b/synapse/handlers/events.py
@@ -19,6 +19,7 @@
 from synapse.api.constants import EduTypes, EventTypes, Membership
 from synapse.api.errors import AuthError, SynapseError
 from synapse.events import EventBase
+from synapse.events.utils import SerializeEventConfig
 from synapse.handlers.presence import format_user_presence_state
 from synapse.streams.config import PaginationConfig
 from synapse.types import JsonDict, UserID
@@ -120,7 +121,7 @@ async def get_stream(
                 chunks = self._event_serializer.serialize_events(
                     events,
                     time_now,
-                    as_client_event=as_client_event,
+                    config=SerializeEventConfig(as_client_event=as_client_event),
                 )
 
                 chunk = {
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index 344f20f37cce..316cfae24ff0 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -18,6 +18,7 @@
 from synapse.api.constants import EduTypes, EventTypes, Membership
 from synapse.api.errors import SynapseError
 from synapse.events import EventBase
+from synapse.events.utils import SerializeEventConfig
 from synapse.events.validator import EventValidator
 from synapse.handlers.presence import format_user_presence_state
 from synapse.handlers.receipts import ReceiptEventSource
@@ -156,6 +157,8 @@ async def _snapshot_all_rooms(
         if limit is None:
             limit = 10
 
+        serializer_options = SerializeEventConfig(as_client_event=as_client_event)
+
         async def handle_room(event: RoomsForUser) -> None:
             d: JsonDict = {
                 "room_id": event.room_id,
@@ -173,7 +176,7 @@ async def handle_room(event: RoomsForUser) -> None:
                 d["invite"] = self._event_serializer.serialize_event(
                     invite_event,
                     time_now,
-                    as_client_event=as_client_event,
+                    config=serializer_options,
                 )
 
             rooms_ret.append(d)
@@ -225,7 +228,7 @@ async def handle_room(event: RoomsForUser) -> None:
                     self._event_serializer.serialize_events(
                         messages,
                         time_now=time_now,
-                        as_client_event=as_client_event,
+                        config=serializer_options,
                     )
                 ),
                 "start": await start_token.to_string(self.store),
@@ -235,7 +238,7 @@ async def handle_room(event: RoomsForUser) -> None:
                 d["state"] = self._event_serializer.serialize_events(
                     current_state.values(),
                     time_now=time_now,
-                    as_client_event=as_client_event,
+                    config=serializer_options,
                 )
 
             account_data_events = []
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index 5c01a426ff32..183fabcfc09e 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -22,6 +22,7 @@
 from synapse.api.constants import EventTypes, Membership
 from synapse.api.errors import SynapseError
 from synapse.api.filtering import Filter
+from synapse.events.utils import SerializeEventConfig
 from synapse.handlers.room import ShutdownRoomResponse
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.storage.state import StateFilter
@@ -541,13 +542,15 @@ async def get_messages(
 
         time_now = self.clock.time_msec()
 
+        serialize_options = SerializeEventConfig(as_client_event=as_client_event)
+
         chunk = {
             "chunk": (
                 self._event_serializer.serialize_events(
                     events,
                     time_now,
+                    config=serialize_options,
                     bundle_aggregations=aggregations,
-                    as_client_event=as_client_event,
                 )
             ),
             "start": await from_token.to_string(self.store),
@@ -556,7 +559,7 @@ async def get_messages(
 
         if state:
             chunk["state"] = self._event_serializer.serialize_events(
-                state, time_now, as_client_event=as_client_event
+                state, time_now, config=serialize_options
             )
 
         return chunk
diff --git a/synapse/rest/client/notifications.py b/synapse/rest/client/notifications.py
index 20377a9ac628..ff040de6b840 100644
--- a/synapse/rest/client/notifications.py
+++ b/synapse/rest/client/notifications.py
@@ -16,7 +16,10 @@
 from typing import TYPE_CHECKING, Tuple
 
 from synapse.api.constants import ReceiptTypes
-from synapse.events.utils import format_event_for_client_v2_without_room_id
+from synapse.events.utils import (
+    SerializeEventConfig,
+    format_event_for_client_v2_without_room_id,
+)
 from synapse.http.server import HttpServer
 from synapse.http.servlet import RestServlet, parse_integer, parse_string
 from synapse.http.site import SynapseRequest
@@ -75,7 +78,9 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
                     self._event_serializer.serialize_event(
                         notif_events[pa.event_id],
                         self.clock.time_msec(),
-                        event_format=format_event_for_client_v2_without_room_id,
+                        config=SerializeEventConfig(
+                            event_format=format_event_for_client_v2_without_room_id
+                        ),
                     )
                 ),
             }
diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py
index f3018ff69077..53c385a86cc1 100644
--- a/synapse/rest/client/sync.py
+++ b/synapse/rest/client/sync.py
@@ -14,24 +14,14 @@
 import itertools
 import logging
 from collections import defaultdict
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Callable,
-    Dict,
-    Iterable,
-    List,
-    Optional,
-    Tuple,
-    Union,
-)
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
 
 from synapse.api.constants import Membership, PresenceState
 from synapse.api.errors import Codes, StoreError, SynapseError
 from synapse.api.filtering import FilterCollection
 from synapse.api.presence import UserPresenceState
-from synapse.events import EventBase
 from synapse.events.utils import (
+    SerializeEventConfig,
     format_event_for_client_v2_without_room_id,
     format_event_raw,
 )
@@ -48,7 +38,6 @@
 from synapse.http.servlet import RestServlet, parse_boolean, parse_integer, parse_string
 from synapse.http.site import SynapseRequest
 from synapse.logging.opentracing import trace
-from synapse.storage.databases.main.relations import BundledAggregations
 from synapse.types import JsonDict, StreamToken
 from synapse.util import json_decoder
 
@@ -239,28 +228,31 @@ async def encode_response(
         else:
             raise Exception("Unknown event format %s" % (filter.event_format,))
 
+        serialize_options = SerializeEventConfig(
+            event_format=event_formatter,
+            token_id=access_token_id,
+            only_event_fields=filter.event_fields,
+        )
+        stripped_serialize_options = SerializeEventConfig(
+            event_format=event_formatter,
+            token_id=access_token_id,
+            include_stripped_room_state=True,
+        )
+
         joined = await self.encode_joined(
-            sync_result.joined,
-            time_now,
-            access_token_id,
-            filter.event_fields,
-            event_formatter,
+            sync_result.joined, time_now, serialize_options
         )
 
         invited = await self.encode_invited(
-            sync_result.invited, time_now, access_token_id, event_formatter
+            sync_result.invited, time_now, stripped_serialize_options
         )
 
         knocked = await self.encode_knocked(
-            sync_result.knocked, time_now, access_token_id, event_formatter
+            sync_result.knocked, time_now, stripped_serialize_options
         )
 
         archived = await self.encode_archived(
-            sync_result.archived,
-            time_now,
-            access_token_id,
-            filter.event_fields,
-            event_formatter,
+            sync_result.archived, time_now, serialize_options
         )
 
         logger.debug("building sync response dict")
@@ -339,9 +331,7 @@ async def encode_joined(
         self,
         rooms: List[JoinedSyncResult],
         time_now: int,
-        token_id: Optional[int],
-        event_fields: List[str],
-        event_formatter: Callable[[JsonDict], JsonDict],
+        serialize_options: SerializeEventConfig,
     ) -> JsonDict:
         """
         Encode the joined rooms in a sync result
@@ -349,24 +339,14 @@ async def encode_joined(
         Args:
             rooms: list of sync results for rooms this user is joined to
             time_now: current time - used as a baseline for age calculations
-            token_id: ID of the user's auth token - used for namespacing
-                of transaction IDs
-            event_fields: List of event fields to include. If empty,
-                all fields will be returned.
-            event_formatter: function to convert from federation format
-                to client format
+            serialize_options: Event serializer options
         Returns:
             The joined rooms list, in our response format
         """
         joined = {}
         for room in rooms:
             joined[room.room_id] = await self.encode_room(
-                room,
-                time_now,
-                token_id,
-                joined=True,
-                only_fields=event_fields,
-                event_formatter=event_formatter,
+                room, time_now, joined=True, serialize_options=serialize_options
             )
 
         return joined
@@ -376,8 +356,7 @@ async def encode_invited(
         self,
         rooms: List[InvitedSyncResult],
         time_now: int,
-        token_id: Optional[int],
-        event_formatter: Callable[[JsonDict], JsonDict],
+        serialize_options: SerializeEventConfig,
     ) -> JsonDict:
         """
         Encode the invited rooms in a sync result
@@ -385,10 +364,7 @@ async def encode_invited(
         Args:
             rooms: list of sync results for rooms this user is invited to
             time_now: current time - used as a baseline for age calculations
-            token_id: ID of the user's auth token - used for namespacing
-                of transaction IDs
-            event_formatter: function to convert from federation format
-                to client format
+            serialize_options: Event serializer options
 
         Returns:
             The invited rooms list, in our response format
@@ -396,11 +372,7 @@ async def encode_invited(
         invited = {}
         for room in rooms:
             invite = self._event_serializer.serialize_event(
-                room.invite,
-                time_now,
-                token_id=token_id,
-                event_format=event_formatter,
-                include_stripped_room_state=True,
+                room.invite, time_now, config=serialize_options
             )
             unsigned = dict(invite.get("unsigned", {}))
             invite["unsigned"] = unsigned
@@ -415,8 +387,7 @@ async def encode_knocked(
         self,
         rooms: List[KnockedSyncResult],
         time_now: int,
-        token_id: Optional[int],
-        event_formatter: Callable[[Dict], Dict],
+        serialize_options: SerializeEventConfig,
     ) -> Dict[str, Dict[str, Any]]:
         """
         Encode the rooms we've knocked on in a sync result.
@@ -424,8 +395,7 @@ async def encode_knocked(
         Args:
             rooms: list of sync results for rooms this user is knocking on
             time_now: current time - used as a baseline for age calculations
-            token_id: ID of the user's auth token - used for namespacing of transaction IDs
-            event_formatter: function to convert from federation format to client format
+            serialize_options: Event serializer options
 
         Returns:
             The list of rooms the user has knocked on, in our response format.
@@ -433,11 +403,7 @@ async def encode_knocked(
         knocked = {}
         for room in rooms:
             knock = self._event_serializer.serialize_event(
-                room.knock,
-                time_now,
-                token_id=token_id,
-                event_format=event_formatter,
-                include_stripped_room_state=True,
+                room.knock, time_now, config=serialize_options
             )
 
             # Extract the `unsigned` key from the knock event.
@@ -470,9 +436,7 @@ async def encode_archived(
         self,
         rooms: List[ArchivedSyncResult],
         time_now: int,
-        token_id: Optional[int],
-        event_fields: List[str],
-        event_formatter: Callable[[JsonDict], JsonDict],
+        serialize_options: SerializeEventConfig,
     ) -> JsonDict:
         """
         Encode the archived rooms in a sync result
@@ -480,23 +444,14 @@ async def encode_archived(
         Args:
             rooms: list of sync results for rooms this user is joined to
            time_now: current time - used as a baseline for age calculations
-            token_id: ID of the user's auth token - used for namespacing
-                of transaction IDs
-            event_fields: List of event fields to include. If empty,
-                all fields will be returned.
-            event_formatter: function to convert from federation format to client format
+            serialize_options: Event serializer options
 
         Returns:
             The archived rooms list, in our response format
         """
         joined = {}
         for room in rooms:
             joined[room.room_id] = await self.encode_room(
-                room,
-                time_now,
-                token_id,
-                joined=False,
-                only_fields=event_fields,
-                event_formatter=event_formatter,
+                room, time_now, joined=False, serialize_options=serialize_options
             )
 
         return joined
@@ -505,10 +460,8 @@ async def encode_room(
         self,
         room: Union[JoinedSyncResult, ArchivedSyncResult],
         time_now: int,
-        token_id: Optional[int],
         joined: bool,
-        only_fields: Optional[List[str]],
-        event_formatter: Callable[[JsonDict], JsonDict],
+        serialize_options: SerializeEventConfig,
     ) -> JsonDict:
         """
         Args:
@@ -524,20 +477,6 @@ async def encode_room(
         Returns:
             The room, encoded in our response format
         """
-
-        def serialize(
-            events: Iterable[EventBase],
-            aggregations: Optional[Dict[str, BundledAggregations]] = None,
-        ) -> List[JsonDict]:
-            return self._event_serializer.serialize_events(
-                events,
-                time_now=time_now,
-                bundle_aggregations=aggregations,
-                token_id=token_id,
-                event_format=event_formatter,
-                only_event_fields=only_fields,
-            )
-
         state_dict = room.state
         timeline_events = room.timeline.events
 
@@ -554,9 +493,14 @@ def serialize(
                     event.room_id,
                 )
 
-        serialized_state = serialize(state_events)
-        serialized_timeline = serialize(
-            timeline_events, room.timeline.bundled_aggregations
+        serialized_state = self._event_serializer.serialize_events(
+            state_events, time_now, config=serialize_options
+        )
+        serialized_timeline = self._event_serializer.serialize_events(
+            timeline_events,
+            time_now,
+            config=serialize_options,
+            bundle_aggregations=room.timeline.bundled_aggregations,
         )
 
         account_data = room.account_data
diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py
index 45e3395b3361..00ad19e446db 100644
--- a/tests/events/test_utils.py
+++ b/tests/events/test_utils.py
@@ -16,6 +16,7 @@
 from synapse.api.room_versions import RoomVersions
 from synapse.events import make_event_from_dict
 from synapse.events.utils import (
+    SerializeEventConfig,
     copy_power_levels_contents,
     prune_event,
     serialize_event,
@@ -392,7 +393,9 @@ def test_member(self):
 
 class SerializeEventTestCase(unittest.TestCase):
     def serialize(self, ev, fields):
-        return serialize_event(ev, 1479807801915, only_event_fields=fields)
+        return serialize_event(
+            ev, 1479807801915, config=SerializeEventConfig(only_event_fields=fields)
+        )
 
     def test_event_fields_works_with_keys(self):
         self.assertEqual(
diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py
index 709f851a3874..53062b41deaa 100644
--- a/tests/rest/client/test_relations.py
+++ b/tests/rest/client/test_relations.py
@@ -704,10 +704,8 @@ def assert_bundle(event_json: JsonDict) -> None:
                     }
                 },
                 "event_id": thread_2,
-                "room_id": self.room,
                 "sender": self.user_id,
                 "type": "m.room.test",
-                "user_id": self.user_id,
             },
             relations_dict[RelationTypes.THREAD].get("latest_event"),
         )

From 7e91107be1a4287873266e588a3c5b415279f4c8 Mon Sep 17 00:00:00 2001
From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com>
Date: Thu, 3 Mar 2022 17:05:44 +0100
Subject: [PATCH 346/474] Add type hints to `tests/rest` (#12146)

* Add type hints to `tests/rest`

* newsfile

* change import from `SigningKey`
---
 changelog.d/12146.misc                        |  1 +
 mypy.ini                                      |  7 +--
 tests/rest/key/v2/test_remote_key_resource.py | 44 +++++++------
 tests/rest/media/v1/test_base.py              |  4 +-
 tests/rest/media/v1/test_filepath.py          | 48 ++++++---------
 tests/rest/media/v1/test_html_preview.py      | 54 +++++++--------
 tests/rest/media/v1/test_oembed.py            | 10 ++--
 tests/rest/test_health.py                     |  8 +--
 tests/rest/test_well_known.py                 | 20 +++---
 9 files changed, 104 insertions(+), 92 deletions(-)
 create mode 100644 changelog.d/12146.misc

diff --git a/changelog.d/12146.misc b/changelog.d/12146.misc
new file mode 100644
index 000000000000..3ca7c47212fd
--- /dev/null
+++ b/changelog.d/12146.misc
@@ -0,0 +1 @@
+Add type hints to `tests/rest`.
diff --git a/mypy.ini b/mypy.ini
index 10971b722514..481e8a5366b0 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -89,8 +89,6 @@ exclude = (?x)
   |tests/push/test_presentable_names.py
   |tests/push/test_push_rule_evaluator.py
   |tests/rest/client/test_transactions.py
-  |tests/rest/key/v2/test_remote_key_resource.py
-  |tests/rest/media/v1/test_base.py
   |tests/rest/media/v1/test_media_storage.py
   |tests/rest/media/v1/test_url_preview.py
   |tests/scripts/test_new_matrix_user.py
@@ -254,10 +252,7 @@ disallow_untyped_defs = True
 [mypy-tests.storage.test_user_directory]
 disallow_untyped_defs = True
 
-[mypy-tests.rest.admin.*]
-disallow_untyped_defs = True
-
-[mypy-tests.rest.client.*]
+[mypy-tests.rest.*]
 disallow_untyped_defs = True
 
 [mypy-tests.federation.transport.test_client]
diff --git a/tests/rest/key/v2/test_remote_key_resource.py b/tests/rest/key/v2/test_remote_key_resource.py
index 4672a6859684..978c252f8482 100644
--- a/tests/rest/key/v2/test_remote_key_resource.py
+++ b/tests/rest/key/v2/test_remote_key_resource.py
@@ -13,19 +13,24 @@
 # limitations under the License.
 import urllib.parse
 from io import BytesIO, StringIO
+from typing import Any, Dict, Optional, Union
 from unittest.mock import Mock
 
 import signedjson.key
 from canonicaljson import encode_canonical_json
-from nacl.signing import SigningKey
 from signedjson.sign import sign_json
+from signedjson.types import SigningKey
 
-from twisted.web.resource import NoResource
+from twisted.test.proto_helpers import MemoryReactor
+from twisted.web.resource import NoResource, Resource
 
 from synapse.crypto.keyring import PerspectivesKeyFetcher
 from synapse.http.site import SynapseRequest
 from synapse.rest.key.v2 import KeyApiV2Resource
+from synapse.server import HomeServer
 from synapse.storage.keys import FetchKeyResult
+from synapse.types import JsonDict
+from synapse.util import Clock
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.stringutils import random_string
 
@@ -35,11 +40,11 @@
 
 
 class BaseRemoteKeyResourceTestCase(unittest.HomeserverTestCase):
-    def make_homeserver(self, reactor, clock):
+    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
         self.http_client = Mock()
         return self.setup_test_homeserver(federation_http_client=self.http_client)
 
-    def create_test_resource(self):
+    def create_test_resource(self) -> Resource:
         return create_resource_tree(
             {"/_matrix/key/v2": KeyApiV2Resource(self.hs)}, root_resource=NoResource()
         )
@@ -51,7 +56,12 @@ def expect_outgoing_key_request(
         Tell the mock http client to expect an outgoing GET request for the given key
         """
 
-        async def get_json(destination, path, ignore_backoff=False, **kwargs):
+        async def get_json(
+            destination: str,
+            path: str,
+            ignore_backoff: bool = False,
+            **kwargs: Any,
+        ) -> Union[JsonDict, list]:
             self.assertTrue(ignore_backoff)
             self.assertEqual(destination, server_name)
             key_id = "%s:%s" % (signing_key.alg, signing_key.version)
@@ -84,7 +94,8 @@ def make_notary_request(self, server_name: str, key_id: str) -> dict:
         Checks that the response is a 200 and returns the decoded json body.
         """
         channel = FakeChannel(self.site, self.reactor)
-        req = SynapseRequest(channel, self.site)
+        # channel is a `FakeChannel` but `HTTPChannel` is expected
+        req = SynapseRequest(channel, self.site)  # type: ignore[arg-type]
         req.content = BytesIO(b"")
         req.requestReceived(
             b"GET",
@@ -97,7 +108,7 @@ def make_notary_request(self, server_name: str, key_id: str) -> dict:
         resp = channel.json_body
         return resp
 
-    def test_get_key(self):
+    def test_get_key(self) -> None:
         """Fetch a remote key"""
         SERVER_NAME = "remote.server"
         testkey = signedjson.key.generate_signing_key("ver1")
@@ -114,7 +125,7 @@ def test_get_key(self):
         self.assertIn(SERVER_NAME, keys[0]["signatures"])
         self.assertIn(self.hs.hostname, keys[0]["signatures"])
 
-    def test_get_own_key(self):
+    def test_get_own_key(self) -> None:
         """Fetch our own key"""
         testkey = signedjson.key.generate_signing_key("ver1")
         self.expect_outgoing_key_request(self.hs.hostname, testkey)
@@ -141,7 +152,7 @@ class EndToEndPerspectivesTests(BaseRemoteKeyResourceTestCase):
     endpoint, to check that the two implementations are compatible.
     """
 
-    def default_config(self):
+    def default_config(self) -> Dict[str, Any]:
         config = super().default_config()
 
         # replace the signing key with our own
@@ -152,7 +163,7 @@ def default_config(self):
 
         return config
 
-    def prepare(self, reactor, clock, homeserver):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         # make a second homeserver, configured to use the first one as a key notary
         self.http_client2 = Mock()
         config = default_config(name="keyclient")
@@ -175,7 +186,9 @@ def prepare(self, reactor, clock, homeserver):
         # wire up outbound POST /key/v2/query requests from hs2 so that they
         # will be forwarded to hs1
-        async def post_json(destination, path, data):
+        async def post_json(
+            destination: str, path: str, data: Optional[JsonDict] = None
+        ) -> Union[JsonDict, list]:
             self.assertEqual(destination, self.hs.hostname)
             self.assertEqual(
                 path,
@@ -183,7 +196,8 @@ async def post_json(destination, path, data):
             )
 
             channel = FakeChannel(self.site, self.reactor)
-            req = SynapseRequest(channel, self.site)
+            # channel is a `FakeChannel` but `HTTPChannel` is expected
+            req = SynapseRequest(channel, self.site)  # type: ignore[arg-type]
             req.content = BytesIO(encode_canonical_json(data))
 
             req.requestReceived(
@@ -198,7 +212,7 @@ async def post_json(destination, path, data):
 
         self.http_client2.post_json.side_effect = post_json
 
-    def test_get_key(self):
+    def test_get_key(self) -> None:
         """Fetch a key belonging to a random server"""
         # make up a key to be fetched.
         testkey = signedjson.key.generate_signing_key("abc")
@@ -218,7 +232,7 @@ def test_get_key(self):
             signedjson.key.encode_verify_key_base64(testkey.verify_key),
         )
 
-    def test_get_notary_key(self):
+    def test_get_notary_key(self) -> None:
         """Fetch a key belonging to the notary server"""
         # make up a key to be fetched. We randomise the keyid to try to get it to
         # appear before the key server signing key sometimes (otherwise we bail out
@@ -240,7 +254,7 @@ def test_get_notary_key(self):
             signedjson.key.encode_verify_key_base64(testkey.verify_key),
         )
 
-    def test_get_notary_keyserver_key(self):
+    def test_get_notary_keyserver_key(self) -> None:
         """Fetch the notary's keyserver key"""
         # we expect hs1 to make a regular key request to itself
         self.expect_outgoing_key_request(self.hs.hostname, self.hs_signing_key)
diff --git a/tests/rest/media/v1/test_base.py b/tests/rest/media/v1/test_base.py
index f761e23f1bf0..c73179151adb 100644
--- a/tests/rest/media/v1/test_base.py
+++ b/tests/rest/media/v1/test_base.py
@@ -28,11 +28,11 @@ class GetFileNameFromHeadersTests(unittest.TestCase):
         b"inline; filename*=utf-8''foo%C2%A3bar": "foo£bar",
     }
 
-    def tests(self):
+    def tests(self) -> None:
         for hdr, expected in self.TEST_CASES.items():
             res = get_filename_from_headers({b"Content-Disposition": [hdr]})
             self.assertEqual(
                 res,
                 expected,
-                "expected output for %s to be %s but was %s" % (hdr, expected, res),
+                f"expected output for {hdr!r} to be {expected} but was {res}",
             )
diff --git a/tests/rest/media/v1/test_filepath.py b/tests/rest/media/v1/test_filepath.py
index 913bc530aac1..43e6f0f70aad 100644
--- a/tests/rest/media/v1/test_filepath.py
+++ b/tests/rest/media/v1/test_filepath.py
@@ -21,12 +21,12 @@
 
 
 class MediaFilePathsTestCase(unittest.TestCase):
-    def setUp(self):
+    def setUp(self) -> None:
         super().setUp()
 
         self.filepaths = MediaFilePaths("/media_store")
 
-    def test_local_media_filepath(self):
+    def test_local_media_filepath(self) -> None:
         """Test local media paths"""
         self.assertEqual(
             self.filepaths.local_media_filepath_rel("GerZNDnDZVjsOtardLuwfIBg"),
@@ -37,7 +37,7 @@ def test_local_media_filepath(self):
             "/media_store/local_content/Ge/rZ/NDnDZVjsOtardLuwfIBg",
         )
 
-    def test_local_media_thumbnail(self):
+    def test_local_media_thumbnail(self) -> None:
         """Test local media thumbnail paths"""
         self.assertEqual(
             self.filepaths.local_media_thumbnail_rel(
@@ -52,14 +52,14 @@ def test_local_media_thumbnail(self):
             "/media_store/local_thumbnails/Ge/rZ/NDnDZVjsOtardLuwfIBg/800-600-image-jpeg-scale",
         )
 
-    def test_local_media_thumbnail_dir(self):
+    def test_local_media_thumbnail_dir(self) -> None:
         """Test local media thumbnail directory paths"""
         self.assertEqual(
             self.filepaths.local_media_thumbnail_dir("GerZNDnDZVjsOtardLuwfIBg"),
             "/media_store/local_thumbnails/Ge/rZ/NDnDZVjsOtardLuwfIBg",
         )
 
-    def test_remote_media_filepath(self):
+    def test_remote_media_filepath(self) -> None:
         """Test remote media paths"""
         self.assertEqual(
             self.filepaths.remote_media_filepath_rel(
@@ -74,7 +74,7 @@ def test_remote_media_filepath(self):
             "/media_store/remote_content/example.com/Ge/rZ/NDnDZVjsOtardLuwfIBg",
         )
 
-    def test_remote_media_thumbnail(self):
+    def test_remote_media_thumbnail(self) -> None:
         """Test remote media thumbnail paths"""
         self.assertEqual(
             self.filepaths.remote_media_thumbnail_rel(
@@ -99,7 +99,7 @@ def test_remote_media_thumbnail(self):
             "/media_store/remote_thumbnail/example.com/Ge/rZ/NDnDZVjsOtardLuwfIBg/800-600-image-jpeg-scale",
         )
 
-    def test_remote_media_thumbnail_legacy(self):
+    def test_remote_media_thumbnail_legacy(self) -> None:
         """Test old-style remote media thumbnail paths"""
         self.assertEqual(
             self.filepaths.remote_media_thumbnail_rel_legacy(
@@ -108,7 +108,7 @@ def test_remote_media_thumbnail_legacy(self):
             "remote_thumbnail/example.com/Ge/rZ/NDnDZVjsOtardLuwfIBg/800-600-image-jpeg",
         )
 
-    def test_remote_media_thumbnail_dir(self):
+    def test_remote_media_thumbnail_dir(self) -> None:
         """Test remote media thumbnail directory paths"""
         self.assertEqual(
             self.filepaths.remote_media_thumbnail_dir(
@@ -117,7 +117,7 @@ def test_remote_media_thumbnail_dir(self):
             "/media_store/remote_thumbnail/example.com/Ge/rZ/NDnDZVjsOtardLuwfIBg",
         )
 
-    def test_url_cache_filepath(self):
+    def test_url_cache_filepath(self) -> None:
         """Test URL cache paths"""
         self.assertEqual(
             self.filepaths.url_cache_filepath_rel("2020-01-02_GerZNDnDZVjsOtar"),
@@ -128,7 +128,7 @@ def test_url_cache_filepath(self):
             "/media_store/url_cache/2020-01-02/GerZNDnDZVjsOtar",
         )
 
-    def test_url_cache_filepath_legacy(self):
+    def test_url_cache_filepath_legacy(self) -> None:
         """Test old-style URL cache paths"""
         self.assertEqual(
             self.filepaths.url_cache_filepath_rel("GerZNDnDZVjsOtardLuwfIBg"),
@@ -139,7 +139,7 @@ def test_url_cache_filepath_legacy(self):
             "/media_store/url_cache/Ge/rZ/NDnDZVjsOtardLuwfIBg",
         )
 
-    def test_url_cache_filepath_dirs_to_delete(self):
+    def test_url_cache_filepath_dirs_to_delete(self) -> None:
         """Test URL cache cleanup paths"""
         self.assertEqual(
             self.filepaths.url_cache_filepath_dirs_to_delete(
@@ -148,7 +148,7 @@ def test_url_cache_filepath_dirs_to_delete(self):
             ["/media_store/url_cache/2020-01-02"],
         )
 
-    def test_url_cache_filepath_dirs_to_delete_legacy(self):
+    def test_url_cache_filepath_dirs_to_delete_legacy(self) -> None:
         """Test old-style URL cache cleanup paths"""
         self.assertEqual(
             self.filepaths.url_cache_filepath_dirs_to_delete(
@@ -160,7 +160,7 @@ def test_url_cache_filepath_dirs_to_delete_legacy(self):
             ],
         )
 
-    def test_url_cache_thumbnail(self):
+    def test_url_cache_thumbnail(self) -> None:
         """Test URL cache thumbnail paths"""
         self.assertEqual(
             self.filepaths.url_cache_thumbnail_rel(
@@ -175,7 +175,7 @@ def test_url_cache_thumbnail(self):
             "/media_store/url_cache_thumbnails/2020-01-02/GerZNDnDZVjsOtar/800-600-image-jpeg-scale",
         )
 
-    def test_url_cache_thumbnail_legacy(self):
+    def test_url_cache_thumbnail_legacy(self) -> None:
         """Test old-style URL cache thumbnail paths"""
         self.assertEqual(
             self.filepaths.url_cache_thumbnail_rel(
@@ -190,7 +190,7 @@ def test_url_cache_thumbnail_legacy(self):
             "/media_store/url_cache_thumbnails/Ge/rZ/NDnDZVjsOtardLuwfIBg/800-600-image-jpeg-scale",
         )
 
-    def test_url_cache_thumbnail_directory(self):
+    def test_url_cache_thumbnail_directory(self) -> None:
         """Test URL cache thumbnail directory paths"""
         self.assertEqual(
             self.filepaths.url_cache_thumbnail_directory_rel(
@@ -203,7 +203,7 @@ def test_url_cache_thumbnail_directory(self):
             "/media_store/url_cache_thumbnails/2020-01-02/GerZNDnDZVjsOtar",
         )
 
-    def test_url_cache_thumbnail_directory_legacy(self):
+    def test_url_cache_thumbnail_directory_legacy(self) -> None:
         """Test old-style URL cache thumbnail directory paths"""
         self.assertEqual(
             self.filepaths.url_cache_thumbnail_directory_rel(
@@ -216,7 +216,7 @@ def test_url_cache_thumbnail_directory_legacy(self):
             "/media_store/url_cache_thumbnails/Ge/rZ/NDnDZVjsOtardLuwfIBg",
         )
 
-    def test_url_cache_thumbnail_dirs_to_delete(self):
+    def test_url_cache_thumbnail_dirs_to_delete(self) -> None:
         """Test URL cache thumbnail cleanup paths"""
         self.assertEqual(
             self.filepaths.url_cache_thumbnail_dirs_to_delete(
@@ -228,7 +228,7 @@ def test_url_cache_thumbnail_dirs_to_delete(self):
             ],
         )
 
-    def test_url_cache_thumbnail_dirs_to_delete_legacy(self):
+    def test_url_cache_thumbnail_dirs_to_delete_legacy(self) -> None:
         """Test old-style URL cache thumbnail cleanup paths"""
         self.assertEqual(
             self.filepaths.url_cache_thumbnail_dirs_to_delete(
@@ -241,7 +241,7 @@ def test_url_cache_thumbnail_dirs_to_delete_legacy(self):
             ],
         )
 
-    def test_server_name_validation(self):
+    def test_server_name_validation(self) -> None:
         """Test validation of server names"""
         self._test_path_validation(
             [
@@ -274,7 +274,7 @@ def test_server_name_validation(self):
             ],
         )
 
-    def test_file_id_validation(self):
+    def test_file_id_validation(self) -> None:
         """Test validation of local, remote and legacy URL cache file / media IDs"""
         # File / media IDs get split into three parts to form paths, consisting of the
         # first two characters, next two characters and rest of the ID.
@@ -357,7 +357,7 @@ def test_file_id_validation(self):
             invalid_values=invalid_file_ids,
         )
 
-    def test_url_cache_media_id_validation(self):
+    def test_url_cache_media_id_validation(self) -> None:
         """Test validation of URL cache media IDs"""
         self._test_path_validation(
             [
@@ -387,7 +387,7 @@ def test_url_cache_media_id_validation(self):
             ],
         )
 
-    def test_content_type_validation(self):
+    def test_content_type_validation(self) -> None:
         """Test validation of thumbnail content types"""
         self._test_path_validation(
             [
@@ -410,7 +410,7 @@ def test_content_type_validation(self):
             ],
         )
 
-    def test_thumbnail_method_validation(self):
+    def test_thumbnail_method_validation(self) -> None:
         """Test validation of thumbnail methods"""
         self._test_path_validation(
             [
@@ -440,7 +440,7 @@ def _test_path_validation(
         parameter: str,
         valid_values: Iterable[str],
         invalid_values: Iterable[str],
-    ):
+    ) -> None:
         """Test that the specified methods validate the named parameter as expected
 
         Args:
diff --git a/tests/rest/media/v1/test_html_preview.py b/tests/rest/media/v1/test_html_preview.py
index a4b57e3d1feb..3fb37a2a5970 100644
--- a/tests/rest/media/v1/test_html_preview.py
+++ b/tests/rest/media/v1/test_html_preview.py
@@ -32,7 +32,7 @@ class SummarizeTestCase(unittest.TestCase):
     if not lxml:
         skip = "url preview feature requires lxml"
 
-    def test_long_summarize(self):
+    def test_long_summarize(self) -> None:
         example_paras = [
             """Tromsø (Norwegian pronunciation: [ˈtrʊmsœ] ( listen); Northern Sami:
             Romsa; Finnish: Tromssa[2] Kven: Tromssa) is a city and municipality in
@@ -90,7 +90,7 @@ def test_long_summarize(self):
             " Tromsøya had a population of 36,088. Substantial parts of the urban…",
         )
 
-    def test_short_summarize(self):
+    def test_short_summarize(self) -> None:
         example_paras = [
             "Tromsø (Norwegian pronunciation: [ˈtrʊmsœ] ( listen); Northern Sami:"
             " Romsa; Finnish: Tromssa[2] Kven: Tromssa) is a city and municipality in"
@@ -117,7 +117,7 @@ def test_short_summarize(self):
             " most of the year.",
         )
 
-    def test_small_then_large_summarize(self):
+    def test_small_then_large_summarize(self) -> None:
         example_paras = [
             "Tromsø (Norwegian pronunciation: [ˈtrʊmsœ] ( listen); Northern Sami:"
             " Romsa; Finnish: Tromssa[2] Kven: Tromssa) is a city and municipality in"
@@ -150,7 +150,7 @@ class CalcOgTestCase(unittest.TestCase):
     if not lxml:
         skip = "url preview feature requires lxml"
 
-    def test_simple(self):
+    def test_simple(self) -> None:
         html = b"""
         Foo
@@ -165,7 +165,7 @@ def test_simple(self):
 
         self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."})
 
-    def test_comment(self):
+    def test_comment(self) -> None:
         html = b"""
         Foo
@@ -181,7 +181,7 @@ def test_comment(self):
 
         self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."})
 
-    def test_comment2(self):
+    def test_comment2(self) -> None:
         html = b"""
         Foo
@@ -206,7 +206,7 @@ def test_comment2(self):
             },
         )
 
-    def test_script(self):
+    def test_script(self) -> None:
         html = b"""
         Foo
@@ -222,7 +222,7 @@ def test_script(self):
 
         self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."})
 
-    def test_missing_title(self):
+    def test_missing_title(self) -> None:
         html = b"""
 
@@ -236,7 +236,7 @@ def test_missing_title(self):
 
         self.assertEqual(og, {"og:title": None, "og:description": "Some text."})
 
-    def test_h1_as_title(self):
+    def test_h1_as_title(self) -> None:
         html = b"""
 
@@ -251,7 +251,7 @@ def test_h1_as_title(self):
 
         self.assertEqual(og, {"og:title": "Title", "og:description": "Some text."})
 
-    def test_missing_title_and_broken_h1(self):
+    def test_missing_title_and_broken_h1(self) -> None:
         html = b"""
 
@@ -266,19 +266,19 @@ def test_missing_title_and_broken_h1(self):
 
         self.assertEqual(og, {"og:title": None, "og:description": "Some text."})
 
-    def test_empty(self):
+    def test_empty(self) -> None:
         """Test a body with no data in it."""
         html = b""
         tree = decode_body(html, "http://example.com/test.html")
         self.assertIsNone(tree)
 
-    def test_no_tree(self):
+    def test_no_tree(self) -> None:
         """A valid body with no tree in it."""
         html = b"\x00"
         tree = decode_body(html, "http://example.com/test.html")
         self.assertIsNone(tree)
 
-    def test_xml(self):
+    def test_xml(self) -> None:
         """Test decoding XML and ensure it works properly."""
         # Note that the strip() call is important to ensure the xml tag starts
         # at the initial byte.
@@ -293,7 +293,7 @@ def test_xml(self):
         og = parse_html_to_open_graph(tree, "http://example.com/test.html")
         self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."})
 
-    def test_invalid_encoding(self):
+    def test_invalid_encoding(self) -> None:
         """An invalid character encoding should be ignored and treated as UTF-8, if possible."""
         html = b"""
@@ -307,7 +307,7 @@ def test_invalid_encoding(self):
         og = parse_html_to_open_graph(tree, "http://example.com/test.html")
         self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."})
 
-    def test_invalid_encoding2(self):
+    def test_invalid_encoding2(self) -> None:
         """A body which doesn't match the sent character encoding."""
         # Note that this contains an invalid UTF-8 sequence in the title.
         html = b"""
@@ -322,7 +322,7 @@ def test_invalid_encoding2(self):
         og = parse_html_to_open_graph(tree, "http://example.com/test.html")
         self.assertEqual(og, {"og:title": "ÿÿ Foo", "og:description": "Some text."})
 
-    def test_windows_1252(self):
+    def test_windows_1252(self) -> None:
         """A body which uses cp1252, but doesn't declare that."""
         html = b"""
@@ -338,7 +338,7 @@ def test_windows_1252(self):
 
 
 class MediaEncodingTestCase(unittest.TestCase):
-    def test_meta_charset(self):
+    def test_meta_charset(self) -> None:
         """A character encoding is found via the meta tag."""
         encodings = _get_html_media_encodings(
             b"""
@@ -363,7 +363,7 @@ def test_meta_charset(self):
         )
         self.assertEqual(list(encodings), ["ascii", "utf-8", "cp1252"])
 
-    def test_meta_charset_underscores(self):
+    def test_meta_charset_underscores(self) -> None:
         """A character encoding contains underscore."""
         encodings = _get_html_media_encodings(
             b"""
@@ -376,7 +376,7 @@ def test_meta_charset_underscores(self):
         )
         self.assertEqual(list(encodings), ["shift_jis", "utf-8", "cp1252"])
 
-    def test_xml_encoding(self):
+    def test_xml_encoding(self) -> None:
         """A character encoding is found via the meta tag."""
         encodings = _get_html_media_encodings(
             b"""
@@ -388,7 +388,7 @@ def test_xml_encoding(self):
         )
         self.assertEqual(list(encodings), ["ascii", "utf-8", "cp1252"])
 
-    def test_meta_xml_encoding(self):
+    def test_meta_xml_encoding(self) -> None:
         """Meta tags take precedence over XML encoding."""
         encodings = _get_html_media_encodings(
             b"""
@@ -402,7 +402,7 @@ def test_meta_xml_encoding(self):
         )
         self.assertEqual(list(encodings), ["utf-16", "ascii", "utf-8", "cp1252"])
 
-    def test_content_type(self):
+    def test_content_type(self) -> None:
         """A character encoding is found via the Content-Type header."""
         # Test a few variations of the header.
         headers = (
@@ -417,12 +417,12 @@ def test_content_type(self):
             encodings = _get_html_media_encodings(b"", header)
             self.assertEqual(list(encodings), ["ascii", "utf-8", "cp1252"])
 
-    def test_fallback(self):
+    def test_fallback(self) -> None:
         """A character encoding cannot be found in the body or header."""
         encodings = _get_html_media_encodings(b"", "text/html")
         self.assertEqual(list(encodings), ["utf-8", "cp1252"])
 
-    def test_duplicates(self):
+    def test_duplicates(self) -> None:
         """Ensure each encoding is only attempted once."""
         encodings = _get_html_media_encodings(
             b"""
@@ -436,7 +436,7 @@ def test_duplicates(self):
         )
         self.assertEqual(list(encodings), ["utf-8", "cp1252"])
 
-    def test_unknown_invalid(self):
+    def test_unknown_invalid(self) -> None:
         """A character encoding should be ignored if it is unknown or invalid."""
         encodings = _get_html_media_encodings(
             b"""
@@ -451,7 +451,7 @@ def test_unknown_invalid(self):
 
 
 class RebaseUrlTestCase(unittest.TestCase):
-    def test_relative(self):
+    def test_relative(self) -> None:
         """Relative URLs should be resolved based on the context of the base URL."""
         self.assertEqual(
             rebase_url("subpage", "https://example.com/foo/"),
@@ -466,14 +466,14 @@ def test_relative(self):
             "https://example.com/bar",
         )
 
-    def test_absolute(self):
+    def test_absolute(self) -> None:
         """Absolute URLs should not be modified."""
         self.assertEqual(
             rebase_url("https://alice.com/a/", "https://example.com/foo/"),
             "https://alice.com/a/",
         )
 
-    def test_data(self):
+    def test_data(self) -> None:
         """Data URLs should not be modified."""
         self.assertEqual(
             rebase_url("data:,Hello%2C%20World%21", "https://example.com/foo/"),
diff --git a/tests/rest/media/v1/test_oembed.py b/tests/rest/media/v1/test_oembed.py
index 048d0ca44a95..f38d7225f8f6 100644
--- a/tests/rest/media/v1/test_oembed.py
+++ b/tests/rest/media/v1/test_oembed.py
@@ -16,7 +16,7 @@
 
 from twisted.test.proto_helpers import MemoryReactor
 
-from synapse.rest.media.v1.oembed import OEmbedProvider
+from synapse.rest.media.v1.oembed import OEmbedProvider, OEmbedResult
 from synapse.server import HomeServer
 from synapse.types import JsonDict
 from synapse.util import Clock
@@ -25,15 +25,15 @@
 
 
 class OEmbedTests(HomeserverTestCase):
-    def prepare(self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer):
-        self.oembed = OEmbedProvider(homeserver)
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.oembed = OEmbedProvider(hs)
 
-    def parse_response(self, response: JsonDict):
+    def parse_response(self, response: JsonDict) -> OEmbedResult:
         return self.oembed.parse_oembed_response(
             "https://test", json.dumps(response).encode("utf-8")
         )
 
-    def test_version(self):
+    def test_version(self) -> None:
         """Accept versions that are similar to 1.0 as a string or int (or missing)."""
         for version in ("1.0", 1.0, 1):
             result = self.parse_response({"version": version, "type": "link"})
diff --git a/tests/rest/test_health.py b/tests/rest/test_health.py
index 01d48c3860d4..da325955f86f 100644
--- a/tests/rest/test_health.py
+++ b/tests/rest/test_health.py
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+from http import HTTPStatus
 
 from synapse.rest.health import HealthResource
 
@@ -19,12 +19,12 @@
 
 
 class HealthCheckTests(unittest.HomeserverTestCase):
-    def create_test_resource(self):
+    def create_test_resource(self) -> HealthResource:
         # replace the JsonResource with a HealthResource.
         return HealthResource()
 
-    def test_health(self):
+    def test_health(self) -> None:
         channel = self.make_request("GET", "/health", shorthand=False)
 
-        self.assertEqual(channel.code, 200)
+        self.assertEqual(channel.code, HTTPStatus.OK)
         self.assertEqual(channel.result["body"], b"OK")
diff --git a/tests/rest/test_well_known.py b/tests/rest/test_well_known.py
index 118aa93a320d..11f78f52b87a 100644
--- a/tests/rest/test_well_known.py
+++ b/tests/rest/test_well_known.py
@@ -11,6 +11,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from http import HTTPStatus
+
 from twisted.web.resource import Resource
 
 from synapse.rest.well_known import well_known_resource
@@ -19,7 +21,7 @@
 
 
 class WellKnownTests(unittest.HomeserverTestCase):
-    def create_test_resource(self):
+    def create_test_resource(self) -> Resource:
         # replace the JsonResource with a Resource wrapping the WellKnownResource
         res = Resource()
         res.putChild(b".well-known", well_known_resource(self.hs))
@@ -31,12 +33,12 @@ def create_test_resource(self):
             "default_identity_server": "https://testis",
         }
     )
-    def test_client_well_known(self):
+    def test_client_well_known(self) -> None:
         channel = self.make_request(
             "GET", "/.well-known/matrix/client", shorthand=False
         )
 
-        self.assertEqual(channel.code, 200)
+        self.assertEqual(channel.code, HTTPStatus.OK)
         self.assertEqual(
             channel.json_body,
             {
@@ -50,27 +52,27 @@ def test_client_well_known(self):
             "public_baseurl": None,
         }
     )
-    def test_client_well_known_no_public_baseurl(self):
+    def test_client_well_known_no_public_baseurl(self) -> None:
         channel = self.make_request(
             "GET", "/.well-known/matrix/client", shorthand=False
         )
 
-        self.assertEqual(channel.code, 404)
+        self.assertEqual(channel.code, HTTPStatus.NOT_FOUND)
 
     @unittest.override_config({"serve_server_wellknown": True})
-    def test_server_well_known(self):
+    def test_server_well_known(self) -> None:
         channel = self.make_request(
             "GET", "/.well-known/matrix/server", shorthand=False
         )
 
-        self.assertEqual(channel.code, 200)
+        self.assertEqual(channel.code, HTTPStatus.OK)
         self.assertEqual(
             channel.json_body,
             {"m.server": "test:443"},
         )
 
-    def test_server_well_known_disabled(self):
+    def test_server_well_known_disabled(self) -> None:
         channel = self.make_request(
             "GET", "/.well-known/matrix/server", shorthand=False
         )
-        self.assertEqual(channel.code, 404)
+        self.assertEqual(channel.code, HTTPStatus.NOT_FOUND)

From 9297d040a72b70c7cc0ec15319afdd99b01ba885 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Thu, 3 Mar 2022 17:14:09 +0000
Subject: [PATCH 347/474] Detox, part 2 of N (#12152)

I've argued in #11537 that poetry and tox don't cooperate well at the
moment. (See also #12119.) Therefore I'm pruning away bits of tox to
make the transition to poetry easier. This change removes the commands
for coverage.

We don't use coverage in anger at the moment. It shouldn't be too hard
to add coverage as a dev-dependency and reintroduce this if we really
want it.
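For anyone who relied on the removed environments, the equivalent steps remain available through coverage's own Python API (a hedged sketch: it assumes the `coverage` package is installed and that `.coverage.*` data files already exist from a test run):

    import coverage

    cov = coverage.Coverage()
    cov.combine()       # merge .coverage.* files, as the old `combine` env did
    cov.report()        # print the summary table
    cov.html_report()   # write htmlcov/, as the old `cov-html` env did
    cov.erase()         # delete collected data, as `cov-erase` did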
--- changelog.d/12152.misc | 1 + tox.ini | 26 -------------------------- 2 files changed, 1 insertion(+), 26 deletions(-) create mode 100644 changelog.d/12152.misc diff --git a/changelog.d/12152.misc b/changelog.d/12152.misc new file mode 100644 index 000000000000..b9877eaccbee --- /dev/null +++ b/changelog.d/12152.misc @@ -0,0 +1 @@ +Prune unused jobs from `tox` config. \ No newline at end of file diff --git a/tox.ini b/tox.ini index f4829200cca6..04d282a705af 100644 --- a/tox.ini +++ b/tox.ini @@ -158,32 +158,6 @@ commands = extras = lint commands = isort -c --df {[base]lint_targets} -[testenv:combine] -skip_install = true -usedevelop = false -deps = - coverage - pip>=10 -commands= - coverage combine - coverage report - -[testenv:cov-erase] -skip_install = true -usedevelop = false -deps = - coverage -commands= - coverage erase - -[testenv:cov-html] -skip_install = true -usedevelop = false -deps = - coverage -commands= - coverage html - [testenv:mypy] deps = {[base]deps} From fb0ffa96766a4b6f298f53af2d212e4c4d09d9e9 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 3 Mar 2022 18:14:09 +0000 Subject: [PATCH 348/474] Rename various ApplicationServices interested methods (#11915) --- changelog.d/11915.misc | 1 + synapse/appservice/__init__.py | 133 +++++++++++++++++++--------- synapse/handlers/appservice.py | 4 +- synapse/handlers/directory.py | 6 +- synapse/handlers/receipts.py | 2 +- synapse/handlers/typing.py | 4 +- tests/appservice/test_appservice.py | 45 +++++++--- tests/handlers/test_appservice.py | 56 +++++++++--- 8 files changed, 175 insertions(+), 76 deletions(-) create mode 100644 changelog.d/11915.misc diff --git a/changelog.d/11915.misc b/changelog.d/11915.misc new file mode 100644 index 000000000000..e3cef1511eb6 --- /dev/null +++ b/changelog.d/11915.misc @@ -0,0 +1 @@ +Simplify the `ApplicationService` class' set of public methods related to interest checking. \ No newline at end of file diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index 4d3f8e492384..07ec95f1d67e 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py @@ -175,27 +175,14 @@ def _is_exclusive(self, namespace_key: str, test_string: str) -> bool: return namespace.exclusive return False - async def _matches_user(self, event: EventBase, store: "DataStore") -> bool: - if self.is_interested_in_user(event.sender): - return True - - # also check m.room.member state key - if event.type == EventTypes.Member and self.is_interested_in_user( - event.state_key - ): - return True - - does_match = await self.matches_user_in_member_list(event.room_id, store) - return does_match - @cached(num_args=1, cache_context=True) - async def matches_user_in_member_list( + async def _matches_user_in_member_list( self, room_id: str, store: "DataStore", cache_context: _CacheContext, ) -> bool: - """Check if this service is interested a room based upon it's membership + """Check if this service is interested in a room based upon its membership Args: room_id: The room to check. @@ -214,47 +201,110 @@ async def matches_user_in_member_list( return True return False - def _matches_room_id(self, event: EventBase) -> bool: - if hasattr(event, "room_id"): - return self.is_interested_in_room(event.room_id) - return False + def is_interested_in_user( + self, + user_id: str, + ) -> bool: + """ + Returns whether the application service is interested in a given user ID.
+ + The appservice is considered to be interested in a user if either: the + user ID is in the appservice's user namespace, or if the user is the + appservice's configured sender_localpart. + + Args: + user_id: The ID of the user to check. + + Returns: + True if the application service is interested in the user, False if not. + """ + return ( + # User is the appservice's sender_localpart user + user_id == self.sender + # User is in the appservice's user namespace + or self.is_user_in_namespace(user_id) + ) + + @cached(num_args=1, cache_context=True) + async def is_interested_in_room( + self, + room_id: str, + store: "DataStore", + cache_context: _CacheContext, + ) -> bool: + """ + Returns whether the application service is interested in a given room ID. + + The appservice is considered to be interested in the room if either: the ID or one + of the aliases of the room is in the appservice's room ID or alias namespace + respectively, or if one of the members of the room falls into the appservice's user + namespace. - async def _matches_aliases(self, event: EventBase, store: "DataStore") -> bool: - alias_list = await store.get_aliases_for_room(event.room_id) + Args: + room_id: The ID of the room to check. + store: The homeserver's datastore class. + + Returns: + True if the application service is interested in the room, False if not. + """ + # Check if we have interest in this room ID + if self.is_room_id_in_namespace(room_id): + return True + + # likewise with the room's aliases (if it has any) + alias_list = await store.get_aliases_for_room(room_id) for alias in alias_list: - if self.is_interested_in_alias(alias): + if self.is_room_alias_in_namespace(alias): return True - return False + # And finally, perform an expensive check on whether any of the + # users in the room match the appservice's user namespace + return await self._matches_user_in_member_list( + room_id, store, on_invalidate=cache_context.invalidate + ) - async def is_interested(self, event: EventBase, store: "DataStore") -> bool: + @cached(num_args=1, cache_context=True) + async def is_interested_in_event( + self, + event_id: str, + event: EventBase, + store: "DataStore", + cache_context: _CacheContext, + ) -> bool: """Check if this service is interested in this event. Args: + event_id: The ID of the event to check. This is purely used for simplifying the + caching of calls to this method. event: The event to check. store: The datastore to query. Returns: - True if this service would like to know about this event. + True if this service would like to know about this event, otherwise False.
""" - # Do cheap checks first - if self._matches_room_id(event): + # Check if we're interested in this event's sender by namespace (or if they're the + # sender_localpart user) + if self.is_interested_in_user(event.sender): return True - # This will check the namespaces first before - # checking the store, so should be run before _matches_aliases - if await self._matches_user(event, store): + # additionally, if this is a membership event, perform the same checks on + # the user it references + if event.type == EventTypes.Member and self.is_interested_in_user( + event.state_key + ): return True - # This will check the store, so should be run last - if await self._matches_aliases(event, store): + # This will check the datastore, so should be run last + if await self.is_interested_in_room( + event.room_id, store, on_invalidate=cache_context.invalidate + ): return True return False - @cached(num_args=1) + @cached(num_args=1, cache_context=True) async def is_interested_in_presence( - self, user_id: UserID, store: "DataStore" + self, user_id: UserID, store: "DataStore", cache_context: _CacheContext ) -> bool: """Check if this service is interested in a user's presence @@ -272,20 +322,19 @@ async def is_interested_in_presence( # Then find out if the appservice is interested in any of those rooms for room_id in room_ids: - if await self.matches_user_in_member_list(room_id, store): + if await self.is_interested_in_room( + room_id, store, on_invalidate=cache_context.invalidate + ): return True return False - def is_interested_in_user(self, user_id: str) -> bool: - return ( - bool(self._matches_regex(ApplicationService.NS_USERS, user_id)) - or user_id == self.sender - ) + def is_user_in_namespace(self, user_id: str) -> bool: + return bool(self._matches_regex(ApplicationService.NS_USERS, user_id)) - def is_interested_in_alias(self, alias: str) -> bool: + def is_room_alias_in_namespace(self, alias: str) -> bool: return bool(self._matches_regex(ApplicationService.NS_ALIASES, alias)) - def is_interested_in_room(self, room_id: str) -> bool: + def is_room_id_in_namespace(self, room_id: str) -> bool: return bool(self._matches_regex(ApplicationService.NS_ROOMS, room_id)) def is_exclusive_user(self, user_id: str) -> bool: diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index e6461cc3c980..bd913e524e7b 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -571,7 +571,7 @@ async def query_room_alias_exists( room_alias_str = room_alias.to_string() services = self.store.get_app_services() alias_query_services = [ - s for s in services if (s.is_interested_in_alias(room_alias_str)) + s for s in services if (s.is_room_alias_in_namespace(room_alias_str)) ] for alias_service in alias_query_services: is_known_alias = await self.appservice_api.query_alias( @@ -660,7 +660,7 @@ async def _get_services_for_event( # inside of a list comprehension anymore.
interested_list = [] for s in services: - if await s.is_interested(event, self.store): + if await s.is_interested_in_event(event.event_id, event, self.store): interested_list.append(s) return interested_list diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index b7064c6624b7..33d827a45b33 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -119,7 +119,7 @@ async def create_association( service = requester.app_service if service: - if not service.is_interested_in_alias(room_alias_str): + if not service.is_room_alias_in_namespace(room_alias_str): raise SynapseError( 400, "This application service has not reserved this kind of alias.", @@ -221,7 +221,7 @@ async def delete_association( async def delete_appservice_association( self, service: ApplicationService, room_alias: RoomAlias ) -> None: - if not service.is_interested_in_alias(room_alias.to_string()): + if not service.is_room_alias_in_namespace(room_alias.to_string()): raise SynapseError( 400, "This application service has not reserved this kind of alias", @@ -376,7 +376,7 @@ def can_modify_alias(self, alias: RoomAlias, user_id: Optional[str] = None) -> b # non-exclusive locks on the alias (or there are no interested services) services = self.store.get_app_services() interested_services = [ - s for s in services if s.is_interested_in_alias(alias.to_string()) + s for s in services if s.is_room_alias_in_namespace(alias.to_string()) ] for service in interested_services: diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index b4132c353ae2..6250bb3bdf2b 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -269,7 +269,7 @@ async def get_new_events_as( # Then filter down to rooms that the AS can read events = [] for room_id, event in rooms_to_events.items(): - if not await service.matches_user_in_member_list(room_id, self.store): + if not await service.is_interested_in_room(room_id, self.store): continue events.append(event) diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 843c68eb0fdf..3b8912652856 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -486,9 +486,7 @@ async def get_new_events_as( if handler._room_serials[room_id] <= from_key: continue - if not await service.matches_user_in_member_list( - room_id, self._main_store - ): + if not await service.is_interested_in_room(room_id, self._main_store): continue events.append(self._make_event_for(room_id)) diff --git a/tests/appservice/test_appservice.py b/tests/appservice/test_appservice.py index 9bd6275e92db..edc584d0cf50 100644 --- a/tests/appservice/test_appservice.py +++ b/tests/appservice/test_appservice.py @@ -36,7 +36,10 @@ def setUp(self): hostname="matrix.org", # only used by get_groups_for_user ) self.event = Mock( - type="m.something", room_id="!foo:bar", sender="@someone:somewhere" + event_id="$abc:xyz", + type="m.something", + room_id="!foo:bar", + sender="@someone:somewhere", ) self.store = Mock() @@ -50,7 +53,9 @@ def test_regex_user_id_prefix_match(self): self.assertTrue( ( yield defer.ensureDeferred( - self.service.is_interested(self.event, self.store) + self.service.is_interested_in_event( + self.event.event_id, self.event, self.store + ) ) ) ) @@ -62,7 +67,9 @@ def test_regex_user_id_prefix_no_match(self): self.assertFalse( ( yield defer.ensureDeferred( - self.service.is_interested(self.event, self.store) + self.service.is_interested_in_event( + self.event.event_id, self.event, self.store + ) ) ) ) @@ -76,7 +83,9 @@ def 
test_regex_room_member_is_checked(self): self.assertTrue( ( yield defer.ensureDeferred( - self.service.is_interested(self.event, self.store) + self.service.is_interested_in_event( + self.event.event_id, self.event, self.store + ) ) ) ) @@ -90,7 +99,9 @@ def test_regex_room_id_match(self): self.assertTrue( ( yield defer.ensureDeferred( - self.service.is_interested(self.event, self.store) + self.service.is_interested_in_event( + self.event.event_id, self.event, self.store + ) ) ) ) @@ -104,7 +115,9 @@ def test_regex_room_id_no_match(self): self.assertFalse( ( yield defer.ensureDeferred( - self.service.is_interested(self.event, self.store) + self.service.is_interested_in_event( + self.event.event_id, self.event, self.store + ) ) ) ) @@ -121,7 +134,9 @@ def test_regex_alias_match(self): self.assertTrue( ( yield defer.ensureDeferred( - self.service.is_interested(self.event, self.store) + self.service.is_interested_in_event( + self.event.event_id, self.event, self.store + ) ) ) ) @@ -174,7 +189,9 @@ def test_regex_alias_no_match(self): self.assertFalse( ( yield defer.ensureDeferred( - self.service.is_interested(self.event, self.store) + self.service.is_interested_in_event( + self.event.event_id, self.event, self.store + ) ) ) ) @@ -191,7 +208,9 @@ def test_regex_multiple_matches(self): self.assertTrue( ( yield defer.ensureDeferred( - self.service.is_interested(self.event, self.store) + self.service.is_interested_in_event( + self.event.event_id, self.event, self.store + ) ) ) ) @@ -207,7 +226,9 @@ def test_interested_in_self(self): self.assertTrue( ( yield defer.ensureDeferred( - self.service.is_interested(self.event, self.store) + self.service.is_interested_in_event( + self.event.event_id, self.event, self.store + ) ) ) ) @@ -225,7 +246,9 @@ def test_member_list_match(self): self.assertTrue( ( yield defer.ensureDeferred( - self.service.is_interested(event=self.event, store=self.store) + self.service.is_interested_in_event( + self.event.event_id, self.event, self.store + ) ) ) ) diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index 072e6bbcdd6e..cead9f90df56 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -59,11 +59,11 @@ def setUp(self): self.event_source = hs.get_event_sources() def test_notify_interested_services(self): - interested_service = self._mkservice(is_interested=True) + interested_service = self._mkservice(is_interested_in_event=True) services = [ - self._mkservice(is_interested=False), + self._mkservice(is_interested_in_event=False), interested_service, - self._mkservice(is_interested=False), + self._mkservice(is_interested_in_event=False), ] self.mock_as_api.query_user.return_value = make_awaitable(True) @@ -85,7 +85,7 @@ def test_notify_interested_services(self): def test_query_user_exists_unknown_user(self): user_id = "@someone:anywhere" - services = [self._mkservice(is_interested=True)] + services = [self._mkservice(is_interested_in_event=True)] services[0].is_interested_in_user.return_value = True self.mock_store.get_app_services.return_value = services self.mock_store.get_user_by_id.return_value = make_awaitable(None) @@ -102,7 +102,7 @@ def test_query_user_exists_unknown_user(self): def test_query_user_exists_known_user(self): user_id = "@someone:anywhere" - services = [self._mkservice(is_interested=True)] + services = [self._mkservice(is_interested_in_event=True)] services[0].is_interested_in_user.return_value = True self.mock_store.get_app_services.return_value = services 
self.mock_store.get_user_by_id.return_value = make_awaitable({"name": user_id}) @@ -127,11 +127,11 @@ def test_query_room_alias_exists(self): room_id = "!alpha:bet" servers = ["aperture"] - interested_service = self._mkservice_alias(is_interested_in_alias=True) + interested_service = self._mkservice_alias(is_room_alias_in_namespace=True) services = [ - self._mkservice_alias(is_interested_in_alias=False), + self._mkservice_alias(is_room_alias_in_namespace=False), interested_service, - self._mkservice_alias(is_interested_in_alias=False), + self._mkservice_alias(is_room_alias_in_namespace=False), ] self.mock_as_api.query_alias.return_value = make_awaitable(True) @@ -275,7 +275,7 @@ def test_notify_interested_services_ephemeral(self): to be pushed out to interested appservices, and that the stream ID is updated accordingly. """ - interested_service = self._mkservice(is_interested=True) + interested_service = self._mkservice(is_interested_in_event=True) services = [interested_service] self.mock_store.get_app_services.return_value = services self.mock_store.get_type_stream_id_for_appservice.return_value = make_awaitable( @@ -304,7 +304,7 @@ def test_notify_interested_services_ephemeral_out_of_order(self): Test that out of order ephemeral events sent to the appservice handler are ignored. """ - interested_service = self._mkservice(is_interested=True) + interested_service = self._mkservice(is_interested_in_event=True) services = [interested_service] self.mock_store.get_app_services.return_value = services @@ -325,17 +325,45 @@ def test_notify_interested_services_ephemeral_out_of_order(self): interested_service, ephemeral=[] ) - def _mkservice(self, is_interested, protocols=None): + def _mkservice( + self, is_interested_in_event: bool, protocols: Optional[Iterable] = None + ) -> Mock: + """ + Create a new mock representing an ApplicationService. + + Args: + is_interested_in_event: Whether this application service will be considered + interested in all events. + protocols: The third-party protocols that this application service claims to + support. + + Returns: + A mock representing the ApplicationService. + """ service = Mock() - service.is_interested.return_value = make_awaitable(is_interested) + service.is_interested_in_event.return_value = make_awaitable( + is_interested_in_event + ) service.token = "mock_service_token" service.url = "mock_service_url" service.protocols = protocols return service - def _mkservice_alias(self, is_interested_in_alias): + def _mkservice_alias(self, is_room_alias_in_namespace: bool) -> Mock: + """ + Create a new mock representing an ApplicationService that is or is not interested + in any given room alias. + + Args: + is_room_alias_in_namespace: If true, the application service will be interested + in all room aliases that are queried against it. If false, the application + service will not be interested in any room aliases. + + Returns: + A mock representing the ApplicationService.
+ """ service = Mock() - service.is_interested_in_alias.return_value = is_interested_in_alias + service.is_room_alias_in_namespace.return_value = is_room_alias_in_namespace service.token = "mock_service_token" service.url = "mock_service_url" return service From 8533c8b03d8916e3805c7d0e0020226017680147 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 3 Mar 2022 19:58:08 +0000 Subject: [PATCH 349/474] Avoid generating state groups for local out-of-band leaves (#12154) If we locally generate a rejection for an invite received over federation, it is stored as an outlier (because we probably don't have the state for the room). However, currently we still generate a state group for it (even though the state in that state group will be nonsense). By setting the `outlier` param on `create_event`, we avoid the nonsensical state. --- changelog.d/12154.misc | 1 + synapse/handlers/room_member.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12154.misc diff --git a/changelog.d/12154.misc b/changelog.d/12154.misc new file mode 100644 index 000000000000..18d2a4728be9 --- /dev/null +++ b/changelog.d/12154.misc @@ -0,0 +1 @@ +Avoid generating state groups for local out-of-band leaves. diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index a582837cf0aa..7cbc484b0654 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -1736,8 +1736,8 @@ async def _generate_local_out_of_band_leave( txn_id=txn_id, prev_event_ids=prev_event_ids, auth_event_ids=auth_event_ids, + outlier=True, ) - event.internal_metadata.outlier = True event.internal_metadata.out_of_band_membership = True result_event = await self.event_creation_handler.handle_new_client_event( From d56202b0383627fdb4e04404d62922dce16868f8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 4 Mar 2022 10:25:18 +0000 Subject: [PATCH 350/474] Fix type of `events` in `StateGroupStorage` and `StateHandler` (#12156) We make multiple passes over this, so a regular iterable won't do. --- changelog.d/12156.misc | 1 + synapse/state/__init__.py | 6 +++--- synapse/storage/state.py | 8 ++++---- 3 files changed, 8 insertions(+), 7 deletions(-) create mode 100644 changelog.d/12156.misc diff --git a/changelog.d/12156.misc b/changelog.d/12156.misc new file mode 100644 index 000000000000..4818d988d771 --- /dev/null +++ b/changelog.d/12156.misc @@ -0,0 +1 @@ +Fix some type annotations. 
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 6babd5963cc1..21888cc8c561 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -194,7 +194,7 @@ async def get_current_state( } async def get_current_state_ids( - self, room_id: str, latest_event_ids: Optional[Iterable[str]] = None + self, room_id: str, latest_event_ids: Optional[Collection[str]] = None ) -> StateMap[str]: """Get the current state, or the state at a set of events, for a room @@ -243,7 +243,7 @@ async def get_current_hosts_in_room(self, room_id: str) -> Set[str]: return await self.get_hosts_in_room_at_events(room_id, event_ids) async def get_hosts_in_room_at_events( - self, room_id: str, event_ids: Iterable[str] + self, room_id: str, event_ids: Collection[str] ) -> Set[str]: """Get the hosts that were in a room at the given event ids @@ -404,7 +404,7 @@ async def compute_event_context( @measure_func() async def resolve_state_groups_for_events( - self, room_id: str, event_ids: Iterable[str] + self, room_id: str, event_ids: Collection[str] ) -> _StateCacheEntry: """Given a list of event_ids this method fetches the state at each event, resolves conflicts between them and returns them. diff --git a/synapse/storage/state.py b/synapse/storage/state.py index e79ecf64a0ec..86f1a5373bad 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -561,7 +561,7 @@ async def get_state_group_delta( return state_group_delta.prev_group, state_group_delta.delta_ids async def get_state_groups_ids( - self, _room_id: str, event_ids: Iterable[str] + self, _room_id: str, event_ids: Collection[str] ) -> Dict[int, MutableStateMap[str]]: """Get the event IDs of all the state for the state groups for the given events @@ -596,7 +596,7 @@ async def get_state_ids_for_group(self, state_group: int) -> StateMap[str]: return group_to_state[state_group] async def get_state_groups( - self, room_id: str, event_ids: Iterable[str] + self, room_id: str, event_ids: Collection[str] ) -> Dict[int, List[EventBase]]: """Get the state groups for the given list of event_ids @@ -648,7 +648,7 @@ def _get_state_groups_from_groups( return self.stores.state._get_state_groups_from_groups(groups, state_filter) async def get_state_for_events( - self, event_ids: Iterable[str], state_filter: Optional[StateFilter] = None + self, event_ids: Collection[str], state_filter: Optional[StateFilter] = None ) -> Dict[str, StateMap[EventBase]]: """Given a list of event_ids and type tuples, return a list of state dicts for each event. @@ -684,7 +684,7 @@ async def get_state_for_events( return {event: event_to_state[event] for event in event_ids} async def get_state_ids_for_events( - self, event_ids: Iterable[str], state_filter: Optional[StateFilter] = None + self, event_ids: Collection[str], state_filter: Optional[StateFilter] = None ) -> Dict[str, StateMap[str]]: """ Get the state dicts corresponding to a list of events, containing the event_ids From 87c230c27cdeb7e421f61f1271a500c760f1f63b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 4 Mar 2022 10:31:19 +0000 Subject: [PATCH 351/474] Update client-visibility filtering for outlier events (#12155) Avoid trying to get the state for outliers, which isn't a sensible thing to do. 
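To make the new rule concrete before the diff: outliers are events accepted without the surrounding room state, so the usual history-visibility checks cannot run on them. A boiled-down sketch of the resulting decision (a hypothetical standalone function for illustration only; the actual Synapse code below operates on full `EventBase` objects):

```python
from typing import Optional


def outlier_visible_to(event_type: str, state_key: Optional[str], user_id: str) -> bool:
    """Whether an outlier event may be shown to the requesting user.

    With no room state available, the only safe exception is an
    out-of-band membership event about the user themselves (e.g. an
    incoming invite, or a locally-generated rejection of one).
    """
    return event_type == "m.room.member" and state_key == user_id
```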
--- changelog.d/12155.misc | 1 + synapse/visibility.py | 17 ++++++++- tests/test_visibility.py | 76 ++++++++++++++++++++++++++++++++++++++-- 3 files changed, 90 insertions(+), 4 deletions(-) create mode 100644 changelog.d/12155.misc diff --git a/changelog.d/12155.misc b/changelog.d/12155.misc new file mode 100644 index 000000000000..9f333e718a86 --- /dev/null +++ b/changelog.d/12155.misc @@ -0,0 +1 @@ +Avoid trying to calculate the state at outlier events. diff --git a/synapse/visibility.py b/synapse/visibility.py index 1b970ce479d0..281cbe4d8877 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -81,8 +81,9 @@ async def filter_events_for_client( types = ((EventTypes.RoomHistoryVisibility, ""), (EventTypes.Member, user_id)) + # we exclude outliers at this point, and then handle them separately later event_id_to_state = await storage.state.get_state_for_events( - frozenset(e.event_id for e in events), + frozenset(e.event_id for e in events if not e.internal_metadata.outlier), state_filter=StateFilter.from_types(types), ) @@ -154,6 +155,17 @@ def allowed(event: EventBase) -> Optional[EventBase]: if event.event_id in always_include_ids: return event + # we need to handle outliers separately, since we don't have the room state. + if event.internal_metadata.outlier: + # Normally these can't be seen by clients, but we make an exception + # for out-of-band membership events (eg, incoming invites, or rejections of + # said invite) for the user themselves. + if event.type == EventTypes.Member and event.state_key == user_id: + logger.debug("Returning out-of-band-membership event %s", event) + return event + + return None + state = event_id_to_state[event.event_id] # get the room_visibility at the time of the event. @@ -198,6 +210,9 @@ def allowed(event: EventBase) -> Optional[EventBase]: # Always allow the user to see their own leave events, otherwise # they won't see the room disappear if they reject the invite + # + # (Note this doesn't work for out-of-band invite rejections, which don't + # have prev_state populated. They are handled above in the outlier code.) if membership == "leave" and ( prev_membership == "join" or prev_membership == "invite" ): diff --git a/tests/test_visibility.py b/tests/test_visibility.py index 219b5660b117..532e3fe9cd92 100644 --- a/tests/test_visibility.py +++ b/tests/test_visibility.py @@ -13,11 +13,12 @@ # limitations under the License. import logging from typing import Optional +from unittest.mock import patch from synapse.api.room_versions import RoomVersions -from synapse.events import EventBase -from synapse.types import JsonDict -from synapse.visibility import filter_events_for_server +from synapse.events import EventBase, make_event_from_dict +from synapse.types import JsonDict, create_requester +from synapse.visibility import filter_events_for_client, filter_events_for_server from tests import unittest from tests.utils import create_room @@ -185,3 +186,72 @@ def _inject_message( self.get_success(self.storage.persistence.persist_event(event, context)) return event + + +class FilterEventsForClientTestCase(unittest.FederatingHomeserverTestCase): + def test_out_of_band_invite_rejection(self): + # this is where we have received an invite event over federation, and then + # rejected it.
+ invite_pdu = { + "room_id": "!room:id", + "depth": 1, + "auth_events": [], + "prev_events": [], + "origin_server_ts": 1, + "sender": "@someone:" + self.OTHER_SERVER_NAME, + "type": "m.room.member", + "state_key": "@user:test", + "content": {"membership": "invite"}, + } + self.add_hashes_and_signatures(invite_pdu) + invite_event_id = make_event_from_dict(invite_pdu, RoomVersions.V9).event_id + + self.get_success( + self.hs.get_federation_server().on_invite_request( + self.OTHER_SERVER_NAME, + invite_pdu, + "9", + ) + ) + + # stub out do_remotely_reject_invite so that we fall back to a locally- + # generated rejection + with patch.object( + self.hs.get_federation_handler(), + "do_remotely_reject_invite", + side_effect=Exception(), + ): + reject_event_id, _ = self.get_success( + self.hs.get_room_member_handler().remote_reject_invite( + invite_event_id, + txn_id=None, + requester=create_requester("@user:test"), + content={}, + ) + ) + + invite_event, reject_event = self.get_success( + self.hs.get_datastores().main.get_events_as_list( + [invite_event_id, reject_event_id] + ) + ) + + # the invited user should be able to see both the invite and the rejection + self.assertEqual( + self.get_success( + filter_events_for_client( + self.hs.get_storage(), "@user:test", [invite_event, reject_event] + ) + ), + [invite_event, reject_event], + ) + + # other users should see neither + self.assertEqual( + self.get_success( + filter_events_for_client( + self.hs.get_storage(), "@other:test", [invite_event, reject_event] + ) + ), + [], + ) From 423cca9efe06d78aaca5f62fb74ee7e5bceebe49 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 4 Mar 2022 11:48:15 +0000 Subject: [PATCH 352/474] Spread out sending device lists to remote hosts (#12132) --- changelog.d/12132.feature | 1 + synapse/federation/send_queue.py | 2 +- synapse/federation/sender/__init__.py | 26 ++++++---- .../sender/per_destination_queue.py | 10 ++++ synapse/handlers/device.py | 2 +- synapse/replication/tcp/client.py | 2 +- tests/federation/test_federation_sender.py | 52 +++++++++++++++++-- 7 files changed, 79 insertions(+), 16 deletions(-) create mode 100644 changelog.d/12132.feature diff --git a/changelog.d/12132.feature b/changelog.d/12132.feature new file mode 100644 index 000000000000..3b8362ad35ed --- /dev/null +++ b/changelog.d/12132.feature @@ -0,0 +1 @@ +Improve performance of logging in for large accounts. diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index 0d7c4f506758..d720b5fd3fe2 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -244,7 +244,7 @@ def send_presence_to_destinations( self.notifier.on_new_replication_data() - def send_device_messages(self, destination: str) -> None: + def send_device_messages(self, destination: str, immediate: bool = False) -> None: """As per FederationSender""" # We don't need to replicate this as it gets sent down a different # stream. diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 6106a486d10a..30e2421efc6d 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -118,7 +118,12 @@ def build_and_send_edu( raise NotImplementedError() @abc.abstractmethod - def send_device_messages(self, destination: str) -> None: + def send_device_messages(self, destination: str, immediate: bool = True) -> None: + """Tells the sender that a new device message is ready to be sent to the + destination. 
The `immediate` flag specifies whether we should try to send the messages + immediately, or whether they can be delayed for a + short while (to aid performance). + """ raise NotImplementedError() @abc.abstractmethod @@ -146,9 +151,8 @@ async def get_replication_rows( @attr.s -class _PresenceQueue: - """A queue of destinations that need to be woken up due to new presence - updates. +class _DestinationWakeupQueue: + """A queue of destinations that need to be woken up due to new updates. Staggers waking up of per destination queues to ensure that we don't attempt to start TLS connections with many hosts all at once, leading to pinned CPU. @@ -175,7 +179,7 @@ def add_to_queue(self, destination: str) -> None: if not self.processing: self._handle() - @wrap_as_background_process("_PresenceQueue.handle") + @wrap_as_background_process("_DestinationWakeupQueue.handle") async def _handle(self) -> None: """Background process to drain the queue.""" @@ -297,7 +301,7 @@ def __init__(self, hs: "HomeServer"): self._external_cache = hs.get_external_cache() - self._presence_queue = _PresenceQueue(self, self.clock) + self._destination_wakeup_queue = _DestinationWakeupQueue(self, self.clock) def _get_per_destination_queue(self, destination: str) -> PerDestinationQueue: """Get or create a PerDestinationQueue for the given destination @@ -614,7 +618,7 @@ def send_presence_to_destinations( states, start_loop=False ) - self._presence_queue.add_to_queue(destination) + self._destination_wakeup_queue.add_to_queue(destination) def build_and_send_edu( self, @@ -667,7 +671,7 @@ def send_edu(self, edu: Edu, key: Optional[Hashable]) -> None: else: queue.send_edu(edu) - def send_device_messages(self, destination: str) -> None: + def send_device_messages(self, destination: str, immediate: bool = False) -> None: if destination == self.server_name: logger.warning("Not sending device update to ourselves") return @@ -677,7 +681,11 @@ def send_device_messages(self, destination: str) -> None: ): return - self._get_per_destination_queue(destination).attempt_new_transaction() + if immediate: + self._get_per_destination_queue(destination).attempt_new_transaction() + else: + self._get_per_destination_queue(destination).mark_new_data() + self._destination_wakeup_queue.add_to_queue(destination) def wake_destination(self, destination: str) -> None: """Called when we want to retry sending transactions to a remote. diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index c8768f22bc6b..d80f0ac5e8c3 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -219,6 +219,16 @@ def send_edu(self, edu: Edu) -> None: self._pending_edus.append(edu) self.attempt_new_transaction() + def mark_new_data(self) -> None: + """Marks that the destination has new data to send, without starting a + new transaction. + + If a transaction loop is already in progress then a new transaction will + be attempted when the current one finishes.
+ """ + + self._new_data_to_send = True + def attempt_new_transaction(self) -> None: """Try to start a new transaction to this destination diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 934b5bd7349b..d90cb259a65c 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -506,7 +506,7 @@ async def notify_device_update( "Sending device list update notif for %r to: %r", user_id, hosts ) for host in hosts: - self.federation_sender.send_device_messages(host) + self.federation_sender.send_device_messages(host, immediate=False) log_kv({"message": "sent device update to host", "host": host}) async def notify_user_signature_update( diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 1b8479b0b4ec..b8fc1d4db95e 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -380,7 +380,7 @@ async def process_replication_rows( # changes. hosts = {row.entity for row in rows if not row.entity.startswith("@")} for host in hosts: - self.federation_sender.send_device_messages(host) + self.federation_sender.send_device_messages(host, immediate=False) elif stream_name == ToDeviceStream.NAME: # The to_device stream includes stuff to be pushed to both local diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py index 60e0c31f4384..e90592855ad9 100644 --- a/tests/federation/test_federation_sender.py +++ b/tests/federation/test_federation_sender.py @@ -201,9 +201,12 @@ def test_send_device_updates(self): self.assertEqual(len(self.edus), 1) stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D1", None) + # We queue up device list updates to be sent over federation, so we + # advance to clear the queue. + self.reactor.advance(1) + # a second call should produce no new device EDUs self.hs.get_federation_sender().send_device_messages("host2") - self.pump() self.assertEqual(self.edus, []) # a second device @@ -232,6 +235,10 @@ def test_upload_signatures(self): device1_signing_key = self.generate_and_upload_device_signing_key(u1, "D1") device2_signing_key = self.generate_and_upload_device_signing_key(u1, "D2") + # We queue up device list updates to be sent over federation, so we + # advance to clear the queue. + self.reactor.advance(1) + # expect two more edus self.assertEqual(len(self.edus), 2) stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D1", stream_id) @@ -265,6 +272,10 @@ def test_upload_signatures(self): e2e_handler.upload_signing_keys_for_user(u1, cross_signing_keys) ) + # We queue up device list updates to be sent over federation, so we + # advance to clear the queue. + self.reactor.advance(1) + # expect signing key update edu self.assertEqual(len(self.edus), 2) self.assertEqual(self.edus.pop(0)["edu_type"], "m.signing_key_update") @@ -284,6 +295,10 @@ def test_upload_signatures(self): ) self.assertEqual(ret["failures"], {}) + # We queue up device list updates to be sent over federation, so we + # advance to clear the queue. + self.reactor.advance(1) + # expect two edus, in one or two transactions. We don't know what order the # devices will be updated. self.assertEqual(len(self.edus), 2) @@ -307,6 +322,10 @@ def test_delete_devices(self): self.login("user", "pass", device_id="D2") self.login("user", "pass", device_id="D3") + # We queue up device list updates to be sent over federation, so we + # advance to clear the queue. 
+ self.reactor.advance(1) + # expect three edus self.assertEqual(len(self.edus), 3) stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D1", None) @@ -318,6 +337,10 @@ def test_delete_devices(self): self.hs.get_device_handler().delete_devices(u1, ["D1", "D2", "D3"]) ) + # We queue up device list updates to be sent over federation, so we + # advance to clear the queue. + self.reactor.advance(1) + # expect three edus, in an unknown order self.assertEqual(len(self.edus), 3) for edu in self.edus: @@ -350,12 +373,19 @@ def test_unreachable_server(self): self.hs.get_device_handler().delete_devices(u1, ["D1", "D2", "D3"]) ) + # We queue up device list updates to be sent over federation, so we + # advance to clear the queue. + self.reactor.advance(1) + self.assertGreaterEqual(mock_send_txn.call_count, 4) # recover the server mock_send_txn.side_effect = self.record_transaction self.hs.get_federation_sender().send_device_messages("host2") - self.pump() + + # We queue up device list updates to be sent over federation, so we + # advance to clear the queue. + self.reactor.advance(1) # for each device, there should be a single update self.assertEqual(len(self.edus), 3) @@ -390,6 +420,10 @@ def test_prune_outbound_device_pokes1(self): self.hs.get_device_handler().delete_devices(u1, ["D1", "D2", "D3"]) ) + # We queue up device list updates to be sent over federation, so we + # advance to clear the queue. + self.reactor.advance(1) + self.assertGreaterEqual(mock_send_txn.call_count, 4) # run the prune job @@ -401,7 +435,10 @@ def test_prune_outbound_device_pokes1(self): # recover the server mock_send_txn.side_effect = self.record_transaction self.hs.get_federation_sender().send_device_messages("host2") - self.pump() + + # We queue up device list updates to be sent over federation, so we + # advance to clear the queue. + self.reactor.advance(1) # there should be a single update for this user. self.assertEqual(len(self.edus), 1) @@ -435,6 +472,10 @@ def test_prune_outbound_device_pokes2(self): self.login("user", "pass", device_id="D2") self.login("user", "pass", device_id="D3") + # We queue up device list updates to be sent over federation, so we + # advance to clear the queue. + self.reactor.advance(1) + # delete them again self.get_success( self.hs.get_device_handler().delete_devices(u1, ["D1", "D2", "D3"]) @@ -451,7 +492,10 @@ def test_prune_outbound_device_pokes2(self): # recover the server mock_send_txn.side_effect = self.record_transaction self.hs.get_federation_sender().send_device_messages("host2") - self.pump() + + # We queue up device list updates to be sent over federation, so we + # advance to clear the queue. + self.reactor.advance(1) # ... and we should get a single update for this user. 
self.assertEqual(len(self.edus), 1) From 4aeb00ca20a0d9dbb2a104591aca081c723eb6d9 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 4 Mar 2022 11:58:49 +0000 Subject: [PATCH 353/474] Move synctl into `synapse._scripts` and expose as an entrypoint (#12140) --- .dockerignore | 1 - MANIFEST.in | 1 - changelog.d/12140.misc | 1 + docker/Dockerfile | 2 +- docs/postgres.md | 8 ++++---- docs/turn-howto.md | 5 +++-- docs/upgrade.md | 23 ++++++++++++++++++++++- scripts-dev/lint.sh | 2 +- setup.py | 2 +- synctl => synapse/_scripts/synctl.py | 0 tox.ini | 1 - 11 files changed, 33 insertions(+), 13 deletions(-) create mode 100644 changelog.d/12140.misc rename synctl => synapse/_scripts/synctl.py (100%) diff --git a/.dockerignore b/.dockerignore index 617f7015971b..434231fce9fc 100644 --- a/.dockerignore +++ b/.dockerignore @@ -7,6 +7,5 @@ !MANIFEST.in !README.rst !setup.py -!synctl **/__pycache__ diff --git a/MANIFEST.in b/MANIFEST.in index f1e295e5837f..d744c090acde 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,4 +1,3 @@ -include synctl include LICENSE include VERSION include *.rst diff --git a/changelog.d/12140.misc b/changelog.d/12140.misc new file mode 100644 index 000000000000..33a21a29f0f4 --- /dev/null +++ b/changelog.d/12140.misc @@ -0,0 +1 @@ +Move `synctl` into `synapse._scripts` and expose as an entry point. \ No newline at end of file diff --git a/docker/Dockerfile b/docker/Dockerfile index 327275a9cae6..24b5515eb99e 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -46,7 +46,7 @@ RUN \ && rm -rf /var/lib/apt/lists/* # Copy just what we need to pip install -COPY MANIFEST.in README.rst setup.py synctl /synapse/ +COPY MANIFEST.in README.rst setup.py /synapse/ COPY synapse/__init__.py /synapse/synapse/__init__.py COPY synapse/python_dependencies.py /synapse/synapse/python_dependencies.py diff --git a/docs/postgres.md b/docs/postgres.md index 0562021da526..de4e2ba4b756 100644 --- a/docs/postgres.md +++ b/docs/postgres.md @@ -153,9 +153,9 @@ database file (typically `homeserver.db`) to another location. Once the copy is complete, restart synapse. For instance: ```sh -./synctl stop +synctl stop cp homeserver.db homeserver.db.snapshot -./synctl start +synctl start ``` Copy the old config file into a new config file: @@ -192,10 +192,10 @@ Once that has completed, change the synapse config to point at the PostgreSQL database configuration file `homeserver-postgres.yaml`: ```sh -./synctl stop +synctl stop mv homeserver.yaml homeserver-old-sqlite.yaml mv homeserver-postgres.yaml homeserver.yaml -./synctl start +synctl start ``` Synapse should now be running against PostgreSQL. diff --git a/docs/turn-howto.md b/docs/turn-howto.md index eba7ca6124a5..3a2cd04e36a9 100644 --- a/docs/turn-howto.md +++ b/docs/turn-howto.md @@ -238,8 +238,9 @@ After updating the homeserver configuration, you must restart synapse: * If you use synctl: ```sh - cd /where/you/run/synapse - ./synctl restart + # Depending on how Synapse is installed, synctl may already be on + # your PATH. If not, you may need to activate a virtual environment. + synctl restart ``` * If you use systemd: ```sh diff --git a/docs/upgrade.md b/docs/upgrade.md index f9be3ac6bc15..0d0bb066ee63 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -47,7 +47,7 @@ this document. 3. 
Restart Synapse: ```bash - ./synctl restart + synctl restart ``` To check whether your update was successful, you can check the running @@ -85,6 +85,27 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.55.0 + +## `synctl` script has been moved + +The `synctl` script +[has been made](https://github.com/matrix-org/synapse/pull/12140) an +[entry point](https://packaging.python.org/en/latest/specifications/entry-points/) +and no longer exists at the root of Synapse's source tree. If you wish to use +`synctl` to manage your homeserver, you should invoke `synctl` directly, e.g. +`synctl start` instead of `./synctl start` or `/path/to/synctl start`. + +You will need to ensure `synctl` is on your `PATH`. + - This is automatically the case when using + [Debian packages](https://packages.matrix.org/debian/) or + [docker images](https://hub.docker.com/r/matrixdotorg/synapse) + provided by Matrix.org. + - When installing from a wheel, sdist, or PyPI, a `synctl` executable is added + to your Python installation's `bin`. This should be on your `PATH` + automatically, though you might need to activate a virtual environment + depending on how you installed Synapse. + # Upgrading to v1.54.0 ## Legacy structured logging configuration removal diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh index 2f5f2c356674..c063fafa973b 100755 --- a/scripts-dev/lint.sh +++ b/scripts-dev/lint.sh @@ -85,7 +85,7 @@ else "synapse" "docker" "tests" # annoyingly, black doesn't find these so we have to list them "scripts-dev" - "contrib" "synctl" "setup.py" "synmark" "stubs" ".ci" + "contrib" "setup.py" "synmark" "stubs" ".ci" ) fi fi diff --git a/setup.py b/setup.py index 318df16766ec..439ed75d7282 100755 --- a/setup.py +++ b/setup.py @@ -155,6 +155,7 @@ def exec_file(path_segments): # Application "synapse_homeserver = synapse.app.homeserver:main", "synapse_worker = synapse.app.generic_worker:main", + "synctl = synapse._scripts.synctl:main", # Scripts "export_signing_key = synapse._scripts.export_signing_key:main", "generate_config = synapse._scripts.generate_config:main", @@ -177,6 +178,5 @@ def exec_file(path_segments): "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", ], - scripts=["synctl"], cmdclass={"test": TestCommand}, ) diff --git a/synctl b/synapse/_scripts/synctl.py similarity index 100% rename from synctl rename to synapse/_scripts/synctl.py diff --git a/tox.ini b/tox.ini index 04d282a705af..f1f96b27ea14 100644 --- a/tox.ini +++ b/tox.ini @@ -42,7 +42,6 @@ lint_targets = scripts-dev stubs contrib - synctl synmark .ci docker From 36071d39f784ddc2271c91a72a55d9ee4f8689bb Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 4 Mar 2022 12:01:51 +0000 Subject: [PATCH 354/474] Changelog (#12153) --- .github/workflows/tests.yml | 1 + changelog.d/12153.misc | 1 + tox.ini | 1 - 3 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12153.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index c89c50cd07e2..3bce95b0e057 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -17,6 +17,7 @@ jobs: - uses: actions/setup-python@v2 - run: pip install -e . 
- run: scripts-dev/generate_sample_config.sh --check + - run: scripts-dev/config-lint.sh lint: runs-on: ubuntu-latest diff --git a/changelog.d/12153.misc b/changelog.d/12153.misc new file mode 100644 index 000000000000..f02d140f3871 --- /dev/null +++ b/changelog.d/12153.misc @@ -0,0 +1 @@ +Move CI checks out of tox, to facilitate a move to using poetry. \ No newline at end of file diff --git a/tox.ini b/tox.ini index f1f96b27ea14..3ffd2c3e97e8 100644 --- a/tox.ini +++ b/tox.ini @@ -151,7 +151,6 @@ extras = lint commands = python -m black --check --diff {[base]lint_targets} flake8 {[base]lint_targets} {env:PEP8SUFFIX:} - {toxinidir}/scripts-dev/config-lint.sh [testenv:check_isort] extras = lint commands = isort -c --df {[base]lint_targets} From cd1ae3d0b438ff453b7d4750c4fe901f266fcbb6 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 4 Mar 2022 07:10:10 -0500 Subject: [PATCH 355/474] Remove backwards compatibility with RelationPaginationToken. (#12138) --- changelog.d/12138.removal | 1 + synapse/rest/client/relations.py | 55 ++++++---------------- synapse/storage/relations.py | 31 ------------ tests/rest/client/test_relations.py | 73 +---------------------------- 4 files changed, 16 insertions(+), 144 deletions(-) create mode 100644 changelog.d/12138.removal diff --git a/changelog.d/12138.removal b/changelog.d/12138.removal new file mode 100644 index 000000000000..6ed84d476cd9 --- /dev/null +++ b/changelog.d/12138.removal @@ -0,0 +1 @@ +Remove backwards compatibility with pagination tokens from the `/relations` and `/aggregations` endpoints generated from Synapse < v1.52.0. diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py index 487ea38b55cc..07fa1cdd4c67 100644 --- a/synapse/rest/client/relations.py +++ b/synapse/rest/client/relations.py @@ -27,50 +27,15 @@ from synapse.http.servlet import RestServlet, parse_integer, parse_string from synapse.http.site import SynapseRequest from synapse.rest.client._base import client_patterns -from synapse.storage.relations import ( - AggregationPaginationToken, - PaginationChunk, - RelationPaginationToken, -) -from synapse.types import JsonDict, RoomStreamToken, StreamToken +from synapse.storage.relations import AggregationPaginationToken, PaginationChunk +from synapse.types import JsonDict, StreamToken if TYPE_CHECKING: from synapse.server import HomeServer - from synapse.storage.databases.main import DataStore logger = logging.getLogger(__name__) -async def _parse_token( - store: "DataStore", token: Optional[str] -) -> Optional[StreamToken]: - """ - For backwards compatibility support RelationPaginationToken, but new pagination - tokens are generated as full StreamTokens, to be compatible with /sync and /messages. - """ - if not token: - return None - # Luckily the format for StreamToken and RelationPaginationToken differ enough - # that they can easily be separated. An "_" appears in the serialization of - # RoomStreamToken (as part of StreamToken), but RelationPaginationToken uses - # "-" only for separators.
- if "_" in token: - return await StreamToken.from_string(store, token) - else: - relation_token = RelationPaginationToken.from_string(token) - return StreamToken( - room_key=RoomStreamToken(relation_token.topological, relation_token.stream), - presence_key=0, - typing_key=0, - receipt_key=0, - account_data_key=0, - push_rules_key=0, - to_device_key=0, - device_list_key=0, - groups_key=0, - ) - - class RelationPaginationServlet(RestServlet): """API to paginate relations on an event by topological ordering, optionally filtered by relation type and event type. @@ -122,8 +87,12 @@ async def on_GET( pagination_chunk = PaginationChunk(chunk=[]) else: # Return the relations - from_token = await _parse_token(self.store, from_token_str) - to_token = await _parse_token(self.store, to_token_str) + from_token = None + if from_token_str: + from_token = await StreamToken.from_string(self.store, from_token_str) + to_token = None + if to_token_str: + to_token = await StreamToken.from_string(self.store, to_token_str) pagination_chunk = await self.store.get_relations_for_event( event_id=parent_id, @@ -317,8 +286,12 @@ async def on_GET( from_token_str = parse_string(request, "from") to_token_str = parse_string(request, "to") - from_token = await _parse_token(self.store, from_token_str) - to_token = await _parse_token(self.store, to_token_str) + from_token = None + if from_token_str: + from_token = await StreamToken.from_string(self.store, from_token_str) + to_token = None + if to_token_str: + to_token = await StreamToken.from_string(self.store, to_token_str) result = await self.store.get_relations_for_event( event_id=parent_id, diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py index 36ca2b827398..fba270150b63 100644 --- a/synapse/storage/relations.py +++ b/synapse/storage/relations.py @@ -54,37 +54,6 @@ async def to_dict(self, store: "DataStore") -> Dict[str, Any]: return d -@attr.s(frozen=True, slots=True, auto_attribs=True) -class RelationPaginationToken: - """Pagination token for relation pagination API. - - As the results are in topological order, we can use the - `topological_ordering` and `stream_ordering` fields of the events at the - boundaries of the chunk as pagination tokens. - - Attributes: - topological: The topological ordering of the boundary event - stream: The stream ordering of the boundary event. - """ - - topological: int - stream: int - - @staticmethod - def from_string(string: str) -> "RelationPaginationToken": - try: - t, s = string.split("-") - return RelationPaginationToken(int(t), int(s)) - except ValueError: - raise SynapseError(400, "Invalid relation pagination token") - - async def to_string(self, store: "DataStore") -> str: - return "%d-%d" % (self.topological, self.stream) - - def as_tuple(self) -> Tuple[Any, ...]: - return attr.astuple(self) - - @attr.s(frozen=True, slots=True, auto_attribs=True) class AggregationPaginationToken: """Pagination token for relation aggregation pagination API. 
diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index 53062b41deaa..274f9c44c164 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -24,8 +24,7 @@ from synapse.rest import admin from synapse.rest.client import login, register, relations, room, sync from synapse.server import HomeServer -from synapse.storage.relations import RelationPaginationToken -from synapse.types import JsonDict, StreamToken +from synapse.types import JsonDict from synapse.util import Clock from tests import unittest @@ -281,15 +280,6 @@ def test_basic_paginate_relations(self) -> None: channel.json_body["chunk"][0], ) - def _stream_token_to_relation_token(self, token: str) -> str: - """Convert a StreamToken into a legacy token (RelationPaginationToken).""" - room_key = self.get_success(StreamToken.from_string(self.store, token)).room_key - return self.get_success( - RelationPaginationToken( - topological=room_key.topological, stream=room_key.stream - ).to_string(self.store) - ) - def test_repeated_paginate_relations(self) -> None: """Test that if we paginate using a limit and tokens then we get the expected events. @@ -330,34 +320,6 @@ def test_repeated_paginate_relations(self) -> None: found_event_ids.reverse() self.assertEqual(found_event_ids, expected_event_ids) - # Reset and try again, but convert the tokens to the legacy format. - prev_token = "" - found_event_ids = [] - for _ in range(20): - from_token = "" - if prev_token: - from_token = "&from=" + self._stream_token_to_relation_token(prev_token) - - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1{from_token}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - - found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"]) - next_batch = channel.json_body.get("next_batch") - - self.assertNotEqual(prev_token, next_batch) - prev_token = next_batch - - if not prev_token: - break - - # We paginated backwards, so reverse - found_event_ids.reverse() - self.assertEqual(found_event_ids, expected_event_ids) - def test_pagination_from_sync_and_messages(self) -> None: """Pagination tokens from /sync and /messages can be used to paginate /relations.""" channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "A") @@ -543,39 +505,6 @@ def test_aggregation_pagination_within_group(self) -> None: found_event_ids.reverse() self.assertEqual(found_event_ids, expected_event_ids) - # Reset and try again, but convert the tokens to the legacy format. 
- prev_token = "" - found_event_ids = [] - for _ in range(20): - from_token = "" - if prev_token: - from_token = "&from=" + self._stream_token_to_relation_token(prev_token) - - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}" - f"/aggregations/{self.parent_id}/{RelationTypes.ANNOTATION}" - f"/m.reaction/{encoded_key}?limit=1{from_token}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - - self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) - - found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"]) - - next_batch = channel.json_body.get("next_batch") - - self.assertNotEqual(prev_token, next_batch) - prev_token = next_batch - - if not prev_token: - break - - # We paginated backwards, so reverse - found_event_ids.reverse() - self.assertEqual(found_event_ids, expected_event_ids) - def test_aggregation(self) -> None: """Test that annotations get correctly aggregated.""" From 158e0937eb56f14dd851549939037de499526fd2 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Fri, 4 Mar 2022 13:10:05 +0000 Subject: [PATCH 356/474] Add test for `ObservableDeferred`'s cancellation behaviour (#12149) Signed-off-by: Sean Quah --- changelog.d/12149.misc | 1 + tests/util/test_async_helpers.py | 28 ++++++++++++++++++++++++++++ 2 files changed, 29 insertions(+) create mode 100644 changelog.d/12149.misc diff --git a/changelog.d/12149.misc b/changelog.d/12149.misc new file mode 100644 index 000000000000..d39af9672365 --- /dev/null +++ b/changelog.d/12149.misc @@ -0,0 +1 @@ +Add test for `ObservableDeferred`'s cancellation behaviour. diff --git a/tests/util/test_async_helpers.py b/tests/util/test_async_helpers.py index 362014f4cb6f..ff53ce114bd7 100644 --- a/tests/util/test_async_helpers.py +++ b/tests/util/test_async_helpers.py @@ -100,6 +100,34 @@ def check_val(res, idx): self.assertEqual(str(results[0].value), "gah!", "observer 1 errback result") self.assertEqual(str(results[1].value), "gah!", "observer 2 errback result") + def test_cancellation(self): + """Test that cancelling an observer does not affect other observers.""" + origin_d: "Deferred[int]" = Deferred() + observable = ObservableDeferred(origin_d, consumeErrors=True) + + observer1 = observable.observe() + observer2 = observable.observe() + observer3 = observable.observe() + + self.assertFalse(observer1.called) + self.assertFalse(observer2.called) + self.assertFalse(observer3.called) + + # cancel the second observer + observer2.cancel() + self.assertFalse(observer1.called) + self.failureResultOf(observer2, CancelledError) + self.assertFalse(observer3.called) + + # other observers resolve as normal + origin_d.callback(123) + self.assertEqual(observer1.result, 123, "observer 1 callback result") + self.assertEqual(observer3.result, 123, "observer 3 callback result") + + # additional observers resolve as normal + observer4 = observable.observe() + self.assertEqual(observer4.result, 123, "observer 4 callback result") + class TimeoutDeferredTest(TestCase): def setUp(self): From 75574726a766f09d955c05672d400c65cb341810 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Fri, 4 Mar 2022 15:37:02 +0000 Subject: [PATCH 357/474] Add type hints for `ObservableDeferred` attributes (#12159) Signed-off-by: Sean Quah --- changelog.d/12159.misc | 1 + synapse/util/async_helpers.py | 14 +++++++++++--- 2 files changed, 12 insertions(+), 3 deletions(-) create mode 100644 
changelog.d/12159.misc diff --git a/changelog.d/12159.misc b/changelog.d/12159.misc new file mode 100644 index 000000000000..30500f2fd95d --- /dev/null +++ b/changelog.d/12159.misc @@ -0,0 +1 @@ +Add type hints for `ObservableDeferred` attributes. diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 60c03a66fd16..a9f67dcbac6a 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -40,7 +40,7 @@ ) import attr -from typing_extensions import ContextManager +from typing_extensions import ContextManager, Literal from twisted.internet import defer from twisted.internet.defer import CancelledError @@ -96,6 +96,10 @@ class ObservableDeferred(Generic[_T], AbstractObservableDeferred[_T]): __slots__ = ["_deferred", "_observers", "_result"] + _deferred: "defer.Deferred[_T]" + _observers: Union[List["defer.Deferred[_T]"], Tuple[()]] + _result: Union[None, Tuple[Literal[True], _T], Tuple[Literal[False], Failure]] + def __init__(self, deferred: "defer.Deferred[_T]", consumeErrors: bool = False): object.__setattr__(self, "_deferred", deferred) object.__setattr__(self, "_result", None) @@ -158,12 +162,14 @@ def observe(self) -> "defer.Deferred[_T]": effect the underlying deferred. """ if not self._result: + assert isinstance(self._observers, list) d: "defer.Deferred[_T]" = defer.Deferred() self._observers.append(d) return d + elif self._result[0]: + return defer.succeed(self._result[1]) else: - success, res = self._result - return defer.succeed(res) if success else defer.fail(res) + return defer.fail(self._result[1]) def observers(self) -> "Collection[defer.Deferred[_T]]": return self._observers @@ -175,6 +181,8 @@ def has_succeeded(self) -> bool: return self._result is not None and self._result[0] is True def get_result(self) -> Union[_T, Failure]: + if self._result is None: + raise ValueError(f"{self!r} has no result yet") return self._result[1] def __getattr__(self, name: str) -> Any: From 0752ab7a3621b90073f9332fbfdc8afe16a3be01 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 4 Mar 2022 17:57:27 +0000 Subject: [PATCH 358/474] Reduce to-device queries for /sync. (#12163) --- changelog.d/12163.misc | 1 + synapse/storage/databases/main/deviceinbox.py | 3 +++ 2 files changed, 4 insertions(+) create mode 100644 changelog.d/12163.misc diff --git a/changelog.d/12163.misc b/changelog.d/12163.misc new file mode 100644 index 000000000000..13de0895f5fa --- /dev/null +++ b/changelog.d/12163.misc @@ -0,0 +1 @@ +Reduce number of DB queries made during processing of `/sync`. diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 1392363de15a..b4a1b041b1f8 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -298,6 +298,9 @@ async def _get_device_messages( # This user has new messages sent to them. Query messages for them user_ids_to_query.add(user_id) + if not user_ids_to_query: + return {}, to_stream_id + def get_device_messages_txn(txn: LoggingTransaction): # Build a query to select messages from any of the given devices that # are between the given stream id bounds. From d2ef1a79cf942b2f8c1a4724eb0c4bca0b60edbe Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 4 Mar 2022 22:40:24 +0000 Subject: [PATCH 359/474] Relax version guard for packaging (#12166) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It’s just occurred to me that #12088 pulled in the “packaging” package (~=21.3). 
I pulled in the newest version I had at the time. I only use it for packaging.requirements.Requirements. Which was added in packaging 16.1: https://github.com/pypa/packaging/releases/tag/16.1 https://pkgs.org/download/python3-packaging suggests that the oldest version we care about is 17.1 in Ubuntu Bionic. So I think with this bound we're hunky dory. --- changelog.d/12166.misc | 1 + synapse/python_dependencies.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12166.misc diff --git a/changelog.d/12166.misc b/changelog.d/12166.misc new file mode 100644 index 000000000000..24b4a7c7def7 --- /dev/null +++ b/changelog.d/12166.misc @@ -0,0 +1 @@ +Relax the version guard for "packaging" added in #12088. diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 8f48a33936c1..b40a7bbb76ca 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -83,8 +83,8 @@ # ijson 3.1.4 fixes a bug with "." in property names "ijson>=3.1.4", "matrix-common~=1.1.0", - # For runtime introspection of our dependencies - "packaging~=21.3", + # We need packaging.requirements.Requirement, added in 16.1. + "packaging>=16.1", ] CONDITIONAL_REQUIREMENTS = { From 0211f18d65b20c9bd77e64f296f8790f4267cf28 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 7 Mar 2022 12:24:06 +0000 Subject: [PATCH 360/474] Switch the `tests-done` job to an Action (#12161) I've factored it out for easier use in other workflows. --- .github/workflows/tests.yml | 30 +++++++++--------------------- changelog.d/12161.misc | 1 + 2 files changed, 10 insertions(+), 21 deletions(-) create mode 100644 changelog.d/12161.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 3bce95b0e057..613a773775a2 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -388,34 +388,22 @@ jobs: tests-done: if: ${{ always() }} needs: + - check-sampleconfig - lint - lint-crlf - lint-newsfile - trial - trial-olddeps - sytest + - export-data - portdb - complement runs-on: ubuntu-latest steps: - - name: Set build result - env: - NEEDS_CONTEXT: ${{ toJSON(needs) }} - # the `jq` incantation dumps out a series of " " lines. - # we set it to an intermediate variable to avoid a pipe, which makes it - # hard to set $rc. - run: | - rc=0 - results=$(jq -r 'to_entries[] | [.key,.value.result] | join(" ")' <<< $NEEDS_CONTEXT) - while read job result ; do - # The newsfile lint may be skipped on non PR builds - if [ $result == "skipped" ] && [ $job == "lint-newsfile" ]; then - continue - fi - - if [ "$result" != "success" ]; then - echo "::set-failed ::Job $job returned $result" - rc=1 - fi - done <<< $results - exit $rc + - uses: matrix-org/done-action@v2 + with: + needs: ${{ toJSON(needs) }} + + # The newsfile lint may be skipped on non PR builds + skippable: + lint-newsfile diff --git a/changelog.d/12161.misc b/changelog.d/12161.misc new file mode 100644 index 000000000000..43eff08d467e --- /dev/null +++ b/changelog.d/12161.misc @@ -0,0 +1 @@ +Use a prebuilt Action for the `tests-done` CI job. From f63bedef07360216a8de71dc38f00f1aea503903 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 7 Mar 2022 09:00:05 -0500 Subject: [PATCH 361/474] Invalidate caches when an event with a relation is redacted. (#12121) The caches for the target of the relation must be cleared so that the bundled aggregations are re-calculated after the redaction is processed. 
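The invalidation pattern in `_handle_redact_relations` below reduces to a small sketch: look up what the redacted event related to before deleting the relation row, then drop every cache keyed on that target so the bundled aggregations get recomputed. The following is illustrative only; the cache names and the mapping-based "store" are assumptions standing in for Synapse's actual interfaces.

```python
# Illustrative reduction of the patch's idea, not Synapse code: the cache
# names and the mapping-based "store" below are stand-ins.
from typing import Dict, List, Optional


class RelationCaches:
    """Per-target caches, keyed by the event being related *to*."""

    def __init__(self) -> None:
        self.relations: Dict[str, List[str]] = {}
        self.thread_summary: Dict[str, tuple] = {}

    def invalidate(self, target_event_id: str) -> None:
        # Drop everything derived from the target's relations so the next
        # read recomputes it without the redacted child event.
        self.relations.pop(target_event_id, None)
        self.thread_summary.pop(target_event_id, None)


def on_redaction(
    caches: RelationCaches,
    relation_targets: Dict[str, str],  # child event ID -> target event ID
    redacted_event_id: str,
) -> Optional[str]:
    # Fetch the relation target *before* deleting the row, as the patch does.
    target = relation_targets.pop(redacted_event_id, None)
    if target is not None:
        caches.invalidate(target)
    return target
```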
--- changelog.d/12113.bugfix | 1 + changelog.d/12113.misc | 1 - changelog.d/12121.bugfix | 1 + synapse/storage/databases/main/cache.py | 2 + synapse/storage/databases/main/events.py | 38 ++++- tests/rest/client/test_relations.py | 207 ++++++++++++++++++----- 6 files changed, 202 insertions(+), 48 deletions(-) create mode 100644 changelog.d/12113.bugfix delete mode 100644 changelog.d/12113.misc create mode 100644 changelog.d/12121.bugfix diff --git a/changelog.d/12113.bugfix b/changelog.d/12113.bugfix new file mode 100644 index 000000000000..df9b0dc413dd --- /dev/null +++ b/changelog.d/12113.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug when redacting events with relations. diff --git a/changelog.d/12113.misc b/changelog.d/12113.misc deleted file mode 100644 index 102e064053c2..000000000000 --- a/changelog.d/12113.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor the tests for event relations. diff --git a/changelog.d/12121.bugfix b/changelog.d/12121.bugfix new file mode 100644 index 000000000000..df9b0dc413dd --- /dev/null +++ b/changelog.d/12121.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug when redacting events with relations. diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index c428dd5596af..abd54c7dc703 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -200,6 +200,8 @@ def _invalidate_caches_for_event( self.get_relations_for_event.invalidate((relates_to,)) self.get_aggregation_groups_for_event.invalidate((relates_to,)) self.get_applicable_edit.invalidate((relates_to,)) + self.get_thread_summary.invalidate((relates_to,)) + self.get_thread_participated.invalidate((relates_to,)) async def invalidate_cache_and_stream(self, cache_name: str, keys: Tuple[Any, ...]): """Invalidates the cache and adds it to the cache stream so slaves diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index ca2a9ba9d116..1dc83aa5e3a6 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1518,7 +1518,7 @@ def _update_metadata_tables_txn( ) # Remove from relations table. - self._handle_redaction(txn, event.redacts) + self._handle_redact_relations(txn, event.redacts) # Update the event_forward_extremities, event_backward_extremities and # event_edges tables. @@ -1943,15 +1943,43 @@ def _handle_batch_event(self, txn: LoggingTransaction, event: EventBase): txn.execute(sql, (batch_id,)) - def _handle_redaction(self, txn, redacted_event_id): - """Handles receiving a redaction and checking whether we need to remove - any redacted relations from the database. + def _handle_redact_relations( + self, txn: LoggingTransaction, redacted_event_id: str + ) -> None: + """Handles receiving a redaction and checking whether the redacted event + has any relations which must be removed from the database. Args: txn - redacted_event_id (str): The event that was redacted. + redacted_event_id: The event that was redacted. """ + # Fetch the current relation of the event being redacted. + redacted_relates_to = self.db_pool.simple_select_one_onecol_txn( + txn, + table="event_relations", + keyvalues={"event_id": redacted_event_id}, + retcol="relates_to_id", + allow_none=True, + ) + # Any relation information for the related event must be cleared. 
+ if redacted_relates_to is not None: + self.store._invalidate_cache_and_stream( + txn, self.store.get_relations_for_event, (redacted_relates_to,) + ) + self.store._invalidate_cache_and_stream( + txn, self.store.get_aggregation_groups_for_event, (redacted_relates_to,) + ) + self.store._invalidate_cache_and_stream( + txn, self.store.get_applicable_edit, (redacted_relates_to,) + ) + self.store._invalidate_cache_and_stream( + txn, self.store.get_thread_summary, (redacted_relates_to,) + ) + self.store._invalidate_cache_and_stream( + txn, self.store.get_thread_participated, (redacted_relates_to,) + ) + self.db_pool.simple_delete_txn( txn, table="event_relations", keyvalues={"event_id": redacted_event_id} ) diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index 274f9c44c164..a40a5de3991c 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -1273,7 +1273,21 @@ def test_background_update(self) -> None: class RelationRedactionTestCase(BaseRelationsTestCase): - """Test the behaviour of relations when the parent or child event is redacted.""" + """ + Test the behaviour of relations when the parent or child event is redacted. + + The behaviour of each relation type is subtly different which causes the tests + to be a bit repetitive, they follow a naming scheme of: + + test_redact_(relation|parent)_{relation_type} + + The first bit of "relation" means that the event with the relation defined + on it (the child event) is to be redacted. A "parent" means that the target + of the relation (the parent event) is to be redacted. + + The relation_type describes which type of relation is under test (i.e. it is + related to the value of rel_type in the event content). + """ def _redact(self, event_id: str) -> None: channel = self.make_request( @@ -1284,9 +1298,53 @@ def _redact(self, event_id: str) -> None: ) self.assertEqual(200, channel.code, channel.json_body) + def _make_relation_requests(self) -> Tuple[List[str], JsonDict]: + """ + Makes requests and ensures they result in a 200 response, returns a + tuple of results: + + 1. `/relations` -> Returns a list of event IDs. + 2. `/event` -> Returns the response's m.relations field (from unsigned), + if it exists. + """ + + # Request the relations of the event. + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}", + access_token=self.user_token, + ) + self.assertEquals(200, channel.code, channel.json_body) + event_ids = [ev["event_id"] for ev in channel.json_body["chunk"]] + + # Fetch the bundled aggregations of the event. 
+ channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/event/{self.parent_id}", + access_token=self.user_token, + ) + self.assertEquals(200, channel.code, channel.json_body) + bundled_relations = channel.json_body["unsigned"].get("m.relations", {}) + + return event_ids, bundled_relations + + def _get_aggregations(self) -> List[JsonDict]: + """Request /aggregations on the parent ID and includes the returned chunk.""" + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + return channel.json_body["chunk"] + def test_redact_relation_annotation(self) -> None: - """Test that annotations of an event are properly handled after the + """ + Test that annotations of an event are properly handled after the annotation is redacted. + + The redacted relation should not be included in bundled aggregations or + the response to relations. """ channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") self.assertEqual(200, channel.code, channel.json_body) @@ -1296,24 +1354,97 @@ def test_redact_relation_annotation(self) -> None: RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token ) self.assertEqual(200, channel.code, channel.json_body) + unredacted_event_id = channel.json_body["event_id"] + + # Both relations should exist. + event_ids, relations = self._make_relation_requests() + self.assertCountEqual(event_ids, [to_redact_event_id, unredacted_event_id]) + self.assertEquals( + relations["m.annotation"], + {"chunk": [{"type": "m.reaction", "key": "a", "count": 2}]}, + ) + + # Both relations appear in the aggregation. + chunk = self._get_aggregations() + self.assertEqual(chunk, [{"type": "m.reaction", "key": "a", "count": 2}]) # Redact one of the reactions. self._redact(to_redact_event_id) - # Ensure that the aggregations are correct. - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}", - access_token=self.user_token, + # The unredacted relation should still exist. + event_ids, relations = self._make_relation_requests() + self.assertEquals(event_ids, [unredacted_event_id]) + self.assertEquals( + relations["m.annotation"], + {"chunk": [{"type": "m.reaction", "key": "a", "count": 1}]}, + ) + + # The unredacted aggregation should still exist. + chunk = self._get_aggregations() + self.assertEqual(chunk, [{"type": "m.reaction", "key": "a", "count": 1}]) + + @unittest.override_config({"experimental_features": {"msc3440_enabled": True}}) + def test_redact_relation_thread(self) -> None: + """ + Test that thread replies are properly handled after the thread reply redacted. + + The redacted event should not be included in bundled aggregations or + the response to relations. + """ + channel = self._send_relation( + RelationTypes.THREAD, + EventTypes.Message, + content={"body": "reply 1", "msgtype": "m.text"}, ) self.assertEqual(200, channel.code, channel.json_body) + unredacted_event_id = channel.json_body["event_id"] + # Note that the *last* event in the thread is redacted, as that gets + # included in the bundled aggregation. + channel = self._send_relation( + RelationTypes.THREAD, + EventTypes.Message, + content={"body": "reply 2", "msgtype": "m.text"}, + ) + self.assertEqual(200, channel.code, channel.json_body) + to_redact_event_id = channel.json_body["event_id"] + + # Both relations exist. 
+ event_ids, relations = self._make_relation_requests() + self.assertEquals(event_ids, [to_redact_event_id, unredacted_event_id]) + self.assertDictContainsSubset( + { + "count": 2, + "current_user_participated": True, + }, + relations[RelationTypes.THREAD], + ) + # And the latest event returned is the event that will be redacted. self.assertEqual( - channel.json_body, - {"chunk": [{"type": "m.reaction", "key": "a", "count": 1}]}, + relations[RelationTypes.THREAD]["latest_event"]["event_id"], + to_redact_event_id, ) - def test_redact_relation_edit(self) -> None: + # Redact one of the reactions. + self._redact(to_redact_event_id) + + # The unredacted relation should still exist. + event_ids, relations = self._make_relation_requests() + self.assertEquals(event_ids, [unredacted_event_id]) + self.assertDictContainsSubset( + { + "count": 1, + "current_user_participated": True, + }, + relations[RelationTypes.THREAD], + ) + # And the latest event is now the unredacted event. + self.assertEqual( + relations[RelationTypes.THREAD]["latest_event"]["event_id"], + unredacted_event_id, + ) + + def test_redact_parent_edit(self) -> None: """Test that edits of an event are redacted when the original event is redacted. """ @@ -1331,34 +1462,19 @@ def test_redact_relation_edit(self) -> None: self.assertEqual(200, channel.code, channel.json_body) # Check the relation is returned - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations" - f"/{self.parent_id}/m.replace/m.room.message", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - - self.assertIn("chunk", channel.json_body) - self.assertEqual(len(channel.json_body["chunk"]), 1) + event_ids, relations = self._make_relation_requests() + self.assertEqual(len(event_ids), 1) + self.assertIn(RelationTypes.REPLACE, relations) # Redact the original event self._redact(self.parent_id) - # Try to check for remaining m.replace relations - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations" - f"/{self.parent_id}/m.replace/m.room.message", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - - # Check that no relations are returned - self.assertIn("chunk", channel.json_body) - self.assertEqual(channel.json_body["chunk"], []) + # The relations are not returned. + event_ids, relations = self._make_relation_requests() + self.assertEqual(len(event_ids), 0) + self.assertEqual(relations, {}) - def test_redact_parent(self) -> None: + def test_redact_parent_annotation(self) -> None: """Test that annotations of an event are redacted when the original event is redacted. """ @@ -1366,16 +1482,23 @@ def test_redact_parent(self) -> None: channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") self.assertEqual(200, channel.code, channel.json_body) + # The relations should exist. + event_ids, relations = self._make_relation_requests() + self.assertEqual(len(event_ids), 1) + self.assertIn(RelationTypes.ANNOTATION, relations) + + # The aggregation should exist. + chunk = self._get_aggregations() + self.assertEqual(chunk, [{"type": "m.reaction", "key": "👍", "count": 1}]) + # Redact the original event. 
self._redact(self.parent_id) - # Check that aggregations returns zero - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}/m.annotation/m.reaction", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) + # The relations are not returned. + event_ids, relations = self._make_relation_requests() + self.assertEqual(event_ids, []) + self.assertEqual(relations, {}) - self.assertIn("chunk", channel.json_body) - self.assertEqual(channel.json_body["chunk"], []) + # There's nothing to aggregate. + chunk = self._get_aggregations() + self.assertEqual(chunk, []) From 26211fec24d8d0a967de33147e148166359ec8cb Mon Sep 17 00:00:00 2001 From: Shay Date: Mon, 7 Mar 2022 09:44:33 -0800 Subject: [PATCH 362/474] Fix a bug in background updates wherein background updates are never run using the default batch size (#12157) --- changelog.d/12157.bugfix | 1 + synapse/storage/background_updates.py | 8 +++++--- tests/rest/admin/test_background_updates.py | 18 ++++++++---------- tests/storage/test_background_update.py | 4 ++-- 4 files changed, 16 insertions(+), 15 deletions(-) create mode 100644 changelog.d/12157.bugfix diff --git a/changelog.d/12157.bugfix b/changelog.d/12157.bugfix new file mode 100644 index 000000000000..c3d2e700bb1d --- /dev/null +++ b/changelog.d/12157.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in #4864 whereby background updates are never run with the default background batch size. diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index d64910aded33..4acc2c997dce 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -102,10 +102,12 @@ def average_items_per_ms(self) -> Optional[float]: Returns: A duration in ms as a float """ - if self.avg_duration_ms == 0: - return 0 - elif self.total_item_count == 0: + # We want to return None if this is the first background update item + if self.total_item_count == 0: return None + # Avoid dividing by zero + elif self.avg_duration_ms == 0: + return 0 else: # Use the exponential moving average so that we can adapt to # changes in how long the update process takes. 
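The guard-clause reordering above is subtle: a brand-new updater has both `total_item_count == 0` and `avg_duration_ms == 0`, and the old code hit the zero-duration branch first, returning `0` instead of `None`, so the caller never fell back to the default batch size. An illustrative reduction of the corrected logic follows; the argument names are stand-ins for the attributes on `BackgroundUpdatePerformance`, and the real method divides using a moving average:

```python
# Illustrative reduction of the corrected guard ordering, not the real method.
from typing import Optional


def average_items_per_ms(
    total_item_count: int, avg_duration_ms: float
) -> Optional[float]:
    if total_item_count == 0:
        # First run: no data yet. Returning None tells the caller to use the
        # default batch size; returning 0 here (the old behaviour) pinned the
        # updater to the minimum batch size forever.
        return None
    elif avg_duration_ms == 0:
        # Items were processed but took no measurable time; avoid a
        # divide-by-zero.
        return 0
    return total_item_count / avg_duration_ms
```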
diff --git a/tests/rest/admin/test_background_updates.py b/tests/rest/admin/test_background_updates.py index fb36aa994090..becec84524cb 100644 --- a/tests/rest/admin/test_background_updates.py +++ b/tests/rest/admin/test_background_updates.py @@ -155,10 +155,10 @@ def test_status_bg_update(self) -> None: "current_updates": { "master": { "name": "test_update", - "average_items_per_ms": 0.001, + "average_items_per_ms": 0.1, "total_duration_ms": 1000.0, "total_item_count": ( - BackgroundUpdater.MINIMUM_BACKGROUND_BATCH_SIZE + BackgroundUpdater.DEFAULT_BACKGROUND_BATCH_SIZE ), } }, @@ -210,10 +210,10 @@ def test_enabled(self) -> None: "current_updates": { "master": { "name": "test_update", - "average_items_per_ms": 0.001, + "average_items_per_ms": 0.1, "total_duration_ms": 1000.0, "total_item_count": ( - BackgroundUpdater.MINIMUM_BACKGROUND_BATCH_SIZE + BackgroundUpdater.DEFAULT_BACKGROUND_BATCH_SIZE ), } }, @@ -239,10 +239,10 @@ def test_enabled(self) -> None: "current_updates": { "master": { "name": "test_update", - "average_items_per_ms": 0.001, + "average_items_per_ms": 0.1, "total_duration_ms": 1000.0, "total_item_count": ( - BackgroundUpdater.MINIMUM_BACKGROUND_BATCH_SIZE + BackgroundUpdater.DEFAULT_BACKGROUND_BATCH_SIZE ), } }, @@ -278,11 +278,9 @@ def test_enabled(self) -> None: "current_updates": { "master": { "name": "test_update", - "average_items_per_ms": 0.001, + "average_items_per_ms": 0.05263157894736842, "total_duration_ms": 2000.0, - "total_item_count": ( - 2 * BackgroundUpdater.MINIMUM_BACKGROUND_BATCH_SIZE - ), + "total_item_count": (110), } }, "enabled": True, diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index 39dcc094bd8b..9fdf54ea31f9 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -66,13 +66,13 @@ async def update(progress, count): self.update_handler.reset_mock() res = self.get_success( self.updates.do_next_background_update(False), - by=0.01, + by=0.02, ) self.assertFalse(res) # on the first call, we should get run with the default background update size self.update_handler.assert_called_once_with( - {"my_key": 1}, self.updates.MINIMUM_BACKGROUND_BATCH_SIZE + {"my_key": 1}, self.updates.DEFAULT_BACKGROUND_BATCH_SIZE ) # second step: complete the update From 2eef234ae367657d4fe5cb0bef6bda67e97b7e4d Mon Sep 17 00:00:00 2001 From: reivilibre Date: Tue, 8 Mar 2022 10:47:28 +0000 Subject: [PATCH 363/474] Fix a bug introduced in 1.54.0rc1 which meant that Synapse would refuse to start if pre-release versions of dependencies were installed. (#12177) * Add failing test to characterise the regression #12176 * Permit pre-release versions of specified packages * Newsfile (bugfix) Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/12177.bugfix | 1 + synapse/util/check_dependencies.py | 3 ++- tests/util/test_check_dependencies.py | 19 +++++++++++++++++++ 3 files changed, 22 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12177.bugfix diff --git a/changelog.d/12177.bugfix b/changelog.d/12177.bugfix new file mode 100644 index 000000000000..3f2852f345b9 --- /dev/null +++ b/changelog.d/12177.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in 1.54.0rc1 which meant that Synapse would refuse to start if pre-release versions of dependencies were installed. 
\ No newline at end of file diff --git a/synapse/util/check_dependencies.py b/synapse/util/check_dependencies.py index 39b0a91db3e8..12cd8049392f 100644 --- a/synapse/util/check_dependencies.py +++ b/synapse/util/check_dependencies.py @@ -163,7 +163,8 @@ def check_requirements(extra: Optional[str] = None) -> None: deps_unfulfilled.append(requirement.name) errors.append(_not_installed(requirement, extra)) else: - if not requirement.specifier.contains(dist.version): + # We specify prereleases=True to allow prereleases such as RCs. + if not requirement.specifier.contains(dist.version, prereleases=True): deps_unfulfilled.append(requirement.name) errors.append(_incorrect_version(requirement, dist.version, extra)) diff --git a/tests/util/test_check_dependencies.py b/tests/util/test_check_dependencies.py index a91c33272f36..38e9f58ac6ca 100644 --- a/tests/util/test_check_dependencies.py +++ b/tests/util/test_check_dependencies.py @@ -27,7 +27,9 @@ def read_text(self, filename): old = DummyDistribution("0.1.2") +old_release_candidate = DummyDistribution("0.1.2rc3") new = DummyDistribution("1.2.3") +new_release_candidate = DummyDistribution("1.2.3rc4") # could probably use stdlib TestCase --- no need for twisted here @@ -110,3 +112,20 @@ def test_check_for_extra_dependencies(self) -> None: with self.mock_installed_package(new): # should not raise check_requirements("cool-extra") + + def test_release_candidates_satisfy_dependency(self) -> None: + """ + Tests that release candidates count as far as satisfying a dependency + is concerned. + (Regression test, see #12176.) + """ + with patch( + "synapse.util.check_dependencies.metadata.requires", + return_value=["dummypkg >= 1"], + ): + with self.mock_installed_package(old_release_candidate): + self.assertRaises(DependencyException, check_requirements) + + with self.mock_installed_package(new_release_candidate): + # should not raise + check_requirements() From ea992adf867c0c74dccfd6a40e0f73933ccf2899 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 8 Mar 2022 10:55:26 +0000 Subject: [PATCH 364/474] 1.54.0 --- CHANGES.md | 18 ++++++++++++++++++ changelog.d/12127.misc | 1 - changelog.d/12129.misc | 1 - changelog.d/12141.bugfix | 1 - changelog.d/12166.misc | 1 - changelog.d/12177.bugfix | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 8 files changed, 25 insertions(+), 6 deletions(-) delete mode 100644 changelog.d/12127.misc delete mode 100644 changelog.d/12129.misc delete mode 100644 changelog.d/12141.bugfix delete mode 100644 changelog.d/12166.misc delete mode 100644 changelog.d/12177.bugfix diff --git a/CHANGES.md b/CHANGES.md index 0a87f5cd42a7..9d27cd35aa5c 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,21 @@ +Synapse 1.54.0 (2022-03-08) +=========================== + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.54.0rc1 preventing the new module callbacks introduced in this release from being registered by modules. ([\#12141](https://github.com/matrix-org/synapse/issues/12141)) +- Fix a bug introduced in Synapse 1.54.0rc1 which meant that Synapse would refuse to start if pre-release versions of dependencies were installed. ([\#12177](https://github.com/matrix-org/synapse/issues/12177)) + + +Internal Changes +---------------- + +- Update release script to insert the previous version when writing "No significant changes" line in the changelog. ([\#12127](https://github.com/matrix-org/synapse/issues/12127)) +- Inspect application dependencies using `importlib.metadata` or its backport. 
([\#12129](https://github.com/matrix-org/synapse/issues/12129)) +- Relax the version guard for "packaging" added in #12088. ([\#12166](https://github.com/matrix-org/synapse/issues/12166)) + + Synapse 1.54.0rc1 (2022-03-02) ============================== diff --git a/changelog.d/12127.misc b/changelog.d/12127.misc deleted file mode 100644 index e42eca63a873..000000000000 --- a/changelog.d/12127.misc +++ /dev/null @@ -1 +0,0 @@ -Update release script to insert the previous version when writing "No significant changes" line in the changelog. diff --git a/changelog.d/12129.misc b/changelog.d/12129.misc deleted file mode 100644 index ce4213650c5e..000000000000 --- a/changelog.d/12129.misc +++ /dev/null @@ -1 +0,0 @@ -Inspect application dependencies using `importlib.metadata` or its backport. \ No newline at end of file diff --git a/changelog.d/12141.bugfix b/changelog.d/12141.bugfix deleted file mode 100644 index 03a2507e2ce6..000000000000 --- a/changelog.d/12141.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.54.0rc1 preventing the new module callbacks introduced in this release from being registered by modules. diff --git a/changelog.d/12166.misc b/changelog.d/12166.misc deleted file mode 100644 index 24b4a7c7def7..000000000000 --- a/changelog.d/12166.misc +++ /dev/null @@ -1 +0,0 @@ -Relax the version guard for "packaging" added in #12088. diff --git a/changelog.d/12177.bugfix b/changelog.d/12177.bugfix deleted file mode 100644 index 3f2852f345b9..000000000000 --- a/changelog.d/12177.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in 1.54.0rc1 which meant that Synapse would refuse to start if pre-release versions of dependencies were installed. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index df3db85b8e77..02136a0d606f 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.54.0) stable; urgency=medium + + * New synapse release 1.54.0. + + -- Synapse Packaging team Tue, 08 Mar 2022 10:54:52 +0000 + matrix-synapse-py3 (1.54.0~rc1) stable; urgency=medium * New synapse release 1.54.0~rc1. diff --git a/synapse/__init__.py b/synapse/__init__.py index b21e1ed0f342..c6727024f026 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.54.0rc1" +__version__ = "1.54.0" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 094802e04e6dc174040e2b050419df124cf2ba00 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 8 Mar 2022 10:58:10 +0000 Subject: [PATCH 365/474] Shift up warning about Mjolnir --- CHANGES.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 9d27cd35aa5c..d9ea9fb46c68 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,10 @@ Synapse 1.54.0 (2022-03-08) =========================== +Please note that this will be the last release of Synapse that is compatible with Mjolnir 1.3.1 and earlier. +Administrators of servers which have the Mjolnir module installed are advised to upgrade Mjolnir to version 1.3.2 or later. + + Bugfixes -------- @@ -19,9 +23,6 @@ Internal Changes Synapse 1.54.0rc1 (2022-03-02) ============================== -Please note that this will be the last release of Synapse that is compatible with Mjolnir 1.3.1 and earlier. -Administrators of servers which have the Mjolnir module installed are advised to upgrade Mjolnir to version 1.3.2 or later. 
- Features -------- From d8bab6793c75774db4bde8aeec6897b607e08799 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 8 Mar 2022 07:26:05 -0500 Subject: [PATCH 366/474] Fix incorrect type hints for txredis. (#12042) Some properties were marked as RedisProtocol instead of ConnectionHandler, which wraps RedisProtocol instance(s). --- changelog.d/12042.misc | 1 + stubs/txredisapi.pyi | 9 ++++++--- synapse/replication/tcp/external_cache.py | 4 ++-- synapse/replication/tcp/redis.py | 6 +++--- synapse/server.py | 4 ++-- 5 files changed, 14 insertions(+), 10 deletions(-) create mode 100644 changelog.d/12042.misc diff --git a/changelog.d/12042.misc b/changelog.d/12042.misc new file mode 100644 index 000000000000..6ecdc960210c --- /dev/null +++ b/changelog.d/12042.misc @@ -0,0 +1 @@ +Correct type hints for txredis. diff --git a/stubs/txredisapi.pyi b/stubs/txredisapi.pyi index 429234d7ae7f..2d8ca018fbfc 100644 --- a/stubs/txredisapi.pyi +++ b/stubs/txredisapi.pyi @@ -20,7 +20,7 @@ from twisted.internet import protocol from twisted.internet.defer import Deferred class RedisProtocol(protocol.Protocol): - def publish(self, channel: str, message: bytes): ... + def publish(self, channel: str, message: bytes) -> "Deferred[None]": ... def ping(self) -> "Deferred[None]": ... def set( self, @@ -52,11 +52,14 @@ def lazyConnection( convertNumbers: bool = ..., ) -> RedisProtocol: ... -class ConnectionHandler: ... +# ConnectionHandler doesn't actually inherit from RedisProtocol, but it proxies +# most methods to it via ConnectionHandler.__getattr__. +class ConnectionHandler(RedisProtocol): + def disconnect(self) -> "Deferred[None]": ... class RedisFactory(protocol.ReconnectingClientFactory): continueTrying: bool - handler: RedisProtocol + handler: ConnectionHandler pool: List[RedisProtocol] replyTimeout: Optional[int] def __init__( diff --git a/synapse/replication/tcp/external_cache.py b/synapse/replication/tcp/external_cache.py index aaf91e5e0253..bf7d017968f9 100644 --- a/synapse/replication/tcp/external_cache.py +++ b/synapse/replication/tcp/external_cache.py @@ -21,7 +21,7 @@ from synapse.util import json_decoder, json_encoder if TYPE_CHECKING: - from txredisapi import RedisProtocol + from txredisapi import ConnectionHandler from synapse.server import HomeServer @@ -63,7 +63,7 @@ class ExternalCache: def __init__(self, hs: "HomeServer"): if hs.config.redis.redis_enabled: self._redis_connection: Optional[ - "RedisProtocol" + "ConnectionHandler" ] = hs.get_outbound_redis_connection() else: self._redis_connection = None diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py index 3170f7c59b04..b84e572da136 100644 --- a/synapse/replication/tcp/redis.py +++ b/synapse/replication/tcp/redis.py @@ -93,7 +93,7 @@ class RedisSubscriber(txredisapi.SubscriberProtocol): synapse_handler: "ReplicationCommandHandler" synapse_stream_name: str - synapse_outbound_redis_connection: txredisapi.RedisProtocol + synapse_outbound_redis_connection: txredisapi.ConnectionHandler def __init__(self, *args: Any, **kwargs: Any): super().__init__(*args, **kwargs) @@ -313,7 +313,7 @@ class RedisDirectTcpReplicationClientFactory(SynapseRedisFactory): protocol = RedisSubscriber def __init__( - self, hs: "HomeServer", outbound_redis_connection: txredisapi.RedisProtocol + self, hs: "HomeServer", outbound_redis_connection: txredisapi.ConnectionHandler ): super().__init__( @@ -353,7 +353,7 @@ def lazyConnection( reconnect: bool = True, password: Optional[str] = None, replyTimeout: int = 30, -) -> 
txredisapi.RedisProtocol: +) -> txredisapi.ConnectionHandler: """Creates a connection to Redis that is lazily set up and reconnects if the connections is lost. """ diff --git a/synapse/server.py b/synapse/server.py index b5e2a319bcef..46a64418ea0c 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -145,7 +145,7 @@ logger = logging.getLogger(__name__) if TYPE_CHECKING: - from txredisapi import RedisProtocol + from txredisapi import ConnectionHandler from synapse.handlers.oidc import OidcHandler from synapse.handlers.saml import SamlHandler @@ -807,7 +807,7 @@ def get_account_handler(self) -> AccountHandler: return AccountHandler(self) @cache_in_self - def get_outbound_redis_connection(self) -> "RedisProtocol": + def get_outbound_redis_connection(self) -> "ConnectionHandler": """ The Redis connection used for replication. From ca9234a9eba4fba02d8d50e5d5eff079bfaf0ebd Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 8 Mar 2022 08:09:11 -0500 Subject: [PATCH 367/474] Do not return allowed_room_ids from /hierarchy response. (#12175) This field is only to be used in the Server-Server API, and not the Client-Server API, but was being leaked when a federation response was used in the /hierarchy API. --- changelog.d/12175.bugfix | 1 + synapse/handlers/room_summary.py | 15 +++++++++++++-- tests/handlers/test_room_summary.py | 3 +++ 3 files changed, 17 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12175.bugfix diff --git a/changelog.d/12175.bugfix b/changelog.d/12175.bugfix new file mode 100644 index 000000000000..881cb9b76c20 --- /dev/null +++ b/changelog.d/12175.bugfix @@ -0,0 +1 @@ +Fix a bug where non-standard information was returned from the `/hierarchy` API. Introduced in Synapse v1.41.0. diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py index 3979cbba71bd..486145f48aca 100644 --- a/synapse/handlers/room_summary.py +++ b/synapse/handlers/room_summary.py @@ -295,7 +295,7 @@ async def _get_room_hierarchy( # inaccessible to the requesting user. if room_entry: # Add the room (including the stripped m.space.child events). - rooms_result.append(room_entry.as_json()) + rooms_result.append(room_entry.as_json(for_client=True)) # If this room is not at the max-depth, check if there are any # children to process. @@ -843,14 +843,25 @@ class _RoomEntry: # This may not include all children. children_state_events: Sequence[JsonDict] = () - def as_json(self) -> JsonDict: + def as_json(self, for_client: bool = False) -> JsonDict: """ Returns a JSON dictionary suitable for the room hierarchy endpoint. It returns the room summary including the stripped m.space.child events as a sub-key. + + Args: + for_client: If true, any server-server only fields are stripped from + the result. + """ result = dict(self.room) + + # Before returning to the client, remove the allowed_room_ids key, if it + # exists. + if for_client: + result.pop("allowed_room_ids", False) + result["children_state"] = self.children_state_events return result diff --git a/tests/handlers/test_room_summary.py b/tests/handlers/test_room_summary.py index cff07a8973b3..d37292ce138e 100644 --- a/tests/handlers/test_room_summary.py +++ b/tests/handlers/test_room_summary.py @@ -172,6 +172,9 @@ def _assert_hierarchy( result_room_ids = [] result_children_ids = [] for result_room in result["rooms"]: + # Ensure federation results are not leaking over the client-server API. 
+ self.assertNotIn("allowed_room_ids", result_room) + result_room_ids.append(result_room["room_id"]) result_children_ids.append( [ From 2ce27a24fe29104ca54e0a879c7ad37d88a3fc69 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 8 Mar 2022 13:23:18 +0000 Subject: [PATCH 368/474] Add experimental environment variable to enable asyncio reactor (#12135) --- changelog.d/12135.feature | 1 + mypy.ini | 3 +++ synapse/__init__.py | 21 +++++++++++++++++++++ 3 files changed, 25 insertions(+) create mode 100644 changelog.d/12135.feature diff --git a/changelog.d/12135.feature b/changelog.d/12135.feature new file mode 100644 index 000000000000..b337f51730e6 --- /dev/null +++ b/changelog.d/12135.feature @@ -0,0 +1 @@ +Add experimental env var `SYNAPSE_ASYNC_IO_REACTOR` that causes Synapse to use the asyncio reactor for Twisted. diff --git a/mypy.ini b/mypy.ini index 481e8a5366b0..c8390ddba96d 100644 --- a/mypy.ini +++ b/mypy.ini @@ -353,3 +353,6 @@ ignore_missing_imports = True [mypy-zope] ignore_missing_imports = True + +[mypy-incremental.*] +ignore_missing_imports = True diff --git a/synapse/__init__.py b/synapse/__init__.py index b21e1ed0f342..674acc713503 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -25,6 +25,27 @@ print("Synapse requires Python 3.7 or above.") sys.exit(1) +# Allow using the asyncio reactor via env var. +if bool(os.environ.get("SYNAPSE_ASYNC_IO_REACTOR", False)): + try: + from incremental import Version + + import twisted + + # We need a bugfix that is included in Twisted 21.2.0: + # https://twistedmatrix.com/trac/ticket/9787 + if twisted.version < Version("Twisted", 21, 2, 0): + print("Using asyncio reactor requires Twisted>=21.2.0") + sys.exit(1) + + import asyncio + + from twisted.internet import asyncioreactor + + asyncioreactor.install(asyncio.get_event_loop()) + except ImportError: + pass + # Twisted and canonicaljson will fail to import when this file is executed to # get the __version__ during a fresh install. That's OK and subsequent calls to # actually start Synapse will import these libraries fine. From 65e02b3e6d286a3f04d71286395523d9b2feeee9 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 8 Mar 2022 14:00:16 +0000 Subject: [PATCH 369/474] Tweak changelog formatting --- CHANGES.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index d9ea9fb46c68..1c8a52cf1ff6 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -9,15 +9,14 @@ Bugfixes -------- - Fix a bug introduced in Synapse 1.54.0rc1 preventing the new module callbacks introduced in this release from being registered by modules. ([\#12141](https://github.com/matrix-org/synapse/issues/12141)) -- Fix a bug introduced in Synapse 1.54.0rc1 which meant that Synapse would refuse to start if pre-release versions of dependencies were installed. ([\#12177](https://github.com/matrix-org/synapse/issues/12177)) +- Fix a bug introduced in Synapse 1.54.0rc1 where runtime dependency version checks would mistakenly check development dependencies if they were present and would not accept pre-release versions of dependencies. ([\#12129](https://github.com/matrix-org/synapse/issues/12129), [\#12177](https://github.com/matrix-org/synapse/issues/12177)) Internal Changes ---------------- - Update release script to insert the previous version when writing "No significant changes" line in the changelog. ([\#12127](https://github.com/matrix-org/synapse/issues/12127)) -- Inspect application dependencies using `importlib.metadata` or its backport. 
([\#12129](https://github.com/matrix-org/synapse/issues/12129)) -- Relax the version guard for "packaging" added in #12088. ([\#12166](https://github.com/matrix-org/synapse/issues/12166)) +- Relax the version guard for "packaging" added in ([\#12088](https://github.com/matrix-org/synapse/issues/12088)). ([\#12166](https://github.com/matrix-org/synapse/issues/12166)) Synapse 1.54.0rc1 (2022-03-02) From b1989ced00cc0bc6214bfd1a393c7e8f8eda660c Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 8 Mar 2022 14:01:19 +0000 Subject: [PATCH 370/474] Fix silly markdown typo --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 1c8a52cf1ff6..ef671e73f178 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -16,7 +16,7 @@ Internal Changes ---------------- - Update release script to insert the previous version when writing "No significant changes" line in the changelog. ([\#12127](https://github.com/matrix-org/synapse/issues/12127)) -- Relax the version guard for "packaging" added in ([\#12088](https://github.com/matrix-org/synapse/issues/12088)). ([\#12166](https://github.com/matrix-org/synapse/issues/12166)) +- Relax the version guard for "packaging" added in [\#12088](https://github.com/matrix-org/synapse/issues/12088). ([\#12166](https://github.com/matrix-org/synapse/issues/12166)) Synapse 1.54.0rc1 (2022-03-02) From bfa7d6b03588ec3eca488f404d8fcac38f9e6427 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Tue, 8 Mar 2022 15:11:50 +0000 Subject: [PATCH 371/474] Fix CI not attaching source distributions and wheels to the GitHub releases. (#12131) --- .github/workflows/release-artifacts.yml | 3 ++- changelog.d/12131.misc | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12131.misc diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index 65ea761ad713..ed4fc6179db3 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -112,7 +112,8 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: files: | - python-dist/* + Sdist/* + Wheel/* debs.tar.xz # if it's not already published, keep the release as a draft. draft: true diff --git a/changelog.d/12131.misc b/changelog.d/12131.misc new file mode 100644 index 000000000000..8ef23c22d524 --- /dev/null +++ b/changelog.d/12131.misc @@ -0,0 +1 @@ +Fix CI not attaching source distributions and wheels to the GitHub releases. \ No newline at end of file From 562718278847375636ead2ed3afcc9d9d482ef96 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Tue, 8 Mar 2022 15:58:14 +0000 Subject: [PATCH 372/474] Use `ParamSpec` in type hints for `synapse.logging.context` (#12150) Signed-off-by: Sean Quah --- changelog.d/12150.misc | 1 + synapse/handlers/initial_sync.py | 5 +-- synapse/logging/context.py | 44 ++++++++++++----------- synapse/python_dependencies.py | 3 +- synapse/rest/media/v1/storage_provider.py | 9 +++-- 5 files changed, 37 insertions(+), 25 deletions(-) create mode 100644 changelog.d/12150.misc diff --git a/changelog.d/12150.misc b/changelog.d/12150.misc new file mode 100644 index 000000000000..2d2706dac769 --- /dev/null +++ b/changelog.d/12150.misc @@ -0,0 +1 @@ +Use `ParamSpec` in type hints for `synapse.logging.context`. 
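For readers unfamiliar with `ParamSpec`: it lets a wrapper such as `preserve_fn` or `run_in_background` keep the wrapped function's exact signature, where `*args: Any, **kwargs: Any` would erase it. A minimal, Synapse-independent illustration of the pattern used in the diff below:

```python
# Toy example of the ParamSpec pattern, independent of Synapse and Twisted.
from typing import Callable, TypeVar

from typing_extensions import ParamSpec

P = ParamSpec("P")
R = TypeVar("R")


def logged(f: Callable[P, R]) -> Callable[P, R]:
    # P.args / P.kwargs tie the wrapper's parameters to f's parameters.
    def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
        print(f"calling {f.__name__}")
        return f(*args, **kwargs)

    return wrapper


@logged
def add(a: int, b: int) -> int:
    return a + b


add(1, 2)  # fine
# add("1", 2) would now be rejected by mypy, unlike with (*args: Any).
```

This is also why the patch bumps the `typing-extensions` floor to 3.10.0, the first release to ship `ParamSpec`.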
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 316cfae24ff0..a7db8feb57eb 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -153,8 +153,9 @@ async def _snapshot_all_rooms( public_room_ids = await self.store.get_public_room_ids() - limit = pagin_config.limit - if limit is None: + if pagin_config.limit is not None: + limit = pagin_config.limit + else: limit = 10 serializer_options = SerializeEventConfig(as_client_event=as_client_event) diff --git a/synapse/logging/context.py b/synapse/logging/context.py index c31c2960ad95..88cd8a9e1c39 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -29,7 +29,6 @@ from types import TracebackType from typing import ( TYPE_CHECKING, - Any, Awaitable, Callable, Optional, @@ -41,7 +40,7 @@ ) import attr -from typing_extensions import Literal +from typing_extensions import Literal, ParamSpec from twisted.internet import defer, threads from twisted.python.threadpool import ThreadPool @@ -719,32 +718,33 @@ def nested_logging_context(suffix: str) -> LoggingContext: ) +P = ParamSpec("P") R = TypeVar("R") @overload def preserve_fn( # type: ignore[misc] - f: Callable[..., Awaitable[R]], -) -> Callable[..., "defer.Deferred[R]"]: + f: Callable[P, Awaitable[R]], +) -> Callable[P, "defer.Deferred[R]"]: # The `type: ignore[misc]` above suppresses # "Overloaded function signatures 1 and 2 overlap with incompatible return types" ... @overload -def preserve_fn(f: Callable[..., R]) -> Callable[..., "defer.Deferred[R]"]: +def preserve_fn(f: Callable[P, R]) -> Callable[P, "defer.Deferred[R]"]: ... def preserve_fn( f: Union[ - Callable[..., R], - Callable[..., Awaitable[R]], + Callable[P, R], + Callable[P, Awaitable[R]], ] -) -> Callable[..., "defer.Deferred[R]"]: +) -> Callable[P, "defer.Deferred[R]"]: """Function decorator which wraps the function with run_in_background""" - def g(*args: Any, **kwargs: Any) -> "defer.Deferred[R]": + def g(*args: P.args, **kwargs: P.kwargs) -> "defer.Deferred[R]": return run_in_background(f, *args, **kwargs) return g @@ -752,7 +752,7 @@ def g(*args: Any, **kwargs: Any) -> "defer.Deferred[R]": @overload def run_in_background( # type: ignore[misc] - f: Callable[..., Awaitable[R]], *args: Any, **kwargs: Any + f: Callable[P, Awaitable[R]], *args: P.args, **kwargs: P.kwargs ) -> "defer.Deferred[R]": # The `type: ignore[misc]` above suppresses # "Overloaded function signatures 1 and 2 overlap with incompatible return types" @@ -761,18 +761,22 @@ def run_in_background( # type: ignore[misc] @overload def run_in_background( - f: Callable[..., R], *args: Any, **kwargs: Any + f: Callable[P, R], *args: P.args, **kwargs: P.kwargs ) -> "defer.Deferred[R]": ... -def run_in_background( +def run_in_background( # type: ignore[misc] + # The `type: ignore[misc]` above suppresses + # "Overloaded function implementation does not accept all possible arguments of signature 1" + # "Overloaded function implementation does not accept all possible arguments of signature 2" + # which seems like a bug in mypy. 
f: Union[ - Callable[..., R], - Callable[..., Awaitable[R]], + Callable[P, R], + Callable[P, Awaitable[R]], ], - *args: Any, - **kwargs: Any, + *args: P.args, + **kwargs: P.kwargs, ) -> "defer.Deferred[R]": """Calls a function, ensuring that the current context is restored after return from the function, and that the sentinel context is set once the @@ -872,7 +876,7 @@ def _set_context_cb(result: ResultT, context: LoggingContext) -> ResultT: def defer_to_thread( - reactor: "ISynapseReactor", f: Callable[..., R], *args: Any, **kwargs: Any + reactor: "ISynapseReactor", f: Callable[P, R], *args: P.args, **kwargs: P.kwargs ) -> "defer.Deferred[R]": """ Calls the function `f` using a thread from the reactor's default threadpool and @@ -908,9 +912,9 @@ def defer_to_thread( def defer_to_threadpool( reactor: "ISynapseReactor", threadpool: ThreadPool, - f: Callable[..., R], - *args: Any, - **kwargs: Any, + f: Callable[P, R], + *args: P.args, + **kwargs: P.kwargs, ) -> "defer.Deferred[R]": """ A wrapper for twisted.internet.threads.deferToThreadpool, which handles diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index b40a7bbb76ca..1dd39f06cffb 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -76,7 +76,8 @@ "netaddr>=0.7.18", "Jinja2>=2.9", "bleach>=1.4.3", - "typing-extensions>=3.7.4", + # We use `ParamSpec`, which was added in `typing-extensions` 3.10.0.0. + "typing-extensions>=3.10.0", # We enforce that we have a `cryptography` version that bundles an `openssl` # with the latest security patches. "cryptography>=3.4.7", diff --git a/synapse/rest/media/v1/storage_provider.py b/synapse/rest/media/v1/storage_provider.py index 18bf977d3d9f..1c9b71d69c77 100644 --- a/synapse/rest/media/v1/storage_provider.py +++ b/synapse/rest/media/v1/storage_provider.py @@ -16,7 +16,7 @@ import logging import os import shutil -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING, Callable, Optional from synapse.config._base import Config from synapse.logging.context import defer_to_thread, run_in_background @@ -150,8 +150,13 @@ async def store_file(self, path: str, file_info: FileInfo) -> None: dirname = os.path.dirname(backup_fname) os.makedirs(dirname, exist_ok=True) + # mypy needs help inferring the type of the second parameter, which is generic + shutil_copyfile: Callable[[str, str], str] = shutil.copyfile await defer_to_thread( - self.hs.get_reactor(), shutil.copyfile, primary_fname, backup_fname + self.hs.get_reactor(), + shutil_copyfile, + primary_fname, + backup_fname, ) async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]: From 9a0172d49f3da46c615304c7df3353494500fd49 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 8 Mar 2022 15:02:59 -0500 Subject: [PATCH 373/474] Clean-up demo scripts & documentation (#12143) * Rewrites the demo documentation to be clearer, accurate, and moves it to our documentation tree. * Improvements to the demo scripts: * `clean.sh` now runs `stop.sh` first to avoid zombie processes. * Uses more modern Synapse configuration (and removes some obsolete configuration). * Consistently use the HTTP ports for server name, etc. * Remove the `demo/etc` directory and place everything into the `demo/808x` directories. 
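Once `./demo/start.sh` has run, the three homeservers can be smoke-tested in a few lines of Python. This is an illustrative check, not part of the patch: it assumes the default plain-HTTP ports configured by the script and uses the unauthenticated `/_matrix/client/versions` endpoint.

```python
# Smoke-test the three demo homeservers over their plain-HTTP listeners.
# Assumes `./demo/start.sh` has been run with the default ports.
import json
from urllib.request import urlopen

for port in (8080, 8081, 8082):
    with urlopen(f"http://localhost:{port}/_matrix/client/versions") as resp:
        versions = json.load(resp)["versions"]
        print(f"localhost:{port} supports up to {versions[-1]}")
```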
--- README.rst | 3 ++ changelog.d/12143.doc | 1 + demo/.gitignore | 11 +++---- demo/README | 26 --------------- demo/clean.sh | 3 ++ demo/start.sh | 71 +++++++++++++++++++--------------------- docs/SUMMARY.md | 1 + docs/development/demo.md | 41 +++++++++++++++++++++++ docs/federate.md | 3 +- 9 files changed, 89 insertions(+), 71 deletions(-) create mode 100644 changelog.d/12143.doc delete mode 100644 demo/README create mode 100644 docs/development/demo.md diff --git a/README.rst b/README.rst index 4281c87d1f80..595fb5ff62a6 100644 --- a/README.rst +++ b/README.rst @@ -312,6 +312,9 @@ We recommend using the demo which starts 3 federated instances running on ports (to stop, you can use `./demo/stop.sh`) +See the [demo documentation](https://matrix-org.github.io/synapse/develop/development/demo.html) +for more information. + If you just want to start a single instance of the app and run it directly:: # Create the homeserver.yaml config once diff --git a/changelog.d/12143.doc b/changelog.d/12143.doc new file mode 100644 index 000000000000..4b9db74b1fc9 --- /dev/null +++ b/changelog.d/12143.doc @@ -0,0 +1 @@ +Improve documentation for demo scripts. diff --git a/demo/.gitignore b/demo/.gitignore index 4d1271234312..5663aba8e758 100644 --- a/demo/.gitignore +++ b/demo/.gitignore @@ -1,7 +1,4 @@ -*.db -*.log -*.log.* -*.pid - -/media_store.* -/etc +# Ignore all the temporary files from the demo servers. +8080/ +8081/ +8082/ diff --git a/demo/README b/demo/README deleted file mode 100644 index a5a95bd19666..000000000000 --- a/demo/README +++ /dev/null @@ -1,26 +0,0 @@ -DO NOT USE THESE DEMO SERVERS IN PRODUCTION - -Requires you to have done: - python setup.py develop - - -The demo start.sh will start three synapse servers on ports 8080, 8081 and 8082, with host names localhost:$port. This can be easily changed to `hostname`:$port in start.sh if required. - -To enable the servers to communicate untrusted ssl certs are used. In order to do this the servers do not check the certs -and are configured in a highly insecure way. Do not use these configuration files in production. - -stop.sh will stop the synapse servers and the webclient. - -clean.sh will delete the databases and log files. - -To start a completely new set of servers, run: - - ./demo/stop.sh; ./demo/clean.sh && ./demo/start.sh - - -Logs and sqlitedb will be stored in demo/808{0,1,2}.{log,db} - - - -Also note that when joining a public room on a different HS via "#foo:bar.net", then you are (in the current impl) joining a room with room_id "foo". This means that it won't work if your HS already has a room with that name. - diff --git a/demo/clean.sh b/demo/clean.sh index e9b440d90dfd..7f1e1920215a 100755 --- a/demo/clean.sh +++ b/demo/clean.sh @@ -4,6 +4,9 @@ set -e DIR="$( cd "$( dirname "$0" )" && pwd )" +# Ensure that the servers are stopped. +$DIR/stop.sh + PID_FILE="$DIR/servers.pid" if [ -f "$PID_FILE" ]; then diff --git a/demo/start.sh b/demo/start.sh index 8ffb14e30add..55e69685e3c2 100755 --- a/demo/start.sh +++ b/demo/start.sh @@ -6,8 +6,6 @@ CWD=$(pwd) cd "$DIR/.." || exit -mkdir -p demo/etc - PYTHONPATH=$(readlink -f "$(pwd)") export PYTHONPATH @@ -21,22 +19,26 @@ for port in 8080 8081 8082; do mkdir -p demo/$port pushd demo/$port || exit - #rm $DIR/etc/$port.config + # Generate the configuration for the homeserver at localhost:848x. 
python3 -m synapse.app.homeserver \ --generate-config \ - -H "localhost:$https_port" \ - --config-path "$DIR/etc/$port.config" \ + --server-name "localhost:$port" \ + --config-path "$port.config" \ --report-stats no - if ! grep -F "Customisation made by demo/start.sh" -q "$DIR/etc/$port.config"; then - # Generate tls keys - openssl req -x509 -newkey rsa:4096 -keyout "$DIR/etc/localhost:$https_port.tls.key" -out "$DIR/etc/localhost:$https_port.tls.crt" -days 365 -nodes -subj "/O=matrix" + if ! grep -F "Customisation made by demo/start.sh" -q "$port.config"; then + # Generate TLS keys. + openssl req -x509 -newkey rsa:4096 \ + -keyout "localhost:$port.tls.key" \ + -out "localhost:$port.tls.crt" \ + -days 365 -nodes -subj "/O=matrix" - # Regenerate configuration + # Add customisations to the configuration. { - printf '\n\n# Customisation made by demo/start.sh\n' + printf '\n\n# Customisation made by demo/start.sh\n\n' echo "public_baseurl: http://localhost:$port/" echo 'enable_registration: true' + echo '' # Warning, this heredoc depends on the interaction of tabs and spaces. # Please don't accidentaly bork me with your fancy settings. @@ -63,38 +65,34 @@ for port in 8080 8081 8082; do echo "${listeners}" - # Disable tls for the servers - printf '\n\n# Disable tls on the servers.' + # Disable TLS for the servers + printf '\n\n# Disable TLS for the servers.' echo '# DO NOT USE IN PRODUCTION' echo 'use_insecure_ssl_client_just_for_testing_do_not_use: true' echo 'federation_verify_certificates: false' - # Set tls paths - echo "tls_certificate_path: \"$DIR/etc/localhost:$https_port.tls.crt\"" - echo "tls_private_key_path: \"$DIR/etc/localhost:$https_port.tls.key\"" + # Set paths for the TLS certificates. + echo "tls_certificate_path: \"$DIR/$port/localhost:$port.tls.crt\"" + echo "tls_private_key_path: \"$DIR/$port/localhost:$port.tls.key\"" # Ignore keys from the trusted keys server echo '# Ignore keys from the trusted keys server' echo 'trusted_key_servers:' echo ' - server_name: "matrix.org"' echo ' accept_keys_insecurely: true' - - # Reduce the blacklist - blacklist=$(cat <<-BLACK - # Set the blacklist so that it doesn't include 127.0.0.1, ::1 - federation_ip_range_blacklist: - - '10.0.0.0/8' - - '172.16.0.0/12' - - '192.168.0.0/16' - - '100.64.0.0/10' - - '169.254.0.0/16' - - 'fe80::/64' - - 'fc00::/7' - BLACK + echo '' + + # Allow the servers to communicate over localhost. + allow_list=$(cat <<-ALLOW_LIST + # Allow the servers to communicate over localhost. + ip_range_whitelist: + - '127.0.0.1/8' + - '::1/128' + ALLOW_LIST ) - echo "${blacklist}" - } >> "$DIR/etc/$port.config" + echo "${allow_list}" + } >> "$port.config" fi # Check script parameters @@ -141,19 +139,18 @@ for port in 8080 8081 8082; do burst_count: 1000 RC ) - echo "${ratelimiting}" >> "$DIR/etc/$port.config" + echo "${ratelimiting}" >> "$port.config" fi fi - if ! grep -F "full_twisted_stacktraces" -q "$DIR/etc/$port.config"; then - echo "full_twisted_stacktraces: true" >> "$DIR/etc/$port.config" - fi - if ! grep -F "report_stats" -q "$DIR/etc/$port.config" ; then - echo "report_stats: false" >> "$DIR/etc/$port.config" + # Always disable reporting of stats if the option is not there. + if ! grep -F "report_stats" -q "$port.config" ; then + echo "report_stats: false" >> "$port.config" fi + # Run the homeserver in the background. 
python3 -m synapse.app.homeserver \ - --config-path "$DIR/etc/$port.config" \ + --config-path "$port.config" \ -D \ popd || exit diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index ef9cabf55524..21f80efc9998 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -82,6 +82,7 @@ - [Release Cycle](development/releases.md) - [Git Usage](development/git.md) - [Testing]() + - [Demo scripts](development/demo.md) - [OpenTracing](opentracing.md) - [Database Schemas](development/database_schema.md) - [Experimental features](development/experimental_features.md) diff --git a/docs/development/demo.md b/docs/development/demo.md new file mode 100644 index 000000000000..4277252ceb60 --- /dev/null +++ b/docs/development/demo.md @@ -0,0 +1,41 @@ +# Synapse demo setup + +**DO NOT USE THESE DEMO SERVERS IN PRODUCTION** + +Requires you to have a [Synapse development environment set up](https://matrix-org.github.io/synapse/develop/development/contributing_guide.html#4-install-the-dependencies). + +The demo setup allows running three federated Synapse servers, with server +names `localhost:8080`, `localhost:8081`, and `localhost:8082`. + +You can access them via any Matrix client over HTTP at `localhost:8080`, +`localhost:8081`, and `localhost:8082` or over HTTPS at `localhost:8480`, +`localhost:8481`, and `localhost:8482`. + +To enable the servers to communicate, self-signed SSL certificates are generated +and the servers are configured in a highly insecure way, including: + +* Not checking certificates over federation. +* Not verifying keys. + +The servers are configured to store their data under `demo/8080`, `demo/8081`, and +`demo/8082`. This includes configuration, logs, SQLite databases, and media. + +Note that when joining a public room on a different HS via "#foo:bar.net", +you are (in the current implementation) joining a room with room_id "foo". This means that +it won't work if your HS already has a room with that name. + +## Using the demo scripts + +There are three main scripts with straightforward purposes: + +* `start.sh` will start the Synapse servers, generating any missing configuration. + * This accepts a single parameter `--no-rate-limit` to "disable" rate limits + (they actually still exist, but are very high). +* `stop.sh` will stop the Synapse servers. +* `clean.sh` will delete the configuration, databases, log files, etc. + +To start a completely new set of servers, run: + +```sh +./demo/stop.sh; ./demo/clean.sh && ./demo/start.sh +``` diff --git a/docs/federate.md b/docs/federate.md index 5107f995be98..df4c87da51e2 100644 --- a/docs/federate.md +++ b/docs/federate.md @@ -63,4 +63,5 @@ release of Synapse. If you want to get up and running quickly with a trio of homeservers in a private federation, there is a script in the `demo` directory. This is mainly -useful just for development purposes. See [demo/README](https://github.com/matrix-org/synapse/tree/develop/demo/). +useful for development purposes. See +[demo scripts](https://matrix-org.github.io/synapse/develop/development/demo.html). From dc8d825ef26714f610db9c286f2f2517db064b79 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 9 Mar 2022 11:00:48 +0000 Subject: [PATCH 374/474] Skip attempt to get state at backwards-extremities (#12173) We don't *have* the state at a backwards-extremity, so this is never going to do anything useful.
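To make the simplification concrete, here is a condensed sketch (illustrative only, not the actual Synapse code; `try_backfill` and `likely_domains` mirror names visible in the diff below) of the backfill flow this commit leaves behind:

```python
# Condensed sketch of the post-change backfill flow: only the domains we
# already believe are likely to hold the history are tried. The removed
# fallback resolved state at the backwards-extremities to find further
# candidate servers, but we never *have* that state, so it could not work.
from typing import List


async def _maybe_backfill(likely_domains: List[str]) -> bool:
    async def try_backfill(domains: List[str]) -> bool:
        for dom in domains:
            ...  # ask `dom` for events behind our backwards-extremities
        return False

    if await try_backfill(likely_domains):
        return True

    # TODO (as noted in the diff): we could also try servers which were
    # previously in the room, but are no longer.
    return False
```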
--- changelog.d/12173.misc | 1 + synapse/handlers/federation.py | 60 ++-------------------------------- 2 files changed, 4 insertions(+), 57 deletions(-) create mode 100644 changelog.d/12173.misc diff --git a/changelog.d/12173.misc b/changelog.d/12173.misc new file mode 100644 index 000000000000..9f333e718a86 --- /dev/null +++ b/changelog.d/12173.misc @@ -0,0 +1 @@ +Avoid trying to calculate the state at outlier events. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index eb03a5accbac..db39aeabded6 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -23,8 +23,6 @@ from signedjson.sign import verify_signed_json from unpaddedbase64 import decode_base64 -from twisted.internet import defer - from synapse import event_auth from synapse.api.constants import EventContentFields, EventTypes, Membership from synapse.api.errors import ( @@ -45,11 +43,7 @@ from synapse.events.validator import EventValidator from synapse.federation.federation_client import InvalidResponseError from synapse.http.servlet import assert_params_in_dict -from synapse.logging.context import ( - make_deferred_yieldable, - nested_logging_context, - preserve_fn, -) +from synapse.logging.context import nested_logging_context from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http.federation import ( ReplicationCleanRoomRestServlet, @@ -355,56 +349,8 @@ async def try_backfill(domains: List[str]) -> bool: if success: return True - # Huh, well *those* domains didn't work out. Lets try some domains - # from the time. - - tried_domains = set(likely_domains) - tried_domains.add(self.server_name) - - event_ids = list(extremities.keys()) - - logger.debug("calling resolve_state_groups in _maybe_backfill") - resolve = preserve_fn(self.state_handler.resolve_state_groups_for_events) - states_list = await make_deferred_yieldable( - defer.gatherResults( - [resolve(room_id, [e]) for e in event_ids], consumeErrors=True - ) - ) - - # A map from event_id to state map of event_ids. - state_ids: Dict[str, StateMap[str]] = dict( - zip(event_ids, [s.state for s in states_list]) - ) - - state_map = await self.store.get_events( - [e_id for ids in state_ids.values() for e_id in ids.values()], - get_prev_content=False, - ) - - # A map from event_id to state map of events. - state_events: Dict[str, StateMap[EventBase]] = { - key: { - k: state_map[e_id] - for k, e_id in state_dict.items() - if e_id in state_map - } - for key, state_dict in state_ids.items() - } - - for e_id in event_ids: - likely_extremeties_domains = get_domains_from_state(state_events[e_id]) - - success = await try_backfill( - [ - dom - for dom, _ in likely_extremeties_domains - if dom not in tried_domains - ] - ) - if success: - return True - - tried_domains.update(dom for dom, _ in likely_extremeties_domains) + # TODO: we could also try servers which were previously in the room, but + # are no longer. return False From 180d8ff0d4d706344fa984abbd9ed6fa02ca13dc Mon Sep 17 00:00:00 2001 From: Nick Mills-Barrett Date: Wed, 9 Mar 2022 14:53:28 +0000 Subject: [PATCH 375/474] Retry some http replication failures (#12182) This allows for the target process to be down for around a minute which provides time for restarts during synapse upgrades/config updates. 
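As a back-of-the-envelope check on the "around a minute" claim, a toy calculation (illustrative only; the constant and the exponential delay mirror the diff below):

```python
# The retry schedule implied by the new RETRY_ON_CONNECT_ERROR_ATTEMPTS
# default of 5: each retry sleeps 2**attempts seconds, so the total wait
# before giving up is 1 + 2 + 4 + 8 + 16 + 32 = 63s (2**6 - 1).
def total_backoff_seconds(max_attempts: int = 5) -> int:
    return sum(2**attempt for attempt in range(max_attempts + 1))


assert total_backoff_seconds() == 63
```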
Closes: #12178 Signed-off-by: Nick Mills-Barrett <nick@beeper.com> --- changelog.d/12182.misc | 1 + synapse/replication/http/_base.py | 47 +++++++++++++++++++++++-------- 2 files changed, 37 insertions(+), 11 deletions(-) create mode 100644 changelog.d/12182.misc diff --git a/changelog.d/12182.misc b/changelog.d/12182.misc new file mode 100644 index 000000000000..7e9ad2c75244 --- /dev/null +++ b/changelog.d/12182.misc @@ -0,0 +1 @@ +Retry HTTP replication failures; this should prevent 502s when restarting stateful workers (main, event persisters, stream writers). Contributed by Nick @ Beeper. diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index 2e697c74a6bb..f1abb986534b 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -21,6 +21,7 @@ from prometheus_client import Counter, Gauge +from twisted.internet.error import ConnectError, DNSLookupError from twisted.web.server import Request from synapse.api.errors import HttpResponseException, SynapseError @@ -87,6 +88,10 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): `_handle_request` must return a Deferred. RETRY_ON_TIMEOUT(bool): Whether or not to retry the request when a 504 is received. + RETRY_ON_CONNECT_ERROR (bool): Whether or not to retry the request when + a connection error is received. + RETRY_ON_CONNECT_ERROR_ATTEMPTS (int): Number of attempts to retry when + receiving connection errors; each retry will back off exponentially longer. """ NAME: str = abc.abstractproperty() # type: ignore @@ -94,6 +99,8 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): METHOD = "POST" CACHE = True RETRY_ON_TIMEOUT = True + RETRY_ON_CONNECT_ERROR = True + RETRY_ON_CONNECT_ERROR_ATTEMPTS = 5 # =63s (2^6-1) def __init__(self, hs: "HomeServer"): if self.CACHE: @@ -236,18 +243,20 @@ async def send_request(*, instance_name: str = "master", **kwargs: Any) -> Any: "/".join(url_args), ) + headers: Dict[bytes, List[bytes]] = {} + # Add an authorization header, if configured. + if replication_secret: + headers[b"Authorization"] = [b"Bearer " + replication_secret] + opentracing.inject_header_dict(headers, check_destination=False) + try: + # Keep track of attempts made so we can bail if we don't manage to + # connect to the target after N tries. + attempts = 0 # We keep retrying the same request for timeouts. This is so that we # have a good idea that the request has either succeeded or failed # on the master, and so whether we should clean up or not. while True: - headers: Dict[bytes, List[bytes]] = {} - # Add an authorization header, if configured. - if replication_secret: - headers[b"Authorization"] = [ - b"Bearer " + replication_secret - ] - opentracing.inject_header_dict(headers, check_destination=False) try: result = await request_func(uri, data, headers=headers) break @@ -255,11 +264,27 @@ if not cls.RETRY_ON_TIMEOUT: raise - logger.warning("%s request timed out; retrying", cls.NAME) + logger.warning("%s request timed out; retrying", cls.NAME) + + # If we timed out we probably don't need to worry about backing + # off too much, but let's just wait a little anyway.
+ await clock.sleep(1) + except (ConnectError, DNSLookupError) as e: + if not cls.RETRY_ON_CONNECT_ERROR: + raise + if attempts > cls.RETRY_ON_CONNECT_ERROR_ATTEMPTS: + raise + + delay = 2 ** attempts + logger.warning( + "%s request connection failed; retrying in %ds: %r", + cls.NAME, + delay, + e, + ) - # If we timed out we probably don't need to worry about backing - # off too much, but lets just wait a little anyway. - await clock.sleep(1) + await clock.sleep(delay) + attempts += 1 except HttpResponseException as e: # We convert to SynapseError as we know that it was a SynapseError # on the main process that we should send to the client. (And From 032688854babeea832cbb4f762fc70fe31e73cc6 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 9 Mar 2022 10:29:39 -0500 Subject: [PATCH 376/474] Remove some unused variables/parameters. (#12187) --- changelog.d/12187.misc | 1 + synapse/storage/databases/main/roommember.py | 14 +++++--------- 2 files changed, 6 insertions(+), 9 deletions(-) create mode 100644 changelog.d/12187.misc diff --git a/changelog.d/12187.misc b/changelog.d/12187.misc new file mode 100644 index 000000000000..c53e68faa508 --- /dev/null +++ b/changelog.d/12187.misc @@ -0,0 +1 @@ +Remove unused variables. diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index e48ec5f495af..bef675b8453c 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -46,7 +46,7 @@ ProfileInfo, RoomsForUser, ) -from synapse.types import PersistedEventPosition, StateMap, get_domain_from_id +from synapse.types import PersistedEventPosition, get_domain_from_id from synapse.util.async_helpers import Linearizer from synapse.util.caches import intern_string from synapse.util.caches.descriptors import _CacheContext, cached, cachedList @@ -273,7 +273,7 @@ def _get_room_summary_txn(txn): txn.execute(sql, (room_id,)) res = {} for count, membership in txn: - summary = res.setdefault(membership, MemberSummary([], count)) + res.setdefault(membership, MemberSummary([], count)) # we order by membership and then fairly arbitrarily by event_id so # heroes are consistent @@ -839,18 +839,14 @@ async def get_joined_hosts(self, room_id: str, state_entry): with Measure(self._clock, "get_joined_hosts"): return await self._get_joined_hosts( - room_id, state_group, state_entry.state, state_entry=state_entry + room_id, state_group, state_entry=state_entry ) @cached(num_args=2, max_entries=10000, iterable=True) async def _get_joined_hosts( - self, - room_id: str, - state_group: int, - current_state_ids: StateMap[str], - state_entry: "_StateCacheEntry", + self, room_id: str, state_group: int, state_entry: "_StateCacheEntry" ) -> FrozenSet[str]: - # We don't use `state_group`, its there so that we can cache based on + # We don't use `state_group`, it's there so that we can cache based on # it. However, its important that its never None, since two # current_state's with a state_group of None are likely to be different. # From 690cb4f3b32938f5ced5590abe9429733040a129 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 9 Mar 2022 13:07:41 -0500 Subject: [PATCH 377/474] Allow for ignoring some arguments when caching. (#12189) * `@cached` can now take an `uncached_args` which is an iterable of names to not use in the cache key. * Requires `@cached`, `@cachedList` and `@lru_cache` to use keyword arguments for clarity. * Asserts that keyword-only arguments in cached functions are not accepted.
(I tested this briefly and I don't believe this works properly.) --- changelog.d/12189.misc | 1 + .../storage/databases/main/events_worker.py | 4 +- synapse/util/caches/descriptors.py | 74 ++++++++++++---- tests/util/caches/test_descriptors.py | 84 ++++++++++++++++++- 4 files changed, 142 insertions(+), 21 deletions(-) create mode 100644 changelog.d/12189.misc diff --git a/changelog.d/12189.misc b/changelog.d/12189.misc new file mode 100644 index 000000000000..015e808e63c7 --- /dev/null +++ b/changelog.d/12189.misc @@ -0,0 +1 @@ +Support skipping some arguments when generating cache keys. diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 26784f755e40..59454a47dfdd 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -1286,7 +1286,7 @@ async def have_seen_events( ) return {eid for ((_rid, eid), have_event) in res.items() if have_event} - @cachedList("have_seen_event", "keys") + @cachedList(cached_method_name="have_seen_event", list_name="keys") async def _have_seen_events_dict( self, keys: Iterable[Tuple[str, str]] ) -> Dict[Tuple[str, str], bool]: @@ -1954,7 +1954,7 @@ def get_event_id_for_timestamp_txn(txn: LoggingTransaction) -> Optional[str]: get_event_id_for_timestamp_txn, ) - @cachedList("is_partial_state_event", list_name="event_ids") + @cachedList(cached_method_name="is_partial_state_event", list_name="event_ids") async def get_partial_state_events( self, event_ids: Collection[str] ) -> Dict[str, bool]: diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 1cdead02f14b..c3c5c16db96e 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -20,6 +20,7 @@ Any, Awaitable, Callable, + Collection, Dict, Generic, Hashable, @@ -69,6 +70,7 @@ def __init__( self, orig: Callable[..., Any], num_args: Optional[int], + uncached_args: Optional[Collection[str]] = None, cache_context: bool = False, ): self.orig = orig @@ -76,6 +78,13 @@ def __init__( arg_spec = inspect.getfullargspec(orig) all_args = arg_spec.args + # There's no reason that keyword-only arguments couldn't be supported, + # but right now they're buggy so do not allow them. + if arg_spec.kwonlyargs: + raise ValueError( + "_CacheDescriptorBase does not support keyword-only arguments." + ) + if "cache_context" in all_args: if not cache_context: raise ValueError( @@ -88,6 +97,9 @@ def __init__( " named `cache_context`" ) + if num_args is not None and uncached_args is not None: + raise ValueError("Cannot provide both num_args and uncached_args") + if num_args is None: num_args = len(all_args) - 1 if cache_context: @@ -105,6 +117,12 @@ def __init__( # list of the names of the args used as the cache key self.arg_names = all_args[1 : num_args + 1] + # If there are args to not cache on, filter them out (and fix the size of num_args). 
+ if uncached_args is not None: + include_arg_in_cache_key = [n not in uncached_args for n in self.arg_names] + else: + include_arg_in_cache_key = [True] * len(self.arg_names) + # self.arg_defaults is a map of arg name to its default value for each # argument that has a default value if arg_spec.defaults: @@ -119,8 +137,8 @@ def __init__( self.add_cache_context = cache_context - self.cache_key_builder = get_cache_key_builder( - self.arg_names, self.arg_defaults + self.cache_key_builder = _get_cache_key_builder( + self.arg_names, include_arg_in_cache_key, self.arg_defaults ) @@ -130,8 +148,7 @@ class _LruCachedFunction(Generic[F]): def lru_cache( - max_entries: int = 1000, - cache_context: bool = False, + *, max_entries: int = 1000, cache_context: bool = False ) -> Callable[[F], _LruCachedFunction[F]]: """A method decorator that applies a memoizing cache around the function. @@ -186,7 +203,9 @@ def __init__( max_entries: int = 1000, cache_context: bool = False, ): - super().__init__(orig, num_args=None, cache_context=cache_context) + super().__init__( + orig, num_args=None, uncached_args=None, cache_context=cache_context + ) self.max_entries = max_entries def __get__(self, obj: Optional[Any], owner: Optional[Type]) -> Callable[..., Any]: @@ -260,6 +279,9 @@ def foo(self, key, cache_context): num_args: number of positional arguments (excluding ``self`` and ``cache_context``) to use as cache keys. Defaults to all named args of the function. + uncached_args: a list of argument names to not use as the cache key. + (``self`` and ``cache_context`` are always ignored.) Cannot be used + with num_args. tree: cache_context: iterable: @@ -273,12 +295,18 @@ def __init__( orig: Callable[..., Any], max_entries: int = 1000, num_args: Optional[int] = None, + uncached_args: Optional[Collection[str]] = None, tree: bool = False, cache_context: bool = False, iterable: bool = False, prune_unread_entries: bool = True, ): - super().__init__(orig, num_args=num_args, cache_context=cache_context) + super().__init__( + orig, + num_args=num_args, + uncached_args=uncached_args, + cache_context=cache_context, + ) if tree and self.num_args < 2: raise RuntimeError( @@ -369,7 +397,7 @@ def __init__( but including list_name) to use as cache keys. Defaults to all named args of the function. """ - super().__init__(orig, num_args=num_args) + super().__init__(orig, num_args=num_args, uncached_args=None) self.list_name = list_name @@ -530,8 +558,10 @@ def get_instance( def cached( + *, max_entries: int = 1000, num_args: Optional[int] = None, + uncached_args: Optional[Collection[str]] = None, tree: bool = False, cache_context: bool = False, iterable: bool = False, @@ -541,6 +571,7 @@ def cached( orig, max_entries=max_entries, num_args=num_args, + uncached_args=uncached_args, tree=tree, cache_context=cache_context, iterable=iterable, @@ -551,7 +582,7 @@ def cached( def cachedList( - cached_method_name: str, list_name: str, num_args: Optional[int] = None + *, cached_method_name: str, list_name: str, num_args: Optional[int] = None ) -> Callable[[F], _CachedFunction[F]]: """Creates a descriptor that wraps a function in a `CacheListDescriptor`. 
@@ -590,13 +621,16 @@ def batch_do_something(self, first_arg, second_args): return cast(Callable[[F], _CachedFunction[F]], func) -def get_cache_key_builder( - param_names: Sequence[str], param_defaults: Mapping[str, Any] +def _get_cache_key_builder( + param_names: Sequence[str], + include_params: Sequence[bool], + param_defaults: Mapping[str, Any], ) -> Callable[[Sequence[Any], Mapping[str, Any]], CacheKey]: """Construct a function which will build cache keys suitable for a cached function Args: param_names: list of formal parameter names for the cached function + include_params: list of booleans indicating whether to include each parameter in the cache key param_defaults: a mapping from parameter name to default value for that param Returns: @@ -608,6 +642,7 @@ if len(param_names) == 1: nm = param_names[0] + assert include_params[0] is True def get_cache_key(args: Sequence[Any], kwargs: Mapping[str, Any]) -> CacheKey: if nm in kwargs: @@ -620,13 +655,18 @@ else: def get_cache_key(args: Sequence[Any], kwargs: Mapping[str, Any]) -> CacheKey: - return tuple(_get_cache_key_gen(param_names, param_defaults, args, kwargs)) + return tuple( + _get_cache_key_gen( + param_names, include_params, param_defaults, args, kwargs + ) + ) return get_cache_key def _get_cache_key_gen( param_names: Iterable[str], + include_params: Iterable[bool], param_defaults: Mapping[str, Any], args: Sequence[Any], kwargs: Mapping[str, Any], @@ -637,16 +677,18 @@ This is essentially the same operation as `inspect.getcallargs`, but optimised so that we don't need to inspect the target function for each call. """ - # We loop through each arg name, looking up if its in the `kwargs`, # otherwise using the next argument in `args`. If there are no more # args then we try looking the arg name up in the defaults. pos = 0 - for nm in param_names: + for nm, inc in zip(param_names, include_params): if nm in kwargs: - yield kwargs[nm] + if inc: + yield kwargs[nm] elif pos < len(args): - yield args[pos] + if inc: + yield args[pos] pos += 1 else: - yield param_defaults[nm] + if inc: + yield param_defaults[nm] diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 19741ffcdaf1..6a4b17527a7f 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -141,6 +141,84 @@ def fn(self, arg1, arg2): self.assertEqual(r, "chips") obj.mock.assert_not_called() + @defer.inlineCallbacks + def test_cache_uncached_args(self): + """ + Only the arguments not named in uncached_args should matter to the cache. + + Note that this is identical to test_cache_num_args, but provides the + arguments differently. + """ + + class Cls: + # Note that it is important that this is not the last argument to + # test behaviour of skipping arguments properly.
+ @descriptors.cached(uncached_args=("arg2",)) + def fn(self, arg1, arg2, arg3): + return self.mock(arg1, arg2, arg3) + + def __init__(self): + self.mock = mock.Mock() + + obj = Cls() + obj.mock.return_value = "fish" + r = yield obj.fn(1, 2, 3) + self.assertEqual(r, "fish") + obj.mock.assert_called_once_with(1, 2, 3) + obj.mock.reset_mock() + + # a call with different params should call the mock again + obj.mock.return_value = "chips" + r = yield obj.fn(2, 3, 4) + self.assertEqual(r, "chips") + obj.mock.assert_called_once_with(2, 3, 4) + obj.mock.reset_mock() + + # the two values should now be cached; we should be able to vary + # the second argument and still get the cached result. + r = yield obj.fn(1, 4, 3) + self.assertEqual(r, "fish") + r = yield obj.fn(2, 5, 4) + self.assertEqual(r, "chips") + obj.mock.assert_not_called() + + @defer.inlineCallbacks + def test_cache_kwargs(self): + """Test that keyword arguments are treated properly""" + + class Cls: + def __init__(self): + self.mock = mock.Mock() + + @descriptors.cached() + def fn(self, arg1, kwarg1=2): + return self.mock(arg1, kwarg1=kwarg1) + + obj = Cls() + obj.mock.return_value = "fish" + r = yield obj.fn(1, kwarg1=2) + self.assertEqual(r, "fish") + obj.mock.assert_called_once_with(1, kwarg1=2) + obj.mock.reset_mock() + + # a call with different params should call the mock again + obj.mock.return_value = "chips" + r = yield obj.fn(1, kwarg1=3) + self.assertEqual(r, "chips") + obj.mock.assert_called_once_with(1, kwarg1=3) + obj.mock.reset_mock() + + # the values should now be cached. + r = yield obj.fn(1, kwarg1=2) + self.assertEqual(r, "fish") + # We should be able to not provide kwarg1 and get the cached value back. + r = yield obj.fn(1) + self.assertEqual(r, "fish") + # Keyword arguments can be in any order. 
+ r = yield obj.fn(kwarg1=2, arg1=1) + self.assertEqual(r, "fish") + obj.mock.assert_not_called() + def test_cache_with_sync_exception(self): """If the wrapped function throws synchronously, things should continue to work""" @@ -656,7 +734,7 @@ def __init__(self): def fn(self, arg1, arg2): pass - @descriptors.cachedList("fn", "args1") + @descriptors.cachedList(cached_method_name="fn", list_name="args1") async def list_fn(self, args1, arg2): assert current_context().name == "c1" # we want this to behave like an asynchronous function @@ -715,7 +793,7 @@ def __init__(self): def fn(self, arg1): pass - @descriptors.cachedList("fn", "args1") + @descriptors.cachedList(cached_method_name="fn", list_name="args1") def list_fn(self, args1) -> "Deferred[dict]": return self.mock(args1) @@ -758,7 +836,7 @@ def __init__(self): def fn(self, arg1, arg2): pass - @descriptors.cachedList("fn", "args1") + @descriptors.cachedList(cached_method_name="fn", list_name="args1") async def list_fn(self, args1, arg2): # we want this to behave like an asynchronous function await run_on_reactor() From 15382b1afad65366df13c3b9040b6fdfb1eccfca Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Wed, 9 Mar 2022 18:23:57 +0000 Subject: [PATCH 378/474] Add third_party module callbacks to check if a user can delete a room and deactivate a user (#12028) * Add check_can_deactivate_user * Add check_can_shutdown_rooms * Documentation * callbacks, not functions * Various suggested tweaks * Add tests for test_check_can_shutdown_room and test_check_can_deactivate_user * Update check_can_deactivate_user to not take a Requester * Fix check_can_shutdown_room docs * Renegade and use `by_admin` instead of `admin_user_id` * fix lint * Update docs/modules/third_party_rules_callbacks.md Co-authored-by: Brendan Abolivier * Update docs/modules/third_party_rules_callbacks.md Co-authored-by: Brendan Abolivier * Update docs/modules/third_party_rules_callbacks.md Co-authored-by: Brendan Abolivier * Update docs/modules/third_party_rules_callbacks.md Co-authored-by: Brendan Abolivier Co-authored-by: Brendan Abolivier --- changelog.d/12028.feature | 1 + docs/modules/third_party_rules_callbacks.md | 43 +++++++ synapse/events/third_party_rules.py | 55 +++++++++ synapse/handlers/deactivate_account.py | 12 +- synapse/handlers/room.py | 8 ++ synapse/module_api/__init__.py | 6 + synapse/rest/admin/rooms.py | 9 ++ tests/rest/client/test_third_party_rules.py | 121 ++++++++++++++++++++ 8 files changed, 254 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12028.feature diff --git a/changelog.d/12028.feature b/changelog.d/12028.feature new file mode 100644 index 000000000000..5549c8f6fcf6 --- /dev/null +++ b/changelog.d/12028.feature @@ -0,0 +1 @@ +Add third-party rules callbacks `check_can_shutdown_room` and `check_can_deactivate_user`. diff --git a/docs/modules/third_party_rules_callbacks.md b/docs/modules/third_party_rules_callbacks.md index 09ac838107b8..1d3c39967faa 100644 --- a/docs/modules/third_party_rules_callbacks.md +++ b/docs/modules/third_party_rules_callbacks.md @@ -148,6 +148,49 @@ deny an incoming event, see [`check_event_for_spam`](spam_checker_callbacks.md#c If multiple modules implement this callback, Synapse runs them all in order. +### `check_can_shutdown_room` + +_First introduced in Synapse v1.55.0_ + +```python +async def check_can_shutdown_room( + user_id: str, room_id: str, +) -> bool: +``` + +Called when an admin user requests the shutdown of a room.
The module must return a +boolean indicating whether the shutdown can go through. If the callback returns `False`, +the shutdown will not proceed and the caller will see an `M_FORBIDDEN` error. + +If multiple modules implement this callback, they will be considered in order. If a +callback returns `True`, Synapse falls through to the next one. The value of the first +callback that does not return `True` will be used. If this happens, Synapse will not call +any of the subsequent implementations of this callback. + +### `check_can_deactivate_user` + +_First introduced in Synapse v1.55.0_ + +```python +async def check_can_deactivate_user( + user_id: str, by_admin: bool, +) -> bool: +``` + +Called when the deactivation of a user is requested. User deactivation can be +performed by an admin or the user themselves, so developers are encouraged to check the +requester when implementing this callback. The module must return a +boolean indicating whether the deactivation can go through. If the callback returns `False`, +the deactivation will not proceed and the caller will see an `M_FORBIDDEN` error. + +The module is passed two parameters: `user_id`, the ID of the user being deactivated, and `by_admin`, which is `True` if the request was made by a server admin, and `False` otherwise. + +If multiple modules implement this callback, they will be considered in order. If a +callback returns `True`, Synapse falls through to the next one. The value of the first +callback that does not return `True` will be used. If this happens, Synapse will not call +any of the subsequent implementations of this callback. + + ### `on_profile_update` _First introduced in Synapse v1.54.0_ diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index ede72ee87631..bfca454f510d 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -38,6 +38,8 @@ [str, StateMap[EventBase], str], Awaitable[bool] ] ON_NEW_EVENT_CALLBACK = Callable[[EventBase, StateMap[EventBase]], Awaitable] +CHECK_CAN_SHUTDOWN_ROOM_CALLBACK = Callable[[str, str], Awaitable[bool]] +CHECK_CAN_DEACTIVATE_USER_CALLBACK = Callable[[str, bool], Awaitable[bool]] ON_PROFILE_UPDATE_CALLBACK = Callable[[str, ProfileInfo, bool, bool], Awaitable] ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK = Callable[[str, bool, bool], Awaitable] @@ -157,6 +159,12 @@ def __init__(self, hs: "HomeServer"): CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK ] = [] self._on_new_event_callbacks: List[ON_NEW_EVENT_CALLBACK] = [] + self._check_can_shutdown_room_callbacks: List[ + CHECK_CAN_SHUTDOWN_ROOM_CALLBACK + ] = [] + self._check_can_deactivate_user_callbacks: List[ + CHECK_CAN_DEACTIVATE_USER_CALLBACK + ] = [] self._on_profile_update_callbacks: List[ON_PROFILE_UPDATE_CALLBACK] = [] self._on_user_deactivation_status_changed_callbacks: List[ ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK @@ -173,6 +181,8 @@ def register_third_party_rules_callbacks( CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK ] = None, on_new_event: Optional[ON_NEW_EVENT_CALLBACK] = None, + check_can_shutdown_room: Optional[CHECK_CAN_SHUTDOWN_ROOM_CALLBACK] = None, + check_can_deactivate_user: Optional[CHECK_CAN_DEACTIVATE_USER_CALLBACK] = None, on_profile_update: Optional[ON_PROFILE_UPDATE_CALLBACK] = None, on_user_deactivation_status_changed: Optional[ ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK @@ -198,6 +208,11 @@ def register_third_party_rules_callbacks( if on_new_event is not None: self._on_new_event_callbacks.append(on_new_event) + if
check_can_shutdown_room is not None: + self._check_can_shutdown_room_callbacks.append(check_can_shutdown_room) + + if check_can_deactivate_user is not None: + self._check_can_deactivate_user_callbacks.append(check_can_deactivate_user) if on_profile_update is not None: self._on_profile_update_callbacks.append(on_profile_update) @@ -369,6 +384,46 @@ async def on_new_event(self, event_id: str) -> None: "Failed to run module API callback %s: %s", callback, e ) + async def check_can_shutdown_room(self, user_id: str, room_id: str) -> bool: + """Intercept requests to shutdown a room. If `False` is returned, the + room must not be shut down. + + Args: + user_id: The ID of the user requesting the shutdown. + room_id: The ID of the room. + """ + for callback in self._check_can_shutdown_room_callbacks: + try: + if await callback(user_id, room_id) is False: + return False + except Exception as e: + logger.exception( + "Failed to run module API callback %s: %s", callback, e + ) + return True + + async def check_can_deactivate_user( + self, + user_id: str, + by_admin: bool, + ) -> bool: + """Intercept requests to deactivate a user. If `False` is returned, the + user should not be deactivated. + + Args: + user_id: The ID of the user being deactivated. + by_admin: Whether the request was made by a server admin. + """ + for callback in self._check_can_deactivate_user_callbacks: + try: + if await callback(user_id, by_admin) is False: + return False + except Exception as e: + logger.exception( + "Failed to run module API callback %s: %s", callback, e + ) + return True + async def _get_state_map_for_room(self, room_id: str) -> StateMap[EventBase]: """Given a room ID, return the state events of that room. diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index 76ae768e6ef5..816e1a6d79c8 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -17,7 +17,7 @@ -from synapse.api.errors import SynapseError +from synapse.api.errors import Codes, SynapseError from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import Requester, UserID, create_requester if TYPE_CHECKING: from synapse.server import HomeServer @@ -42,6 +42,7 @@ def __init__(self, hs: "HomeServer"): # Flag that indicates whether the process to part users from rooms is running self._user_parter_running = False + self._third_party_rules = hs.get_third_party_event_rules() # Start the user parter loop so it can resume parting users from rooms where # it left off (if it has work left to do). @@ -74,6 +75,15 @@ async def deactivate_account( Returns: True if identity server supports removing threepids, otherwise False. """ + + # Check if this user can be deactivated + if not await self._third_party_rules.check_can_deactivate_user( + user_id, by_admin + ): + raise SynapseError( + 403, "Deactivation of this user is forbidden", Codes.FORBIDDEN + ) + # FIXME: Theoretically there is a race here wherein user resets # password using threepid.
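For module authors, a minimal sketch of how these callbacks might be used (a hypothetical module, not part of this patch; the registration keyword arguments match the module API additions below):

```python
# Hypothetical example module: restrict account deactivation to server
# admins while allowing all room shutdowns. Registration uses the new
# keyword arguments added to register_third_party_rules_callbacks.
from synapse.module_api import ModuleApi


class DeactivationGuard:
    def __init__(self, config: dict, api: ModuleApi):
        api.register_third_party_rules_callbacks(
            check_can_deactivate_user=self.check_can_deactivate_user,
            check_can_shutdown_room=self.check_can_shutdown_room,
        )

    async def check_can_deactivate_user(self, user_id: str, by_admin: bool) -> bool:
        # Only allow deactivations requested by a server admin.
        return by_admin

    async def check_can_shutdown_room(self, user_id: str, room_id: str) -> bool:
        # Allow all shutdown requests.
        return True
```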
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 7b965b4b962a..b9735631fcd3 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1475,6 +1475,7 @@ def __init__(self, hs: "HomeServer"): self.room_member_handler = hs.get_room_member_handler() self._room_creation_handler = hs.get_room_creation_handler() self._replication = hs.get_replication_data_handler() + self._third_party_rules = hs.get_third_party_event_rules() self.event_creation_handler = hs.get_event_creation_handler() self.store = hs.get_datastores().main @@ -1548,6 +1549,13 @@ async def shutdown_room( if not RoomID.is_valid(room_id): raise SynapseError(400, "%s is not a legal room ID" % (room_id,)) + if not await self._third_party_rules.check_can_shutdown_room( + requester_user_id, room_id + ): + raise SynapseError( + 403, "Shutdown of this room is forbidden", Codes.FORBIDDEN + ) + # Action the block first (even if the room doesn't exist yet) if block: # This will work even if the room is already blocked, but that is diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index c42eeedd87ae..d735c1d4616e 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -54,6 +54,8 @@ USER_MAY_SEND_3PID_INVITE_CALLBACK, ) from synapse.events.third_party_rules import ( + CHECK_CAN_DEACTIVATE_USER_CALLBACK, + CHECK_CAN_SHUTDOWN_ROOM_CALLBACK, CHECK_EVENT_ALLOWED_CALLBACK, CHECK_THREEPID_CAN_BE_INVITED_CALLBACK, CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK, @@ -283,6 +285,8 @@ def register_third_party_rules_callbacks( CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK ] = None, on_new_event: Optional[ON_NEW_EVENT_CALLBACK] = None, + check_can_shutdown_room: Optional[CHECK_CAN_SHUTDOWN_ROOM_CALLBACK] = None, + check_can_deactivate_user: Optional[CHECK_CAN_DEACTIVATE_USER_CALLBACK] = None, on_profile_update: Optional[ON_PROFILE_UPDATE_CALLBACK] = None, on_user_deactivation_status_changed: Optional[ ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK @@ -298,6 +302,8 @@ def register_third_party_rules_callbacks( check_threepid_can_be_invited=check_threepid_can_be_invited, check_visibility_can_be_modified=check_visibility_can_be_modified, on_new_event=on_new_event, + check_can_shutdown_room=check_can_shutdown_room, + check_can_deactivate_user=check_can_deactivate_user, on_profile_update=on_profile_update, on_user_deactivation_status_changed=on_user_deactivation_status_changed, ) diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index f4736a3dad83..356d6f74d7ef 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -67,6 +67,7 @@ def __init__(self, hs: "HomeServer"): self._auth = hs.get_auth() self._store = hs.get_datastores().main self._pagination_handler = hs.get_pagination_handler() + self._third_party_rules = hs.get_third_party_event_rules() async def on_DELETE( self, request: SynapseRequest, room_id: str @@ -106,6 +107,14 @@ async def on_DELETE( HTTPStatus.BAD_REQUEST, "%s is not a legal room ID" % (room_id,) ) + # Check this here, as otherwise we'll only fail after the background job has been started. 
+ if not await self._third_party_rules.check_can_shutdown_room( + requester.user.to_string(), room_id + ): + raise SynapseError( + 403, "Shutdown of this room is forbidden", Codes.FORBIDDEN + ) + delete_id = self._pagination_handler.start_shutdown_and_purge_room( room_id=room_id, new_room_user_id=content.get("new_room_user_id"), diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index 58f1ea11b7da..e7de67e3a3b0 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -775,3 +775,124 @@ def test_on_user_deactivation_status_changed_admin(self) -> None: self.assertEqual(args[0], user_id) self.assertFalse(args[1]) self.assertTrue(args[2]) + + def test_check_can_deactivate_user(self) -> None: + """Tests that the check_can_deactivate_user module callback is called + correctly when processing a user's deactivation. + """ + # Register a mocked callback. + deactivation_mock = Mock(return_value=make_awaitable(False)) + third_party_rules = self.hs.get_third_party_event_rules() + third_party_rules._check_can_deactivate_user_callbacks.append( + deactivation_mock, + ) + + # Register a user that we'll deactivate. + user_id = self.register_user("altan", "password") + tok = self.login("altan", "password") + + # Deactivate that user. + channel = self.make_request( + "POST", + "/_matrix/client/v3/account/deactivate", + { + "auth": { + "type": LoginType.PASSWORD, + "password": "password", + "identifier": { + "type": "m.id.user", + "user": user_id, + }, + }, + "erase": True, + }, + access_token=tok, + ) + + # Check that the deactivation was blocked + self.assertEqual(channel.code, 403, channel.json_body) + + # Check that the mock was called once. + deactivation_mock.assert_called_once() + args = deactivation_mock.call_args[0] + + # Check that the mock was called with the right user ID + self.assertEqual(args[0], user_id) + + # Check that the request was not made by an admin + self.assertEqual(args[1], False) + + def test_check_can_deactivate_user_admin(self) -> None: + """Tests that the check_can_deactivate_user module callback is called + correctly when processing a user's deactivation triggered by a server admin. + """ + # Register a mocked callback. + deactivation_mock = Mock(return_value=make_awaitable(False)) + third_party_rules = self.hs.get_third_party_event_rules() + third_party_rules._check_can_deactivate_user_callbacks.append( + deactivation_mock, + ) + + # Register an admin user. + self.register_user("admin", "password", admin=True) + admin_tok = self.login("admin", "password") + + # Register a user that we'll deactivate. + user_id = self.register_user("altan", "password") + + # Deactivate the user. + channel = self.make_request( + "PUT", + "/_synapse/admin/v2/users/%s" % user_id, + {"deactivated": True}, + access_token=admin_tok, + ) + + # Check that the deactivation was blocked + self.assertEqual(channel.code, 403, channel.json_body) + + # Check that the mock was called once. + deactivation_mock.assert_called_once() + args = deactivation_mock.call_args[0] + + # Check that the mock was called with the right user ID + self.assertEqual(args[0], user_id) + + # Check that the request was made by an admin + self.assertEqual(args[1], True) + + def test_check_can_shutdown_room(self) -> None: + """Tests that the check_can_shutdown_room module callback is called + correctly when processing an admin's shutdown room request. + """ + # Register a mocked callback.
+ shutdown_mock = Mock(return_value=make_awaitable(False)) + third_party_rules = self.hs.get_third_party_event_rules() + third_party_rules._check_can_shutdown_room_callbacks.append( + shutdown_mock, + ) + + # Register an admin user. + admin_user_id = self.register_user("admin", "password", admin=True) + admin_tok = self.login("admin", "password") + + # Shutdown the room. + channel = self.make_request( + "DELETE", + "/_synapse/admin/v2/rooms/%s" % self.room_id, + {}, + access_token=admin_tok, + ) + + # Check that the shutdown was blocked + self.assertEqual(channel.code, 403, channel.json_body) + + # Check that the mock was called once. + shutdown_mock.assert_called_once() + args = shutdown_mock.call_args[0] + + # Check that the mock was called with the right user ID + self.assertEqual(args[0], admin_user_id) + + # Check that the mock was called with the right room ID + self.assertEqual(args[1], self.room_id) From a4c1fdb44a16471964ed6a347be6a191102f5c07 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Wed, 9 Mar 2022 18:45:21 +0000 Subject: [PATCH 379/474] Remove dead code in `tests/storage/test_database.py` (#12197) Signed-off-by: Sean Quah --- changelog.d/12197.misc | 1 + tests/storage/test_database.py | 16 ---------------- 2 files changed, 1 insertion(+), 16 deletions(-) create mode 100644 changelog.d/12197.misc diff --git a/changelog.d/12197.misc b/changelog.d/12197.misc new file mode 100644 index 000000000000..7d0e9b6bbf4c --- /dev/null +++ b/changelog.d/12197.misc @@ -0,0 +1 @@ +Remove some dead code. diff --git a/tests/storage/test_database.py b/tests/storage/test_database.py index 6fbac0ab1466..85978675634b 100644 --- a/tests/storage/test_database.py +++ b/tests/storage/test_database.py @@ -13,26 +13,10 @@ # limitations under the License. from synapse.storage.database import make_tuple_comparison_clause -from synapse.storage.engines import BaseDatabaseEngine from tests import unittest -def _stub_db_engine(**kwargs) -> BaseDatabaseEngine: - # returns a DatabaseEngine, circumventing the abc mechanism - # any kwargs are set as attributes on the class before instantiating it - t = type( - "TestBaseDatabaseEngine", - (BaseDatabaseEngine,), - dict(BaseDatabaseEngine.__dict__), - ) - # defeat the abc mechanism - t.__abstractmethods__ = set() - for k, v in kwargs.items(): - setattr(t, k, v) - return t(None, None) - - class TupleComparisonClauseTestCase(unittest.TestCase): def test_native_tuple_comparison(self): clause, args = make_tuple_comparison_clause([("a", 1), ("b", 2)]) From 3e4af36bc8515504721b3c1b1d64d4f45359bf88 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 10 Mar 2022 08:01:56 -0500 Subject: [PATCH 380/474] Rename get_tcp_replication to get_replication_command_handler. (#12192) Since the object it returns is a ReplicationCommandHandler. This is clean-up from adding support for Redis, where the command handler was added as an additional layer of abstraction from the TCP protocol.
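The change at each call site is mechanical; a sketch (the returned ReplicationCommandHandler and its behaviour are unchanged):

```python
# Before: hs.get_tcp_replication().start_replication(hs)
# After, using the renamed accessor:
from synapse.server import HomeServer


def start_replication(hs: HomeServer) -> None:
    hs.get_replication_command_handler().start_replication(hs)
```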
--- changelog.d/12192.misc | 1 + synapse/app/generic_worker.py | 2 +- synapse/app/homeserver.py | 2 +- synapse/federation/transport/server/_base.py | 2 +- synapse/handlers/presence.py | 4 ++-- synapse/replication/slave/storage/client_ips.py | 2 +- synapse/replication/tcp/client.py | 4 +++- synapse/replication/tcp/handler.py | 4 +--- synapse/replication/tcp/redis.py | 2 +- synapse/replication/tcp/resource.py | 4 ++-- synapse/server.py | 2 +- tests/replication/_base.py | 4 ++-- tests/replication/tcp/streams/test_events.py | 2 +- tests/replication/tcp/streams/test_typing.py | 2 +- tests/replication/test_federation_ack.py | 2 +- 15 files changed, 20 insertions(+), 19 deletions(-) create mode 100644 changelog.d/12192.misc diff --git a/changelog.d/12192.misc b/changelog.d/12192.misc new file mode 100644 index 000000000000..bdfe8dad98a6 --- /dev/null +++ b/changelog.d/12192.misc @@ -0,0 +1 @@ +Rename `HomeServer.get_tcp_replication` to `get_replication_command_handler`. diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 1536a4272333..a10a63b06c7e 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -417,7 +417,7 @@ def start_listening(self) -> None: else: logger.warning("Unsupported listener type: %s", listener.type) - self.get_tcp_replication().start_replication(self) + self.get_replication_command_handler().start_replication(self) def start(config_options: List[str]) -> None: diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index a6789a840ede..e4dc04c0b40f 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -273,7 +273,7 @@ def start_listening(self) -> None: # If redis is enabled we connect via the replication command handler # in the same way as the workers (since we're effectively a client # rather than a server). 
- self.get_tcp_replication().start_replication(self) + self.get_replication_command_handler().start_replication(self) for listener in self.config.server.listeners: if listener.type == "http": diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py index 87e99c7ddf5b..2529dee613aa 100644 --- a/synapse/federation/transport/server/_base.py +++ b/synapse/federation/transport/server/_base.py @@ -63,7 +63,7 @@ def __init__(self, hs: "HomeServer"): self.replication_client = None if hs.config.worker.worker_app: - self.replication_client = hs.get_tcp_replication() + self.replication_client = hs.get_replication_command_handler() # A method just so we can pass 'self' as the authenticator to the Servlets async def authenticate_request( diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index c155098beeb8..9927a30e6ed5 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -424,13 +424,13 @@ def __init__(self, hs: "HomeServer"): async def _on_shutdown(self) -> None: if self._presence_enabled: - self.hs.get_tcp_replication().send_command( + self.hs.get_replication_command_handler().send_command( ClearUserSyncsCommand(self.instance_id) ) def send_user_sync(self, user_id: str, is_syncing: bool, last_sync_ms: int) -> None: if self._presence_enabled: - self.hs.get_tcp_replication().send_user_sync( + self.hs.get_replication_command_handler().send_user_sync( self.instance_id, user_id, is_syncing, last_sync_ms ) diff --git a/synapse/replication/slave/storage/client_ips.py b/synapse/replication/slave/storage/client_ips.py index b5b84c09ae41..14706a081755 100644 --- a/synapse/replication/slave/storage/client_ips.py +++ b/synapse/replication/slave/storage/client_ips.py @@ -54,6 +54,6 @@ async def insert_client_ip( self.client_ip_last_seen.set(key, now) - self.hs.get_tcp_replication().send_user_ip( + self.hs.get_replication_command_handler().send_user_ip( user_id, access_token, ip, user_agent, device_id, now ) diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index b8fc1d4db95e..deeaaec4e66c 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -462,6 +462,8 @@ async def _save_and_send_ack(self) -> None: # We ACK this token over replication so that the master can drop # its in memory queues - self._hs.get_tcp_replication().send_federation_ack(current_position) + self._hs.get_replication_command_handler().send_federation_ack( + current_position + ) except Exception: logger.exception("Error updating federation stream position") diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index 0d2013a3cfc5..d51f045f229a 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -295,9 +295,7 @@ async def _process_command( raise Exception("Unrecognised command %s in stream queue", cmd.NAME) def start_replication(self, hs: "HomeServer") -> None: - """Helper method to start a replication connection to the remote server - using TCP. 
- """ + """Helper method to start replication.""" if hs.config.redis.redis_enabled: from synapse.replication.tcp.redis import ( RedisDirectTcpReplicationClientFactory, diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py index b84e572da136..989c5be0327e 100644 --- a/synapse/replication/tcp/redis.py +++ b/synapse/replication/tcp/redis.py @@ -325,7 +325,7 @@ def __init__( password=hs.config.redis.redis_password, ) - self.synapse_handler = hs.get_tcp_replication() + self.synapse_handler = hs.get_replication_command_handler() self.synapse_stream_name = hs.hostname self.synapse_outbound_redis_connection = outbound_redis_connection diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index 494e42a2be8f..ab829040cde9 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -44,7 +44,7 @@ class ReplicationStreamProtocolFactory(ServerFactory): """Factory for new replication connections.""" def __init__(self, hs: "HomeServer"): - self.command_handler = hs.get_tcp_replication() + self.command_handler = hs.get_replication_command_handler() self.clock = hs.get_clock() self.server_name = hs.config.server.server_name @@ -85,7 +85,7 @@ def __init__(self, hs: "HomeServer"): self.is_looping = False self.pending_updates = False - self.command_handler = hs.get_tcp_replication() + self.command_handler = hs.get_replication_command_handler() # Set of streams to replicate. self.streams = self.command_handler.get_streams_to_replicate() diff --git a/synapse/server.py b/synapse/server.py index 46a64418ea0c..1270abb5a335 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -639,7 +639,7 @@ def get_read_marker_handler(self) -> ReadMarkerHandler: return ReadMarkerHandler(self) @cache_in_self - def get_tcp_replication(self) -> ReplicationCommandHandler: + def get_replication_command_handler(self) -> ReplicationCommandHandler: return ReplicationCommandHandler(self) @cache_in_self diff --git a/tests/replication/_base.py b/tests/replication/_base.py index a7a05a564fe9..9c5df266bd1f 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -251,7 +251,7 @@ def setUp(self): self.connect_any_redis_attempts, ) - self.hs.get_tcp_replication().start_replication(self.hs) + self.hs.get_replication_command_handler().start_replication(self.hs) # When we see a connection attempt to the master replication listener we # automatically set up the connection. This is so that tests don't @@ -375,7 +375,7 @@ def make_worker_hs( ) if worker_hs.config.redis.redis_enabled: - worker_hs.get_tcp_replication().start_replication(worker_hs) + worker_hs.get_replication_command_handler().start_replication(worker_hs) return worker_hs diff --git a/tests/replication/tcp/streams/test_events.py b/tests/replication/tcp/streams/test_events.py index f9d5da723cce..641a94133b1d 100644 --- a/tests/replication/tcp/streams/test_events.py +++ b/tests/replication/tcp/streams/test_events.py @@ -420,7 +420,7 @@ def test_backwards_stream_id(self): # Manually send an old RDATA command, which should get dropped. This # re-uses the row from above, but with an earlier stream token. 
- self.hs.get_tcp_replication().send_command( + self.hs.get_replication_command_handler().send_command( RdataCommand("events", "master", 1, row) ) diff --git a/tests/replication/tcp/streams/test_typing.py b/tests/replication/tcp/streams/test_typing.py index 3ff5afc6e501..9a229dd23f4a 100644 --- a/tests/replication/tcp/streams/test_typing.py +++ b/tests/replication/tcp/streams/test_typing.py @@ -118,7 +118,7 @@ def test_reset(self): # Reset the typing handler self.hs.get_replication_streams()["typing"].last_token = 0 - self.hs.get_tcp_replication()._streams["typing"].last_token = 0 + self.hs.get_replication_command_handler()._streams["typing"].last_token = 0 typing._latest_room_serial = 0 typing._typing_stream_change_cache = StreamChangeCache( "TypingStreamChangeCache", typing._latest_room_serial diff --git a/tests/replication/test_federation_ack.py b/tests/replication/test_federation_ack.py index 1b6a4bf4b0b1..26b8bd512a7f 100644 --- a/tests/replication/test_federation_ack.py +++ b/tests/replication/test_federation_ack.py @@ -48,7 +48,7 @@ def test_federation_ack_sent(self): transport, rather than assuming that the implementation has a ReplicationCommandHandler. """ - rch = self.hs.get_tcp_replication() + rch = self.hs.get_replication_command_handler() # wire up the ReplicationCommandHandler to a mock connection, which needs # to implement IReplicationConnection. (Note that Mock doesn't understand From 88cd6f937807e64c05458cec86ef0ba0c1c656b3 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 10 Mar 2022 09:03:59 -0500 Subject: [PATCH 381/474] Allow retrieving the relations of a redacted event. (#12130) This is allowed per MSC2675, although the original implementation did not allow for it and would return an empty chunk / not bundle aggregations. The main thing to improve is that the various caches get cleared properly when an event is redacted, and that edits must not leak if the original event is redacted (as that would presumably leak something similar to the original event content). --- changelog.d/12130.bugfix | 1 + changelog.d/12189.bugfix | 1 + changelog.d/12189.misc | 1 - synapse/rest/client/relations.py | 82 ++++++++++----------- synapse/storage/databases/main/cache.py | 4 + synapse/storage/databases/main/events.py | 11 +-- synapse/storage/databases/main/relations.py | 60 ++++++++------- tests/rest/client/test_relations.py | 45 +++++++++-- 8 files changed, 122 insertions(+), 83 deletions(-) create mode 100644 changelog.d/12130.bugfix create mode 100644 changelog.d/12189.bugfix delete mode 100644 changelog.d/12189.misc diff --git a/changelog.d/12130.bugfix b/changelog.d/12130.bugfix new file mode 100644 index 000000000000..df9b0dc413dd --- /dev/null +++ b/changelog.d/12130.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug when redacting events with relations. diff --git a/changelog.d/12189.bugfix b/changelog.d/12189.bugfix new file mode 100644 index 000000000000..df9b0dc413dd --- /dev/null +++ b/changelog.d/12189.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug when redacting events with relations. diff --git a/changelog.d/12189.misc b/changelog.d/12189.misc deleted file mode 100644 index 015e808e63c7..000000000000 --- a/changelog.d/12189.misc +++ /dev/null @@ -1 +0,0 @@ -Support skipping some arguments when generating cache keys. 
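This change leans on the `uncached_args` support added in #12189: in the storage changes below, `get_relations_for_event` is keyed on `event_id` while the freshly loaded `event` is passed through uncached. A standalone toy sketch of that idea (not Synapse's implementation):

```python
# Toy memoizer: arguments named in `uncached` are passed to the function
# but excluded from the cache key, so e.g. a freshly fetched `event`
# object does not fragment a cache keyed on its `event_id`.
import functools


def cached(uncached=()):
    def decorator(fn):
        cache = {}
        # Positional parameter names, skipping `self`.
        names = fn.__code__.co_varnames[1 : fn.__code__.co_argcount]

        @functools.wraps(fn)
        def wrapper(self, *args, **kwargs):
            bound = {**dict(zip(names, args)), **kwargs}
            key = tuple(v for k, v in sorted(bound.items()) if k not in uncached)
            if key not in cache:
                cache[key] = fn(self, *args, **kwargs)
            return cache[key]

        return wrapper

    return decorator


class ToyRelationsStore:
    @cached(uncached=("event",))
    def get_relations_for_event(self, event_id, event):
        return f"relations of {event_id}"  # `event` never enters the key
```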
diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py index 07fa1cdd4c67..d9a6be43f793 100644 --- a/synapse/rest/client/relations.py +++ b/synapse/rest/client/relations.py @@ -27,7 +27,7 @@ from synapse.http.servlet import RestServlet, parse_integer, parse_string from synapse.http.site import SynapseRequest from synapse.rest.client._base import client_patterns -from synapse.storage.relations import AggregationPaginationToken, PaginationChunk +from synapse.storage.relations import AggregationPaginationToken from synapse.types import JsonDict, StreamToken if TYPE_CHECKING: @@ -82,28 +82,25 @@ async def on_GET( from_token_str = parse_string(request, "from") to_token_str = parse_string(request, "to") - if event.internal_metadata.is_redacted(): - # If the event is redacted, return an empty list of relations - pagination_chunk = PaginationChunk(chunk=[]) - else: - # Return the relations - from_token = None - if from_token_str: - from_token = await StreamToken.from_string(self.store, from_token_str) - to_token = None - if to_token_str: - to_token = await StreamToken.from_string(self.store, to_token_str) - - pagination_chunk = await self.store.get_relations_for_event( - event_id=parent_id, - room_id=room_id, - relation_type=relation_type, - event_type=event_type, - limit=limit, - direction=direction, - from_token=from_token, - to_token=to_token, - ) + # Return the relations + from_token = None + if from_token_str: + from_token = await StreamToken.from_string(self.store, from_token_str) + to_token = None + if to_token_str: + to_token = await StreamToken.from_string(self.store, to_token_str) + + pagination_chunk = await self.store.get_relations_for_event( + event_id=parent_id, + event=event, + room_id=room_id, + relation_type=relation_type, + event_type=event_type, + limit=limit, + direction=direction, + from_token=from_token, + to_token=to_token, + ) events = await self.store.get_events_as_list( [c["event_id"] for c in pagination_chunk.chunk] @@ -193,27 +190,23 @@ async def on_GET( from_token_str = parse_string(request, "from") to_token_str = parse_string(request, "to") - if event.internal_metadata.is_redacted(): - # If the event is redacted, return an empty list of relations - pagination_chunk = PaginationChunk(chunk=[]) - else: - # Return the relations - from_token = None - if from_token_str: - from_token = AggregationPaginationToken.from_string(from_token_str) - - to_token = None - if to_token_str: - to_token = AggregationPaginationToken.from_string(to_token_str) - - pagination_chunk = await self.store.get_aggregation_groups_for_event( - event_id=parent_id, - room_id=room_id, - event_type=event_type, - limit=limit, - from_token=from_token, - to_token=to_token, - ) + # Return the relations + from_token = None + if from_token_str: + from_token = AggregationPaginationToken.from_string(from_token_str) + + to_token = None + if to_token_str: + to_token = AggregationPaginationToken.from_string(to_token_str) + + pagination_chunk = await self.store.get_aggregation_groups_for_event( + event_id=parent_id, + room_id=room_id, + event_type=event_type, + limit=limit, + from_token=from_token, + to_token=to_token, + ) return 200, await pagination_chunk.to_dict(self.store) @@ -295,6 +288,7 @@ async def on_GET( result = await self.store.get_relations_for_event( event_id=parent_id, + event=event, room_id=room_id, relation_type=relation_type, event_type=event_type, diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index 
abd54c7dc703..d6a2df1afeb6 100644
--- a/synapse/storage/databases/main/cache.py
+++ b/synapse/storage/databases/main/cache.py
@@ -191,6 +191,10 @@ def _invalidate_caches_for_event(

         if redacts:
             self._invalidate_get_event_cache(redacts)
+            # Caches which might leak edits must be invalidated for the event being
+            # redacted.
+            self.get_relations_for_event.invalidate((redacts,))
+            self.get_applicable_edit.invalidate((redacts,))

         if etype == EventTypes.Member:
             self._membership_stream_cache.entity_has_changed(state_key, stream_ordering)
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index 1dc83aa5e3a6..1a322882bf3f 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -1619,9 +1619,12 @@ def prefill():

         txn.call_after(prefill)

-    def _store_redaction(self, txn, event):
-        # invalidate the cache for the redacted event
+    def _store_redaction(self, txn: LoggingTransaction, event: EventBase) -> None:
+        # Invalidate the caches for the redacted event. Note that these caches
+        # are also cleared as part of event replication in _invalidate_caches_for_event.
         txn.call_after(self.store._invalidate_get_event_cache, event.redacts)
+        txn.call_after(self.store.get_relations_for_event.invalidate, (event.redacts,))
+        txn.call_after(self.store.get_applicable_edit.invalidate, (event.redacts,))

         self.db_pool.simple_upsert_txn(
             txn,
@@ -1812,9 +1815,7 @@ def _handle_event_relations(
             txn.call_after(self.store.get_applicable_edit.invalidate, (parent_id,))

         if rel_type == RelationTypes.THREAD:
-            txn.call_after(
-                self.store.get_thread_summary.invalidate, (parent_id, event.room_id)
-            )
+            txn.call_after(self.store.get_thread_summary.invalidate, (parent_id,))
             # It should be safe to only invalidate the cache if the user has not
             # previously participated in the thread, but that's difficult (and
             # potentially error-prone) so it is always invalidated.
diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py
index 36aa1092f602..be1500092b5b 100644
--- a/synapse/storage/databases/main/relations.py
+++ b/synapse/storage/databases/main/relations.py
@@ -91,10 +91,11 @@ def __init__(

         self._msc3440_enabled = hs.config.experimental.msc3440_enabled

-    @cached(tree=True)
+    @cached(uncached_args=("event",), tree=True)
     async def get_relations_for_event(
         self,
         event_id: str,
+        event: EventBase,
         room_id: str,
         relation_type: Optional[str] = None,
         event_type: Optional[str] = None,
@@ -108,6 +109,7 @@ async def get_relations_for_event(

         Args:
             event_id: Fetch events that relate to this event ID.
+            event: The event corresponding to event_id.
             room_id: The room the event belongs to.
             relation_type: Only fetch events with this relation type, if given.
             event_type: Only fetch events with this event type, if given.
@@ -122,9 +124,13 @@ async def get_relations_for_event(
             List of event IDs that match relations requested. The
             rows are of the form `{"event_id": "..."}`.
         """
+        # We don't use `event_id` here; it exists so that we can cache based on
+        # it. The `event_id` must match `event.event_id`.
+ assert event.event_id == event_id where_clause = ["relates_to_id = ?", "room_id = ?"] - where_args: List[Union[str, int]] = [event_id, room_id] + where_args: List[Union[str, int]] = [event.event_id, room_id] + is_redacted = event.internal_metadata.is_redacted() if relation_type is not None: where_clause.append("relation_type = ?") @@ -157,7 +163,7 @@ async def get_relations_for_event( order = "ASC" sql = """ - SELECT event_id, topological_ordering, stream_ordering + SELECT event_id, relation_type, topological_ordering, stream_ordering FROM event_relations INNER JOIN events USING (event_id) WHERE %s @@ -178,9 +184,12 @@ def _get_recent_references_for_event_txn( last_stream_id = None events = [] for row in txn: - events.append({"event_id": row[0]}) - last_topo_id = row[1] - last_stream_id = row[2] + # Do not include edits for redacted events as they leak event + # content. + if not is_redacted or row[1] != RelationTypes.REPLACE: + events.append({"event_id": row[0]}) + last_topo_id = row[2] + last_stream_id = row[3] # If there are more events, generate the next pagination key. next_token = None @@ -776,7 +785,7 @@ async def _get_bundled_aggregation_for_event( ) references = await self.get_relations_for_event( - event_id, room_id, RelationTypes.REFERENCE, direction="f" + event_id, event, room_id, RelationTypes.REFERENCE, direction="f" ) if references.chunk: aggregations.references = await references.to_dict(cast("DataStore", self)) @@ -797,41 +806,36 @@ async def get_bundled_aggregations( A map of event ID to the bundled aggregation for the event. Not all events may have bundled aggregations in the results. """ - # The already processed event IDs. Tracked separately from the result - # since the result omits events which do not have bundled aggregations. - seen_event_ids = set() - - # State events and redacted events do not get bundled aggregations. - events = [ - event - for event in events - if not event.is_state() and not event.internal_metadata.is_redacted() - ] + # De-duplicate events by ID to handle the same event requested multiple times. + # + # State events do not get bundled aggregations. + events_by_id = { + event.event_id: event for event in events if not event.is_state() + } # event ID -> bundled aggregation in non-serialized form. results: Dict[str, BundledAggregations] = {} # Fetch other relations per event. - for event in events: - # De-duplicate events by ID to handle the same event requested multiple - # times. The caches that _get_bundled_aggregation_for_event use should - # capture this, but best to reduce work. - if event.event_id in seen_event_ids: - continue - seen_event_ids.add(event.event_id) - + for event in events_by_id.values(): event_result = await self._get_bundled_aggregation_for_event(event, user_id) if event_result: results[event.event_id] = event_result - # Fetch any edits. - edits = await self._get_applicable_edits(seen_event_ids) + # Fetch any edits (but not for redacted events). + edits = await self._get_applicable_edits( + [ + event_id + for event_id, event in events_by_id.items() + if not event.internal_metadata.is_redacted() + ] + ) for event_id, edit in edits.items(): results.setdefault(event_id, BundledAggregations()).replace = edit # Fetch thread summaries. if self._msc3440_enabled: - summaries = await self._get_thread_summaries(seen_event_ids) + summaries = await self._get_thread_summaries(events_by_id.keys()) # Only fetch participated for a limited selection based on what had # summaries. 
            participated = await self._get_threads_participated(
diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py
index a40a5de3991c..f9ae6e663f95 100644
--- a/tests/rest/client/test_relations.py
+++ b/tests/rest/client/test_relations.py
@@ -1475,12 +1475,13 @@ def test_redact_parent_edit(self) -> None:
         self.assertEqual(relations, {})

     def test_redact_parent_annotation(self) -> None:
-        """Test that annotations of an event are redacted when the original event
+        """Test that annotations of an event are viewable when the original event
         is redacted.
         """
         # Add a relation
         channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍")
         self.assertEqual(200, channel.code, channel.json_body)
+        related_event_id = channel.json_body["event_id"]

         # The relations should exist.
         event_ids, relations = self._make_relation_requests()
@@ -1494,11 +1495,45 @@ def test_redact_parent_annotation(self) -> None:
         # Redact the original event.
         self._redact(self.parent_id)

-        # The relations are not returned.
+        # The relations are returned.
         event_ids, relations = self._make_relation_requests()
-        self.assertEqual(event_ids, [])
-        self.assertEqual(relations, {})
+        self.assertEqual(event_ids, [related_event_id])
+        self.assertEqual(
+            relations["m.annotation"],
+            {"chunk": [{"type": "m.reaction", "key": "👍", "count": 1}]},
+        )

         # The aggregation should still be returned.
         chunk = self._get_aggregations()
-        self.assertEqual(chunk, [])
+        self.assertEqual(chunk, [{"count": 1, "key": "👍", "type": "m.reaction"}])
+
+    @unittest.override_config({"experimental_features": {"msc3440_enabled": True}})
+    def test_redact_parent_thread(self) -> None:
+        """
+        Test that thread replies are still available when the root event is redacted.
+        """
+        channel = self._send_relation(
+            RelationTypes.THREAD,
+            EventTypes.Message,
+            content={"body": "reply 1", "msgtype": "m.text"},
+        )
+        self.assertEqual(200, channel.code, channel.json_body)
+        related_event_id = channel.json_body["event_id"]
+
+        # Redact the root of the thread.
+        self._redact(self.parent_id)
+
+        # The unredacted relation should still exist.
+        event_ids, relations = self._make_relation_requests()
+        self.assertEqual(len(event_ids), 1)
+        self.assertDictContainsSubset(
+            {
+                "count": 1,
+                "current_user_participated": True,
+            },
+            relations[RelationTypes.THREAD],
+        )
+        self.assertEqual(
+            relations[RelationTypes.THREAD]["latest_event"]["event_id"],
+            related_event_id,
+        )

From 52a947dc4603e1bd14916efb8822c4fe58f0d200 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 10 Mar 2022 15:18:31 +0000
Subject: [PATCH 382/474] Updates to the Room DAG concepts development
 document (#12179)

Assorted clarifications that came up while we were talking about #12173.
---
 changelog.d/12179.doc                 |  1 +
 docs/development/room-dag-concepts.md | 71 ++++++++++++++++++++-------
 2 files changed, 54 insertions(+), 18 deletions(-)
 create mode 100644 changelog.d/12179.doc

diff --git a/changelog.d/12179.doc b/changelog.d/12179.doc
new file mode 100644
index 000000000000..55d8caa45a8c
--- /dev/null
+++ b/changelog.d/12179.doc
@@ -0,0 +1 @@
+Updates to the Room DAG concepts development document.
diff --git a/docs/development/room-dag-concepts.md b/docs/development/room-dag-concepts.md
index cbc7cf29491c..3eb4d5acc463 100644
--- a/docs/development/room-dag-concepts.md
+++ b/docs/development/room-dag-concepts.md
@@ -30,37 +30,72 @@ rather than skipping any that arrived late; whereas if you're looking at a
 historical section of timeline (i.e.
`/messages`), you want to see the best representation of the state of the room
 as others were seeing it at the time.

+## Outliers

-## Forward extremity
+We mark an event as an `outlier` when we haven't figured out the state for the
+room at that point in the DAG yet. They are "floating" events that we haven't
+yet correlated to the DAG.

-Most-recent-in-time events in the DAG which are not referenced by any other events' `prev_events` yet.
+Outliers typically arise when we fetch the auth chain or state for a given
+event. When that happens, we just grab the events in the state/auth chain,
+without calculating the state at those events, or backfilling their
+`prev_events`.

-The forward extremities of a room are used as the `prev_events` when the next event is sent.
+So, typically, we won't have the `prev_events` of an `outlier` in the database
+(though it's entirely possible that we *might* have them for some other
+reason). Other things that make outliers different from regular events:

+ * We don't have state for them, so there should be no entry in
+   `event_to_state_groups` for an outlier. (In practice this isn't always
+   the case, though I'm not sure why: see https://github.com/matrix-org/synapse/issues/12201).

-## Backward extremity
+ * We don't record entries for them in the `event_edges`,
+   `event_forward_extremities` or `event_backward_extremities` tables.

-The current marker of where we have backfilled up to and will generally be the
-`prev_events` of the oldest-in-time events we have in the DAG. This gives a starting point when
-backfilling history.
+Since outliers are not tied into the DAG, they do not normally form part of the
+timeline sent down to clients via `/sync` or `/messages`; however there is an
+exception:

-When we persist a non-outlier event, we clear it as a backward extremity and set
-all of its `prev_events` as the new backward extremities if they aren't already
-persisted in the `events` table.
+### Out-of-band membership events

+A special case of outlier events is some membership events for federated rooms
+that we aren't full members of. For example:

-## Outliers
+ * invites received over federation, before we join the room
+ * *rejections* for said invites
+ * knock events for rooms that we would like to join but have not yet joined.

-We mark an event as an `outlier` when we haven't figured out the state for the
-room at that point in the DAG yet.
+In all the above cases, we don't have the state for the room, which is why they
+are treated as outliers. They are a bit special though, in that they are
+proactively sent to clients via `/sync`.

-We won't *necessarily* have the `prev_events` of an `outlier` in the database,
-but it's entirely possible that we *might*.
+## Forward extremity
+
+Most-recent-in-time events in the DAG which are not referenced by any other
+events' `prev_events` yet. (In this definition, outliers, rejected events, and
+soft-failed events don't count.)
+
+The forward extremities of a room (or at least, a subset of them, if there are
+more than ten) are used as the `prev_events` when the next event is sent.
+
+The "current state" of a room (i.e. the state which would be used if we
+generated a new event) is, therefore, the resolution of the room states
+at each of the forward extremities.
+
+## Backward extremity
+
+The current marker of where we have backfilled up to; it will generally be the
+`prev_events` of the oldest-in-time events we have in the DAG. This gives a starting point when
+backfilling history.
-For example, when we fetch the event auth chain or state for a given event, we -mark all of those claimed auth events as outliers because we haven't done the -state calculation ourself. +Note that, unlike forward extremities, we typically don't have any backward +extremity events themselves in the database - or, if we do, they will be "outliers" (see +above). Either way, we don't expect to have the room state at a backward extremity. +When we persist a non-outlier event, if it was previously a backward extremity, +we clear it as a backward extremity and set all of its `prev_events` as the new +backward extremities if they aren't already persisted as non-outliers. This +therefore keeps the backward extremities up-to-date. ## State groups From ea27528b5d177dcfc5a4e38b463baeace916dc8e Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 10 Mar 2022 10:36:13 -0500 Subject: [PATCH 383/474] Support stable identifiers for MSC3440: Threading (#12151) The unstable identifiers are still supported if the experimental configuration flag is enabled. The unstable identifiers will be removed in a future release. --- changelog.d/12151.feature | 1 + synapse/api/constants.py | 4 +- synapse/api/filtering.py | 23 +++--- synapse/events/utils.py | 9 ++- synapse/handlers/message.py | 5 +- synapse/rest/client/versions.py | 1 + synapse/server.py | 2 +- synapse/storage/databases/main/events.py | 5 +- synapse/storage/databases/main/relations.py | 77 +++++++++++++-------- synapse/storage/databases/main/stream.py | 18 ++--- tests/rest/client/test_relations.py | 7 +- tests/rest/client/test_rooms.py | 18 +++-- tests/storage/test_stream.py | 20 +++--- 13 files changed, 109 insertions(+), 81 deletions(-) create mode 100644 changelog.d/12151.feature diff --git a/changelog.d/12151.feature b/changelog.d/12151.feature new file mode 100644 index 000000000000..18432b2da9a5 --- /dev/null +++ b/changelog.d/12151.feature @@ -0,0 +1 @@ +Support the stable identifiers from [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440): threads. diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 36ace7c6134f..b0c08a074d83 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -178,7 +178,9 @@ class RelationTypes: ANNOTATION: Final = "m.annotation" REPLACE: Final = "m.replace" REFERENCE: Final = "m.reference" - THREAD: Final = "io.element.thread" + THREAD: Final = "m.thread" + # TODO Remove this in Synapse >= v1.57.0. + UNSTABLE_THREAD: Final = "io.element.thread" class LimitBlockingTypes: diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index cb532d723828..27e97d6f372d 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -88,7 +88,9 @@ "org.matrix.labels": {"type": "array", "items": {"type": "string"}}, "org.matrix.not_labels": {"type": "array", "items": {"type": "string"}}, # MSC3440, filtering by event relations. 
+ "related_by_senders": {"type": "array", "items": {"type": "string"}}, "io.element.relation_senders": {"type": "array", "items": {"type": "string"}}, + "related_by_rel_types": {"type": "array", "items": {"type": "string"}}, "io.element.relation_types": {"type": "array", "items": {"type": "string"}}, }, } @@ -318,19 +320,18 @@ def __init__(self, hs: "HomeServer", filter_json: JsonDict): self.labels = filter_json.get("org.matrix.labels", None) self.not_labels = filter_json.get("org.matrix.not_labels", []) - # Ideally these would be rejected at the endpoint if they were provided - # and not supported, but that would involve modifying the JSON schema - # based on the homeserver configuration. + self.related_by_senders = self.filter_json.get("related_by_senders", None) + self.related_by_rel_types = self.filter_json.get("related_by_rel_types", None) + + # Fallback to the unstable prefix if the stable version is not given. if hs.config.experimental.msc3440_enabled: - self.relation_senders = self.filter_json.get( + self.related_by_senders = self.related_by_senders or self.filter_json.get( "io.element.relation_senders", None ) - self.relation_types = self.filter_json.get( - "io.element.relation_types", None + self.related_by_rel_types = ( + self.related_by_rel_types + or self.filter_json.get("io.element.relation_types", None) ) - else: - self.relation_senders = None - self.relation_types = None def filters_all_types(self) -> bool: return "*" in self.not_types @@ -461,7 +462,7 @@ async def _check_event_relations( event_ids = [event.event_id for event in events if isinstance(event, EventBase)] # type: ignore[attr-defined] event_ids_to_keep = set( await self._store.events_have_relations( - event_ids, self.relation_senders, self.relation_types + event_ids, self.related_by_senders, self.related_by_rel_types ) ) @@ -474,7 +475,7 @@ async def _check_event_relations( async def filter(self, events: Iterable[FilterEvent]) -> List[FilterEvent]: result = [event for event in events if self._check(event)] - if self.relation_senders or self.relation_types: + if self.related_by_senders or self.related_by_rel_types: return await self._check_event_relations(result) return result diff --git a/synapse/events/utils.py b/synapse/events/utils.py index ee34cb46e437..b2a237c1e04a 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -38,6 +38,7 @@ from . import EventBase if TYPE_CHECKING: + from synapse.server import HomeServer from synapse.storage.databases.main.relations import BundledAggregations @@ -395,6 +396,9 @@ class EventClientSerializer: clients. """ + def __init__(self, hs: "HomeServer"): + self._msc3440_enabled = hs.config.experimental.msc3440_enabled + def serialize_event( self, event: Union[JsonDict, EventBase], @@ -515,11 +519,14 @@ def _inject_bundled_aggregations( thread.latest_event, serialized_latest_event, thread.latest_edit ) - serialized_aggregations[RelationTypes.THREAD] = { + thread_summary = { "latest_event": serialized_latest_event, "count": thread.count, "current_user_participated": thread.current_user_participated, } + serialized_aggregations[RelationTypes.THREAD] = thread_summary + if self._msc3440_enabled: + serialized_aggregations[RelationTypes.UNSTABLE_THREAD] = thread_summary # Include the bundled aggregations in the event. 
if serialized_aggregations: diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 0799ec9a84df..f9544fe7fb83 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1079,7 +1079,10 @@ async def _validate_event_relation(self, event: EventBase) -> None: raise SynapseError(400, "Can't send same reaction twice") # Don't attempt to start a thread if the parent event is a relation. - elif relation_type == RelationTypes.THREAD: + elif ( + relation_type == RelationTypes.THREAD + or relation_type == RelationTypes.UNSTABLE_THREAD + ): if await self.store.event_includes_relation(relates_to): raise SynapseError( 400, "Cannot start threads from an event with a relation" diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 2e5d0e4e2258..9a65aa484360 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -101,6 +101,7 @@ def on_GET(self, request: Request) -> Tuple[int, JsonDict]: "org.matrix.msc3030": self.config.experimental.msc3030_enabled, # Adds support for thread relations, per MSC3440. "org.matrix.msc3440": self.config.experimental.msc3440_enabled, + "org.matrix.msc3440.stable": True, # TODO: remove when "v1.3" is added above }, }, ) diff --git a/synapse/server.py b/synapse/server.py index 1270abb5a335..7741ff29dc3f 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -754,7 +754,7 @@ def get_oidc_handler(self) -> "OidcHandler": @cache_in_self def get_event_client_serializer(self) -> EventClientSerializer: - return EventClientSerializer() + return EventClientSerializer(self) @cache_in_self def get_password_policy_handler(self) -> PasswordPolicyHandler: diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 1a322882bf3f..1f60aef180d0 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1814,7 +1814,10 @@ def _handle_event_relations( if rel_type == RelationTypes.REPLACE: txn.call_after(self.store.get_applicable_edit.invalidate, (parent_id,)) - if rel_type == RelationTypes.THREAD: + if ( + rel_type == RelationTypes.THREAD + or rel_type == RelationTypes.UNSTABLE_THREAD + ): txn.call_after(self.store.get_thread_summary.invalidate, (parent_id,)) # It should be safe to only invalidate the cache if the user has not # previously participated in the thread, but that's difficult (and diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index be1500092b5b..c4869d64e663 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -508,7 +508,7 @@ def _get_thread_summaries_txn( AND parent.room_id = child.room_id WHERE %s - AND relation_type = ? + AND %s ORDER BY parent.event_id, child.topological_ordering DESC, child.stream_ordering DESC """ else: @@ -523,16 +523,22 @@ def _get_thread_summaries_txn( AND parent.room_id = child.room_id WHERE %s - AND relation_type = ? + AND %s ORDER BY child.topological_ordering DESC, child.stream_ordering DESC """ clause, args = make_in_list_sql_clause( txn.database_engine, "relates_to_id", event_ids ) - args.append(RelationTypes.THREAD) - txn.execute(sql % (clause,), args) + if self._msc3440_enabled: + relations_clause = "(relation_type = ? OR relation_type = ?)" + args.extend((RelationTypes.THREAD, RelationTypes.UNSTABLE_THREAD)) + else: + relations_clause = "relation_type = ?" 
+ args.append(RelationTypes.THREAD) + + txn.execute(sql % (clause, relations_clause), args) latest_event_ids = {} for parent_event_id, child_event_id in txn: # Only consider the latest threaded reply (by topological ordering). @@ -552,7 +558,7 @@ def _get_thread_summaries_txn( AND parent.room_id = child.room_id WHERE %s - AND relation_type = ? + AND %s GROUP BY parent.event_id """ @@ -561,9 +567,15 @@ def _get_thread_summaries_txn( clause, args = make_in_list_sql_clause( txn.database_engine, "relates_to_id", latest_event_ids.keys() ) - args.append(RelationTypes.THREAD) - txn.execute(sql % (clause,), args) + if self._msc3440_enabled: + relations_clause = "(relation_type = ? OR relation_type = ?)" + args.extend((RelationTypes.THREAD, RelationTypes.UNSTABLE_THREAD)) + else: + relations_clause = "relation_type = ?" + args.append(RelationTypes.THREAD) + + txn.execute(sql % (clause, relations_clause), args) counts = dict(cast(List[Tuple[str, int]], txn.fetchall())) return counts, latest_event_ids @@ -626,16 +638,24 @@ def _get_thread_summary_txn(txn: LoggingTransaction) -> Set[str]: AND parent.room_id = child.room_id WHERE %s - AND relation_type = ? + AND %s AND child.sender = ? """ clause, args = make_in_list_sql_clause( txn.database_engine, "relates_to_id", event_ids ) - args.extend((RelationTypes.THREAD, user_id)) - txn.execute(sql % (clause,), args) + if self._msc3440_enabled: + relations_clause = "(relation_type = ? OR relation_type = ?)" + args.extend((RelationTypes.THREAD, RelationTypes.UNSTABLE_THREAD)) + else: + relations_clause = "relation_type = ?" + args.append(RelationTypes.THREAD) + + args.append(user_id) + + txn.execute(sql % (clause, relations_clause), args) return {row[0] for row in txn.fetchall()} participated_threads = await self.db_pool.runInteraction( @@ -834,26 +854,23 @@ async def get_bundled_aggregations( results.setdefault(event_id, BundledAggregations()).replace = edit # Fetch thread summaries. - if self._msc3440_enabled: - summaries = await self._get_thread_summaries(events_by_id.keys()) - # Only fetch participated for a limited selection based on what had - # summaries. - participated = await self._get_threads_participated( - summaries.keys(), user_id - ) - for event_id, summary in summaries.items(): - if summary: - thread_count, latest_thread_event, edit = summary - results.setdefault( - event_id, BundledAggregations() - ).thread = _ThreadAggregation( - latest_event=latest_thread_event, - latest_edit=edit, - count=thread_count, - # If there's a thread summary it must also exist in the - # participated dictionary. - current_user_participated=participated[event_id], - ) + summaries = await self._get_thread_summaries(events_by_id.keys()) + # Only fetch participated for a limited selection based on what had + # summaries. + participated = await self._get_threads_participated(summaries.keys(), user_id) + for event_id, summary in summaries.items(): + if summary: + thread_count, latest_thread_event, edit = summary + results.setdefault( + event_id, BundledAggregations() + ).thread = _ThreadAggregation( + latest_event=latest_thread_event, + latest_edit=edit, + count=thread_count, + # If there's a thread summary it must also exist in the + # participated dictionary. 
+ current_user_participated=participated[event_id], + ) return results diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index a898f847e7d5..39e1efe37348 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -325,21 +325,23 @@ def filter_to_clause(event_filter: Optional[Filter]) -> Tuple[str, List[str]]: args.extend(event_filter.labels) # Filter on relation_senders / relation types from the joined tables. - if event_filter.relation_senders: + if event_filter.related_by_senders: clauses.append( "(%s)" % " OR ".join( - "related_event.sender = ?" for _ in event_filter.relation_senders + "related_event.sender = ?" for _ in event_filter.related_by_senders ) ) - args.extend(event_filter.relation_senders) + args.extend(event_filter.related_by_senders) - if event_filter.relation_types: + if event_filter.related_by_rel_types: clauses.append( "(%s)" - % " OR ".join("relation_type = ?" for _ in event_filter.relation_types) + % " OR ".join( + "relation_type = ?" for _ in event_filter.related_by_rel_types + ) ) - args.extend(event_filter.relation_types) + args.extend(event_filter.related_by_rel_types) return " AND ".join(clauses), args @@ -1203,7 +1205,7 @@ def _paginate_room_events_txn( # If there is a filter on relation_senders and relation_types join to the # relations table. if event_filter and ( - event_filter.relation_senders or event_filter.relation_types + event_filter.related_by_senders or event_filter.related_by_rel_types ): # Filtering by relations could cause the same event to appear multiple # times (since there's no limit on the number of relations to an event). @@ -1211,7 +1213,7 @@ def _paginate_room_events_txn( join_clause += """ LEFT JOIN event_relations AS relation ON (event.event_id = relation.relates_to_id) """ - if event_filter.relation_senders: + if event_filter.related_by_senders: join_clause += """ LEFT JOIN events AS related_event ON (relation.event_id = related_event.event_id) """ diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index f9ae6e663f95..0cbe6c0cf754 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -547,9 +547,7 @@ def test_aggregation_must_be_annotation(self) -> None: ) self.assertEqual(400, channel.code, channel.json_body) - @unittest.override_config( - {"experimental_features": {"msc3440_enabled": True, "msc3666_enabled": True}} - ) + @unittest.override_config({"experimental_features": {"msc3666_enabled": True}}) def test_bundled_aggregations(self) -> None: """ Test that annotations, references, and threads get correctly bundled. @@ -758,7 +756,6 @@ def test_aggregation_get_event_for_thread(self) -> None: }, ) - @unittest.override_config({"experimental_features": {"msc3440_enabled": True}}) def test_ignore_invalid_room(self) -> None: """Test that we ignore invalid relations over federation.""" # Create another room and send a message in it. 
@@ -1065,7 +1062,6 @@ def test_edit_reply(self) -> None: {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict ) - @unittest.override_config({"experimental_features": {"msc3440_enabled": True}}) def test_edit_thread(self) -> None: """Test that editing a thread works.""" @@ -1383,7 +1379,6 @@ def test_redact_relation_annotation(self) -> None: chunk = self._get_aggregations() self.assertEqual(chunk, [{"type": "m.reaction", "key": "a", "count": 1}]) - @unittest.override_config({"experimental_features": {"msc3440_enabled": True}}) def test_redact_relation_thread(self) -> None: """ Test that thread replies are properly handled after the thread reply redacted. diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index 37866ee330f3..3a9617d6da8e 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -2141,21 +2141,19 @@ def _filter_messages(self, filter: JsonDict) -> List[JsonDict]: def test_filter_relation_senders(self) -> None: # Messages which second user reacted to. - filter = {"io.element.relation_senders": [self.second_user_id]} + filter = {"related_by_senders": [self.second_user_id]} chunk = self._filter_messages(filter) self.assertEqual(len(chunk), 1, chunk) self.assertEqual(chunk[0]["event_id"], self.event_id_1) # Messages which third user reacted to. - filter = {"io.element.relation_senders": [self.third_user_id]} + filter = {"related_by_senders": [self.third_user_id]} chunk = self._filter_messages(filter) self.assertEqual(len(chunk), 1, chunk) self.assertEqual(chunk[0]["event_id"], self.event_id_2) # Messages which either user reacted to. - filter = { - "io.element.relation_senders": [self.second_user_id, self.third_user_id] - } + filter = {"related_by_senders": [self.second_user_id, self.third_user_id]} chunk = self._filter_messages(filter) self.assertEqual(len(chunk), 2, chunk) self.assertCountEqual( @@ -2164,20 +2162,20 @@ def test_filter_relation_senders(self) -> None: def test_filter_relation_type(self) -> None: # Messages which have annotations. - filter = {"io.element.relation_types": [RelationTypes.ANNOTATION]} + filter = {"related_by_rel_types": [RelationTypes.ANNOTATION]} chunk = self._filter_messages(filter) self.assertEqual(len(chunk), 1, chunk) self.assertEqual(chunk[0]["event_id"], self.event_id_1) # Messages which have references. - filter = {"io.element.relation_types": [RelationTypes.REFERENCE]} + filter = {"related_by_rel_types": [RelationTypes.REFERENCE]} chunk = self._filter_messages(filter) self.assertEqual(len(chunk), 1, chunk) self.assertEqual(chunk[0]["event_id"], self.event_id_2) # Messages which have either annotations or references. filter = { - "io.element.relation_types": [ + "related_by_rel_types": [ RelationTypes.ANNOTATION, RelationTypes.REFERENCE, ] @@ -2191,8 +2189,8 @@ def test_filter_relation_type(self) -> None: def test_filter_relation_senders_and_type(self) -> None: # Messages which second user reacted to. 
filter = { - "io.element.relation_senders": [self.second_user_id], - "io.element.relation_types": [RelationTypes.ANNOTATION], + "related_by_senders": [self.second_user_id], + "related_by_rel_types": [RelationTypes.ANNOTATION], } chunk = self._filter_messages(filter) self.assertEqual(len(chunk), 1, chunk) diff --git a/tests/storage/test_stream.py b/tests/storage/test_stream.py index 6a1cf3305455..eaa0d7d749b7 100644 --- a/tests/storage/test_stream.py +++ b/tests/storage/test_stream.py @@ -129,21 +129,19 @@ def _filter_messages(self, filter: JsonDict) -> List[EventBase]: def test_filter_relation_senders(self): # Messages which second user reacted to. - filter = {"io.element.relation_senders": [self.second_user_id]} + filter = {"related_by_senders": [self.second_user_id]} chunk = self._filter_messages(filter) self.assertEqual(len(chunk), 1, chunk) self.assertEqual(chunk[0].event_id, self.event_id_1) # Messages which third user reacted to. - filter = {"io.element.relation_senders": [self.third_user_id]} + filter = {"related_by_senders": [self.third_user_id]} chunk = self._filter_messages(filter) self.assertEqual(len(chunk), 1, chunk) self.assertEqual(chunk[0].event_id, self.event_id_2) # Messages which either user reacted to. - filter = { - "io.element.relation_senders": [self.second_user_id, self.third_user_id] - } + filter = {"related_by_senders": [self.second_user_id, self.third_user_id]} chunk = self._filter_messages(filter) self.assertEqual(len(chunk), 2, chunk) self.assertCountEqual( @@ -152,20 +150,20 @@ def test_filter_relation_senders(self): def test_filter_relation_type(self): # Messages which have annotations. - filter = {"io.element.relation_types": [RelationTypes.ANNOTATION]} + filter = {"related_by_rel_types": [RelationTypes.ANNOTATION]} chunk = self._filter_messages(filter) self.assertEqual(len(chunk), 1, chunk) self.assertEqual(chunk[0].event_id, self.event_id_1) # Messages which have references. - filter = {"io.element.relation_types": [RelationTypes.REFERENCE]} + filter = {"related_by_rel_types": [RelationTypes.REFERENCE]} chunk = self._filter_messages(filter) self.assertEqual(len(chunk), 1, chunk) self.assertEqual(chunk[0].event_id, self.event_id_2) # Messages which have either annotations or references. filter = { - "io.element.relation_types": [ + "related_by_rel_types": [ RelationTypes.ANNOTATION, RelationTypes.REFERENCE, ] @@ -179,8 +177,8 @@ def test_filter_relation_type(self): def test_filter_relation_senders_and_type(self): # Messages which second user reacted to. filter = { - "io.element.relation_senders": [self.second_user_id], - "io.element.relation_types": [RelationTypes.ANNOTATION], + "related_by_senders": [self.second_user_id], + "related_by_rel_types": [RelationTypes.ANNOTATION], } chunk = self._filter_messages(filter) self.assertEqual(len(chunk), 1, chunk) @@ -201,7 +199,7 @@ def test_duplicate_relation(self): tok=self.second_tok, ) - filter = {"io.element.relation_senders": [self.second_user_id]} + filter = {"related_by_senders": [self.second_user_id]} chunk = self._filter_messages(filter) self.assertEqual(len(chunk), 1, chunk) self.assertEqual(chunk[0].event_id, self.event_id_1) From 72e7f1c420b879a0a1ef1430771698b868693ab0 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Thu, 10 Mar 2022 15:53:23 +0000 Subject: [PATCH 384/474] Remove workaround introduced in Synapse v1.50.0rc1 for Mjolnir compatibility. Breaks compatibility with Mjolnir v1.3.1 and earlier. 
(#11700)

---
 changelog.d/11700.removal | 1 +
 docs/upgrade.md           | 8 ++++++++
 synapse/util/__init__.py  | 7 -------
 3 files changed, 9 insertions(+), 7 deletions(-)
 create mode 100644 changelog.d/11700.removal

diff --git a/changelog.d/11700.removal b/changelog.d/11700.removal
new file mode 100644
index 000000000000..d3d3c48f0fc4
--- /dev/null
+++ b/changelog.d/11700.removal
@@ -0,0 +1 @@
+Remove workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. Breaks compatibility with Mjolnir 1.3.1 and earlier.
diff --git a/docs/upgrade.md b/docs/upgrade.md
index 0d0bb066ee63..95005962dc49 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -106,6 +106,14 @@ You will need to ensure `synctl` is on your `PATH`.
     automatically, though you might need to activate a virtual environment
     depending on how you installed Synapse.

+
+## Compatibility dropped for Mjolnir 1.3.1 and earlier
+
+Synapse v1.55.0 drops support for Mjolnir 1.3.1 and earlier.
+If you use the Mjolnir module to moderate your homeserver,
+please upgrade Mjolnir to version 1.3.2 or later before upgrading Synapse.
+
+
 # Upgrading to v1.54.0

 ## Legacy structured logging configuration removal
diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py
index 58b4220ff355..d8046b7553cb 100644
--- a/synapse/util/__init__.py
+++ b/synapse/util/__init__.py
@@ -31,13 +31,6 @@
 if typing.TYPE_CHECKING:
     pass

-# FIXME Mjolnir imports glob_to_regex from this file, but it was moved to
-# matrix_common.
-# As a temporary workaround, we import glob_to_regex here for
-# compatibility with current versions of Mjolnir.
-# See https://github.com/matrix-org/mjolnir/pull/174
-from matrix_common.regex import glob_to_regex  # noqa
-
 logger = logging.getLogger(__name__)

From ed9aea42fa991428406be96a67c311a8f9cec544 Mon Sep 17 00:00:00 2001
From: Shay
Date: Thu, 10 Mar 2022 09:40:07 -0800
Subject: [PATCH 385/474] Fix misleading comment in `check_event_for_spam`
 (#12203)

---
 changelog.d/12203.misc      | 1 +
 synapse/events/spamcheck.py | 4 ++--
 2 files changed, 3 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/12203.misc

diff --git a/changelog.d/12203.misc b/changelog.d/12203.misc
new file mode 100644
index 000000000000..892dc5bfb7e3
--- /dev/null
+++ b/changelog.d/12203.misc
@@ -0,0 +1 @@
+Fix a misleading comment in the function `check_event_for_spam`.
diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py
index 04afd48274e1..60904a55f5c0 100644
--- a/synapse/events/spamcheck.py
+++ b/synapse/events/spamcheck.py
@@ -245,8 +245,8 @@ async def check_event_for_spam(
         """Checks if a given event is considered "spammy" by this server.

         If the server considers an event spammy, then it will be rejected if
-        sent by a local user. If it is sent by a user on another server, then
-        users receive a blank event.
+        sent by a local user. If it is sent by a user on another server, the
+        event is soft-failed.

         Args:
             event: the event to be checked

From 7577894bec78a063f3e85ec7d386a58a0c60fb11 Mon Sep 17 00:00:00 2001
From: ~creme
Date: Thu, 10 Mar 2022 19:15:19 +0100
Subject: [PATCH 386/474] Document that most streams can only have a single
 writer. (#12196)

This includes the `typing`, `to_device`, `account_data`, `receipts`, and
`presence` streams (really anything except the `events` stream).
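As a companion to the `check_event_for_spam` docstring fixed above, a homeserver module might register such a callback roughly as follows. This is a hedged sketch: the class name, config key and banned-phrase rule are invented for illustration; only `ModuleApi.register_spam_checker_callbacks` and the `Union[bool, str]` return convention (where `True` or a string marks the event as spam) are taken from Synapse's module API of this era.

```python
from typing import Union

from synapse.events import EventBase
from synapse.module_api import ModuleApi


class ExampleSpamChecker:
    """Toy spam checker: flags events whose body contains a banned phrase."""

    def __init__(self, config: dict, api: ModuleApi):
        self._banned_phrase = config.get("banned_phrase", "buy now")
        api.register_spam_checker_callbacks(
            check_event_for_spam=self.check_event_for_spam
        )

    async def check_event_for_spam(self, event: EventBase) -> Union[bool, str]:
        body = event.content.get("body", "")
        if self._banned_phrase in body:
            # Per the clarified docstring: a spammy event from a local user
            # is rejected, while one from a remote user is soft-failed.
            return "contains a banned phrase"
        return False
```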
--- changelog.d/12196.doc | 1 + docs/workers.md | 31 +++++++++++++++++-------------- 2 files changed, 18 insertions(+), 14 deletions(-) create mode 100644 changelog.d/12196.doc diff --git a/changelog.d/12196.doc b/changelog.d/12196.doc new file mode 100644 index 000000000000..269f06aa3386 --- /dev/null +++ b/changelog.d/12196.doc @@ -0,0 +1 @@ +Document that the `typing`, `to_device`, `account_data`, `receipts`, and `presence` stream writer can only be used on a single worker. \ No newline at end of file diff --git a/docs/workers.md b/docs/workers.md index b0f8599ef062..8751134e654d 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -351,8 +351,11 @@ is only supported with Redis-based replication.) To enable this, the worker must have a HTTP replication listener configured, have a `worker_name` and be listed in the `instance_map` config. The same worker -can handle multiple streams. For example, to move event persistence off to a -dedicated worker, the shared configuration would include: +can handle multiple streams, but unless otherwise documented, each stream can only +have a single writer. + +For example, to move event persistence off to a dedicated worker, the shared +configuration would include: ```yaml instance_map: @@ -370,8 +373,8 @@ streams and the endpoints associated with them: ##### The `events` stream -The `events` stream also experimentally supports having multiple writers, where -work is sharded between them by room ID. Note that you *must* restart all worker +The `events` stream experimentally supports having multiple writers, where work +is sharded between them by room ID. Note that you *must* restart all worker instances when adding or removing event persisters. An example `stream_writers` configuration with multiple writers: @@ -384,38 +387,38 @@ stream_writers: ##### The `typing` stream -The following endpoints should be routed directly to the workers configured as -stream writers for the `typing` stream: +The following endpoints should be routed directly to the worker configured as +the stream writer for the `typing` stream: ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing ##### The `to_device` stream -The following endpoints should be routed directly to the workers configured as -stream writers for the `to_device` stream: +The following endpoints should be routed directly to the worker configured as +the stream writer for the `to_device` stream: ^/_matrix/client/(api/v1|r0|v3|unstable)/sendToDevice/ ##### The `account_data` stream -The following endpoints should be routed directly to the workers configured as -stream writers for the `account_data` stream: +The following endpoints should be routed directly to the worker configured as +the stream writer for the `account_data` stream: ^/_matrix/client/(api/v1|r0|v3|unstable)/.*/tags ^/_matrix/client/(api/v1|r0|v3|unstable)/.*/account_data ##### The `receipts` stream -The following endpoints should be routed directly to the workers configured as -stream writers for the `receipts` stream: +The following endpoints should be routed directly to the worker configured as +the stream writer for the `receipts` stream: ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/receipt ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/read_markers ##### The `presence` stream -The following endpoints should be routed directly to the workers configured as -stream writers for the `presence` stream: +The following endpoints should be routed directly to the worker configured as +the stream writer for the `presence` stream: 
^/_matrix/client/(api/v1|r0|v3|unstable)/presence/ From 483f2aa2eca98500046847364ede04b034530aac Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 11 Mar 2022 10:33:49 +0000 Subject: [PATCH 387/474] Retention test: avoid relying on state at purged events (#12202) This test was relying on poking events which weren't in the database into filter_events_for_client. --- changelog.d/12202.misc | 1 + tests/rest/client/test_retention.py | 29 +++++++++++++++++------------ 2 files changed, 18 insertions(+), 12 deletions(-) create mode 100644 changelog.d/12202.misc diff --git a/changelog.d/12202.misc b/changelog.d/12202.misc new file mode 100644 index 000000000000..9f333e718a86 --- /dev/null +++ b/changelog.d/12202.misc @@ -0,0 +1 @@ +Avoid trying to calculate the state at outlier events. diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py index f3bf8d0934e6..7b8fe6d02522 100644 --- a/tests/rest/client/test_retention.py +++ b/tests/rest/client/test_retention.py @@ -24,6 +24,7 @@ from synapse.visibility import filter_events_for_client from tests import unittest +from tests.unittest import override_config one_hour_ms = 3600000 one_day_ms = one_hour_ms * 24 @@ -38,7 +39,10 @@ class RetentionTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() - config["retention"] = { + + # merge this default retention config with anything that was specified in + # @override_config + retention_config = { "enabled": True, "default_policy": { "min_lifetime": one_day_ms, @@ -47,6 +51,8 @@ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: "allowed_lifetime_min": one_day_ms, "allowed_lifetime_max": one_day_ms * 3, } + retention_config.update(config.get("retention", {})) + config["retention"] = retention_config self.hs = self.setup_test_homeserver(config=config) @@ -115,22 +121,20 @@ def test_retention_event_purged_without_state_event(self) -> None: self._test_retention_event_purged(room_id, one_day_ms * 2) + @override_config({"retention": {"purge_jobs": [{"interval": "5d"}]}}) def test_visibility(self) -> None: """Tests that synapse.visibility.filter_events_for_client correctly filters out - outdated events + outdated events, even if the purge job hasn't got to them yet. + + We do this by setting a very long time between purge jobs. """ store = self.hs.get_datastores().main storage = self.hs.get_storage() room_id = self.helper.create_room_as(self.user_id, tok=self.token) - events = [] # Send a first event, which should be filtered out at the end of the test. resp = self.helper.send(room_id=room_id, body="1", tok=self.token) - - # Get the event from the store so that we end up with a FrozenEvent that we can - # give to filter_events_for_client. We need to do this now because the event won't - # be in the database anymore after it has expired. - events.append(self.get_success(store.get_event(resp.get("event_id")))) + first_event_id = resp.get("event_id") # Advance the time by 2 days. We're using the default retention policy, therefore # after this the first event will still be valid. @@ -138,16 +142,17 @@ def test_visibility(self) -> None: # Send another event, which shouldn't get filtered out. resp = self.helper.send(room_id=room_id, body="2", tok=self.token) - valid_event_id = resp.get("event_id") - events.append(self.get_success(store.get_event(valid_event_id))) - # Advance the time by another 2 days. 
After this, the first event should be # outdated but not the second one. self.reactor.advance(one_day_ms * 2 / 1000) - # Run filter_events_for_client with our list of FrozenEvents. + # Fetch the events, and run filter_events_for_client on them + events = self.get_success( + store.get_events_as_list([first_event_id, valid_event_id]) + ) + self.assertEqual(2, len(events), "events retrieved from database") filtered_events = self.get_success( filter_events_for_client(storage, self.user_id, events) ) From 3b12f6d61b3b10b57e7b3a45f1d7a96f9790d674 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 11 Mar 2022 11:10:20 +0000 Subject: [PATCH 388/474] Note that contributors can sign off privately (#12204) Co-authored-by: Patrick Cloke --- changelog.d/12204.doc | 1 + docs/development/contributing_guide.md | 11 +++++++++++ 2 files changed, 12 insertions(+) create mode 100644 changelog.d/12204.doc diff --git a/changelog.d/12204.doc b/changelog.d/12204.doc new file mode 100644 index 000000000000..c4b2805bb112 --- /dev/null +++ b/changelog.d/12204.doc @@ -0,0 +1 @@ +Document that contributors can sign off privately by email. diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index 8448685952dc..071202e1965f 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -458,6 +458,17 @@ Git allows you to add this signoff automatically when using the `-s` flag to `git commit`, which uses the name and email set in your `user.name` and `user.email` git configs. +### Private Sign off + +If you would like to provide your legal name privately to the Matrix.org +Foundation (instead of in a public commit or comment), you can do so +by emailing your legal name and a link to the pull request to +[dco@matrix.org](mailto:dco@matrix.org?subject=Private%20sign%20off). +It helps to include "sign off" or similar in the subject line. You will then +be instructed further. + +Once private sign off is complete, doing so for future contributions will not +be required. # 10. Turn feedback into better code. From bc9dff1d9597251a15a15475cb8e8194b2d14910 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 11 Mar 2022 07:06:21 -0500 Subject: [PATCH 389/474] Remove unnecessary pass statements. (#12206) --- changelog.d/12206.misc | 1 + synapse/handlers/device.py | 2 -- synapse/handlers/presence.py | 2 -- synapse/http/matrixfederationclient.py | 2 -- synapse/http/server.py | 1 - synapse/rest/media/v1/_base.py | 1 - synapse/server.py | 1 - synapse/storage/databases/main/registration.py | 2 -- synapse/storage/schema/main/delta/30/as_users.py | 1 - synapse/util/caches/treecache.py | 2 -- tests/handlers/test_password_providers.py | 1 - 11 files changed, 1 insertion(+), 15 deletions(-) create mode 100644 changelog.d/12206.misc diff --git a/changelog.d/12206.misc b/changelog.d/12206.misc new file mode 100644 index 000000000000..df59bb56cdb8 --- /dev/null +++ b/changelog.d/12206.misc @@ -0,0 +1 @@ +Remove unnecessary `pass` statements. 
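For context on the cleanup that follows: `pass` is a no-op statement that is only needed where a block would otherwise be empty, and every removal in this patch targets a branch, class body or handler that already contains at least one statement. A small self-contained illustration (the names and data are invented):

```python
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

DEVICES = {"alice": "ABCDEFGH"}


def lookup_device(user: str) -> None:
    try:
        logger.info("found device %s for %s", DEVICES[user], user)
    except KeyError:
        logger.warning("no such user: %s", user)
        # A trailing `pass` here would be dead code: the except branch
        # already contains a statement, so removing it (as the diffs below
        # do throughout Synapse) changes no behaviour.


lookup_device("bob")
```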
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index d90cb259a65c..d5ccaa0c37cc 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -371,7 +371,6 @@ async def delete_device(self, user_id: str, device_id: str) -> None: log_kv( {"reason": "User doesn't have device id.", "device_id": device_id} ) - pass else: raise @@ -414,7 +413,6 @@ async def delete_devices(self, user_id: str, device_ids: List[str]) -> None: # no match set_tag("error", True) set_tag("reason", "User doesn't have that device id.") - pass else: raise diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 9927a30e6ed5..34d9411bbf61 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -267,7 +267,6 @@ async def update_external_syncs_row( is_syncing: Whether or not the user is now syncing sync_time_msec: Time in ms when the user was last syncing """ - pass async def update_external_syncs_clear(self, process_id: str) -> None: """Marks all users that had been marked as syncing by a given process @@ -277,7 +276,6 @@ async def update_external_syncs_clear(self, process_id: str) -> None: This is a no-op when presence is handled by a different worker. """ - pass async def process_replication_rows( self, stream_name: str, instance_name: str, token: int, rows: list diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 40bf1e06d602..6b98d865f5bb 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -120,7 +120,6 @@ def finish(self) -> T: """Called when response has finished streaming and the parser should return the final result (or error). """ - pass @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -601,7 +600,6 @@ async def _send_request( response.code, response_phrase, ) - pass else: logger.info( "{%s} [%s] Got response headers: %d %s", diff --git a/synapse/http/server.py b/synapse/http/server.py index 09b412548968..31ca84188975 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -233,7 +233,6 @@ def register_paths( servlet_classname (str): The name of the handler to be used in prometheus and opentracing logs. """ - pass class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta): diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py index 9b40fd8a6c23..c35d42fab89d 100644 --- a/synapse/rest/media/v1/_base.py +++ b/synapse/rest/media/v1/_base.py @@ -298,7 +298,6 @@ def write_to_consumer(self, consumer: IConsumer) -> Awaitable: Returns: Resolves once the response has finished being written """ - pass def __enter__(self) -> None: pass diff --git a/synapse/server.py b/synapse/server.py index 7741ff29dc3f..2fcf18a7a69a 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -328,7 +328,6 @@ def start_listening(self) -> None: Does nothing in this base class; overridden in derived classes to start the appropriate listeners. 
""" - pass def setup_background_tasks(self) -> None: """ diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index dc6665237abf..a698d10cc535 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -48,8 +48,6 @@ class ExternalIDReuseException(Exception): """Exception if writing an external id for a user fails, because this external id is given to an other user.""" - pass - @attr.s(frozen=True, slots=True, auto_attribs=True) class TokenLookupResult: diff --git a/synapse/storage/schema/main/delta/30/as_users.py b/synapse/storage/schema/main/delta/30/as_users.py index 22a7901e15df..4b4b166e37a6 100644 --- a/synapse/storage/schema/main/delta/30/as_users.py +++ b/synapse/storage/schema/main/delta/30/as_users.py @@ -36,7 +36,6 @@ def run_upgrade(cur, database_engine, config, *args, **kwargs): config_files = config.appservice.app_service_config_files except AttributeError: logger.warning("Could not get app_service_config_files from config") - pass appservices = load_appservices(config.server.server_name, config_files) diff --git a/synapse/util/caches/treecache.py b/synapse/util/caches/treecache.py index 563845f86769..e78305f78746 100644 --- a/synapse/util/caches/treecache.py +++ b/synapse/util/caches/treecache.py @@ -22,8 +22,6 @@ class TreeCacheNode(dict): leaves. """ - pass - class TreeCache: """ diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py index 49d832de814d..d401fda93855 100644 --- a/tests/handlers/test_password_providers.py +++ b/tests/handlers/test_password_providers.py @@ -124,7 +124,6 @@ def __init__(self, config, api: ModuleApi): ("m.login.password", ("password",)): self.check_auth, } ) - pass def check_auth(self, *args): return mock_password_provider.check_auth(*args) From e10a2fe0c28ec9206c0e2275df492f61ff5025f2 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 11 Mar 2022 07:07:15 -0500 Subject: [PATCH 390/474] Add some type hints to the tests.handlers module. (#12207) --- changelog.d/12108.misc | 2 +- changelog.d/12146.misc | 2 +- changelog.d/12207.misc | 1 + tests/handlers/test_admin.py | 18 +++--- tests/handlers/test_auth.py | 34 +++++----- tests/handlers/test_deactivate_account.py | 2 +- tests/handlers/test_device.py | 76 ++++++++++++----------- 7 files changed, 74 insertions(+), 61 deletions(-) create mode 100644 changelog.d/12207.misc diff --git a/changelog.d/12108.misc b/changelog.d/12108.misc index 0360dbd61edc..b67a701dbb52 100644 --- a/changelog.d/12108.misc +++ b/changelog.d/12108.misc @@ -1 +1 @@ -Add type hints to `tests/rest/client`. +Add type hints to tests files. diff --git a/changelog.d/12146.misc b/changelog.d/12146.misc index 3ca7c47212fd..b67a701dbb52 100644 --- a/changelog.d/12146.misc +++ b/changelog.d/12146.misc @@ -1 +1 @@ -Add type hints to `tests/rest`. +Add type hints to tests files. diff --git a/changelog.d/12207.misc b/changelog.d/12207.misc new file mode 100644 index 000000000000..b67a701dbb52 --- /dev/null +++ b/changelog.d/12207.misc @@ -0,0 +1 @@ +Add type hints to tests files. 
diff --git a/tests/handlers/test_admin.py b/tests/handlers/test_admin.py
index abf2a0fe0dc5..c1579dac610f 100644
--- a/tests/handlers/test_admin.py
+++ b/tests/handlers/test_admin.py
@@ -15,11 +15,15 @@
 from collections import Counter
 from unittest.mock import Mock

+from twisted.test.proto_helpers import MemoryReactor
+
 import synapse.rest.admin
 import synapse.storage
 from synapse.api.constants import EventTypes, JoinRules
 from synapse.api.room_versions import RoomVersions
 from synapse.rest.client import knock, login, room
+from synapse.server import HomeServer
+from synapse.util import Clock

 from tests import unittest

@@ -32,7 +36,7 @@ class ExfiltrateData(unittest.HomeserverTestCase):
         knock.register_servlets,
     ]

-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.admin_handler = hs.get_admin_handler()

         self.user1 = self.register_user("user1", "password")
@@ -41,7 +45,7 @@ def prepare(self, reactor, clock, hs):
         self.user2 = self.register_user("user2", "password")
         self.token2 = self.login("user2", "password")

-    def test_single_public_joined_room(self):
+    def test_single_public_joined_room(self) -> None:
         """Test that we write *all* events for a public room"""
         room_id = self.helper.create_room_as(
             self.user1, tok=self.token1, is_public=True
@@ -74,7 +78,7 @@ def test_single_public_joined_room(self):
         self.assertEqual(counter[(EventTypes.Member, self.user1)], 1)
         self.assertEqual(counter[(EventTypes.Member, self.user2)], 1)

-    def test_single_private_joined_room(self):
+    def test_single_private_joined_room(self) -> None:
         """Tests that we correctly write state when we can't see all events in
         a room.
         """
@@ -112,7 +116,7 @@ def test_single_private_joined_room(self):
         self.assertEqual(counter[(EventTypes.Member, self.user1)], 1)
         self.assertEqual(counter[(EventTypes.Member, self.user2)], 1)

-    def test_single_left_room(self):
+    def test_single_left_room(self) -> None:
         """Tests that we don't see events in the room after we leave."""
         room_id = self.helper.create_room_as(self.user1, tok=self.token1)
         self.helper.send(room_id, body="Hello!", tok=self.token1)
@@ -144,7 +148,7 @@ def test_single_left_room(self):
         self.assertEqual(counter[(EventTypes.Member, self.user1)], 1)
         self.assertEqual(counter[(EventTypes.Member, self.user2)], 2)

-    def test_single_left_rejoined_private_room(self):
+    def test_single_left_rejoined_private_room(self) -> None:
         """Tests that we see the correct events in private rooms when we
         repeatedly join and leave.
""" @@ -185,7 +189,7 @@ def test_single_left_rejoined_private_room(self): self.assertEqual(counter[(EventTypes.Member, self.user1)], 1) self.assertEqual(counter[(EventTypes.Member, self.user2)], 3) - def test_invite(self): + def test_invite(self) -> None: """Tests that pending invites get handled correctly.""" room_id = self.helper.create_room_as(self.user1, tok=self.token1) self.helper.send(room_id, body="Hello!", tok=self.token1) @@ -204,7 +208,7 @@ def test_invite(self): self.assertEqual(args[1].content["membership"], "invite") self.assertTrue(args[2]) # Assert there is at least one bit of state - def test_knock(self): + def test_knock(self) -> None: """Tests that knock get handled correctly.""" # create a knockable v7 room room_id = self.helper.create_room_as( diff --git a/tests/handlers/test_auth.py b/tests/handlers/test_auth.py index 0c6e55e72592..67a7829769b6 100644 --- a/tests/handlers/test_auth.py +++ b/tests/handlers/test_auth.py @@ -15,8 +15,12 @@ import pymacaroons +from twisted.test.proto_helpers import MemoryReactor + from synapse.api.errors import AuthError, ResourceLimitError from synapse.rest import admin +from synapse.server import HomeServer +from synapse.util import Clock from tests import unittest from tests.test_utils import make_awaitable @@ -27,7 +31,7 @@ class AuthTestCase(unittest.HomeserverTestCase): admin.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.auth_handler = hs.get_auth_handler() self.macaroon_generator = hs.get_macaroon_generator() @@ -42,23 +46,23 @@ def prepare(self, reactor, clock, hs): self.user1 = self.register_user("a_user", "pass") - def test_macaroon_caveats(self): + def test_macaroon_caveats(self) -> None: token = self.macaroon_generator.generate_guest_access_token("a_user") macaroon = pymacaroons.Macaroon.deserialize(token) - def verify_gen(caveat): + def verify_gen(caveat: str) -> bool: return caveat == "gen = 1" - def verify_user(caveat): + def verify_user(caveat: str) -> bool: return caveat == "user_id = a_user" - def verify_type(caveat): + def verify_type(caveat: str) -> bool: return caveat == "type = access" - def verify_nonce(caveat): + def verify_nonce(caveat: str) -> bool: return caveat.startswith("nonce =") - def verify_guest(caveat): + def verify_guest(caveat: str) -> bool: return caveat == "guest = true" v = pymacaroons.Verifier() @@ -69,7 +73,7 @@ def verify_guest(caveat): v.satisfy_general(verify_guest) v.verify(macaroon, self.hs.config.key.macaroon_secret_key) - def test_short_term_login_token_gives_user_id(self): + def test_short_term_login_token_gives_user_id(self) -> None: token = self.macaroon_generator.generate_short_term_login_token( self.user1, "", duration_in_ms=5000 ) @@ -84,7 +88,7 @@ def test_short_term_login_token_gives_user_id(self): AuthError, ) - def test_short_term_login_token_gives_auth_provider(self): + def test_short_term_login_token_gives_auth_provider(self) -> None: token = self.macaroon_generator.generate_short_term_login_token( self.user1, auth_provider_id="my_idp" ) @@ -92,7 +96,7 @@ def test_short_term_login_token_gives_auth_provider(self): self.assertEqual(self.user1, res.user_id) self.assertEqual("my_idp", res.auth_provider_id) - def test_short_term_login_token_cannot_replace_user_id(self): + def test_short_term_login_token_cannot_replace_user_id(self) -> None: token = self.macaroon_generator.generate_short_term_login_token( self.user1, "", duration_in_ms=5000 ) @@ -112,7 +116,7 @@ def 
test_short_term_login_token_cannot_replace_user_id(self): AuthError, ) - def test_mau_limits_disabled(self): + def test_mau_limits_disabled(self) -> None: self.auth_blocking._limit_usage_by_mau = False # Ensure does not throw exception self.get_success( @@ -127,7 +131,7 @@ def test_mau_limits_disabled(self): ) ) - def test_mau_limits_exceeded_large(self): + def test_mau_limits_exceeded_large(self) -> None: self.auth_blocking._limit_usage_by_mau = True self.hs.get_datastores().main.get_monthly_active_count = Mock( return_value=make_awaitable(self.large_number_of_users) @@ -150,7 +154,7 @@ def test_mau_limits_exceeded_large(self): ResourceLimitError, ) - def test_mau_limits_parity(self): + def test_mau_limits_parity(self) -> None: # Ensure we're not at the unix epoch. self.reactor.advance(1) self.auth_blocking._limit_usage_by_mau = True @@ -189,7 +193,7 @@ def test_mau_limits_parity(self): ) ) - def test_mau_limits_not_exceeded(self): + def test_mau_limits_not_exceeded(self) -> None: self.auth_blocking._limit_usage_by_mau = True self.hs.get_datastores().main.get_monthly_active_count = Mock( @@ -211,7 +215,7 @@ def test_mau_limits_not_exceeded(self): ) ) - def _get_macaroon(self): + def _get_macaroon(self) -> pymacaroons.Macaroon: token = self.macaroon_generator.generate_short_term_login_token( self.user1, "", duration_in_ms=5000 ) diff --git a/tests/handlers/test_deactivate_account.py b/tests/handlers/test_deactivate_account.py index ddda36c5a93b..3a107912265e 100644 --- a/tests/handlers/test_deactivate_account.py +++ b/tests/handlers/test_deactivate_account.py @@ -39,7 +39,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.user = self.register_user("user", "pass") self.token = self.login("user", "pass") - def _deactivate_my_account(self): + def _deactivate_my_account(self) -> None: """ Deactivates the account `self.user` using `self.token` and asserts that it returns a 200 success code. diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index 683677fd0770..01ea7d2a4281 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -14,9 +14,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -import synapse.api.errors -import synapse.handlers.device -import synapse.storage +from typing import Optional + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.api.errors import NotFoundError, SynapseError +from synapse.handlers.device import MAX_DEVICE_DISPLAY_NAME_LEN +from synapse.server import HomeServer +from synapse.util import Clock from tests import unittest @@ -25,28 +30,27 @@ class DeviceTestCase(unittest.HomeserverTestCase): - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: hs = self.setup_test_homeserver("server", federation_http_client=None) self.handler = hs.get_device_handler() self.store = hs.get_datastores().main return hs - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: # These tests assume that it starts 1000 seconds in. 
self.reactor.advance(1000) - def test_device_is_created_with_invalid_name(self): + def test_device_is_created_with_invalid_name(self) -> None: self.get_failure( self.handler.check_device_registered( user_id="@boris:foo", device_id="foo", - initial_device_display_name="a" - * (synapse.handlers.device.MAX_DEVICE_DISPLAY_NAME_LEN + 1), + initial_device_display_name="a" * (MAX_DEVICE_DISPLAY_NAME_LEN + 1), ), - synapse.api.errors.SynapseError, + SynapseError, ) - def test_device_is_created_if_doesnt_exist(self): + def test_device_is_created_if_doesnt_exist(self) -> None: res = self.get_success( self.handler.check_device_registered( user_id="@boris:foo", @@ -59,7 +63,7 @@ def test_device_is_created_if_doesnt_exist(self): dev = self.get_success(self.handler.store.get_device("@boris:foo", "fco")) self.assertEqual(dev["display_name"], "display name") - def test_device_is_preserved_if_exists(self): + def test_device_is_preserved_if_exists(self) -> None: res1 = self.get_success( self.handler.check_device_registered( user_id="@boris:foo", @@ -81,7 +85,7 @@ def test_device_is_preserved_if_exists(self): dev = self.get_success(self.handler.store.get_device("@boris:foo", "fco")) self.assertEqual(dev["display_name"], "display name") - def test_device_id_is_made_up_if_unspecified(self): + def test_device_id_is_made_up_if_unspecified(self) -> None: device_id = self.get_success( self.handler.check_device_registered( user_id="@theresa:foo", @@ -93,7 +97,7 @@ def test_device_id_is_made_up_if_unspecified(self): dev = self.get_success(self.handler.store.get_device("@theresa:foo", device_id)) self.assertEqual(dev["display_name"], "display") - def test_get_devices_by_user(self): + def test_get_devices_by_user(self) -> None: self._record_users() res = self.get_success(self.handler.get_devices_by_user(user1)) @@ -131,7 +135,7 @@ def test_get_devices_by_user(self): device_map["abc"], ) - def test_get_device(self): + def test_get_device(self) -> None: self._record_users() res = self.get_success(self.handler.get_device(user1, "abc")) @@ -146,21 +150,19 @@ def test_get_device(self): res, ) - def test_delete_device(self): + def test_delete_device(self) -> None: self._record_users() # delete the device self.get_success(self.handler.delete_device(user1, "abc")) # check the device was deleted - self.get_failure( - self.handler.get_device(user1, "abc"), synapse.api.errors.NotFoundError - ) + self.get_failure(self.handler.get_device(user1, "abc"), NotFoundError) # we'd like to check the access token was invalidated, but that's a # bit of a PITA. - def test_delete_device_and_device_inbox(self): + def test_delete_device_and_device_inbox(self) -> None: self._record_users() # add an device_inbox @@ -191,7 +193,7 @@ def test_delete_device_and_device_inbox(self): ) self.assertIsNone(res) - def test_update_device(self): + def test_update_device(self) -> None: self._record_users() update = {"display_name": "new display"} @@ -200,32 +202,29 @@ def test_update_device(self): res = self.get_success(self.handler.get_device(user1, "abc")) self.assertEqual(res["display_name"], "new display") - def test_update_device_too_long_display_name(self): + def test_update_device_too_long_display_name(self) -> None: """Update a device with a display name that is invalid (too long).""" self._record_users() # Request to update a device display name with a new value that is longer than allowed. 
- update = { - "display_name": "a" - * (synapse.handlers.device.MAX_DEVICE_DISPLAY_NAME_LEN + 1) - } + update = {"display_name": "a" * (MAX_DEVICE_DISPLAY_NAME_LEN + 1)} self.get_failure( self.handler.update_device(user1, "abc", update), - synapse.api.errors.SynapseError, + SynapseError, ) # Ensure the display name was not updated. res = self.get_success(self.handler.get_device(user1, "abc")) self.assertEqual(res["display_name"], "display 2") - def test_update_unknown_device(self): + def test_update_unknown_device(self) -> None: update = {"display_name": "new_display"} self.get_failure( self.handler.update_device("user_id", "unknown_device_id", update), - synapse.api.errors.NotFoundError, + NotFoundError, ) - def _record_users(self): + def _record_users(self) -> None: # check this works for both devices which have a recorded client_ip, # and those which don't. self._record_user(user1, "xyz", "display 0") @@ -238,8 +237,13 @@ def _record_users(self): self.reactor.advance(10000) def _record_user( - self, user_id, device_id, display_name, access_token=None, ip=None - ): + self, + user_id: str, + device_id: str, + display_name: str, + access_token: Optional[str] = None, + ip: Optional[str] = None, + ) -> None: device_id = self.get_success( self.handler.check_device_registered( user_id=user_id, @@ -248,7 +252,7 @@ def _record_user( ) ) - if ip is not None: + if access_token is not None and ip is not None: self.get_success( self.store.insert_client_ip( user_id, access_token, ip, "user_agent", device_id @@ -258,7 +262,7 @@ def _record_user( class DehydrationTestCase(unittest.HomeserverTestCase): - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: hs = self.setup_test_homeserver("server", federation_http_client=None) self.handler = hs.get_device_handler() self.registration = hs.get_registration_handler() @@ -266,7 +270,7 @@ def make_homeserver(self, reactor, clock): self.store = hs.get_datastores().main return hs - def test_dehydrate_and_rehydrate_device(self): + def test_dehydrate_and_rehydrate_device(self) -> None: user_id = "@boris:dehydration" self.get_success(self.store.register_user(user_id, "foobar")) @@ -303,7 +307,7 @@ def test_dehydrate_and_rehydrate_device(self): access_token=access_token, device_id="not the right device ID", ), - synapse.api.errors.NotFoundError, + NotFoundError, ) # dehydrating the right devices should succeed and change our device ID @@ -331,7 +335,7 @@ def test_dehydrate_and_rehydrate_device(self): # make sure that the device ID that we were initially assigned no longer exists self.get_failure( self.handler.get_device(user_id, device_id), - synapse.api.errors.NotFoundError, + NotFoundError, ) # make sure that there's no device available for dehydrating now From 32c828d0f760492711a98b11376e229d795fd1b3 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Fri, 11 Mar 2022 13:42:22 +0100 Subject: [PATCH 391/474] Add type hints to `tests/rest`. 
(#12208) Co-authored-by: Patrick Cloke --- changelog.d/12208.misc | 1 + mypy.ini | 1 - tests/rest/client/test_transactions.py | 19 +++- tests/rest/media/v1/test_media_storage.py | 110 +++++++++++++--------- tests/rest/media/v1/test_url_preview.py | 83 ++++++++-------- 5 files changed, 129 insertions(+), 85 deletions(-) create mode 100644 changelog.d/12208.misc diff --git a/changelog.d/12208.misc b/changelog.d/12208.misc new file mode 100644 index 000000000000..c5b635679931 --- /dev/null +++ b/changelog.d/12208.misc @@ -0,0 +1 @@ +Add type hints to tests files. \ No newline at end of file diff --git a/mypy.ini b/mypy.ini index c8390ddba96d..f9c39fcaaee3 100644 --- a/mypy.ini +++ b/mypy.ini @@ -90,7 +90,6 @@ exclude = (?x) |tests/push/test_push_rule_evaluator.py |tests/rest/client/test_transactions.py |tests/rest/media/v1/test_media_storage.py - |tests/rest/media/v1/test_url_preview.py |tests/scripts/test_new_matrix_user.py |tests/server.py |tests/server_notices/test_resource_limits_server_notices.py diff --git a/tests/rest/client/test_transactions.py b/tests/rest/client/test_transactions.py index 3b5747cb12b8..8d8251b2ac99 100644 --- a/tests/rest/client/test_transactions.py +++ b/tests/rest/client/test_transactions.py @@ -1,3 +1,18 @@ +# Copyright 2018-2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from http import HTTPStatus from unittest.mock import Mock, call from twisted.internet import defer, reactor @@ -11,14 +26,14 @@ class HttpTransactionCacheTestCase(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: self.clock = MockClock() self.hs = Mock() self.hs.get_clock = Mock(return_value=self.clock) self.hs.get_auth = Mock() self.cache = HttpTransactionCache(self.hs) - self.mock_http_response = (200, "GOOD JOB!") + self.mock_http_response = (HTTPStatus.OK, "GOOD JOB!") self.mock_key = "foo" @defer.inlineCallbacks diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py index cba9be17c4ca..7204b2dfe075 100644 --- a/tests/rest/media/v1/test_media_storage.py +++ b/tests/rest/media/v1/test_media_storage.py @@ -16,7 +16,7 @@ import tempfile from binascii import unhexlify from io import BytesIO -from typing import Optional +from typing import Any, BinaryIO, Dict, List, Optional, Union from unittest.mock import Mock from urllib import parse @@ -26,18 +26,24 @@ from twisted.internet import defer from twisted.internet.defer import Deferred +from twisted.test.proto_helpers import MemoryReactor +from synapse.events import EventBase from synapse.events.spamcheck import load_legacy_spam_checkers from synapse.logging.context import make_deferred_yieldable +from synapse.module_api import ModuleApi from synapse.rest import admin from synapse.rest.client import login from synapse.rest.media.v1._base import FileInfo from synapse.rest.media.v1.filepath import MediaFilePaths -from synapse.rest.media.v1.media_storage import MediaStorage +from synapse.rest.media.v1.media_storage import MediaStorage, ReadableFileWrapper from synapse.rest.media.v1.storage_provider import FileStorageProviderBackend +from synapse.server import HomeServer +from synapse.types import RoomAlias +from synapse.util import Clock from tests import unittest -from tests.server import FakeSite, make_request +from tests.server import FakeChannel, FakeSite, make_request from tests.test_utils import SMALL_PNG from tests.utils import default_config @@ -46,7 +52,7 @@ class MediaStorageTests(unittest.HomeserverTestCase): needs_threadpool = True - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.test_dir = tempfile.mkdtemp(prefix="synapse-tests-") self.addCleanup(shutil.rmtree, self.test_dir) @@ -62,7 +68,7 @@ def prepare(self, reactor, clock, hs): hs, self.primary_base_path, self.filepaths, storage_providers ) - def test_ensure_media_is_in_local_cache(self): + def test_ensure_media_is_in_local_cache(self) -> None: media_id = "some_media_id" test_body = "Test\n" @@ -105,7 +111,7 @@ def test_ensure_media_is_in_local_cache(self): self.assertEqual(test_body, body) -@attr.s(slots=True, frozen=True) +@attr.s(auto_attribs=True, slots=True, frozen=True) class _TestImage: """An image for testing thumbnailing with the expected results @@ -121,18 +127,18 @@ class _TestImage: a 404 is expected. 
""" - data = attr.ib(type=bytes) - content_type = attr.ib(type=bytes) - extension = attr.ib(type=bytes) - expected_cropped = attr.ib(type=Optional[bytes], default=None) - expected_scaled = attr.ib(type=Optional[bytes], default=None) - expected_found = attr.ib(default=True, type=bool) + data: bytes + content_type: bytes + extension: bytes + expected_cropped: Optional[bytes] = None + expected_scaled: Optional[bytes] = None + expected_found: bool = True @parameterized_class( ("test_image",), [ - # smoll png + # small png ( _TestImage( SMALL_PNG, @@ -193,11 +199,17 @@ class MediaRepoTests(unittest.HomeserverTestCase): hijack_auth = True user_id = "@test:user" - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.fetches = [] - def get_file(destination, path, output_stream, args=None, max_size=None): + def get_file( + destination: str, + path: str, + output_stream: BinaryIO, + args: Optional[Dict[str, Union[str, List[str]]]] = None, + max_size: Optional[int] = None, + ) -> Deferred: """ Returns tuple[int,dict,str,int] of file length, response headers, absolute URI, and response code. @@ -238,7 +250,7 @@ def write_to(r): return hs - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: media_resource = hs.get_media_repository_resource() self.download_resource = media_resource.children[b"download"] @@ -248,8 +260,9 @@ def prepare(self, reactor, clock, hs): self.media_id = "example.com/12345" - def _req(self, content_disposition, include_content_type=True): - + def _req( + self, content_disposition: Optional[bytes], include_content_type: bool = True + ) -> FakeChannel: channel = make_request( self.reactor, FakeSite(self.download_resource, self.reactor), @@ -288,7 +301,7 @@ def _req(self, content_disposition, include_content_type=True): return channel - def test_handle_missing_content_type(self): + def test_handle_missing_content_type(self) -> None: channel = self._req( b"inline; filename=out" + self.test_image.extension, include_content_type=False, @@ -299,7 +312,7 @@ def test_handle_missing_content_type(self): headers.getRawHeaders(b"Content-Type"), [b"application/octet-stream"] ) - def test_disposition_filename_ascii(self): + def test_disposition_filename_ascii(self) -> None: """ If the filename is filename= then Synapse will decode it as an ASCII string, and use filename= in the response. @@ -315,7 +328,7 @@ def test_disposition_filename_ascii(self): [b"inline; filename=out" + self.test_image.extension], ) - def test_disposition_filenamestar_utf8escaped(self): + def test_disposition_filenamestar_utf8escaped(self) -> None: """ If the filename is filename=*utf8'' then Synapse will correctly decode it as the UTF-8 string, and use filename* in the @@ -335,7 +348,7 @@ def test_disposition_filenamestar_utf8escaped(self): [b"inline; filename*=utf-8''" + filename + self.test_image.extension], ) - def test_disposition_none(self): + def test_disposition_none(self) -> None: """ If there is no filename, one isn't passed on in the Content-Disposition of the request. 
@@ -348,26 +361,26 @@ def test_disposition_none(self): ) self.assertEqual(headers.getRawHeaders(b"Content-Disposition"), None) - def test_thumbnail_crop(self): + def test_thumbnail_crop(self) -> None: """Test that a cropped remote thumbnail is available.""" self._test_thumbnail( "crop", self.test_image.expected_cropped, self.test_image.expected_found ) - def test_thumbnail_scale(self): + def test_thumbnail_scale(self) -> None: """Test that a scaled remote thumbnail is available.""" self._test_thumbnail( "scale", self.test_image.expected_scaled, self.test_image.expected_found ) - def test_invalid_type(self): + def test_invalid_type(self) -> None: """An invalid thumbnail type is never available.""" self._test_thumbnail("invalid", None, False) @unittest.override_config( {"thumbnail_sizes": [{"width": 32, "height": 32, "method": "scale"}]} ) - def test_no_thumbnail_crop(self): + def test_no_thumbnail_crop(self) -> None: """ Override the config to generate only scaled thumbnails, but request a cropped one. """ @@ -376,13 +389,13 @@ def test_no_thumbnail_crop(self): @unittest.override_config( {"thumbnail_sizes": [{"width": 32, "height": 32, "method": "crop"}]} ) - def test_no_thumbnail_scale(self): + def test_no_thumbnail_scale(self) -> None: """ Override the config to generate only cropped thumbnails, but request a scaled one. """ self._test_thumbnail("scale", None, False) - def test_thumbnail_repeated_thumbnail(self): + def test_thumbnail_repeated_thumbnail(self) -> None: """Test that fetching the same thumbnail works, and deleting the on disk thumbnail regenerates it. """ @@ -443,7 +456,9 @@ def test_thumbnail_repeated_thumbnail(self): channel.result["body"], ) - def _test_thumbnail(self, method, expected_body, expected_found): + def _test_thumbnail( + self, method: str, expected_body: Optional[bytes], expected_found: bool + ) -> None: params = "?width=32&height=32&method=" + method channel = make_request( self.reactor, @@ -485,7 +500,7 @@ def _test_thumbnail(self, method, expected_body, expected_found): ) @parameterized.expand([("crop", 16), ("crop", 64), ("scale", 16), ("scale", 64)]) - def test_same_quality(self, method, desired_size): + def test_same_quality(self, method: str, desired_size: int) -> None: """Test that choosing between thumbnails with the same quality rating succeeds. We are not particular about which thumbnail is chosen.""" @@ -521,7 +536,7 @@ def test_same_quality(self, method, desired_size): ) ) - def test_x_robots_tag_header(self): + def test_x_robots_tag_header(self) -> None: """ Tests that the `X-Robots-Tag` header is present, which informs web crawlers to not index, archive, or follow links in media. @@ -540,29 +555,38 @@ class TestSpamChecker: `evil`. 
""" - def __init__(self, config, api): + def __init__(self, config: Dict[str, Any], api: ModuleApi) -> None: self.config = config self.api = api - def parse_config(config): + def parse_config(config: Dict[str, Any]) -> Dict[str, Any]: return config - async def check_event_for_spam(self, foo): + async def check_event_for_spam(self, event: EventBase) -> Union[bool, str]: return False # allow all events - async def user_may_invite(self, inviter_userid, invitee_userid, room_id): + async def user_may_invite( + self, + inviter_userid: str, + invitee_userid: str, + room_id: str, + ) -> bool: return True # allow all invites - async def user_may_create_room(self, userid): + async def user_may_create_room(self, userid: str) -> bool: return True # allow all room creations - async def user_may_create_room_alias(self, userid, room_alias): + async def user_may_create_room_alias( + self, userid: str, room_alias: RoomAlias + ) -> bool: return True # allow all room aliases - async def user_may_publish_room(self, userid, room_id): + async def user_may_publish_room(self, userid: str, room_id: str) -> bool: return True # allow publishing of all rooms - async def check_media_file_for_spam(self, file_wrapper, file_info) -> bool: + async def check_media_file_for_spam( + self, file_wrapper: ReadableFileWrapper, file_info: FileInfo + ) -> bool: buf = BytesIO() await file_wrapper.write_chunks_to(buf.write) @@ -575,7 +599,7 @@ class SpamCheckerTestCase(unittest.HomeserverTestCase): admin.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.user = self.register_user("user", "pass") self.tok = self.login("user", "pass") @@ -586,7 +610,7 @@ def prepare(self, reactor, clock, hs): load_legacy_spam_checkers(hs) - def default_config(self): + def default_config(self) -> Dict[str, Any]: config = default_config("test") config.update( @@ -602,13 +626,13 @@ def default_config(self): return config - def test_upload_innocent(self): + def test_upload_innocent(self) -> None: """Attempt to upload some innocent data that should be allowed.""" self.helper.upload_media( self.upload_resource, SMALL_PNG, tok=self.tok, expect_code=200 ) - def test_upload_ban(self): + def test_upload_ban(self) -> None: """Attempt to upload some data that includes bytes "evil", which should get rejected by the spam checker. 
""" diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py index da2c53326019..5148c39874e2 100644 --- a/tests/rest/media/v1/test_url_preview.py +++ b/tests/rest/media/v1/test_url_preview.py @@ -16,16 +16,21 @@ import json import os import re +from typing import Any, Dict, Optional, Sequence, Tuple, Type from urllib.parse import urlencode from twisted.internet._resolver import HostResolution from twisted.internet.address import IPv4Address, IPv6Address from twisted.internet.error import DNSLookupError -from twisted.test.proto_helpers import AccumulatingProtocol +from twisted.internet.interfaces import IAddress, IResolutionReceiver +from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactor from synapse.config.oembed import OEmbedEndpointConfig +from synapse.rest.media.v1.media_repository import MediaRepositoryResource from synapse.rest.media.v1.preview_url_resource import IMAGE_CACHE_EXPIRY_MS +from synapse.server import HomeServer from synapse.types import JsonDict +from synapse.util import Clock from synapse.util.stringutils import parse_and_validate_mxc_uri from tests import unittest @@ -52,7 +57,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): b"" ) - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() config["url_preview_enabled"] = True @@ -113,22 +118,22 @@ def make_homeserver(self, reactor, clock): return hs - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.media_repo = hs.get_media_repository_resource() self.preview_url = self.media_repo.children[b"preview_url"] - self.lookups = {} + self.lookups: Dict[str, Any] = {} class Resolver: def resolveHostName( _self, - resolutionReceiver, - hostName, - portNumber=0, - addressTypes=None, - transportSemantics="TCP", - ): + resolutionReceiver: IResolutionReceiver, + hostName: str, + portNumber: int = 0, + addressTypes: Optional[Sequence[Type[IAddress]]] = None, + transportSemantics: str = "TCP", + ) -> IResolutionReceiver: resolution = HostResolution(hostName) resolutionReceiver.resolutionBegan(resolution) @@ -140,9 +145,9 @@ def resolveHostName( resolutionReceiver.resolutionComplete() return resolutionReceiver - self.reactor.nameResolver = Resolver() + self.reactor.nameResolver = Resolver() # type: ignore[assignment] - def create_test_resource(self): + def create_test_resource(self) -> MediaRepositoryResource: return self.hs.get_media_repository_resource() def _assert_small_png(self, json_body: JsonDict) -> None: @@ -153,7 +158,7 @@ def _assert_small_png(self, json_body: JsonDict) -> None: self.assertEqual(json_body["og:image:type"], "image/png") self.assertEqual(json_body["matrix:image:size"], 67) - def test_cache_returns_correct_type(self): + def test_cache_returns_correct_type(self) -> None: self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")] channel = self.make_request( @@ -207,7 +212,7 @@ def test_cache_returns_correct_type(self): channel.json_body, {"og:title": "~matrix~", "og:description": "hi"} ) - def test_non_ascii_preview_httpequiv(self): + def test_non_ascii_preview_httpequiv(self) -> None: self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")] end_content = ( @@ -243,7 +248,7 @@ def test_non_ascii_preview_httpequiv(self): self.assertEqual(channel.code, 200) self.assertEqual(channel.json_body["og:title"], "\u0434\u043a\u0430") - def test_video_rejected(self): + def 
test_video_rejected(self) -> None: self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")] end_content = b"anything" @@ -279,7 +284,7 @@ def test_video_rejected(self): }, ) - def test_audio_rejected(self): + def test_audio_rejected(self) -> None: self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")] end_content = b"anything" @@ -315,7 +320,7 @@ def test_audio_rejected(self): }, ) - def test_non_ascii_preview_content_type(self): + def test_non_ascii_preview_content_type(self) -> None: self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")] end_content = ( @@ -350,7 +355,7 @@ def test_non_ascii_preview_content_type(self): self.assertEqual(channel.code, 200) self.assertEqual(channel.json_body["og:title"], "\u0434\u043a\u0430") - def test_overlong_title(self): + def test_overlong_title(self) -> None: self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")] end_content = ( @@ -387,7 +392,7 @@ def test_overlong_title(self): # We should only see the `og:description` field, as `title` is too long and should be stripped out self.assertCountEqual(["og:description"], res.keys()) - def test_ipaddr(self): + def test_ipaddr(self) -> None: """ IP addresses can be previewed directly. """ @@ -417,7 +422,7 @@ def test_ipaddr(self): channel.json_body, {"og:title": "~matrix~", "og:description": "hi"} ) - def test_blacklisted_ip_specific(self): + def test_blacklisted_ip_specific(self) -> None: """ Blacklisted IP addresses, found via DNS, are not spidered. """ @@ -438,7 +443,7 @@ def test_blacklisted_ip_specific(self): }, ) - def test_blacklisted_ip_range(self): + def test_blacklisted_ip_range(self) -> None: """ Blacklisted IP ranges, IPs found over DNS, are not spidered. """ @@ -457,7 +462,7 @@ def test_blacklisted_ip_range(self): }, ) - def test_blacklisted_ip_specific_direct(self): + def test_blacklisted_ip_specific_direct(self) -> None: """ Blacklisted IP addresses, accessed directly, are not spidered. """ @@ -476,7 +481,7 @@ def test_blacklisted_ip_specific_direct(self): ) self.assertEqual(channel.code, 403) - def test_blacklisted_ip_range_direct(self): + def test_blacklisted_ip_range_direct(self) -> None: """ Blacklisted IP ranges, accessed directly, are not spidered. """ @@ -493,7 +498,7 @@ def test_blacklisted_ip_range_direct(self): }, ) - def test_blacklisted_ip_range_whitelisted_ip(self): + def test_blacklisted_ip_range_whitelisted_ip(self) -> None: """ Blacklisted but then subsequently whitelisted IP addresses can be spidered. @@ -526,7 +531,7 @@ def test_blacklisted_ip_range_whitelisted_ip(self): channel.json_body, {"og:title": "~matrix~", "og:description": "hi"} ) - def test_blacklisted_ip_with_external_ip(self): + def test_blacklisted_ip_with_external_ip(self) -> None: """ If a hostname resolves a blacklisted IP, even if there's a non-blacklisted one, it will be rejected. @@ -549,7 +554,7 @@ def test_blacklisted_ip_with_external_ip(self): }, ) - def test_blacklisted_ipv6_specific(self): + def test_blacklisted_ipv6_specific(self) -> None: """ Blacklisted IP addresses, found via DNS, are not spidered. """ @@ -572,7 +577,7 @@ def test_blacklisted_ipv6_specific(self): }, ) - def test_blacklisted_ipv6_range(self): + def test_blacklisted_ipv6_range(self) -> None: """ Blacklisted IP ranges, IPs found over DNS, are not spidered. """ @@ -591,7 +596,7 @@ def test_blacklisted_ipv6_range(self): }, ) - def test_OPTIONS(self): + def test_OPTIONS(self) -> None: """ OPTIONS returns the OPTIONS. 
""" @@ -601,7 +606,7 @@ def test_OPTIONS(self): self.assertEqual(channel.code, 200) self.assertEqual(channel.json_body, {}) - def test_accept_language_config_option(self): + def test_accept_language_config_option(self) -> None: """ Accept-Language header is sent to the remote server """ @@ -652,7 +657,7 @@ def test_accept_language_config_option(self): server.data, ) - def test_data_url(self): + def test_data_url(self) -> None: """ Requesting to preview a data URL is not supported. """ @@ -675,7 +680,7 @@ def test_data_url(self): self.assertEqual(channel.code, 500) - def test_inline_data_url(self): + def test_inline_data_url(self) -> None: """ An inline image (as a data URL) should be parsed properly. """ @@ -712,7 +717,7 @@ def test_inline_data_url(self): self.assertEqual(channel.code, 200) self._assert_small_png(channel.json_body) - def test_oembed_photo(self): + def test_oembed_photo(self) -> None: """Test an oEmbed endpoint which returns a 'photo' type which redirects the preview to a new URL.""" self.lookups["publish.twitter.com"] = [(IPv4Address, "10.1.2.3")] self.lookups["cdn.twitter.com"] = [(IPv4Address, "10.1.2.3")] @@ -771,7 +776,7 @@ def test_oembed_photo(self): self.assertEqual(body["og:url"], "http://twitter.com/matrixdotorg/status/12345") self._assert_small_png(body) - def test_oembed_rich(self): + def test_oembed_rich(self) -> None: """Test an oEmbed endpoint which returns HTML content via the 'rich' type.""" self.lookups["publish.twitter.com"] = [(IPv4Address, "10.1.2.3")] @@ -817,7 +822,7 @@ def test_oembed_rich(self): }, ) - def test_oembed_format(self): + def test_oembed_format(self) -> None: """Test an oEmbed endpoint which requires the format in the URL.""" self.lookups["www.hulu.com"] = [(IPv4Address, "10.1.2.3")] @@ -866,7 +871,7 @@ def test_oembed_format(self): }, ) - def test_oembed_autodiscovery(self): + def test_oembed_autodiscovery(self) -> None: """ Autodiscovery works by finding the link in the HTML response and then requesting an oEmbed URL. 1. Request a preview of a URL which is not known to the oEmbed code. @@ -962,7 +967,7 @@ def test_oembed_autodiscovery(self): ) self._assert_small_png(body) - def _download_image(self): + def _download_image(self) -> Tuple[str, str]: """Downloads an image into the URL cache. Returns: A (host, media_id) tuple representing the MXC URI of the image. 
@@ -995,7 +1000,7 @@ def _download_image(self): self.assertIsNone(_port) return host, media_id - def test_storage_providers_exclude_files(self): + def test_storage_providers_exclude_files(self) -> None: """Test that files are not stored in or fetched from storage providers.""" host, media_id = self._download_image() @@ -1037,7 +1042,7 @@ def test_storage_providers_exclude_files(self): "URL cache file was unexpectedly retrieved from a storage provider", ) - def test_storage_providers_exclude_thumbnails(self): + def test_storage_providers_exclude_thumbnails(self) -> None: """Test that thumbnails are not stored in or fetched from storage providers.""" host, media_id = self._download_image() @@ -1090,7 +1095,7 @@ def test_storage_providers_exclude_thumbnails(self): "URL cache thumbnail was unexpectedly retrieved from a storage provider", ) - def test_cache_expiry(self): + def test_cache_expiry(self) -> None: """Test that URL cache files and thumbnails are cleaned up properly on expiry.""" self.preview_url.clock = MockClock() From 003cc6910af177fec86ae7f43683d146975c7f4b Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 11 Mar 2022 14:20:00 +0100 Subject: [PATCH 392/474] Update the SSO username picker template to comply with SIWA guidelines (#12210) Fixes https://github.com/matrix-org/synapse/issues/12205 --- changelog.d/12210.misc | 1 + docs/sample_config.yaml | 9 +++++++-- docs/templates.md | 7 +++++-- synapse/config/oidc.py | 9 +++++++-- synapse/handlers/oidc.py | 12 +++++++++++- synapse/handlers/sso.py | 8 +++++--- synapse/res/templates/sso_auth_account_details.html | 6 +++--- synapse/rest/synapse/client/pick_username.py | 8 ++++++++ 8 files changed, 47 insertions(+), 13 deletions(-) create mode 100644 changelog.d/12210.misc diff --git a/changelog.d/12210.misc b/changelog.d/12210.misc new file mode 100644 index 000000000000..3f6a8747c256 --- /dev/null +++ b/changelog.d/12210.misc @@ -0,0 +1 @@ +Update the SSO username picker template to comply with SIWA guidelines. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 6f3623c88ab9..ef25a3175f98 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1947,8 +1947,13 @@ saml2_config: # # localpart_template: Jinja2 template for the localpart of the MXID. # If this is not set, the user will be prompted to choose their -# own username (see 'sso_auth_account_details.html' in the 'sso' -# section of this file). +# own username (see the documentation for the +# 'sso_auth_account_details.html' template). +# +# confirm_localpart: Whether to prompt the user to validate (or +# change) the generated localpart (see the documentation for the +# 'sso_auth_account_details.html' template), instead of +# registering the account right away. # # display_name_template: Jinja2 template for the display name to set # on first login. If unset, no displayname will be set. diff --git a/docs/templates.md b/docs/templates.md index 2b66e9d86294..b251d05cb9eb 100644 --- a/docs/templates.md +++ b/docs/templates.md @@ -176,8 +176,11 @@ Below are the templates Synapse will look for when generating pages related to S for the brand of the IdP * `user_attributes`: an object containing details about the user that we received from the IdP. 
May have the following attributes: - * display_name: the user's display_name - * emails: a list of email addresses + * `display_name`: the user's display name + * `emails`: a list of email addresses + * `localpart`: the local part of the Matrix user ID to register, + if `localpart_template` is set in the mapping provider configuration (empty + string if not) The template should render a form which submits the following fields: * `username`: the localpart of the user's chosen user id * `sso_new_user_consent.html`: HTML page allowing the user to consent to the diff --git a/synapse/config/oidc.py b/synapse/config/oidc.py index f7e4f9ef22b1..fc95912d9b3f 100644 --- a/synapse/config/oidc.py +++ b/synapse/config/oidc.py @@ -182,8 +182,13 @@ def generate_config_section(self, config_dir_path, server_name, **kwargs) -> str # # localpart_template: Jinja2 template for the localpart of the MXID. # If this is not set, the user will be prompted to choose their - # own username (see 'sso_auth_account_details.html' in the 'sso' - # section of this file). + # own username (see the documentation for the + # 'sso_auth_account_details.html' template). + # + # confirm_localpart: Whether to prompt the user to validate (or + # change) the generated localpart (see the documentation for the + # 'sso_auth_account_details.html' template), instead of + # registering the account right away. # # display_name_template: Jinja2 template for the display name to set # on first login. If unset, no displayname will be set. diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py index 593a2aac6691..d98659edc761 100644 --- a/synapse/handlers/oidc.py +++ b/synapse/handlers/oidc.py @@ -1228,6 +1228,7 @@ class OidcSessionData: class UserAttributeDict(TypedDict): localpart: Optional[str] + confirm_localpart: bool display_name: Optional[str] emails: List[str] @@ -1316,6 +1317,7 @@ class JinjaOidcMappingConfig: display_name_template: Optional[Template] email_template: Optional[Template] extra_attributes: Dict[str, Template] + confirm_localpart: bool = False class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]): @@ -1357,12 +1359,17 @@ def parse_template_config(option_name: str) -> Optional[Template]: "invalid jinja template", path=["extra_attributes", key] ) from e + confirm_localpart = config.get("confirm_localpart") or False + if not isinstance(confirm_localpart, bool): + raise ConfigError("must be a bool", path=["confirm_localpart"]) + return JinjaOidcMappingConfig( subject_claim=subject_claim, localpart_template=localpart_template, display_name_template=display_name_template, email_template=email_template, extra_attributes=extra_attributes, + confirm_localpart=confirm_localpart, ) def get_remote_user_id(self, userinfo: UserInfo) -> str: @@ -1398,7 +1405,10 @@ def render_template_field(template: Optional[Template]) -> Optional[str]: emails.append(email) return UserAttributeDict( - localpart=localpart, display_name=display_name, emails=emails + localpart=localpart, + display_name=display_name, + emails=emails, + confirm_localpart=self._config.confirm_localpart, ) async def get_extra_attributes(self, userinfo: UserInfo, token: Token) -> JsonDict: diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py index ff5b5169cac1..4f02a060d953 100644 --- a/synapse/handlers/sso.py +++ b/synapse/handlers/sso.py @@ -132,6 +132,7 @@ class UserAttributes: # if `None`, the mapper has not picked a userid, and the user should be prompted to # enter one. 
localpart: Optional[str] + confirm_localpart: bool = False display_name: Optional[str] = None emails: Collection[str] = attr.Factory(list) @@ -561,9 +562,10 @@ def _get_url_for_next_new_user_step( # Must provide either attributes or session, not both assert (attributes is not None) != (session is not None) - if (attributes and attributes.localpart is None) or ( - session and session.chosen_localpart is None - ): + if ( + attributes + and (attributes.localpart is None or attributes.confirm_localpart is True) + ) or (session and session.chosen_localpart is None): return b"/_synapse/client/pick_username/account_details" elif self._consent_at_registration and not ( session and session.terms_accepted_version diff --git a/synapse/res/templates/sso_auth_account_details.html b/synapse/res/templates/sso_auth_account_details.html index 00e1dcdbb866..41315e4fd4da 100644 --- a/synapse/res/templates/sso_auth_account_details.html +++ b/synapse/res/templates/sso_auth_account_details.html @@ -130,15 +130,15 @@
-        <h1>Your account is nearly ready</h1>
-        <p>Check your details before creating an account on {{ server_name }}</p>
+        <h1>Choose your user name</h1>
+        <p>This is required to create your account on {{ server_name }}, and you can't change this later.</p>
           <div class="prefix">@</div>
-          <input type="text" name="username" id="field-username" autofocus>
+          <input type="text" name="username" id="field-username" value="{{ user_attributes.localpart }}" autofocus>
           <div class="postfix">:{{ server_name }}</div>
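Combined with the new `confirm_localpart` option (see the `synapse/config/oidc.py` hunk above), pre-filling the input with `user_attributes.localpart` lets a deployment generate a localpart automatically yet still pause on this screen for the user to confirm or edit it. A hedged sketch of such a provider configuration — the `idp_id`, issuer and client values are placeholders, not from this patch:

```yaml
oidc_providers:
  - idp_id: example
    issuer: "https://id.example.com/"
    client_id: "synapse"
    client_secret: "verysecret"
    user_mapping_provider:
      config:
        # Generate a candidate localpart rather than prompting from scratch...
        localpart_template: "{{ user.preferred_username }}"
        # ...but still show the account-details screen, pre-filled, for
        # confirmation instead of registering the account right away.
        confirm_localpart: true
```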
diff --git a/synapse/rest/synapse/client/pick_username.py b/synapse/rest/synapse/client/pick_username.py index 28ae08349705..6338fbaaa96d 100644 --- a/synapse/rest/synapse/client/pick_username.py +++ b/synapse/rest/synapse/client/pick_username.py @@ -92,12 +92,20 @@ async def _async_render_GET(self, request: Request) -> None: self._sso_handler.render_error(request, "bad_session", e.msg, code=e.code) return + # The configuration might mandate going through this step to validate an + # automatically generated localpart, so session.chosen_localpart might already + # be set. + localpart = "" + if session.chosen_localpart is not None: + localpart = session.chosen_localpart + idp_id = session.auth_provider_id template_params = { "idp": self._sso_handler.get_identity_providers()[idp_id], "user_attributes": { "display_name": session.display_name, "emails": session.emails, + "localpart": localpart, }, } From 735e89bd3a0755883ef0a19649adf84192b5d9fc Mon Sep 17 00:00:00 2001 From: Nick Mills-Barrett Date: Fri, 11 Mar 2022 14:45:26 +0100 Subject: [PATCH 393/474] Add an additional HTTP pusher + push rule tests. (#12188) And rename the field used for caching from _id to _cache_key. --- changelog.d/12188.misc | 1 + synapse/push/baserules.py | 38 ++++++------- synapse/push/bulk_push_rule_evaluator.py | 10 ++-- synapse/push/clientformat.py | 2 +- tests/push/test_http.py | 72 +++++++++++++++++++++++- 5 files changed, 95 insertions(+), 28 deletions(-) create mode 100644 changelog.d/12188.misc diff --git a/changelog.d/12188.misc b/changelog.d/12188.misc new file mode 100644 index 000000000000..403158481cee --- /dev/null +++ b/changelog.d/12188.misc @@ -0,0 +1 @@ +Add combined test for HTTP pusher and push rule. Contributed by Nick @ Beeper. diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py index 832eaa34e9ef..f42f605f2383 100644 --- a/synapse/push/baserules.py +++ b/synapse/push/baserules.py @@ -169,7 +169,7 @@ def make_base_prepend_rules( "kind": "event_match", "key": "content.msgtype", "pattern": "m.notice", - "_id": "_suppress_notices", + "_cache_key": "_suppress_notices", } ], "actions": ["dont_notify"], @@ -183,13 +183,13 @@ def make_base_prepend_rules( "kind": "event_match", "key": "type", "pattern": "m.room.member", - "_id": "_member", + "_cache_key": "_member", }, { "kind": "event_match", "key": "content.membership", "pattern": "invite", - "_id": "_invite_member", + "_cache_key": "_invite_member", }, {"kind": "event_match", "key": "state_key", "pattern_type": "user_id"}, ], @@ -212,7 +212,7 @@ def make_base_prepend_rules( "kind": "event_match", "key": "type", "pattern": "m.room.member", - "_id": "_member", + "_cache_key": "_member", } ], "actions": ["dont_notify"], @@ -237,12 +237,12 @@ def make_base_prepend_rules( "kind": "event_match", "key": "content.body", "pattern": "@room", - "_id": "_roomnotif_content", + "_cache_key": "_roomnotif_content", }, { "kind": "sender_notification_permission", "key": "room", - "_id": "_roomnotif_pl", + "_cache_key": "_roomnotif_pl", }, ], "actions": ["notify", {"set_tweak": "highlight", "value": True}], @@ -254,13 +254,13 @@ def make_base_prepend_rules( "kind": "event_match", "key": "type", "pattern": "m.room.tombstone", - "_id": "_tombstone", + "_cache_key": "_tombstone", }, { "kind": "event_match", "key": "state_key", "pattern": "", - "_id": "_tombstone_statekey", + "_cache_key": "_tombstone_statekey", }, ], "actions": ["notify", {"set_tweak": "highlight", "value": True}], @@ -272,7 +272,7 @@ def make_base_prepend_rules( "kind": "event_match", 
"key": "type", "pattern": "m.reaction", - "_id": "_reaction", + "_cache_key": "_reaction", } ], "actions": ["dont_notify"], @@ -288,7 +288,7 @@ def make_base_prepend_rules( "kind": "event_match", "key": "type", "pattern": "m.call.invite", - "_id": "_call", + "_cache_key": "_call", } ], "actions": [ @@ -302,12 +302,12 @@ def make_base_prepend_rules( { "rule_id": "global/underride/.m.rule.room_one_to_one", "conditions": [ - {"kind": "room_member_count", "is": "2", "_id": "member_count"}, + {"kind": "room_member_count", "is": "2", "_cache_key": "member_count"}, { "kind": "event_match", "key": "type", "pattern": "m.room.message", - "_id": "_message", + "_cache_key": "_message", }, ], "actions": [ @@ -321,12 +321,12 @@ def make_base_prepend_rules( { "rule_id": "global/underride/.m.rule.encrypted_room_one_to_one", "conditions": [ - {"kind": "room_member_count", "is": "2", "_id": "member_count"}, + {"kind": "room_member_count", "is": "2", "_cache_key": "member_count"}, { "kind": "event_match", "key": "type", "pattern": "m.room.encrypted", - "_id": "_encrypted", + "_cache_key": "_encrypted", }, ], "actions": [ @@ -342,7 +342,7 @@ def make_base_prepend_rules( "kind": "event_match", "key": "type", "pattern": "m.room.message", - "_id": "_message", + "_cache_key": "_message", } ], "actions": ["notify", {"set_tweak": "highlight", "value": False}], @@ -356,7 +356,7 @@ def make_base_prepend_rules( "kind": "event_match", "key": "type", "pattern": "m.room.encrypted", - "_id": "_encrypted", + "_cache_key": "_encrypted", } ], "actions": ["notify", {"set_tweak": "highlight", "value": False}], @@ -368,19 +368,19 @@ def make_base_prepend_rules( "kind": "event_match", "key": "type", "pattern": "im.vector.modular.widgets", - "_id": "_type_modular_widgets", + "_cache_key": "_type_modular_widgets", }, { "kind": "event_match", "key": "content.type", "pattern": "jitsi", - "_id": "_content_type_jitsi", + "_cache_key": "_content_type_jitsi", }, { "kind": "event_match", "key": "state_key", "pattern": "*", - "_id": "_is_state_event", + "_cache_key": "_is_state_event", }, ], "actions": ["notify", {"set_tweak": "highlight", "value": False}], diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index fecf86034eb9..8140afcb6b37 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -274,17 +274,17 @@ def _condition_checker( cache: Dict[str, bool], ) -> bool: for cond in conditions: - _id = cond.get("_id", None) - if _id: - res = cache.get(_id, None) + _cache_key = cond.get("_cache_key", None) + if _cache_key: + res = cache.get(_cache_key, None) if res is False: return False elif res is True: continue res = evaluator.matches(cond, uid, display_name) - if _id: - cache[_id] = bool(res) + if _cache_key: + cache[_cache_key] = bool(res) if not res: return False diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py index c5708cd8885b..63b22d50aea9 100644 --- a/synapse/push/clientformat.py +++ b/synapse/push/clientformat.py @@ -40,7 +40,7 @@ def format_push_rules_for_user( # Remove internal stuff. for c in r["conditions"]: - c.pop("_id", None) + c.pop("_cache_key", None) pattern_type = c.pop("pattern_type", None) if pattern_type == "user_id": diff --git a/tests/push/test_http.py b/tests/push/test_http.py index c284beb37ce0..6691e0712896 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. +from typing import List, Tuple from unittest.mock import Mock from twisted.internet.defer import Deferred import synapse.rest.admin from synapse.logging.context import make_deferred_yieldable from synapse.push import PusherConfigException -from synapse.rest.client import login, receipts, room +from synapse.rest.client import login, push_rule, receipts, room from tests.unittest import HomeserverTestCase, override_config @@ -29,6 +30,7 @@ class HTTPPusherTests(HomeserverTestCase): room.register_servlets, login.register_servlets, receipts.register_servlets, + push_rule.register_servlets, ] user_id = True hijack_auth = False @@ -39,12 +41,12 @@ def default_config(self): return config def make_homeserver(self, reactor, clock): - self.push_attempts = [] + self.push_attempts: List[Tuple[Deferred, str, dict]] = [] m = Mock() def post_json_get_json(url, body): - d: Deferred = Deferred() + d: Deferred = Deferred() self.push_attempts.append((d, url, body)) return make_deferred_yieldable(d) @@ -719,3 +721,67 @@ def _send_read_request(self, access_token, message_event_id, room_id): access_token=access_token, ) self.assertEqual(channel.code, 200, channel.json_body) + + def _make_user_with_pusher(self, username: str) -> Tuple[str, str]: + user_id = self.register_user(username, "pass") + access_token = self.login(username, "pass") + + # Register the pusher + user_tuple = self.get_success( + self.hs.get_datastores().main.get_user_by_access_token(access_token) + ) + token_id = user_tuple.token_id + + self.get_success( + self.hs.get_pusherpool().add_pusher( + user_id=user_id, + access_token=token_id, + kind="http", + app_id="m.http", + app_display_name="HTTP Push Notifications", + device_display_name="pushy push", + pushkey="a@example.com", + lang=None, + data={"url": "http://example.com/_matrix/push/v1/notify"}, + ) + ) + + return user_id, access_token + + def test_dont_notify_rule_overrides_message(self): + """ + The override push rule will suppress notification + """ + + user_id, access_token = self._make_user_with_pusher("user") + other_user_id, other_access_token = self._make_user_with_pusher("otheruser") + + # Create a room + room = self.helper.create_room_as(user_id, tok=access_token) + + # Disable user notifications for this room -> user + body = { + "conditions": [{"kind": "event_match", "key": "room_id", "pattern": room}], + "actions": ["dont_notify"], + } + channel = self.make_request( + "PUT", + "/pushrules/global/override/best.friend", + body, + access_token=access_token, + ) + self.assertEqual(channel.code, 200) + + # Check we start with no pushes + self.assertEqual(len(self.push_attempts), 0) + + # The other user joins + self.helper.join(room=room, user=other_user_id, tok=other_access_token) + + # The other user sends a message (ignored by dont_notify push rule set above) + self.helper.send(room, body="Hi!", tok=other_access_token) + self.assertEqual(len(self.push_attempts), 0) + + # The user sends a message back (sends a notification) + self.helper.send(room, body="Hello", tok=access_token) + self.assertEqual(len(self.push_attempts), 1) From 4a53f357379c2dc407617a3d39e6da4790dec9aa Mon Sep 17 00:00:00 2001 From: reivilibre Date: Fri, 11 Mar 2022 14:00:15 +0000 Subject: [PATCH 394/474] Improve code documentation for the typing stream over replication.
(#12211) --- changelog.d/12211.misc | 1 + synapse/handlers/typing.py | 5 +++-- synapse/replication/tcp/handler.py | 2 +- synapse/replication/tcp/resource.py | 6 +++--- synapse/replication/tcp/streams/_base.py | 12 ++++++++++++ 5 files changed, 20 insertions(+), 6 deletions(-) create mode 100644 changelog.d/12211.misc diff --git a/changelog.d/12211.misc b/changelog.d/12211.misc new file mode 100644 index 000000000000..d11634a1ee0f --- /dev/null +++ b/changelog.d/12211.misc @@ -0,0 +1 @@ +Improve code documentation for the typing stream over replication. \ No newline at end of file diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 3b8912652856..6854428b7ca5 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -160,8 +160,9 @@ def process_replication_rows( """Should be called whenever we receive updates for typing stream.""" if self._latest_room_serial > token: - # The master has gone backwards. To prevent inconsistent data, just - # clear everything. + # The typing worker has gone backwards (e.g. it may have restarted). + # To prevent inconsistent data, just clear everything. + logger.info("Typing handler stream went backwards; resetting") self._reset() # Set the latest serial token to whatever the server gave us. diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index d51f045f229a..b217c35f995d 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -709,7 +709,7 @@ def send_remote_server_up(self, server: str) -> None: self.send_command(RemoteServerUpCommand(server)) def stream_update(self, stream_name: str, token: Optional[int], data: Any) -> None: - """Called when a new update is available to stream to clients. + """Called when a new update is available to stream to Redis subscribers. We need to check if the client is interested in the stream or not """ diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index ab829040cde9..c6870df8f954 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -67,8 +67,8 @@ def buildProtocol(self, addr: IAddress) -> ServerReplicationStreamProtocol: class ReplicationStreamer: """Handles replication connections. - This needs to be poked when new replication data may be available. When new - data is available it will propagate to all connected clients. + This needs to be poked when new replication data may be available. + When new data is available it will propagate to all Redis subscribers. """ def __init__(self, hs: "HomeServer"): @@ -109,7 +109,7 @@ def __init__(self, hs: "HomeServer"): def on_notifier_poke(self) -> None: """Checks if there is actually any new data and sends it to the - connections if there are. + Redis subscribers if there are. This should get called each time new data is available, even if it is currently being executed, so that nothing gets missed diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index 23d631a76944..495f2f0285ba 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -316,7 +316,19 @@ def __init__(self, hs: "HomeServer"): class TypingStream(Stream): @attr.s(slots=True, frozen=True, auto_attribs=True) class TypingStreamRow: + """ + An entry in the typing stream. + Describes all the users that are 'typing' right now in one room. 
+ + When a user stops typing, it will be streamed as a new update with that + user absent; you can think of the `user_ids` list as overwriting the + entire list that was there previously. + """ + + # The room that this update is for. room_id: str + + # All the users that are 'typing' right now in the specified room. user_ids: List[str] NAME = "typing" From e6a106fd5ebbf30a7c84f8ba09dc903d20213be3 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 11 Mar 2022 16:15:11 +0100 Subject: [PATCH 395/474] Implement a Jinja2 filter to extract localparts from email addresses (#12212) --- changelog.d/12212.feature | 1 + docs/sample_config.yaml | 3 ++- docs/templates.md | 7 +++++++ synapse/config/oidc.py | 3 ++- synapse/handlers/oidc.py | 6 ++++++ synapse/util/templates.py | 5 +++++ 6 files changed, 23 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12212.feature diff --git a/changelog.d/12212.feature b/changelog.d/12212.feature new file mode 100644 index 000000000000..fe337ff99057 --- /dev/null +++ b/changelog.d/12212.feature @@ -0,0 +1 @@ +Add a new Jinja2 template filter to extract the local part of an email address. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index ef25a3175f98..d634fd8ff54c 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1948,7 +1948,8 @@ saml2_config: # localpart_template: Jinja2 template for the localpart of the MXID. # If this is not set, the user will be prompted to choose their # own username (see the documentation for the -# 'sso_auth_account_details.html' template). +# 'sso_auth_account_details.html' template). This template can +# use the 'localpart_from_email' filter. # # confirm_localpart: Whether to prompt the user to validate (or # change) the generated localpart (see the documentation for the diff --git a/docs/templates.md b/docs/templates.md index b251d05cb9eb..f87692a4538d 100644 --- a/docs/templates.md +++ b/docs/templates.md @@ -36,6 +36,13 @@ Turns a `mxc://` URL for media content into an HTTP(S) one using the homeserver' Example: `message.sender_avatar_url|mxc_to_http(32,32)` +```python +localpart_from_email(address: str) -> str +``` + +Returns the local part of an email address (e.g. `alice` in `alice@example.com`). + +Example: `user.email_address|localpart_from_email` ## Email templates diff --git a/synapse/config/oidc.py b/synapse/config/oidc.py index fc95912d9b3f..5d571651cbc9 100644 --- a/synapse/config/oidc.py +++ b/synapse/config/oidc.py @@ -183,7 +183,8 @@ def generate_config_section(self, config_dir_path, server_name, **kwargs) -> str # localpart_template: Jinja2 template for the localpart of the MXID. # If this is not set, the user will be prompted to choose their # own username (see the documentation for the - # 'sso_auth_account_details.html' template). + # 'sso_auth_account_details.html' template). This template can + # use the 'localpart_from_email' filter. 
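As a quick illustration of the filter this patch adds (a minimal sketch, assuming only that Jinja2 is installed; the standalone environment here is illustrative, since Synapse registers the filter on its own template environments):

```python
from jinja2 import Environment

def localpart_from_email(address: str) -> str:
    # Mirrors the helper added in synapse/util/templates.py below:
    # everything before the final "@" of the address.
    return address.rsplit("@", 1)[0]

env = Environment()
env.filters["localpart_from_email"] = localpart_from_email

# e.g. a localpart_template of "{{ user.email | localpart_from_email }}"
template = env.from_string("{{ email | localpart_from_email }}")
print(template.render(email="alice@example.com"))  # -> alice
```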
# # confirm_localpart: Whether to prompt the user to validate (or # change) the generated localpart (see the documentation for the diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py index d98659edc761..724b9cfcb4bb 100644 --- a/synapse/handlers/oidc.py +++ b/synapse/handlers/oidc.py @@ -45,6 +45,7 @@ from synapse.util import Clock, json_decoder from synapse.util.caches.cached_call import RetryOnExceptionCachedCall from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry +from synapse.util.templates import _localpart_from_email_filter if TYPE_CHECKING: from synapse.server import HomeServer @@ -1308,6 +1309,11 @@ def jinja_finalize(thing: Any) -> Any: env = Environment(finalize=jinja_finalize) +env.filters.update( + { + "localpart_from_email": _localpart_from_email_filter, + } +) @attr.s(slots=True, frozen=True, auto_attribs=True) diff --git a/synapse/util/templates.py b/synapse/util/templates.py index 12941065ca60..fb758b7180f2 100644 --- a/synapse/util/templates.py +++ b/synapse/util/templates.py @@ -64,6 +64,7 @@ def build_jinja_env( { "format_ts": _format_ts_filter, "mxc_to_http": _create_mxc_to_http_filter(config.server.public_baseurl), + "localpart_from_email": _localpart_from_email_filter, } ) @@ -112,3 +113,7 @@ def mxc_to_http_filter( def _format_ts_filter(value: int, format: str) -> str: return time.strftime(format, time.localtime(value / 1000)) + + +def _localpart_from_email_filter(address: str) -> str: + return address.rsplit("@", 1)[0] From ef3619e61d84493d98470eb2a69131d15eb1166b Mon Sep 17 00:00:00 2001 From: Shay Date: Fri, 11 Mar 2022 10:46:45 -0800 Subject: [PATCH 396/474] Add config settings for background update parameters (#11980) --- changelog.d/11980.misc | 1 + docs/sample_config.yaml | 32 +++ synapse/config/_base.pyi | 2 + synapse/config/background_updates.py | 68 ++++++ synapse/config/homeserver.py | 2 + synapse/storage/background_updates.py | 39 +-- tests/config/test_background_update.py | 58 +++++ tests/rest/admin/test_background_updates.py | 9 +- tests/storage/test_background_update.py | 253 ++++++++++++++++++-- 9 files changed, 430 insertions(+), 34 deletions(-) create mode 100644 changelog.d/11980.misc create mode 100644 synapse/config/background_updates.py create mode 100644 tests/config/test_background_update.py diff --git a/changelog.d/11980.misc b/changelog.d/11980.misc new file mode 100644 index 000000000000..36e992e645a3 --- /dev/null +++ b/changelog.d/11980.misc @@ -0,0 +1 @@ +Add config settings for background update parameters. \ No newline at end of file diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index d634fd8ff54c..36c6c56e58f7 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -2735,3 +2735,35 @@ redis: # Optional password if configured on the Redis instance # #password: + + +## Background Updates ## + +# Background updates are database updates that are run in the background in batches. +# The duration, minimum batch size, default batch size, whether to sleep between batches and if so, how long to +# sleep can all be configured. This is helpful to speed up or slow down the updates. +# +background_updates: + # How long in milliseconds to run a batch of background updates for. Defaults to 100. Uncomment and set + # a time to change the default. + # + #background_update_duration_ms: 500 + + # Whether to sleep between updates. Defaults to True. Uncomment to change the default. + # + #sleep_enabled: false + + # If sleeping between updates, how long in milliseconds to sleep for. 
Defaults to 1000. Uncomment + # and set a duration to change the default. + # + #sleep_duration_ms: 300 + + # Minimum size a batch of background updates can be. Must be greater than 0. Defaults to 1. Uncomment and + # set a size to change the default. + # + #min_batch_size: 10 + + # The batch size to use for the first iteration of a new background update. The default is 100. + # Uncomment and set a size to change the default. + # + #default_batch_size: 50 diff --git a/synapse/config/_base.pyi b/synapse/config/_base.pyi index 1eb5f5a68cf8..363d8b45545e 100644 --- a/synapse/config/_base.pyi +++ b/synapse/config/_base.pyi @@ -19,6 +19,7 @@ from synapse.config import ( api, appservice, auth, + background_updates, cache, captcha, cas, @@ -113,6 +114,7 @@ class RootConfig: caches: cache.CacheConfig federation: federation.FederationConfig retention: retention.RetentionConfig + background_updates: background_updates.BackgroundUpdateConfig config_classes: List[Type["Config"]] = ... def __init__(self) -> None: ... diff --git a/synapse/config/background_updates.py b/synapse/config/background_updates.py new file mode 100644 index 000000000000..f6cdeacc4b19 --- /dev/null +++ b/synapse/config/background_updates.py @@ -0,0 +1,68 @@ +# Copyright 2022 Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ._base import Config + + +class BackgroundUpdateConfig(Config): + section = "background_updates" + + def generate_config_section(self, **kwargs) -> str: + return """\ + ## Background Updates ## + + # Background updates are database updates that are run in the background in batches. + # The duration, minimum batch size, default batch size, whether to sleep between batches and if so, how long to + # sleep can all be configured. This is helpful to speed up or slow down the updates. + # + background_updates: + # How long in milliseconds to run a batch of background updates for. Defaults to 100. Uncomment and set + # a time to change the default. + # + #background_update_duration_ms: 500 + + # Whether to sleep between updates. Defaults to True. Uncomment to change the default. + # + #sleep_enabled: false + + # If sleeping between updates, how long in milliseconds to sleep for. Defaults to 1000. Uncomment + # and set a duration to change the default. + # + #sleep_duration_ms: 300 + + # Minimum size a batch of background updates can be. Must be greater than 0. Defaults to 1. Uncomment and + # set a size to change the default. + # + #min_batch_size: 10 + + # The batch size to use for the first iteration of a new background update. The default is 100. + # Uncomment and set a size to change the default. 
+ # + #default_batch_size: 50 + """ + + def read_config(self, config, **kwargs) -> None: + bg_update_config = config.get("background_updates") or {} + + self.update_duration_ms = bg_update_config.get( + "background_update_duration_ms", 100 + ) + + self.sleep_enabled = bg_update_config.get("sleep_enabled", True) + + self.sleep_duration_ms = bg_update_config.get("sleep_duration_ms", 1000) + + self.min_batch_size = bg_update_config.get("min_batch_size", 1) + + self.default_batch_size = bg_update_config.get("default_batch_size", 100) diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index 001605c265fb..a4ec70690802 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -16,6 +16,7 @@ from .api import ApiConfig from .appservice import AppServiceConfig from .auth import AuthConfig +from .background_updates import BackgroundUpdateConfig from .cache import CacheConfig from .captcha import CaptchaConfig from .cas import CasConfig @@ -99,4 +100,5 @@ class HomeServerConfig(RootConfig): WorkerConfig, RedisConfig, ExperimentalConfig, + BackgroundUpdateConfig, ] diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index 4acc2c997dce..08c6eabc6d1a 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -60,18 +60,19 @@ class _BackgroundUpdateHandler: class _BackgroundUpdateContextManager: - BACKGROUND_UPDATE_INTERVAL_MS = 1000 - BACKGROUND_UPDATE_DURATION_MS = 100 - - def __init__(self, sleep: bool, clock: Clock): + def __init__( + self, sleep: bool, clock: Clock, sleep_duration_ms: int, update_duration: int + ): self._sleep = sleep self._clock = clock + self._sleep_duration_ms = sleep_duration_ms + self._update_duration_ms = update_duration async def __aenter__(self) -> int: if self._sleep: - await self._clock.sleep(self.BACKGROUND_UPDATE_INTERVAL_MS / 1000) + await self._clock.sleep(self._sleep_duration_ms / 1000) - return self.BACKGROUND_UPDATE_DURATION_MS + return self._update_duration_ms async def __aexit__(self, *exc) -> None: pass @@ -133,9 +134,6 @@ class BackgroundUpdater: process and autotuning the batch size. """ - MINIMUM_BACKGROUND_BATCH_SIZE = 1 - DEFAULT_BACKGROUND_BATCH_SIZE = 100 - def __init__(self, hs: "HomeServer", database: "DatabasePool"): self._clock = hs.get_clock() self.db_pool = database @@ -160,6 +158,14 @@ def __init__(self, hs: "HomeServer", database: "DatabasePool"): # enable/disable background updates via the admin API. 
self.enabled = True + self.minimum_background_batch_size = hs.config.background_updates.min_batch_size + self.default_background_batch_size = ( + hs.config.background_updates.default_batch_size + ) + self.update_duration_ms = hs.config.background_updates.update_duration_ms + self.sleep_duration_ms = hs.config.background_updates.sleep_duration_ms + self.sleep_enabled = hs.config.background_updates.sleep_enabled + def register_update_controller_callbacks( self, on_update: ON_UPDATE_CALLBACK, @@ -216,7 +222,9 @@ def _get_context_manager_for_update( if self._on_update_callback is not None: return self._on_update_callback(update_name, database_name, oneshot) - return _BackgroundUpdateContextManager(sleep, self._clock) + return _BackgroundUpdateContextManager( + sleep, self._clock, self.sleep_duration_ms, self.update_duration_ms + ) async def _default_batch_size(self, update_name: str, database_name: str) -> int: """The batch size to use for the first iteration of a new background @@ -225,7 +233,7 @@ async def _default_batch_size(self, update_name: str, database_name: str) -> int if self._default_batch_size_callback is not None: return await self._default_batch_size_callback(update_name, database_name) - return self.DEFAULT_BACKGROUND_BATCH_SIZE + return self.default_background_batch_size async def _min_batch_size(self, update_name: str, database_name: str) -> int: """A lower bound on the batch size of a new background update. @@ -235,7 +243,7 @@ async def _min_batch_size(self, update_name: str, database_name: str) -> int: if self._min_batch_size_callback is not None: return await self._min_batch_size_callback(update_name, database_name) - return self.MINIMUM_BACKGROUND_BATCH_SIZE + return self.minimum_background_batch_size def get_current_update(self) -> Optional[BackgroundUpdatePerformance]: """Returns the current background update, if any.""" @@ -254,9 +262,12 @@ def start_doing_background_updates(self) -> None: if self.enabled: # if we start a new background update, not all updates are done. self._all_done = False - run_as_background_process("background_updates", self.run_background_updates) + sleep = self.sleep_enabled + run_as_background_process( + "background_updates", self.run_background_updates, sleep + ) - async def run_background_updates(self, sleep: bool = True) -> None: + async def run_background_updates(self, sleep: bool) -> None: if self._running or not self.enabled: return diff --git a/tests/config/test_background_update.py b/tests/config/test_background_update.py new file mode 100644 index 000000000000..0c32c1ca299e --- /dev/null +++ b/tests/config/test_background_update.py @@ -0,0 +1,58 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import yaml + +from synapse.storage.background_updates import BackgroundUpdater + +from tests.unittest import HomeserverTestCase, override_config + + +class BackgroundUpdateConfigTestCase(HomeserverTestCase): + # Tests that the default values in the config are correctly loaded. 
Note that the default + # values are loaded when the corresponding config options are commented out, which is why there isn't + # a config specified here. + def test_default_configuration(self): + background_updater = BackgroundUpdater( + self.hs, self.hs.get_datastores().main.db_pool + ) + + self.assertEqual(background_updater.minimum_background_batch_size, 1) + self.assertEqual(background_updater.default_background_batch_size, 100) + self.assertEqual(background_updater.sleep_enabled, True) + self.assertEqual(background_updater.sleep_duration_ms, 1000) + self.assertEqual(background_updater.update_duration_ms, 100) + + # Tests that non-default values for the config options are properly picked up and passed on. + @override_config( + yaml.safe_load( + """ + background_updates: + background_update_duration_ms: 1000 + sleep_enabled: false + sleep_duration_ms: 600 + min_batch_size: 5 + default_batch_size: 50 + """ + ) + ) + def test_custom_configuration(self): + background_updater = BackgroundUpdater( + self.hs, self.hs.get_datastores().main.db_pool + ) + + self.assertEqual(background_updater.minimum_background_batch_size, 5) + self.assertEqual(background_updater.default_background_batch_size, 50) + self.assertEqual(background_updater.sleep_enabled, False) + self.assertEqual(background_updater.sleep_duration_ms, 600) + self.assertEqual(background_updater.update_duration_ms, 1000) diff --git a/tests/rest/admin/test_background_updates.py b/tests/rest/admin/test_background_updates.py index becec84524cb..6cf56b1e352f 100644 --- a/tests/rest/admin/test_background_updates.py +++ b/tests/rest/admin/test_background_updates.py @@ -39,6 +39,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") + self.updater = BackgroundUpdater(hs, self.store.db_pool) @parameterized.expand( [ @@ -135,10 +136,10 @@ def test_status_bg_update(self) -> None: """Test the status API works with a background update.""" # Create a new background update - self._register_bg_update() self.store.db_pool.updates.start_doing_background_updates() + self.reactor.pump([1.0, 1.0, 1.0]) channel = self.make_request( @@ -158,7 +159,7 @@ def test_status_bg_update(self) -> None: "average_items_per_ms": 0.1, "total_duration_ms": 1000.0, "total_item_count": ( - BackgroundUpdater.DEFAULT_BACKGROUND_BATCH_SIZE + self.updater.default_background_batch_size ), } }, @@ -213,7 +214,7 @@ def test_enabled(self) -> None: "average_items_per_ms": 0.1, "total_duration_ms": 1000.0, "total_item_count": ( - BackgroundUpdater.DEFAULT_BACKGROUND_BATCH_SIZE + self.updater.default_background_batch_size ), } }, @@ -242,7 +243,7 @@ def test_enabled(self) -> None: "average_items_per_ms": 0.1, "total_duration_ms": 1000.0, "total_item_count": ( - BackgroundUpdater.DEFAULT_BACKGROUND_BATCH_SIZE + self.updater.default_background_batch_size ), } }, diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index 9fdf54ea31f9..5cf18b690e48 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -14,12 +14,15 @@ from unittest.mock import Mock +import yaml + from twisted.internet.defer import Deferred, ensureDeferred from synapse.storage.background_updates import BackgroundUpdater from tests import unittest from tests.test_utils import make_awaitable, simple_async_mock +from tests.unittest import override_config 
class BackgroundUpdateTestCase(unittest.HomeserverTestCase): @@ -34,6 +37,19 @@ def prepare(self, reactor, clock, homeserver): self.updates.register_background_update_handler( "test_update", self.update_handler ) + self.store = self.hs.get_datastores().main + + async def update(self, progress, count): + duration_ms = 10 + await self.clock.sleep((count * duration_ms) / 1000) + progress = {"my_key": progress["my_key"] + 1} + await self.store.db_pool.runInteraction( + "update_progress", + self.updates._background_update_progress_txn, + "test_update", + progress, + ) + return count def test_do_background_update(self): # the time we claim it takes to update one item when running the update @@ -42,27 +58,14 @@ def test_do_background_update(self): # the target runtime for each bg update target_background_update_duration_ms = 100 - store = self.hs.get_datastores().main self.get_success( - store.db_pool.simple_insert( + self.store.db_pool.simple_insert( "background_updates", values={"update_name": "test_update", "progress_json": '{"my_key": 1}'}, ) ) - # first step: make a bit of progress - async def update(progress, count): - await self.clock.sleep((count * duration_ms) / 1000) - progress = {"my_key": progress["my_key"] + 1} - await store.db_pool.runInteraction( - "update_progress", - self.updates._background_update_progress_txn, - "test_update", - progress, - ) - return count - - self.update_handler.side_effect = update + self.update_handler.side_effect = self.update self.update_handler.reset_mock() res = self.get_success( self.updates.do_next_background_update(False), @@ -72,7 +75,7 @@ async def update(progress, count): # on the first call, we should get run with the default background update size self.update_handler.assert_called_once_with( - {"my_key": 1}, self.updates.DEFAULT_BACKGROUND_BATCH_SIZE + {"my_key": 1}, self.updates.default_background_batch_size ) # second step: complete the update @@ -99,6 +102,224 @@ async def update(progress, count): self.assertTrue(result) self.assertFalse(self.update_handler.called) + @override_config( + yaml.safe_load( + """ + background_updates: + default_batch_size: 20 + """ + ) + ) + def test_background_update_default_batch_set_by_config(self): + """ + Test that the background update is run with the default_batch_size set by the config + """ + + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + values={"update_name": "test_update", "progress_json": '{"my_key": 1}'}, + ) + ) + + self.update_handler.side_effect = self.update + self.update_handler.reset_mock() + res = self.get_success( + self.updates.do_next_background_update(False), + by=0.01, + ) + self.assertFalse(res) + + # on the first call, we should get run with the default background update size specified in the config + self.update_handler.assert_called_once_with({"my_key": 1}, 20) + + def test_background_update_default_sleep_behavior(self): + """ + Test default background update behavior, which is to sleep + """ + + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + values={"update_name": "test_update", "progress_json": '{"my_key": 1}'}, + ) + ) + + self.update_handler.side_effect = self.update + self.update_handler.reset_mock() + self.updates.start_doing_background_updates(), + + # 2: advance the reactor less than the default sleep duration (1000ms) + self.reactor.pump([0.5]) + # check that an update has not been run + self.update_handler.assert_not_called() + + # advance reactor past default sleep duration + self.reactor.pump([1]) + # 
check that update has been run + self.update_handler.assert_called() + + @override_config( + yaml.safe_load( + """ + background_updates: + sleep_duration_ms: 500 + """ + ) + ) + def test_background_update_sleep_set_in_config(self): + """ + Test that changing the sleep time in the config changes how long it sleeps + """ + + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + values={"update_name": "test_update", "progress_json": '{"my_key": 1}'}, + ) + ) + + self.update_handler.side_effect = self.update + self.update_handler.reset_mock() + self.updates.start_doing_background_updates(), + + # 2: advance the reactor less than the configured sleep duration (500ms) + self.reactor.pump([0.45]) + # check that an update has not been run + self.update_handler.assert_not_called() + + # advance reactor past config sleep duration but less than default duration + self.reactor.pump([0.75]) + # check that update has been run + self.update_handler.assert_called() + + @override_config( + yaml.safe_load( + """ + background_updates: + sleep_enabled: false + """ + ) + ) + def test_disabling_background_update_sleep(self): + """ + Test that disabling sleep in the config results in bg update not sleeping + """ + + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + values={"update_name": "test_update", "progress_json": '{"my_key": 1}'}, + ) + ) + + self.update_handler.side_effect = self.update + self.update_handler.reset_mock() + self.updates.start_doing_background_updates(), + + # 2: advance the reactor very little + self.reactor.pump([0.025]) + # check that an update has run + self.update_handler.assert_called() + + @override_config( + yaml.safe_load( + """ + background_updates: + background_update_duration_ms: 500 + """ + ) + ) + def test_background_update_duration_set_in_config(self): + """ + Test that the desired duration set in the config is used in determining batch size + """ + # Duration of one background update item + duration_ms = 10 + + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + values={"update_name": "test_update", "progress_json": '{"my_key": 1}'}, + ) + ) + + self.update_handler.side_effect = self.update + self.update_handler.reset_mock() + res = self.get_success( + self.updates.do_next_background_update(False), + by=0.02, + ) + self.assertFalse(res) + + # the first update was run with the default batch size, this should be run with 500ms as the + # desired duration + async def update(progress, count): + self.assertEqual(progress, {"my_key": 2}) + self.assertAlmostEqual( + count, + 500 / duration_ms, + places=0, + ) + await self.updates._end_background_update("test_update") + return count + + self.update_handler.side_effect = update + self.get_success(self.updates.do_next_background_update(False)) + + @override_config( + yaml.safe_load( + """ + background_updates: + min_batch_size: 5 + """ + ) + ) + def test_background_update_min_batch_set_in_config(self): + """ + Test that the minimum batch size set in the config is used + """ + # a very long-running individual update + duration_ms = 50 + + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + values={"update_name": "test_update", "progress_json": '{"my_key": 1}'}, + ) + ) + + # Run the update with the long-running update item + async def update(progress, count): + await self.clock.sleep((count * duration_ms) / 1000) + progress = {"my_key": progress["my_key"] + 1} + await self.store.db_pool.runInteraction( + "update_progress", + 
self.updates._background_update_progress_txn, + "test_update", + progress, + ) + return count + + self.update_handler.side_effect = update + self.update_handler.reset_mock() + res = self.get_success( + self.updates.do_next_background_update(False), + by=1, + ) + self.assertFalse(res) + + # the first update was run with the default batch size, this should be run with minimum batch size + # as the first items took a very long time + async def update(progress, count): + self.assertEqual(progress, {"my_key": 2}) + self.assertEqual(count, 5) + await self.updates._end_background_update("test_update") + return count + + self.update_handler.side_effect = update + self.get_success(self.updates.do_next_background_update(False)) + class BackgroundUpdateControllerTestCase(unittest.HomeserverTestCase): def prepare(self, reactor, clock, homeserver): From 54f674f7a9107d3dccd6c126c3e99337314a12c2 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Sat, 12 Mar 2022 13:23:37 -0500 Subject: [PATCH 397/474] Deprecate the groups/communities endpoints and add an experimental configuration flag. (#12200) --- changelog.d/12200.removal | 1 + docs/upgrade.md | 14 ++++++++++++++ synapse/app/generic_worker.py | 3 ++- synapse/config/experimental.py | 3 +++ synapse/federation/transport/server/__init__.py | 15 +++++++++++---- synapse/rest/__init__.py | 3 ++- synapse/rest/admin/__init__.py | 3 ++- 7 files changed, 35 insertions(+), 7 deletions(-) create mode 100644 changelog.d/12200.removal diff --git a/changelog.d/12200.removal b/changelog.d/12200.removal new file mode 100644 index 000000000000..312c7ae32597 --- /dev/null +++ b/changelog.d/12200.removal @@ -0,0 +1 @@ +The groups/communities feature in Synapse has been deprecated. diff --git a/docs/upgrade.md b/docs/upgrade.md index 95005962dc49..f9ac605e7b29 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -85,6 +85,20 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.56.0 + +## Groups/communities feature has been deprecated + +The non-standard groups/communities feature in Synapse has been deprecated and will +be disabled by default in Synapse v1.58.0. + +You can test disabling it by adding the following to your homeserver configuration: + +```yaml +experimental_features: + groups_enabled: false +``` + # Upgrading to v1.55.0 ## `synctl` script has been moved diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index a10a63b06c7e..b6f510ed3058 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -322,7 +322,8 @@ def _listen_http(self, listener_config: ListenerConfig) -> None: presence.register_servlets(self, resource) - groups.register_servlets(self, resource) + if self.config.experimental.groups_enabled: + groups.register_servlets(self, resource) resources.update({CLIENT_API_PREFIX: resource}) diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 41338b39df21..064db4487c85 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -74,3 +74,6 @@ def read_config(self, config: JsonDict, **kwargs): # MSC3720 (Account status endpoint) self.msc3720_enabled: bool = experimental.get("msc3720_enabled", False) + + # The deprecated groups feature. 
+ self.groups_enabled: bool = experimental.get("groups_enabled", True) diff --git a/synapse/federation/transport/server/__init__.py b/synapse/federation/transport/server/__init__.py index 67a634790712..71b2f90eb920 100644 --- a/synapse/federation/transport/server/__init__.py +++ b/synapse/federation/transport/server/__init__.py @@ -289,7 +289,7 @@ async def on_GET( return 200, {"sub": user_id} -DEFAULT_SERVLET_GROUPS: Dict[str, Iterable[Type[BaseFederationServlet]]] = { +SERVLET_GROUPS: Dict[str, Iterable[Type[BaseFederationServlet]]] = { "federation": FEDERATION_SERVLET_CLASSES, "room_list": (PublicRoomList,), "group_server": GROUP_SERVER_SERVLET_CLASSES, @@ -298,6 +298,10 @@ async def on_GET( "openid": (OpenIdUserInfo,), } +DEFAULT_SERVLET_GROUPS = ("federation", "room_list", "openid") + +GROUP_SERVLET_GROUPS = ("group_server", "group_local", "group_attestation") + def register_servlets( hs: "HomeServer", @@ -320,16 +324,19 @@ def register_servlets( Defaults to ``DEFAULT_SERVLET_GROUPS``. """ if not servlet_groups: - servlet_groups = DEFAULT_SERVLET_GROUPS.keys() + servlet_groups = DEFAULT_SERVLET_GROUPS + # Only allow the groups servlets if the deprecated groups feature is enabled. + if hs.config.experimental.groups_enabled: + servlet_groups = servlet_groups + GROUP_SERVLET_GROUPS for servlet_group in servlet_groups: # Skip unknown servlet groups. - if servlet_group not in DEFAULT_SERVLET_GROUPS: + if servlet_group not in SERVLET_GROUPS: raise RuntimeError( f"Attempting to register unknown federation servlet: '{servlet_group}'" ) - for servletclass in DEFAULT_SERVLET_GROUPS[servlet_group]: + for servletclass in SERVLET_GROUPS[servlet_group]: # Only allow the `/timestamp_to_event` servlet if msc3030 is enabled if ( servletclass == FederationTimestampLookupServlet diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index cebdeecb8127..762808a5717b 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -118,7 +118,8 @@ def register_servlets(client_resource: HttpServer, hs: "HomeServer") -> None: thirdparty.register_servlets(hs, client_resource) sendtodevice.register_servlets(hs, client_resource) user_directory.register_servlets(hs, client_resource) - groups.register_servlets(hs, client_resource) + if hs.config.experimental.groups_enabled: + groups.register_servlets(hs, client_resource) room_upgrade_rest_servlet.register_servlets(hs, client_resource) room_batch.register_servlets(hs, client_resource) capabilities.register_servlets(hs, client_resource) diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 6de302f81352..cb4d55c89d78 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -293,7 +293,8 @@ def register_servlets_for_client_rest_resource( ResetPasswordRestServlet(hs).register(http_server) SearchUsersRestServlet(hs).register(http_server) UserRegisterServlet(hs).register(http_server) - DeleteGroupAdminRestServlet(hs).register(http_server) + if hs.config.experimental.groups_enabled: + DeleteGroupAdminRestServlet(hs).register(http_server) AccountValidityRenewServlet(hs).register(http_server) # Load the media repo ones if we're using them. 
Otherwise load the servlets which From 90b2327066d2343faa86c464a182b6f3c4422ecd Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Mon, 14 Mar 2022 17:52:15 +0000 Subject: [PATCH 398/474] Add `delay_cancellation` utility function (#12180) `delay_cancellation` behaves like `stop_cancellation`, except it delays `CancelledError`s until the original `Deferred` resolves. This is handy for unifying cleanup paths and ensuring that uncancelled coroutines don't use finished logcontexts. Signed-off-by: Sean Quah --- changelog.d/12180.misc | 1 + synapse/util/async_helpers.py | 48 ++++++++++-- tests/util/test_async_helpers.py | 124 +++++++++++++++++++++++++++++-- 3 files changed, 161 insertions(+), 12 deletions(-) create mode 100644 changelog.d/12180.misc diff --git a/changelog.d/12180.misc b/changelog.d/12180.misc new file mode 100644 index 000000000000..7a347352fd91 --- /dev/null +++ b/changelog.d/12180.misc @@ -0,0 +1 @@ +Add `delay_cancellation` utility function, which behaves like `stop_cancellation` but waits until the original `Deferred` resolves before raising a `CancelledError`. diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index a9f67dcbac6a..69c8c1baa9fc 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -686,12 +686,48 @@ def stop_cancellation(deferred: "defer.Deferred[T]") -> "defer.Deferred[T]": Synapse logcontext rules. Returns: - A new `Deferred`, which will contain the result of the original `Deferred`, - but will not propagate cancellation through to the original. When cancelled, - the new `Deferred` will fail with a `CancelledError` and will not follow the - Synapse logcontext rules. `make_deferred_yieldable` should be used to wrap - the new `Deferred`. + A new `Deferred`, which will contain the result of the original `Deferred`. + The new `Deferred` will not propagate cancellation through to the original. + When cancelled, the new `Deferred` will fail with a `CancelledError`. + + The new `Deferred` will not follow the Synapse logcontext rules and should be + wrapped with `make_deferred_yieldable`. + """ + new_deferred: "defer.Deferred[T]" = defer.Deferred() + deferred.chainDeferred(new_deferred) + return new_deferred + + +def delay_cancellation(deferred: "defer.Deferred[T]") -> "defer.Deferred[T]": + """Delay cancellation of a `Deferred` until it resolves. + + Has the same effect as `stop_cancellation`, but the returned `Deferred` will not + resolve with a `CancelledError` until the original `Deferred` resolves. + + Args: + deferred: The `Deferred` to protect against cancellation. May optionally follow + the Synapse logcontext rules. + + Returns: + A new `Deferred`, which will contain the result of the original `Deferred`. + The new `Deferred` will not propagate cancellation through to the original. + When cancelled, the new `Deferred` will wait until the original `Deferred` + resolves before failing with a `CancelledError`. + + The new `Deferred` will follow the Synapse logcontext rules if `deferred` + follows the Synapse logcontext rules. Otherwise the new `Deferred` should be + wrapped with `make_deferred_yieldable`. """ - new_deferred: defer.Deferred[T] = defer.Deferred() + + def handle_cancel(new_deferred: "defer.Deferred[T]") -> None: + # before the new deferred is cancelled, we `pause` it to stop the cancellation + # propagating. we then `unpause` it once the wrapped deferred completes, to + # propagate the exception. 
+ new_deferred.pause() + new_deferred.errback(Failure(CancelledError())) + + deferred.addBoth(lambda _: new_deferred.unpause()) + + new_deferred: "defer.Deferred[T]" = defer.Deferred(handle_cancel) deferred.chainDeferred(new_deferred) return new_deferred diff --git a/tests/util/test_async_helpers.py b/tests/util/test_async_helpers.py index ff53ce114bd7..e5bc416de12f 100644 --- a/tests/util/test_async_helpers.py +++ b/tests/util/test_async_helpers.py @@ -13,6 +13,8 @@ # limitations under the License. import traceback +from parameterized import parameterized_class + from twisted.internet import defer from twisted.internet.defer import CancelledError, Deferred, ensureDeferred from twisted.internet.task import Clock @@ -23,10 +25,12 @@ LoggingContext, PreserveLoggingContext, current_context, + make_deferred_yieldable, ) from synapse.util.async_helpers import ( ObservableDeferred, concurrently_execute, + delay_cancellation, stop_cancellation, timeout_deferred, ) @@ -313,13 +317,27 @@ async def caller(): self.successResultOf(d2) -class StopCancellationTests(TestCase): - """Tests for the `stop_cancellation` function.""" +@parameterized_class( + ("wrapper",), + [("stop_cancellation",), ("delay_cancellation",)], +) +class CancellationWrapperTests(TestCase): + """Common tests for the `stop_cancellation` and `delay_cancellation` functions.""" + + wrapper: str + + def wrap_deferred(self, deferred: "Deferred[str]") -> "Deferred[str]": + if self.wrapper == "stop_cancellation": + return stop_cancellation(deferred) + elif self.wrapper == "delay_cancellation": + return delay_cancellation(deferred) + else: + raise ValueError(f"Unsupported wrapper type: {self.wrapper}") def test_succeed(self): """Test that the new `Deferred` receives the result.""" deferred: "Deferred[str]" = Deferred() - wrapper_deferred = stop_cancellation(deferred) + wrapper_deferred = self.wrap_deferred(deferred) # Success should propagate through. deferred.callback("success") @@ -329,7 +347,7 @@ def test_succeed(self): def test_failure(self): """Test that the new `Deferred` receives the `Failure`.""" deferred: "Deferred[str]" = Deferred() - wrapper_deferred = stop_cancellation(deferred) + wrapper_deferred = self.wrap_deferred(deferred) # Failure should propagate through. deferred.errback(ValueError("abc")) @@ -337,6 +355,10 @@ def test_failure(self): self.failureResultOf(wrapper_deferred, ValueError) self.assertIsNone(deferred.result, "`Failure` was not consumed") + +class StopCancellationTests(TestCase): + """Tests for the `stop_cancellation` function.""" + def test_cancellation(self): """Test that cancellation of the new `Deferred` leaves the original running.""" deferred: "Deferred[str]" = Deferred() @@ -347,11 +369,101 @@ def test_cancellation(self): self.assertTrue(wrapper_deferred.called) self.failureResultOf(wrapper_deferred, CancelledError) self.assertFalse( - deferred.called, "Original `Deferred` was unexpectedly cancelled." + deferred.called, "Original `Deferred` was unexpectedly cancelled" + ) + + # Now make the original `Deferred` fail. + # The `Failure` must be consumed, otherwise unwanted tracebacks will be printed + # in logs. 
+ deferred.errback(ValueError("abc")) + self.assertIsNone(deferred.result, "`Failure` was not consumed") + + +class DelayCancellationTests(TestCase): + """Tests for the `delay_cancellation` function.""" + + def test_cancellation(self): + """Test that cancellation of the new `Deferred` waits for the original.""" + deferred: "Deferred[str]" = Deferred() + wrapper_deferred = delay_cancellation(deferred) + + # Cancel the new `Deferred`. + wrapper_deferred.cancel() + self.assertNoResult(wrapper_deferred) + self.assertFalse( + deferred.called, "Original `Deferred` was unexpectedly cancelled" + ) + + # Now make the original `Deferred` fail. + # The `Failure` must be consumed, otherwise unwanted tracebacks will be printed + # in logs. + deferred.errback(ValueError("abc")) + self.assertIsNone(deferred.result, "`Failure` was not consumed") + + # Now that the original `Deferred` has failed, we should get a `CancelledError`. + self.failureResultOf(wrapper_deferred, CancelledError) + + def test_suppresses_second_cancellation(self): + """Test that a second cancellation is suppressed. + + Identical to `test_cancellation` except the new `Deferred` is cancelled twice. + """ + deferred: "Deferred[str]" = Deferred() + wrapper_deferred = delay_cancellation(deferred) + + # Cancel the new `Deferred`, twice. + wrapper_deferred.cancel() + wrapper_deferred.cancel() + self.assertNoResult(wrapper_deferred) + self.assertFalse( + deferred.called, "Original `Deferred` was unexpectedly cancelled" ) - # Now make the inner `Deferred` fail. + # Now make the original `Deferred` fail. # The `Failure` must be consumed, otherwise unwanted tracebacks will be printed # in logs. deferred.errback(ValueError("abc")) self.assertIsNone(deferred.result, "`Failure` was not consumed") + + # Now that the original `Deferred` has failed, we should get a `CancelledError`. + self.failureResultOf(wrapper_deferred, CancelledError) + + def test_propagates_cancelled_error(self): + """Test that a `CancelledError` from the original `Deferred` gets propagated.""" + deferred: "Deferred[str]" = Deferred() + wrapper_deferred = delay_cancellation(deferred) + + # Fail the original `Deferred` with a `CancelledError`. + cancelled_error = CancelledError() + deferred.errback(cancelled_error) + + # The new `Deferred` should fail with exactly the same `CancelledError`. + self.assertTrue(wrapper_deferred.called) + self.assertIs(cancelled_error, self.failureResultOf(wrapper_deferred).value) + + def test_preserves_logcontext(self): + """Test that logging contexts are preserved.""" + blocking_d: "Deferred[None]" = Deferred() + + async def inner(): + await make_deferred_yieldable(blocking_d) + + async def outer(): + with LoggingContext("c") as c: + try: + await delay_cancellation(defer.ensureDeferred(inner())) + self.fail("`CancelledError` was not raised") + except CancelledError: + self.assertEqual(c, current_context()) + # Succeed with no error, unless the logging context is wrong. + + # Run and block inside `inner()`. + d = defer.ensureDeferred(outer()) + self.assertEqual(SENTINEL_CONTEXT, current_context()) + + d.cancel() + + # Now unblock. `outer()` will consume the `CancelledError` and check the + # logging context. 
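For context on the behaviour these tests pin down, a hedged usage sketch (the `do_work` coroutine and `caller` wrapper are illustrative; only `stop_cancellation` and `delay_cancellation` come from this patch):

```python
from twisted.internet import defer

from synapse.logging.context import make_deferred_yieldable
from synapse.util.async_helpers import delay_cancellation, stop_cancellation

async def do_work() -> str:
    ...  # some work that must not be left half-done
    return "done"

async def caller() -> str:
    d = defer.ensureDeferred(do_work())

    # With stop_cancellation, cancelling the wrapper raises CancelledError
    # immediately while `do_work` carries on in the background.
    #
    # With delay_cancellation, cancelling the wrapper lets `do_work` run to
    # completion first and only then raises CancelledError, so the work never
    # continues on a finished logcontext.
    return await make_deferred_yieldable(delay_cancellation(d))
```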
+ blocking_d.callback(None) + self.successResultOf(d) From 8e5706d14448c0fe8d1c55eaca38a672c701d7a9 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Mon, 14 Mar 2022 17:52:58 +0000 Subject: [PATCH 399/474] Fix broken background updates when using sqlite with `enable_search` off (#12215) Signed-off-by: Sean Quah --- changelog.d/12215.bugfix | 1 + synapse/storage/databases/main/search.py | 13 +++++++------ 2 files changed, 8 insertions(+), 6 deletions(-) create mode 100644 changelog.d/12215.bugfix diff --git a/changelog.d/12215.bugfix b/changelog.d/12215.bugfix new file mode 100644 index 000000000000..593b12556beb --- /dev/null +++ b/changelog.d/12215.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in 1.54.0 that broke background updates on sqlite homeservers while search was disabled. diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index e23b1190726b..c5e9010c83b5 100644 --- a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -125,9 +125,6 @@ def __init__( ): super().__init__(database, db_conn, hs) - if not hs.config.server.enable_search: - return - self.db_pool.updates.register_background_update_handler( self.EVENT_SEARCH_UPDATE_NAME, self._background_reindex_search ) @@ -243,9 +240,13 @@ def reindex_search_txn(txn): return len(event_search_rows) - result = await self.db_pool.runInteraction( - self.EVENT_SEARCH_UPDATE_NAME, reindex_search_txn - ) + if self.hs.config.server.enable_search: + result = await self.db_pool.runInteraction( + self.EVENT_SEARCH_UPDATE_NAME, reindex_search_txn + ) + else: + # Don't index anything if search is not enabled. + result = 0 if not result: await self.db_pool.updates._end_background_update( From 605d161d7d585847fd1bb98d14d5281daeac8e86 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Mon, 14 Mar 2022 18:49:07 +0000 Subject: [PATCH 400/474] Add cancellation support to `ReadWriteLock` (#12120) Also convert `ReadWriteLock` to use async context managers. Signed-off-by: Sean Quah --- changelog.d/12120.misc | 1 + synapse/handlers/pagination.py | 8 +- synapse/util/async_helpers.py | 71 +++--- tests/util/test_rwlock.py | 395 ++++++++++++++++++++++++++++----- 4 files changed, 382 insertions(+), 93 deletions(-) create mode 100644 changelog.d/12120.misc diff --git a/changelog.d/12120.misc b/changelog.d/12120.misc new file mode 100644 index 000000000000..360309650032 --- /dev/null +++ b/changelog.d/12120.misc @@ -0,0 +1 @@ +Add support for cancellation to `ReadWriteLock`. diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 183fabcfc09e..60059fec3e0f 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -350,7 +350,7 @@ async def _purge_history( """ self._purges_in_progress_by_room.add(room_id) try: - with await self.pagination_lock.write(room_id): + async with self.pagination_lock.write(room_id): await self.storage.purge_events.purge_history( room_id, token, delete_local_events ) @@ -406,7 +406,7 @@ async def purge_room(self, room_id: str, force: bool = False) -> None: room_id: room to be purged force: set true to skip checking for joined users. 
""" - with await self.pagination_lock.write(room_id): + async with self.pagination_lock.write(room_id): # first check that we have no users in this room if not force: joined = await self.store.is_host_joined(room_id, self._server_name) @@ -448,7 +448,7 @@ async def get_messages( room_token = from_token.room_key - with await self.pagination_lock.read(room_id): + async with self.pagination_lock.read(room_id): ( membership, member_event_id, @@ -615,7 +615,7 @@ async def _shutdown_and_purge_room( self._purges_in_progress_by_room.add(room_id) try: - with await self.pagination_lock.write(room_id): + async with self.pagination_lock.write(room_id): self._delete_by_id[delete_id].status = DeleteStatus.STATUS_SHUTTING_DOWN self._delete_by_id[ delete_id diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 69c8c1baa9fc..6a8e844d6365 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -18,9 +18,10 @@ import inspect import itertools import logging -from contextlib import contextmanager +from contextlib import asynccontextmanager, contextmanager from typing import ( Any, + AsyncIterator, Awaitable, Callable, Collection, @@ -40,7 +41,7 @@ ) import attr -from typing_extensions import ContextManager, Literal +from typing_extensions import AsyncContextManager, Literal from twisted.internet import defer from twisted.internet.defer import CancelledError @@ -491,7 +492,7 @@ class ReadWriteLock: Example: - with await read_write_lock.read("test_key"): + async with read_write_lock.read("test_key"): # do some work """ @@ -514,22 +515,24 @@ def __init__(self) -> None: # Latest writer queued self.key_to_current_writer: Dict[str, defer.Deferred] = {} - async def read(self, key: str) -> ContextManager: - new_defer: "defer.Deferred[None]" = defer.Deferred() + def read(self, key: str) -> AsyncContextManager: + @asynccontextmanager + async def _ctx_manager() -> AsyncIterator[None]: + new_defer: "defer.Deferred[None]" = defer.Deferred() - curr_readers = self.key_to_current_readers.setdefault(key, set()) - curr_writer = self.key_to_current_writer.get(key, None) + curr_readers = self.key_to_current_readers.setdefault(key, set()) + curr_writer = self.key_to_current_writer.get(key, None) - curr_readers.add(new_defer) + curr_readers.add(new_defer) - # We wait for the latest writer to finish writing. We can safely ignore - # any existing readers... as they're readers. - if curr_writer: - await make_deferred_yieldable(curr_writer) - - @contextmanager - def _ctx_manager() -> Iterator[None]: try: + # We wait for the latest writer to finish writing. We can safely ignore + # any existing readers... as they're readers. + # May raise a `CancelledError` if the `Deferred` wrapping us is + # cancelled. The `Deferred` we are waiting on must not be cancelled, + # since we do not own it. 
+ if curr_writer: + await make_deferred_yieldable(stop_cancellation(curr_writer)) yield finally: with PreserveLoggingContext(): @@ -538,29 +541,35 @@ def _ctx_manager() -> Iterator[None]: return _ctx_manager() - async def write(self, key: str) -> ContextManager: - new_defer: "defer.Deferred[None]" = defer.Deferred() + def write(self, key: str) -> AsyncContextManager: + @asynccontextmanager + async def _ctx_manager() -> AsyncIterator[None]: + new_defer: "defer.Deferred[None]" = defer.Deferred() - curr_readers = self.key_to_current_readers.get(key, set()) - curr_writer = self.key_to_current_writer.get(key, None) + curr_readers = self.key_to_current_readers.get(key, set()) + curr_writer = self.key_to_current_writer.get(key, None) - # We wait on all latest readers and writer. - to_wait_on = list(curr_readers) - if curr_writer: - to_wait_on.append(curr_writer) + # We wait on all latest readers and writer. + to_wait_on = list(curr_readers) + if curr_writer: + to_wait_on.append(curr_writer) - # We can clear the list of current readers since the new writer waits - # for them to finish. - curr_readers.clear() - self.key_to_current_writer[key] = new_defer + # We can clear the list of current readers since `new_defer` waits + # for them to finish. + curr_readers.clear() + self.key_to_current_writer[key] = new_defer - await make_deferred_yieldable(defer.gatherResults(to_wait_on)) - - @contextmanager - def _ctx_manager() -> Iterator[None]: + to_wait_on_defer = defer.gatherResults(to_wait_on) try: + # Wait for all current readers and the latest writer to finish. + # May raise a `CancelledError` immediately after the wait if the + # `Deferred` wrapping us is cancelled. We must only release the lock + # once we have acquired it, hence the use of `delay_cancellation` + # rather than `stop_cancellation`. + await make_deferred_yieldable(delay_cancellation(to_wait_on_defer)) yield finally: + # Release the lock. with PreserveLoggingContext(): new_defer.callback(None) # `self.key_to_current_writer[key]` may be missing if there was another diff --git a/tests/util/test_rwlock.py b/tests/util/test_rwlock.py index 0774625b8551..0c842261971f 100644 --- a/tests/util/test_rwlock.py +++ b/tests/util/test_rwlock.py @@ -12,8 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import AsyncContextManager, Callable, Sequence, Tuple + from twisted.internet import defer -from twisted.internet.defer import Deferred +from twisted.internet.defer import CancelledError, Deferred from synapse.util.async_helpers import ReadWriteLock @@ -21,87 +23,187 @@ class ReadWriteLockTestCase(unittest.TestCase): - def _assert_called_before_not_after(self, lst, first_false): - for i, d in enumerate(lst[:first_false]): - self.assertTrue(d.called, msg="%d was unexpectedly false" % i) + def _start_reader_or_writer( + self, + read_or_write: Callable[[str], AsyncContextManager], + key: str, + return_value: str, + ) -> Tuple["Deferred[str]", "Deferred[None]", "Deferred[None]"]: + """Starts a reader or writer which acquires the lock, blocks, then completes. + + Args: + read_or_write: A function returning a context manager for a lock. + Either a bound `ReadWriteLock.read` or `ReadWriteLock.write`. + key: The key to read or write. + return_value: A string that the reader or writer will resolve with when + done. + + Returns: + A tuple of three `Deferred`s: + * A `Deferred` that resolves with `return_value` once the reader or writer + completes successfully. 
+ * A `Deferred` that resolves once the reader or writer acquires the lock. + * A `Deferred` that blocks the reader or writer. Must be resolved by the + caller to allow the reader or writer to release the lock and complete. + """ + acquired_d: "Deferred[None]" = Deferred() + unblock_d: "Deferred[None]" = Deferred() + + async def reader_or_writer(): + async with read_or_write(key): + acquired_d.callback(None) + await unblock_d + return return_value + + d = defer.ensureDeferred(reader_or_writer()) + return d, acquired_d, unblock_d + + def _start_blocking_reader( + self, rwlock: ReadWriteLock, key: str, return_value: str + ) -> Tuple["Deferred[str]", "Deferred[None]", "Deferred[None]"]: + """Starts a reader which acquires the lock, blocks, then releases the lock. + + See the docstring for `_start_reader_or_writer` for details about the arguments + and return values. + """ + return self._start_reader_or_writer(rwlock.read, key, return_value) + + def _start_blocking_writer( + self, rwlock: ReadWriteLock, key: str, return_value: str + ) -> Tuple["Deferred[str]", "Deferred[None]", "Deferred[None]"]: + """Starts a writer which acquires the lock, blocks, then releases the lock. + + See the docstring for `_start_reader_or_writer` for details about the arguments + and return values. + """ + return self._start_reader_or_writer(rwlock.write, key, return_value) + + def _start_nonblocking_reader( + self, rwlock: ReadWriteLock, key: str, return_value: str + ) -> Tuple["Deferred[str]", "Deferred[None]"]: + """Starts a reader which acquires the lock, then releases it immediately. + + See the docstring for `_start_reader_or_writer` for details about the arguments. + + Returns: + A tuple of two `Deferred`s: + * A `Deferred` that resolves with `return_value` once the reader completes + successfully. + * A `Deferred` that resolves once the reader acquires the lock. + """ + d, acquired_d, unblock_d = self._start_reader_or_writer( + rwlock.read, key, return_value + ) + unblock_d.callback(None) + return d, acquired_d + + def _start_nonblocking_writer( + self, rwlock: ReadWriteLock, key: str, return_value: str + ) -> Tuple["Deferred[str]", "Deferred[None]"]: + """Starts a writer which acquires the lock, then releases it immediately. + + See the docstring for `_start_reader_or_writer` for details about the arguments. + + Returns: + A tuple of two `Deferred`s: + * A `Deferred` that resolves with `return_value` once the writer completes + successfully. + * A `Deferred` that resolves once the writer acquires the lock. + """ + d, acquired_d, unblock_d = self._start_reader_or_writer( + rwlock.write, key, return_value + ) + unblock_d.callback(None) + return d, acquired_d + + def _assert_first_n_resolved( + self, deferreds: Sequence["defer.Deferred[None]"], n: int + ) -> None: + """Assert that exactly the first n `Deferred`s in the given list are resolved. - for i, d in enumerate(lst[first_false:]): + Args: + deferreds: The list of `Deferred`s to be checked. + n: The number of `Deferred`s at the start of `deferreds` that should be + resolved. 
+ """ + for i, d in enumerate(deferreds[:n]): + self.assertTrue(d.called, msg="deferred %d was unexpectedly unresolved" % i) + + for i, d in enumerate(deferreds[n:]): self.assertFalse( - d.called, msg="%d was unexpectedly true" % (i + first_false) + d.called, msg="deferred %d was unexpectedly resolved" % (i + n) ) def test_rwlock(self): rwlock = ReadWriteLock() - - key = object() + key = "key" ds = [ - rwlock.read(key), # 0 - rwlock.read(key), # 1 - rwlock.write(key), # 2 - rwlock.write(key), # 3 - rwlock.read(key), # 4 - rwlock.read(key), # 5 - rwlock.write(key), # 6 + self._start_blocking_reader(rwlock, key, "0"), + self._start_blocking_reader(rwlock, key, "1"), + self._start_blocking_writer(rwlock, key, "2"), + self._start_blocking_writer(rwlock, key, "3"), + self._start_blocking_reader(rwlock, key, "4"), + self._start_blocking_reader(rwlock, key, "5"), + self._start_blocking_writer(rwlock, key, "6"), ] - ds = [defer.ensureDeferred(d) for d in ds] + # `Deferred`s that resolve when each reader or writer acquires the lock. + acquired_ds = [acquired_d for _, acquired_d, _ in ds] + # `Deferred`s that will trigger the release of locks when resolved. + release_ds = [release_d for _, _, release_d in ds] - self._assert_called_before_not_after(ds, 2) + # The first two readers should acquire their locks. + self._assert_first_n_resolved(acquired_ds, 2) - with ds[0].result: - self._assert_called_before_not_after(ds, 2) - self._assert_called_before_not_after(ds, 2) + # Release one of the read locks. The next writer should not acquire the lock, + # because there is another reader holding the lock. + self._assert_first_n_resolved(acquired_ds, 2) + release_ds[0].callback(None) + self._assert_first_n_resolved(acquired_ds, 2) - with ds[1].result: - self._assert_called_before_not_after(ds, 2) - self._assert_called_before_not_after(ds, 3) + # Release the other read lock. The next writer should acquire the lock. + self._assert_first_n_resolved(acquired_ds, 2) + release_ds[1].callback(None) + self._assert_first_n_resolved(acquired_ds, 3) - with ds[2].result: - self._assert_called_before_not_after(ds, 3) - self._assert_called_before_not_after(ds, 4) + # Release the write lock. The next writer should acquire the lock. + self._assert_first_n_resolved(acquired_ds, 3) + release_ds[2].callback(None) + self._assert_first_n_resolved(acquired_ds, 4) - with ds[3].result: - self._assert_called_before_not_after(ds, 4) - self._assert_called_before_not_after(ds, 6) + # Release the write lock. The next two readers should acquire locks. + self._assert_first_n_resolved(acquired_ds, 4) + release_ds[3].callback(None) + self._assert_first_n_resolved(acquired_ds, 6) - with ds[5].result: - self._assert_called_before_not_after(ds, 6) - self._assert_called_before_not_after(ds, 6) + # Release one of the read locks. The next writer should not acquire the lock, + # because there is another reader holding the lock. + self._assert_first_n_resolved(acquired_ds, 6) + release_ds[5].callback(None) + self._assert_first_n_resolved(acquired_ds, 6) - with ds[4].result: - self._assert_called_before_not_after(ds, 6) - self._assert_called_before_not_after(ds, 7) + # Release the other read lock. The next writer should acquire the lock. + self._assert_first_n_resolved(acquired_ds, 6) + release_ds[4].callback(None) + self._assert_first_n_resolved(acquired_ds, 7) - with ds[6].result: - pass + # Release the write lock. 
+ release_ds[6].callback(None) - d = defer.ensureDeferred(rwlock.write(key)) - self.assertTrue(d.called) - with d.result: - pass + # Acquire and release the write and read locks one last time for good measure. + _, acquired_d = self._start_nonblocking_writer(rwlock, key, "last writer") + self.assertTrue(acquired_d.called) - d = defer.ensureDeferred(rwlock.read(key)) - self.assertTrue(d.called) - with d.result: - pass + _, acquired_d = self._start_nonblocking_reader(rwlock, key, "last reader") + self.assertTrue(acquired_d.called) def test_lock_handoff_to_nonblocking_writer(self): """Test a writer handing the lock to another writer that completes instantly.""" rwlock = ReadWriteLock() key = "key" - unblock: "Deferred[None]" = Deferred() - - async def blocking_write(): - with await rwlock.write(key): - await unblock - - async def nonblocking_write(): - with await rwlock.write(key): - pass - - d1 = defer.ensureDeferred(blocking_write()) - d2 = defer.ensureDeferred(nonblocking_write()) + d1, _, unblock = self._start_blocking_writer(rwlock, key, "write 1 completed") + d2, _ = self._start_nonblocking_writer(rwlock, key, "write 2 completed") self.assertFalse(d1.called) self.assertFalse(d2.called) @@ -111,5 +213,182 @@ async def nonblocking_write(): self.assertTrue(d2.called) # The `ReadWriteLock` should operate as normal. - d3 = defer.ensureDeferred(nonblocking_write()) + d3, _ = self._start_nonblocking_writer(rwlock, key, "write 3 completed") self.assertTrue(d3.called) + + def test_cancellation_while_holding_read_lock(self): + """Test cancellation while holding a read lock. + + A waiting writer should be given the lock when the reader holding the lock is + cancelled. + """ + rwlock = ReadWriteLock() + key = "key" + + # 1. A reader takes the lock and blocks. + reader_d, _, _ = self._start_blocking_reader(rwlock, key, "read completed") + + # 2. A writer waits for the reader to complete. + writer_d, _ = self._start_nonblocking_writer(rwlock, key, "write completed") + self.assertFalse(writer_d.called) + + # 3. The reader is cancelled. + reader_d.cancel() + self.failureResultOf(reader_d, CancelledError) + + # 4. The writer should take the lock and complete. + self.assertTrue( + writer_d.called, "Writer is stuck waiting for a cancelled reader" + ) + self.assertEqual("write completed", self.successResultOf(writer_d)) + + def test_cancellation_while_holding_write_lock(self): + """Test cancellation while holding a write lock. + + A waiting reader should be given the lock when the writer holding the lock is + cancelled. + """ + rwlock = ReadWriteLock() + key = "key" + + # 1. A writer takes the lock and blocks. + writer_d, _, _ = self._start_blocking_writer(rwlock, key, "write completed") + + # 2. A reader waits for the writer to complete. + reader_d, _ = self._start_nonblocking_reader(rwlock, key, "read completed") + self.assertFalse(reader_d.called) + + # 3. The writer is cancelled. + writer_d.cancel() + self.failureResultOf(writer_d, CancelledError) + + # 4. The reader should take the lock and complete. + self.assertTrue( + reader_d.called, "Reader is stuck waiting for a cancelled writer" + ) + self.assertEqual("read completed", self.successResultOf(reader_d)) + + def test_cancellation_while_waiting_for_read_lock(self): + """Test cancellation while waiting for a read lock. 
+ + Tests that cancelling a waiting reader: + * does not cancel the writer it is waiting on + * does not cancel the next writer waiting on it + * does not allow the next writer to acquire the lock before an earlier writer + has finished + * does not keep the next writer waiting indefinitely + + These correspond to the asserts with explicit messages. + """ + rwlock = ReadWriteLock() + key = "key" + + # 1. A writer takes the lock and blocks. + writer1_d, _, unblock_writer1 = self._start_blocking_writer( + rwlock, key, "write 1 completed" + ) + + # 2. A reader waits for the first writer to complete. + # This reader will be cancelled later. + reader_d, _ = self._start_nonblocking_reader(rwlock, key, "read completed") + self.assertFalse(reader_d.called) + + # 3. A second writer waits for both the first writer and the reader to complete. + writer2_d, _ = self._start_nonblocking_writer(rwlock, key, "write 2 completed") + self.assertFalse(writer2_d.called) + + # 4. The waiting reader is cancelled. + # Neither of the writers should be cancelled. + # The second writer should still be waiting, but only on the first writer. + reader_d.cancel() + self.failureResultOf(reader_d, CancelledError) + self.assertFalse(writer1_d.called, "First writer was unexpectedly cancelled") + self.assertFalse( + writer2_d.called, + "Second writer was unexpectedly cancelled or given the lock before the " + "first writer finished", + ) + + # 5. Unblock the first writer, which should complete. + unblock_writer1.callback(None) + self.assertEqual("write 1 completed", self.successResultOf(writer1_d)) + + # 6. The second writer should take the lock and complete. + self.assertTrue( + writer2_d.called, "Second writer is stuck waiting for a cancelled reader" + ) + self.assertEqual("write 2 completed", self.successResultOf(writer2_d)) + + def test_cancellation_while_waiting_for_write_lock(self): + """Test cancellation while waiting for a write lock. + + Tests that cancelling a waiting writer: + * does not cancel the reader or writer it is waiting on + * does not cancel the next writer waiting on it + * does not allow the next writer to acquire the lock before an earlier reader + and writer have finished + * does not keep the next writer waiting indefinitely + + These correspond to the asserts with explicit messages. + """ + rwlock = ReadWriteLock() + key = "key" + + # 1. A reader takes the lock and blocks. + reader_d, _, unblock_reader = self._start_blocking_reader( + rwlock, key, "read completed" + ) + + # 2. A writer waits for the reader to complete. + writer1_d, _, unblock_writer1 = self._start_blocking_writer( + rwlock, key, "write 1 completed" + ) + + # 3. A second writer waits for both the reader and first writer to complete. + # This writer will be cancelled later. + writer2_d, _ = self._start_nonblocking_writer(rwlock, key, "write 2 completed") + self.assertFalse(writer2_d.called) + + # 4. A third writer waits for the second writer to complete. + writer3_d, _ = self._start_nonblocking_writer(rwlock, key, "write 3 completed") + self.assertFalse(writer3_d.called) + + # 5. The second writer is cancelled, but continues waiting for the lock. + # The reader, first writer and third writer should not be cancelled. + # The first writer should still be waiting on the reader. + # The third writer should still be waiting on the second writer. 
+ writer2_d.cancel() + self.assertNoResult(writer2_d) + self.assertFalse(reader_d.called, "Reader was unexpectedly cancelled") + self.assertFalse(writer1_d.called, "First writer was unexpectedly cancelled") + self.assertFalse( + writer3_d.called, + "Third writer was unexpectedly cancelled or given the lock before the first " + "writer finished", + ) + + # 6. Unblock the reader, which should complete. + # The first writer should be given the lock and block. + # The third writer should still be waiting on the second writer. + unblock_reader.callback(None) + self.assertEqual("read completed", self.successResultOf(reader_d)) + self.assertNoResult(writer2_d) + self.assertFalse( + writer3_d.called, + "Third writer was unexpectedly given the lock before the first writer " + "finished", + ) + + # 7. Unblock the first writer, which should complete. + unblock_writer1.callback(None) + self.assertEqual("write 1 completed", self.successResultOf(writer1_d)) + + # 8. The second writer should take the lock and release it immediately, since it + # has been cancelled. + self.failureResultOf(writer2_d, CancelledError) + + # 9. The third writer should take the lock and complete. + self.assertTrue( + writer3_d.called, "Third writer is stuck waiting for a cancelled writer" + ) + self.assertEqual("write 3 completed", self.successResultOf(writer3_d)) From 2fcf4b3f6cd2a0be6597622664636d2219957c2a Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Mon, 14 Mar 2022 19:04:29 +0000 Subject: [PATCH 401/474] Add cancellation support to `@cached` and `@cachedList` decorators (#12183) These decorators mostly support cancellation already. Add cancellation tests and fix use of finished logging contexts by delaying cancellation, as suggested by @erikjohnston. Signed-off-by: Sean Quah --- changelog.d/12183.misc | 1 + synapse/util/caches/descriptors.py | 11 ++ tests/util/caches/test_descriptors.py | 147 +++++++++++++++++++++++++- 3 files changed, 157 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12183.misc diff --git a/changelog.d/12183.misc b/changelog.d/12183.misc new file mode 100644 index 000000000000..dd441bb64ff7 --- /dev/null +++ b/changelog.d/12183.misc @@ -0,0 +1 @@ +Add cancellation support to `@cached` and `@cachedList` decorators. diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index c3c5c16db96e..eda92d864dea 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -41,6 +41,7 @@ from synapse.logging.context import make_deferred_yieldable, preserve_fn from synapse.util import unwrapFirstError +from synapse.util.async_helpers import delay_cancellation from synapse.util.caches.deferred_cache import DeferredCache from synapse.util.caches.lrucache import LruCache @@ -350,6 +351,11 @@ def _wrapped(*args: Any, **kwargs: Any) -> Any: ret = defer.maybeDeferred(preserve_fn(self.orig), obj, *args, **kwargs) ret = cache.set(cache_key, ret, callback=invalidate_callback) + # We started a new call to `self.orig`, so we must always wait for it to + # complete. Otherwise we might mark our current logging context as + # finished while `self.orig` is still using it in the background. 
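+            # (`delay_cancellation` does not cancel `ret` itself: a cancelled
+            # caller only sees its `CancelledError` once the underlying lookup
+            # has resolved.)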
+ ret = delay_cancellation(ret) + return make_deferred_yieldable(ret) wrapped = cast(_CachedFunction, _wrapped) @@ -510,6 +516,11 @@ def errback_all(f: Failure) -> None: d = defer.gatherResults(cached_defers, consumeErrors=True).addCallbacks( lambda _: results, unwrapFirstError ) + if missing: + # We started a new call to `self.orig`, so we must always wait for it to + # complete. Otherwise we might mark our current logging context as + # finished while `self.orig` is still using it in the background. + d = delay_cancellation(d) return make_deferred_yieldable(d) else: return defer.succeed(results) diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 6a4b17527a7f..48e616ac7419 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -17,7 +17,7 @@ from unittest import mock from twisted.internet import defer, reactor -from twisted.internet.defer import Deferred +from twisted.internet.defer import CancelledError, Deferred from synapse.api.errors import SynapseError from synapse.logging.context import ( @@ -28,7 +28,7 @@ make_deferred_yieldable, ) from synapse.util.caches import descriptors -from synapse.util.caches.descriptors import cached, lru_cache +from synapse.util.caches.descriptors import cached, cachedList, lru_cache from tests import unittest from tests.test_utils import get_awaitable_result @@ -493,6 +493,74 @@ def func3(self, key, cache_context): obj.invalidate() top_invalidate.assert_called_once() + def test_cancel(self): + """Test that cancelling a lookup does not cancel other lookups""" + complete_lookup: "Deferred[None]" = Deferred() + + class Cls: + @cached() + async def fn(self, arg1): + await complete_lookup + return str(arg1) + + obj = Cls() + + d1 = obj.fn(123) + d2 = obj.fn(123) + self.assertFalse(d1.called) + self.assertFalse(d2.called) + + # Cancel `d1`, which is the lookup that caused `fn` to run. + d1.cancel() + + # `d2` should complete normally. + complete_lookup.callback(None) + self.failureResultOf(d1, CancelledError) + self.assertEqual(d2.result, "123") + + def test_cancel_logcontexts(self): + """Test that cancellation does not break logcontexts. + + * The `CancelledError` must be raised with the correct logcontext. + * The inner lookup must not resume with a finished logcontext. + * The inner lookup must not restore a finished logcontext when done. 
+ """ + complete_lookup: "Deferred[None]" = Deferred() + + class Cls: + inner_context_was_finished = False + + @cached() + async def fn(self, arg1): + await make_deferred_yieldable(complete_lookup) + self.inner_context_was_finished = current_context().finished + return str(arg1) + + obj = Cls() + + async def do_lookup(): + with LoggingContext("c1") as c1: + try: + await obj.fn(123) + self.fail("No CancelledError thrown") + except CancelledError: + self.assertEqual( + current_context(), + c1, + "CancelledError was not raised with the correct logcontext", + ) + # suppress the error and succeed + + d = defer.ensureDeferred(do_lookup()) + d.cancel() + + complete_lookup.callback(None) + self.successResultOf(d) + self.assertFalse( + obj.inner_context_was_finished, "Tried to restart a finished logcontext" + ) + self.assertEqual(current_context(), SENTINEL_CONTEXT) + class CacheDecoratorTestCase(unittest.HomeserverTestCase): """More tests for @cached @@ -865,3 +933,78 @@ async def list_fn(self, args1, arg2): obj.fn.invalidate((10, 2)) invalidate0.assert_called_once() invalidate1.assert_called_once() + + def test_cancel(self): + """Test that cancelling a lookup does not cancel other lookups""" + complete_lookup: "Deferred[None]" = Deferred() + + class Cls: + @cached() + def fn(self, arg1): + pass + + @cachedList(cached_method_name="fn", list_name="args") + async def list_fn(self, args): + await complete_lookup + return {arg: str(arg) for arg in args} + + obj = Cls() + + d1 = obj.list_fn([123, 456]) + d2 = obj.list_fn([123, 456, 789]) + self.assertFalse(d1.called) + self.assertFalse(d2.called) + + d1.cancel() + + # `d2` should complete normally. + complete_lookup.callback(None) + self.failureResultOf(d1, CancelledError) + self.assertEqual(d2.result, {123: "123", 456: "456", 789: "789"}) + + def test_cancel_logcontexts(self): + """Test that cancellation does not break logcontexts. + + * The `CancelledError` must be raised with the correct logcontext. + * The inner lookup must not resume with a finished logcontext. + * The inner lookup must not restore a finished logcontext when done. 
+ """ + complete_lookup: "Deferred[None]" = Deferred() + + class Cls: + inner_context_was_finished = False + + @cached() + def fn(self, arg1): + pass + + @cachedList(cached_method_name="fn", list_name="args") + async def list_fn(self, args): + await make_deferred_yieldable(complete_lookup) + self.inner_context_was_finished = current_context().finished + return {arg: str(arg) for arg in args} + + obj = Cls() + + async def do_lookup(): + with LoggingContext("c1") as c1: + try: + await obj.list_fn([123]) + self.fail("No CancelledError thrown") + except CancelledError: + self.assertEqual( + current_context(), + c1, + "CancelledError was not raised with the correct logcontext", + ) + # suppress the error and succeed + + d = defer.ensureDeferred(do_lookup()) + d.cancel() + + complete_lookup.callback(None) + self.successResultOf(d) + self.assertFalse( + obj.inner_context_was_finished, "Tried to restart a finished logcontext" + ) + self.assertEqual(current_context(), SENTINEL_CONTEXT) From d1130a249b4f462a3e457b783b483d5a6c7486f0 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 15 Mar 2022 11:00:01 +0000 Subject: [PATCH 402/474] 1.55.0rc1 --- CHANGES.md | 82 +++++++++++++++++++++++++++++++++++++++ changelog.d/11700.removal | 1 - changelog.d/11915.misc | 1 - changelog.d/11980.misc | 1 - changelog.d/11998.doc | 1 - changelog.d/12028.feature | 1 - changelog.d/12042.misc | 1 - changelog.d/12090.bugfix | 1 - changelog.d/12101.misc | 1 - changelog.d/12108.misc | 1 - changelog.d/12113.bugfix | 1 - changelog.d/12118.misc | 1 - changelog.d/12120.misc | 1 - changelog.d/12121.bugfix | 1 - changelog.d/12128.misc | 1 - changelog.d/12130.bugfix | 1 - changelog.d/12131.misc | 1 - changelog.d/12132.feature | 1 - changelog.d/12135.feature | 1 - changelog.d/12136.misc | 1 - changelog.d/12137.misc | 1 - changelog.d/12138.removal | 1 - changelog.d/12140.misc | 1 - changelog.d/12142.misc | 1 - changelog.d/12143.doc | 1 - changelog.d/12144.misc | 1 - changelog.d/12145.misc | 1 - changelog.d/12146.misc | 1 - changelog.d/12149.misc | 1 - changelog.d/12150.misc | 1 - changelog.d/12151.feature | 1 - changelog.d/12152.misc | 1 - changelog.d/12153.misc | 1 - changelog.d/12154.misc | 1 - changelog.d/12155.misc | 1 - changelog.d/12156.misc | 1 - changelog.d/12157.bugfix | 1 - changelog.d/12159.misc | 1 - changelog.d/12161.misc | 1 - changelog.d/12163.misc | 1 - changelog.d/12173.misc | 1 - changelog.d/12175.bugfix | 1 - changelog.d/12179.doc | 1 - changelog.d/12180.misc | 1 - changelog.d/12182.misc | 1 - changelog.d/12183.misc | 1 - changelog.d/12187.misc | 1 - changelog.d/12188.misc | 1 - changelog.d/12189.bugfix | 1 - changelog.d/12192.misc | 1 - changelog.d/12196.doc | 1 - changelog.d/12197.misc | 1 - changelog.d/12200.removal | 1 - changelog.d/12202.misc | 1 - changelog.d/12203.misc | 1 - changelog.d/12204.doc | 1 - changelog.d/12206.misc | 1 - changelog.d/12207.misc | 1 - changelog.d/12208.misc | 1 - changelog.d/12210.misc | 1 - changelog.d/12211.misc | 1 - changelog.d/12212.feature | 1 - changelog.d/12215.bugfix | 1 - debian/changelog | 6 +++ synapse/__init__.py | 2 +- 65 files changed, 89 insertions(+), 63 deletions(-) delete mode 100644 changelog.d/11700.removal delete mode 100644 changelog.d/11915.misc delete mode 100644 changelog.d/11980.misc delete mode 100644 changelog.d/11998.doc delete mode 100644 changelog.d/12028.feature delete mode 100644 changelog.d/12042.misc delete mode 100644 changelog.d/12090.bugfix delete mode 100644 changelog.d/12101.misc delete mode 100644 changelog.d/12108.misc delete mode 
100644 changelog.d/12113.bugfix delete mode 100644 changelog.d/12118.misc delete mode 100644 changelog.d/12120.misc delete mode 100644 changelog.d/12121.bugfix delete mode 100644 changelog.d/12128.misc delete mode 100644 changelog.d/12130.bugfix delete mode 100644 changelog.d/12131.misc delete mode 100644 changelog.d/12132.feature delete mode 100644 changelog.d/12135.feature delete mode 100644 changelog.d/12136.misc delete mode 100644 changelog.d/12137.misc delete mode 100644 changelog.d/12138.removal delete mode 100644 changelog.d/12140.misc delete mode 100644 changelog.d/12142.misc delete mode 100644 changelog.d/12143.doc delete mode 100644 changelog.d/12144.misc delete mode 100644 changelog.d/12145.misc delete mode 100644 changelog.d/12146.misc delete mode 100644 changelog.d/12149.misc delete mode 100644 changelog.d/12150.misc delete mode 100644 changelog.d/12151.feature delete mode 100644 changelog.d/12152.misc delete mode 100644 changelog.d/12153.misc delete mode 100644 changelog.d/12154.misc delete mode 100644 changelog.d/12155.misc delete mode 100644 changelog.d/12156.misc delete mode 100644 changelog.d/12157.bugfix delete mode 100644 changelog.d/12159.misc delete mode 100644 changelog.d/12161.misc delete mode 100644 changelog.d/12163.misc delete mode 100644 changelog.d/12173.misc delete mode 100644 changelog.d/12175.bugfix delete mode 100644 changelog.d/12179.doc delete mode 100644 changelog.d/12180.misc delete mode 100644 changelog.d/12182.misc delete mode 100644 changelog.d/12183.misc delete mode 100644 changelog.d/12187.misc delete mode 100644 changelog.d/12188.misc delete mode 100644 changelog.d/12189.bugfix delete mode 100644 changelog.d/12192.misc delete mode 100644 changelog.d/12196.doc delete mode 100644 changelog.d/12197.misc delete mode 100644 changelog.d/12200.removal delete mode 100644 changelog.d/12202.misc delete mode 100644 changelog.d/12203.misc delete mode 100644 changelog.d/12204.doc delete mode 100644 changelog.d/12206.misc delete mode 100644 changelog.d/12207.misc delete mode 100644 changelog.d/12208.misc delete mode 100644 changelog.d/12210.misc delete mode 100644 changelog.d/12211.misc delete mode 100644 changelog.d/12212.feature delete mode 100644 changelog.d/12215.bugfix diff --git a/CHANGES.md b/CHANGES.md index ef671e73f178..b0311f73bf84 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,85 @@ +Synapse 1.55.0rc1 (2022-03-15) +============================== + +Features +-------- + +- Add third-party rules rules callbacks `check_can_shutdown_room` and `check_can_deactivate_user`. ([\#12028](https://github.com/matrix-org/synapse/issues/12028)) +- Improve performance of logging in for large accounts. ([\#12132](https://github.com/matrix-org/synapse/issues/12132)) +- Add experimental env var `SYNAPSE_ASYNC_IO_REACTOR` that causes Synapse to use the asyncio reactor for Twisted. ([\#12135](https://github.com/matrix-org/synapse/issues/12135)) +- Support the stable identifiers from [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440): threads. ([\#12151](https://github.com/matrix-org/synapse/issues/12151)) +- Add a new Jinja2 template filter to extract the local part of an email address. ([\#12212](https://github.com/matrix-org/synapse/issues/12212)) + + +Bugfixes +-------- + +- Use the proper serialization format for bundled thread aggregations. The bug has existed since Synapse v1.48.0. ([\#12090](https://github.com/matrix-org/synapse/issues/12090)) +- Fix a long-standing bug when redacting events with relations. 
([\#12113](https://github.com/matrix-org/synapse/issues/12113), [\#12121](https://github.com/matrix-org/synapse/issues/12121), [\#12130](https://github.com/matrix-org/synapse/issues/12130), [\#12189](https://github.com/matrix-org/synapse/issues/12189)) +- Fix a bug introduced in #4864 whereby background updates are never run with the default background batch size. ([\#12157](https://github.com/matrix-org/synapse/issues/12157)) +- Fix a bug where non-standard information was returned from the `/hierarchy` API. Introduced in Synapse v1.41.0. ([\#12175](https://github.com/matrix-org/synapse/issues/12175)) +- Fix a bug introduced in 1.54.0 that broke background updates on sqlite homeservers while search was disabled. ([\#12215](https://github.com/matrix-org/synapse/issues/12215)) + + +Improved Documentation +---------------------- + +- Fix complexity checking config example in [Resource Constrained Devices](https://matrix-org.github.io/synapse/v1.54/other/running_synapse_on_single_board_computers.html) docs page. ([\#11998](https://github.com/matrix-org/synapse/issues/11998)) +- Improve documentation for demo scripts. ([\#12143](https://github.com/matrix-org/synapse/issues/12143)) +- Updates to the Room DAG concepts development document. ([\#12179](https://github.com/matrix-org/synapse/issues/12179)) +- Document that the `typing`, `to_device`, `account_data`, `receipts`, and `presence` stream writer can only be used on a single worker. ([\#12196](https://github.com/matrix-org/synapse/issues/12196)) +- Document that contributors can sign off privately by email. ([\#12204](https://github.com/matrix-org/synapse/issues/12204)) + + +Deprecations and Removals +------------------------- + +- Remove workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. Breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700)) +- Remove backwards compatibilty with pagination tokens from the `/relations` and `/aggregations` endpoints generated from Synapse < v1.52.0. ([\#12138](https://github.com/matrix-org/synapse/issues/12138)) +- The groups/communities feature in Synapse has been deprecated. ([\#12200](https://github.com/matrix-org/synapse/issues/12200)) + + +Internal Changes +---------------- + +- Simplify the `ApplicationService` class' set of public methods related to interest checking. ([\#11915](https://github.com/matrix-org/synapse/issues/11915)) +- Add config settings for background update parameters. ([\#11980](https://github.com/matrix-org/synapse/issues/11980)) +- Correct type hints for txredis. ([\#12042](https://github.com/matrix-org/synapse/issues/12042)) +- Limit the size of `aggregation_key` on annotations. ([\#12101](https://github.com/matrix-org/synapse/issues/12101)) +- Add type hints to tests files. ([\#12108](https://github.com/matrix-org/synapse/issues/12108), [\#12146](https://github.com/matrix-org/synapse/issues/12146), [\#12207](https://github.com/matrix-org/synapse/issues/12207), [\#12208](https://github.com/matrix-org/synapse/issues/12208)) +- Move scripts to Synapse package and expose as setuptools entry points. ([\#12118](https://github.com/matrix-org/synapse/issues/12118)) +- Add support for cancellation to `ReadWriteLock`. ([\#12120](https://github.com/matrix-org/synapse/issues/12120)) +- Fix data validation to compare to lists, not sequences. ([\#12128](https://github.com/matrix-org/synapse/issues/12128)) +- Fix CI not attaching source distributions and wheels to the GitHub releases. 
([\#12131](https://github.com/matrix-org/synapse/issues/12131)) +- Remove unused mocks from `test_typing`. ([\#12136](https://github.com/matrix-org/synapse/issues/12136)) +- Give `scripts-dev` scripts suffixes for neater CI config. ([\#12137](https://github.com/matrix-org/synapse/issues/12137)) +- Move `synctl` into `synapse._scripts` and expose as an entry point. ([\#12140](https://github.com/matrix-org/synapse/issues/12140)) +- Move the snapcraft configuration file to `contrib`. ([\#12142](https://github.com/matrix-org/synapse/issues/12142)) +- Enable [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) Complement tests in CI. ([\#12144](https://github.com/matrix-org/synapse/issues/12144)) +- Enable [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) Complement tests in CI. ([\#12145](https://github.com/matrix-org/synapse/issues/12145)) +- Add test for `ObservableDeferred`'s cancellation behaviour. ([\#12149](https://github.com/matrix-org/synapse/issues/12149)) +- Use `ParamSpec` in type hints for `synapse.logging.context`. ([\#12150](https://github.com/matrix-org/synapse/issues/12150)) +- Prune unused jobs from `tox` config. ([\#12152](https://github.com/matrix-org/synapse/issues/12152)) +- Move CI checks out of tox, to facilitate a move to using poetry. ([\#12153](https://github.com/matrix-org/synapse/issues/12153)) +- Avoid generating state groups for local out-of-band leaves. ([\#12154](https://github.com/matrix-org/synapse/issues/12154)) +- Avoid trying to calculate the state at outlier events. ([\#12155](https://github.com/matrix-org/synapse/issues/12155), [\#12173](https://github.com/matrix-org/synapse/issues/12173), [\#12202](https://github.com/matrix-org/synapse/issues/12202)) +- Fix some type annotations. ([\#12156](https://github.com/matrix-org/synapse/issues/12156)) +- Add type hints for `ObservableDeferred` attributes. ([\#12159](https://github.com/matrix-org/synapse/issues/12159)) +- Use a prebuilt Action for the `tests-done` CI job. ([\#12161](https://github.com/matrix-org/synapse/issues/12161)) +- Reduce number of DB queries made during processing of `/sync`. ([\#12163](https://github.com/matrix-org/synapse/issues/12163)) +- Add `delay_cancellation` utility function, which behaves like `stop_cancellation` but waits until the original `Deferred` resolves before raising a `CancelledError`. ([\#12180](https://github.com/matrix-org/synapse/issues/12180)) +- Retry HTTP replication failures, this should prevent 502's when restarting stateful workers (main, event persisters, stream writers). Contributed by Nick @ Beeper. ([\#12182](https://github.com/matrix-org/synapse/issues/12182)) +- Add cancellation support to `@cached` and `@cachedList` decorators. ([\#12183](https://github.com/matrix-org/synapse/issues/12183)) +- Remove unused variables. ([\#12187](https://github.com/matrix-org/synapse/issues/12187)) +- Add combined test for HTTP pusher and push rule. Contributed by Nick @ Beeper. ([\#12188](https://github.com/matrix-org/synapse/issues/12188)) +- Rename `HomeServer.get_tcp_replication` to `get_replication_command_handler`. ([\#12192](https://github.com/matrix-org/synapse/issues/12192)) +- Remove some dead code. ([\#12197](https://github.com/matrix-org/synapse/issues/12197)) +- Fix a misleading comment in the function `check_event_for_spam`. ([\#12203](https://github.com/matrix-org/synapse/issues/12203)) +- Remove unnecessary `pass` statements. 
([\#12206](https://github.com/matrix-org/synapse/issues/12206)) +- Update the SSO username picker template to comply with SIWA guidelines. ([\#12210](https://github.com/matrix-org/synapse/issues/12210)) +- Improve code documentation for the typing stream over replication. ([\#12211](https://github.com/matrix-org/synapse/issues/12211)) + + Synapse 1.54.0 (2022-03-08) =========================== diff --git a/changelog.d/11700.removal b/changelog.d/11700.removal deleted file mode 100644 index d3d3c48f0fc4..000000000000 --- a/changelog.d/11700.removal +++ /dev/null @@ -1 +0,0 @@ -Remove workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. Breaks compatibility with Mjolnir 1.3.1 and earlier. diff --git a/changelog.d/11915.misc b/changelog.d/11915.misc deleted file mode 100644 index e3cef1511eb6..000000000000 --- a/changelog.d/11915.misc +++ /dev/null @@ -1 +0,0 @@ -Simplify the `ApplicationService` class' set of public methods related to interest checking. \ No newline at end of file diff --git a/changelog.d/11980.misc b/changelog.d/11980.misc deleted file mode 100644 index 36e992e645a3..000000000000 --- a/changelog.d/11980.misc +++ /dev/null @@ -1 +0,0 @@ -Add config settings for background update parameters. \ No newline at end of file diff --git a/changelog.d/11998.doc b/changelog.d/11998.doc deleted file mode 100644 index 33ab7b7880be..000000000000 --- a/changelog.d/11998.doc +++ /dev/null @@ -1 +0,0 @@ -Fix complexity checking config example in [Resource Constrained Devices](https://matrix-org.github.io/synapse/v1.54/other/running_synapse_on_single_board_computers.html) docs page. \ No newline at end of file diff --git a/changelog.d/12028.feature b/changelog.d/12028.feature deleted file mode 100644 index 5549c8f6fcf6..000000000000 --- a/changelog.d/12028.feature +++ /dev/null @@ -1 +0,0 @@ -Add third-party rules rules callbacks `check_can_shutdown_room` and `check_can_deactivate_user`. diff --git a/changelog.d/12042.misc b/changelog.d/12042.misc deleted file mode 100644 index 6ecdc960210c..000000000000 --- a/changelog.d/12042.misc +++ /dev/null @@ -1 +0,0 @@ -Correct type hints for txredis. diff --git a/changelog.d/12090.bugfix b/changelog.d/12090.bugfix deleted file mode 100644 index 087065dcb1cd..000000000000 --- a/changelog.d/12090.bugfix +++ /dev/null @@ -1 +0,0 @@ -Use the proper serialization format for bundled thread aggregations. The bug has existed since Synapse v1.48.0. diff --git a/changelog.d/12101.misc b/changelog.d/12101.misc deleted file mode 100644 index d165f73d13e8..000000000000 --- a/changelog.d/12101.misc +++ /dev/null @@ -1 +0,0 @@ -Limit the size of `aggregation_key` on annotations. diff --git a/changelog.d/12108.misc b/changelog.d/12108.misc deleted file mode 100644 index b67a701dbb52..000000000000 --- a/changelog.d/12108.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to tests files. diff --git a/changelog.d/12113.bugfix b/changelog.d/12113.bugfix deleted file mode 100644 index df9b0dc413dd..000000000000 --- a/changelog.d/12113.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug when redacting events with relations. diff --git a/changelog.d/12118.misc b/changelog.d/12118.misc deleted file mode 100644 index a2c397d90755..000000000000 --- a/changelog.d/12118.misc +++ /dev/null @@ -1 +0,0 @@ -Move scripts to Synapse package and expose as setuptools entry points. 
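The "setuptools entry points" mechanism referenced by the #12118 entry above maps an installed command name onto a Python function. A minimal sketch of such a declaration, with illustrative package and command names rather than Synapse's actual ones:

    # setup.py -- a hedged sketch, not Synapse's real packaging config
    from setuptools import setup

    setup(
        name="example-package",
        entry_points={
            "console_scripts": [
                # installs an `example-tool` command that invokes
                # main() in example_package/cli.py
                "example-tool = example_package.cli:main",
            ],
        },
    )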
diff --git a/changelog.d/12120.misc b/changelog.d/12120.misc deleted file mode 100644 index 360309650032..000000000000 --- a/changelog.d/12120.misc +++ /dev/null @@ -1 +0,0 @@ -Add support for cancellation to `ReadWriteLock`. diff --git a/changelog.d/12121.bugfix b/changelog.d/12121.bugfix deleted file mode 100644 index df9b0dc413dd..000000000000 --- a/changelog.d/12121.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug when redacting events with relations. diff --git a/changelog.d/12128.misc b/changelog.d/12128.misc deleted file mode 100644 index 0570a8e3272f..000000000000 --- a/changelog.d/12128.misc +++ /dev/null @@ -1 +0,0 @@ -Fix data validation to compare to lists, not sequences. diff --git a/changelog.d/12130.bugfix b/changelog.d/12130.bugfix deleted file mode 100644 index df9b0dc413dd..000000000000 --- a/changelog.d/12130.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug when redacting events with relations. diff --git a/changelog.d/12131.misc b/changelog.d/12131.misc deleted file mode 100644 index 8ef23c22d524..000000000000 --- a/changelog.d/12131.misc +++ /dev/null @@ -1 +0,0 @@ -Fix CI not attaching source distributions and wheels to the GitHub releases. \ No newline at end of file diff --git a/changelog.d/12132.feature b/changelog.d/12132.feature deleted file mode 100644 index 3b8362ad35ed..000000000000 --- a/changelog.d/12132.feature +++ /dev/null @@ -1 +0,0 @@ -Improve performance of logging in for large accounts. diff --git a/changelog.d/12135.feature b/changelog.d/12135.feature deleted file mode 100644 index b337f51730e6..000000000000 --- a/changelog.d/12135.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental env var `SYNAPSE_ASYNC_IO_REACTOR` that causes Synapse to use the asyncio reactor for Twisted. diff --git a/changelog.d/12136.misc b/changelog.d/12136.misc deleted file mode 100644 index 98b1c1c9d8ac..000000000000 --- a/changelog.d/12136.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unused mocks from `test_typing`. \ No newline at end of file diff --git a/changelog.d/12137.misc b/changelog.d/12137.misc deleted file mode 100644 index 118ff77a91c6..000000000000 --- a/changelog.d/12137.misc +++ /dev/null @@ -1 +0,0 @@ -Give `scripts-dev` scripts suffixes for neater CI config. \ No newline at end of file diff --git a/changelog.d/12138.removal b/changelog.d/12138.removal deleted file mode 100644 index 6ed84d476cd9..000000000000 --- a/changelog.d/12138.removal +++ /dev/null @@ -1 +0,0 @@ -Remove backwards compatibilty with pagination tokens from the `/relations` and `/aggregations` endpoints generated from Synapse < v1.52.0. diff --git a/changelog.d/12140.misc b/changelog.d/12140.misc deleted file mode 100644 index 33a21a29f0f4..000000000000 --- a/changelog.d/12140.misc +++ /dev/null @@ -1 +0,0 @@ -Move `synctl` into `synapse._scripts` and expose as an entry point. \ No newline at end of file diff --git a/changelog.d/12142.misc b/changelog.d/12142.misc deleted file mode 100644 index 5d09f90b5244..000000000000 --- a/changelog.d/12142.misc +++ /dev/null @@ -1 +0,0 @@ -Move the snapcraft configuration file to `contrib`. \ No newline at end of file diff --git a/changelog.d/12143.doc b/changelog.d/12143.doc deleted file mode 100644 index 4b9db74b1fc9..000000000000 --- a/changelog.d/12143.doc +++ /dev/null @@ -1 +0,0 @@ -Improve documentation for demo scripts. 
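The `ReadWriteLock` cancellation support referenced by the #12120 entry above is what the lock tests earlier in this series exercise. A rough sketch of the API as those tests use it (the driver coroutines below are illustrative, not taken from the test suite):

    from synapse.util.async_helpers import ReadWriteLock

    rwlock = ReadWriteLock()

    async def read_something() -> str:
        # Multiple readers may hold the lock for "key" at once.
        async with rwlock.read("key"):
            return "read completed"

    async def write_something() -> str:
        # A writer waits for all current readers, then holds the lock
        # exclusively. Cancelling a holder still hands the lock onward.
        async with rwlock.write("key"):
            return "write completed"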
diff --git a/changelog.d/12144.misc b/changelog.d/12144.misc deleted file mode 100644 index d8f71bb203eb..000000000000 --- a/changelog.d/12144.misc +++ /dev/null @@ -1 +0,0 @@ -Enable [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) Complement tests in CI. diff --git a/changelog.d/12145.misc b/changelog.d/12145.misc deleted file mode 100644 index 4092a2d66e45..000000000000 --- a/changelog.d/12145.misc +++ /dev/null @@ -1 +0,0 @@ -Enable [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) Complement tests in CI. diff --git a/changelog.d/12146.misc b/changelog.d/12146.misc deleted file mode 100644 index b67a701dbb52..000000000000 --- a/changelog.d/12146.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to tests files. diff --git a/changelog.d/12149.misc b/changelog.d/12149.misc deleted file mode 100644 index d39af9672365..000000000000 --- a/changelog.d/12149.misc +++ /dev/null @@ -1 +0,0 @@ -Add test for `ObservableDeferred`'s cancellation behaviour. diff --git a/changelog.d/12150.misc b/changelog.d/12150.misc deleted file mode 100644 index 2d2706dac769..000000000000 --- a/changelog.d/12150.misc +++ /dev/null @@ -1 +0,0 @@ -Use `ParamSpec` in type hints for `synapse.logging.context`. diff --git a/changelog.d/12151.feature b/changelog.d/12151.feature deleted file mode 100644 index 18432b2da9a5..000000000000 --- a/changelog.d/12151.feature +++ /dev/null @@ -1 +0,0 @@ -Support the stable identifiers from [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440): threads. diff --git a/changelog.d/12152.misc b/changelog.d/12152.misc deleted file mode 100644 index b9877eaccbee..000000000000 --- a/changelog.d/12152.misc +++ /dev/null @@ -1 +0,0 @@ -Prune unused jobs from `tox` config. \ No newline at end of file diff --git a/changelog.d/12153.misc b/changelog.d/12153.misc deleted file mode 100644 index f02d140f3871..000000000000 --- a/changelog.d/12153.misc +++ /dev/null @@ -1 +0,0 @@ -Move CI checks out of tox, to facilitate a move to using poetry. \ No newline at end of file diff --git a/changelog.d/12154.misc b/changelog.d/12154.misc deleted file mode 100644 index 18d2a4728be9..000000000000 --- a/changelog.d/12154.misc +++ /dev/null @@ -1 +0,0 @@ -Avoid generating state groups for local out-of-band leaves. diff --git a/changelog.d/12155.misc b/changelog.d/12155.misc deleted file mode 100644 index 9f333e718a86..000000000000 --- a/changelog.d/12155.misc +++ /dev/null @@ -1 +0,0 @@ -Avoid trying to calculate the state at outlier events. diff --git a/changelog.d/12156.misc b/changelog.d/12156.misc deleted file mode 100644 index 4818d988d771..000000000000 --- a/changelog.d/12156.misc +++ /dev/null @@ -1 +0,0 @@ -Fix some type annotations. diff --git a/changelog.d/12157.bugfix b/changelog.d/12157.bugfix deleted file mode 100644 index c3d2e700bb1d..000000000000 --- a/changelog.d/12157.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in #4864 whereby background updates are never run with the default background batch size. diff --git a/changelog.d/12159.misc b/changelog.d/12159.misc deleted file mode 100644 index 30500f2fd95d..000000000000 --- a/changelog.d/12159.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints for `ObservableDeferred` attributes. diff --git a/changelog.d/12161.misc b/changelog.d/12161.misc deleted file mode 100644 index 43eff08d467e..000000000000 --- a/changelog.d/12161.misc +++ /dev/null @@ -1 +0,0 @@ -Use a prebuilt Action for the `tests-done` CI job. 
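The `ParamSpec` change noted above (#12150) concerns typing wrappers that forward arbitrary arguments without losing the wrapped function's signature. A standalone sketch of the pattern (not Synapse's actual logging-context code):

    from typing import Callable, TypeVar

    from typing_extensions import ParamSpec  # `typing.ParamSpec` on Python >= 3.10

    P = ParamSpec("P")
    R = TypeVar("R")

    def traced(f: Callable[P, R]) -> Callable[P, R]:
        # The wrapper exposes exactly f's parameter types to the type checker.
        def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
            return f(*args, **kwargs)

        return wrapper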
diff --git a/changelog.d/12163.misc b/changelog.d/12163.misc deleted file mode 100644 index 13de0895f5fa..000000000000 --- a/changelog.d/12163.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce number of DB queries made during processing of `/sync`. diff --git a/changelog.d/12173.misc b/changelog.d/12173.misc deleted file mode 100644 index 9f333e718a86..000000000000 --- a/changelog.d/12173.misc +++ /dev/null @@ -1 +0,0 @@ -Avoid trying to calculate the state at outlier events. diff --git a/changelog.d/12175.bugfix b/changelog.d/12175.bugfix deleted file mode 100644 index 881cb9b76c20..000000000000 --- a/changelog.d/12175.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug where non-standard information was returned from the `/hierarchy` API. Introduced in Synapse v1.41.0. diff --git a/changelog.d/12179.doc b/changelog.d/12179.doc deleted file mode 100644 index 55d8caa45a8c..000000000000 --- a/changelog.d/12179.doc +++ /dev/null @@ -1 +0,0 @@ -Updates to the Room DAG concepts development document. diff --git a/changelog.d/12180.misc b/changelog.d/12180.misc deleted file mode 100644 index 7a347352fd91..000000000000 --- a/changelog.d/12180.misc +++ /dev/null @@ -1 +0,0 @@ -Add `delay_cancellation` utility function, which behaves like `stop_cancellation` but waits until the original `Deferred` resolves before raising a `CancelledError`. diff --git a/changelog.d/12182.misc b/changelog.d/12182.misc deleted file mode 100644 index 7e9ad2c75244..000000000000 --- a/changelog.d/12182.misc +++ /dev/null @@ -1 +0,0 @@ -Retry HTTP replication failures, this should prevent 502's when restarting stateful workers (main, event persisters, stream writers). Contributed by Nick @ Beeper. diff --git a/changelog.d/12183.misc b/changelog.d/12183.misc deleted file mode 100644 index dd441bb64ff7..000000000000 --- a/changelog.d/12183.misc +++ /dev/null @@ -1 +0,0 @@ -Add cancellation support to `@cached` and `@cachedList` decorators. diff --git a/changelog.d/12187.misc b/changelog.d/12187.misc deleted file mode 100644 index c53e68faa508..000000000000 --- a/changelog.d/12187.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unused variables. diff --git a/changelog.d/12188.misc b/changelog.d/12188.misc deleted file mode 100644 index 403158481cee..000000000000 --- a/changelog.d/12188.misc +++ /dev/null @@ -1 +0,0 @@ -Add combined test for HTTP pusher and push rule. Contributed by Nick @ Beeper. diff --git a/changelog.d/12189.bugfix b/changelog.d/12189.bugfix deleted file mode 100644 index df9b0dc413dd..000000000000 --- a/changelog.d/12189.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug when redacting events with relations. diff --git a/changelog.d/12192.misc b/changelog.d/12192.misc deleted file mode 100644 index bdfe8dad98a6..000000000000 --- a/changelog.d/12192.misc +++ /dev/null @@ -1 +0,0 @@ -Rename `HomeServer.get_tcp_replication` to `get_replication_command_handler`. diff --git a/changelog.d/12196.doc b/changelog.d/12196.doc deleted file mode 100644 index 269f06aa3386..000000000000 --- a/changelog.d/12196.doc +++ /dev/null @@ -1 +0,0 @@ -Document that the `typing`, `to_device`, `account_data`, `receipts`, and `presence` stream writer can only be used on a single worker. \ No newline at end of file diff --git a/changelog.d/12197.misc b/changelog.d/12197.misc deleted file mode 100644 index 7d0e9b6bbf4c..000000000000 --- a/changelog.d/12197.misc +++ /dev/null @@ -1 +0,0 @@ -Remove some dead code. 
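The `delay_cancellation` helper described by the #12180 entry above underpins the `@cached` changes in the previous patch. A minimal sketch of the behaviour that entry describes (assuming only the stated contract; the driver code is illustrative):

    from twisted.internet.defer import CancelledError, Deferred

    from synapse.util.async_helpers import delay_cancellation

    underlying: "Deferred[str]" = Deferred()
    wrapped = delay_cancellation(underlying)
    wrapped.addErrback(lambda f: f.trap(CancelledError))  # expected failure

    wrapped.cancel()              # `underlying` is NOT cancelled...
    assert not underlying.called
    underlying.callback("done")   # ...and only now does `wrapped` errback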
diff --git a/changelog.d/12200.removal b/changelog.d/12200.removal deleted file mode 100644 index 312c7ae32597..000000000000 --- a/changelog.d/12200.removal +++ /dev/null @@ -1 +0,0 @@ -The groups/communities feature in Synapse has been deprecated. diff --git a/changelog.d/12202.misc b/changelog.d/12202.misc deleted file mode 100644 index 9f333e718a86..000000000000 --- a/changelog.d/12202.misc +++ /dev/null @@ -1 +0,0 @@ -Avoid trying to calculate the state at outlier events. diff --git a/changelog.d/12203.misc b/changelog.d/12203.misc deleted file mode 100644 index 892dc5bfb7e3..000000000000 --- a/changelog.d/12203.misc +++ /dev/null @@ -1 +0,0 @@ -Fix a misleading comment in the function `check_event_for_spam`. diff --git a/changelog.d/12204.doc b/changelog.d/12204.doc deleted file mode 100644 index c4b2805bb112..000000000000 --- a/changelog.d/12204.doc +++ /dev/null @@ -1 +0,0 @@ -Document that contributors can sign off privately by email. diff --git a/changelog.d/12206.misc b/changelog.d/12206.misc deleted file mode 100644 index df59bb56cdb8..000000000000 --- a/changelog.d/12206.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unnecessary `pass` statements. diff --git a/changelog.d/12207.misc b/changelog.d/12207.misc deleted file mode 100644 index b67a701dbb52..000000000000 --- a/changelog.d/12207.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to tests files. diff --git a/changelog.d/12208.misc b/changelog.d/12208.misc deleted file mode 100644 index c5b635679931..000000000000 --- a/changelog.d/12208.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to tests files. \ No newline at end of file diff --git a/changelog.d/12210.misc b/changelog.d/12210.misc deleted file mode 100644 index 3f6a8747c256..000000000000 --- a/changelog.d/12210.misc +++ /dev/null @@ -1 +0,0 @@ -Update the SSO username picker template to comply with SIWA guidelines. diff --git a/changelog.d/12211.misc b/changelog.d/12211.misc deleted file mode 100644 index d11634a1ee0f..000000000000 --- a/changelog.d/12211.misc +++ /dev/null @@ -1 +0,0 @@ -Improve code documentation for the typing stream over replication. \ No newline at end of file diff --git a/changelog.d/12212.feature b/changelog.d/12212.feature deleted file mode 100644 index fe337ff99057..000000000000 --- a/changelog.d/12212.feature +++ /dev/null @@ -1 +0,0 @@ -Add a new Jinja2 template filter to extract the local part of an email address. diff --git a/changelog.d/12215.bugfix b/changelog.d/12215.bugfix deleted file mode 100644 index 593b12556beb..000000000000 --- a/changelog.d/12215.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in 1.54.0 that broke background updates on sqlite homeservers while search was disabled. diff --git a/debian/changelog b/debian/changelog index 02136a0d606f..09ef24ebb0d3 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.55.0~rc1) stable; urgency=medium + + * New synapse release 1.55.0~rc1. + + -- Synapse Packaging team Tue, 15 Mar 2022 10:59:31 +0000 + matrix-synapse-py3 (1.54.0) stable; urgency=medium * New synapse release 1.54.0. 
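The Jinja2 filter added by #12212 above can be pictured with a generic sketch (the filter and helper names here are illustrative, not necessarily those Synapse registers):

    from jinja2 import Environment

    def localpart(address: str) -> str:
        # "user@example.com" -> "user"
        return address.rsplit("@", 1)[0]

    env = Environment()
    env.filters["localpart"] = localpart
    template = env.from_string("{{ 'user@example.com' | localpart }}")
    assert template.render() == "user"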
diff --git a/synapse/__init__.py b/synapse/__init__.py index 4b0056597692..870707f47643 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -68,7 +68,7 @@ except ImportError: pass -__version__ = "1.54.0" +__version__ = "1.55.0rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 9e90d643e6947c2fa4286c21a6351061cf510026 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 15 Mar 2022 11:16:36 +0000 Subject: [PATCH 403/474] Changelog tweaks --- CHANGES.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index b0311f73bf84..60e7ecb1b9c1 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,10 +1,12 @@ Synapse 1.55.0rc1 (2022-03-15) ============================== +This release removes a workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. **This breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700))**; Mjolnir users should upgrade Mjolnir before upgrading Synapse to this version. + Features -------- -- Add third-party rules rules callbacks `check_can_shutdown_room` and `check_can_deactivate_user`. ([\#12028](https://github.com/matrix-org/synapse/issues/12028)) +- Add third-party rules callbacks `check_can_shutdown_room` and `check_can_deactivate_user`. ([\#12028](https://github.com/matrix-org/synapse/issues/12028)) - Improve performance of logging in for large accounts. ([\#12132](https://github.com/matrix-org/synapse/issues/12132)) - Add experimental env var `SYNAPSE_ASYNC_IO_REACTOR` that causes Synapse to use the asyncio reactor for Twisted. ([\#12135](https://github.com/matrix-org/synapse/issues/12135)) - Support the stable identifiers from [MSC3440](https://github.com/matrix-org/matrix-doc/pull/3440): threads. ([\#12151](https://github.com/matrix-org/synapse/issues/12151)) @@ -16,9 +18,9 @@ Bugfixes - Use the proper serialization format for bundled thread aggregations. The bug has existed since Synapse v1.48.0. ([\#12090](https://github.com/matrix-org/synapse/issues/12090)) - Fix a long-standing bug when redacting events with relations. ([\#12113](https://github.com/matrix-org/synapse/issues/12113), [\#12121](https://github.com/matrix-org/synapse/issues/12121), [\#12130](https://github.com/matrix-org/synapse/issues/12130), [\#12189](https://github.com/matrix-org/synapse/issues/12189)) -- Fix a bug introduced in #4864 whereby background updates are never run with the default background batch size. ([\#12157](https://github.com/matrix-org/synapse/issues/12157)) +- Fix a bug introduced in Synapse 1.7.2 whereby background updates are never run with the default background batch size. ([\#12157](https://github.com/matrix-org/synapse/issues/12157)) - Fix a bug where non-standard information was returned from the `/hierarchy` API. Introduced in Synapse v1.41.0. ([\#12175](https://github.com/matrix-org/synapse/issues/12175)) -- Fix a bug introduced in 1.54.0 that broke background updates on sqlite homeservers while search was disabled. ([\#12215](https://github.com/matrix-org/synapse/issues/12215)) +- Fix a bug introduced in Synapse 1.54.0 that broke background updates on sqlite homeservers while search was disabled. ([\#12215](https://github.com/matrix-org/synapse/issues/12215)) Improved Documentation @@ -34,7 +36,7 @@ Improved Documentation Deprecations and Removals ------------------------- -- Remove workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. 
Breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700)) +- **Remove workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. Breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700))** - Remove backwards compatibilty with pagination tokens from the `/relations` and `/aggregations` endpoints generated from Synapse < v1.52.0. ([\#12138](https://github.com/matrix-org/synapse/issues/12138)) - The groups/communities feature in Synapse has been deprecated. ([\#12200](https://github.com/matrix-org/synapse/issues/12200)) From 5dd949bee6158a8b651db9f2ae417a62c8184bfd Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Tue, 15 Mar 2022 14:16:37 +0100 Subject: [PATCH 404/474] Add type hints to some tests/handlers files. (#12224) --- changelog.d/12224.misc | 1 + mypy.ini | 5 -- tests/handlers/test_directory.py | 84 +++++++++++++++------------- tests/handlers/test_e2e_keys.py | 36 ++++++------ tests/handlers/test_oidc.py | 94 +++++++++++++++++--------------- tests/handlers/test_profile.py | 43 ++++++++------- tests/handlers/test_saml.py | 24 ++++---- 7 files changed, 156 insertions(+), 131 deletions(-) create mode 100644 changelog.d/12224.misc diff --git a/changelog.d/12224.misc b/changelog.d/12224.misc new file mode 100644 index 000000000000..b67a701dbb52 --- /dev/null +++ b/changelog.d/12224.misc @@ -0,0 +1 @@ +Add type hints to tests files. diff --git a/mypy.ini b/mypy.ini index f9c39fcaaee3..fe31bfb8bb37 100644 --- a/mypy.ini +++ b/mypy.ini @@ -67,13 +67,8 @@ exclude = (?x) |tests/federation/transport/test_knocking.py |tests/federation/transport/test_server.py |tests/handlers/test_cas.py - |tests/handlers/test_directory.py - |tests/handlers/test_e2e_keys.py |tests/handlers/test_federation.py - |tests/handlers/test_oidc.py |tests/handlers/test_presence.py - |tests/handlers/test_profile.py - |tests/handlers/test_saml.py |tests/handlers/test_typing.py |tests/http/federation/test_matrix_federation_agent.py |tests/http/federation/test_srv_resolver.py diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py index 6e403a87c5d0..11ad44223d39 100644 --- a/tests/handlers/test_directory.py +++ b/tests/handlers/test_directory.py @@ -12,14 +12,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- +from typing import Any, Awaitable, Callable, Dict from unittest.mock import Mock +from twisted.test.proto_helpers import MemoryReactor + import synapse.api.errors import synapse.rest.admin from synapse.api.constants import EventTypes from synapse.rest.client import directory, login, room -from synapse.types import RoomAlias, create_requester +from synapse.server import HomeServer +from synapse.types import JsonDict, RoomAlias, create_requester +from synapse.util import Clock from tests import unittest from tests.test_utils import make_awaitable @@ -28,13 +32,15 @@ class DirectoryTestCase(unittest.HomeserverTestCase): """Tests the directory service.""" - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.mock_federation = Mock() self.mock_registry = Mock() - self.query_handlers = {} + self.query_handlers: Dict[str, Callable[[dict], Awaitable[JsonDict]]] = {} - def register_query_handler(query_type, handler): + def register_query_handler( + query_type: str, handler: Callable[[dict], Awaitable[JsonDict]] + ) -> None: self.query_handlers[query_type] = handler self.mock_registry.register_query_handler = register_query_handler @@ -54,7 +60,7 @@ def register_query_handler(query_type, handler): return hs - def test_get_local_association(self): + def test_get_local_association(self) -> None: self.get_success( self.store.create_room_alias_association( self.my_room, "!8765qwer:test", ["test"] @@ -65,7 +71,7 @@ def test_get_local_association(self): self.assertEqual({"room_id": "!8765qwer:test", "servers": ["test"]}, result) - def test_get_remote_association(self): + def test_get_remote_association(self) -> None: self.mock_federation.make_query.return_value = make_awaitable( {"room_id": "!8765qwer:test", "servers": ["test", "remote"]} ) @@ -83,7 +89,7 @@ def test_get_remote_association(self): ignore_backoff=True, ) - def test_incoming_fed_query(self): + def test_incoming_fed_query(self) -> None: self.get_success( self.store.create_room_alias_association( self.your_room, "!8765asdf:test", ["test"] @@ -105,7 +111,7 @@ class TestCreateAlias(unittest.HomeserverTestCase): directory.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.handler = hs.get_directory_handler() # Create user @@ -125,7 +131,7 @@ def prepare(self, reactor, clock, hs): self.test_user_tok = self.login("user", "pass") self.helper.join(room=self.room_id, user=self.test_user, tok=self.test_user_tok) - def test_create_alias_joined_room(self): + def test_create_alias_joined_room(self) -> None: """A user can create an alias for a room they're in.""" self.get_success( self.handler.create_association( @@ -135,7 +141,7 @@ def test_create_alias_joined_room(self): ) ) - def test_create_alias_other_room(self): + def test_create_alias_other_room(self) -> None: """A user cannot create an alias for a room they're NOT in.""" other_room_id = self.helper.create_room_as( self.admin_user, tok=self.admin_user_tok @@ -150,7 +156,7 @@ def test_create_alias_other_room(self): synapse.api.errors.SynapseError, ) - def test_create_alias_admin(self): + def test_create_alias_admin(self) -> None: """An admin can create an alias for a room they're NOT in.""" other_room_id = self.helper.create_room_as( self.test_user, tok=self.test_user_tok @@ -173,7 +179,7 @@ class TestDeleteAlias(unittest.HomeserverTestCase): directory.register_servlets, ] - def prepare(self, reactor, clock, hs): + def 
prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self.handler = hs.get_directory_handler() self.state_handler = hs.get_state_handler() @@ -195,7 +201,7 @@ def prepare(self, reactor, clock, hs): self.test_user_tok = self.login("user", "pass") self.helper.join(room=self.room_id, user=self.test_user, tok=self.test_user_tok) - def _create_alias(self, user): + def _create_alias(self, user) -> None: # Create a new alias to this room. self.get_success( self.store.create_room_alias_association( @@ -203,7 +209,7 @@ def _create_alias(self, user): ) ) - def test_delete_alias_not_allowed(self): + def test_delete_alias_not_allowed(self) -> None: """A user that doesn't meet the expected guidelines cannot delete an alias.""" self._create_alias(self.admin_user) self.get_failure( @@ -213,7 +219,7 @@ def test_delete_alias_not_allowed(self): synapse.api.errors.AuthError, ) - def test_delete_alias_creator(self): + def test_delete_alias_creator(self) -> None: """An alias creator can delete their own alias.""" # Create an alias from a different user. self._create_alias(self.test_user) @@ -232,7 +238,7 @@ def test_delete_alias_creator(self): synapse.api.errors.SynapseError, ) - def test_delete_alias_admin(self): + def test_delete_alias_admin(self) -> None: """A server admin can delete an alias created by another user.""" # Create an alias from a different user. self._create_alias(self.test_user) @@ -251,7 +257,7 @@ def test_delete_alias_admin(self): synapse.api.errors.SynapseError, ) - def test_delete_alias_sufficient_power(self): + def test_delete_alias_sufficient_power(self) -> None: """A user with a sufficient power level should be able to delete an alias.""" self._create_alias(self.admin_user) @@ -288,7 +294,7 @@ class CanonicalAliasTestCase(unittest.HomeserverTestCase): directory.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self.handler = hs.get_directory_handler() self.state_handler = hs.get_state_handler() @@ -317,7 +323,7 @@ def _add_alias(self, alias: str) -> RoomAlias: ) return room_alias - def _set_canonical_alias(self, content): + def _set_canonical_alias(self, content) -> None: """Configure the canonical alias state on the room.""" self.helper.send_state( self.room_id, @@ -334,7 +340,7 @@ def _get_canonical_alias(self): ) ) - def test_remove_alias(self): + def test_remove_alias(self) -> None: """Removing an alias that is the canonical alias should remove it there too.""" # Set this new alias as the canonical alias for this room self._set_canonical_alias( @@ -356,7 +362,7 @@ def test_remove_alias(self): self.assertNotIn("alias", data["content"]) self.assertNotIn("alt_aliases", data["content"]) - def test_remove_other_alias(self): + def test_remove_other_alias(self) -> None: """Removing an alias listed as in alt_aliases should remove it there too.""" # Create a second alias. other_test_alias = "#test2:test" @@ -393,7 +399,7 @@ class TestCreateAliasACL(unittest.HomeserverTestCase): servlets = [directory.register_servlets, room.register_servlets] - def default_config(self): + def default_config(self) -> Dict[str, Any]: config = super().default_config() # Add custom alias creation rules to the config. 
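The annotations this patch applies follow one pattern throughout: explicit signatures for the `make_homeserver`/`prepare` hooks and `-> None` on test methods. Condensed into a single illustrative class (the real changes are spread across the hunks above):

    from typing import Awaitable, Callable, Dict

    from twisted.test.proto_helpers import MemoryReactor

    from synapse.server import HomeServer
    from synapse.types import JsonDict
    from synapse.util import Clock

    from tests import unittest

    class ExampleHandlerTestCase(unittest.HomeserverTestCase):  # illustrative
        def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
            return self.setup_test_homeserver()

        def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
            # Handlers that take a query dict and return JSON asynchronously.
            self.query_handlers: Dict[str, Callable[[dict], Awaitable[JsonDict]]] = {}

        def test_something(self) -> None:
            self.assertEqual(self.query_handlers, {})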
@@ -403,7 +409,7 @@ def default_config(self): return config - def test_denied(self): + def test_denied(self) -> None: room_id = self.helper.create_room_as(self.user_id) channel = self.make_request( @@ -413,7 +419,7 @@ def test_denied(self): ) self.assertEqual(403, channel.code, channel.result) - def test_allowed(self): + def test_allowed(self) -> None: room_id = self.helper.create_room_as(self.user_id) channel = self.make_request( @@ -423,7 +429,7 @@ def test_allowed(self): ) self.assertEqual(200, channel.code, channel.result) - def test_denied_during_creation(self): + def test_denied_during_creation(self) -> None: """A room alias that is not allowed should be rejected during creation.""" # Invalid room alias. self.helper.create_room_as( @@ -432,7 +438,7 @@ def test_denied_during_creation(self): extra_content={"room_alias_name": "foo"}, ) - def test_allowed_during_creation(self): + def test_allowed_during_creation(self) -> None: """A valid room alias should be allowed during creation.""" room_id = self.helper.create_room_as( self.user_id, @@ -459,7 +465,7 @@ class TestCreatePublishedRoomACL(unittest.HomeserverTestCase): data = {"room_alias_name": "unofficial_test"} allowed_localpart = "allowed" - def default_config(self): + def default_config(self) -> Dict[str, Any]: config = super().default_config() # Add custom room list publication rules to the config. @@ -474,7 +480,9 @@ def default_config(self): return config - def prepare(self, reactor, clock, hs): + def prepare( + self, reactor: MemoryReactor, clock: Clock, hs: HomeServer + ) -> HomeServer: self.allowed_user_id = self.register_user(self.allowed_localpart, "pass") self.allowed_access_token = self.login(self.allowed_localpart, "pass") @@ -483,7 +491,7 @@ def prepare(self, reactor, clock, hs): return hs - def test_denied_without_publication_permission(self): + def test_denied_without_publication_permission(self) -> None: """ Try to create a room, register an alias for it, and publish it, as a user without permission to publish rooms. @@ -497,7 +505,7 @@ def test_denied_without_publication_permission(self): expect_code=403, ) - def test_allowed_when_creating_private_room(self): + def test_allowed_when_creating_private_room(self) -> None: """ Try to create a room, register an alias for it, and NOT publish it, as a user without permission to publish rooms. @@ -511,7 +519,7 @@ def test_allowed_when_creating_private_room(self): expect_code=200, ) - def test_allowed_with_publication_permission(self): + def test_allowed_with_publication_permission(self) -> None: """ Try to create a room, register an alias for it, and publish it, as a user WITH permission to publish rooms. @@ -525,7 +533,7 @@ def test_allowed_with_publication_permission(self): expect_code=200, ) - def test_denied_publication_with_invalid_alias(self): + def test_denied_publication_with_invalid_alias(self) -> None: """ Try to create a room, register an alias for it, and publish it, as a user WITH permission to publish rooms. @@ -538,7 +546,7 @@ def test_denied_publication_with_invalid_alias(self): expect_code=403, ) - def test_can_create_as_private_room_after_rejection(self): + def test_can_create_as_private_room_after_rejection(self) -> None: """ After failing to publish a room with an alias as a user without publish permission, retry as the same user, but without publishing the room. 
@@ -549,7 +557,7 @@ def test_can_create_as_private_room_after_rejection(self): self.test_denied_without_publication_permission() self.test_allowed_when_creating_private_room() - def test_can_create_with_permission_after_rejection(self): + def test_can_create_with_permission_after_rejection(self) -> None: """ After failing to publish a room with an alias as a user without publish permission, retry as someone with permission, using the same alias. @@ -566,7 +574,9 @@ class TestRoomListSearchDisabled(unittest.HomeserverTestCase): servlets = [directory.register_servlets, room.register_servlets] - def prepare(self, reactor, clock, hs): + def prepare( + self, reactor: MemoryReactor, clock: Clock, hs: HomeServer + ) -> HomeServer: room_id = self.helper.create_room_as(self.user_id) channel = self.make_request( @@ -579,7 +589,7 @@ def prepare(self, reactor, clock, hs): return hs - def test_disabling_room_list(self): + def test_disabling_room_list(self) -> None: self.room_list_handler.enable_room_list_search = True self.directory_handler.enable_room_list_search = True diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 9338ab92e98e..ac21a28c4331 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -20,33 +20,37 @@ from signedjson import key as key, sign as sign from twisted.internet import defer +from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import RoomEncryptionAlgorithms from synapse.api.errors import Codes, SynapseError +from synapse.server import HomeServer +from synapse.types import JsonDict +from synapse.util import Clock from tests import unittest from tests.test_utils import make_awaitable class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: return self.setup_test_homeserver(federation_client=mock.Mock()) - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.handler = hs.get_e2e_keys_handler() self.store = self.hs.get_datastores().main - def test_query_local_devices_no_devices(self): + def test_query_local_devices_no_devices(self) -> None: """If the user has no devices, we expect an empty list.""" local_user = "@boris:" + self.hs.hostname res = self.get_success(self.handler.query_local_devices({local_user: None})) self.assertDictEqual(res, {local_user: {}}) - def test_reupload_one_time_keys(self): + def test_reupload_one_time_keys(self) -> None: """we should be able to re-upload the same keys""" local_user = "@boris:" + self.hs.hostname device_id = "xyz" - keys = { + keys: JsonDict = { "alg1:k1": "key1", "alg2:k2": {"key": "key2", "signatures": {"k1": "sig1"}}, "alg2:k3": {"key": "key3"}, @@ -74,7 +78,7 @@ def test_reupload_one_time_keys(self): res, {"one_time_key_counts": {"alg1": 1, "alg2": 2, "signed_curve25519": 0}} ) - def test_change_one_time_keys(self): + def test_change_one_time_keys(self) -> None: """attempts to change one-time-keys should be rejected""" local_user = "@boris:" + self.hs.hostname @@ -134,7 +138,7 @@ def test_change_one_time_keys(self): SynapseError, ) - def test_claim_one_time_key(self): + def test_claim_one_time_key(self) -> None: local_user = "@boris:" + self.hs.hostname device_id = "xyz" keys = {"alg1:k1": "key1"} @@ -161,7 +165,7 @@ def test_claim_one_time_key(self): }, ) - def test_fallback_key(self): + def test_fallback_key(self) -> None: local_user = 
"@boris:" + self.hs.hostname device_id = "xyz" fallback_key = {"alg1:k1": "fallback_key1"} @@ -294,7 +298,7 @@ def test_fallback_key(self): {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key3}}}, ) - def test_replace_master_key(self): + def test_replace_master_key(self) -> None: """uploading a new signing key should make the old signing key unavailable""" local_user = "@boris:" + self.hs.hostname keys1 = { @@ -328,7 +332,7 @@ def test_replace_master_key(self): ) self.assertDictEqual(devices["master_keys"], {local_user: keys2["master_key"]}) - def test_reupload_signatures(self): + def test_reupload_signatures(self) -> None: """re-uploading a signature should not fail""" local_user = "@boris:" + self.hs.hostname keys1 = { @@ -433,7 +437,7 @@ def test_reupload_signatures(self): self.assertDictEqual(devices["device_keys"][local_user]["abc"], device_key_1) self.assertDictEqual(devices["device_keys"][local_user]["def"], device_key_2) - def test_self_signing_key_doesnt_show_up_as_device(self): + def test_self_signing_key_doesnt_show_up_as_device(self) -> None: """signing keys should be hidden when fetching a user's devices""" local_user = "@boris:" + self.hs.hostname keys1 = { @@ -462,7 +466,7 @@ def test_self_signing_key_doesnt_show_up_as_device(self): res = self.get_success(self.handler.query_local_devices({local_user: None})) self.assertDictEqual(res, {local_user: {}}) - def test_upload_signatures(self): + def test_upload_signatures(self) -> None: """should check signatures that are uploaded""" # set up a user with cross-signing keys and a device. This user will # try uploading signatures @@ -686,7 +690,7 @@ def test_upload_signatures(self): other_master_key["signatures"][local_user]["ed25519:" + usersigning_pubkey], ) - def test_query_devices_remote_no_sync(self): + def test_query_devices_remote_no_sync(self) -> None: """Tests that querying keys for a remote user that we don't share a room with returns the cross signing keys correctly. """ @@ -759,7 +763,7 @@ def test_query_devices_remote_no_sync(self): }, ) - def test_query_devices_remote_sync(self): + def test_query_devices_remote_sync(self) -> None: """Tests that querying keys for a remote user that we share a room with, but haven't yet fetched the keys for, returns the cross signing keys correctly. @@ -845,7 +849,7 @@ def test_query_devices_remote_sync(self): (["device_1", "device_2"],), ] ) - def test_query_all_devices_caches_result(self, device_ids: Iterable[str]): + def test_query_all_devices_caches_result(self, device_ids: Iterable[str]) -> None: """Test that requests for all of a remote user's devices are cached. We do this by asserting that only one call over federation was made, and that @@ -853,7 +857,7 @@ def test_query_all_devices_caches_result(self, device_ids: Iterable[str]): """ local_user_id = "@test:test" remote_user_id = "@test:other" - request_body = {"device_keys": {remote_user_id: []}} + request_body: JsonDict = {"device_keys": {remote_user_id: []}} response_devices = [ { diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index e8418b6638e4..014815db6ee4 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -13,14 +13,18 @@ # limitations under the License. 
import json import os +from typing import Any, Dict from unittest.mock import ANY, Mock, patch from urllib.parse import parse_qs, urlparse import pymacaroons +from twisted.test.proto_helpers import MemoryReactor + from synapse.handlers.sso import MappingException from synapse.server import HomeServer -from synapse.types import UserID +from synapse.types import JsonDict, UserID +from synapse.util import Clock from synapse.util.macaroons import get_value_from_macaroon from tests.test_utils import FakeResponse, get_awaitable_result, simple_async_mock @@ -98,7 +102,7 @@ async def map_user_attributes(self, userinfo, token, failures): } -async def get_json(url): +async def get_json(url: str) -> JsonDict: # Mock get_json calls to handle jwks & oidc discovery endpoints if url == WELL_KNOWN: # Minimal discovery document, as defined in OpenID.Discovery @@ -116,6 +120,8 @@ async def get_json(url): elif url == JWKS_URI: return {"keys": []} + return {} + def _key_file_path() -> str: """path to a file containing the private half of a test key""" @@ -147,12 +153,12 @@ class OidcHandlerTestCase(HomeserverTestCase): if not HAS_OIDC: skip = "requires OIDC" - def default_config(self): + def default_config(self) -> Dict[str, Any]: config = super().default_config() config["public_baseurl"] = BASE_URL return config - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.http_client = Mock(spec=["get_json"]) self.http_client.get_json.side_effect = get_json self.http_client.user_agent = b"Synapse Test" @@ -164,7 +170,7 @@ def make_homeserver(self, reactor, clock): sso_handler = hs.get_sso_handler() # Mock the render error method. self.render_error = Mock(return_value=None) - sso_handler.render_error = self.render_error + sso_handler.render_error = self.render_error # type: ignore[assignment] # Reduce the number of attempts when generating MXIDs. 
sso_handler._MAP_USERNAME_RETRIES = 3 @@ -193,14 +199,14 @@ def assertRenderedError(self, error, error_description=None): return args @override_config({"oidc_config": DEFAULT_CONFIG}) - def test_config(self): + def test_config(self) -> None: """Basic config correctly sets up the callback URL and client auth correctly.""" self.assertEqual(self.provider._callback_url, CALLBACK_URL) self.assertEqual(self.provider._client_auth.client_id, CLIENT_ID) self.assertEqual(self.provider._client_auth.client_secret, CLIENT_SECRET) @override_config({"oidc_config": {**DEFAULT_CONFIG, "discover": True}}) - def test_discovery(self): + def test_discovery(self) -> None: """The handler should discover the endpoints from OIDC discovery document.""" # This would throw if some metadata were invalid metadata = self.get_success(self.provider.load_metadata()) @@ -219,13 +225,13 @@ def test_discovery(self): self.http_client.get_json.assert_not_called() @override_config({"oidc_config": EXPLICIT_ENDPOINT_CONFIG}) - def test_no_discovery(self): + def test_no_discovery(self) -> None: """When discovery is disabled, it should not try to load from discovery document.""" self.get_success(self.provider.load_metadata()) self.http_client.get_json.assert_not_called() @override_config({"oidc_config": EXPLICIT_ENDPOINT_CONFIG}) - def test_load_jwks(self): + def test_load_jwks(self) -> None: """JWKS loading is done once (then cached) if used.""" jwks = self.get_success(self.provider.load_jwks()) self.http_client.get_json.assert_called_once_with(JWKS_URI) @@ -253,7 +259,7 @@ async def patched_load_metadata(): self.get_failure(self.provider.load_jwks(force=True), RuntimeError) @override_config({"oidc_config": DEFAULT_CONFIG}) - def test_validate_config(self): + def test_validate_config(self) -> None: """Provider metadatas are extensively validated.""" h = self.provider @@ -336,14 +342,14 @@ async def force_load(): force_load_metadata() @override_config({"oidc_config": {**DEFAULT_CONFIG, "skip_verification": True}}) - def test_skip_verification(self): + def test_skip_verification(self) -> None: """Provider metadata validation can be disabled by config.""" with self.metadata_edit({"issuer": "http://insecure"}): # This should not throw get_awaitable_result(self.provider.load_metadata()) @override_config({"oidc_config": DEFAULT_CONFIG}) - def test_redirect_request(self): + def test_redirect_request(self) -> None: """The redirect request has the right arguments & generates a valid session cookie.""" req = Mock(spec=["cookies"]) req.cookies = [] @@ -387,7 +393,7 @@ def test_redirect_request(self): self.assertEqual(redirect, "http://client/redirect") @override_config({"oidc_config": DEFAULT_CONFIG}) - def test_callback_error(self): + def test_callback_error(self) -> None: """Errors from the provider returned in the callback are displayed.""" request = Mock(args={}) request.args[b"error"] = [b"invalid_client"] @@ -399,7 +405,7 @@ def test_callback_error(self): self.assertRenderedError("invalid_client", "some description") @override_config({"oidc_config": DEFAULT_CONFIG}) - def test_callback(self): + def test_callback(self) -> None: """Code callback works and display errors if something went wrong. 
A lot of scenarios are tested here: @@ -428,9 +434,9 @@ def test_callback(self): "username": username, } expected_user_id = "@%s:%s" % (username, self.hs.hostname) - self.provider._exchange_code = simple_async_mock(return_value=token) - self.provider._parse_id_token = simple_async_mock(return_value=userinfo) - self.provider._fetch_userinfo = simple_async_mock(return_value=userinfo) + self.provider._exchange_code = simple_async_mock(return_value=token) # type: ignore[assignment] + self.provider._parse_id_token = simple_async_mock(return_value=userinfo) # type: ignore[assignment] + self.provider._fetch_userinfo = simple_async_mock(return_value=userinfo) # type: ignore[assignment] auth_handler = self.hs.get_auth_handler() auth_handler.complete_sso_login = simple_async_mock() @@ -468,7 +474,7 @@ def test_callback(self): self.assertRenderedError("mapping_error") # Handle ID token errors - self.provider._parse_id_token = simple_async_mock(raises=Exception()) + self.provider._parse_id_token = simple_async_mock(raises=Exception()) # type: ignore[assignment] self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_token") @@ -483,7 +489,7 @@ def test_callback(self): "type": "bearer", "access_token": "access_token", } - self.provider._exchange_code = simple_async_mock(return_value=token) + self.provider._exchange_code = simple_async_mock(return_value=token) # type: ignore[assignment] self.get_success(self.handler.handle_oidc_callback(request)) auth_handler.complete_sso_login.assert_called_once_with( @@ -510,8 +516,8 @@ def test_callback(self): id_token = { "sid": "abcdefgh", } - self.provider._parse_id_token = simple_async_mock(return_value=id_token) - self.provider._exchange_code = simple_async_mock(return_value=token) + self.provider._parse_id_token = simple_async_mock(return_value=id_token) # type: ignore[assignment] + self.provider._exchange_code = simple_async_mock(return_value=token) # type: ignore[assignment] auth_handler.complete_sso_login.reset_mock() self.provider._fetch_userinfo.reset_mock() self.get_success(self.handler.handle_oidc_callback(request)) @@ -531,21 +537,21 @@ def test_callback(self): self.render_error.assert_not_called() # Handle userinfo fetching error - self.provider._fetch_userinfo = simple_async_mock(raises=Exception()) + self.provider._fetch_userinfo = simple_async_mock(raises=Exception()) # type: ignore[assignment] self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("fetch_error") # Handle code exchange failure from synapse.handlers.oidc import OidcError - self.provider._exchange_code = simple_async_mock( + self.provider._exchange_code = simple_async_mock( # type: ignore[assignment] raises=OidcError("invalid_request") ) self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_request") @override_config({"oidc_config": DEFAULT_CONFIG}) - def test_callback_session(self): + def test_callback_session(self) -> None: """The callback verifies the session presence and validity""" request = Mock(spec=["args", "getCookie", "cookies"]) @@ -590,7 +596,7 @@ def test_callback_session(self): @override_config( {"oidc_config": {**DEFAULT_CONFIG, "client_auth_method": "client_secret_post"}} ) - def test_exchange_code(self): + def test_exchange_code(self) -> None: """Code exchange behaves correctly and handles various error scenarios.""" token = {"type": "bearer"} token_json = json.dumps(token).encode("utf-8") @@ -686,7 +692,7 @@ def test_exchange_code(self): } } ) - def 
test_exchange_code_jwt_key(self): + def test_exchange_code_jwt_key(self) -> None: """Test that code exchange works with a JWK client secret.""" from authlib.jose import jwt @@ -741,7 +747,7 @@ def test_exchange_code_jwt_key(self): } } ) - def test_exchange_code_no_auth(self): + def test_exchange_code_no_auth(self) -> None: """Test that code exchange works with no client secret.""" token = {"type": "bearer"} self.http_client.request = simple_async_mock( @@ -776,7 +782,7 @@ def test_exchange_code_no_auth(self): } } ) - def test_extra_attributes(self): + def test_extra_attributes(self) -> None: """ Login while using a mapping provider that implements get_extra_attributes. """ @@ -790,8 +796,8 @@ def test_extra_attributes(self): "username": "foo", "phone": "1234567", } - self.provider._exchange_code = simple_async_mock(return_value=token) - self.provider._parse_id_token = simple_async_mock(return_value=userinfo) + self.provider._exchange_code = simple_async_mock(return_value=token) # type: ignore[assignment] + self.provider._parse_id_token = simple_async_mock(return_value=userinfo) # type: ignore[assignment] auth_handler = self.hs.get_auth_handler() auth_handler.complete_sso_login = simple_async_mock() @@ -817,12 +823,12 @@ def test_extra_attributes(self): ) @override_config({"oidc_config": DEFAULT_CONFIG}) - def test_map_userinfo_to_user(self): + def test_map_userinfo_to_user(self) -> None: """Ensure that mapping the userinfo returned from a provider to an MXID works properly.""" auth_handler = self.hs.get_auth_handler() auth_handler.complete_sso_login = simple_async_mock() - userinfo = { + userinfo: dict = { "sub": "test_user", "username": "test_user", } @@ -870,7 +876,7 @@ def test_map_userinfo_to_user(self): ) @override_config({"oidc_config": {**DEFAULT_CONFIG, "allow_existing_users": True}}) - def test_map_userinfo_to_existing_user(self): + def test_map_userinfo_to_existing_user(self) -> None: """Existing users can log in with OpenID Connect when allow_existing_users is True.""" store = self.hs.get_datastores().main user = UserID.from_string("@test_user:test") @@ -974,7 +980,7 @@ def test_map_userinfo_to_existing_user(self): ) @override_config({"oidc_config": DEFAULT_CONFIG}) - def test_map_userinfo_to_invalid_localpart(self): + def test_map_userinfo_to_invalid_localpart(self) -> None: """If the mapping provider generates an invalid localpart it should be rejected.""" self.get_success( _make_callback_with_userinfo(self.hs, {"sub": "test2", "username": "föö"}) @@ -991,7 +997,7 @@ def test_map_userinfo_to_invalid_localpart(self): } } ) - def test_map_userinfo_to_user_retries(self): + def test_map_userinfo_to_user_retries(self) -> None: """The mapping provider can retry generating an MXID if the MXID is already in use.""" auth_handler = self.hs.get_auth_handler() auth_handler.complete_sso_login = simple_async_mock() @@ -1039,7 +1045,7 @@ def test_map_userinfo_to_user_retries(self): ) @override_config({"oidc_config": DEFAULT_CONFIG}) - def test_empty_localpart(self): + def test_empty_localpart(self) -> None: """Attempts to map onto an empty localpart should be rejected.""" userinfo = { "sub": "tester", @@ -1058,7 +1064,7 @@ def test_empty_localpart(self): } } ) - def test_null_localpart(self): + def test_null_localpart(self) -> None: """Mapping onto a null localpart via an empty OIDC attribute should be rejected""" userinfo = { "sub": "tester", @@ -1075,7 +1081,7 @@ def test_null_localpart(self): } } ) - def test_attribute_requirements(self): + def test_attribute_requirements(self) -> None: 
"""The required attributes must be met from the OIDC userinfo response.""" auth_handler = self.hs.get_auth_handler() auth_handler.complete_sso_login = simple_async_mock() @@ -1115,7 +1121,7 @@ def test_attribute_requirements(self): } } ) - def test_attribute_requirements_contains(self): + def test_attribute_requirements_contains(self) -> None: """Test that auth succeeds if userinfo attribute CONTAINS required value""" auth_handler = self.hs.get_auth_handler() auth_handler.complete_sso_login = simple_async_mock() @@ -1146,7 +1152,7 @@ def test_attribute_requirements_contains(self): } } ) - def test_attribute_requirements_mismatch(self): + def test_attribute_requirements_mismatch(self) -> None: """ Test that auth fails if attributes exist but don't match, or are non-string values. @@ -1154,7 +1160,7 @@ def test_attribute_requirements_mismatch(self): auth_handler = self.hs.get_auth_handler() auth_handler.complete_sso_login = simple_async_mock() # userinfo with "test": "not_foobar" attribute should fail - userinfo = { + userinfo: dict = { "sub": "tester", "username": "tester", "test": "not_foobar", @@ -1248,9 +1254,9 @@ async def _make_callback_with_userinfo( handler = hs.get_oidc_handler() provider = handler._providers["oidc"] - provider._exchange_code = simple_async_mock(return_value={"id_token": ""}) - provider._parse_id_token = simple_async_mock(return_value=userinfo) - provider._fetch_userinfo = simple_async_mock(return_value=userinfo) + provider._exchange_code = simple_async_mock(return_value={"id_token": ""}) # type: ignore[assignment] + provider._parse_id_token = simple_async_mock(return_value=userinfo) # type: ignore[assignment] + provider._fetch_userinfo = simple_async_mock(return_value=userinfo) # type: ignore[assignment] state = "state" session = handler._token_generator.generate_oidc_session_token( diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index 972cbac6e496..08733a9f2d42 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py @@ -11,14 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Any, Dict +from typing import Any, Awaitable, Callable, Dict from unittest.mock import Mock +from twisted.test.proto_helpers import MemoryReactor + import synapse.types from synapse.api.errors import AuthError, SynapseError from synapse.rest import admin from synapse.server import HomeServer -from synapse.types import UserID +from synapse.types import JsonDict, UserID +from synapse.util import Clock from tests import unittest from tests.test_utils import make_awaitable @@ -29,13 +32,15 @@ class ProfileTestCase(unittest.HomeserverTestCase): servlets = [admin.register_servlets] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.mock_federation = Mock() self.mock_registry = Mock() - self.query_handlers = {} + self.query_handlers: Dict[str, Callable[[dict], Awaitable[JsonDict]]] = {} - def register_query_handler(query_type, handler): + def register_query_handler( + query_type: str, handler: Callable[[dict], Awaitable[JsonDict]] + ) -> None: self.query_handlers[query_type] = handler self.mock_registry.register_query_handler = register_query_handler @@ -47,7 +52,7 @@ def register_query_handler(query_type, handler): ) return hs - def prepare(self, reactor, clock, hs: HomeServer): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self.frank = UserID.from_string("@1234abcd:test") @@ -58,7 +63,7 @@ def prepare(self, reactor, clock, hs: HomeServer): self.handler = hs.get_profile_handler() - def test_get_my_name(self): + def test_get_my_name(self) -> None: self.get_success( self.store.set_profile_displayname(self.frank.localpart, "Frank") ) @@ -67,7 +72,7 @@ def test_get_my_name(self): self.assertEqual("Frank", displayname) - def test_set_my_name(self): + def test_set_my_name(self) -> None: self.get_success( self.handler.set_displayname( self.frank, synapse.types.create_requester(self.frank), "Frank Jr." @@ -110,7 +115,7 @@ def test_set_my_name(self): self.get_success(self.store.get_profile_displayname(self.frank.localpart)) ) - def test_set_my_name_if_disabled(self): + def test_set_my_name_if_disabled(self) -> None: self.hs.config.registration.enable_set_displayname = False # Setting displayname for the first time is allowed @@ -135,7 +140,7 @@ def test_set_my_name_if_disabled(self): SynapseError, ) - def test_set_my_name_noauth(self): + def test_set_my_name_noauth(self) -> None: self.get_failure( self.handler.set_displayname( self.frank, synapse.types.create_requester(self.bob), "Frank Jr." 
@@ -143,7 +148,7 @@ def test_set_my_name_noauth(self): AuthError, ) - def test_get_other_name(self): + def test_get_other_name(self) -> None: self.mock_federation.make_query.return_value = make_awaitable( {"displayname": "Alice"} ) @@ -158,7 +163,7 @@ def test_get_other_name(self): ignore_backoff=True, ) - def test_incoming_fed_query(self): + def test_incoming_fed_query(self) -> None: self.get_success(self.store.create_profile("caroline")) self.get_success(self.store.set_profile_displayname("caroline", "Caroline")) @@ -174,7 +179,7 @@ def test_incoming_fed_query(self): self.assertEqual({"displayname": "Caroline"}, response) - def test_get_my_avatar(self): + def test_get_my_avatar(self) -> None: self.get_success( self.store.set_profile_avatar_url( self.frank.localpart, "http://my.server/me.png" @@ -184,7 +189,7 @@ def test_get_my_avatar(self): self.assertEqual("http://my.server/me.png", avatar_url) - def test_set_my_avatar(self): + def test_set_my_avatar(self) -> None: self.get_success( self.handler.set_avatar_url( self.frank, @@ -225,7 +230,7 @@ def test_set_my_avatar(self): (self.get_success(self.store.get_profile_avatar_url(self.frank.localpart))), ) - def test_set_my_avatar_if_disabled(self): + def test_set_my_avatar_if_disabled(self) -> None: self.hs.config.registration.enable_set_avatar_url = False # Setting displayname for the first time is allowed @@ -250,7 +255,7 @@ def test_set_my_avatar_if_disabled(self): SynapseError, ) - def test_avatar_constraints_no_config(self): + def test_avatar_constraints_no_config(self) -> None: """Tests that the method to check an avatar against configured constraints skips all of its check if no constraint is configured. """ @@ -263,7 +268,7 @@ def test_avatar_constraints_no_config(self): self.assertTrue(res) @unittest.override_config({"max_avatar_size": 50}) - def test_avatar_constraints_missing(self): + def test_avatar_constraints_missing(self) -> None: """Tests that an avatar isn't allowed if the file at the given MXC URI couldn't be found. """ @@ -273,7 +278,7 @@ def test_avatar_constraints_missing(self): self.assertFalse(res) @unittest.override_config({"max_avatar_size": 50}) - def test_avatar_constraints_file_size(self): + def test_avatar_constraints_file_size(self) -> None: """Tests that a file that's above the allowed file size is forbidden but one that's below it is allowed. """ @@ -295,7 +300,7 @@ def test_avatar_constraints_file_size(self): self.assertFalse(res) @unittest.override_config({"allowed_avatar_mimetypes": ["image/png"]}) - def test_avatar_constraint_mime_type(self): + def test_avatar_constraint_mime_type(self) -> None: """Tests that a file with an unauthorised MIME type is forbidden but one with an authorised content type is allowed. """ diff --git a/tests/handlers/test_saml.py b/tests/handlers/test_saml.py index 23941abed852..8d4404eda10d 100644 --- a/tests/handlers/test_saml.py +++ b/tests/handlers/test_saml.py @@ -12,12 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Optional +from typing import Any, Dict, Optional from unittest.mock import Mock import attr +from twisted.test.proto_helpers import MemoryReactor + from synapse.api.errors import RedirectException +from synapse.server import HomeServer +from synapse.util import Clock from tests.test_utils import simple_async_mock from tests.unittest import HomeserverTestCase, override_config @@ -81,10 +85,10 @@ def saml_response_to_user_attributes( class SamlHandlerTestCase(HomeserverTestCase): - def default_config(self): + def default_config(self) -> Dict[str, Any]: config = super().default_config() config["public_baseurl"] = BASE_URL - saml_config = { + saml_config: Dict[str, Any] = { "sp_config": {"metadata": {}}, # Disable grandfathering. "grandfathered_mxid_source_attribute": None, @@ -98,7 +102,7 @@ def default_config(self): return config - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: hs = self.setup_test_homeserver() self.handler = hs.get_saml_handler() @@ -114,7 +118,7 @@ def make_homeserver(self, reactor, clock): elif not has_xmlsec1: skip = "Requires xmlsec1" - def test_map_saml_response_to_user(self): + def test_map_saml_response_to_user(self) -> None: """Ensure that mapping the SAML response returned from a provider to an MXID works properly.""" # stub out the auth handler @@ -140,7 +144,7 @@ def test_map_saml_response_to_user(self): ) @override_config({"saml2_config": {"grandfathered_mxid_source_attribute": "mxid"}}) - def test_map_saml_response_to_existing_user(self): + def test_map_saml_response_to_existing_user(self) -> None: """Existing users can log in with SAML account.""" store = self.hs.get_datastores().main self.get_success( @@ -186,7 +190,7 @@ def test_map_saml_response_to_existing_user(self): auth_provider_session_id=None, ) - def test_map_saml_response_to_invalid_localpart(self): + def test_map_saml_response_to_invalid_localpart(self) -> None: """If the mapping provider generates an invalid localpart it should be rejected.""" # stub out the auth handler @@ -207,7 +211,7 @@ def test_map_saml_response_to_invalid_localpart(self): ) auth_handler.complete_sso_login.assert_not_called() - def test_map_saml_response_to_user_retries(self): + def test_map_saml_response_to_user_retries(self) -> None: """The mapping provider can retry generating an MXID if the MXID is already in use.""" # stub out the auth handler and error renderer @@ -271,7 +275,7 @@ def test_map_saml_response_to_user_retries(self): } } ) - def test_map_saml_response_redirect(self): + def test_map_saml_response_redirect(self) -> None: """Test a mapping provider that raises a RedirectException""" saml_response = FakeAuthnResponse({"uid": "test", "username": "test_user"}) @@ -292,7 +296,7 @@ def test_map_saml_response_redirect(self): }, } ) - def test_attribute_requirements(self): + def test_attribute_requirements(self) -> None: """The required attributes must be met from the SAML response.""" # stub out the auth handler From dea577998f221297d3ff30bdf904f7147f3c3d8a Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Tue, 15 Mar 2022 15:40:34 +0000 Subject: [PATCH 405/474] Add tests for database transaction callbacks (#12198) Signed-off-by: Sean Quah --- changelog.d/12198.misc | 1 + tests/storage/test_database.py | 104 ++++++++++++++++++++++++++++++++- 2 files changed, 104 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12198.misc diff --git a/changelog.d/12198.misc 
b/changelog.d/12198.misc new file mode 100644 index 000000000000..6b184a9053f8 --- /dev/null +++ b/changelog.d/12198.misc @@ -0,0 +1 @@ +Add tests for database transaction callbacks. diff --git a/tests/storage/test_database.py b/tests/storage/test_database.py index 85978675634b..ae13bed08621 100644 --- a/tests/storage/test_database.py +++ b/tests/storage/test_database.py @@ -12,7 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.storage.database import make_tuple_comparison_clause +from typing import Callable, Tuple +from unittest.mock import Mock, call + +from twisted.test.proto_helpers import MemoryReactor + +from synapse.server import HomeServer +from synapse.storage.database import ( + DatabasePool, + LoggingTransaction, + make_tuple_comparison_clause, +) +from synapse.util import Clock from tests import unittest @@ -22,3 +33,94 @@ def test_native_tuple_comparison(self): clause, args = make_tuple_comparison_clause([("a", 1), ("b", 2)]) self.assertEqual(clause, "(a,b) > (?,?)") self.assertEqual(args, [1, 2]) + + +class CallbacksTestCase(unittest.HomeserverTestCase): + """Tests for transaction callbacks.""" + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + self.db_pool: DatabasePool = self.store.db_pool + + def _run_interaction( + self, func: Callable[[LoggingTransaction], object] + ) -> Tuple[Mock, Mock]: + """Run the given function in a database transaction, with callbacks registered. + + Args: + func: The function to be run in a transaction. The transaction will be + retried if `func` raises an `OperationalError`. + + Returns: + Two mocks, which were registered as an `after_callback` and an + `exception_callback` respectively, on every transaction attempt. + """ + after_callback = Mock() + exception_callback = Mock() + + def _test_txn(txn: LoggingTransaction) -> None: + txn.call_after(after_callback, 123, 456, extra=789) + txn.call_on_exception(exception_callback, 987, 654, extra=321) + func(txn) + + try: + self.get_success_or_raise( + self.db_pool.runInteraction("test_transaction", _test_txn) + ) + except Exception: + pass + + return after_callback, exception_callback + + def test_after_callback(self) -> None: + """Test that the after callback is called when a transaction succeeds.""" + after_callback, exception_callback = self._run_interaction(lambda txn: None) + + after_callback.assert_called_once_with(123, 456, extra=789) + exception_callback.assert_not_called() + + def test_exception_callback(self) -> None: + """Test that the exception callback is called when a transaction fails.""" + _test_txn = Mock(side_effect=ZeroDivisionError) + after_callback, exception_callback = self._run_interaction(_test_txn) + + after_callback.assert_not_called() + exception_callback.assert_called_once_with(987, 654, extra=321) + + def test_failed_retry(self) -> None: + """Test that the exception callback is called for every failed attempt.""" + # Always raise an `OperationalError`. 
+ _test_txn = Mock(side_effect=self.db_pool.engine.module.OperationalError) + after_callback, exception_callback = self._run_interaction(_test_txn) + + after_callback.assert_not_called() + exception_callback.assert_has_calls( + [ + call(987, 654, extra=321), + call(987, 654, extra=321), + call(987, 654, extra=321), + call(987, 654, extra=321), + call(987, 654, extra=321), + call(987, 654, extra=321), + ] + ) + self.assertEqual(exception_callback.call_count, 6) # no additional calls + + def test_successful_retry(self) -> None: + """Test callbacks for a failed transaction followed by a successful attempt.""" + # Raise an `OperationalError` on the first attempt only. + _test_txn = Mock( + side_effect=[self.db_pool.engine.module.OperationalError, None] + ) + after_callback, exception_callback = self._run_interaction(_test_txn) + + # Calling both `after_callback`s when the first attempt failed is rather + # surprising (#12184). Let's document the behaviour in a test. + after_callback.assert_has_calls( + [ + call(123, 456, extra=789), + call(123, 456, extra=789), + ] + ) + self.assertEqual(after_callback.call_count, 2) # no additional calls + exception_callback.assert_not_called() From dda9b7fc4d2e6ca84a1a994a7ff1943b590e71df Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 15 Mar 2022 14:06:05 -0400 Subject: [PATCH 406/474] Use the ignored_users table to test event visibility & sync. (#12225) Instead of fetching the raw account data and re-parsing it. The ignored_users table is a denormalised version of the account data for quick searching. --- changelog.d/12225.misc | 1 + synapse/handlers/sync.py | 30 +------------- synapse/push/bulk_push_rule_evaluator.py | 2 +- .../storage/databases/main/account_data.py | 41 +++++++++++++++++-- synapse/visibility.py | 18 ++------ tests/storage/test_account_data.py | 17 ++++++++ 6 files changed, 62 insertions(+), 47 deletions(-) create mode 100644 changelog.d/12225.misc diff --git a/changelog.d/12225.misc b/changelog.d/12225.misc new file mode 100644 index 000000000000..23105c727c3c --- /dev/null +++ b/changelog.d/12225.misc @@ -0,0 +1 @@ +Use the `ignored_users` table in additional places instead of re-parsing the account data. diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 0aa3052fd6ac..c9d6a18bd700 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -28,7 +28,7 @@ import attr from prometheus_client import Counter -from synapse.api.constants import AccountDataTypes, EventTypes, Membership, ReceiptTypes +from synapse.api.constants import EventTypes, Membership, ReceiptTypes from synapse.api.filtering import FilterCollection from synapse.api.presence import UserPresenceState from synapse.api.room_versions import KNOWN_ROOM_VERSIONS @@ -1601,7 +1601,7 @@ async def _generate_sync_entry_for_rooms( return set(), set(), set(), set() # 3. Work out which rooms need reporting in the sync response. 
- ignored_users = await self._get_ignored_users(user_id) + ignored_users = await self.store.ignored_users(user_id) if since_token: room_changes = await self._get_rooms_changed( sync_result_builder, ignored_users @@ -1627,7 +1627,6 @@ async def handle_room_entries(room_entry: "RoomSyncResultBuilder") -> None: logger.debug("Generating room entry for %s", room_entry.room_id) await self._generate_room_entry( sync_result_builder, - ignored_users, room_entry, ephemeral=ephemeral_by_room.get(room_entry.room_id, []), tags=tags_by_room.get(room_entry.room_id), @@ -1657,29 +1656,6 @@ async def handle_room_entries(room_entry: "RoomSyncResultBuilder") -> None: newly_left_users, ) - async def _get_ignored_users(self, user_id: str) -> FrozenSet[str]: - """Retrieve the users ignored by the given user from their global account_data. - - Returns an empty set if - - there is no global account_data entry for ignored_users - - there is such an entry, but it's not a JSON object. - """ - # TODO: Can we `SELECT ignored_user_id FROM ignored_users WHERE ignorer_user_id=?;` instead? - ignored_account_data = ( - await self.store.get_global_account_data_by_type_for_user( - user_id=user_id, data_type=AccountDataTypes.IGNORED_USER_LIST - ) - ) - - # If there is ignored users account data and it matches the proper type, - # then use it. - ignored_users: FrozenSet[str] = frozenset() - if ignored_account_data: - ignored_users_data = ignored_account_data.get("ignored_users", {}) - if isinstance(ignored_users_data, dict): - ignored_users = frozenset(ignored_users_data.keys()) - return ignored_users - async def _have_rooms_changed( self, sync_result_builder: "SyncResultBuilder" ) -> bool: @@ -2022,7 +1998,6 @@ async def _get_all_rooms( async def _generate_room_entry( self, sync_result_builder: "SyncResultBuilder", - ignored_users: FrozenSet[str], room_builder: "RoomSyncResultBuilder", ephemeral: List[JsonDict], tags: Optional[Dict[str, Dict[str, Any]]], @@ -2051,7 +2026,6 @@ async def _generate_room_entry( Args: sync_result_builder - ignored_users: Set of users ignored by user. room_builder ephemeral: List of new ephemeral events for room tags: List of *all* tags for room, or None if there has been diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 8140afcb6b37..030898e4d0cc 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -213,7 +213,7 @@ async def action_for_event_by_user( if not event.is_state(): ignorers = await self.store.ignored_by(event.sender) else: - ignorers = set() + ignorers = frozenset() for uid, rules in rules_by_user.items(): if event.sender == uid: diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 52146aacc8c0..9af9f4f18e19 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -14,7 +14,17 @@ # limitations under the License. 
import logging -from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple, cast +from typing import ( + TYPE_CHECKING, + Any, + Dict, + FrozenSet, + Iterable, + List, + Optional, + Tuple, + cast, +) from synapse.api.constants import AccountDataTypes from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker @@ -365,7 +375,7 @@ def get_updated_account_data_for_user_txn( ) @cached(max_entries=5000, iterable=True) - async def ignored_by(self, user_id: str) -> Set[str]: + async def ignored_by(self, user_id: str) -> FrozenSet[str]: """ Get users which ignore the given user. @@ -375,7 +385,7 @@ async def ignored_by(self, user_id: str) -> Set[str]: Return: The user IDs which ignore the given user. """ - return set( + return frozenset( await self.db_pool.simple_select_onecol( table="ignored_users", keyvalues={"ignored_user_id": user_id}, @@ -384,6 +394,26 @@ async def ignored_by(self, user_id: str) -> Set[str]: ) ) + @cached(max_entries=5000, iterable=True) + async def ignored_users(self, user_id: str) -> FrozenSet[str]: + """ + Get users which the given user ignores. + + Params: + user_id: The user ID which is making the request. + + Return: + The user IDs which are ignored by the given user. + """ + return frozenset( + await self.db_pool.simple_select_onecol( + table="ignored_users", + keyvalues={"ignorer_user_id": user_id}, + retcol="ignored_user_id", + desc="ignored_users", + ) + ) + def process_replication_rows( self, stream_name: str, @@ -529,6 +559,10 @@ def _add_account_data_for_user( else: currently_ignored_users = set() + # If the data has not changed, nothing to do. + if previously_ignored_users == currently_ignored_users: + return + # Delete entries which are no longer ignored. self.db_pool.simple_delete_many_txn( txn, @@ -551,6 +585,7 @@ def _add_account_data_for_user( # Invalidate the cache for any ignored users which were added or removed. for ignored_user_id in previously_ignored_users ^ currently_ignored_users: self._invalidate_cache_and_stream(txn, self.ignored_by, (ignored_user_id,)) + self._invalidate_cache_and_stream(txn, self.ignored_users, (user_id,)) async def purge_account_data_for_user(self, user_id: str) -> None: """ diff --git a/synapse/visibility.py b/synapse/visibility.py index 281cbe4d8877..49519eb8f51e 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -14,12 +14,7 @@ import logging from typing import Dict, FrozenSet, List, Optional -from synapse.api.constants import ( - AccountDataTypes, - EventTypes, - HistoryVisibility, - Membership, -) +from synapse.api.constants import EventTypes, HistoryVisibility, Membership from synapse.events import EventBase from synapse.events.utils import prune_event from synapse.storage import Storage @@ -87,15 +82,8 @@ async def filter_events_for_client( state_filter=StateFilter.from_types(types), ) - ignore_dict_content = await storage.main.get_global_account_data_by_type_for_user( - user_id, AccountDataTypes.IGNORED_USER_LIST - ) - - ignore_list: FrozenSet[str] = frozenset() - if ignore_dict_content: - ignored_users_dict = ignore_dict_content.get("ignored_users", {}) - if isinstance(ignored_users_dict, dict): - ignore_list = frozenset(ignored_users_dict.keys()) + # Get the users who are ignored by the requesting user. 
+ ignore_list = await storage.main.ignored_users(user_id) erased_senders = await storage.main.are_users_erased(e.sender for e in events) diff --git a/tests/storage/test_account_data.py b/tests/storage/test_account_data.py index 272cd3540220..72bf5b3d311c 100644 --- a/tests/storage/test_account_data.py +++ b/tests/storage/test_account_data.py @@ -47,9 +47,18 @@ def assert_ignorers( expected_ignorer_user_ids, ) + def assert_ignored( + self, ignorer_user_id: str, expected_ignored_user_ids: Set[str] + ) -> None: + self.assertEqual( + self.get_success(self.store.ignored_users(ignorer_user_id)), + expected_ignored_user_ids, + ) + def test_ignoring_users(self): """Basic adding/removing of users from the ignore list.""" self._update_ignore_list("@other:test", "@another:remote") + self.assert_ignored(self.user, {"@other:test", "@another:remote"}) # Check a user which no one ignores. self.assert_ignorers("@user:test", set()) @@ -62,6 +71,7 @@ def test_ignoring_users(self): # Add one user, remove one user, and leave one user. self._update_ignore_list("@foo:test", "@another:remote") + self.assert_ignored(self.user, {"@foo:test", "@another:remote"}) # Check the removed user. self.assert_ignorers("@other:test", set()) @@ -76,20 +86,24 @@ def test_caching(self): """Ensure that caching works properly between different users.""" # The first user ignores a user. self._update_ignore_list("@other:test") + self.assert_ignored(self.user, {"@other:test"}) self.assert_ignorers("@other:test", {self.user}) # The second user ignores them. self._update_ignore_list("@other:test", ignorer_user_id="@second:test") + self.assert_ignored("@second:test", {"@other:test"}) self.assert_ignorers("@other:test", {self.user, "@second:test"}) # The first user un-ignores them. self._update_ignore_list() + self.assert_ignored(self.user, set()) self.assert_ignorers("@other:test", {"@second:test"}) def test_invalid_data(self): """Invalid data ends up clearing out the ignored users list.""" # Add some data and ensure it is there. self._update_ignore_list("@other:test") + self.assert_ignored(self.user, {"@other:test"}) self.assert_ignorers("@other:test", {self.user}) # No ignored_users key. @@ -102,10 +116,12 @@ def test_invalid_data(self): ) # No one ignores the user now. + self.assert_ignored(self.user, set()) self.assert_ignorers("@other:test", set()) # Add some data and ensure it is there. self._update_ignore_list("@other:test") + self.assert_ignored(self.user, {"@other:test"}) self.assert_ignorers("@other:test", {self.user}) # Invalid data. @@ -118,4 +134,5 @@ def test_invalid_data(self): ) # No one ignores the user now. + self.assert_ignored(self.user, set()) self.assert_ignorers("@other:test", set()) From 4587b35929d22731644a11120a9e7d6a9c3bc304 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 16 Mar 2022 07:21:36 -0400 Subject: [PATCH 407/474] Clean-up logic for rebasing URLs during URL preview. (#12219) By using urljoin from the standard library and reducing the number of places URLs are rebased. 
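
For illustration only (not part of this patch; the URLs below are made-up
examples), a minimal runnable sketch of why the standard-library `urljoin`
is enough to replace the hand-rolled `rebase_url()` removed below, plus the
`data:` URL case that `_precache_image_url` now guards explicitly:

    from urllib.parse import urljoin, urlparse

    # Relative references resolve against the base page, per RFC 3986 --
    # the same behaviour rebase_url() approximated by hand:
    assert urljoin("https://example.com/foo/", "subpage") == "https://example.com/foo/subpage"
    assert urljoin("https://example.com/foo", "sibling") == "https://example.com/sibling"
    assert urljoin("https://example.com/foo/", "/bar") == "https://example.com/bar"

    # Absolute URLs come back unchanged:
    assert urljoin("https://example.com/foo/", "https://alice.com/a/") == "https://alice.com/a/"

    # data: URLs must be skipped before joining (joining would mangle them),
    # which is why the new code checks the scheme first:
    image_url = "data:,Hello%2C%20World%21"
    if urlparse(image_url).scheme != "data":
        image_url = urljoin("https://example.com/foo/", image_url)
    assert image_url == "data:,Hello%2C%20World%21"
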
--- changelog.d/12219.misc | 1 + synapse/rest/media/v1/preview_html.py | 39 +------------- synapse/rest/media/v1/preview_url_resource.py | 23 ++++---- tests/rest/media/v1/test_html_preview.py | 54 ++++--------------- 4 files changed, 26 insertions(+), 91 deletions(-) create mode 100644 changelog.d/12219.misc diff --git a/changelog.d/12219.misc b/changelog.d/12219.misc new file mode 100644 index 000000000000..607941409295 --- /dev/null +++ b/changelog.d/12219.misc @@ -0,0 +1 @@ +Clean-up logic around rebasing URLs for URL image previews. diff --git a/synapse/rest/media/v1/preview_html.py b/synapse/rest/media/v1/preview_html.py index 872a9e72e818..4cc9c66fbe68 100644 --- a/synapse/rest/media/v1/preview_html.py +++ b/synapse/rest/media/v1/preview_html.py @@ -16,7 +16,6 @@ import logging import re from typing import TYPE_CHECKING, Dict, Generator, Iterable, Optional, Set, Union -from urllib import parse as urlparse if TYPE_CHECKING: from lxml import etree @@ -144,9 +143,7 @@ def decode_body( return etree.fromstring(body, parser) -def parse_html_to_open_graph( - tree: "etree.Element", media_uri: str -) -> Dict[str, Optional[str]]: +def parse_html_to_open_graph(tree: "etree.Element") -> Dict[str, Optional[str]]: """ Parse the HTML document into an Open Graph response. @@ -155,7 +152,6 @@ def parse_html_to_open_graph( Args: tree: The parsed HTML document. - media_url: The URI used to download the body. Returns: The Open Graph response as a dictionary. @@ -209,7 +205,7 @@ def parse_html_to_open_graph( "//*/meta[translate(@itemprop, 'IMAGE', 'image')='image']/@content" ) if meta_image: - og["og:image"] = rebase_url(meta_image[0], media_uri) + og["og:image"] = meta_image[0] else: # TODO: consider inlined CSS styles as well as width & height attribs images = tree.xpath("//img[@src][number(@width)>10][number(@height)>10]") @@ -320,37 +316,6 @@ def _iterate_over_text( ) -def rebase_url(url: str, base: str) -> str: - """ - Resolves a potentially relative `url` against an absolute `base` URL. - - For example: - - >>> rebase_url("subpage", "https://example.com/foo/") - 'https://example.com/foo/subpage' - >>> rebase_url("sibling", "https://example.com/foo") - 'https://example.com/sibling' - >>> rebase_url("/bar", "https://example.com/foo/") - 'https://example.com/bar' - >>> rebase_url("https://alice.com/a/", "https://example.com/foo/") - 'https://alice.com/a' - """ - base_parts = urlparse.urlparse(base) - # Convert the parsed URL to a list for (potential) modification. - url_parts = list(urlparse.urlparse(url)) - # Add a scheme, if one does not exist. - if not url_parts[0]: - url_parts[0] = base_parts.scheme or "http" - # Fix up the hostname, if this is not a data URL. - if url_parts[0] != "data" and not url_parts[1]: - url_parts[1] = base_parts.netloc - # If the path does not start with a /, nest it under the base path's last - # directory. 
- if not url_parts[2].startswith("/"): - url_parts[2] = re.sub(r"/[^/]+$", "/", base_parts.path) + url_parts[2] - return urlparse.urlunparse(url_parts) - - def summarize_paragraphs( text_nodes: Iterable[str], min_size: int = 200, max_size: int = 500 ) -> Optional[str]: diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 14ea88b24052..d47af8ead6b7 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -22,7 +22,7 @@ import sys import traceback from typing import TYPE_CHECKING, BinaryIO, Iterable, Optional, Tuple -from urllib import parse as urlparse +from urllib.parse import urljoin, urlparse, urlsplit from urllib.request import urlopen import attr @@ -44,11 +44,7 @@ from synapse.rest.media.v1._base import get_filename_from_headers from synapse.rest.media.v1.media_storage import MediaStorage from synapse.rest.media.v1.oembed import OEmbedProvider -from synapse.rest.media.v1.preview_html import ( - decode_body, - parse_html_to_open_graph, - rebase_url, -) +from synapse.rest.media.v1.preview_html import decode_body, parse_html_to_open_graph from synapse.types import JsonDict, UserID from synapse.util import json_encoder from synapse.util.async_helpers import ObservableDeferred @@ -187,7 +183,7 @@ async def _async_render_GET(self, request: SynapseRequest) -> None: ts = self.clock.time_msec() # XXX: we could move this into _do_preview if we wanted. - url_tuple = urlparse.urlsplit(url) + url_tuple = urlsplit(url) for entry in self.url_preview_url_blacklist: match = True for attrib in entry: @@ -322,7 +318,7 @@ async def _do_preview(self, url: str, user: UserID, ts: int) -> bytes: # Parse Open Graph information from the HTML in case the oEmbed # response failed or is incomplete. - og_from_html = parse_html_to_open_graph(tree, media_info.uri) + og_from_html = parse_html_to_open_graph(tree) # Compile the Open Graph response by using the scraped # information from the HTML and overlaying any information @@ -588,12 +584,17 @@ async def _precache_image_url( if "og:image" not in og or not og["og:image"]: return + # The image URL from the HTML might be relative to the previewed page, + # convert it to an URL which can be requested directly. + image_url = og["og:image"] + url_parts = urlparse(image_url) + if url_parts.scheme != "data": + image_url = urljoin(media_info.uri, image_url) + # FIXME: it might be cleaner to use the same flow as the main /preview_url # request itself and benefit from the same caching etc. But for now we # just rely on the caching on the master request to speed things up. 
- image_info = await self._handle_url( - rebase_url(og["og:image"], media_info.uri), user, allow_data_urls=True - ) + image_info = await self._handle_url(image_url, user, allow_data_urls=True) if _is_media(image_info.media_type): # TODO: make sure we don't choke on white-on-transparent images diff --git a/tests/rest/media/v1/test_html_preview.py b/tests/rest/media/v1/test_html_preview.py index 3fb37a2a5970..62e308814d2f 100644 --- a/tests/rest/media/v1/test_html_preview.py +++ b/tests/rest/media/v1/test_html_preview.py @@ -16,7 +16,6 @@ _get_html_media_encodings, decode_body, parse_html_to_open_graph, - rebase_url, summarize_paragraphs, ) @@ -161,7 +160,7 @@ def test_simple(self) -> None: """ tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) @@ -177,7 +176,7 @@ def test_comment(self) -> None: """ tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) @@ -196,7 +195,7 @@ def test_comment2(self) -> None: """ tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual( og, @@ -218,7 +217,7 @@ def test_script(self) -> None: """ tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) @@ -232,7 +231,7 @@ def test_missing_title(self) -> None: """ tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": None, "og:description": "Some text."}) @@ -247,7 +246,7 @@ def test_h1_as_title(self) -> None: """ tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Title", "og:description": "Some text."}) @@ -262,7 +261,7 @@ def test_missing_title_and_broken_h1(self) -> None: """ tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": None, "og:description": "Some text."}) @@ -290,7 +289,7 @@ def test_xml(self) -> None: FooSome text. 
""".strip() tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) def test_invalid_encoding(self) -> None: @@ -304,7 +303,7 @@ def test_invalid_encoding(self) -> None: """ tree = decode_body(html, "http://example.com/test.html", "invalid-encoding") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."}) def test_invalid_encoding2(self) -> None: @@ -319,7 +318,7 @@ def test_invalid_encoding2(self) -> None: """ tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "ÿÿ Foo", "og:description": "Some text."}) def test_windows_1252(self) -> None: @@ -333,7 +332,7 @@ def test_windows_1252(self) -> None: """ tree = decode_body(html, "http://example.com/test.html") - og = parse_html_to_open_graph(tree, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "ó", "og:description": "Some text."}) @@ -448,34 +447,3 @@ def test_unknown_invalid(self) -> None: 'text/html; charset="invalid"', ) self.assertEqual(list(encodings), ["utf-8", "cp1252"]) - - -class RebaseUrlTestCase(unittest.TestCase): - def test_relative(self) -> None: - """Relative URLs should be resolved based on the context of the base URL.""" - self.assertEqual( - rebase_url("subpage", "https://example.com/foo/"), - "https://example.com/foo/subpage", - ) - self.assertEqual( - rebase_url("sibling", "https://example.com/foo"), - "https://example.com/sibling", - ) - self.assertEqual( - rebase_url("/bar", "https://example.com/foo/"), - "https://example.com/bar", - ) - - def test_absolute(self) -> None: - """Absolute URLs should not be modified.""" - self.assertEqual( - rebase_url("https://alice.com/a/", "https://example.com/foo/"), - "https://alice.com/a/", - ) - - def test_data(self) -> None: - """Data URLs should not be modified.""" - self.assertEqual( - rebase_url("data:,Hello%2C%20World%21", "https://example.com/foo/"), - "data:,Hello%2C%20World%21", - ) From 1da0f79d5455b594f2aa989106a672786f5b990f Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 16 Mar 2022 09:20:57 -0400 Subject: [PATCH 408/474] Refactor relations tests (#12232) * Moves the relation pagination tests to a separate class. * Move the assertion of the response code into the `_send_relation` helper. * Moves some helpers into the base-class. --- changelog.d/12232.misc | 1 + tests/rest/client/test_relations.py | 1389 +++++++++++++-------------- 2 files changed, 674 insertions(+), 716 deletions(-) create mode 100644 changelog.d/12232.misc diff --git a/changelog.d/12232.misc b/changelog.d/12232.misc new file mode 100644 index 000000000000..4a4132edff2c --- /dev/null +++ b/changelog.d/12232.misc @@ -0,0 +1 @@ +Refactor relations tests to improve code re-use. 
diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py
index 0cbe6c0cf754..3dbd1304a87d 100644
--- a/tests/rest/client/test_relations.py
+++ b/tests/rest/client/test_relations.py
@@ -79,6 +79,7 @@ def _send_relation(
         content: Optional[dict] = None,
         access_token: Optional[str] = None,
         parent_id: Optional[str] = None,
+        expected_response_code: int = 200,
     ) -> FakeChannel:
         """Helper function to send a relation pointing at `self.parent_id`
 
@@ -115,16 +116,50 @@ def _send_relation(
             content,
             access_token=access_token,
         )
+        self.assertEqual(expected_response_code, channel.code, channel.json_body)
         return channel
 
+    def _get_related_events(self) -> List[str]:
+        """
+        Requests /relations on the parent ID and returns a list of event IDs.
+        """
+        # Request the relations of the event.
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}",
+            access_token=self.user_token,
+        )
+        self.assertEquals(200, channel.code, channel.json_body)
+        return [ev["event_id"] for ev in channel.json_body["chunk"]]
+
+    def _get_bundled_aggregations(self) -> JsonDict:
+        """
+        Requests /event on the parent ID and returns the m.relations field (from unsigned), if it exists.
+        """
+        # Fetch the bundled aggregations of the event.
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/unstable/rooms/{self.room}/event/{self.parent_id}",
+            access_token=self.user_token,
+        )
+        self.assertEquals(200, channel.code, channel.json_body)
+        return channel.json_body["unsigned"].get("m.relations", {})
+
+    def _get_aggregations(self) -> List[JsonDict]:
+        """Request /aggregations on the parent ID and return the response's chunk."""
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}",
+            access_token=self.user_token,
+        )
+        self.assertEqual(200, channel.code, channel.json_body)
+        return channel.json_body["chunk"]
+
 
 class RelationsTestCase(BaseRelationsTestCase):
     def test_send_relation(self) -> None:
         """Tests that sending a relation works."""
-        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍")
-        self.assertEqual(200, channel.code, channel.json_body)
-
         event_id = channel.json_body["event_id"]
 
         channel = self.make_request(
@@ -151,13 +186,13 @@ def test_send_relation(self) -> None:
 
     def test_deny_invalid_event(self) -> None:
         """Test that we deny relations on non-existent events"""
-        channel = self._send_relation(
+        self._send_relation(
             RelationTypes.ANNOTATION,
             EventTypes.Message,
             parent_id="foo",
             content={"body": "foo", "msgtype": "m.text"},
+            expected_response_code=400,
         )
-        self.assertEqual(400, channel.code, channel.json_body)
 
         # Unless that event is referenced from another event!
         self.get_success(
@@ -171,13 +206,12 @@ def test_deny_invalid_event(self) -> None:
                 desc="test_deny_invalid_event",
             )
         )
-        channel = self._send_relation(
+        self._send_relation(
             RelationTypes.THREAD,
             EventTypes.Message,
             parent_id="foo",
             content={"body": "foo", "msgtype": "m.text"},
         )
-        self.assertEqual(200, channel.code, channel.json_body)
 
     def test_deny_invalid_room(self) -> None:
         """Test that we deny relations on events in other rooms"""
@@ -187,18 +221,20 @@ def test_deny_invalid_room(self) -> None:
         parent_id = res["event_id"]
 
         # Attempt to send an annotation to that event.
-        channel = self._send_relation(
-            RelationTypes.ANNOTATION, "m.reaction", parent_id=parent_id, key="A"
+        self._send_relation(
+            RelationTypes.ANNOTATION,
+            "m.reaction",
+            parent_id=parent_id,
+            key="A",
+            expected_response_code=400,
         )
-        self.assertEqual(400, channel.code, channel.json_body)
 
     def test_deny_double_react(self) -> None:
         """Test that we deny double reacts (duplicate annotations on an event)"""
-        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a")
-        self.assertEqual(200, channel.code, channel.json_body)
-
-        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
-        self.assertEqual(400, channel.code, channel.json_body)
+        self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a")
+        self._send_relation(
+            RelationTypes.ANNOTATION, "m.reaction", "a", expected_response_code=400
+        )
 
     def test_deny_forked_thread(self) -> None:
         """It is invalid to start a thread off a thread."""
@@ -208,461 +244,160 @@ def test_deny_forked_thread(self) -> None:
             content={"msgtype": "m.text", "body": "foo"},
             parent_id=self.parent_id,
         )
-        self.assertEqual(200, channel.code, channel.json_body)
         parent_id = channel.json_body["event_id"]
 
-        channel = self._send_relation(
+        self._send_relation(
             RelationTypes.THREAD,
             "m.room.message",
             content={"msgtype": "m.text", "body": "foo"},
             parent_id=parent_id,
+            expected_response_code=400,
         )
-        self.assertEqual(400, channel.code, channel.json_body)
 
-    def test_basic_paginate_relations(self) -> None:
-        """Tests that calling pagination API correctly the latest relations."""
-        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
-        self.assertEqual(200, channel.code, channel.json_body)
-        first_annotation_id = channel.json_body["event_id"]
+    def test_aggregation(self) -> None:
+        """Test that annotations get correctly aggregated."""
 
-        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b")
-        self.assertEqual(200, channel.code, channel.json_body)
-        second_annotation_id = channel.json_body["event_id"]
+        self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
+        self._send_relation(
+            RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token
+        )
+        self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b")
 
         channel = self.make_request(
             "GET",
-            f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1",
+            f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}",
             access_token=self.user_token,
         )
         self.assertEqual(200, channel.code, channel.json_body)
 
-        # We expect to get back a single pagination result, which is the latest
-        # full relation event we sent above.
-        self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body)
-        self.assert_dict(
+        self.assertEqual(
+            channel.json_body,
             {
-                "event_id": second_annotation_id,
-                "sender": self.user_id,
-                "type": "m.reaction",
+                "chunk": [
+                    {"type": "m.reaction", "key": "a", "count": 2},
+                    {"type": "m.reaction", "key": "b", "count": 1},
+                ]
             },
-            channel.json_body["chunk"][0],
-        )
-
-        # We also expect to get the original event (the id of which is self.parent_id)
-        self.assertEqual(
-            channel.json_body["original_event"]["event_id"], self.parent_id
         )
 
-        # Make sure next_batch has something in it that looks like it could be a
-        # valid token.
- self.assertIsInstance( - channel.json_body.get("next_batch"), str, channel.json_body - ) + def test_aggregation_must_be_annotation(self) -> None: + """Test that aggregations must be annotations.""" - # Request the relations again, but with a different direction. channel = self.make_request( "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations" - f"/{self.parent_id}?limit=1&org.matrix.msc3715.dir=f", + f"/_matrix/client/unstable/rooms/{self.room}/aggregations" + f"/{self.parent_id}/{RelationTypes.REPLACE}?limit=1", access_token=self.user_token, ) - self.assertEqual(200, channel.code, channel.json_body) + self.assertEqual(400, channel.code, channel.json_body) - # We expect to get back a single pagination result, which is the earliest - # full relation event we sent above. - self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) - self.assert_dict( - { - "event_id": first_annotation_id, - "sender": self.user_id, - "type": "m.reaction", - }, - channel.json_body["chunk"][0], - ) + @unittest.override_config({"experimental_features": {"msc3666_enabled": True}}) + def test_bundled_aggregations(self) -> None: + """ + Test that annotations, references, and threads get correctly bundled. - def test_repeated_paginate_relations(self) -> None: - """Test that if we paginate using a limit and tokens then we get the - expected events. + Note that this doesn't test against /relations since only thread relations + get bundled via that API. See test_aggregation_get_event_for_thread. + + See test_edit for a similar test for edits. """ + # Setup by sending a variety of relations. + self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + self._send_relation( + RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token + ) + self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b") - expected_event_ids = [] - for idx in range(10): - channel = self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", chr(ord("a") + idx) - ) - self.assertEqual(200, channel.code, channel.json_body) - expected_event_ids.append(channel.json_body["event_id"]) + channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test") + reply_1 = channel.json_body["event_id"] - prev_token = "" - found_event_ids: List[str] = [] - for _ in range(20): - from_token = "" - if prev_token: - from_token = "&from=" + prev_token + channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test") + reply_2 = channel.json_body["event_id"] - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1{from_token}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) + self._send_relation(RelationTypes.THREAD, "m.room.test") - found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"]) - next_batch = channel.json_body.get("next_batch") + channel = self._send_relation(RelationTypes.THREAD, "m.room.test") + thread_2 = channel.json_body["event_id"] - self.assertNotEqual(prev_token, next_batch) - prev_token = next_batch + def assert_bundle(event_json: JsonDict) -> None: + """Assert the expected values of the bundled aggregations.""" + relations_dict = event_json["unsigned"].get("m.relations") - if not prev_token: - break + # Ensure the fields are as expected. 
+ self.assertCountEqual( + relations_dict.keys(), + ( + RelationTypes.ANNOTATION, + RelationTypes.REFERENCE, + RelationTypes.THREAD, + ), + ) - # We paginated backwards, so reverse - found_event_ids.reverse() - self.assertEqual(found_event_ids, expected_event_ids) + # Check the values of each field. + self.assertEqual( + { + "chunk": [ + {"type": "m.reaction", "key": "a", "count": 2}, + {"type": "m.reaction", "key": "b", "count": 1}, + ] + }, + relations_dict[RelationTypes.ANNOTATION], + ) - def test_pagination_from_sync_and_messages(self) -> None: - """Pagination tokens from /sync and /messages can be used to paginate /relations.""" - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "A") - self.assertEqual(200, channel.code, channel.json_body) - annotation_id = channel.json_body["event_id"] - # Send an event after the relation events. - self.helper.send(self.room, body="Latest event", tok=self.user_token) + self.assertEqual( + {"chunk": [{"event_id": reply_1}, {"event_id": reply_2}]}, + relations_dict[RelationTypes.REFERENCE], + ) - # Request /sync, limiting it such that only the latest event is returned - # (and not the relation). - filter = urllib.parse.quote_plus(b'{"room": {"timeline": {"limit": 1}}}') + self.assertEqual( + 2, + relations_dict[RelationTypes.THREAD].get("count"), + ) + self.assertTrue( + relations_dict[RelationTypes.THREAD].get("current_user_participated") + ) + # The latest thread event has some fields that don't matter. + self.assert_dict( + { + "content": { + "m.relates_to": { + "event_id": self.parent_id, + "rel_type": RelationTypes.THREAD, + } + }, + "event_id": thread_2, + "sender": self.user_id, + "type": "m.room.test", + }, + relations_dict[RelationTypes.THREAD].get("latest_event"), + ) + + # Request the event directly. channel = self.make_request( - "GET", f"/sync?filter={filter}", access_token=self.user_token + "GET", + f"/rooms/{self.room}/event/{self.parent_id}", + access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) - room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] - sync_prev_batch = room_timeline["prev_batch"] - self.assertIsNotNone(sync_prev_batch) - # Ensure the relation event is not in the batch returned from /sync. - self.assertNotIn( - annotation_id, [ev["event_id"] for ev in room_timeline["events"]] - ) + assert_bundle(channel.json_body) - # Request /messages, limiting it such that only the latest event is - # returned (and not the relation). + # Request the room messages. channel = self.make_request( "GET", - f"/rooms/{self.room}/messages?dir=b&limit=1", + f"/rooms/{self.room}/messages?dir=b", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) - messages_end = channel.json_body["end"] - self.assertIsNotNone(messages_end) - # Ensure the relation event is not in the chunk returned from /messages. - self.assertNotIn( - annotation_id, [ev["event_id"] for ev in channel.json_body["chunk"]] + assert_bundle(self._find_event_in_chunk(channel.json_body["chunk"])) + + # Request the room context. + channel = self.make_request( + "GET", + f"/rooms/{self.room}/context/{self.parent_id}", + access_token=self.user_token, ) - - # Request /relations with the pagination tokens received from both the - # /sync and /messages responses above, in turn. - # - # This is a tiny bit silly since the client wouldn't know the parent ID - # from the requests above; consider the parent ID to be known from a - # previous /sync. 
- for from_token in (sync_prev_batch, messages_end): - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?from={from_token}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - - # The relation should be in the returned chunk. - self.assertIn( - annotation_id, [ev["event_id"] for ev in channel.json_body["chunk"]] - ) - - def test_aggregation_pagination_groups(self) -> None: - """Test that we can paginate annotation groups correctly.""" - - # We need to create ten separate users to send each reaction. - access_tokens = [self.user_token, self.user2_token] - idx = 0 - while len(access_tokens) < 10: - user_id, token = self._create_user("test" + str(idx)) - idx += 1 - - self.helper.join(self.room, user=user_id, tok=token) - access_tokens.append(token) - - idx = 0 - sent_groups = {"👍": 10, "a": 7, "b": 5, "c": 3, "d": 2, "e": 1} - for key in itertools.chain.from_iterable( - itertools.repeat(key, num) for key, num in sent_groups.items() - ): - channel = self._send_relation( - RelationTypes.ANNOTATION, - "m.reaction", - key=key, - access_token=access_tokens[idx], - ) - self.assertEqual(200, channel.code, channel.json_body) - - idx += 1 - idx %= len(access_tokens) - - prev_token: Optional[str] = None - found_groups: Dict[str, int] = {} - for _ in range(20): - from_token = "" - if prev_token: - from_token = "&from=" + prev_token - - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}?limit=1{from_token}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - - self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) - - for groups in channel.json_body["chunk"]: - # We only expect reactions - self.assertEqual(groups["type"], "m.reaction", channel.json_body) - - # We should only see each key once - self.assertNotIn(groups["key"], found_groups, channel.json_body) - - found_groups[groups["key"]] = groups["count"] - - next_batch = channel.json_body.get("next_batch") - - self.assertNotEqual(prev_token, next_batch) - prev_token = next_batch - - if not prev_token: - break - - self.assertEqual(sent_groups, found_groups) - - def test_aggregation_pagination_within_group(self) -> None: - """Test that we can paginate within an annotation group.""" - - # We need to create ten separate users to send each reaction. 
- access_tokens = [self.user_token, self.user2_token] - idx = 0 - while len(access_tokens) < 10: - user_id, token = self._create_user("test" + str(idx)) - idx += 1 - - self.helper.join(self.room, user=user_id, tok=token) - access_tokens.append(token) - - idx = 0 - expected_event_ids = [] - for _ in range(10): - channel = self._send_relation( - RelationTypes.ANNOTATION, - "m.reaction", - key="👍", - access_token=access_tokens[idx], - ) - self.assertEqual(200, channel.code, channel.json_body) - expected_event_ids.append(channel.json_body["event_id"]) - - idx += 1 - - # Also send a different type of reaction so that we test we don't see it - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a") - self.assertEqual(200, channel.code, channel.json_body) - - prev_token = "" - found_event_ids: List[str] = [] - encoded_key = urllib.parse.quote_plus("👍".encode()) - for _ in range(20): - from_token = "" - if prev_token: - from_token = "&from=" + prev_token - - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}" - f"/aggregations/{self.parent_id}/{RelationTypes.ANNOTATION}" - f"/m.reaction/{encoded_key}?limit=1{from_token}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - - self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) - - found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"]) - - next_batch = channel.json_body.get("next_batch") - - self.assertNotEqual(prev_token, next_batch) - prev_token = next_batch - - if not prev_token: - break - - # We paginated backwards, so reverse - found_event_ids.reverse() - self.assertEqual(found_event_ids, expected_event_ids) - - def test_aggregation(self) -> None: - """Test that annotations get correctly aggregated.""" - - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - self.assertEqual(200, channel.code, channel.json_body) - - channel = self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token - ) - self.assertEqual(200, channel.code, channel.json_body) - - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b") - self.assertEqual(200, channel.code, channel.json_body) - - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - - self.assertEqual( - channel.json_body, - { - "chunk": [ - {"type": "m.reaction", "key": "a", "count": 2}, - {"type": "m.reaction", "key": "b", "count": 1}, - ] - }, - ) - - def test_aggregation_must_be_annotation(self) -> None: - """Test that aggregations must be annotations.""" - - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/aggregations" - f"/{self.parent_id}/{RelationTypes.REPLACE}?limit=1", - access_token=self.user_token, - ) - self.assertEqual(400, channel.code, channel.json_body) - - @unittest.override_config({"experimental_features": {"msc3666_enabled": True}}) - def test_bundled_aggregations(self) -> None: - """ - Test that annotations, references, and threads get correctly bundled. - - Note that this doesn't test against /relations since only thread relations - get bundled via that API. See test_aggregation_get_event_for_thread. - - See test_edit for a similar test for edits. - """ - # Setup by sending a variety of relations. 
- channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - self.assertEqual(200, channel.code, channel.json_body) - - channel = self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token - ) - self.assertEqual(200, channel.code, channel.json_body) - - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b") - self.assertEqual(200, channel.code, channel.json_body) - - channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test") - self.assertEqual(200, channel.code, channel.json_body) - reply_1 = channel.json_body["event_id"] - - channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test") - self.assertEqual(200, channel.code, channel.json_body) - reply_2 = channel.json_body["event_id"] - - channel = self._send_relation(RelationTypes.THREAD, "m.room.test") - self.assertEqual(200, channel.code, channel.json_body) - - channel = self._send_relation(RelationTypes.THREAD, "m.room.test") - self.assertEqual(200, channel.code, channel.json_body) - thread_2 = channel.json_body["event_id"] - - def assert_bundle(event_json: JsonDict) -> None: - """Assert the expected values of the bundled aggregations.""" - relations_dict = event_json["unsigned"].get("m.relations") - - # Ensure the fields are as expected. - self.assertCountEqual( - relations_dict.keys(), - ( - RelationTypes.ANNOTATION, - RelationTypes.REFERENCE, - RelationTypes.THREAD, - ), - ) - - # Check the values of each field. - self.assertEqual( - { - "chunk": [ - {"type": "m.reaction", "key": "a", "count": 2}, - {"type": "m.reaction", "key": "b", "count": 1}, - ] - }, - relations_dict[RelationTypes.ANNOTATION], - ) - - self.assertEqual( - {"chunk": [{"event_id": reply_1}, {"event_id": reply_2}]}, - relations_dict[RelationTypes.REFERENCE], - ) - - self.assertEqual( - 2, - relations_dict[RelationTypes.THREAD].get("count"), - ) - self.assertTrue( - relations_dict[RelationTypes.THREAD].get("current_user_participated") - ) - # The latest thread event has some fields that don't matter. - self.assert_dict( - { - "content": { - "m.relates_to": { - "event_id": self.parent_id, - "rel_type": RelationTypes.THREAD, - } - }, - "event_id": thread_2, - "sender": self.user_id, - "type": "m.room.test", - }, - relations_dict[RelationTypes.THREAD].get("latest_event"), - ) - - # Request the event directly. - channel = self.make_request( - "GET", - f"/rooms/{self.room}/event/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - assert_bundle(channel.json_body) - - # Request the room messages. - channel = self.make_request( - "GET", - f"/rooms/{self.room}/messages?dir=b", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - assert_bundle(self._find_event_in_chunk(channel.json_body["chunk"])) - - # Request the room context. - channel = self.make_request( - "GET", - f"/rooms/{self.room}/context/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - assert_bundle(channel.json_body["event"]) + self.assertEqual(200, channel.code, channel.json_body) + assert_bundle(channel.json_body["event"]) # Request sync. channel = self.make_request("GET", "/sync", access_token=self.user_token) @@ -693,14 +428,12 @@ def test_aggregation_get_event_for_annotation(self) -> None: when directly requested. 
""" channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - self.assertEqual(200, channel.code, channel.json_body) annotation_id = channel.json_body["event_id"] # Annotate the annotation. - channel = self._send_relation( + self._send_relation( RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=annotation_id ) - self.assertEqual(200, channel.code, channel.json_body) channel = self.make_request( "GET", @@ -713,14 +446,12 @@ def test_aggregation_get_event_for_annotation(self) -> None: def test_aggregation_get_event_for_thread(self) -> None: """Test that threads get bundled aggregations included when directly requested.""" channel = self._send_relation(RelationTypes.THREAD, "m.room.test") - self.assertEqual(200, channel.code, channel.json_body) thread_id = channel.json_body["event_id"] # Annotate the annotation. - channel = self._send_relation( + self._send_relation( RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=thread_id ) - self.assertEqual(200, channel.code, channel.json_body) channel = self.make_request( "GET", @@ -877,8 +608,6 @@ def test_edit(self) -> None: "m.room.message", content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body}, ) - self.assertEqual(200, channel.code, channel.json_body) - edit_event_id = channel.json_body["event_id"] def assert_bundle(event_json: JsonDict) -> None: @@ -954,7 +683,7 @@ def test_multi_edit(self) -> None: shouldn't be allowed, are correctly handled. """ - channel = self._send_relation( + self._send_relation( RelationTypes.REPLACE, "m.room.message", content={ @@ -963,309 +692,575 @@ def test_multi_edit(self) -> None: "m.new_content": {"msgtype": "m.text", "body": "First edit"}, }, ) + + new_body = {"msgtype": "m.text", "body": "I've been edited!"} + channel = self._send_relation( + RelationTypes.REPLACE, + "m.room.message", + content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body}, + ) + edit_event_id = channel.json_body["event_id"] + + self._send_relation( + RelationTypes.REPLACE, + "m.room.message.WRONG_TYPE", + content={ + "msgtype": "m.text", + "body": "Wibble", + "m.new_content": {"msgtype": "m.text", "body": "Edit, but wrong type"}, + }, + ) + + channel = self.make_request( + "GET", + f"/rooms/{self.room}/event/{self.parent_id}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + + self.assertEqual(channel.json_body["content"], new_body) + + relations_dict = channel.json_body["unsigned"].get("m.relations") + self.assertIn(RelationTypes.REPLACE, relations_dict) + + m_replace_dict = relations_dict[RelationTypes.REPLACE] + for key in ["event_id", "sender", "origin_server_ts"]: + self.assertIn(key, m_replace_dict) + + self.assert_dict( + {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict + ) + + def test_edit_reply(self) -> None: + """Test that editing a reply works.""" + + # Create a reply to edit. 
+        channel = self._send_relation(
+            RelationTypes.REFERENCE,
+            "m.room.message",
+            content={"msgtype": "m.text", "body": "A reply!"},
+        )
+        reply = channel.json_body["event_id"]
+
+        new_body = {"msgtype": "m.text", "body": "I've been edited!"}
+        channel = self._send_relation(
+            RelationTypes.REPLACE,
+            "m.room.message",
+            content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body},
+            parent_id=reply,
+        )
+        edit_event_id = channel.json_body["event_id"]
+
+        channel = self.make_request(
+            "GET",
+            f"/rooms/{self.room}/event/{reply}",
+            access_token=self.user_token,
+        )
         self.assertEqual(200, channel.code, channel.json_body)
 
+        # We expect to see the new body in the dict, as well as the reference
+        # metadata still intact.
+        self.assertDictContainsSubset(new_body, channel.json_body["content"])
+        self.assertDictContainsSubset(
+            {
+                "m.relates_to": {
+                    "event_id": self.parent_id,
+                    "rel_type": "m.reference",
+                }
+            },
+            channel.json_body["content"],
+        )
+
+        # We expect that the edit relation appears in the unsigned relations
+        # section.
+        relations_dict = channel.json_body["unsigned"].get("m.relations")
+        self.assertIn(RelationTypes.REPLACE, relations_dict)
+
+        m_replace_dict = relations_dict[RelationTypes.REPLACE]
+        for key in ["event_id", "sender", "origin_server_ts"]:
+            self.assertIn(key, m_replace_dict)
+
+        self.assert_dict(
+            {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict
+        )
+
+    def test_edit_thread(self) -> None:
+        """Test that editing a thread works."""
+
+        # Create a thread and edit the last event.
+        channel = self._send_relation(
+            RelationTypes.THREAD,
+            "m.room.message",
+            content={"msgtype": "m.text", "body": "A threaded reply!"},
+        )
+        threaded_event_id = channel.json_body["event_id"]
 
         new_body = {"msgtype": "m.text", "body": "I've been edited!"}
         channel = self._send_relation(
             RelationTypes.REPLACE,
             "m.room.message",
             content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body},
+            parent_id=threaded_event_id,
+        )
+
+        # Fetch the thread root, to get the bundled aggregation for the thread.
+        channel = self.make_request(
+            "GET",
+            f"/rooms/{self.room}/event/{self.parent_id}",
+            access_token=self.user_token,
+        )
+        self.assertEqual(200, channel.code, channel.json_body)
+
+        # We expect that the edit message appears in the thread summary in the
+        # unsigned relations section.
+        relations_dict = channel.json_body["unsigned"].get("m.relations")
+        self.assertIn(RelationTypes.THREAD, relations_dict)
+
+        thread_summary = relations_dict[RelationTypes.THREAD]
+        self.assertIn("latest_event", thread_summary)
+        latest_event_in_thread = thread_summary["latest_event"]
+        self.assertEqual(latest_event_in_thread["content"]["body"], "I've been edited!")
+
+    def test_edit_edit(self) -> None:
+        """Test that an edit cannot be edited."""
+        new_body = {"msgtype": "m.text", "body": "Initial edit"}
+        channel = self._send_relation(
+            RelationTypes.REPLACE,
+            "m.room.message",
+            content={
+                "msgtype": "m.text",
+                "body": "Wibble",
+                "m.new_content": new_body,
+            },
+        )
+        edit_event_id = channel.json_body["event_id"]
+
+        # Edit the edit event.
+        channel = self._send_relation(
+            RelationTypes.REPLACE,
+            "m.room.message",
+            content={
+                "msgtype": "m.text",
+                "body": "foo",
+                "m.new_content": {"msgtype": "m.text", "body": "Ignored edit"},
+            },
+            parent_id=edit_event_id,
+        )
+
+        # Request the original event.
+ channel = self.make_request( + "GET", + f"/rooms/{self.room}/event/{self.parent_id}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + # The edit to the edit should be ignored. + self.assertEqual(channel.json_body["content"], new_body) + + # The relations information should not include the edit to the edit. + relations_dict = channel.json_body["unsigned"].get("m.relations") + self.assertIn(RelationTypes.REPLACE, relations_dict) + + m_replace_dict = relations_dict[RelationTypes.REPLACE] + for key in ["event_id", "sender", "origin_server_ts"]: + self.assertIn(key, m_replace_dict) + + self.assert_dict( + {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict + ) + + def test_unknown_relations(self) -> None: + """Unknown relations should be accepted.""" + channel = self._send_relation("m.relation.test", "m.room.test") + event_id = channel.json_body["event_id"] + + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + + # We expect to get back a single pagination result, which is the full + # relation event we sent above. + self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) + self.assert_dict( + {"event_id": event_id, "sender": self.user_id, "type": "m.room.test"}, + channel.json_body["chunk"][0], + ) + + # We also expect to get the original event (the id of which is self.parent_id) + self.assertEqual( + channel.json_body["original_event"]["event_id"], self.parent_id + ) + + # When bundling the unknown relation is not included. + channel = self.make_request( + "GET", + f"/rooms/{self.room}/event/{self.parent_id}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + self.assertNotIn("m.relations", channel.json_body["unsigned"]) + + # But unknown relations can be directly queried. + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}?limit=1", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + self.assertEqual(channel.json_body["chunk"], []) + + def _find_event_in_chunk(self, events: List[JsonDict]) -> JsonDict: + """ + Find the parent event in a chunk of events and assert that it has the proper bundled aggregations. + """ + for event in events: + if event["event_id"] == self.parent_id: + return event + + raise AssertionError(f"Event {self.parent_id} not found in chunk") + + def test_background_update(self) -> None: + """Test the event_arbitrary_relations background update.""" + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") + annotation_event_id_good = channel.json_body["event_id"] + + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="A") + annotation_event_id_bad = channel.json_body["event_id"] + + channel = self._send_relation(RelationTypes.THREAD, "m.room.test") + thread_event_id = channel.json_body["event_id"] + + # Clean-up the table as if the inserts did not happen during event creation. + self.get_success( + self.store.db_pool.simple_delete_many( + table="event_relations", + column="event_id", + iterable=(annotation_event_id_bad, thread_event_id), + keyvalues={}, + desc="RelationsTestCase.test_background_update", + ) + ) + + # Only the "good" annotation should be found. 
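+        # (The rows for the "bad" annotation and the thread were deleted from
+        # event_relations above, so those relations are invisible to the API
+        # at this point.)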
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=10",
+            access_token=self.user_token,
         )
         self.assertEqual(200, channel.code, channel.json_body)
+        self.assertEqual(
+            [ev["event_id"] for ev in channel.json_body["chunk"]],
+            [annotation_event_id_good],
+        )
 
-        edit_event_id = channel.json_body["event_id"]
-
-        channel = self._send_relation(
-            RelationTypes.REPLACE,
-            "m.room.message.WRONG_TYPE",
-            content={
-                "msgtype": "m.text",
-                "body": "Wibble",
-                "m.new_content": {"msgtype": "m.text", "body": "Edit, but wrong type"},
-            },
+        # Insert and run the background update.
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "background_updates",
+                {"update_name": "event_arbitrary_relations", "progress_json": "{}"},
+            )
         )
-        self.assertEqual(200, channel.code, channel.json_body)
 
+        # Ugh, have to reset this flag
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # The "good" annotation and the thread should be found, but not the "bad"
+        # annotation.
         channel = self.make_request(
             "GET",
-            f"/rooms/{self.room}/event/{self.parent_id}",
+            f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=10",
             access_token=self.user_token,
         )
         self.assertEqual(200, channel.code, channel.json_body)
+        self.assertCountEqual(
+            [ev["event_id"] for ev in channel.json_body["chunk"]],
+            [annotation_event_id_good, thread_event_id],
+        )
 
-        self.assertEqual(channel.json_body["content"], new_body)
 
-        relations_dict = channel.json_body["unsigned"].get("m.relations")
-        self.assertIn(RelationTypes.REPLACE, relations_dict)
+class RelationPaginationTestCase(BaseRelationsTestCase):
+    def test_basic_paginate_relations(self) -> None:
+        """Tests that the pagination API correctly returns the latest relations."""
+        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
+        first_annotation_id = channel.json_body["event_id"]
 
-        m_replace_dict = relations_dict[RelationTypes.REPLACE]
-        for key in ["event_id", "sender", "origin_server_ts"]:
-            self.assertIn(key, m_replace_dict)
+        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b")
+        second_annotation_id = channel.json_body["event_id"]
 
-        self.assert_dict(
-            {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict
+        channel = self.make_request(
+            "GET",
+            f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1",
+            access_token=self.user_token,
         )
+        self.assertEqual(200, channel.code, channel.json_body)
 
-    def test_edit_reply(self) -> None:
-        """Test that editing a reply works."""
-
-        # Create a reply to edit.
-        channel = self._send_relation(
-            RelationTypes.REFERENCE,
-            "m.room.message",
-            content={"msgtype": "m.text", "body": "A reply!"},
+        # We expect to get back a single pagination result, which is the latest
+        # full relation event we sent above.
+ self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) + self.assert_dict( + { + "event_id": second_annotation_id, + "sender": self.user_id, + "type": "m.reaction", + }, + channel.json_body["chunk"][0], ) - self.assertEqual(200, channel.code, channel.json_body) - reply = channel.json_body["event_id"] - new_body = {"msgtype": "m.text", "body": "I've been edited!"} - channel = self._send_relation( - RelationTypes.REPLACE, - "m.room.message", - content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body}, - parent_id=reply, + # We also expect to get the original event (the id of which is self.parent_id) + self.assertEqual( + channel.json_body["original_event"]["event_id"], self.parent_id ) - self.assertEqual(200, channel.code, channel.json_body) - edit_event_id = channel.json_body["event_id"] + # Make sure next_batch has something in it that looks like it could be a + # valid token. + self.assertIsInstance( + channel.json_body.get("next_batch"), str, channel.json_body + ) + # Request the relations again, but with a different direction. channel = self.make_request( "GET", - f"/rooms/{self.room}/event/{reply}", + f"/_matrix/client/unstable/rooms/{self.room}/relations" + f"/{self.parent_id}?limit=1&org.matrix.msc3715.dir=f", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) - # We expect to see the new body in the dict, as well as the reference - # metadata sill intact. - self.assertDictContainsSubset(new_body, channel.json_body["content"]) - self.assertDictContainsSubset( + # We expect to get back a single pagination result, which is the earliest + # full relation event we sent above. + self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) + self.assert_dict( { - "m.relates_to": { - "event_id": self.parent_id, - "rel_type": "m.reference", - } + "event_id": first_annotation_id, + "sender": self.user_id, + "type": "m.reaction", }, - channel.json_body["content"], + channel.json_body["chunk"][0], ) - # We expect that the edit relation appears in the unsigned relations - # section. - relations_dict = channel.json_body["unsigned"].get("m.relations") - self.assertIn(RelationTypes.REPLACE, relations_dict) + def test_repeated_paginate_relations(self) -> None: + """Test that if we paginate using a limit and tokens then we get the + expected events. + """ - m_replace_dict = relations_dict[RelationTypes.REPLACE] - for key in ["event_id", "sender", "origin_server_ts"]: - self.assertIn(key, m_replace_dict) + expected_event_ids = [] + for idx in range(10): + channel = self._send_relation( + RelationTypes.ANNOTATION, "m.reaction", chr(ord("a") + idx) + ) + expected_event_ids.append(channel.json_body["event_id"]) - self.assert_dict( - {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict - ) + prev_token = "" + found_event_ids: List[str] = [] + for _ in range(20): + from_token = "" + if prev_token: + from_token = "&from=" + prev_token - def test_edit_thread(self) -> None: - """Test that editing a thread works.""" + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1{from_token}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) - # Create a thread and edit the last event. 
- channel = self._send_relation( - RelationTypes.THREAD, - "m.room.message", - content={"msgtype": "m.text", "body": "A threaded reply!"}, - ) - self.assertEqual(200, channel.code, channel.json_body) - threaded_event_id = channel.json_body["event_id"] + found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"]) + next_batch = channel.json_body.get("next_batch") - new_body = {"msgtype": "m.text", "body": "I've been edited!"} - channel = self._send_relation( - RelationTypes.REPLACE, - "m.room.message", - content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body}, - parent_id=threaded_event_id, + self.assertNotEqual(prev_token, next_batch) + prev_token = next_batch + + if not prev_token: + break + + # We paginated backwards, so reverse + found_event_ids.reverse() + self.assertEqual(found_event_ids, expected_event_ids) + + def test_pagination_from_sync_and_messages(self) -> None: + """Pagination tokens from /sync and /messages can be used to paginate /relations.""" + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "A") + annotation_id = channel.json_body["event_id"] + # Send an event after the relation events. + self.helper.send(self.room, body="Latest event", tok=self.user_token) + + # Request /sync, limiting it such that only the latest event is returned + # (and not the relation). + filter = urllib.parse.quote_plus(b'{"room": {"timeline": {"limit": 1}}}') + channel = self.make_request( + "GET", f"/sync?filter={filter}", access_token=self.user_token ) self.assertEqual(200, channel.code, channel.json_body) + room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] + sync_prev_batch = room_timeline["prev_batch"] + self.assertIsNotNone(sync_prev_batch) + # Ensure the relation event is not in the batch returned from /sync. + self.assertNotIn( + annotation_id, [ev["event_id"] for ev in room_timeline["events"]] + ) - # Fetch the thread root, to get the bundled aggregation for the thread. + # Request /messages, limiting it such that only the latest event is + # returned (and not the relation). channel = self.make_request( "GET", - f"/rooms/{self.room}/event/{self.parent_id}", + f"/rooms/{self.room}/messages?dir=b&limit=1", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) + messages_end = channel.json_body["end"] + self.assertIsNotNone(messages_end) + # Ensure the relation event is not in the chunk returned from /messages. + self.assertNotIn( + annotation_id, [ev["event_id"] for ev in channel.json_body["chunk"]] + ) - # We expect that the edit message appears in the thread summary in the - # unsigned relations section. - relations_dict = channel.json_body["unsigned"].get("m.relations") - self.assertIn(RelationTypes.THREAD, relations_dict) + # Request /relations with the pagination tokens received from both the + # /sync and /messages responses above, in turn. + # + # This is a tiny bit silly since the client wouldn't know the parent ID + # from the requests above; consider the parent ID to be known from a + # previous /sync. 
+ for from_token in (sync_prev_batch, messages_end): + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?from={from_token}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) - thread_summary = relations_dict[RelationTypes.THREAD] - self.assertIn("latest_event", thread_summary) - latest_event_in_thread = thread_summary["latest_event"] - self.assertEqual(latest_event_in_thread["content"]["body"], "I've been edited!") + # The relation should be in the returned chunk. + self.assertIn( + annotation_id, [ev["event_id"] for ev in channel.json_body["chunk"]] + ) + + def test_aggregation_pagination_groups(self) -> None: + """Test that we can paginate annotation groups correctly.""" + + # We need to create ten separate users to send each reaction. + access_tokens = [self.user_token, self.user2_token] + idx = 0 + while len(access_tokens) < 10: + user_id, token = self._create_user("test" + str(idx)) + idx += 1 + + self.helper.join(self.room, user=user_id, tok=token) + access_tokens.append(token) + + idx = 0 + sent_groups = {"👍": 10, "a": 7, "b": 5, "c": 3, "d": 2, "e": 1} + for key in itertools.chain.from_iterable( + itertools.repeat(key, num) for key, num in sent_groups.items() + ): + self._send_relation( + RelationTypes.ANNOTATION, + "m.reaction", + key=key, + access_token=access_tokens[idx], + ) + + idx += 1 + idx %= len(access_tokens) - def test_edit_edit(self) -> None: - """Test that an edit cannot be edited.""" - new_body = {"msgtype": "m.text", "body": "Initial edit"} - channel = self._send_relation( - RelationTypes.REPLACE, - "m.room.message", - content={ - "msgtype": "m.text", - "body": "Wibble", - "m.new_content": new_body, - }, - ) - self.assertEqual(200, channel.code, channel.json_body) - edit_event_id = channel.json_body["event_id"] + prev_token: Optional[str] = None + found_groups: Dict[str, int] = {} + for _ in range(20): + from_token = "" + if prev_token: + from_token = "&from=" + prev_token - # Edit the edit event. - channel = self._send_relation( - RelationTypes.REPLACE, - "m.room.message", - content={ - "msgtype": "m.text", - "body": "foo", - "m.new_content": {"msgtype": "m.text", "body": "Ignored edit"}, - }, - parent_id=edit_event_id, - ) - self.assertEqual(200, channel.code, channel.json_body) + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}?limit=1{from_token}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) - # Request the original event. - channel = self.make_request( - "GET", - f"/rooms/{self.room}/event/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - # The edit to the edit should be ignored. - self.assertEqual(channel.json_body["content"], new_body) + self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) - # The relations information should not include the edit to the edit. 
- relations_dict = channel.json_body["unsigned"].get("m.relations") - self.assertIn(RelationTypes.REPLACE, relations_dict) + for groups in channel.json_body["chunk"]: + # We only expect reactions + self.assertEqual(groups["type"], "m.reaction", channel.json_body) - m_replace_dict = relations_dict[RelationTypes.REPLACE] - for key in ["event_id", "sender", "origin_server_ts"]: - self.assertIn(key, m_replace_dict) + # We should only see each key once + self.assertNotIn(groups["key"], found_groups, channel.json_body) - self.assert_dict( - {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict - ) + found_groups[groups["key"]] = groups["count"] - def test_unknown_relations(self) -> None: - """Unknown relations should be accepted.""" - channel = self._send_relation("m.relation.test", "m.room.test") - self.assertEqual(200, channel.code, channel.json_body) - event_id = channel.json_body["event_id"] + next_batch = channel.json_body.get("next_batch") - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) + self.assertNotEqual(prev_token, next_batch) + prev_token = next_batch - # We expect to get back a single pagination result, which is the full - # relation event we sent above. - self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) - self.assert_dict( - {"event_id": event_id, "sender": self.user_id, "type": "m.room.test"}, - channel.json_body["chunk"][0], - ) + if not prev_token: + break - # We also expect to get the original event (the id of which is self.parent_id) - self.assertEqual( - channel.json_body["original_event"]["event_id"], self.parent_id - ) + self.assertEqual(sent_groups, found_groups) - # When bundling the unknown relation is not included. - channel = self.make_request( - "GET", - f"/rooms/{self.room}/event/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - self.assertNotIn("m.relations", channel.json_body["unsigned"]) + def test_aggregation_pagination_within_group(self) -> None: + """Test that we can paginate within an annotation group.""" - # But unknown relations can be directly queried. - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}?limit=1", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - self.assertEqual(channel.json_body["chunk"], []) + # We need to create ten separate users to send each reaction. + access_tokens = [self.user_token, self.user2_token] + idx = 0 + while len(access_tokens) < 10: + user_id, token = self._create_user("test" + str(idx)) + idx += 1 - def _find_event_in_chunk(self, events: List[JsonDict]) -> JsonDict: - """ - Find the parent event in a chunk of events and assert that it has the proper bundled aggregations. 
- """ - for event in events: - if event["event_id"] == self.parent_id: - return event + self.helper.join(self.room, user=user_id, tok=token) + access_tokens.append(token) - raise AssertionError(f"Event {self.parent_id} not found in chunk") + idx = 0 + expected_event_ids = [] + for _ in range(10): + channel = self._send_relation( + RelationTypes.ANNOTATION, + "m.reaction", + key="👍", + access_token=access_tokens[idx], + ) + expected_event_ids.append(channel.json_body["event_id"]) - def test_background_update(self) -> None: - """Test the event_arbitrary_relations background update.""" - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") - self.assertEqual(200, channel.code, channel.json_body) - annotation_event_id_good = channel.json_body["event_id"] + idx += 1 - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="A") - self.assertEqual(200, channel.code, channel.json_body) - annotation_event_id_bad = channel.json_body["event_id"] + # Also send a different type of reaction so that we test we don't see it + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a") - channel = self._send_relation(RelationTypes.THREAD, "m.room.test") - self.assertEqual(200, channel.code, channel.json_body) - thread_event_id = channel.json_body["event_id"] + prev_token = "" + found_event_ids: List[str] = [] + encoded_key = urllib.parse.quote_plus("👍".encode()) + for _ in range(20): + from_token = "" + if prev_token: + from_token = "&from=" + prev_token - # Clean-up the table as if the inserts did not happen during event creation. - self.get_success( - self.store.db_pool.simple_delete_many( - table="event_relations", - column="event_id", - iterable=(annotation_event_id_bad, thread_event_id), - keyvalues={}, - desc="RelationsTestCase.test_background_update", + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}" + f"/aggregations/{self.parent_id}/{RelationTypes.ANNOTATION}" + f"/m.reaction/{encoded_key}?limit=1{from_token}", + access_token=self.user_token, ) - ) + self.assertEqual(200, channel.code, channel.json_body) - # Only the "good" annotation should be found. - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=10", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - self.assertEqual( - [ev["event_id"] for ev in channel.json_body["chunk"]], - [annotation_event_id_good], - ) + self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) - # Insert and run the background update. - self.get_success( - self.store.db_pool.simple_insert( - "background_updates", - {"update_name": "event_arbitrary_relations", "progress_json": "{}"}, - ) - ) + found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"]) - # Ugh, have to reset this flag - self.store.db_pool.updates._all_done = False - self.wait_for_background_updates() + next_batch = channel.json_body.get("next_batch") - # The "good" annotation and the thread should be found, but not the "bad" - # annotation. 
- channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=10", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - self.assertCountEqual( - [ev["event_id"] for ev in channel.json_body["chunk"]], - [annotation_event_id_good, thread_event_id], - ) + self.assertNotEqual(prev_token, next_batch) + prev_token = next_batch + + if not prev_token: + break + + # We paginated backwards, so reverse + found_event_ids.reverse() + self.assertEqual(found_event_ids, expected_event_ids) class RelationRedactionTestCase(BaseRelationsTestCase): @@ -1294,46 +1289,6 @@ def _redact(self, event_id: str) -> None: ) self.assertEqual(200, channel.code, channel.json_body) - def _make_relation_requests(self) -> Tuple[List[str], JsonDict]: - """ - Makes requests and ensures they result in a 200 response, returns a - tuple of results: - - 1. `/relations` -> Returns a list of event IDs. - 2. `/event` -> Returns the response's m.relations field (from unsigned), - if it exists. - """ - - # Request the relations of the event. - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEquals(200, channel.code, channel.json_body) - event_ids = [ev["event_id"] for ev in channel.json_body["chunk"]] - - # Fetch the bundled aggregations of the event. - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/event/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEquals(200, channel.code, channel.json_body) - bundled_relations = channel.json_body["unsigned"].get("m.relations", {}) - - return event_ids, bundled_relations - - def _get_aggregations(self) -> List[JsonDict]: - """Request /aggregations on the parent ID and includes the returned chunk.""" - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/aggregations/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - return channel.json_body["chunk"] - def test_redact_relation_annotation(self) -> None: """ Test that annotations of an event are properly handled after the @@ -1343,17 +1298,16 @@ def test_redact_relation_annotation(self) -> None: the response to relations. """ channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - self.assertEqual(200, channel.code, channel.json_body) to_redact_event_id = channel.json_body["event_id"] channel = self._send_relation( RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token ) - self.assertEqual(200, channel.code, channel.json_body) unredacted_event_id = channel.json_body["event_id"] # Both relations should exist. - event_ids, relations = self._make_relation_requests() + event_ids = self._get_related_events() + relations = self._get_bundled_aggregations() self.assertCountEqual(event_ids, [to_redact_event_id, unredacted_event_id]) self.assertEquals( relations["m.annotation"], @@ -1368,7 +1322,8 @@ def test_redact_relation_annotation(self) -> None: self._redact(to_redact_event_id) # The unredacted relation should still exist. 
- event_ids, relations = self._make_relation_requests() + event_ids = self._get_related_events() + relations = self._get_bundled_aggregations() self.assertEquals(event_ids, [unredacted_event_id]) self.assertEquals( relations["m.annotation"], @@ -1391,7 +1346,6 @@ def test_redact_relation_thread(self) -> None: EventTypes.Message, content={"body": "reply 1", "msgtype": "m.text"}, ) - self.assertEqual(200, channel.code, channel.json_body) unredacted_event_id = channel.json_body["event_id"] # Note that the *last* event in the thread is redacted, as that gets @@ -1401,11 +1355,11 @@ def test_redact_relation_thread(self) -> None: EventTypes.Message, content={"body": "reply 2", "msgtype": "m.text"}, ) - self.assertEqual(200, channel.code, channel.json_body) to_redact_event_id = channel.json_body["event_id"] # Both relations exist. - event_ids, relations = self._make_relation_requests() + event_ids = self._get_related_events() + relations = self._get_bundled_aggregations() self.assertEquals(event_ids, [to_redact_event_id, unredacted_event_id]) self.assertDictContainsSubset( { @@ -1424,7 +1378,8 @@ def test_redact_relation_thread(self) -> None: self._redact(to_redact_event_id) # The unredacted relation should still exist. - event_ids, relations = self._make_relation_requests() + event_ids = self._get_related_events() + relations = self._get_bundled_aggregations() self.assertEquals(event_ids, [unredacted_event_id]) self.assertDictContainsSubset( { @@ -1444,7 +1399,7 @@ def test_redact_parent_edit(self) -> None: is redacted. """ # Add a relation - channel = self._send_relation( + self._send_relation( RelationTypes.REPLACE, "m.room.message", parent_id=self.parent_id, @@ -1454,10 +1409,10 @@ def test_redact_parent_edit(self) -> None: "m.new_content": {"msgtype": "m.text", "body": "First edit"}, }, ) - self.assertEqual(200, channel.code, channel.json_body) # Check the relation is returned - event_ids, relations = self._make_relation_requests() + event_ids = self._get_related_events() + relations = self._get_bundled_aggregations() self.assertEqual(len(event_ids), 1) self.assertIn(RelationTypes.REPLACE, relations) @@ -1465,7 +1420,8 @@ def test_redact_parent_edit(self) -> None: self._redact(self.parent_id) # The relations are not returned. - event_ids, relations = self._make_relation_requests() + event_ids = self._get_related_events() + relations = self._get_bundled_aggregations() self.assertEqual(len(event_ids), 0) self.assertEqual(relations, {}) @@ -1475,11 +1431,11 @@ def test_redact_parent_annotation(self) -> None: """ # Add a relation channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") - self.assertEqual(200, channel.code, channel.json_body) related_event_id = channel.json_body["event_id"] # The relations should exist. - event_ids, relations = self._make_relation_requests() + event_ids = self._get_related_events() + relations = self._get_bundled_aggregations() self.assertEqual(len(event_ids), 1) self.assertIn(RelationTypes.ANNOTATION, relations) @@ -1491,7 +1447,8 @@ def test_redact_parent_annotation(self) -> None: self._redact(self.parent_id) # The relations are returned. 
- event_ids, relations = self._make_relation_requests() + event_ids = self._get_related_events() + relations = self._get_bundled_aggregations() self.assertEquals(event_ids, [related_event_id]) self.assertEquals( relations["m.annotation"], @@ -1512,14 +1469,14 @@ def test_redact_parent_thread(self) -> None: EventTypes.Message, content={"body": "reply 1", "msgtype": "m.text"}, ) - self.assertEqual(200, channel.code, channel.json_body) related_event_id = channel.json_body["event_id"] # Redact one of the reactions. self._redact(self.parent_id) # The unredacted relation should still exist. - event_ids, relations = self._make_relation_requests() + event_ids = self._get_related_events() + relations = self._get_bundled_aggregations() self.assertEquals(len(event_ids), 1) self.assertDictContainsSubset( { From 86965605a4688d80dc0a74ed4993a52f282e970a Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 16 Mar 2022 13:52:59 +0000 Subject: [PATCH 409/474] Fix dead link in spam checker warning (#12231) --- changelog.d/12231.doc | 1 + synapse/config/spam_checker.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12231.doc diff --git a/changelog.d/12231.doc b/changelog.d/12231.doc new file mode 100644 index 000000000000..16593d2b9226 --- /dev/null +++ b/changelog.d/12231.doc @@ -0,0 +1 @@ +Fix the link to the module documentation in the legacy spam checker warning message. diff --git a/synapse/config/spam_checker.py b/synapse/config/spam_checker.py index a233a9ce0388..4c52103b1c29 100644 --- a/synapse/config/spam_checker.py +++ b/synapse/config/spam_checker.py @@ -25,8 +25,8 @@ LEGACY_SPAM_CHECKER_WARNING = """ This server is using a spam checker module that is implementing the deprecated spam checker interface. Please check with the module's maintainer to see if a new version -supporting Synapse's generic modules system is available. -For more information, please see https://matrix-org.github.io/synapse/latest/modules.html +supporting Synapse's generic modules system is available. For more information, please +see https://matrix-org.github.io/synapse/latest/modules/index.html ---------------------------------------------------------------------------------------""" From c486fa5fd9082643e40a55ffa59d902aa6db4c2b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 16 Mar 2022 10:37:04 -0400 Subject: [PATCH 410/474] Add some missing type hints to cache datastore. (#12216) --- changelog.d/12216.misc | 1 + synapse/storage/databases/main/cache.py | 57 ++++++++++++++++--------- 2 files changed, 37 insertions(+), 21 deletions(-) create mode 100644 changelog.d/12216.misc diff --git a/changelog.d/12216.misc b/changelog.d/12216.misc new file mode 100644 index 000000000000..dc398ac1e098 --- /dev/null +++ b/changelog.d/12216.misc @@ -0,0 +1 @@ +Add missing type hints for cache storage. 
diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index d6a2df1afeb6..2d7511d61391 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -23,6 +23,7 @@ EventsStream, EventsStreamCurrentStateRow, EventsStreamEventRow, + EventsStreamRow, ) from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( @@ -31,6 +32,7 @@ LoggingTransaction, ) from synapse.storage.engines import PostgresEngine +from synapse.util.caches.descriptors import _CachedFunction from synapse.util.iterutils import batch_iter if TYPE_CHECKING: @@ -82,7 +84,9 @@ async def get_all_updated_caches( if last_id == current_id: return [], current_id, False - def get_all_updated_caches_txn(txn): + def get_all_updated_caches_txn( + txn: LoggingTransaction, + ) -> Tuple[List[Tuple[int, tuple]], int, bool]: # We purposefully don't bound by the current token, as we want to # send across cache invalidations as quickly as possible. Cache # invalidations are idempotent, so duplicates are fine. @@ -107,7 +111,9 @@ def get_all_updated_caches_txn(txn): "get_all_updated_caches", get_all_updated_caches_txn ) - def process_replication_rows(self, stream_name, instance_name, token, rows): + def process_replication_rows( + self, stream_name: str, instance_name: str, token: int, rows: Iterable[Any] + ) -> None: if stream_name == EventsStream.NAME: for row in rows: self._process_event_stream_row(token, row) @@ -142,10 +148,11 @@ def process_replication_rows(self, stream_name, instance_name, token, rows): super().process_replication_rows(stream_name, instance_name, token, rows) - def _process_event_stream_row(self, token, row): + def _process_event_stream_row(self, token: int, row: EventsStreamRow) -> None: data = row.data if row.type == EventsStreamEventRow.TypeId: + assert isinstance(data, EventsStreamEventRow) self._invalidate_caches_for_event( token, data.event_id, @@ -157,9 +164,8 @@ def _process_event_stream_row(self, token, row): backfilled=False, ) elif row.type == EventsStreamCurrentStateRow.TypeId: - self._curr_state_delta_stream_cache.entity_has_changed( - row.data.room_id, token - ) + assert isinstance(data, EventsStreamCurrentStateRow) + self._curr_state_delta_stream_cache.entity_has_changed(data.room_id, token) if data.type == EventTypes.Member: self.get_rooms_for_user_with_stream_ordering.invalidate( @@ -170,15 +176,15 @@ def _process_event_stream_row(self, token, row): def _invalidate_caches_for_event( self, - stream_ordering, - event_id, - room_id, - etype, - state_key, - redacts, - relates_to, - backfilled, - ): + stream_ordering: int, + event_id: str, + room_id: str, + etype: str, + state_key: Optional[str], + redacts: Optional[str], + relates_to: Optional[str], + backfilled: bool, + ) -> None: self._invalidate_get_event_cache(event_id) self.have_seen_event.invalidate((room_id, event_id)) @@ -207,7 +213,9 @@ def _invalidate_caches_for_event( self.get_thread_summary.invalidate((relates_to,)) self.get_thread_participated.invalidate((relates_to,)) - async def invalidate_cache_and_stream(self, cache_name: str, keys: Tuple[Any, ...]): + async def invalidate_cache_and_stream( + self, cache_name: str, keys: Tuple[Any, ...] + ) -> None: """Invalidates the cache and adds it to the cache stream so slaves will know to invalidate their caches. @@ -227,7 +235,12 @@ async def invalidate_cache_and_stream(self, cache_name: str, keys: Tuple[Any, .. 
keys, ) - def _invalidate_cache_and_stream(self, txn, cache_func, keys): + def _invalidate_cache_and_stream( + self, + txn: LoggingTransaction, + cache_func: _CachedFunction, + keys: Tuple[Any, ...], + ) -> None: """Invalidates the cache and adds it to the cache stream so slaves will know to invalidate their caches. @@ -238,7 +251,9 @@ def _invalidate_cache_and_stream(self, txn, cache_func, keys): txn.call_after(cache_func.invalidate, keys) self._send_invalidation_to_replication(txn, cache_func.__name__, keys) - def _invalidate_all_cache_and_stream(self, txn, cache_func): + def _invalidate_all_cache_and_stream( + self, txn: LoggingTransaction, cache_func: _CachedFunction + ) -> None: """Invalidates the entire cache and adds it to the cache stream so slaves will know to invalidate their caches. """ @@ -279,8 +294,8 @@ def _invalidate_state_caches_and_stream( ) def _send_invalidation_to_replication( - self, txn, cache_name: str, keys: Optional[Iterable[Any]] - ): + self, txn: LoggingTransaction, cache_name: str, keys: Optional[Iterable[Any]] + ) -> None: """Notifies replication that given cache has been invalidated. Note that this does *not* invalidate the cache locally. @@ -315,7 +330,7 @@ def _send_invalidation_to_replication( "instance_name": self._instance_name, "cache_func": cache_name, "keys": keys, - "invalidation_ts": self.clock.time_msec(), + "invalidation_ts": self._clock.time_msec(), }, ) From fc9bd620ce94b64af46737e25a524336967782a1 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 16 Mar 2022 10:39:15 -0400 Subject: [PATCH 411/474] Add a relations handler to avoid duplication. (#12227) Adds a handler layer between the REST and datastore layers for relations. --- changelog.d/12227.misc | 1 + synapse/handlers/pagination.py | 5 +- synapse/handlers/relations.py | 117 +++++++++++++++++++++++++++++++ synapse/rest/client/relations.py | 75 +++----------------- synapse/server.py | 5 ++ 5 files changed, 134 insertions(+), 69 deletions(-) create mode 100644 changelog.d/12227.misc create mode 100644 synapse/handlers/relations.py diff --git a/changelog.d/12227.misc b/changelog.d/12227.misc new file mode 100644 index 000000000000..41c9dcbd37f6 --- /dev/null +++ b/changelog.d/12227.misc @@ -0,0 +1 @@ +Refactor the relations endpoints to add a `RelationsHandler`. diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 60059fec3e0f..41679f7f866b 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING, Any, Collection, Dict, List, Optional, Set +from typing import TYPE_CHECKING, Collection, Dict, List, Optional, Set import attr @@ -422,7 +422,7 @@ async def get_messages( pagin_config: PaginationConfig, as_client_event: bool = True, event_filter: Optional[Filter] = None, - ) -> Dict[str, Any]: + ) -> JsonDict: """Get messages in a room. Args: @@ -431,6 +431,7 @@ async def get_messages( pagin_config: The pagination config rules to apply, if any. as_client_event: True to get events in client-server format. event_filter: Filter to apply to results or None + Returns: Pagination API results """ diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py new file mode 100644 index 000000000000..8e475475ad02 --- /dev/null +++ b/synapse/handlers/relations.py @@ -0,0 +1,117 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+from typing import TYPE_CHECKING, Optional
+
+from synapse.api.errors import SynapseError
+from synapse.types import JsonDict, Requester, StreamToken
+
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
+
+logger = logging.getLogger(__name__)
+
+
+class RelationsHandler:
+    def __init__(self, hs: "HomeServer"):
+        self._main_store = hs.get_datastores().main
+        self._auth = hs.get_auth()
+        self._clock = hs.get_clock()
+        self._event_handler = hs.get_event_handler()
+        self._event_serializer = hs.get_event_client_serializer()
+
+    async def get_relations(
+        self,
+        requester: Requester,
+        event_id: str,
+        room_id: str,
+        relation_type: Optional[str] = None,
+        event_type: Optional[str] = None,
+        aggregation_key: Optional[str] = None,
+        limit: int = 5,
+        direction: str = "b",
+        from_token: Optional[StreamToken] = None,
+        to_token: Optional[StreamToken] = None,
+    ) -> JsonDict:
+        """Get related events of an event, ordered by topological ordering.
+
+        TODO Accept a PaginationConfig instead of individual pagination parameters.
+
+        Args:
+            requester: The user requesting the relations.
+            event_id: Fetch events that relate to this event ID.
+            room_id: The room the event belongs to.
+            relation_type: Only fetch events with this relation type, if given.
+            event_type: Only fetch events with this event type, if given.
+            aggregation_key: Only fetch events with this aggregation key, if given.
+            limit: Only fetch the most recent `limit` events.
+            direction: Whether to fetch the most recent first (`"b"`) or the
+                oldest first (`"f"`).
+            from_token: Fetch rows from the given token, or from the start if None.
+            to_token: Fetch rows up to the given token, or up to the end if None.
+
+        Returns:
+            The pagination chunk.
+        """
+
+        user_id = requester.user.to_string()
+
+        await self._auth.check_user_in_room_or_world_readable(
+            room_id, user_id, allow_departed_users=True
+        )
+
+        # This gets the original event and checks that a) the event exists and
+        # b) the user is allowed to view it.
+        event = await self._event_handler.get_event(requester.user, room_id, event_id)
+        if event is None:
+            raise SynapseError(404, "Unknown parent event.")
+
+        pagination_chunk = await self._main_store.get_relations_for_event(
+            event_id=event_id,
+            event=event,
+            room_id=room_id,
+            relation_type=relation_type,
+            event_type=event_type,
+            aggregation_key=aggregation_key,
+            limit=limit,
+            direction=direction,
+            from_token=from_token,
+            to_token=to_token,
+        )
+
+        events = await self._main_store.get_events_as_list(
+            [c["event_id"] for c in pagination_chunk.chunk]
+        )
+
+        now = self._clock.time_msec()
+        # Do not bundle aggregations when retrieving the original event because
+        # we want the content before relations are applied to it.
+        original_event = self._event_serializer.serialize_event(
+            event, now, bundle_aggregations=None
+        )
+        # The relations returned for the requested event do include their
+        # bundled aggregations.
+ aggregations = await self._main_store.get_bundled_aggregations( + events, requester.user.to_string() + ) + serialized_events = self._event_serializer.serialize_events( + events, now, bundle_aggregations=aggregations + ) + + return_value = await pagination_chunk.to_dict(self._main_store) + return_value["chunk"] = serialized_events + return_value["original_event"] = original_event + + return return_value diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py index d9a6be43f793..c16078b187ee 100644 --- a/synapse/rest/client/relations.py +++ b/synapse/rest/client/relations.py @@ -51,9 +51,7 @@ def __init__(self, hs: "HomeServer"): super().__init__() self.auth = hs.get_auth() self.store = hs.get_datastores().main - self.clock = hs.get_clock() - self._event_serializer = hs.get_event_client_serializer() - self.event_handler = hs.get_event_handler() + self._relations_handler = hs.get_relations_handler() async def on_GET( self, @@ -65,16 +63,6 @@ async def on_GET( ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) - await self.auth.check_user_in_room_or_world_readable( - room_id, requester.user.to_string(), allow_departed_users=True - ) - - # This gets the original event and checks that a) the event exists and - # b) the user is allowed to view it. - event = await self.event_handler.get_event(requester.user, room_id, parent_id) - if event is None: - raise SynapseError(404, "Unknown parent event.") - limit = parse_integer(request, "limit", default=5) direction = parse_string( request, "org.matrix.msc3715.dir", default="b", allowed_values=["f", "b"] @@ -90,9 +78,9 @@ async def on_GET( if to_token_str: to_token = await StreamToken.from_string(self.store, to_token_str) - pagination_chunk = await self.store.get_relations_for_event( + result = await self._relations_handler.get_relations( + requester=requester, event_id=parent_id, - event=event, room_id=room_id, relation_type=relation_type, event_type=event_type, @@ -102,30 +90,7 @@ async def on_GET( to_token=to_token, ) - events = await self.store.get_events_as_list( - [c["event_id"] for c in pagination_chunk.chunk] - ) - - now = self.clock.time_msec() - # Do not bundle aggregations when retrieving the original event because - # we want the content before relations are applied to it. - original_event = self._event_serializer.serialize_event( - event, now, bundle_aggregations=None - ) - # The relations returned for the requested event do include their - # bundled aggregations. 
- aggregations = await self.store.get_bundled_aggregations( - events, requester.user.to_string() - ) - serialized_events = self._event_serializer.serialize_events( - events, now, bundle_aggregations=aggregations - ) - - return_value = await pagination_chunk.to_dict(self.store) - return_value["chunk"] = serialized_events - return_value["original_event"] = original_event - - return 200, return_value + return 200, result class RelationAggregationPaginationServlet(RestServlet): @@ -245,9 +210,7 @@ def __init__(self, hs: "HomeServer"): super().__init__() self.auth = hs.get_auth() self.store = hs.get_datastores().main - self.clock = hs.get_clock() - self._event_serializer = hs.get_event_client_serializer() - self.event_handler = hs.get_event_handler() + self._relations_handler = hs.get_relations_handler() async def on_GET( self, @@ -260,18 +223,6 @@ async def on_GET( ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) - await self.auth.check_user_in_room_or_world_readable( - room_id, - requester.user.to_string(), - allow_departed_users=True, - ) - - # This checks that a) the event exists and b) the user is allowed to - # view it. - event = await self.event_handler.get_event(requester.user, room_id, parent_id) - if event is None: - raise SynapseError(404, "Unknown parent event.") - if relation_type != RelationTypes.ANNOTATION: raise SynapseError(400, "Relation type must be 'annotation'") @@ -286,9 +237,9 @@ async def on_GET( if to_token_str: to_token = await StreamToken.from_string(self.store, to_token_str) - result = await self.store.get_relations_for_event( + result = await self._relations_handler.get_relations( + requester=requester, event_id=parent_id, - event=event, room_id=room_id, relation_type=relation_type, event_type=event_type, @@ -298,17 +249,7 @@ async def on_GET( to_token=to_token, ) - events = await self.store.get_events_as_list( - [c["event_id"] for c in result.chunk] - ) - - now = self.clock.time_msec() - serialized_events = self._event_serializer.serialize_events(events, now) - - return_value = await result.to_dict(self.store) - return_value["chunk"] = serialized_events - - return 200, return_value + return 200, result def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: diff --git a/synapse/server.py b/synapse/server.py index 2fcf18a7a69a..380369db923e 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -94,6 +94,7 @@ from synapse.handlers.read_marker import ReadMarkerHandler from synapse.handlers.receipts import ReceiptsHandler from synapse.handlers.register import RegistrationHandler +from synapse.handlers.relations import RelationsHandler from synapse.handlers.room import ( RoomContextHandler, RoomCreationHandler, @@ -719,6 +720,10 @@ def get_message_handler(self) -> MessageHandler: def get_pagination_handler(self) -> PaginationHandler: return PaginationHandler(self) + @cache_in_self + def get_relations_handler(self) -> RelationsHandler: + return RelationsHandler(self) + @cache_in_self def get_room_context_handler(self) -> RoomContextHandler: return RoomContextHandler(self) From 61210567405b1ac7efaa23d5513cc0b443da0a3a Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Wed, 16 Mar 2022 15:07:41 +0000 Subject: [PATCH 412/474] Handle cancellation in `DatabasePool.runInteraction()` (#12199) To handle cancellation, we ensure that `after_callback`s and `exception_callback`s are always run, since the transaction will complete on another thread regardless of cancellation. 
We also wait until everything is done before releasing the `CancelledError`, so that logging contexts won't get used after they have been finished. Signed-off-by: Sean Quah --- changelog.d/12199.misc | 1 + synapse/storage/database.py | 61 +++++++++++++++++++++------------- tests/storage/test_database.py | 58 ++++++++++++++++++++++++++++++++ 3 files changed, 96 insertions(+), 24 deletions(-) create mode 100644 changelog.d/12199.misc diff --git a/changelog.d/12199.misc b/changelog.d/12199.misc new file mode 100644 index 000000000000..16dec1d26d24 --- /dev/null +++ b/changelog.d/12199.misc @@ -0,0 +1 @@ +Handle cancellation in `DatabasePool.runInteraction()`. diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 99802228c9f7..9749f0c06e86 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -41,6 +41,7 @@ from typing_extensions import Literal from twisted.enterprise import adbapi +from twisted.internet import defer from synapse.api.errors import StoreError from synapse.config.database import DatabaseConnectionConfig @@ -55,6 +56,7 @@ from synapse.storage.background_updates import BackgroundUpdater from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine from synapse.storage.types import Connection, Cursor +from synapse.util.async_helpers import delay_cancellation from synapse.util.iterutils import batch_iter if TYPE_CHECKING: @@ -732,34 +734,45 @@ async def runInteraction( Returns: The result of func """ - after_callbacks: List[_CallbackListEntry] = [] - exception_callbacks: List[_CallbackListEntry] = [] - if not current_context(): - logger.warning("Starting db txn '%s' from sentinel context", desc) + async def _runInteraction() -> R: + after_callbacks: List[_CallbackListEntry] = [] + exception_callbacks: List[_CallbackListEntry] = [] - try: - with opentracing.start_active_span(f"db.{desc}"): - result = await self.runWithConnection( - self.new_transaction, - desc, - after_callbacks, - exception_callbacks, - func, - *args, - db_autocommit=db_autocommit, - isolation_level=isolation_level, - **kwargs, - ) + if not current_context(): + logger.warning("Starting db txn '%s' from sentinel context", desc) - for after_callback, after_args, after_kwargs in after_callbacks: - after_callback(*after_args, **after_kwargs) - except Exception: - for after_callback, after_args, after_kwargs in exception_callbacks: - after_callback(*after_args, **after_kwargs) - raise + try: + with opentracing.start_active_span(f"db.{desc}"): + result = await self.runWithConnection( + self.new_transaction, + desc, + after_callbacks, + exception_callbacks, + func, + *args, + db_autocommit=db_autocommit, + isolation_level=isolation_level, + **kwargs, + ) - return cast(R, result) + for after_callback, after_args, after_kwargs in after_callbacks: + after_callback(*after_args, **after_kwargs) + + return cast(R, result) + except Exception: + for after_callback, after_args, after_kwargs in exception_callbacks: + after_callback(*after_args, **after_kwargs) + raise + + # To handle cancellation, we ensure that `after_callback`s and + # `exception_callback`s are always run, since the transaction will complete + # on another thread regardless of cancellation. + # + # We also wait until everything above is done before releasing the + # `CancelledError`, so that logging contexts won't get used after they have been + # finished. 
+        return await delay_cancellation(defer.ensureDeferred(_runInteraction()))
 
     async def runWithConnection(
         self,
diff --git a/tests/storage/test_database.py b/tests/storage/test_database.py
index ae13bed08621..a40fc20ef990 100644
--- a/tests/storage/test_database.py
+++ b/tests/storage/test_database.py
@@ -15,6 +15,8 @@
 from typing import Callable, Tuple
 from unittest.mock import Mock, call
 
+from twisted.internet import defer
+from twisted.internet.defer import CancelledError, Deferred
 from twisted.test.proto_helpers import MemoryReactor
 
 from synapse.server import HomeServer
@@ -124,3 +126,59 @@ def test_successful_retry(self) -> None:
         )
         self.assertEqual(after_callback.call_count, 2)  # no additional calls
         exception_callback.assert_not_called()
+
+
+class CancellationTestCase(unittest.HomeserverTestCase):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+        self.db_pool: DatabasePool = self.store.db_pool
+
+    def test_after_callback(self) -> None:
+        """Test that the after callback is called when a transaction succeeds."""
+        d: "Deferred[None]"
+        after_callback = Mock()
+        exception_callback = Mock()
+
+        def _test_txn(txn: LoggingTransaction) -> None:
+            txn.call_after(after_callback, 123, 456, extra=789)
+            txn.call_on_exception(exception_callback, 987, 654, extra=321)
+            d.cancel()
+
+        d = defer.ensureDeferred(
+            self.db_pool.runInteraction("test_transaction", _test_txn)
+        )
+        self.get_failure(d, CancelledError)
+
+        after_callback.assert_called_once_with(123, 456, extra=789)
+        exception_callback.assert_not_called()
+
+    def test_exception_callback(self) -> None:
+        """Test that the exception callback is called when a transaction fails."""
+        d: "Deferred[None]"
+        after_callback = Mock()
+        exception_callback = Mock()
+
+        def _test_txn(txn: LoggingTransaction) -> None:
+            txn.call_after(after_callback, 123, 456, extra=789)
+            txn.call_on_exception(exception_callback, 987, 654, extra=321)
+            d.cancel()
+            # Simulate a retryable failure on every attempt.
+            raise self.db_pool.engine.module.OperationalError()
+
+        d = defer.ensureDeferred(
+            self.db_pool.runInteraction("test_transaction", _test_txn)
+        )
+        self.get_failure(d, CancelledError)
+
+        after_callback.assert_not_called()
+        exception_callback.assert_has_calls(
+            [
+                call(987, 654, extra=321),
+                call(987, 654, extra=321),
+                call(987, 654, extra=321),
+                call(987, 654, extra=321),
+                call(987, 654, extra=321),
+                call(987, 654, extra=321),
+            ]
+        )
+        self.assertEqual(exception_callback.call_count, 6)  # no additional calls

From 96274565ff0dbb7d21b02b04fcef115330426707 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Wed, 16 Mar 2022 12:17:39 -0400
Subject: [PATCH 413/474] Fix bundling aggregations if unsigned is not a
 returned event field. (#12234)

An error occurred if a filter was supplied with `event_fields` which did not
include `unsigned`.

In that case, bundled aggregations are still added as the spec states it is
allowed for servers to add additional fields.
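To make the failure concrete, here is a minimal sketch of the injection step
(the dict literals are invented for illustration; only the `setdefault`
pattern is taken from the fix below):

    # A filter with event_fields=["content", "event_id"] strips "unsigned"
    # from the serialized event before aggregations are injected.
    serialized_event = {"event_id": "$parent", "content": {"body": "hi"}}
    serialized_aggregations = {"m.annotation": {"chunk": []}}

    # Old behaviour: KeyError on the missing "unsigned" key, which surfaced
    # as a 500 error on /sync:
    #     serialized_event["unsigned"].setdefault("m.relations", {})
    # Fixed behaviour: recreate "unsigned" first, then inject the bundle.
    serialized_event.setdefault("unsigned", {}).setdefault(
        "m.relations", {}
    ).update(serialized_aggregations)
    assert serialized_event["unsigned"]["m.relations"] == serialized_aggregations

Since the spec permits servers to return fields beyond those requested,
re-adding `unsigned` here is legitimate even when the filter omitted it.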
--- changelog.d/12234.bugfix | 1 + synapse/events/utils.py | 9 ++++++--- tests/rest/client/test_relations.py | 28 ++++++++++++++++++++++++++++ 3 files changed, 35 insertions(+), 3 deletions(-) create mode 100644 changelog.d/12234.bugfix diff --git a/changelog.d/12234.bugfix b/changelog.d/12234.bugfix new file mode 100644 index 000000000000..dbb77f36ff32 --- /dev/null +++ b/changelog.d/12234.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug when a `filter` argument with `event_fields` supplied but not including the `unsigned` field could result in a 500 error on `/sync`. diff --git a/synapse/events/utils.py b/synapse/events/utils.py index b2a237c1e04a..a0520068e0f9 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -530,9 +530,12 @@ def _inject_bundled_aggregations( # Include the bundled aggregations in the event. if serialized_aggregations: - serialized_event["unsigned"].setdefault("m.relations", {}).update( - serialized_aggregations - ) + # There is likely already an "unsigned" field, but a filter might + # have stripped it off (via the event_fields option). The server is + # allowed to return additional fields, so add it back. + serialized_event.setdefault("unsigned", {}).setdefault( + "m.relations", {} + ).update(serialized_aggregations) def serialize_events( self, diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index 0cbe6c0cf754..171f4e97c8fd 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -1267,6 +1267,34 @@ def test_background_update(self) -> None: [annotation_event_id_good, thread_event_id], ) + def test_bundled_aggregations_with_filter(self) -> None: + """ + If "unsigned" is an omitted field (due to filtering), adding the bundled + aggregations should not break. + + Note that the spec allows for a server to return additional fields beyond + what is specified. + """ + self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + + # Note that the sync filter does not include "unsigned" as a field. + filter = urllib.parse.quote_plus( + b'{"event_fields": ["content", "event_id"], "room": {"timeline": {"limit": 3}}}' + ) + channel = self.make_request( + "GET", f"/sync?filter={filter}", access_token=self.user_token + ) + self.assertEqual(200, channel.code, channel.json_body) + + # Ensure the timeline is limited, find the parent event. + room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] + self.assertTrue(room_timeline["limited"]) + parent_event = self._find_event_in_chunk(room_timeline["events"]) + + # Ensure there's bundled aggregations on it. + self.assertIn("unsigned", parent_event) + self.assertIn("m.relations", parent_event["unsigned"]) + class RelationRedactionTestCase(BaseRelationsTestCase): """ From f70afbd565f34cdc093e083b92376a1154b007a7 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 16 Mar 2022 12:20:05 -0400 Subject: [PATCH 414/474] Re-generate changelog. --- CHANGES.md | 1 + changelog.d/12234.bugfix | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) delete mode 100644 changelog.d/12234.bugfix diff --git a/CHANGES.md b/CHANGES.md index 60e7ecb1b9c1..78498c10b551 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -21,6 +21,7 @@ Bugfixes - Fix a bug introduced in Synapse 1.7.2 whereby background updates are never run with the default background batch size. ([\#12157](https://github.com/matrix-org/synapse/issues/12157)) - Fix a bug where non-standard information was returned from the `/hierarchy` API. Introduced in Synapse v1.41.0. 
([\#12175](https://github.com/matrix-org/synapse/issues/12175)) - Fix a bug introduced in Synapse 1.54.0 that broke background updates on sqlite homeservers while search was disabled. ([\#12215](https://github.com/matrix-org/synapse/issues/12215)) +- Fix a long-standing bug when a `filter` argument with `event_fields` which did not include the `unsigned` field could result in a 500 error on `/sync`. ([\#12234](https://github.com/matrix-org/synapse/issues/12234)) Improved Documentation diff --git a/changelog.d/12234.bugfix b/changelog.d/12234.bugfix deleted file mode 100644 index dbb77f36ff32..000000000000 --- a/changelog.d/12234.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug when a `filter` argument with `event_fields` supplied but not including the `unsigned` field could result in a 500 error on `/sync`. From 9e06e220649cc0139749c388a894bee0d65d5f4e Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Thu, 17 Mar 2022 12:25:50 +0100 Subject: [PATCH 415/474] Add type hints to more tests files. (#12240) --- changelog.d/12240.misc | 1 + mypy.ini | 4 ---- tests/handlers/test_cas.py | 19 +++++++++------ tests/handlers/test_federation.py | 36 ++++++++++++++++------------ tests/handlers/test_presence.py | 13 ++++++---- tests/push/test_http.py | 40 ++++++++++++++++++------------- 6 files changed, 66 insertions(+), 47 deletions(-) create mode 100644 changelog.d/12240.misc diff --git a/changelog.d/12240.misc b/changelog.d/12240.misc new file mode 100644 index 000000000000..c5b635679931 --- /dev/null +++ b/changelog.d/12240.misc @@ -0,0 +1 @@ +Add type hints to tests files. \ No newline at end of file diff --git a/mypy.ini b/mypy.ini index fe31bfb8bb37..51f47ff5be8f 100644 --- a/mypy.ini +++ b/mypy.ini @@ -66,9 +66,6 @@ exclude = (?x) |tests/federation/test_federation_server.py |tests/federation/transport/test_knocking.py |tests/federation/transport/test_server.py - |tests/handlers/test_cas.py - |tests/handlers/test_federation.py - |tests/handlers/test_presence.py |tests/handlers/test_typing.py |tests/http/federation/test_matrix_federation_agent.py |tests/http/federation/test_srv_resolver.py @@ -80,7 +77,6 @@ exclude = (?x) |tests/logging/test_terse_json.py |tests/module_api/test_api.py |tests/push/test_email.py - |tests/push/test_http.py |tests/push/test_presentable_names.py |tests/push/test_push_rule_evaluator.py |tests/rest/client/test_transactions.py diff --git a/tests/handlers/test_cas.py b/tests/handlers/test_cas.py index a2672288465a..a54aa29cf177 100644 --- a/tests/handlers/test_cas.py +++ b/tests/handlers/test_cas.py @@ -11,9 +11,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from typing import Any, Dict from unittest.mock import Mock +from twisted.test.proto_helpers import MemoryReactor + from synapse.handlers.cas import CasResponse +from synapse.server import HomeServer +from synapse.util import Clock from tests.test_utils import simple_async_mock from tests.unittest import HomeserverTestCase, override_config @@ -24,7 +29,7 @@ class CasHandlerTestCase(HomeserverTestCase): - def default_config(self): + def default_config(self) -> Dict[str, Any]: config = super().default_config() config["public_baseurl"] = BASE_URL cas_config = { @@ -40,7 +45,7 @@ def default_config(self): return config - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: hs = self.setup_test_homeserver() self.handler = hs.get_cas_handler() @@ -51,7 +56,7 @@ def make_homeserver(self, reactor, clock): return hs - def test_map_cas_user_to_user(self): + def test_map_cas_user_to_user(self) -> None: """Ensure that mapping the CAS user returned from a provider to an MXID works properly.""" # stub out the auth handler @@ -75,7 +80,7 @@ def test_map_cas_user_to_user(self): auth_provider_session_id=None, ) - def test_map_cas_user_to_existing_user(self): + def test_map_cas_user_to_existing_user(self) -> None: """Existing users can log in with CAS account.""" store = self.hs.get_datastores().main self.get_success( @@ -119,7 +124,7 @@ def test_map_cas_user_to_existing_user(self): auth_provider_session_id=None, ) - def test_map_cas_user_to_invalid_localpart(self): + def test_map_cas_user_to_invalid_localpart(self) -> None: """CAS automaps invalid characters to base-64 encoding.""" # stub out the auth handler @@ -150,7 +155,7 @@ def test_map_cas_user_to_invalid_localpart(self): } } ) - def test_required_attributes(self): + def test_required_attributes(self) -> None: """The required attributes must be met from the CAS response.""" # stub out the auth handler @@ -166,7 +171,7 @@ def test_required_attributes(self): auth_handler.complete_sso_login.assert_not_called() # The response doesn't have any department. - cas_response = CasResponse("test_user", {"userGroup": "staff"}) + cas_response = CasResponse("test_user", {"userGroup": ["staff"]}) request.reset_mock() self.get_success( self.handler._handle_cas_response(request, cas_response, "redirect_uri", "") diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index e8b4e39d1a32..89078fc6374c 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging -from typing import List +from typing import List, cast from unittest import TestCase +from twisted.test.proto_helpers import MemoryReactor + from synapse.api.constants import EventTypes from synapse.api.errors import AuthError, Codes, LimitExceededError, SynapseError from synapse.api.room_versions import RoomVersions @@ -23,7 +25,9 @@ from synapse.logging.context import LoggingContext, run_in_background from synapse.rest import admin from synapse.rest.client import login, room +from synapse.server import HomeServer from synapse.types import create_requester +from synapse.util import Clock from synapse.util.stringutils import random_string from tests import unittest @@ -42,7 +46,7 @@ class FederationTestCase(unittest.HomeserverTestCase): room.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: hs = self.setup_test_homeserver(federation_http_client=None) self.handler = hs.get_federation_handler() self.store = hs.get_datastores().main @@ -50,7 +54,7 @@ def make_homeserver(self, reactor, clock): self._event_auth_handler = hs.get_event_auth_handler() return hs - def test_exchange_revoked_invite(self): + def test_exchange_revoked_invite(self) -> None: user_id = self.register_user("kermit", "test") tok = self.login("kermit", "test") @@ -96,7 +100,7 @@ def test_exchange_revoked_invite(self): self.assertEqual(failure.errcode, Codes.FORBIDDEN, failure) self.assertEqual(failure.msg, "You are not invited to this room.") - def test_rejected_message_event_state(self): + def test_rejected_message_event_state(self) -> None: """ Check that we store the state group correctly for rejected non-state events. @@ -126,7 +130,7 @@ def test_rejected_message_event_state(self): "content": {}, "room_id": room_id, "sender": "@yetanotheruser:" + OTHER_SERVER, - "depth": join_event["depth"] + 1, + "depth": cast(int, join_event["depth"]) + 1, "prev_events": [join_event.event_id], "auth_events": [], "origin_server_ts": self.clock.time_msec(), @@ -149,7 +153,7 @@ def test_rejected_message_event_state(self): self.assertEqual(sg, sg2) - def test_rejected_state_event_state(self): + def test_rejected_state_event_state(self) -> None: """ Check that we store the state group correctly for rejected state events. @@ -180,7 +184,7 @@ def test_rejected_state_event_state(self): "content": {}, "room_id": room_id, "sender": "@yetanotheruser:" + OTHER_SERVER, - "depth": join_event["depth"] + 1, + "depth": cast(int, join_event["depth"]) + 1, "prev_events": [join_event.event_id], "auth_events": [], "origin_server_ts": self.clock.time_msec(), @@ -203,7 +207,7 @@ def test_rejected_state_event_state(self): self.assertEqual(sg, sg2) - def test_backfill_with_many_backward_extremities(self): + def test_backfill_with_many_backward_extremities(self) -> None: """ Check that we can backfill with many backward extremities. 
The goal is to make sure that when we only use a portion @@ -262,7 +266,7 @@ def test_backfill_with_many_backward_extremities(self): ) self.get_success(d) - def test_backfill_floating_outlier_membership_auth(self): + def test_backfill_floating_outlier_membership_auth(self) -> None: """ As the local homeserver, check that we can properly process a federated event from the OTHER_SERVER with auth_events that include a floating @@ -377,7 +381,7 @@ async def get_event_auth( for ae in auth_events ] - self.handler.federation_client.get_event_auth = get_event_auth + self.handler.federation_client.get_event_auth = get_event_auth # type: ignore[assignment] with LoggingContext("receive_pdu"): # Fake the OTHER_SERVER federating the message event over to our local homeserver @@ -397,7 +401,7 @@ async def get_event_auth( @unittest.override_config( {"rc_invites": {"per_user": {"per_second": 0.5, "burst_count": 3}}} ) - def test_invite_by_user_ratelimit(self): + def test_invite_by_user_ratelimit(self) -> None: """Tests that invites from federation to a particular user are actually rate-limited. """ @@ -446,7 +450,9 @@ def create_invite(): exc=LimitExceededError, ) - def _build_and_send_join_event(self, other_server, other_user, room_id): + def _build_and_send_join_event( + self, other_server: str, other_user: str, room_id: str + ) -> EventBase: join_event = self.get_success( self.handler.on_make_join_request(other_server, room_id, other_user) ) @@ -469,7 +475,7 @@ def _build_and_send_join_event(self, other_server, other_user, room_id): class EventFromPduTestCase(TestCase): - def test_valid_json(self): + def test_valid_json(self) -> None: """Valid JSON should be turned into an event.""" ev = event_from_pdu_json( { @@ -487,7 +493,7 @@ def test_valid_json(self): self.assertIsInstance(ev, EventBase) - def test_invalid_numbers(self): + def test_invalid_numbers(self) -> None: """Invalid values for an integer should be rejected, all floats should be rejected.""" for value in [ -(2 ** 53), @@ -512,7 +518,7 @@ def test_invalid_numbers(self): RoomVersions.V6, ) - def test_invalid_nested(self): + def test_invalid_nested(self) -> None: """List and dictionaries are recursively searched.""" with self.assertRaises(SynapseError): event_from_pdu_json( diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index 6ddec9ecf1fe..b2ed9cbe3775 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -331,11 +331,11 @@ def test_persisting_presence_updates(self): # Extract presence update user ID and state information into lists of tuples db_presence_states = [(ps[0], ps[1]) for _, ps in db_presence_states[0]] - presence_states = [(ps.user_id, ps.state) for ps in presence_states] + presence_states_compare = [(ps.user_id, ps.state) for ps in presence_states] # Compare what we put into the storage with what we got out. # They should be identical. 
- self.assertEqual(presence_states, db_presence_states) + self.assertEqual(presence_states_compare, db_presence_states) class PresenceTimeoutTestCase(unittest.TestCase): @@ -357,6 +357,7 @@ def test_idle_timer(self): new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now) self.assertIsNotNone(new_state) + assert new_state is not None self.assertEqual(new_state.state, PresenceState.UNAVAILABLE) self.assertEqual(new_state.status_msg, status_msg) @@ -380,6 +381,7 @@ def test_busy_no_idle(self): new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now) self.assertIsNotNone(new_state) + assert new_state is not None self.assertEqual(new_state.state, PresenceState.BUSY) self.assertEqual(new_state.status_msg, status_msg) @@ -399,6 +401,7 @@ def test_sync_timeout(self): new_state = handle_timeout(state, is_mine=True, syncing_user_ids=set(), now=now) self.assertIsNotNone(new_state) + assert new_state is not None self.assertEqual(new_state.state, PresenceState.OFFLINE) self.assertEqual(new_state.status_msg, status_msg) @@ -420,6 +423,7 @@ def test_sync_online(self): ) self.assertIsNotNone(new_state) + assert new_state is not None self.assertEqual(new_state.state, PresenceState.ONLINE) self.assertEqual(new_state.status_msg, status_msg) @@ -477,6 +481,7 @@ def test_federation_timeout(self): ) self.assertIsNotNone(new_state) + assert new_state is not None self.assertEqual(new_state.state, PresenceState.OFFLINE) self.assertEqual(new_state.status_msg, status_msg) @@ -653,13 +658,13 @@ def test_set_presence_with_status_msg_none(self): self._set_presencestate_with_status_msg(user_id, PresenceState.ONLINE, None) def _set_presencestate_with_status_msg( - self, user_id: str, state: PresenceState, status_msg: Optional[str] + self, user_id: str, state: str, status_msg: Optional[str] ): """Set a PresenceState and status_msg and check the result. Args: user_id: User for that the status is to be set. - PresenceState: The new PresenceState. + state: The new PresenceState. status_msg: Status message that is to be set. """ self.get_success( diff --git a/tests/push/test_http.py b/tests/push/test_http.py index 6691e0712896..ba158f5d93e4 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py @@ -11,15 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import List, Tuple +from typing import Any, Dict, List, Optional, Tuple from unittest.mock import Mock from twisted.internet.defer import Deferred +from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin from synapse.logging.context import make_deferred_yieldable from synapse.push import PusherConfigException from synapse.rest.client import login, push_rule, receipts, room +from synapse.server import HomeServer +from synapse.types import JsonDict +from synapse.util import Clock from tests.unittest import HomeserverTestCase, override_config @@ -35,13 +39,13 @@ class HTTPPusherTests(HomeserverTestCase): user_id = True hijack_auth = False - def default_config(self): + def default_config(self) -> Dict[str, Any]: config = super().default_config() config["start_pushers"] = True return config - def make_homeserver(self, reactor, clock): - self.push_attempts: List[tuple[Deferred, str, dict]] = [] + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: + self.push_attempts: List[Tuple[Deferred, str, dict]] = [] m = Mock() @@ -56,7 +60,7 @@ def post_json_get_json(url, body): return hs - def test_invalid_configuration(self): + def test_invalid_configuration(self) -> None: """Invalid push configurations should be rejected.""" # Register the user who gets notified user_id = self.register_user("user", "pass") @@ -68,7 +72,7 @@ def test_invalid_configuration(self): ) token_id = user_tuple.token_id - def test_data(data): + def test_data(data: Optional[JsonDict]) -> None: self.get_failure( self.hs.get_pusherpool().add_pusher( user_id=user_id, @@ -95,7 +99,7 @@ def test_data(data): # A url with an incorrect path isn't accepted. test_data({"url": "http://example.com/foo"}) - def test_sends_http(self): + def test_sends_http(self) -> None: """ The HTTP pusher will send pushes for each message to a HTTP endpoint when configured to do so. @@ -200,7 +204,7 @@ def test_sends_http(self): self.assertEqual(len(pushers), 1) self.assertTrue(pushers[0].last_stream_ordering > last_stream_ordering) - def test_sends_high_priority_for_encrypted(self): + def test_sends_high_priority_for_encrypted(self) -> None: """ The HTTP pusher will send pushes at high priority if they correspond to an encrypted message. @@ -321,7 +325,7 @@ def test_sends_high_priority_for_encrypted(self): ) self.assertEqual(self.push_attempts[1][2]["notification"]["prio"], "high") - def test_sends_high_priority_for_one_to_one_only(self): + def test_sends_high_priority_for_one_to_one_only(self) -> None: """ The HTTP pusher will send pushes at high priority if they correspond to a message in a one-to-one room. @@ -404,7 +408,7 @@ def test_sends_high_priority_for_one_to_one_only(self): # check that this is low-priority self.assertEqual(self.push_attempts[1][2]["notification"]["prio"], "low") - def test_sends_high_priority_for_mention(self): + def test_sends_high_priority_for_mention(self) -> None: """ The HTTP pusher will send pushes at high priority if they correspond to a message containing the user's display name. @@ -480,7 +484,7 @@ def test_sends_high_priority_for_mention(self): # check that this is low-priority self.assertEqual(self.push_attempts[1][2]["notification"]["prio"], "low") - def test_sends_high_priority_for_atroom(self): + def test_sends_high_priority_for_atroom(self) -> None: """ The HTTP pusher will send pushes at high priority if they correspond to a message that contains @room. 
@@ -563,7 +567,7 @@ def test_sends_high_priority_for_atroom(self):
         # check that this is low-priority
         self.assertEqual(self.push_attempts[1][2]["notification"]["prio"], "low")
 
-    def test_push_unread_count_group_by_room(self):
+    def test_push_unread_count_group_by_room(self) -> None:
         """
         The HTTP pusher will group unread count by number of unread rooms.
         """
@@ -576,7 +580,7 @@ def test_push_unread_count_group_by_room(self):
         self._check_push_attempt(6, 1)
 
     @override_config({"push": {"group_unread_count_by_room": False}})
-    def test_push_unread_count_message_count(self):
+    def test_push_unread_count_message_count(self) -> None:
         """
         The HTTP pusher will send the total unread message count.
         """
@@ -589,7 +593,7 @@ def test_push_unread_count_message_count(self):
         # last read receipt
         self._check_push_attempt(6, 3)
 
-    def _test_push_unread_count(self):
+    def _test_push_unread_count(self) -> None:
         """
         Tests that the correct unread count appears in sent push notifications
 
@@ -681,7 +685,7 @@ def _test_push_unread_count(self):
 
         self.helper.send(room_id, body="HELLO???", tok=other_access_token)
 
-    def _advance_time_and_make_push_succeed(self, expected_push_attempts):
+    def _advance_time_and_make_push_succeed(self, expected_push_attempts: int) -> None:
         self.pump()
         self.push_attempts[expected_push_attempts - 1][0].callback({})
 
@@ -708,7 +712,9 @@ def _check_push_attempt(
             expected_unread_count_last_push,
         )
 
-    def _send_read_request(self, access_token, message_event_id, room_id):
+    def _send_read_request(
+        self, access_token: str, message_event_id: str, room_id: str
+    ) -> None:
         # Now set the user's read receipt position to the first event
         #
         # This will actually trigger a new notification to be sent out so that
@@ -748,7 +754,7 @@ def _make_user_with_pusher(self, username: str) -> Tuple[str, str]:
 
         return user_id, access_token
 
-    def test_dont_notify_rule_overrides_message(self):
+    def test_dont_notify_rule_overrides_message(self) -> None:
         """
         The override push rule will suppress notification
         """

From 12d1f82db213603972d60be3f46f6a36c3c2330f Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Thu, 17 Mar 2022 13:46:05 +0000
Subject: [PATCH 416/474] Generate announcement links in release script
 (#12242)

---
 changelog.d/12242.misc |  1 +
 scripts-dev/release.py | 41 ++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 41 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/12242.misc

diff --git a/changelog.d/12242.misc b/changelog.d/12242.misc
new file mode 100644
index 000000000000..38e7e0f7d1e8
--- /dev/null
+++ b/changelog.d/12242.misc
@@ -0,0 +1 @@
+Generate announcement links in the release script.
diff --git a/scripts-dev/release.py b/scripts-dev/release.py
index 046453e65ff8..685fa32b03f4 100755
--- a/scripts-dev/release.py
+++ b/scripts-dev/release.py
@@ -66,11 +66,15 @@ def cli():
 
         ./scripts-dev/release.py tag
 
-        # ... wait for asssets to build ...
+        # ... wait for assets to build ...
 
         ./scripts-dev/release.py publish
 
         ./scripts-dev/release.py upload
 
+        # Optional: generate some nice links for the announcement
+
+        ./scripts-dev/release.py announce
+
     If the env var GH_TOKEN (or GITHUB_TOKEN) is set, or passed into the
     `tag`/`publish` command, then a new draft release will be created/published.
     """
@@ -415,6 +419,41 @@ def upload():
     )
 
 
+@cli.command()
+def announce():
+    """Generate markdown to announce the release."""
+
+    current_version, _, _ = parse_version_from_module()
+    tag_name = f"v{current_version}"
+
+    click.echo(
+        f"""
+Hi everyone. Synapse {current_version} has just been released.
+ +[notes](https://github.com/matrix-org/synapse/releases/tag/{tag_name}) |\ +[docker](https://hub.docker.com/r/matrixdotorg/synapse/tags?name={tag_name}) | \ +[debs](https://packages.matrix.org/debian/) | \ +[pypi](https://pypi.org/project/matrix-synapse/{current_version}/)""" + ) + + if "rc" in tag_name: + click.echo( + """ +Announce the RC in +- #homeowners:matrix.org (Synapse Announcements) +- #synapse-dev:matrix.org""" + ) + else: + click.echo( + """ +Announce the release in +- #homeowners:matrix.org (Synapse Announcements), bumping the version in the topic +- #synapse:matrix.org (Synapse Admins), bumping the version in the topic +- #synapse-dev:matrix.org +- #synapse-package-maintainers:matrix.org""" + ) + + def parse_version_from_module() -> Tuple[ version.Version, redbaron.RedBaron, redbaron.Node ]: From 872dbb0181714e201be082c4e8bd9b727c73f177 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 18 Mar 2022 13:51:41 +0000 Subject: [PATCH 417/474] Correct `check_username_for_spam` annotations and docs (#12246) * Formally type the UserProfile in user searches * export UserProfile in synapse.module_api * Update docs Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> --- changelog.d/12246.doc | 1 + docs/modules/spam_checker_callbacks.md | 10 ++++---- synapse/events/spamcheck.py | 7 +++--- synapse/handlers/user_directory.py | 4 ++-- synapse/module_api/__init__.py | 2 ++ synapse/rest/client/user_directory.py | 4 ++-- .../storage/databases/main/user_directory.py | 23 +++++++++++++++---- synapse/types.py | 11 +++++++++ 8 files changed, 46 insertions(+), 16 deletions(-) create mode 100644 changelog.d/12246.doc diff --git a/changelog.d/12246.doc b/changelog.d/12246.doc new file mode 100644 index 000000000000..e7fcc1b99c78 --- /dev/null +++ b/changelog.d/12246.doc @@ -0,0 +1 @@ +Correct `check_username_for_spam` annotations and docs. \ No newline at end of file diff --git a/docs/modules/spam_checker_callbacks.md b/docs/modules/spam_checker_callbacks.md index 2b672b78f9ae..472d95718087 100644 --- a/docs/modules/spam_checker_callbacks.md +++ b/docs/modules/spam_checker_callbacks.md @@ -172,7 +172,7 @@ any of the subsequent implementations of this callback. _First introduced in Synapse v1.37.0_ ```python -async def check_username_for_spam(user_profile: Dict[str, str]) -> bool +async def check_username_for_spam(user_profile: synapse.module_api.UserProfile) -> bool ``` Called when computing search results in the user directory. The module must return a @@ -182,9 +182,11 @@ search results; otherwise return `False`. The profile is represented as a dictionary with the following keys: -* `user_id`: The Matrix ID for this user. -* `display_name`: The user's display name. -* `avatar_url`: The `mxc://` URL to the user's avatar. +* `user_id: str`. The Matrix ID for this user. +* `display_name: Optional[str]`. The user's display name, or `None` if this user + has not set a display name. +* `avatar_url: Optional[str]`. The `mxc://` URL to the user's avatar, or `None` + if this user has not set an avatar. The module is given a copy of the original dictionary, so modifying it from within the module cannot modify a user's profile when included in user directory search results. 
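As a usage sketch for the callback documented above, a module could consume the
newly exported `UserProfile` type as follows. This is an editorial example, not
part of the patch: `ExampleSpamChecker` and its `blocked_words` option are
invented, while `ModuleApi.register_spam_checker_callbacks` is the existing
registration hook.

    from typing import Any, Dict

    from synapse.module_api import ModuleApi, UserProfile


    class ExampleSpamChecker:
        def __init__(self, config: Dict[str, Any], api: ModuleApi):
            # Hypothetical config option; a real module defines its own schema.
            self._blocked_words = [
                word.lower() for word in config.get("blocked_words", [])
            ]
            api.register_spam_checker_callbacks(
                check_username_for_spam=self.check_username_for_spam
            )

        async def check_username_for_spam(self, user_profile: UserProfile) -> bool:
            # Under the corrected annotations, display_name may be None, so
            # guard against it before matching.
            display_name = (user_profile["display_name"] or "").lower()
            return any(word in display_name for word in self._blocked_words)

Returning `True` excludes the profile from user directory search results,
matching the semantics described in the documentation change above.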
diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index 60904a55f5c0..cd80fcf9d13a 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -21,7 +21,6 @@ Awaitable, Callable, Collection, - Dict, List, Optional, Tuple, @@ -31,7 +30,7 @@ from synapse.rest.media.v1._base import FileInfo from synapse.rest.media.v1.media_storage import ReadableFileWrapper from synapse.spam_checker_api import RegistrationBehaviour -from synapse.types import RoomAlias +from synapse.types import RoomAlias, UserProfile from synapse.util.async_helpers import maybe_awaitable if TYPE_CHECKING: @@ -50,7 +49,7 @@ USER_MAY_CREATE_ROOM_CALLBACK = Callable[[str], Awaitable[bool]] USER_MAY_CREATE_ROOM_ALIAS_CALLBACK = Callable[[str, RoomAlias], Awaitable[bool]] USER_MAY_PUBLISH_ROOM_CALLBACK = Callable[[str, str], Awaitable[bool]] -CHECK_USERNAME_FOR_SPAM_CALLBACK = Callable[[Dict[str, str]], Awaitable[bool]] +CHECK_USERNAME_FOR_SPAM_CALLBACK = Callable[[UserProfile], Awaitable[bool]] LEGACY_CHECK_REGISTRATION_FOR_SPAM_CALLBACK = Callable[ [ Optional[dict], @@ -383,7 +382,7 @@ async def user_may_publish_room(self, userid: str, room_id: str) -> bool: return True - async def check_username_for_spam(self, user_profile: Dict[str, str]) -> bool: + async def check_username_for_spam(self, user_profile: UserProfile) -> bool: """Checks if a user ID or display name are considered "spammy" by this server. If the server considers a username spammy, then it will not be included in diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index d27ed2be6a99..048fd4bb8225 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -19,8 +19,8 @@ from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules, Membership from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.storage.databases.main.user_directory import SearchResult from synapse.storage.roommember import ProfileInfo -from synapse.types import JsonDict from synapse.util.metrics import Measure if TYPE_CHECKING: @@ -78,7 +78,7 @@ def __init__(self, hs: "HomeServer"): async def search_users( self, user_id: str, search_term: str, limit: int - ) -> JsonDict: + ) -> SearchResult: """Searches for users in directory Returns: diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index d735c1d4616e..aa8256b36f1d 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -111,6 +111,7 @@ StateMap, UserID, UserInfo, + UserProfile, create_requester, ) from synapse.util import Clock @@ -150,6 +151,7 @@ "EventBase", "StateMap", "ProfileInfo", + "UserProfile", ] logger = logging.getLogger(__name__) diff --git a/synapse/rest/client/user_directory.py b/synapse/rest/client/user_directory.py index a47d9bd01da5..116c982ce637 100644 --- a/synapse/rest/client/user_directory.py +++ b/synapse/rest/client/user_directory.py @@ -19,7 +19,7 @@ from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.http.site import SynapseRequest -from synapse.types import JsonDict +from synapse.types import JsonMapping from ._base import client_patterns @@ -38,7 +38,7 @@ def __init__(self, hs: "HomeServer"): self.auth = hs.get_auth() self.user_directory_handler = hs.get_user_directory_handler() - async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + 
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonMapping]: """Searches for users in directory Returns: diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index e7fddd24262a..55cc9178f0b8 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -26,6 +26,8 @@ cast, ) +from typing_extensions import TypedDict + from synapse.api.errors import StoreError if TYPE_CHECKING: @@ -40,7 +42,12 @@ from synapse.storage.databases.main.state import StateFilter from synapse.storage.databases.main.state_deltas import StateDeltasStore from synapse.storage.engines import PostgresEngine, Sqlite3Engine -from synapse.types import JsonDict, get_domain_from_id, get_localpart_from_id +from synapse.types import ( + JsonDict, + UserProfile, + get_domain_from_id, + get_localpart_from_id, +) from synapse.util.caches.descriptors import cached logger = logging.getLogger(__name__) @@ -591,6 +598,11 @@ async def update_user_directory_stream_pos(self, stream_id: Optional[int]) -> No ) +class SearchResult(TypedDict): + limited: bool + results: List[UserProfile] + + class UserDirectoryStore(UserDirectoryBackgroundUpdateStore): # How many records do we calculate before sending it to # add_users_who_share_private_rooms? @@ -777,7 +789,7 @@ async def get_user_directory_stream_pos(self) -> Optional[int]: async def search_user_dir( self, user_id: str, search_term: str, limit: int - ) -> JsonDict: + ) -> SearchResult: """Searches for users in directory Returns: @@ -910,8 +922,11 @@ async def search_user_dir( # This should be unreachable. raise Exception("Unrecognized database engine") - results = await self.db_pool.execute( - "search_user_dir", self.db_pool.cursor_to_dict, sql, *args + results = cast( + List[UserProfile], + await self.db_pool.execute( + "search_user_dir", self.db_pool.cursor_to_dict, sql, *args + ), ) limited = len(results) > limit diff --git a/synapse/types.py b/synapse/types.py index 53be3583a013..5ce2a5b0a5ee 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -34,6 +34,7 @@ import attr from frozendict import frozendict from signedjson.key import decode_verify_key_bytes +from typing_extensions import TypedDict from unpaddedbase64 import decode_base64 from zope.interface import Interface @@ -63,6 +64,10 @@ # JSON types. These could be made stronger, but will do for now. # A JSON-serialisable dict. JsonDict = Dict[str, Any] +# A JSON-serialisable mapping; roughly speaking an immutable JSONDict. +# Useful when you have a TypedDict which isn't going to be mutated and you don't want +# to cast to JsonDict everywhere. +JsonMapping = Mapping[str, Any] # A JSON-serialisable object. 
JsonSerializable = object @@ -791,3 +796,9 @@ class UserInfo: is_deactivated: bool is_guest: bool is_shadow_banned: bool + + +class UserProfile(TypedDict): + user_id: str + display_name: Optional[str] + avatar_url: Optional[str] From c46065fa3d6ad000f5da6e196c769371e0e76ec5 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Fri, 18 Mar 2022 16:24:18 +0100 Subject: [PATCH 418/474] Add some type hints to datastore (#12248) * inherit `MonthlyActiveUsersStore` from `RegistrationWorkerStore` Co-authored-by: Patrick Cloke --- changelog.d/12248.misc | 1 + mypy.ini | 6 - .../storage/databases/main/group_server.py | 156 +++++++++++------- .../databases/main/monthly_active_users.py | 38 +++-- 4 files changed, 117 insertions(+), 84 deletions(-) create mode 100644 changelog.d/12248.misc diff --git a/changelog.d/12248.misc b/changelog.d/12248.misc new file mode 100644 index 000000000000..2b1290d1e143 --- /dev/null +++ b/changelog.d/12248.misc @@ -0,0 +1 @@ +Add missing type hints for storage. \ No newline at end of file diff --git a/mypy.ini b/mypy.ini index 51f47ff5be8f..d8b3b3f9e588 100644 --- a/mypy.ini +++ b/mypy.ini @@ -42,9 +42,6 @@ exclude = (?x) |synapse/storage/databases/main/cache.py |synapse/storage/databases/main/devices.py |synapse/storage/databases/main/event_federation.py - |synapse/storage/databases/main/group_server.py - |synapse/storage/databases/main/metrics.py - |synapse/storage/databases/main/monthly_active_users.py |synapse/storage/databases/main/push_rule.py |synapse/storage/databases/main/receipts.py |synapse/storage/databases/main/roommember.py @@ -87,9 +84,6 @@ exclude = (?x) |tests/state/test_v2.py |tests/storage/test_background_update.py |tests/storage/test_base.py - |tests/storage/test_client_ips.py - |tests/storage/test_database.py - |tests/storage/test_event_federation.py |tests/storage/test_id_generators.py |tests/storage/test_roommember.py |tests/test_metrics.py diff --git a/synapse/storage/databases/main/group_server.py b/synapse/storage/databases/main/group_server.py index 3f6086050bb2..0aef121d8348 100644 --- a/synapse/storage/databases/main/group_server.py +++ b/synapse/storage/databases/main/group_server.py @@ -13,13 +13,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, cast from typing_extensions import TypedDict from synapse.api.errors import SynapseError from synapse.storage._base import SQLBaseStore, db_to_json -from synapse.storage.database import DatabasePool, LoggingDatabaseConnection +from synapse.storage.database import ( + DatabasePool, + LoggingDatabaseConnection, + LoggingTransaction, +) from synapse.types import JsonDict from synapse.util import json_encoder @@ -75,7 +79,7 @@ async def get_users_in_group( ) -> List[Dict[str, Any]]: # TODO: Pagination - keyvalues = {"group_id": group_id} + keyvalues: JsonDict = {"group_id": group_id} if not include_private: keyvalues["is_public"] = True @@ -117,7 +121,7 @@ async def get_rooms_in_group( # TODO: Pagination - def _get_rooms_in_group_txn(txn): + def _get_rooms_in_group_txn(txn: LoggingTransaction) -> List[_RoomInGroup]: sql = """ SELECT room_id, is_public FROM group_rooms WHERE group_id = ? 
@@ -176,8 +180,10 @@ async def get_rooms_for_summary_by_category( * "order": int, the sort order of rooms in this category """ - def _get_rooms_for_summary_txn(txn): - keyvalues = {"group_id": group_id} + def _get_rooms_for_summary_txn( + txn: LoggingTransaction, + ) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]: + keyvalues: JsonDict = {"group_id": group_id} if not include_private: keyvalues["is_public"] = True @@ -241,7 +247,7 @@ def _get_rooms_for_summary_txn(txn): "get_rooms_for_summary", _get_rooms_for_summary_txn ) - async def get_group_categories(self, group_id): + async def get_group_categories(self, group_id: str) -> JsonDict: rows = await self.db_pool.simple_select_list( table="group_room_categories", keyvalues={"group_id": group_id}, @@ -257,7 +263,7 @@ async def get_group_categories(self, group_id): for row in rows } - async def get_group_category(self, group_id, category_id): + async def get_group_category(self, group_id: str, category_id: str) -> JsonDict: category = await self.db_pool.simple_select_one( table="group_room_categories", keyvalues={"group_id": group_id, "category_id": category_id}, @@ -269,7 +275,7 @@ async def get_group_category(self, group_id, category_id): return category - async def get_group_roles(self, group_id): + async def get_group_roles(self, group_id: str) -> JsonDict: rows = await self.db_pool.simple_select_list( table="group_roles", keyvalues={"group_id": group_id}, @@ -285,7 +291,7 @@ async def get_group_roles(self, group_id): for row in rows } - async def get_group_role(self, group_id, role_id): + async def get_group_role(self, group_id: str, role_id: str) -> JsonDict: role = await self.db_pool.simple_select_one( table="group_roles", keyvalues={"group_id": group_id, "role_id": role_id}, @@ -311,15 +317,19 @@ async def get_local_groups_for_room(self, room_id: str) -> List[str]: desc="get_local_groups_for_room", ) - async def get_users_for_summary_by_role(self, group_id, include_private=False): + async def get_users_for_summary_by_role( + self, group_id: str, include_private: bool = False + ) -> Tuple[List[JsonDict], JsonDict]: """Get the users and roles that should be included in a summary request Returns: ([users], [roles]) """ - def _get_users_for_summary_txn(txn): - keyvalues = {"group_id": group_id} + def _get_users_for_summary_txn( + txn: LoggingTransaction, + ) -> Tuple[List[JsonDict], JsonDict]: + keyvalues: JsonDict = {"group_id": group_id} if not include_private: keyvalues["is_public"] = True @@ -406,7 +416,9 @@ async def is_user_invited_to_local_group( allow_none=True, ) - async def get_users_membership_info_in_group(self, group_id, user_id): + async def get_users_membership_info_in_group( + self, group_id: str, user_id: str + ) -> JsonDict: """Get a dict describing the membership of a user in a group. 
Example if joined: @@ -421,7 +433,7 @@ async def get_users_membership_info_in_group(self, group_id, user_id): An empty dict if the user is not join/invite/etc """ - def _get_users_membership_in_group_txn(txn): + def _get_users_membership_in_group_txn(txn: LoggingTransaction) -> JsonDict: row = self.db_pool.simple_select_one_txn( txn, table="group_users", @@ -463,10 +475,14 @@ async def get_publicised_groups_for_user(self, user_id: str) -> List[str]: desc="get_publicised_groups_for_user", ) - async def get_attestations_need_renewals(self, valid_until_ms): + async def get_attestations_need_renewals( + self, valid_until_ms: int + ) -> List[Dict[str, Any]]: """Get all attestations that need to be renewed until givent time""" - def _get_attestations_need_renewals_txn(txn): + def _get_attestations_need_renewals_txn( + txn: LoggingTransaction, + ) -> List[Dict[str, Any]]: sql = """ SELECT group_id, user_id FROM group_attestations_renewals WHERE valid_until_ms <= ? @@ -478,7 +494,9 @@ def _get_attestations_need_renewals_txn(txn): "get_attestations_need_renewals", _get_attestations_need_renewals_txn ) - async def get_remote_attestation(self, group_id, user_id): + async def get_remote_attestation( + self, group_id: str, user_id: str + ) -> Optional[JsonDict]: """Get the attestation that proves the remote agrees that the user is in the group. """ @@ -504,8 +522,8 @@ async def get_joined_groups(self, user_id: str) -> List[str]: desc="get_joined_groups", ) - async def get_all_groups_for_user(self, user_id, now_token): - def _get_all_groups_for_user_txn(txn): + async def get_all_groups_for_user(self, user_id, now_token) -> List[JsonDict]: + def _get_all_groups_for_user_txn(txn: LoggingTransaction) -> List[JsonDict]: sql = """ SELECT group_id, type, membership, u.content FROM local_group_updates AS u @@ -528,15 +546,16 @@ def _get_all_groups_for_user_txn(txn): "get_all_groups_for_user", _get_all_groups_for_user_txn ) - async def get_groups_changes_for_user(self, user_id, from_token, to_token): - from_token = int(from_token) - has_changed = self._group_updates_stream_cache.has_entity_changed( + async def get_groups_changes_for_user( + self, user_id: str, from_token: int, to_token: int + ) -> List[JsonDict]: + has_changed = self._group_updates_stream_cache.has_entity_changed( # type: ignore[attr-defined] user_id, from_token ) if not has_changed: return [] - def _get_groups_changes_for_user_txn(txn): + def _get_groups_changes_for_user_txn(txn: LoggingTransaction) -> List[JsonDict]: sql = """ SELECT group_id, membership, type, u.content FROM local_group_updates AS u @@ -583,12 +602,14 @@ async def get_all_groups_changes( """ last_id = int(last_id) - has_changed = self._group_updates_stream_cache.has_any_entity_changed(last_id) + has_changed = self._group_updates_stream_cache.has_any_entity_changed(last_id) # type: ignore[attr-defined] if not has_changed: return [], current_id, False - def _get_all_groups_changes_txn(txn): + def _get_all_groups_changes_txn( + txn: LoggingTransaction, + ) -> Tuple[List[Tuple[int, tuple]], int, bool]: sql = """ SELECT stream_id, group_id, user_id, type, content FROM local_group_updates @@ -596,10 +617,13 @@ def _get_all_groups_changes_txn(txn): LIMIT ? 
""" txn.execute(sql, (last_id, current_id, limit)) - updates = [ - (stream_id, (group_id, user_id, gtype, db_to_json(content_json))) - for stream_id, group_id, user_id, gtype, content_json in txn - ] + updates = cast( + List[Tuple[int, tuple]], + [ + (stream_id, (group_id, user_id, gtype, db_to_json(content_json))) + for stream_id, group_id, user_id, gtype, content_json in txn + ], + ) limited = False upto_token = current_id @@ -633,8 +657,8 @@ async def add_room_to_summary( self, group_id: str, room_id: str, - category_id: str, - order: int, + category_id: Optional[str], + order: Optional[int], is_public: Optional[bool], ) -> None: """Add (or update) room's entry in summary. @@ -661,11 +685,11 @@ async def add_room_to_summary( def _add_room_to_summary_txn( self, - txn, + txn: LoggingTransaction, group_id: str, room_id: str, - category_id: str, - order: int, + category_id: Optional[str], + order: Optional[int], is_public: Optional[bool], ) -> None: """Add (or update) room's entry in summary. @@ -750,7 +774,7 @@ def _add_room_to_summary_txn( WHERE group_id = ? AND category_id = ? """ txn.execute(sql, (group_id, category_id)) - (order,) = txn.fetchone() + (order,) = cast(Tuple[int], txn.fetchone()) if existing: to_update = {} @@ -766,7 +790,7 @@ def _add_room_to_summary_txn( "category_id": category_id, "room_id": room_id, }, - values=to_update, + updatevalues=to_update, ) else: if is_public is None: @@ -785,7 +809,7 @@ def _add_room_to_summary_txn( ) async def remove_room_from_summary( - self, group_id: str, room_id: str, category_id: str + self, group_id: str, room_id: str, category_id: Optional[str] ) -> int: if category_id is None: category_id = _DEFAULT_CATEGORY_ID @@ -808,8 +832,8 @@ async def upsert_group_category( is_public: Optional[bool], ) -> None: """Add/update room category for group""" - insertion_values = {} - update_values = {"category_id": category_id} # This cannot be empty + insertion_values: JsonDict = {} + update_values: JsonDict = {"category_id": category_id} # This cannot be empty if profile is None: insertion_values["profile"] = "{}" @@ -844,8 +868,8 @@ async def upsert_group_role( is_public: Optional[bool], ) -> None: """Add/remove user role""" - insertion_values = {} - update_values = {"role_id": role_id} # This cannot be empty + insertion_values: JsonDict = {} + update_values: JsonDict = {"role_id": role_id} # This cannot be empty if profile is None: insertion_values["profile"] = "{}" @@ -876,8 +900,8 @@ async def add_user_to_summary( self, group_id: str, user_id: str, - role_id: str, - order: int, + role_id: Optional[str], + order: Optional[int], is_public: Optional[bool], ) -> None: """Add (or update) user's entry in summary. @@ -904,13 +928,13 @@ async def add_user_to_summary( def _add_user_to_summary_txn( self, - txn, + txn: LoggingTransaction, group_id: str, user_id: str, - role_id: str, - order: int, + role_id: Optional[str], + order: Optional[int], is_public: Optional[bool], - ): + ) -> None: """Add (or update) user's entry in summary. Args: @@ -989,7 +1013,7 @@ def _add_user_to_summary_txn( WHERE group_id = ? AND role_id = ? 
""" txn.execute(sql, (group_id, role_id)) - (order,) = txn.fetchone() + (order,) = cast(Tuple[int], txn.fetchone()) if existing: to_update = {} @@ -1005,7 +1029,7 @@ def _add_user_to_summary_txn( "role_id": role_id, "user_id": user_id, }, - values=to_update, + updatevalues=to_update, ) else: if is_public is None: @@ -1024,7 +1048,7 @@ def _add_user_to_summary_txn( ) async def remove_user_from_summary( - self, group_id: str, user_id: str, role_id: str + self, group_id: str, user_id: str, role_id: Optional[str] ) -> int: if role_id is None: role_id = _DEFAULT_ROLE_ID @@ -1065,7 +1089,7 @@ async def add_user_to_group( Optional if the user and group are on the same server """ - def _add_user_to_group_txn(txn): + def _add_user_to_group_txn(txn: LoggingTransaction) -> None: self.db_pool.simple_insert_txn( txn, table="group_users", @@ -1108,7 +1132,7 @@ def _add_user_to_group_txn(txn): await self.db_pool.runInteraction("add_user_to_group", _add_user_to_group_txn) async def remove_user_from_group(self, group_id: str, user_id: str) -> None: - def _remove_user_from_group_txn(txn): + def _remove_user_from_group_txn(txn: LoggingTransaction) -> None: self.db_pool.simple_delete_txn( txn, table="group_users", @@ -1159,7 +1183,7 @@ async def update_room_in_group_visibility( ) async def remove_room_from_group(self, group_id: str, room_id: str) -> None: - def _remove_room_from_group_txn(txn): + def _remove_room_from_group_txn(txn: LoggingTransaction) -> None: self.db_pool.simple_delete_txn( txn, table="group_rooms", @@ -1216,7 +1240,9 @@ async def register_user_group_membership( content = content or {} - def _register_user_group_membership_txn(txn, next_id): + def _register_user_group_membership_txn( + txn: LoggingTransaction, next_id: int + ) -> int: # TODO: Upsert? self.db_pool.simple_delete_txn( txn, @@ -1249,7 +1275,7 @@ def _register_user_group_membership_txn(txn, next_id): ), }, ) - self._group_updates_stream_cache.entity_has_changed(user_id, next_id) + self._group_updates_stream_cache.entity_has_changed(user_id, next_id) # type: ignore[attr-defined] # TODO: Insert profile to ensure it comes down stream if its a join. 
@@ -1289,7 +1315,7 @@ def _register_user_group_membership_txn(txn, next_id): return next_id - async with self._group_updates_id_gen.get_next() as next_id: + async with self._group_updates_id_gen.get_next() as next_id: # type: ignore[attr-defined] res = await self.db_pool.runInteraction( "register_user_group_membership", _register_user_group_membership_txn, @@ -1298,7 +1324,13 @@ def _register_user_group_membership_txn(txn, next_id): return res async def create_group( - self, group_id, user_id, name, avatar_url, short_description, long_description + self, + group_id: str, + user_id: str, + name: str, + avatar_url: str, + short_description: str, + long_description: str, ) -> None: await self.db_pool.simple_insert( table="groups", @@ -1313,7 +1345,7 @@ async def create_group( desc="create_group", ) - async def update_group_profile(self, group_id, profile): + async def update_group_profile(self, group_id: str, profile: JsonDict) -> None: await self.db_pool.simple_update_one( table="groups", keyvalues={"group_id": group_id}, @@ -1361,8 +1393,8 @@ async def remove_attestation_renewal(self, group_id: str, user_id: str) -> int: desc="remove_attestation_renewal", ) - def get_group_stream_token(self): - return self._group_updates_id_gen.get_current_token() + def get_group_stream_token(self) -> int: + return self._group_updates_id_gen.get_current_token() # type: ignore[attr-defined] async def delete_group(self, group_id: str) -> None: """Deletes a group fully from the database. @@ -1371,7 +1403,7 @@ async def delete_group(self, group_id: str) -> None: group_id: The group ID to delete. """ - def _delete_group_txn(txn): + def _delete_group_txn(txn: LoggingTransaction) -> None: tables = [ "groups", "group_users", diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py index e9a0cdc6be94..216622964aa7 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py @@ -12,15 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging -from typing import TYPE_CHECKING, Dict, List, Optional +from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, cast from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( DatabasePool, LoggingDatabaseConnection, + LoggingTransaction, make_in_list_sql_clause, ) +from synapse.storage.databases.main.registration import RegistrationWorkerStore from synapse.util.caches.descriptors import cached from synapse.util.threepids import canonicalise_email @@ -56,7 +58,7 @@ async def get_monthly_active_count(self) -> int: Number of current monthly active users """ - def _count_users(txn): + def _count_users(txn: LoggingTransaction) -> int: # Exclude app service users sql = """ SELECT COUNT(*) @@ -66,7 +68,7 @@ def _count_users(txn): WHERE (users.appservice_id IS NULL OR users.appservice_id = ''); """ txn.execute(sql) - (count,) = txn.fetchone() + (count,) = cast(Tuple[int], txn.fetchone()) return count return await self.db_pool.runInteraction("count_users", _count_users) @@ -84,7 +86,7 @@ async def get_monthly_active_count_by_service(self) -> Dict[str, int]: """ - def _count_users_by_service(txn): + def _count_users_by_service(txn: LoggingTransaction) -> Dict[str, int]: sql = """ SELECT COALESCE(appservice_id, 'native'), COUNT(*) FROM monthly_active_users @@ -93,7 +95,7 @@ def _count_users_by_service(txn): """ txn.execute(sql) - result = txn.fetchall() + result = cast(List[Tuple[str, int]], txn.fetchall()) return dict(result) return await self.db_pool.runInteraction( @@ -141,12 +143,12 @@ async def user_last_seen_monthly_active(self, user_id: str) -> Optional[int]: ) @wrap_as_background_process("reap_monthly_active_users") - async def reap_monthly_active_users(self): + async def reap_monthly_active_users(self) -> None: """Cleans out monthly active user table to ensure that no stale entries exist. """ - def _reap_users(txn, reserved_users): + def _reap_users(txn: LoggingTransaction, reserved_users: List[str]) -> None: """ Args: reserved_users (tuple): reserved users to preserve @@ -210,10 +212,10 @@ def _reap_users(txn, reserved_users): # is racy. # Have resolved to invalidate the whole cache for now and do # something about it if and when the perf becomes significant - self._invalidate_all_cache_and_stream( + self._invalidate_all_cache_and_stream( # type: ignore[attr-defined] txn, self.user_last_seen_monthly_active ) - self._invalidate_cache_and_stream(txn, self.get_monthly_active_count, ()) + self._invalidate_cache_and_stream(txn, self.get_monthly_active_count, ()) # type: ignore[attr-defined] reserved_users = await self.get_registered_reserved_users() await self.db_pool.runInteraction( @@ -221,7 +223,7 @@ def _reap_users(txn, reserved_users): ) -class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore): +class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore, RegistrationWorkerStore): def __init__( self, database: DatabasePool, @@ -242,13 +244,15 @@ def __init__( hs.config.server.mau_limits_reserved_threepids[: self._max_mau_value], ) - def _initialise_reserved_users(self, txn, threepids): + def _initialise_reserved_users( + self, txn: LoggingTransaction, threepids: List[dict] + ) -> None: """Ensures that reserved threepids are accounted for in the MAU table, should be called on start up. 
Args: - txn (cursor): - threepids (list[dict]): List of threepid dicts to reserve + txn: + threepids: List of threepid dicts to reserve """ # XXX what is this function trying to achieve? It upserts into @@ -299,7 +303,9 @@ async def upsert_monthly_active_user(self, user_id: str) -> None: "upsert_monthly_active_user", self.upsert_monthly_active_user_txn, user_id ) - def upsert_monthly_active_user_txn(self, txn, user_id): + def upsert_monthly_active_user_txn( + self, txn: LoggingTransaction, user_id: str + ) -> None: """Updates or inserts monthly active user member We consciously do not call is_support_txn from this method because it @@ -336,7 +342,7 @@ def upsert_monthly_active_user_txn(self, txn, user_id): txn, self.user_last_seen_monthly_active, (user_id,) ) - async def populate_monthly_active_users(self, user_id): + async def populate_monthly_active_users(self, user_id: str) -> None: """Checks on the state of monthly active user limits and optionally add the user to the monthly active tables @@ -345,7 +351,7 @@ async def populate_monthly_active_users(self, user_id): """ if self._limit_usage_by_mau or self._mau_stats_only: # Trial users and guests should not be included as part of MAU group - is_guest = await self.is_guest(user_id) + is_guest = await self.is_guest(user_id) # type: ignore[attr-defined] if is_guest: return is_trial = await self.is_trial_user(user_id) From 2177e356bc4b62e7a84c6fbee1d48de1abc940b5 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 18 Mar 2022 12:51:27 -0400 Subject: [PATCH 419/474] Sync more worker regexes in the documentation. (#12243) --- changelog.d/12243.doc | 1 + docs/workers.md | 30 ++++++++++++++---------------- 2 files changed, 15 insertions(+), 16 deletions(-) create mode 100644 changelog.d/12243.doc diff --git a/changelog.d/12243.doc b/changelog.d/12243.doc new file mode 100644 index 000000000000..b2031f0a4039 --- /dev/null +++ b/changelog.d/12243.doc @@ -0,0 +1 @@ +Remove incorrect prefixes in the worker documentation for some endpoints. diff --git a/docs/workers.md b/docs/workers.md index 8751134e654d..9eb4194e4dcd 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -185,8 +185,8 @@ worker: refer to the [stream writers](#stream-writers) section below for further information. # Sync requests - ^/_matrix/client/(v2_alpha|r0|v3)/sync$ - ^/_matrix/client/(api/v1|v2_alpha|r0|v3)/events$ + ^/_matrix/client/(r0|v3)/sync$ + ^/_matrix/client/(api/v1|r0|v3)/events$ ^/_matrix/client/(api/v1|r0|v3)/initialSync$ ^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$ @@ -200,13 +200,9 @@ information. ^/_matrix/federation/v1/query/ ^/_matrix/federation/v1/make_join/ ^/_matrix/federation/v1/make_leave/ - ^/_matrix/federation/v1/send_join/ - ^/_matrix/federation/v2/send_join/ - ^/_matrix/federation/v1/send_leave/ - ^/_matrix/federation/v2/send_leave/ - ^/_matrix/federation/v1/invite/ - ^/_matrix/federation/v2/invite/ - ^/_matrix/federation/v1/query_auth/ + ^/_matrix/federation/(v1|v2)/send_join/ + ^/_matrix/federation/(v1|v2)/send_leave/ + ^/_matrix/federation/(v1|v2)/invite/ ^/_matrix/federation/v1/event_auth/ ^/_matrix/federation/v1/exchange_third_party_invite/ ^/_matrix/federation/v1/user/devices/ @@ -274,6 +270,8 @@ information. Additionally, the following REST endpoints can be handled for GET requests: ^/_matrix/federation/v1/groups/ + ^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/ + ^/_matrix/client/(r0|v3|unstable)/groups/ Pagination requests can also be handled, but all requests for a given room must be routed to the same instance. 
Additionally, care must be taken to @@ -397,23 +395,23 @@ the stream writer for the `typing` stream: The following endpoints should be routed directly to the worker configured as the stream writer for the `to_device` stream: - ^/_matrix/client/(api/v1|r0|v3|unstable)/sendToDevice/ + ^/_matrix/client/(r0|v3|unstable)/sendToDevice/ ##### The `account_data` stream The following endpoints should be routed directly to the worker configured as the stream writer for the `account_data` stream: - ^/_matrix/client/(api/v1|r0|v3|unstable)/.*/tags - ^/_matrix/client/(api/v1|r0|v3|unstable)/.*/account_data + ^/_matrix/client/(r0|v3|unstable)/.*/tags + ^/_matrix/client/(r0|v3|unstable)/.*/account_data ##### The `receipts` stream The following endpoints should be routed directly to the worker configured as the stream writer for the `receipts` stream: - ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/receipt - ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/read_markers + ^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt + ^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers ##### The `presence` stream @@ -528,7 +526,7 @@ Note that if a reverse proxy is used , then `/_matrix/media/` must be routed for Handles searches in the user directory. It can handle REST endpoints matching the following regular expressions: - ^/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$ + ^/_matrix/client/(r0|v3|unstable)/user_directory/search$ When using this worker you must also set `update_user_directory: False` in the shared configuration file to stop the main synapse running background @@ -540,7 +538,7 @@ Proxies some frequently-requested client endpoints to add caching and remove load from the main synapse. It can handle REST endpoints matching the following regular expressions: - ^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload + ^/_matrix/client/(r0|v3|unstable)/keys/upload If `use_presence` is False in the homeserver config, it can also handle REST endpoints matching the following regular expressions: From 80e0e1f35e6b1cdfa0267f9c40a6f212b7d774de Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 18 Mar 2022 13:15:45 -0400 Subject: [PATCH 420/474] Only fetch thread participation for events with threads. (#12228) We fetch the thread summary in two phases: 1. The summary that is shared by all users (count of messages and latest event). 2. Whether the requesting user has participated in the thread. There's no use in attempting step 2 for events which did not return a summary from step 1. --- changelog.d/12228.bugfix | 1 + synapse/storage/databases/main/relations.py | 4 +- tests/rest/client/test_relations.py | 509 +++++++++++--------- tests/server.py | 20 +- 4 files changed, 289 insertions(+), 245 deletions(-) create mode 100644 changelog.d/12228.bugfix diff --git a/changelog.d/12228.bugfix b/changelog.d/12228.bugfix new file mode 100644 index 000000000000..47557771399e --- /dev/null +++ b/changelog.d/12228.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in v1.53.0 where an unnecessary query could be performed when fetching bundled aggregations for threads. diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index c4869d64e663..af2334a65ea0 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -857,7 +857,9 @@ async def get_bundled_aggregations( summaries = await self._get_thread_summaries(events_by_id.keys()) # Only fetch participated for a limited selection based on what had # summaries. 
- participated = await self._get_threads_participated(summaries.keys(), user_id) + participated = await self._get_threads_participated( + [event_id for event_id, summary in summaries.items() if summary], user_id + ) for event_id, summary in summaries.items(): if summary: thread_count, latest_thread_event, edit = summary diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index f3741b300185..329690f8f7c8 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -15,7 +15,7 @@ import itertools import urllib.parse -from typing import Any, Dict, List, Optional, Tuple +from typing import Any, Callable, Dict, List, Optional, Tuple from unittest.mock import patch from twisted.test.proto_helpers import MemoryReactor @@ -155,6 +155,16 @@ def _get_aggregations(self) -> List[JsonDict]: self.assertEqual(200, channel.code, channel.json_body) return channel.json_body["chunk"] + def _find_event_in_chunk(self, events: List[JsonDict]) -> JsonDict: + """ + Find the parent event in a chunk of events and assert that it has the proper bundled aggregations. + """ + for event in events: + if event["event_id"] == self.parent_id: + return event + + raise AssertionError(f"Event {self.parent_id} not found in chunk") + class RelationsTestCase(BaseRelationsTestCase): def test_send_relation(self) -> None: @@ -291,202 +301,6 @@ def test_aggregation_must_be_annotation(self) -> None: ) self.assertEqual(400, channel.code, channel.json_body) - @unittest.override_config({"experimental_features": {"msc3666_enabled": True}}) - def test_bundled_aggregations(self) -> None: - """ - Test that annotations, references, and threads get correctly bundled. - - Note that this doesn't test against /relations since only thread relations - get bundled via that API. See test_aggregation_get_event_for_thread. - - See test_edit for a similar test for edits. - """ - # Setup by sending a variety of relations. - self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token - ) - self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b") - - channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test") - reply_1 = channel.json_body["event_id"] - - channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test") - reply_2 = channel.json_body["event_id"] - - self._send_relation(RelationTypes.THREAD, "m.room.test") - - channel = self._send_relation(RelationTypes.THREAD, "m.room.test") - thread_2 = channel.json_body["event_id"] - - def assert_bundle(event_json: JsonDict) -> None: - """Assert the expected values of the bundled aggregations.""" - relations_dict = event_json["unsigned"].get("m.relations") - - # Ensure the fields are as expected. - self.assertCountEqual( - relations_dict.keys(), - ( - RelationTypes.ANNOTATION, - RelationTypes.REFERENCE, - RelationTypes.THREAD, - ), - ) - - # Check the values of each field. 
- self.assertEqual( - { - "chunk": [ - {"type": "m.reaction", "key": "a", "count": 2}, - {"type": "m.reaction", "key": "b", "count": 1}, - ] - }, - relations_dict[RelationTypes.ANNOTATION], - ) - - self.assertEqual( - {"chunk": [{"event_id": reply_1}, {"event_id": reply_2}]}, - relations_dict[RelationTypes.REFERENCE], - ) - - self.assertEqual( - 2, - relations_dict[RelationTypes.THREAD].get("count"), - ) - self.assertTrue( - relations_dict[RelationTypes.THREAD].get("current_user_participated") - ) - # The latest thread event has some fields that don't matter. - self.assert_dict( - { - "content": { - "m.relates_to": { - "event_id": self.parent_id, - "rel_type": RelationTypes.THREAD, - } - }, - "event_id": thread_2, - "sender": self.user_id, - "type": "m.room.test", - }, - relations_dict[RelationTypes.THREAD].get("latest_event"), - ) - - # Request the event directly. - channel = self.make_request( - "GET", - f"/rooms/{self.room}/event/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - assert_bundle(channel.json_body) - - # Request the room messages. - channel = self.make_request( - "GET", - f"/rooms/{self.room}/messages?dir=b", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - assert_bundle(self._find_event_in_chunk(channel.json_body["chunk"])) - - # Request the room context. - channel = self.make_request( - "GET", - f"/rooms/{self.room}/context/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - assert_bundle(channel.json_body["event"]) - - # Request sync. - channel = self.make_request("GET", "/sync", access_token=self.user_token) - self.assertEqual(200, channel.code, channel.json_body) - room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] - self.assertTrue(room_timeline["limited"]) - assert_bundle(self._find_event_in_chunk(room_timeline["events"])) - - # Request search. - channel = self.make_request( - "POST", - "/search", - # Search term matches the parent message. - content={"search_categories": {"room_events": {"search_term": "Hi"}}}, - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - chunk = [ - result["result"] - for result in channel.json_body["search_categories"]["room_events"][ - "results" - ] - ] - assert_bundle(self._find_event_in_chunk(chunk)) - - def test_aggregation_get_event_for_annotation(self) -> None: - """Test that annotations do not get bundled aggregations included - when directly requested. - """ - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - annotation_id = channel.json_body["event_id"] - - # Annotate the annotation. - self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=annotation_id - ) - - channel = self.make_request( - "GET", - f"/rooms/{self.room}/event/{annotation_id}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - self.assertIsNone(channel.json_body["unsigned"].get("m.relations")) - - def test_aggregation_get_event_for_thread(self) -> None: - """Test that threads get bundled aggregations included when directly requested.""" - channel = self._send_relation(RelationTypes.THREAD, "m.room.test") - thread_id = channel.json_body["event_id"] - - # Annotate the annotation. 
- self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=thread_id - ) - - channel = self.make_request( - "GET", - f"/rooms/{self.room}/event/{thread_id}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - self.assertEqual( - channel.json_body["unsigned"].get("m.relations"), - { - RelationTypes.ANNOTATION: { - "chunk": [{"count": 1, "key": "a", "type": "m.reaction"}] - }, - }, - ) - - # It should also be included when the entire thread is requested. - channel = self.make_request( - "GET", - f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - self.assertEqual(len(channel.json_body["chunk"]), 1) - - thread_message = channel.json_body["chunk"][0] - self.assertEqual( - thread_message["unsigned"].get("m.relations"), - { - RelationTypes.ANNOTATION: { - "chunk": [{"count": 1, "key": "a", "type": "m.reaction"}] - }, - }, - ) - def test_ignore_invalid_room(self) -> None: """Test that we ignore invalid relations over federation.""" # Create another room and send a message in it. @@ -796,7 +610,7 @@ def test_edit_thread(self) -> None: threaded_event_id = channel.json_body["event_id"] new_body = {"msgtype": "m.text", "body": "I've been edited!"} - channel = self._send_relation( + self._send_relation( RelationTypes.REPLACE, "m.room.message", content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body}, @@ -836,7 +650,7 @@ def test_edit_edit(self) -> None: edit_event_id = channel.json_body["event_id"] # Edit the edit event. - channel = self._send_relation( + self._send_relation( RelationTypes.REPLACE, "m.room.message", content={ @@ -912,16 +726,6 @@ def test_unknown_relations(self) -> None: self.assertEqual(200, channel.code, channel.json_body) self.assertEqual(channel.json_body["chunk"], []) - def _find_event_in_chunk(self, events: List[JsonDict]) -> JsonDict: - """ - Find the parent event in a chunk of events and assert that it has the proper bundled aggregations. - """ - for event in events: - if event["event_id"] == self.parent_id: - return event - - raise AssertionError(f"Event {self.parent_id} not found in chunk") - def test_background_update(self) -> None: """Test the event_arbitrary_relations background update.""" channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") @@ -981,34 +785,6 @@ def test_background_update(self) -> None: [annotation_event_id_good, thread_event_id], ) - def test_bundled_aggregations_with_filter(self) -> None: - """ - If "unsigned" is an omitted field (due to filtering), adding the bundled - aggregations should not break. - - Note that the spec allows for a server to return additional fields beyond - what is specified. - """ - self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - - # Note that the sync filter does not include "unsigned" as a field. - filter = urllib.parse.quote_plus( - b'{"event_fields": ["content", "event_id"], "room": {"timeline": {"limit": 3}}}' - ) - channel = self.make_request( - "GET", f"/sync?filter={filter}", access_token=self.user_token - ) - self.assertEqual(200, channel.code, channel.json_body) - - # Ensure the timeline is limited, find the parent event. - room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] - self.assertTrue(room_timeline["limited"]) - parent_event = self._find_event_in_chunk(room_timeline["events"]) - - # Ensure there's bundled aggregations on it. 
- self.assertIn("unsigned", parent_event) - self.assertIn("m.relations", parent_event["unsigned"]) - class RelationPaginationTestCase(BaseRelationsTestCase): def test_basic_paginate_relations(self) -> None: @@ -1255,7 +1031,7 @@ def test_aggregation_pagination_within_group(self) -> None: idx += 1 # Also send a different type of reaction so that we test we don't see it - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a") + self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a") prev_token = "" found_event_ids: List[str] = [] @@ -1291,6 +1067,263 @@ def test_aggregation_pagination_within_group(self) -> None: self.assertEqual(found_event_ids, expected_event_ids) +class BundledAggregationsTestCase(BaseRelationsTestCase): + """ + See RelationsTestCase.test_edit for a similar test for edits. + + Note that this doesn't test against /relations since only thread relations + get bundled via that API. See test_aggregation_get_event_for_thread. + """ + + def _test_bundled_aggregations( + self, + relation_type: str, + assertion_callable: Callable[[JsonDict], None], + expected_db_txn_for_event: int, + ) -> None: + """ + Makes requests to various endpoints which should include bundled aggregations + and then calls an assertion function on the bundled aggregations. + + Args: + relation_type: The field to search for in the `m.relations` field in unsigned. + assertion_callable: Called with the contents of unsigned["m.relations"][relation_type] + for relation-specific assertions. + expected_db_txn_for_event: The number of database transactions which + are expected for a call to /event/. + """ + + def assert_bundle(event_json: JsonDict) -> None: + """Assert the expected values of the bundled aggregations.""" + relations_dict = event_json["unsigned"].get("m.relations") + + # Ensure the fields are as expected. + self.assertCountEqual(relations_dict.keys(), (relation_type,)) + assertion_callable(relations_dict[relation_type]) + + # Request the event directly. + channel = self.make_request( + "GET", + f"/rooms/{self.room}/event/{self.parent_id}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + assert_bundle(channel.json_body) + assert channel.resource_usage is not None + self.assertEqual(channel.resource_usage.db_txn_count, expected_db_txn_for_event) + + # Request the room messages. + channel = self.make_request( + "GET", + f"/rooms/{self.room}/messages?dir=b", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + assert_bundle(self._find_event_in_chunk(channel.json_body["chunk"])) + + # Request the room context. + channel = self.make_request( + "GET", + f"/rooms/{self.room}/context/{self.parent_id}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + assert_bundle(channel.json_body["event"]) + + # Request sync. + filter = urllib.parse.quote_plus(b'{"room": {"timeline": {"limit": 4}}}') + channel = self.make_request( + "GET", f"/sync?filter={filter}", access_token=self.user_token + ) + self.assertEqual(200, channel.code, channel.json_body) + room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] + self.assertTrue(room_timeline["limited"]) + assert_bundle(self._find_event_in_chunk(room_timeline["events"])) + + # Request search. + channel = self.make_request( + "POST", + "/search", + # Search term matches the parent message. 
+ content={"search_categories": {"room_events": {"search_term": "Hi"}}}, + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + chunk = [ + result["result"] + for result in channel.json_body["search_categories"]["room_events"][ + "results" + ] + ] + assert_bundle(self._find_event_in_chunk(chunk)) + + @unittest.override_config({"experimental_features": {"msc3666_enabled": True}}) + def test_annotation(self) -> None: + """ + Test that annotations get correctly bundled. + """ + # Setup by sending a variety of relations. + self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + self._send_relation( + RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token + ) + self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b") + + def assert_annotations(bundled_aggregations: JsonDict) -> None: + self.assertEqual( + { + "chunk": [ + {"type": "m.reaction", "key": "a", "count": 2}, + {"type": "m.reaction", "key": "b", "count": 1}, + ] + }, + bundled_aggregations, + ) + + self._test_bundled_aggregations(RelationTypes.ANNOTATION, assert_annotations, 7) + + @unittest.override_config({"experimental_features": {"msc3666_enabled": True}}) + def test_reference(self) -> None: + """ + Test that references get correctly bundled. + """ + channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test") + reply_1 = channel.json_body["event_id"] + + channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test") + reply_2 = channel.json_body["event_id"] + + def assert_annotations(bundled_aggregations: JsonDict) -> None: + self.assertEqual( + {"chunk": [{"event_id": reply_1}, {"event_id": reply_2}]}, + bundled_aggregations, + ) + + self._test_bundled_aggregations(RelationTypes.REFERENCE, assert_annotations, 7) + + @unittest.override_config({"experimental_features": {"msc3666_enabled": True}}) + def test_thread(self) -> None: + """ + Test that threads get correctly bundled. + """ + self._send_relation(RelationTypes.THREAD, "m.room.test") + channel = self._send_relation(RelationTypes.THREAD, "m.room.test") + thread_2 = channel.json_body["event_id"] + + def assert_annotations(bundled_aggregations: JsonDict) -> None: + self.assertEqual(2, bundled_aggregations.get("count")) + self.assertTrue(bundled_aggregations.get("current_user_participated")) + # The latest thread event has some fields that don't matter. + self.assert_dict( + { + "content": { + "m.relates_to": { + "event_id": self.parent_id, + "rel_type": RelationTypes.THREAD, + } + }, + "event_id": thread_2, + "sender": self.user_id, + "type": "m.room.test", + }, + bundled_aggregations.get("latest_event"), + ) + + self._test_bundled_aggregations(RelationTypes.THREAD, assert_annotations, 9) + + def test_aggregation_get_event_for_annotation(self) -> None: + """Test that annotations do not get bundled aggregations included + when directly requested. + """ + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + annotation_id = channel.json_body["event_id"] + + # Annotate the annotation. 
+ self._send_relation( + RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=annotation_id + ) + + channel = self.make_request( + "GET", + f"/rooms/{self.room}/event/{annotation_id}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + self.assertIsNone(channel.json_body["unsigned"].get("m.relations")) + + def test_aggregation_get_event_for_thread(self) -> None: + """Test that threads get bundled aggregations included when directly requested.""" + channel = self._send_relation(RelationTypes.THREAD, "m.room.test") + thread_id = channel.json_body["event_id"] + + # Annotate the annotation. + self._send_relation( + RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=thread_id + ) + + channel = self.make_request( + "GET", + f"/rooms/{self.room}/event/{thread_id}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + self.assertEqual( + channel.json_body["unsigned"].get("m.relations"), + { + RelationTypes.ANNOTATION: { + "chunk": [{"count": 1, "key": "a", "type": "m.reaction"}] + }, + }, + ) + + # It should also be included when the entire thread is requested. + channel = self.make_request( + "GET", + f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + self.assertEqual(len(channel.json_body["chunk"]), 1) + + thread_message = channel.json_body["chunk"][0] + self.assertEqual( + thread_message["unsigned"].get("m.relations"), + { + RelationTypes.ANNOTATION: { + "chunk": [{"count": 1, "key": "a", "type": "m.reaction"}] + }, + }, + ) + + def test_bundled_aggregations_with_filter(self) -> None: + """ + If "unsigned" is an omitted field (due to filtering), adding the bundled + aggregations should not break. + + Note that the spec allows for a server to return additional fields beyond + what is specified. + """ + self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + + # Note that the sync filter does not include "unsigned" as a field. + filter = urllib.parse.quote_plus( + b'{"event_fields": ["content", "event_id"], "room": {"timeline": {"limit": 3}}}' + ) + channel = self.make_request( + "GET", f"/sync?filter={filter}", access_token=self.user_token + ) + self.assertEqual(200, channel.code, channel.json_body) + + # Ensure the timeline is limited, find the parent event. + room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] + self.assertTrue(room_timeline["limited"]) + parent_event = self._find_event_in_chunk(room_timeline["events"]) + + # Ensure there's bundled aggregations on it. + self.assertIn("unsigned", parent_event) + self.assertIn("m.relations", parent_event["unsigned"]) + + class RelationRedactionTestCase(BaseRelationsTestCase): """ Test the behaviour of relations when the parent or child event is redacted. 
diff --git a/tests/server.py b/tests/server.py
index 82990c2eb9df..6ce2a17bf442 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -54,13 +54,18 @@
     ITransport,
 )
 from twisted.python.failure import Failure
-from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock
+from twisted.test.proto_helpers import (
+    AccumulatingProtocol,
+    MemoryReactor,
+    MemoryReactorClock,
+)
 from twisted.web.http_headers import Headers
 from twisted.web.resource import IResource
 from twisted.web.server import Request, Site
 
 from synapse.config.database import DatabaseConnectionConfig
 from synapse.http.site import SynapseRequest
+from synapse.logging.context import ContextResourceUsage
 from synapse.server import HomeServer
 from synapse.storage import DataStore
 from synapse.storage.engines import PostgresEngine, create_engine
@@ -88,18 +93,19 @@ class TimedOutException(Exception):
     """
 
 
-@attr.s
+@attr.s(auto_attribs=True)
 class FakeChannel:
     """
     A fake Twisted Web Channel (the part that interfaces with the wire).
     """
 
-    site = attr.ib(type=Union[Site, "FakeSite"])
-    _reactor = attr.ib()
-    result = attr.ib(type=dict, default=attr.Factory(dict))
-    _ip = attr.ib(type=str, default="127.0.0.1")
+    site: Union[Site, "FakeSite"]
+    _reactor: MemoryReactor
+    result: dict = attr.Factory(dict)
+    _ip: str = "127.0.0.1"
     _producer: Optional[Union[IPullProducer, IPushProducer]] = None
+    resource_usage: Optional[ContextResourceUsage] = None
 
     @property
     def json_body(self):
@@ -168,6 +174,8 @@ def unregisterProducer(self):
 
     def requestDone(self, _self):
         self.result["done"] = True
+        if isinstance(_self, SynapseRequest):
+            self.resource_usage = _self.logcontext.get_resource_usage()
 
     def getPeer(self):
         # We give an address so that getClientIP returns a non null entry,

From 8fe930c215f69913fbcd96d609ec6950644e4ec4 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Fri, 18 Mar 2022 13:49:32 -0400
Subject: [PATCH 421/474] Move get_bundled_aggregations to relations handler.
 (#12237)

The get_bundled_aggregations code is fairly high-level and uses a lot of
store methods, so we move it into the handler, as that seems like a better
fit.
---
 changelog.d/12237.misc | 1 +
 synapse/events/utils.py | 2 +-
 synapse/handlers/pagination.py | 5 +-
 synapse/handlers/relations.py | 151 +++++++++++++++++++-
 synapse/handlers/room.py | 5 +-
 synapse/handlers/search.py | 3 +-
 synapse/handlers/sync.py | 9 +-
 synapse/rest/client/room.py | 3 +-
 synapse/storage/databases/main/relations.py | 151 +-------------------
 9 files changed, 173 insertions(+), 157 deletions(-)
 create mode 100644 changelog.d/12237.misc

diff --git a/changelog.d/12237.misc b/changelog.d/12237.misc
new file mode 100644
index 000000000000..41c9dcbd37f6
--- /dev/null
+++ b/changelog.d/12237.misc
@@ -0,0 +1 @@
+Refactor the relations endpoints to add a `RelationsHandler`.
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index a0520068e0f9..71200621277e 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -38,8 +38,8 @@
 from . import EventBase
 
 if TYPE_CHECKING:
+    from synapse.handlers.relations import BundledAggregations
     from synapse.server import HomeServer
-    from synapse.storage.databases.main.relations import BundledAggregations
 
 
 # Split strings on "." but not "\."
This uses a negative lookbehind assertion for '\' diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 41679f7f866b..876b879483e7 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -134,6 +134,7 @@ def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() self._server_name = hs.hostname self._room_shutdown_handler = hs.get_room_shutdown_handler() + self._relations_handler = hs.get_relations_handler() self.pagination_lock = ReadWriteLock() # IDs of rooms in which there currently an active purge *or delete* operation. @@ -539,7 +540,9 @@ async def get_messages( state_dict = await self.store.get_events(list(state_ids.values())) state = state_dict.values() - aggregations = await self.store.get_bundled_aggregations(events, user_id) + aggregations = await self._relations_handler.get_bundled_aggregations( + events, user_id + ) time_now = self.clock.time_msec() diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py index 8e475475ad02..57135d45197b 100644 --- a/synapse/handlers/relations.py +++ b/synapse/handlers/relations.py @@ -12,18 +12,53 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING, Dict, Iterable, Optional, cast +import attr +from frozendict import frozendict + +from synapse.api.constants import RelationTypes from synapse.api.errors import SynapseError +from synapse.events import EventBase from synapse.types import JsonDict, Requester, StreamToken if TYPE_CHECKING: from synapse.server import HomeServer + from synapse.storage.databases.main import DataStore logger = logging.getLogger(__name__) +@attr.s(slots=True, frozen=True, auto_attribs=True) +class _ThreadAggregation: + # The latest event in the thread. + latest_event: EventBase + # The latest edit to the latest event in the thread. + latest_edit: Optional[EventBase] + # The total number of events in the thread. + count: int + # True if the current user has sent an event to the thread. + current_user_participated: bool + + +@attr.s(slots=True, auto_attribs=True) +class BundledAggregations: + """ + The bundled aggregations for an event. + + Some values require additional processing during serialization. + """ + + annotations: Optional[JsonDict] = None + references: Optional[JsonDict] = None + replace: Optional[EventBase] = None + thread: Optional[_ThreadAggregation] = None + + def __bool__(self) -> bool: + return bool(self.annotations or self.references or self.replace or self.thread) + + class RelationsHandler: def __init__(self, hs: "HomeServer"): self._main_store = hs.get_datastores().main @@ -103,7 +138,7 @@ async def get_relations( ) # The relations returned for the requested event do include their # bundled aggregations. - aggregations = await self._main_store.get_bundled_aggregations( + aggregations = await self.get_bundled_aggregations( events, requester.user.to_string() ) serialized_events = self._event_serializer.serialize_events( @@ -115,3 +150,115 @@ async def get_relations( return_value["original_event"] = original_event return return_value + + async def _get_bundled_aggregation_for_event( + self, event: EventBase, user_id: str + ) -> Optional[BundledAggregations]: + """Generate bundled aggregations for an event. + + Note that this does not use a cache, but depends on cached methods. + + Args: + event: The event to calculate bundled aggregations for. 
+ user_id: The user requesting the bundled aggregations. + + Returns: + The bundled aggregations for an event, if bundled aggregations are + enabled and the event can have bundled aggregations. + """ + + # Do not bundle aggregations for an event which represents an edit or an + # annotation. It does not make sense for them to have related events. + relates_to = event.content.get("m.relates_to") + if isinstance(relates_to, (dict, frozendict)): + relation_type = relates_to.get("rel_type") + if relation_type in (RelationTypes.ANNOTATION, RelationTypes.REPLACE): + return None + + event_id = event.event_id + room_id = event.room_id + + # The bundled aggregations to include, a mapping of relation type to a + # type-specific value. Some types include the direct return type here + # while others need more processing during serialization. + aggregations = BundledAggregations() + + annotations = await self._main_store.get_aggregation_groups_for_event( + event_id, room_id + ) + if annotations.chunk: + aggregations.annotations = await annotations.to_dict( + cast("DataStore", self) + ) + + references = await self._main_store.get_relations_for_event( + event_id, event, room_id, RelationTypes.REFERENCE, direction="f" + ) + if references.chunk: + aggregations.references = await references.to_dict(cast("DataStore", self)) + + # Store the bundled aggregations in the event metadata for later use. + return aggregations + + async def get_bundled_aggregations( + self, events: Iterable[EventBase], user_id: str + ) -> Dict[str, BundledAggregations]: + """Generate bundled aggregations for events. + + Args: + events: The iterable of events to calculate bundled aggregations for. + user_id: The user requesting the bundled aggregations. + + Returns: + A map of event ID to the bundled aggregation for the event. Not all + events may have bundled aggregations in the results. + """ + # De-duplicate events by ID to handle the same event requested multiple times. + # + # State events do not get bundled aggregations. + events_by_id = { + event.event_id: event for event in events if not event.is_state() + } + + # event ID -> bundled aggregation in non-serialized form. + results: Dict[str, BundledAggregations] = {} + + # Fetch other relations per event. + for event in events_by_id.values(): + event_result = await self._get_bundled_aggregation_for_event(event, user_id) + if event_result: + results[event.event_id] = event_result + + # Fetch any edits (but not for redacted events). + edits = await self._main_store.get_applicable_edits( + [ + event_id + for event_id, event in events_by_id.items() + if not event.internal_metadata.is_redacted() + ] + ) + for event_id, edit in edits.items(): + results.setdefault(event_id, BundledAggregations()).replace = edit + + # Fetch thread summaries. + summaries = await self._main_store.get_thread_summaries(events_by_id.keys()) + # Only fetch participated for a limited selection based on what had + # summaries. + participated = await self._main_store.get_threads_participated( + [event_id for event_id, summary in summaries.items() if summary], user_id + ) + for event_id, summary in summaries.items(): + if summary: + thread_count, latest_thread_event, edit = summary + results.setdefault( + event_id, BundledAggregations() + ).thread = _ThreadAggregation( + latest_event=latest_thread_event, + latest_edit=edit, + count=thread_count, + # If there's a thread summary it must also exist in the + # participated dictionary. 
+ current_user_participated=participated[event_id], + ) + + return results diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index b9735631fcd3..092e185c9950 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -60,8 +60,8 @@ from synapse.events.utils import copy_power_levels_contents from synapse.federation.federation_client import InvalidResponseError from synapse.handlers.federation import get_domains_from_state +from synapse.handlers.relations import BundledAggregations from synapse.rest.admin._base import assert_user_is_admin -from synapse.storage.databases.main.relations import BundledAggregations from synapse.storage.state import StateFilter from synapse.streams import EventSource from synapse.types import ( @@ -1118,6 +1118,7 @@ def __init__(self, hs: "HomeServer"): self.store = hs.get_datastores().main self.storage = hs.get_storage() self.state_store = self.storage.state + self._relations_handler = hs.get_relations_handler() async def get_event_context( self, @@ -1190,7 +1191,7 @@ async def filter_evts(events: List[EventBase]) -> List[EventBase]: event = filtered[0] # Fetch the aggregations. - aggregations = await self.store.get_bundled_aggregations( + aggregations = await self._relations_handler.get_bundled_aggregations( itertools.chain(events_before, (event,), events_after), user.to_string(), ) diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index aa16e417eb30..30eddda65fc2 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -54,6 +54,7 @@ def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() self.hs = hs self._event_serializer = hs.get_event_client_serializer() + self._relations_handler = hs.get_relations_handler() self.storage = hs.get_storage() self.state_store = self.storage.state self.auth = hs.get_auth() @@ -354,7 +355,7 @@ async def _search( aggregations = None if self._msc3666_enabled: - aggregations = await self.store.get_bundled_aggregations( + aggregations = await self._relations_handler.get_bundled_aggregations( # Generate an iterable of EventBase for all the events that will be # returned, including contextual events. itertools.chain( diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index c9d6a18bd700..6c569cfb1c88 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -33,11 +33,11 @@ from synapse.api.presence import UserPresenceState from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.events import EventBase +from synapse.handlers.relations import BundledAggregations from synapse.logging.context import current_context from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, start_active_span from synapse.push.clientformat import format_push_rules_for_user from synapse.storage.databases.main.event_push_actions import NotifCounts -from synapse.storage.databases.main.relations import BundledAggregations from synapse.storage.roommember import MemberSummary from synapse.storage.state import StateFilter from synapse.types import ( @@ -269,6 +269,7 @@ def __init__(self, hs: "HomeServer"): self.store = hs.get_datastores().main self.notifier = hs.get_notifier() self.presence_handler = hs.get_presence_handler() + self._relations_handler = hs.get_relations_handler() self.event_sources = hs.get_event_sources() self.clock = hs.get_clock() self.state = hs.get_state_handler() @@ -638,8 +639,10 @@ async def _load_filtered_recents( # as clients will have all the necessary information. 
bundled_aggregations = None if limited or newly_joined_room: - bundled_aggregations = await self.store.get_bundled_aggregations( - recents, sync_config.user.to_string() + bundled_aggregations = ( + await self._relations_handler.get_bundled_aggregations( + recents, sync_config.user.to_string() + ) ) return TimelineBatch( diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 8a06ab8c5f05..47e152c8cc7a 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -645,6 +645,7 @@ def __init__(self, hs: "HomeServer"): self._store = hs.get_datastores().main self.event_handler = hs.get_event_handler() self._event_serializer = hs.get_event_client_serializer() + self._relations_handler = hs.get_relations_handler() self.auth = hs.get_auth() async def on_GET( @@ -663,7 +664,7 @@ async def on_GET( if event: # Ensure there are bundled aggregations available. - aggregations = await self._store.get_bundled_aggregations( + aggregations = await self._relations_handler.get_bundled_aggregations( [event], requester.user.to_string() ) diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index af2334a65ea0..b2295fd51f60 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -27,7 +27,6 @@ ) import attr -from frozendict import frozendict from synapse.api.constants import RelationTypes from synapse.events import EventBase @@ -41,45 +40,15 @@ from synapse.storage.databases.main.stream import generate_pagination_where_clause from synapse.storage.engines import PostgresEngine from synapse.storage.relations import AggregationPaginationToken, PaginationChunk -from synapse.types import JsonDict, RoomStreamToken, StreamToken +from synapse.types import RoomStreamToken, StreamToken from synapse.util.caches.descriptors import cached, cachedList if TYPE_CHECKING: from synapse.server import HomeServer - from synapse.storage.databases.main import DataStore logger = logging.getLogger(__name__) -@attr.s(slots=True, frozen=True, auto_attribs=True) -class _ThreadAggregation: - # The latest event in the thread. - latest_event: EventBase - # The latest edit to the latest event in the thread. - latest_edit: Optional[EventBase] - # The total number of events in the thread. - count: int - # True if the current user has sent an event to the thread. - current_user_participated: bool - - -@attr.s(slots=True, auto_attribs=True) -class BundledAggregations: - """ - The bundled aggregations for an event. - - Some values require additional processing during serialization. 
- """ - - annotations: Optional[JsonDict] = None - references: Optional[JsonDict] = None - replace: Optional[EventBase] = None - thread: Optional[_ThreadAggregation] = None - - def __bool__(self) -> bool: - return bool(self.annotations or self.references or self.replace or self.thread) - - class RelationsWorkerStore(SQLBaseStore): def __init__( self, @@ -384,7 +353,7 @@ def get_applicable_edit(self, event_id: str) -> Optional[EventBase]: raise NotImplementedError() @cachedList(cached_method_name="get_applicable_edit", list_name="event_ids") - async def _get_applicable_edits( + async def get_applicable_edits( self, event_ids: Collection[str] ) -> Dict[str, Optional[EventBase]]: """Get the most recent edit (if any) that has happened for the given @@ -473,7 +442,7 @@ def get_thread_summary(self, event_id: str) -> Optional[Tuple[int, EventBase]]: raise NotImplementedError() @cachedList(cached_method_name="get_thread_summary", list_name="event_ids") - async def _get_thread_summaries( + async def get_thread_summaries( self, event_ids: Collection[str] ) -> Dict[str, Optional[Tuple[int, EventBase, Optional[EventBase]]]]: """Get the number of threaded replies, the latest reply (if any), and the latest edit for that reply for the given event. @@ -587,7 +556,7 @@ def _get_thread_summaries_txn( latest_events = await self.get_events(latest_event_ids.values()) # type: ignore[attr-defined] # Check to see if any of those events are edited. - latest_edits = await self._get_applicable_edits(latest_event_ids.values()) + latest_edits = await self.get_applicable_edits(latest_event_ids.values()) # Map to the event IDs to the thread summary. # @@ -610,7 +579,7 @@ def get_thread_participated(self, event_id: str, user_id: str) -> bool: raise NotImplementedError() @cachedList(cached_method_name="get_thread_participated", list_name="event_ids") - async def _get_threads_participated( + async def get_threads_participated( self, event_ids: Collection[str], user_id: str ) -> Dict[str, bool]: """Get whether the requesting user participated in the given threads. @@ -766,116 +735,6 @@ def _get_if_user_has_annotated_event(txn: LoggingTransaction) -> bool: "get_if_user_has_annotated_event", _get_if_user_has_annotated_event ) - async def _get_bundled_aggregation_for_event( - self, event: EventBase, user_id: str - ) -> Optional[BundledAggregations]: - """Generate bundled aggregations for an event. - - Note that this does not use a cache, but depends on cached methods. - - Args: - event: The event to calculate bundled aggregations for. - user_id: The user requesting the bundled aggregations. - - Returns: - The bundled aggregations for an event, if bundled aggregations are - enabled and the event can have bundled aggregations. - """ - - # Do not bundle aggregations for an event which represents an edit or an - # annotation. It does not make sense for them to have related events. - relates_to = event.content.get("m.relates_to") - if isinstance(relates_to, (dict, frozendict)): - relation_type = relates_to.get("rel_type") - if relation_type in (RelationTypes.ANNOTATION, RelationTypes.REPLACE): - return None - - event_id = event.event_id - room_id = event.room_id - - # The bundled aggregations to include, a mapping of relation type to a - # type-specific value. Some types include the direct return type here - # while others need more processing during serialization. 
- aggregations = BundledAggregations() - - annotations = await self.get_aggregation_groups_for_event(event_id, room_id) - if annotations.chunk: - aggregations.annotations = await annotations.to_dict( - cast("DataStore", self) - ) - - references = await self.get_relations_for_event( - event_id, event, room_id, RelationTypes.REFERENCE, direction="f" - ) - if references.chunk: - aggregations.references = await references.to_dict(cast("DataStore", self)) - - # Store the bundled aggregations in the event metadata for later use. - return aggregations - - async def get_bundled_aggregations( - self, events: Iterable[EventBase], user_id: str - ) -> Dict[str, BundledAggregations]: - """Generate bundled aggregations for events. - - Args: - events: The iterable of events to calculate bundled aggregations for. - user_id: The user requesting the bundled aggregations. - - Returns: - A map of event ID to the bundled aggregation for the event. Not all - events may have bundled aggregations in the results. - """ - # De-duplicate events by ID to handle the same event requested multiple times. - # - # State events do not get bundled aggregations. - events_by_id = { - event.event_id: event for event in events if not event.is_state() - } - - # event ID -> bundled aggregation in non-serialized form. - results: Dict[str, BundledAggregations] = {} - - # Fetch other relations per event. - for event in events_by_id.values(): - event_result = await self._get_bundled_aggregation_for_event(event, user_id) - if event_result: - results[event.event_id] = event_result - - # Fetch any edits (but not for redacted events). - edits = await self._get_applicable_edits( - [ - event_id - for event_id, event in events_by_id.items() - if not event.internal_metadata.is_redacted() - ] - ) - for event_id, edit in edits.items(): - results.setdefault(event_id, BundledAggregations()).replace = edit - - # Fetch thread summaries. - summaries = await self._get_thread_summaries(events_by_id.keys()) - # Only fetch participated for a limited selection based on what had - # summaries. - participated = await self._get_threads_participated( - [event_id for event_id, summary in summaries.items() if summary], user_id - ) - for event_id, summary in summaries.items(): - if summary: - thread_count, latest_thread_event, edit = summary - results.setdefault( - event_id, BundledAggregations() - ).thread = _ThreadAggregation( - latest_event=latest_thread_event, - latest_edit=edit, - count=thread_count, - # If there's a thread summary it must also exist in the - # participated dictionary. - current_user_participated=participated[event_id], - ) - - return results - class RelationsStore(RelationsWorkerStore): pass From bf9d549e3ad944e1e53a2ecc898640d690bf1eac Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 18 Mar 2022 19:03:46 +0000 Subject: [PATCH 422/474] Try to detect borked package installations. (#12244) * Try to detect borked package installations. Fixes #12223. Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> --- changelog.d/12244.misc | 1 + synapse/util/check_dependencies.py | 24 +++++++++++++++++++++++- tests/util/test_check_dependencies.py | 15 ++++++++++++++- 3 files changed, 38 insertions(+), 2 deletions(-) create mode 100644 changelog.d/12244.misc diff --git a/changelog.d/12244.misc b/changelog.d/12244.misc new file mode 100644 index 000000000000..950d48e4c68e --- /dev/null +++ b/changelog.d/12244.misc @@ -0,0 +1 @@ +Improve error message when dependencies check finds a broken installation. 
\ No newline at end of file diff --git a/synapse/util/check_dependencies.py b/synapse/util/check_dependencies.py index 12cd8049392f..66f1da750289 100644 --- a/synapse/util/check_dependencies.py +++ b/synapse/util/check_dependencies.py @@ -128,6 +128,19 @@ def _incorrect_version( ) +def _no_reported_version(requirement: Requirement, extra: Optional[str] = None) -> str: + if extra: + return ( + f"Synapse {VERSION} needs {requirement} for {extra}, " + f"but can't determine {requirement.name}'s version" + ) + else: + return ( + f"Synapse {VERSION} needs {requirement}, " + f"but can't determine {requirement.name}'s version" + ) + + def check_requirements(extra: Optional[str] = None) -> None: """Check Synapse's dependencies are present and correctly versioned. @@ -163,8 +176,17 @@ def check_requirements(extra: Optional[str] = None) -> None: deps_unfulfilled.append(requirement.name) errors.append(_not_installed(requirement, extra)) else: + if dist.version is None: + # This shouldn't happen---it suggests a borked virtualenv. (See #12223) + # Try to give a vaguely helpful error message anyway. + # Type-ignore: the annotations don't reflect reality: see + # https://github.com/python/typeshed/issues/7513 + # https://bugs.python.org/issue47060 + deps_unfulfilled.append(requirement.name) # type: ignore[unreachable] + errors.append(_no_reported_version(requirement, extra)) + # We specify prereleases=True to allow prereleases such as RCs. - if not requirement.specifier.contains(dist.version, prereleases=True): + elif not requirement.specifier.contains(dist.version, prereleases=True): deps_unfulfilled.append(requirement.name) errors.append(_incorrect_version(requirement, dist.version, extra)) diff --git a/tests/util/test_check_dependencies.py b/tests/util/test_check_dependencies.py index 38e9f58ac6ca..5d1aa025d127 100644 --- a/tests/util/test_check_dependencies.py +++ b/tests/util/test_check_dependencies.py @@ -12,7 +12,7 @@ class DummyDistribution(metadata.Distribution): - def __init__(self, version: str): + def __init__(self, version: object): self._version = version @property @@ -30,6 +30,7 @@ def read_text(self, filename): old_release_candidate = DummyDistribution("0.1.2rc3") new = DummyDistribution("1.2.3") new_release_candidate = DummyDistribution("1.2.3rc4") +distribution_with_no_version = DummyDistribution(None) # could probably use stdlib TestCase --- no need for twisted here @@ -67,6 +68,18 @@ def test_mandatory_dependency(self) -> None: # should not raise check_requirements() + def test_version_reported_as_none(self) -> None: + """Complain if importlib.metadata.version() returns None. + + This shouldn't normally happen, but it was seen in the wild (#12223). + """ + with patch( + "synapse.util.check_dependencies.metadata.requires", + return_value=["dummypkg >= 1"], + ): + with self.mock_installed_package(distribution_with_no_version): + self.assertRaises(DependencyException, check_requirements) + def test_checks_ignore_dev_dependencies(self) -> None: """Bot generic and per-extra checks should ignore dev dependencies.""" with patch( From afa17f0eabf06087d53697eafc748f7c935fb13f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 21 Mar 2022 11:23:32 +0000 Subject: [PATCH 423/474] Return a 404 from `/state` for an outlier (#12087) * Replace `get_state_for_pdu` with `get_state_ids_for_pdu` and `get_events_as_list`. 
* Return a 404 from `/state` and `/state_ids` for an outlier --- changelog.d/12087.bugfix | 1 + synapse/federation/federation_server.py | 7 ++- synapse/handlers/federation.py | 61 +++++++++---------------- 3 files changed, 25 insertions(+), 44 deletions(-) create mode 100644 changelog.d/12087.bugfix diff --git a/changelog.d/12087.bugfix b/changelog.d/12087.bugfix new file mode 100644 index 000000000000..6dacdddd0dcf --- /dev/null +++ b/changelog.d/12087.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug which caused the `/_matrix/federation/v1/state` and `.../state_ids` endpoints to return incorrect or invalid data when called for an event which we have stored as an "outlier". diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 482bbdd86744..af2d0f7d7932 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -22,7 +22,6 @@ Callable, Collection, Dict, - Iterable, List, Optional, Tuple, @@ -577,10 +576,10 @@ async def _on_state_ids_request_compute( async def _on_context_state_request_compute( self, room_id: str, event_id: Optional[str] ) -> Dict[str, list]: + pdus: Collection[EventBase] if event_id: - pdus: Iterable[EventBase] = await self.handler.get_state_for_pdu( - room_id, event_id - ) + event_ids = await self.handler.get_state_ids_for_pdu(room_id, event_id) + pdus = await self.store.get_events_as_list(event_ids) else: pdus = (await self.state.get_current_state(room_id)).values() diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index db39aeabded6..350ec9c03af1 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -950,54 +950,35 @@ async def on_make_knock_request( return event - async def get_state_for_pdu(self, room_id: str, event_id: str) -> List[EventBase]: - """Returns the state at the event. i.e. not including said event.""" - - event = await self.store.get_event(event_id, check_room_id=room_id) - - state_groups = await self.state_store.get_state_groups(room_id, [event_id]) - - if state_groups: - _, state = list(state_groups.items()).pop() - results = {(e.type, e.state_key): e for e in state} - - if event.is_state(): - # Get previous state - if "replaces_state" in event.unsigned: - prev_id = event.unsigned["replaces_state"] - if prev_id != event.event_id: - prev_event = await self.store.get_event(prev_id) - results[(event.type, event.state_key)] = prev_event - else: - del results[(event.type, event.state_key)] - - res = list(results.values()) - return res - else: - return [] - async def get_state_ids_for_pdu(self, room_id: str, event_id: str) -> List[str]: """Returns the state at the event. i.e. 
not including said event.""" event = await self.store.get_event(event_id, check_room_id=room_id) + if event.internal_metadata.outlier: + raise NotFoundError("State not known at event %s" % (event_id,)) state_groups = await self.state_store.get_state_groups_ids(room_id, [event_id]) - if state_groups: - _, state = list(state_groups.items()).pop() - results = state + # get_state_groups_ids should return exactly one result + assert len(state_groups) == 1 - if event.is_state(): - # Get previous state - if "replaces_state" in event.unsigned: - prev_id = event.unsigned["replaces_state"] - if prev_id != event.event_id: - results[(event.type, event.state_key)] = prev_id - else: - results.pop((event.type, event.state_key), None) + state_map = next(iter(state_groups.values())) - return list(results.values()) - else: - return [] + state_key = event.get_state_key() + if state_key is not None: + # the event was not rejected (get_event raises a NotFoundError for rejected + # events) so the state at the event should include the event itself. + assert ( + state_map.get((event.type, state_key)) == event.event_id + ), "State at event did not include event itself" + + # ... but we need the state *before* that event + if "replaces_state" in event.unsigned: + prev_id = event.unsigned["replaces_state"] + state_map[(event.type, state_key)] = prev_id + else: + del state_map[(event.type, state_key)] + + return list(state_map.values()) async def on_backfill_request( self, origin: str, room_id: str, pdu_list: List[str], limit: int From 1530cef19244e21d8b160bee2d925dcabbc0c4be Mon Sep 17 00:00:00 2001 From: Nick Mills-Barrett Date: Mon, 21 Mar 2022 11:52:10 +0000 Subject: [PATCH 424/474] Make it possible to enable compression for the metrics HTTP resource (#12258) * Make it possible to enable compression for the metrics HTTP resource This can provide significant bandwidth savings pulling metrics from synapse instances. * Add changelog file. * Fix type hint --- changelog.d/12258.misc | 1 + synapse/app/homeserver.py | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12258.misc diff --git a/changelog.d/12258.misc b/changelog.d/12258.misc new file mode 100644 index 000000000000..80024c8e91ee --- /dev/null +++ b/changelog.d/12258.misc @@ -0,0 +1 @@ +Compress metrics HTTP resource when enabled. Contributed by Nick @ Beeper. diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index e4dc04c0b40f..ad2b7c9515d8 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -261,7 +261,10 @@ def _configure_named_resource( resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self) if name == "metrics" and self.config.metrics.enable_metrics: - resources[METRICS_PREFIX] = MetricsResource(RegistryProxy) + metrics_resource: Resource = MetricsResource(RegistryProxy) + if compress: + metrics_resource = gz_wrap(metrics_resource) + resources[METRICS_PREFIX] = metrics_resource if name == "replication": resources[REPLICATION_PREFIX] = ReplicationRestResource(self) From 6134b3079e954e15c5a92ad3b89050085197b851 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Mon, 21 Mar 2022 12:16:46 +0000 Subject: [PATCH 425/474] Reword 'Choose your user name' as 'Choose your account name' in the SSO registration template, in order to comply with SIWA guidelines. 
(#12260) * Reword as 'Choose your account name' * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/12260.misc | 1 + synapse/res/templates/sso_auth_account_details.html | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12260.misc diff --git a/changelog.d/12260.misc b/changelog.d/12260.misc new file mode 100644 index 000000000000..deacf034deae --- /dev/null +++ b/changelog.d/12260.misc @@ -0,0 +1 @@ +Reword 'Choose your user name' as 'Choose your account name' in the SSO registration template, in order to comply with SIWA guidelines. \ No newline at end of file diff --git a/synapse/res/templates/sso_auth_account_details.html b/synapse/res/templates/sso_auth_account_details.html index 41315e4fd4da..b231aace01e6 100644 --- a/synapse/res/templates/sso_auth_account_details.html +++ b/synapse/res/templates/sso_auth_account_details.html @@ -130,7 +130,7 @@
-      Choose your user name
+      Choose your account name
       This is required to create your account on {{ server_name }}, and you can't change this later.
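
For context on the metrics-compression patch (#12258) a few commits above: the gzip wrapping only applies when the `metrics` resource is declared on an HTTP listener with compression enabled. A minimal `homeserver.yaml` sketch follows — the port and bind address are arbitrary examples, and `enable_metrics` must also be switched on or the resource is never registered:

    # Illustrative config only: serve the metrics resource gzip-compressed.
    enable_metrics: true

    listeners:
      - port: 9100                     # example port
        type: http
        bind_addresses: ['127.0.0.1']  # example bind address
        resources:
          - names: [metrics]
            compress: true             # routes the resource through gz_wrap()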

From 9d21ecf7ceab55bc19c4457b8b07401b0b1623a7 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Mon, 21 Mar 2022 14:43:16 +0100 Subject: [PATCH 426/474] Add type hints to tests files. (#12256) --- changelog.d/12256.misc | 1 + mypy.ini | 2 - tests/handlers/test_typing.py | 35 ++++++----- tests/push/test_push_rule_evaluator.py | 23 +++---- tests/storage/test_background_update.py | 48 ++++++++------- tests/storage/test_id_generators.py | 80 +++++++++++++------------ 6 files changed, 101 insertions(+), 88 deletions(-) create mode 100644 changelog.d/12256.misc diff --git a/changelog.d/12256.misc b/changelog.d/12256.misc new file mode 100644 index 000000000000..c5b635679931 --- /dev/null +++ b/changelog.d/12256.misc @@ -0,0 +1 @@ +Add type hints to tests files. \ No newline at end of file diff --git a/mypy.ini b/mypy.ini index d8b3b3f9e588..24d4ba15d452 100644 --- a/mypy.ini +++ b/mypy.ini @@ -82,9 +82,7 @@ exclude = (?x) |tests/server.py |tests/server_notices/test_resource_limits_server_notices.py |tests/state/test_v2.py - |tests/storage/test_background_update.py |tests/storage/test_base.py - |tests/storage/test_id_generators.py |tests/storage/test_roommember.py |tests/test_metrics.py |tests/test_phone_home.py diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index f91a80b9fa57..ffd5c4cb938c 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -18,11 +18,14 @@ from unittest.mock import ANY, Mock, call from twisted.internet import defer +from twisted.test.proto_helpers import MemoryReactor from twisted.web.resource import Resource from synapse.api.errors import AuthError from synapse.federation.transport.server import TransportLayerServer -from synapse.types import UserID, create_requester +from synapse.server import HomeServer +from synapse.types import JsonDict, UserID, create_requester +from synapse.util import Clock from tests import unittest from tests.test_utils import make_awaitable @@ -42,7 +45,9 @@ OTHER_ROOM_ID = "another-room" -def _expect_edu_transaction(edu_type, content, origin="test"): +def _expect_edu_transaction( + edu_type: str, content: JsonDict, origin: str = "test" +) -> JsonDict: return { "origin": origin, "origin_server_ts": 1000000, @@ -51,12 +56,12 @@ def _expect_edu_transaction(edu_type, content, origin="test"): } -def _make_edu_transaction_json(edu_type, content): +def _make_edu_transaction_json(edu_type: str, content: JsonDict) -> bytes: return json.dumps(_expect_edu_transaction(edu_type, content)).encode("utf8") class TypingNotificationsTestCase(unittest.HomeserverTestCase): - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: # we mock out the keyring so as to skip the authentication check on the # federation API call. 
mock_keyring = Mock(spec=["verify_json_for_server"]) @@ -83,7 +88,7 @@ def create_resource_dict(self) -> Dict[str, Resource]: d["/_matrix/federation"] = TransportLayerServer(self.hs) return d - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: mock_notifier = hs.get_notifier() self.on_new_event = mock_notifier.on_new_event @@ -111,24 +116,24 @@ def get_received_txn_response(*args): self.room_members = [] - async def check_user_in_room(room_id, user_id): + async def check_user_in_room(room_id: str, user_id: str) -> None: if user_id not in [u.to_string() for u in self.room_members]: raise AuthError(401, "User is not in the room") return None hs.get_auth().check_user_in_room = check_user_in_room - async def check_host_in_room(room_id, server_name): + async def check_host_in_room(room_id: str, server_name: str) -> bool: return room_id == ROOM_ID hs.get_event_auth_handler().check_host_in_room = check_host_in_room - def get_joined_hosts_for_room(room_id): + def get_joined_hosts_for_room(room_id: str): return {member.domain for member in self.room_members} self.datastore.get_joined_hosts_for_room = get_joined_hosts_for_room - async def get_users_in_room(room_id): + async def get_users_in_room(room_id: str): return {str(u) for u in self.room_members} self.datastore.get_users_in_room = get_users_in_room @@ -153,7 +158,7 @@ async def get_users_in_room(room_id): lambda *args, **kwargs: make_awaitable(None) ) - def test_started_typing_local(self): + def test_started_typing_local(self) -> None: self.room_members = [U_APPLE, U_BANANA] self.assertEqual(self.event_source.get_current_key(), 0) @@ -187,7 +192,7 @@ def test_started_typing_local(self): ) @override_config({"send_federation": True}) - def test_started_typing_remote_send(self): + def test_started_typing_remote_send(self) -> None: self.room_members = [U_APPLE, U_ONION] self.get_success( @@ -217,7 +222,7 @@ def test_started_typing_remote_send(self): try_trailing_slash_on_400=True, ) - def test_started_typing_remote_recv(self): + def test_started_typing_remote_recv(self) -> None: self.room_members = [U_APPLE, U_ONION] self.assertEqual(self.event_source.get_current_key(), 0) @@ -256,7 +261,7 @@ def test_started_typing_remote_recv(self): ], ) - def test_started_typing_remote_recv_not_in_room(self): + def test_started_typing_remote_recv_not_in_room(self) -> None: self.room_members = [U_APPLE, U_ONION] self.assertEqual(self.event_source.get_current_key(), 0) @@ -292,7 +297,7 @@ def test_started_typing_remote_recv_not_in_room(self): self.assertEqual(events[1], 0) @override_config({"send_federation": True}) - def test_stopped_typing(self): + def test_stopped_typing(self) -> None: self.room_members = [U_APPLE, U_BANANA, U_ONION] # Gut-wrenching @@ -343,7 +348,7 @@ def test_stopped_typing(self): [{"type": "m.typing", "room_id": ROOM_ID, "content": {"user_ids": []}}], ) - def test_typing_timeout(self): + def test_typing_timeout(self) -> None: self.room_members = [U_APPLE, U_BANANA] self.assertEqual(self.event_source.get_current_key(), 0) diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py index 3849beb9d6d6..5dba1870762e 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Any, Dict +from typing import Dict, Optional, Union import frozendict @@ -20,12 +20,13 @@ from synapse.events import FrozenEvent from synapse.push import push_rule_evaluator from synapse.push.push_rule_evaluator import PushRuleEvaluatorForEvent +from synapse.types import JsonDict from tests import unittest class PushRuleEvaluatorTestCase(unittest.TestCase): - def _get_evaluator(self, content): + def _get_evaluator(self, content: JsonDict) -> PushRuleEvaluatorForEvent: event = FrozenEvent( { "event_id": "$event_id", @@ -39,12 +40,12 @@ def _get_evaluator(self, content): ) room_member_count = 0 sender_power_level = 0 - power_levels = {} + power_levels: Dict[str, Union[int, Dict[str, int]]] = {} return PushRuleEvaluatorForEvent( event, room_member_count, sender_power_level, power_levels ) - def test_display_name(self): + def test_display_name(self) -> None: """Check for a matching display name in the body of the event.""" evaluator = self._get_evaluator({"body": "foo bar baz"}) @@ -71,20 +72,20 @@ def test_display_name(self): self.assertTrue(evaluator.matches(condition, "@user:test", "foo bar")) def _assert_matches( - self, condition: Dict[str, Any], content: Dict[str, Any], msg=None + self, condition: JsonDict, content: JsonDict, msg: Optional[str] = None ) -> None: evaluator = self._get_evaluator(content) self.assertTrue(evaluator.matches(condition, "@user:test", "display_name"), msg) def _assert_not_matches( - self, condition: Dict[str, Any], content: Dict[str, Any], msg=None + self, condition: JsonDict, content: JsonDict, msg: Optional[str] = None ) -> None: evaluator = self._get_evaluator(content) self.assertFalse( evaluator.matches(condition, "@user:test", "display_name"), msg ) - def test_event_match_body(self): + def test_event_match_body(self) -> None: """Check that event_match conditions on content.body work as expected""" # if the key is `content.body`, the pattern matches substrings. @@ -165,7 +166,7 @@ def test_event_match_body(self): r"? after \ should match any character", ) - def test_event_match_non_body(self): + def test_event_match_non_body(self) -> None: """Check that event_match conditions on other keys work as expected""" # if the key is anything other than 'content.body', the pattern must match the @@ -241,7 +242,7 @@ def test_event_match_non_body(self): "pattern should not match before a newline", ) - def test_no_body(self): + def test_no_body(self) -> None: """Not having a body shouldn't break the evaluator.""" evaluator = self._get_evaluator({}) @@ -250,7 +251,7 @@ def test_no_body(self): } self.assertFalse(evaluator.matches(condition, "@user:test", "foo")) - def test_invalid_body(self): + def test_invalid_body(self) -> None: """A non-string body should not break the evaluator.""" condition = { "kind": "contains_display_name", @@ -260,7 +261,7 @@ def test_invalid_body(self): evaluator = self._get_evaluator({"body": body}) self.assertFalse(evaluator.matches(condition, "@user:test", "foo")) - def test_tweaks_for_actions(self): + def test_tweaks_for_actions(self) -> None: """ This tests the behaviour of tweaks_for_actions. 
""" diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index 5cf18b690e48..fd619b64d4dd 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -17,8 +17,12 @@ import yaml from twisted.internet.defer import Deferred, ensureDeferred +from twisted.test.proto_helpers import MemoryReactor +from synapse.server import HomeServer from synapse.storage.background_updates import BackgroundUpdater +from synapse.types import JsonDict +from synapse.util import Clock from tests import unittest from tests.test_utils import make_awaitable, simple_async_mock @@ -26,7 +30,7 @@ class BackgroundUpdateTestCase(unittest.HomeserverTestCase): - def prepare(self, reactor, clock, homeserver): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.updates: BackgroundUpdater = self.hs.get_datastores().main.db_pool.updates # the base test class should have run the real bg updates for us self.assertTrue( @@ -39,7 +43,7 @@ def prepare(self, reactor, clock, homeserver): ) self.store = self.hs.get_datastores().main - async def update(self, progress, count): + async def update(self, progress: JsonDict, count: int) -> int: duration_ms = 10 await self.clock.sleep((count * duration_ms) / 1000) progress = {"my_key": progress["my_key"] + 1} @@ -51,7 +55,7 @@ async def update(self, progress, count): ) return count - def test_do_background_update(self): + def test_do_background_update(self) -> None: # the time we claim it takes to update one item when running the update duration_ms = 10 @@ -80,7 +84,7 @@ def test_do_background_update(self): # second step: complete the update # we should now get run with a much bigger number of items to update - async def update(progress, count): + async def update(progress: JsonDict, count: int) -> int: self.assertEqual(progress, {"my_key": 2}) self.assertAlmostEqual( count, @@ -110,7 +114,7 @@ async def update(progress, count): """ ) ) - def test_background_update_default_batch_set_by_config(self): + def test_background_update_default_batch_set_by_config(self) -> None: """ Test that the background update is run with the default_batch_size set by the config """ @@ -133,7 +137,7 @@ def test_background_update_default_batch_set_by_config(self): # on the first call, we should get run with the default background update size specified in the config self.update_handler.assert_called_once_with({"my_key": 1}, 20) - def test_background_update_default_sleep_behavior(self): + def test_background_update_default_sleep_behavior(self) -> None: """ Test default background update behavior, which is to sleep """ @@ -147,7 +151,7 @@ def test_background_update_default_sleep_behavior(self): self.update_handler.side_effect = self.update self.update_handler.reset_mock() - self.updates.start_doing_background_updates(), + self.updates.start_doing_background_updates() # 2: advance the reactor less than the default sleep duration (1000ms) self.reactor.pump([0.5]) @@ -167,7 +171,7 @@ def test_background_update_default_sleep_behavior(self): """ ) ) - def test_background_update_sleep_set_in_config(self): + def test_background_update_sleep_set_in_config(self) -> None: """ Test that changing the sleep time in the config changes how long it sleeps """ @@ -181,7 +185,7 @@ def test_background_update_sleep_set_in_config(self): self.update_handler.side_effect = self.update self.update_handler.reset_mock() - self.updates.start_doing_background_updates(), + self.updates.start_doing_background_updates() # 2: advance 
the reactor less than the configured sleep duration (500ms) self.reactor.pump([0.45]) @@ -201,7 +205,7 @@ def test_background_update_sleep_set_in_config(self): """ ) ) - def test_disabling_background_update_sleep(self): + def test_disabling_background_update_sleep(self) -> None: """ Test that disabling sleep in the config results in bg update not sleeping """ @@ -215,7 +219,7 @@ def test_disabling_background_update_sleep(self): self.update_handler.side_effect = self.update self.update_handler.reset_mock() - self.updates.start_doing_background_updates(), + self.updates.start_doing_background_updates() # 2: advance the reactor very little self.reactor.pump([0.025]) @@ -230,7 +234,7 @@ def test_disabling_background_update_sleep(self): """ ) ) - def test_background_update_duration_set_in_config(self): + def test_background_update_duration_set_in_config(self) -> None: """ Test that the desired duration set in the config is used in determining batch size """ @@ -254,7 +258,7 @@ def test_background_update_duration_set_in_config(self): # the first update was run with the default batch size, this should be run with 500ms as the # desired duration - async def update(progress, count): + async def update(progress: JsonDict, count: int) -> int: self.assertEqual(progress, {"my_key": 2}) self.assertAlmostEqual( count, @@ -275,7 +279,7 @@ async def update(progress, count): """ ) ) - def test_background_update_min_batch_set_in_config(self): + def test_background_update_min_batch_set_in_config(self) -> None: """ Test that the minimum batch size set in the config is used """ @@ -290,7 +294,7 @@ def test_background_update_min_batch_set_in_config(self): ) # Run the update with the long-running update item - async def update(progress, count): + async def update_long(progress: JsonDict, count: int) -> int: await self.clock.sleep((count * duration_ms) / 1000) progress = {"my_key": progress["my_key"] + 1} await self.store.db_pool.runInteraction( @@ -301,7 +305,7 @@ async def update(progress, count): ) return count - self.update_handler.side_effect = update + self.update_handler.side_effect = update_long self.update_handler.reset_mock() res = self.get_success( self.updates.do_next_background_update(False), @@ -311,25 +315,25 @@ async def update(progress, count): # the first update was run with the default batch size, this should be run with minimum batch size # as the first items took a very long time - async def update(progress, count): + async def update_short(progress: JsonDict, count: int) -> int: self.assertEqual(progress, {"my_key": 2}) self.assertEqual(count, 5) await self.updates._end_background_update("test_update") return count - self.update_handler.side_effect = update + self.update_handler.side_effect = update_short self.get_success(self.updates.do_next_background_update(False)) class BackgroundUpdateControllerTestCase(unittest.HomeserverTestCase): - def prepare(self, reactor, clock, homeserver): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.updates: BackgroundUpdater = self.hs.get_datastores().main.db_pool.updates # the base test class should have run the real bg updates for us self.assertTrue( self.get_success(self.updates.has_completed_background_updates()) ) - self.update_deferred = Deferred() + self.update_deferred: Deferred[int] = Deferred() self.update_handler = Mock(return_value=self.update_deferred) self.updates.register_background_update_handler( "test_update", self.update_handler @@ -358,7 +362,7 @@ class MockCM: ), ) - def test_controller(self): + 
def test_controller(self) -> None: store = self.hs.get_datastores().main self.get_success( store.db_pool.simple_insert( @@ -368,7 +372,7 @@ def test_controller(self): ) # Set the return value for the context manager. - enter_defer = Deferred() + enter_defer: Deferred[int] = Deferred() self._update_ctx_manager.__aenter__ = Mock(return_value=enter_defer) # Start the background update. diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py index 6ac4b93f981a..395396340bf2 100644 --- a/tests/storage/test_id_generators.py +++ b/tests/storage/test_id_generators.py @@ -13,9 +13,13 @@ # limitations under the License. from typing import List, Optional -from synapse.storage.database import DatabasePool +from twisted.test.proto_helpers import MemoryReactor + +from synapse.server import HomeServer +from synapse.storage.database import DatabasePool, LoggingTransaction from synapse.storage.engines import IncorrectDatabaseSetup from synapse.storage.util.id_generators import MultiWriterIdGenerator +from synapse.util import Clock from tests.unittest import HomeserverTestCase from tests.utils import USE_POSTGRES_FOR_TESTS @@ -25,13 +29,13 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): if not USE_POSTGRES_FOR_TESTS: skip = "Requires Postgres" - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self.db_pool: DatabasePool = self.store.db_pool self.get_success(self.db_pool.runInteraction("_setup_db", self._setup_db)) - def _setup_db(self, txn): + def _setup_db(self, txn: LoggingTransaction) -> None: txn.execute("CREATE SEQUENCE foobar_seq") txn.execute( """ @@ -59,12 +63,12 @@ def _create(conn): return self.get_success_or_raise(self.db_pool.runWithConnection(_create)) - def _insert_rows(self, instance_name: str, number: int): + def _insert_rows(self, instance_name: str, number: int) -> None: """Insert N rows as the given instance, inserting with stream IDs pulled from the postgres sequence. """ - def _insert(txn): + def _insert(txn: LoggingTransaction) -> None: for _ in range(number): txn.execute( "INSERT INTO foobar VALUES (nextval('foobar_seq'), ?)", @@ -80,12 +84,12 @@ def _insert(txn): self.get_success(self.db_pool.runInteraction("_insert_rows", _insert)) - def _insert_row_with_id(self, instance_name: str, stream_id: int): + def _insert_row_with_id(self, instance_name: str, stream_id: int) -> None: """Insert one row as the given instance with given stream_id, updating the postgres sequence position to match. """ - def _insert(txn): + def _insert(txn: LoggingTransaction) -> None: txn.execute( "INSERT INTO foobar VALUES (?, ?)", ( @@ -104,7 +108,7 @@ def _insert(txn): self.get_success(self.db_pool.runInteraction("_insert_row_with_id", _insert)) - def test_empty(self): + def test_empty(self) -> None: """Test an ID generator against an empty database gives sensible current positions. """ @@ -114,7 +118,7 @@ def test_empty(self): # The table is empty so we expect an empty map for positions self.assertEqual(id_gen.get_positions(), {}) - def test_single_instance(self): + def test_single_instance(self) -> None: """Test that reads and writes from a single process are handled correctly. """ @@ -130,7 +134,7 @@ def test_single_instance(self): # Try allocating a new ID gen and check that we only see position # advanced after we leave the context manager. 
- async def _get_next_async(): + async def _get_next_async() -> None: async with id_gen.get_next() as stream_id: self.assertEqual(stream_id, 8) @@ -142,7 +146,7 @@ async def _get_next_async(): self.assertEqual(id_gen.get_positions(), {"master": 8}) self.assertEqual(id_gen.get_current_token_for_writer("master"), 8) - def test_out_of_order_finish(self): + def test_out_of_order_finish(self) -> None: """Test that IDs persisted out of order are correctly handled""" # Prefill table with 7 rows written by 'master' @@ -191,7 +195,7 @@ def test_out_of_order_finish(self): self.assertEqual(id_gen.get_positions(), {"master": 11}) self.assertEqual(id_gen.get_current_token_for_writer("master"), 11) - def test_multi_instance(self): + def test_multi_instance(self) -> None: """Test that reads and writes from multiple processes are handled correctly. """ @@ -215,7 +219,7 @@ def test_multi_instance(self): # Try allocating a new ID gen and check that we only see position # advanced after we leave the context manager. - async def _get_next_async(): + async def _get_next_async() -> None: async with first_id_gen.get_next() as stream_id: self.assertEqual(stream_id, 8) @@ -233,7 +237,7 @@ async def _get_next_async(): # ... but calling `get_next` on the second instance should give a unique # stream ID - async def _get_next_async(): + async def _get_next_async2() -> None: async with second_id_gen.get_next() as stream_id: self.assertEqual(stream_id, 9) @@ -241,7 +245,7 @@ async def _get_next_async(): second_id_gen.get_positions(), {"first": 3, "second": 7} ) - self.get_success(_get_next_async()) + self.get_success(_get_next_async2()) self.assertEqual(second_id_gen.get_positions(), {"first": 3, "second": 9}) @@ -249,7 +253,7 @@ async def _get_next_async(): second_id_gen.advance("first", 8) self.assertEqual(second_id_gen.get_positions(), {"first": 8, "second": 9}) - def test_get_next_txn(self): + def test_get_next_txn(self) -> None: """Test that the `get_next_txn` function works correctly.""" # Prefill table with 7 rows written by 'master' @@ -263,7 +267,7 @@ def test_get_next_txn(self): # Try allocating a new ID gen and check that we only see position # advanced after we leave the context manager. - def _get_next_txn(txn): + def _get_next_txn(txn: LoggingTransaction) -> None: stream_id = id_gen.get_next_txn(txn) self.assertEqual(stream_id, 8) @@ -275,7 +279,7 @@ def _get_next_txn(txn): self.assertEqual(id_gen.get_positions(), {"master": 8}) self.assertEqual(id_gen.get_current_token_for_writer("master"), 8) - def test_get_persisted_upto_position(self): + def test_get_persisted_upto_position(self) -> None: """Test that `get_persisted_upto_position` correctly tracks updates to positions. """ @@ -317,7 +321,7 @@ def test_get_persisted_upto_position(self): id_gen.advance("second", 15) self.assertEqual(id_gen.get_persisted_upto_position(), 11) - def test_get_persisted_upto_position_get_next(self): + def test_get_persisted_upto_position_get_next(self) -> None: """Test that `get_persisted_upto_position` correctly tracks updates to positions when `get_next` is called. 
""" @@ -331,7 +335,7 @@ def test_get_persisted_upto_position_get_next(self): self.assertEqual(id_gen.get_persisted_upto_position(), 5) - async def _get_next_async(): + async def _get_next_async() -> None: async with id_gen.get_next() as stream_id: self.assertEqual(stream_id, 6) self.assertEqual(id_gen.get_persisted_upto_position(), 5) @@ -344,7 +348,7 @@ async def _get_next_async(): # `persisted_upto_position` in this case, then it will be correct in the # other cases that are tested above (since they'll hit the same code). - def test_restart_during_out_of_order_persistence(self): + def test_restart_during_out_of_order_persistence(self) -> None: """Test that restarting a process while another process is writing out of order updates are handled correctly. """ @@ -388,7 +392,7 @@ def test_restart_during_out_of_order_persistence(self): id_gen_worker.advance("master", 9) self.assertEqual(id_gen_worker.get_positions(), {"master": 9}) - def test_writer_config_change(self): + def test_writer_config_change(self) -> None: """Test that changing the writer config correctly works.""" self._insert_row_with_id("first", 3) @@ -421,7 +425,7 @@ def test_writer_config_change(self): # Check that we get a sane next stream ID with this new config. - async def _get_next_async(): + async def _get_next_async() -> None: async with id_gen_3.get_next() as stream_id: self.assertEqual(stream_id, 6) @@ -435,7 +439,7 @@ async def _get_next_async(): self.assertEqual(id_gen_5.get_current_token_for_writer("first"), 6) self.assertEqual(id_gen_5.get_current_token_for_writer("third"), 6) - def test_sequence_consistency(self): + def test_sequence_consistency(self) -> None: """Test that we error out if the table and sequence diverges.""" # Prefill with some rows @@ -458,13 +462,13 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase): if not USE_POSTGRES_FOR_TESTS: skip = "Requires Postgres" - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self.db_pool: DatabasePool = self.store.db_pool self.get_success(self.db_pool.runInteraction("_setup_db", self._setup_db)) - def _setup_db(self, txn): + def _setup_db(self, txn: LoggingTransaction) -> None: txn.execute("CREATE SEQUENCE foobar_seq") txn.execute( """ @@ -493,10 +497,10 @@ def _create(conn): return self.get_success(self.db_pool.runWithConnection(_create)) - def _insert_row(self, instance_name: str, stream_id: int): + def _insert_row(self, instance_name: str, stream_id: int) -> None: """Insert one row as the given instance with given stream_id.""" - def _insert(txn): + def _insert(txn: LoggingTransaction) -> None: txn.execute( "INSERT INTO foobar VALUES (?, ?)", ( @@ -514,13 +518,13 @@ def _insert(txn): self.get_success(self.db_pool.runInteraction("_insert_row", _insert)) - def test_single_instance(self): + def test_single_instance(self) -> None: """Test that reads and writes from a single process are handled correctly. 
""" id_gen = self._create_id_generator() - async def _get_next_async(): + async def _get_next_async() -> None: async with id_gen.get_next() as stream_id: self._insert_row("master", stream_id) @@ -530,7 +534,7 @@ async def _get_next_async(): self.assertEqual(id_gen.get_current_token_for_writer("master"), -1) self.assertEqual(id_gen.get_persisted_upto_position(), -1) - async def _get_next_async2(): + async def _get_next_async2() -> None: async with id_gen.get_next_mult(3) as stream_ids: for stream_id in stream_ids: self._insert_row("master", stream_id) @@ -548,14 +552,14 @@ async def _get_next_async2(): self.assertEqual(second_id_gen.get_current_token_for_writer("master"), -4) self.assertEqual(second_id_gen.get_persisted_upto_position(), -4) - def test_multiple_instance(self): + def test_multiple_instance(self) -> None: """Tests that having multiple instances that get advanced over federation works corretly. """ id_gen_1 = self._create_id_generator("first", writers=["first", "second"]) id_gen_2 = self._create_id_generator("second", writers=["first", "second"]) - async def _get_next_async(): + async def _get_next_async() -> None: async with id_gen_1.get_next() as stream_id: self._insert_row("first", stream_id) id_gen_2.advance("first", stream_id) @@ -567,7 +571,7 @@ async def _get_next_async(): self.assertEqual(id_gen_1.get_persisted_upto_position(), -1) self.assertEqual(id_gen_2.get_persisted_upto_position(), -1) - async def _get_next_async2(): + async def _get_next_async2() -> None: async with id_gen_2.get_next() as stream_id: self._insert_row("second", stream_id) id_gen_1.advance("second", stream_id) @@ -584,13 +588,13 @@ class MultiTableMultiWriterIdGeneratorTestCase(HomeserverTestCase): if not USE_POSTGRES_FOR_TESTS: skip = "Requires Postgres" - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self.db_pool: DatabasePool = self.store.db_pool self.get_success(self.db_pool.runInteraction("_setup_db", self._setup_db)) - def _setup_db(self, txn): + def _setup_db(self, txn: LoggingTransaction) -> None: txn.execute("CREATE SEQUENCE foobar_seq") txn.execute( """ @@ -642,7 +646,7 @@ def _insert_rows( from the postgres sequence. """ - def _insert(txn): + def _insert(txn: LoggingTransaction) -> None: for _ in range(number): txn.execute( "INSERT INTO %s VALUES (nextval('foobar_seq'), ?)" % (table,), @@ -659,7 +663,7 @@ def _insert(txn): self.get_success(self.db_pool.runInteraction("_insert_rows", _insert)) - def test_load_existing_stream(self): + def test_load_existing_stream(self) -> None: """Test creating ID gens with multiple tables that have rows from after the position in `stream_positions` table. """ From d9bc65918e406b8ae15c42b2ea3680d2c9fb79c3 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 21 Mar 2022 17:27:59 +0000 Subject: [PATCH 427/474] Call out synctl change --- CHANGES.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 78498c10b551..06396c3f6f8b 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -3,6 +3,8 @@ Synapse 1.55.0rc1 (2022-03-15) This release removes a workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. **This breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700))**; Mjolnir users should upgrade Mjolnir before upgrading Synapse to this version. 
+This release also moves the location of the `synctl` script; see the [upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#synctl-script-has-been-moved) for more details. + Features -------- @@ -38,6 +40,7 @@ Deprecations and Removals ------------------------- - **Remove workaround introduced in Synapse 1.50.0 for Mjolnir compatibility. Breaks compatibility with Mjolnir 1.3.1 and earlier. ([\#11700](https://github.com/matrix-org/synapse/issues/11700))** +- **`synctl` has been moved into into `synapse._scripts` and is exposed as an entry point; see [upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#synctl-script-has-been-moved). ([\#12140](https://github.com/matrix-org/synapse/issues/12140)) - Remove backwards compatibilty with pagination tokens from the `/relations` and `/aggregations` endpoints generated from Synapse < v1.52.0. ([\#12138](https://github.com/matrix-org/synapse/issues/12138)) - The groups/communities feature in Synapse has been deprecated. ([\#12200](https://github.com/matrix-org/synapse/issues/12200)) @@ -56,7 +59,6 @@ Internal Changes - Fix CI not attaching source distributions and wheels to the GitHub releases. ([\#12131](https://github.com/matrix-org/synapse/issues/12131)) - Remove unused mocks from `test_typing`. ([\#12136](https://github.com/matrix-org/synapse/issues/12136)) - Give `scripts-dev` scripts suffixes for neater CI config. ([\#12137](https://github.com/matrix-org/synapse/issues/12137)) -- Move `synctl` into `synapse._scripts` and expose as an entry point. ([\#12140](https://github.com/matrix-org/synapse/issues/12140)) - Move the snapcraft configuration file to `contrib`. ([\#12142](https://github.com/matrix-org/synapse/issues/12142)) - Enable [MSC3030](https://github.com/matrix-org/matrix-doc/pull/3030) Complement tests in CI. ([\#12144](https://github.com/matrix-org/synapse/issues/12144)) - Enable [MSC2716](https://github.com/matrix-org/matrix-doc/pull/2716) Complement tests in CI. ([\#12145](https://github.com/matrix-org/synapse/issues/12145)) From 01211e0c16758f41883e42f1d3e6306b7a683e96 Mon Sep 17 00:00:00 2001 From: Michael Telatynski <7t3chguy@gmail.com> Date: Tue, 22 Mar 2022 10:22:25 +0000 Subject: [PATCH 428/474] Tweak copy for sso account details template (#12265) * Tweak copy for sso account details template * Update sso footer copyright year * Add newsfragment Signed-off-by: Michael Telatynski <7t3chguy@gmail.com> --- changelog.d/12265.misc | 1 + synapse/res/templates/sso_auth_account_details.html | 8 ++++---- synapse/res/templates/sso_auth_account_details.js | 2 +- synapse/res/templates/sso_footer.html | 2 +- 4 files changed, 7 insertions(+), 6 deletions(-) create mode 100644 changelog.d/12265.misc diff --git a/changelog.d/12265.misc b/changelog.d/12265.misc new file mode 100644 index 000000000000..4213f5855592 --- /dev/null +++ b/changelog.d/12265.misc @@ -0,0 +1 @@ +Tweak copy for default sso account details template to better adhere to mobile app store guidelines. \ No newline at end of file diff --git a/synapse/res/templates/sso_auth_account_details.html b/synapse/res/templates/sso_auth_account_details.html index b231aace01e6..1ba850369a32 100644 --- a/synapse/res/templates/sso_auth_account_details.html +++ b/synapse/res/templates/sso_auth_account_details.html @@ -130,13 +130,13 @@
-      Choose your account name
-      This is required to create your account on {{ server_name }}, and you can't change this later.
+      Create your account
+      This is required. Continue to create your account on {{ server_name }}. You can't change this later.
- +
@
:{{ server_name }}
@@ -145,7 +145,7 @@

Choose your account name

{% if user_attributes.avatar_url or user_attributes.display_name or user_attributes.emails %}
-        {% if idp.idp_icon %}{% endif %}Information from {{ idp.idp_name }}
+        {% if idp.idp_icon %}{% endif %}Optional data from {{ idp.idp_name }}
{% if user_attributes.avatar_url %}
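
Looking back at the `/state` outlier fix (#12087) earlier in this series: the "state before the event" computation at the heart of the reworked `get_state_ids_for_pdu` can be sketched in isolation. The following is a simplified model, with plain dicts and string event IDs standing in for Synapse's real state maps and event objects — not the actual implementation:

    from typing import Dict, Optional, Tuple

    StateKey = Tuple[str, str]  # (event type, state key)

    def state_before_event(
        state_at_event: Dict[StateKey, str],
        event_type: str,
        state_key: Optional[str],
        event_id: str,
        replaces_state: Optional[str],
    ) -> Dict[StateKey, str]:
        # Work on a copy so the caller's map is untouched.
        state_map = dict(state_at_event)
        if state_key is not None:
            # The state *at* a state event includes the event itself...
            assert state_map.get((event_type, state_key)) == event_id
            # ...so roll it back: restore the entry this event replaced,
            # or drop the entry entirely if it replaced nothing.
            if replaces_state is not None:
                state_map[(event_type, state_key)] = replaces_state
            else:
                del state_map[(event_type, state_key)]
        return state_map

    # Example: "$topic2" replaced an earlier "$topic1" topic event, so the
    # state before "$topic2" points back at "$topic1".
    state = {("m.room.topic", ""): "$topic2", ("m.room.create", ""): "$create"}
    before = state_before_event(state, "m.room.topic", "", "$topic2", "$topic1")
    assert before[("m.room.topic", "")] == "$topic1"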