From d8df8e6c1432d25ea1c0310a5f2dc48d1688345f Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 26 Jan 2022 12:47:34 +0000 Subject: [PATCH 01/19] Don't print HTTPStatus.* in "Processed..." logs (#11827) * Don't print HTTPStatus.* in "Processed..." logs Fixes #11812. See also #7118 and https://github.com/matrix-org/synapse/pull/7188#r401719326 in particular. Co-authored-by: Brendan Abolivier --- changelog.d/11827.bugfix | 1 + synapse/http/site.py | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11827.bugfix diff --git a/changelog.d/11827.bugfix b/changelog.d/11827.bugfix new file mode 100644 index 000000000000..30222dfb6269 --- /dev/null +++ b/changelog.d/11827.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 0.33.3 causing requests to sometimes log strings such as `HTTPStatus.OK` instead of integer status codes. \ No newline at end of file diff --git a/synapse/http/site.py b/synapse/http/site.py index c180a1d3231b..40f6c0489451 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -407,7 +407,10 @@ def _finished_processing(self) -> None: user_agent = get_request_user_agent(self, "-") - code = str(self.code) + # int(self.code) looks redundant, because self.code is already an int. + # But self.code might be an HTTPStatus (which inherits from int)---which has + # a different string representation. So ensure we really have an integer. + code = str(int(self.code)) if not self.finished: # we didn't send the full response before we gave up (presumably because # the connection dropped) From 2897fb6b4fb8bdaea0e919233d5ccaf5dea12742 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 26 Jan 2022 08:27:04 -0500 Subject: [PATCH 02/19] Improvements to bundling aggregations. (#11815) This is some odds and ends found during the review of #11791 and while continuing to work in this code: * Return attrs classes instead of dictionaries from some methods to improve type safety. * Call `get_bundled_aggregations` fewer times. * Adds a missing assertion in the tests. * Do not return empty bundled aggregations for an event (preferring to not include the bundle at all, as the docstring states). --- changelog.d/11815.misc | 1 + synapse/events/utils.py | 57 ++++++++++----- synapse/handlers/room.py | 77 +++++++++++---------- synapse/handlers/search.py | 45 ++++++------ synapse/handlers/sync.py | 3 +- synapse/push/mailer.py | 2 +- synapse/rest/admin/rooms.py | 39 +++++++---- synapse/rest/client/room.py | 39 +++++++---- synapse/rest/client/sync.py | 3 +- synapse/storage/databases/main/relations.py | 61 ++++++++++------ synapse/storage/databases/main/stream.py | 22 ++++-- tests/rest/client/test_relations.py | 2 +- 12 files changed, 212 insertions(+), 139 deletions(-) create mode 100644 changelog.d/11815.misc diff --git a/changelog.d/11815.misc b/changelog.d/11815.misc new file mode 100644 index 000000000000..83aa6d6eb046 --- /dev/null +++ b/changelog.d/11815.misc @@ -0,0 +1 @@ +Improve type safety of bundled aggregations code. diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 918adeecf8cd..243696b35724 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -14,7 +14,17 @@ # limitations under the License. import collections.abc import re -from typing import Any, Callable, Dict, Iterable, List, Mapping, Optional, Union +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Iterable, + List, + Mapping, + Optional, + Union, +) from frozendict import frozendict @@ -26,6 +36,10 @@ from . 
import EventBase +if TYPE_CHECKING: + from synapse.storage.databases.main.relations import BundledAggregations + + # Split strings on "." but not "\." This uses a negative lookbehind assertion for '\' # (? JsonDict: """Serializes a single event. @@ -415,7 +429,7 @@ def _inject_bundled_aggregations( self, event: EventBase, time_now: int, - aggregations: JsonDict, + aggregations: "BundledAggregations", serialized_event: JsonDict, ) -> None: """Potentially injects bundled aggregations into the unsigned portion of the serialized event. @@ -427,13 +441,18 @@ def _inject_bundled_aggregations( serialized_event: The serialized event which may be modified. """ - # Make a copy in-case the object is cached. - aggregations = aggregations.copy() + serialized_aggregations = {} + + if aggregations.annotations: + serialized_aggregations[RelationTypes.ANNOTATION] = aggregations.annotations + + if aggregations.references: + serialized_aggregations[RelationTypes.REFERENCE] = aggregations.references - if RelationTypes.REPLACE in aggregations: + if aggregations.replace: # If there is an edit replace the content, preserving existing # relations. - edit = aggregations[RelationTypes.REPLACE] + edit = aggregations.replace # Ensure we take copies of the edit content, otherwise we risk modifying # the original event. @@ -451,24 +470,28 @@ def _inject_bundled_aggregations( else: serialized_event["content"].pop("m.relates_to", None) - aggregations[RelationTypes.REPLACE] = { + serialized_aggregations[RelationTypes.REPLACE] = { "event_id": edit.event_id, "origin_server_ts": edit.origin_server_ts, "sender": edit.sender, } # If this event is the start of a thread, include a summary of the replies. - if RelationTypes.THREAD in aggregations: - # Serialize the latest thread event. - latest_thread_event = aggregations[RelationTypes.THREAD]["latest_event"] - - # Don't bundle aggregations as this could recurse forever. - aggregations[RelationTypes.THREAD]["latest_event"] = self.serialize_event( - latest_thread_event, time_now, bundle_aggregations=None - ) + if aggregations.thread: + serialized_aggregations[RelationTypes.THREAD] = { + # Don't bundle aggregations as this could recurse forever. + "latest_event": self.serialize_event( + aggregations.thread.latest_event, time_now, bundle_aggregations=None + ), + "count": aggregations.thread.count, + "current_user_participated": aggregations.thread.current_user_participated, + } # Include the bundled aggregations in the event. 
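# As an illustration (hand-written, not part of this patch), the serialized
# aggregations end up under the event's "unsigned" key with roughly this
# shape, including only the relation types that are actually present:
#
#     "unsigned": {
#         "m.relations": {
#             "m.annotation": {"chunk": [{"type": "m.reaction", "key": "👍", "count": 3}]},
#             "m.reference": {"chunk": [{"event_id": "$some_event_id"}]},
#             "m.replace": {"event_id": "$edit_event_id", "origin_server_ts": 1642000000000, "sender": "@alice:example.com"},
#             "m.thread": {"latest_event": {...}, "count": 2, "current_user_participated": True},
#         },
#     }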
- serialized_event["unsigned"].setdefault("m.relations", {}).update(aggregations) + if serialized_aggregations: + serialized_event["unsigned"].setdefault("m.relations", {}).update( + serialized_aggregations + ) def serialize_events( self, events: Iterable[Union[JsonDict, EventBase]], time_now: int, **kwargs: Any diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index f963078e596c..1420d6772955 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -30,6 +30,7 @@ Tuple, ) +import attr from typing_extensions import TypedDict from synapse.api.constants import ( @@ -60,6 +61,7 @@ from synapse.federation.federation_client import InvalidResponseError from synapse.handlers.federation import get_domains_from_state from synapse.rest.admin._base import assert_user_is_admin +from synapse.storage.databases.main.relations import BundledAggregations from synapse.storage.state import StateFilter from synapse.streams import EventSource from synapse.types import ( @@ -90,6 +92,17 @@ FIVE_MINUTES_IN_MS = 5 * 60 * 1000 +@attr.s(slots=True, frozen=True, auto_attribs=True) +class EventContext: + events_before: List[EventBase] + event: EventBase + events_after: List[EventBase] + state: List[EventBase] + aggregations: Dict[str, BundledAggregations] + start: str + end: str + + class RoomCreationHandler: def __init__(self, hs: "HomeServer"): self.store = hs.get_datastore() @@ -1119,7 +1132,7 @@ async def get_event_context( limit: int, event_filter: Optional[Filter], use_admin_priviledge: bool = False, - ) -> Optional[JsonDict]: + ) -> Optional[EventContext]: """Retrieves events, pagination tokens and state around a given event in a room. @@ -1167,38 +1180,28 @@ async def filter_evts(events: List[EventBase]) -> List[EventBase]: results = await self.store.get_events_around( room_id, event_id, before_limit, after_limit, event_filter ) + events_before = results.events_before + events_after = results.events_after if event_filter: - results["events_before"] = await event_filter.filter( - results["events_before"] - ) - results["events_after"] = await event_filter.filter(results["events_after"]) + events_before = await event_filter.filter(events_before) + events_after = await event_filter.filter(events_after) - results["events_before"] = await filter_evts(results["events_before"]) - results["events_after"] = await filter_evts(results["events_after"]) + events_before = await filter_evts(events_before) + events_after = await filter_evts(events_after) # filter_evts can return a pruned event in case the user is allowed to see that # there's something there but not see the content, so use the event that's in # `filtered` rather than the event we retrieved from the datastore. - results["event"] = filtered[0] + event = filtered[0] # Fetch the aggregations. 
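# (The single call below now iterates over events_before, the event itself, and
# events_after in one pass; the two follow-up aggregations.update(...) calls it
# replaces are removed further down.)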
aggregations = await self.store.get_bundled_aggregations( - [results["event"]], user.to_string() + itertools.chain(events_before, (event,), events_after), + user.to_string(), ) - aggregations.update( - await self.store.get_bundled_aggregations( - results["events_before"], user.to_string() - ) - ) - aggregations.update( - await self.store.get_bundled_aggregations( - results["events_after"], user.to_string() - ) - ) - results["aggregations"] = aggregations - if results["events_after"]: - last_event_id = results["events_after"][-1].event_id + if events_after: + last_event_id = events_after[-1].event_id else: last_event_id = event_id @@ -1206,9 +1209,9 @@ async def filter_evts(events: List[EventBase]) -> List[EventBase]: state_filter = StateFilter.from_lazy_load_member_list( ev.sender for ev in itertools.chain( - results["events_before"], - (results["event"],), - results["events_after"], + events_before, + (event,), + events_after, ) ) else: @@ -1226,21 +1229,23 @@ async def filter_evts(events: List[EventBase]) -> List[EventBase]: if event_filter: state_events = await event_filter.filter(state_events) - results["state"] = await filter_evts(state_events) - # We use a dummy token here as we only care about the room portion of # the token, which we replace. token = StreamToken.START - results["start"] = await token.copy_and_replace( - "room_key", results["start"] - ).to_string(self.store) - - results["end"] = await token.copy_and_replace( - "room_key", results["end"] - ).to_string(self.store) - - return results + return EventContext( + events_before=events_before, + event=event, + events_after=events_after, + state=await filter_evts(state_events), + aggregations=aggregations, + start=await token.copy_and_replace("room_key", results.start).to_string( + self.store + ), + end=await token.copy_and_replace("room_key", results.end).to_string( + self.store + ), + ) class TimestampLookupHandler: diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index 0b153a682261..02bb5ae72f51 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -361,36 +361,37 @@ async def search( logger.info( "Context for search returned %d and %d events", - len(res["events_before"]), - len(res["events_after"]), + len(res.events_before), + len(res.events_after), ) - res["events_before"] = await filter_events_for_client( - self.storage, user.to_string(), res["events_before"] + events_before = await filter_events_for_client( + self.storage, user.to_string(), res.events_before ) - res["events_after"] = await filter_events_for_client( - self.storage, user.to_string(), res["events_after"] + events_after = await filter_events_for_client( + self.storage, user.to_string(), res.events_after ) - res["start"] = await now_token.copy_and_replace( - "room_key", res["start"] - ).to_string(self.store) - - res["end"] = await now_token.copy_and_replace( - "room_key", res["end"] - ).to_string(self.store) + context = { + "events_before": events_before, + "events_after": events_after, + "start": await now_token.copy_and_replace( + "room_key", res.start + ).to_string(self.store), + "end": await now_token.copy_and_replace( + "room_key", res.end + ).to_string(self.store), + } if include_profile: senders = { ev.sender - for ev in itertools.chain( - res["events_before"], [event], res["events_after"] - ) + for ev in itertools.chain(events_before, [event], events_after) } - if res["events_after"]: - last_event_id = res["events_after"][-1].event_id + if events_after: + last_event_id = events_after[-1].event_id else: 
last_event_id = event.event_id @@ -402,7 +403,7 @@ async def search( last_event_id, state_filter ) - res["profile_info"] = { + context["profile_info"] = { s.state_key: { "displayname": s.content.get("displayname", None), "avatar_url": s.content.get("avatar_url", None), @@ -411,7 +412,7 @@ async def search( if s.type == EventTypes.Member and s.state_key in senders } - contexts[event.event_id] = res + contexts[event.event_id] = context else: contexts = {} @@ -421,10 +422,10 @@ async def search( for context in contexts.values(): context["events_before"] = self._event_serializer.serialize_events( - context["events_before"], time_now + context["events_before"], time_now # type: ignore[arg-type] ) context["events_after"] = self._event_serializer.serialize_events( - context["events_after"], time_now + context["events_after"], time_now # type: ignore[arg-type] ) state_results = {} diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 7e2a892b63ae..c72ed7c2907a 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -37,6 +37,7 @@ from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, start_active_span from synapse.push.clientformat import format_push_rules_for_user from synapse.storage.databases.main.event_push_actions import NotifCounts +from synapse.storage.databases.main.relations import BundledAggregations from synapse.storage.roommember import MemberSummary from synapse.storage.state import StateFilter from synapse.types import ( @@ -100,7 +101,7 @@ class TimelineBatch: limited: bool # A mapping of event ID to the bundled aggregations for the above events. # This is only calculated if limited is true. - bundled_aggregations: Optional[Dict[str, Dict[str, Any]]] = None + bundled_aggregations: Optional[Dict[str, BundledAggregations]] = None def __bool__(self) -> bool: """Make the result appear empty if there are no updates. 
This is used diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index dadfc574134c..3df8452eecf7 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -455,7 +455,7 @@ async def _get_notif_vars( } the_events = await filter_events_for_client( - self.storage, user_id, results["events_before"] + self.storage, user_id, results.events_before ) the_events.append(notif_event) diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index efe25fe7ebf7..5b706efbcff0 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -729,7 +729,7 @@ async def on_GET( else: event_filter = None - results = await self.room_context_handler.get_event_context( + event_context = await self.room_context_handler.get_event_context( requester, room_id, event_id, @@ -738,25 +738,34 @@ async def on_GET( use_admin_priviledge=True, ) - if not results: + if not event_context: raise SynapseError( HTTPStatus.NOT_FOUND, "Event not found.", errcode=Codes.NOT_FOUND ) time_now = self.clock.time_msec() - aggregations = results.pop("aggregations", None) - results["events_before"] = self._event_serializer.serialize_events( - results["events_before"], time_now, bundle_aggregations=aggregations - ) - results["event"] = self._event_serializer.serialize_event( - results["event"], time_now, bundle_aggregations=aggregations - ) - results["events_after"] = self._event_serializer.serialize_events( - results["events_after"], time_now, bundle_aggregations=aggregations - ) - results["state"] = self._event_serializer.serialize_events( - results["state"], time_now - ) + results = { + "events_before": self._event_serializer.serialize_events( + event_context.events_before, + time_now, + bundle_aggregations=event_context.aggregations, + ), + "event": self._event_serializer.serialize_event( + event_context.event, + time_now, + bundle_aggregations=event_context.aggregations, + ), + "events_after": self._event_serializer.serialize_events( + event_context.events_after, + time_now, + bundle_aggregations=event_context.aggregations, + ), + "state": self._event_serializer.serialize_events( + event_context.state, time_now + ), + "start": event_context.start, + "end": event_context.end, + } return HTTPStatus.OK, results diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 90bb9142a098..90355e44b25e 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -706,27 +706,36 @@ async def on_GET( else: event_filter = None - results = await self.room_context_handler.get_event_context( + event_context = await self.room_context_handler.get_event_context( requester, room_id, event_id, limit, event_filter ) - if not results: + if not event_context: raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND) time_now = self.clock.time_msec() - aggregations = results.pop("aggregations", None) - results["events_before"] = self._event_serializer.serialize_events( - results["events_before"], time_now, bundle_aggregations=aggregations - ) - results["event"] = self._event_serializer.serialize_event( - results["event"], time_now, bundle_aggregations=aggregations - ) - results["events_after"] = self._event_serializer.serialize_events( - results["events_after"], time_now, bundle_aggregations=aggregations - ) - results["state"] = self._event_serializer.serialize_events( - results["state"], time_now - ) + results = { + "events_before": self._event_serializer.serialize_events( + event_context.events_before, + time_now, + bundle_aggregations=event_context.aggregations, + 
), + "event": self._event_serializer.serialize_event( + event_context.event, + time_now, + bundle_aggregations=event_context.aggregations, + ), + "events_after": self._event_serializer.serialize_events( + event_context.events_after, + time_now, + bundle_aggregations=event_context.aggregations, + ), + "state": self._event_serializer.serialize_events( + event_context.state, time_now + ), + "start": event_context.start, + "end": event_context.end, + } return 200, results diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index d20ae1421e19..f9615da52583 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -48,6 +48,7 @@ from synapse.http.servlet import RestServlet, parse_boolean, parse_integer, parse_string from synapse.http.site import SynapseRequest from synapse.logging.opentracing import trace +from synapse.storage.databases.main.relations import BundledAggregations from synapse.types import JsonDict, StreamToken from synapse.util import json_decoder @@ -526,7 +527,7 @@ async def encode_room( def serialize( events: Iterable[EventBase], - aggregations: Optional[Dict[str, Dict[str, Any]]] = None, + aggregations: Optional[Dict[str, BundledAggregations]] = None, ) -> List[JsonDict]: return self._event_serializer.serialize_events( events, diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 2cb5d06c1352..a9a5dd5f03b6 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -13,17 +13,7 @@ # limitations under the License. import logging -from typing import ( - TYPE_CHECKING, - Any, - Dict, - Iterable, - List, - Optional, - Tuple, - Union, - cast, -) +from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Union, cast import attr from frozendict import frozendict @@ -43,6 +33,7 @@ PaginationChunk, RelationPaginationToken, ) +from synapse.types import JsonDict from synapse.util.caches.descriptors import cached if TYPE_CHECKING: @@ -51,6 +42,30 @@ logger = logging.getLogger(__name__) +@attr.s(slots=True, frozen=True, auto_attribs=True) +class _ThreadAggregation: + latest_event: EventBase + count: int + current_user_participated: bool + + +@attr.s(slots=True, auto_attribs=True) +class BundledAggregations: + """ + The bundled aggregations for an event. + + Some values require additional processing during serialization. + """ + + annotations: Optional[JsonDict] = None + references: Optional[JsonDict] = None + replace: Optional[EventBase] = None + thread: Optional[_ThreadAggregation] = None + + def __bool__(self) -> bool: + return bool(self.annotations or self.references or self.replace or self.thread) + + class RelationsWorkerStore(SQLBaseStore): def __init__( self, @@ -585,7 +600,7 @@ def _get_if_user_has_annotated_event(txn: LoggingTransaction) -> bool: async def _get_bundled_aggregation_for_event( self, event: EventBase, user_id: str - ) -> Optional[Dict[str, Any]]: + ) -> Optional[BundledAggregations]: """Generate bundled aggregations for an event. Note that this does not use a cache, but depends on cached methods. @@ -616,24 +631,24 @@ async def _get_bundled_aggregation_for_event( # The bundled aggregations to include, a mapping of relation type to a # type-specific value. Some types include the direct return type here # while others need more processing during serialization. 
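# (With this change the plain dict becomes the typed BundledAggregations
# container defined above: annotations and references already hold serialized
# JSON chunks, while replace and thread hold events that are only serialized
# later, by the event serializer's _inject_bundled_aggregations.)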
- aggregations: Dict[str, Any] = {} + aggregations = BundledAggregations() annotations = await self.get_aggregation_groups_for_event(event_id, room_id) if annotations.chunk: - aggregations[RelationTypes.ANNOTATION] = annotations.to_dict() + aggregations.annotations = annotations.to_dict() references = await self.get_relations_for_event( event_id, room_id, RelationTypes.REFERENCE, direction="f" ) if references.chunk: - aggregations[RelationTypes.REFERENCE] = references.to_dict() + aggregations.references = references.to_dict() edit = None if event.type == EventTypes.Message: edit = await self.get_applicable_edit(event_id, room_id) if edit: - aggregations[RelationTypes.REPLACE] = edit + aggregations.replace = edit # If this event is the start of a thread, include a summary of the replies. if self._msc3440_enabled: @@ -644,11 +659,11 @@ async def _get_bundled_aggregation_for_event( event_id, room_id, user_id ) if latest_thread_event: - aggregations[RelationTypes.THREAD] = { - "latest_event": latest_thread_event, - "count": thread_count, - "current_user_participated": participated, - } + aggregations.thread = _ThreadAggregation( + latest_event=latest_thread_event, + count=thread_count, + current_user_participated=participated, + ) # Store the bundled aggregations in the event metadata for later use. return aggregations @@ -657,7 +672,7 @@ async def get_bundled_aggregations( self, events: Iterable[EventBase], user_id: str, - ) -> Dict[str, Dict[str, Any]]: + ) -> Dict[str, BundledAggregations]: """Generate bundled aggregations for events. Args: @@ -676,7 +691,7 @@ async def get_bundled_aggregations( results = {} for event in events: event_result = await self._get_bundled_aggregation_for_event(event, user_id) - if event_result is not None: + if event_result: results[event.event_id] = event_result return results diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 319464b1fa83..a898f847e7d5 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -81,6 +81,14 @@ class _EventDictReturn: stream_ordering: int +@attr.s(slots=True, frozen=True, auto_attribs=True) +class _EventsAround: + events_before: List[EventBase] + events_after: List[EventBase] + start: RoomStreamToken + end: RoomStreamToken + + def generate_pagination_where_clause( direction: str, column_names: Tuple[str, str], @@ -846,7 +854,7 @@ async def get_events_around( before_limit: int, after_limit: int, event_filter: Optional[Filter] = None, - ) -> dict: + ) -> _EventsAround: """Retrieve events and pagination tokens around a given event in a room. 
""" @@ -869,12 +877,12 @@ async def get_events_around( list(results["after"]["event_ids"]), get_prev_content=True ) - return { - "events_before": events_before, - "events_after": events_after, - "start": results["before"]["token"], - "end": results["after"]["token"], - } + return _EventsAround( + events_before=events_before, + events_after=events_after, + start=results["before"]["token"], + end=results["after"]["token"], + ) def _get_events_around_txn( self, diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index c9b220e73d1a..96ae7790bb15 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -577,7 +577,7 @@ def assert_bundle(event_json: JsonDict) -> None: self.assertEquals(200, channel.code, channel.json_body) room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] self.assertTrue(room_timeline["limited"]) - self._find_event_in_chunk(room_timeline["events"]) + assert_bundle(self._find_event_in_chunk(room_timeline["events"])) def test_aggregation_get_event_for_annotation(self): """Test that annotations do not get bundled aggregations included From 2d3bd9aa670eedd299cc03093459929adec41918 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 26 Jan 2022 14:21:13 +0000 Subject: [PATCH 03/19] Add a module callback to set username at registration (#11790) This is in the context of mainlining the Tchap fork of Synapse. Currently in Tchap usernames are derived from the user's email address (extracted from the UIA results, more specifically the m.login.email.identity step). This change also exports the check_username method from the registration handler as part of the module API, so that a module can check if the username it's trying to generate is correct and doesn't conflict with an existing one, and fallback gracefully if not. Co-authored-by: David Robertson --- changelog.d/11790.feature | 1 + .../password_auth_provider_callbacks.md | 62 +++++++++++++++ synapse/handlers/auth.py | 58 ++++++++++++++ synapse/module_api/__init__.py | 22 ++++++ synapse/rest/client/register.py | 12 ++- tests/handlers/test_password_providers.py | 79 ++++++++++++++++++- 6 files changed, 231 insertions(+), 3 deletions(-) create mode 100644 changelog.d/11790.feature diff --git a/changelog.d/11790.feature b/changelog.d/11790.feature new file mode 100644 index 000000000000..4a5cc8ec37db --- /dev/null +++ b/changelog.d/11790.feature @@ -0,0 +1 @@ +Add a module callback to set username at registration. diff --git a/docs/modules/password_auth_provider_callbacks.md b/docs/modules/password_auth_provider_callbacks.md index e53abf640989..ec8324d292d8 100644 --- a/docs/modules/password_auth_provider_callbacks.md +++ b/docs/modules/password_auth_provider_callbacks.md @@ -105,6 +105,68 @@ device ID), and the (now deactivated) access token. If multiple modules implement this callback, Synapse runs them all in order. +### `get_username_for_registration` + +_First introduced in Synapse v1.52.0_ + +```python +async def get_username_for_registration( + uia_results: Dict[str, Any], + params: Dict[str, Any], +) -> Optional[str] +``` + +Called when registering a new user. The module can return a username to set for the user +being registered by returning it as a string, or `None` if it doesn't wish to force a +username for this user. If a username is returned, it will be used as the local part of a +user's full Matrix ID (e.g. it's `alice` in `@alice:example.com`). 
+
+This callback is called once [User-Interactive Authentication](https://spec.matrix.org/latest/client-server-api/#user-interactive-authentication-api)
+has been completed by the user. It is not called when registering a user via SSO. It is
+passed two dictionaries, which include the information that the user has provided during
+the registration process.
+
+The first dictionary contains the results of the [User-Interactive Authentication](https://spec.matrix.org/latest/client-server-api/#user-interactive-authentication-api)
+flow followed by the user. Its keys are the identifiers of every step involved in the flow,
+associated with either a boolean value indicating whether the step was correctly completed,
+or additional information (e.g. email address, phone number...). Most of the existing
+identifiers are listed in the [Matrix specification](https://spec.matrix.org/v1.1/client-server-api/#authentication-types).
+Here's an example featuring all currently supported keys:
+
+```python
+{
+    "m.login.dummy": True,  # Dummy authentication
+    "m.login.terms": True,  # User has accepted the terms of service for the homeserver
+    "m.login.recaptcha": True,  # User has completed the recaptcha challenge
+    "m.login.email.identity": {  # User has provided and verified an email address
+        "medium": "email",
+        "address": "alice@example.com",
+        "validated_at": 1642701357084,
+    },
+    "m.login.msisdn": {  # User has provided and verified a phone number
+        "medium": "msisdn",
+        "address": "33123456789",
+        "validated_at": 1642701357084,
+    },
+    "org.matrix.msc3231.login.registration_token": "sometoken",  # User has registered through the flow described in MSC3231
+}
+```
+
+The second dictionary contains the parameters provided by the user's client in the request
+to `/_matrix/client/v3/register`. See the [Matrix specification](https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3register)
+for a complete list of these parameters.
+
+If the module cannot, or does not wish to, generate a username for this user, it must
+return `None`.
+
+If multiple modules implement this callback, they will be considered in order. If a
+callback returns `None`, Synapse falls through to the next one. The value of the first
+callback that does not return `None` will be used. If this happens, Synapse will not call
+any of the subsequent implementations of this callback. If every callback returns `None`,
+the username provided by the user is used, if any (otherwise one is automatically
+generated).
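As an illustration of how these pieces fit together (this example is not part of the
patch, and every name other than the callback itself is illustrative), a module in the
spirit of the Tchap use case described in the commit message could derive the localpart
from the email address verified during the UIA flow, using the `check_username` method
exported by this same change to fall back gracefully if that localpart is invalid or
already taken:

```python
from typing import Any, Dict, Optional

import synapse.module_api
from synapse.module_api.errors import SynapseError


class EmailUsernameModule:
    def __init__(self, config: dict, api: synapse.module_api.ModuleApi):
        self._api = api
        api.register_password_auth_provider_callbacks(
            get_username_for_registration=self.get_username_for_registration,
        )

    async def get_username_for_registration(
        self,
        uia_results: Dict[str, Any],
        params: Dict[str, Any],
    ) -> Optional[str]:
        threepid = uia_results.get("m.login.email.identity")
        if not isinstance(threepid, dict):
            # No verified email address: don't force a username.
            return None
        # Use the part of the address before the "@" as the localpart.
        localpart = threepid["address"].split("@", 1)[0]
        try:
            await self._api.check_username(localpart)
        except SynapseError:
            # Invalid or already in use (M_USER_IN_USE): fall through to the
            # next callback, or to the client-supplied username.
            return None
        return localpart
```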
+ + ## Example The example module below implements authentication checkers for two different login types: diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index bd1a3225638a..e32c93e234d8 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -2060,6 +2060,10 @@ def run(*args: Tuple, **kwargs: Dict) -> Awaitable: Optional[Tuple[str, Optional[Callable[["LoginResponse"], Awaitable[None]]]]] ], ] +GET_USERNAME_FOR_REGISTRATION_CALLBACK = Callable[ + [JsonDict, JsonDict], + Awaitable[Optional[str]], +] class PasswordAuthProvider: @@ -2072,6 +2076,9 @@ def __init__(self) -> None: # lists of callbacks self.check_3pid_auth_callbacks: List[CHECK_3PID_AUTH_CALLBACK] = [] self.on_logged_out_callbacks: List[ON_LOGGED_OUT_CALLBACK] = [] + self.get_username_for_registration_callbacks: List[ + GET_USERNAME_FOR_REGISTRATION_CALLBACK + ] = [] # Mapping from login type to login parameters self._supported_login_types: Dict[str, Iterable[str]] = {} @@ -2086,6 +2093,9 @@ def register_password_auth_provider_callbacks( auth_checkers: Optional[ Dict[Tuple[str, Tuple[str, ...]], CHECK_AUTH_CALLBACK] ] = None, + get_username_for_registration: Optional[ + GET_USERNAME_FOR_REGISTRATION_CALLBACK + ] = None, ) -> None: # Register check_3pid_auth callback if check_3pid_auth is not None: @@ -2130,6 +2140,11 @@ def register_password_auth_provider_callbacks( # Add the new method to the list of auth_checker_callbacks for this login type self.auth_checker_callbacks.setdefault(login_type, []).append(callback) + if get_username_for_registration is not None: + self.get_username_for_registration_callbacks.append( + get_username_for_registration, + ) + def get_supported_login_types(self) -> Mapping[str, Iterable[str]]: """Get the login types supported by this password provider @@ -2285,3 +2300,46 @@ async def on_logged_out( except Exception as e: logger.warning("Failed to run module API callback %s: %s", callback, e) continue + + async def get_username_for_registration( + self, + uia_results: JsonDict, + params: JsonDict, + ) -> Optional[str]: + """Defines the username to use when registering the user, using the credentials + and parameters provided during the UIA flow. + + Stops at the first callback that returns a string. + + Args: + uia_results: The credentials provided during the UIA flow. + params: The parameters provided by the registration request. + + Returns: + The localpart to use when registering this user, or None if no module + returned a localpart. + """ + for callback in self.get_username_for_registration_callbacks: + try: + res = await callback(uia_results, params) + + if isinstance(res, str): + return res + elif res is not None: + # mypy complains that this line is unreachable because it assumes the + # data returned by the module fits the expected type. We just want + # to make sure this is the case. 
+                    logger.warning(  # type: ignore[unreachable]
+                        "Ignoring non-string value returned by"
+                        " get_username_for_registration callback %s: %s",
+                        callback,
+                        res,
+                    )
+            except Exception as e:
+                logger.error(
+                    "Module raised an exception in get_username_for_registration: %s",
+                    e,
+                )
+                raise SynapseError(code=500, msg="Internal Server Error")
+
+        return None
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index 662e60bc3394..788b2e47d523 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -71,6 +71,7 @@ from synapse.handlers.auth import (
     CHECK_3PID_AUTH_CALLBACK,
     CHECK_AUTH_CALLBACK,
+    GET_USERNAME_FOR_REGISTRATION_CALLBACK,
     ON_LOGGED_OUT_CALLBACK,
     AuthHandler,
 )
@@ -177,6 +178,7 @@ def __init__(self, hs: "HomeServer", auth_handler: AuthHandler) -> None:
         self._presence_stream = hs.get_event_sources().sources.presence
         self._state = hs.get_state_handler()
         self._clock: Clock = hs.get_clock()
+        self._registration_handler = hs.get_registration_handler()
         self._send_email_handler = hs.get_send_email_handler()
         self.custom_template_dir = hs.config.server.custom_template_directory

@@ -310,6 +312,9 @@ def register_password_auth_provider_callbacks(
         auth_checkers: Optional[
             Dict[Tuple[str, Tuple[str, ...]], CHECK_AUTH_CALLBACK]
         ] = None,
+        get_username_for_registration: Optional[
+            GET_USERNAME_FOR_REGISTRATION_CALLBACK
+        ] = None,
     ) -> None:
         """Registers callbacks for password auth provider capabilities.

@@ -319,6 +324,7 @@ def register_password_auth_provider_callbacks(
             check_3pid_auth=check_3pid_auth,
             on_logged_out=on_logged_out,
             auth_checkers=auth_checkers,
+            get_username_for_registration=get_username_for_registration,
         )

     def register_background_update_controller_callbacks(
@@ -1202,6 +1208,22 @@ async def defer_to_thread(
         """
         return await defer_to_thread(self._hs.get_reactor(), f, *args, **kwargs)

+    async def check_username(self, username: str) -> None:
+        """Checks whether the provided username matches the grammar defined in the
+        Matrix specification, and whether it is already in use by an existing user.
+
+        Added in Synapse v1.52.0.
+
+        Args:
+            username: The username to check. This is the local part of the user's full
+                Matrix user ID, i.e. it's "alice" if the full user ID is "@alice:foo.com".
+
+        Raises:
+            SynapseError with the errcode "M_USER_IN_USE" if the username is already in
+                use.
+ """ + await self._registration_handler.check_username(username) + class PublicRoomListManager: """Contains methods for adding to, removing from and querying whether a room diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index c59dae7c0370..e3492f9f9399 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -425,6 +425,7 @@ def __init__(self, hs: "HomeServer"): self.ratelimiter = hs.get_registration_ratelimiter() self.password_policy_handler = hs.get_password_policy_handler() self.clock = hs.get_clock() + self.password_auth_provider = hs.get_password_auth_provider() self._registration_enabled = self.hs.config.registration.enable_registration self._refresh_tokens_enabled = ( hs.config.registration.refreshable_access_token_lifetime is not None @@ -638,7 +639,16 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: if not password_hash: raise SynapseError(400, "Missing params: password", Codes.MISSING_PARAM) - desired_username = params.get("username", None) + desired_username = await ( + self.password_auth_provider.get_username_for_registration( + auth_result, + params, + ) + ) + + if desired_username is None: + desired_username = params.get("username", None) + guest_access_token = params.get("guest_access_token", None) if desired_username is not None: diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py index 2add72b28a3a..94809cb8bea3 100644 --- a/tests/handlers/test_password_providers.py +++ b/tests/handlers/test_password_providers.py @@ -20,10 +20,11 @@ from twisted.internet import defer import synapse +from synapse.api.constants import LoginType from synapse.handlers.auth import load_legacy_password_auth_providers from synapse.module_api import ModuleApi -from synapse.rest.client import devices, login, logout -from synapse.types import JsonDict +from synapse.rest.client import devices, login, logout, register +from synapse.types import JsonDict, UserID from tests import unittest from tests.server import FakeChannel @@ -156,6 +157,7 @@ class PasswordAuthProviderTests(unittest.HomeserverTestCase): login.register_servlets, devices.register_servlets, logout.register_servlets, + register.register_servlets, ] def setUp(self): @@ -745,6 +747,79 @@ async def on_logged_out(user_id, device_id, access_token): on_logged_out.assert_called_once() self.assertTrue(self.called) + def test_username(self): + """Tests that the get_username_for_registration callback can define the username + of a user when registering. + """ + self._setup_get_username_for_registration() + + username = "rin" + channel = self.make_request( + "POST", + "/register", + { + "username": username, + "password": "bar", + "auth": {"type": LoginType.DUMMY}, + }, + ) + self.assertEqual(channel.code, 200) + + # Our callback takes the username and appends "-foo" to it, check that's what we + # have. + mxid = channel.json_body["user_id"] + self.assertEqual(UserID.from_string(mxid).localpart, username + "-foo") + + def test_username_uia(self): + """Tests that the get_username_for_registration callback is only called at the + end of the UIA flow. + """ + m = self._setup_get_username_for_registration() + + # Initiate the UIA flow. 
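# (The first request deliberately omits the "auth" dict: Synapse replies with a
# 401 carrying a UIA session ID instead of completing the registration, and the
# assertion below checks that the callback has not fired at this point.)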
+ username = "rin" + channel = self.make_request( + "POST", + "register", + {"username": username, "type": "m.login.password", "password": "bar"}, + ) + self.assertEqual(channel.code, 401) + self.assertIn("session", channel.json_body) + + # Check that the callback hasn't been called yet. + m.assert_not_called() + + # Finish the UIA flow. + session = channel.json_body["session"] + channel = self.make_request( + "POST", + "register", + {"auth": {"session": session, "type": LoginType.DUMMY}}, + ) + self.assertEqual(channel.code, 200, channel.json_body) + mxid = channel.json_body["user_id"] + self.assertEqual(UserID.from_string(mxid).localpart, username + "-foo") + + # Check that the callback has been called. + m.assert_called_once() + + def _setup_get_username_for_registration(self) -> Mock: + """Registers a get_username_for_registration callback that appends "-foo" to the + username the client is trying to register. + """ + + async def get_username_for_registration(uia_results, params): + self.assertIn(LoginType.DUMMY, uia_results) + username = params["username"] + return username + "-foo" + + m = Mock(side_effect=get_username_for_registration) + + password_auth_provider = self.hs.get_password_auth_provider() + password_auth_provider.get_username_for_registration_callbacks.append(m) + + return m + def _get_login_flows(self) -> JsonDict: channel = self.make_request("GET", "/_matrix/client/r0/login") self.assertEqual(channel.code, 200, channel.result) From cef0d5d90a4782096c03ec79f825d114b8d61b6e Mon Sep 17 00:00:00 2001 From: Vaishnav Nair <64798176+totallynotvaishnav@users.noreply.github.com> Date: Wed, 26 Jan 2022 20:18:27 +0530 Subject: [PATCH 04/19] Include `prev_content` field in AS events (#11798) * Include 'prev_content' field in AS events Signed-off-by: Vaishnav Nair Co-authored-by: Brendan Abolivier --- changelog.d/11798.bugfix | 1 + synapse/storage/databases/main/appservice.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11798.bugfix diff --git a/changelog.d/11798.bugfix b/changelog.d/11798.bugfix new file mode 100644 index 000000000000..2651fd11cff0 --- /dev/null +++ b/changelog.d/11798.bugfix @@ -0,0 +1 @@ +Include a `prev_content` field in state events sent to Application Services. Contributed by @totallynotvaishnav. \ No newline at end of file diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py index 92c95a41d793..2bb52884315e 100644 --- a/synapse/storage/databases/main/appservice.py +++ b/synapse/storage/databases/main/appservice.py @@ -384,7 +384,7 @@ def get_new_events_for_appservice_txn(txn): "get_new_events_for_appservice", get_new_events_for_appservice_txn ) - events = await self.get_events_as_list(event_ids) + events = await self.get_events_as_list(event_ids, get_prev_content=True) return upper_bound, events From ec07062e314308e221b1739e80d8472f7bbb3628 Mon Sep 17 00:00:00 2001 From: Shay Date: Wed, 26 Jan 2022 16:05:29 -0800 Subject: [PATCH 05/19] Update installation docs to indicate that we support Python 3.10 (#11820) --- changelog.d/11820.doc | 1 + docs/setup/installation.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/11820.doc diff --git a/changelog.d/11820.doc b/changelog.d/11820.doc new file mode 100644 index 000000000000..4f563b9b569b --- /dev/null +++ b/changelog.d/11820.doc @@ -0,0 +1 @@ +Update pypi installation docs to indicate that we now support Python 3.10. 
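For PATCH 04 above, the practical effect of passing `get_prev_content=True` is that
state events handed to application services also carry the content of the state they
replaced. A hand-written illustration of the resulting shape (not taken from the patch):

    {
        "type": "m.room.member",
        "room_id": "!room:example.com",
        "sender": "@alice:example.com",
        "state_key": "@alice:example.com",
        "content": {"membership": "leave"},
        "prev_content": {"membership": "join", "displayname": "Alice"},
    }

Depending on the event format, `prev_content` is exposed either at the top level, as
here, or under `unsigned`; the v1 client event format generally used for the application
service transaction API hoists it to the top level.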
diff --git a/docs/setup/installation.md b/docs/setup/installation.md index fe657a15dfa6..69ade036c39d 100644 --- a/docs/setup/installation.md +++ b/docs/setup/installation.md @@ -194,7 +194,7 @@ When following this route please make sure that the [Platform-specific prerequis System requirements: - POSIX-compliant system (tested on Linux & OS X) -- Python 3.7 or later, up to Python 3.9. +- Python 3.7 or later, up to Python 3.10. - At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org To install the Synapse homeserver run: From fd65139714433763e8ef5be6017ab6ba5988e766 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Thu, 27 Jan 2022 11:06:29 +0100 Subject: [PATCH 06/19] Fix some indentation inconsistencies in the sample config (modules) (#11838) --- changelog.d/11838.misc | 1 + docs/sample_config.yaml | 10 +++++----- synapse/config/modules.py | 10 +++++----- 3 files changed, 11 insertions(+), 10 deletions(-) create mode 100644 changelog.d/11838.misc diff --git a/changelog.d/11838.misc b/changelog.d/11838.misc new file mode 100644 index 000000000000..4747bbe88b0d --- /dev/null +++ b/changelog.d/11838.misc @@ -0,0 +1 @@ +Fix some indentation inconsistencies in the sample config. \ No newline at end of file diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index b38e6d6c88da..abf28e449089 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -41,11 +41,11 @@ # documentation on how to configure or create custom modules for Synapse. # modules: - # - module: my_super_module.MySuperClass - # config: - # do_thing: true - # - module: my_other_super_module.SomeClass - # config: {} + #- module: my_super_module.MySuperClass + # config: + # do_thing: true + #- module: my_other_super_module.SomeClass + # config: {} ## Server ## diff --git a/synapse/config/modules.py b/synapse/config/modules.py index 85fb05890d7c..2ef02b8f5566 100644 --- a/synapse/config/modules.py +++ b/synapse/config/modules.py @@ -41,9 +41,9 @@ def generate_config_section(self, **kwargs): # documentation on how to configure or create custom modules for Synapse. # modules: - # - module: my_super_module.MySuperClass - # config: - # do_thing: true - # - module: my_other_super_module.SomeClass - # config: {} + #- module: my_super_module.MySuperClass + # config: + # do_thing: true + #- module: my_other_super_module.SomeClass + # config: {} """ From 57e4786e907c390502f4ec6fb915e24cf5124351 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 27 Jan 2022 10:54:27 +0000 Subject: [PATCH 07/19] Create singletons for `StateFilter.{all,none}()` (#11836) No point recreating these for each call, since they are frozen --- changelog.d/11836.misc | 1 + synapse/storage/state.py | 14 +++++++++----- 2 files changed, 10 insertions(+), 5 deletions(-) create mode 100644 changelog.d/11836.misc diff --git a/changelog.d/11836.misc b/changelog.d/11836.misc new file mode 100644 index 000000000000..be7e331c639c --- /dev/null +++ b/changelog.d/11836.misc @@ -0,0 +1 @@ +Minor performance improvement in room state lookup. diff --git a/synapse/storage/state.py b/synapse/storage/state.py index df8b2f108877..913448f0f95e 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -74,21 +74,21 @@ def __attrs_post_init__(self): @staticmethod def all() -> "StateFilter": - """Creates a filter that fetches everything. + """Returns a filter that fetches everything. Returns: - The new state filter. 
+            The state filter.
         """
-        return StateFilter(types=frozendict(), include_others=True)
+        return _ALL_STATE_FILTER

     @staticmethod
     def none() -> "StateFilter":
-        """Creates a filter that fetches nothing.
+        """Returns a filter that fetches nothing.

         Returns:
             The new state filter.
         """
-        return StateFilter(types=frozendict(), include_others=False)
+        return _NONE_STATE_FILTER

     @staticmethod
     def from_types(types: Iterable[Tuple[str, Optional[str]]]) -> "StateFilter":
@@ -527,6 +527,10 @@ def approx_difference(self, other: "StateFilter") -> "StateFilter":
         )


+_ALL_STATE_FILTER = StateFilter(types=frozendict(), include_others=True)
+_NONE_STATE_FILTER = StateFilter(types=frozendict(), include_others=False)
+
+
 class StateGroupStorage:
     """High level interface to fetching state for event."""

From 6d482ba259a55947678390c06b24a7ba91f0ebab Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Thu, 27 Jan 2022 17:45:39 +0000
Subject: [PATCH 08/19] Pass `isolation_level` to `runWithConnection` (#11847)

This was missed in https://github.com/matrix-org/synapse/pull/11799
---
 changelog.d/11847.misc      | 1 +
 synapse/storage/database.py | 1 +
 2 files changed, 2 insertions(+)
 create mode 100644 changelog.d/11847.misc

diff --git a/changelog.d/11847.misc b/changelog.d/11847.misc
new file mode 100644
index 000000000000..5c3b2bcaf4e5
--- /dev/null
+++ b/changelog.d/11847.misc
@@ -0,0 +1 @@
+Preparation for reducing Postgres serialization errors: allow setting transaction isolation level. Contributed by Nick @ Beeper.
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index 7455326ed331..99802228c9f7 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -748,6 +748,7 @@ async def runInteraction(
             func,
             *args,
             db_autocommit=db_autocommit,
+            isolation_level=isolation_level,
             **kwargs,
         )

From bf60da1a60096fac5fb778b732ff2214862ac808 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Fri, 28 Jan 2022 14:41:33 +0000
Subject: [PATCH 09/19] Configurable limits on avatars (#11846)

Only allow files whose file size and content type match the configured limits
to be set as avatars.

Most of the inspiration for the non-test code comes from matrix-org/synapse-dinsic#19
---
 changelog.d/11846.feature         |   1 +
 docs/sample_config.yaml           |  14 +++
 synapse/config/server.py          |  27 ++++++
 synapse/handlers/profile.py       |  67 +++++++++++++
 synapse/handlers/room_member.py   |   6 ++
 tests/handlers/test_profile.py    |  94 +++++++++++++++++-
 tests/rest/client/test_profile.py | 156 ++++++++++++++++++++++++++++++
 7 files changed, 363 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/11846.feature

diff --git a/changelog.d/11846.feature b/changelog.d/11846.feature
new file mode 100644
index 000000000000..fcf6affdb53d
--- /dev/null
+++ b/changelog.d/11846.feature
@@ -0,0 +1 @@
+Allow configuring a maximum file size as well as a list of allowed content types for avatars.
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index abf28e449089..689b207fc0c0 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -471,6 +471,20 @@ limit_remote_rooms:
 #
 #allow_per_room_profiles: false

+# The largest allowed file size for a user avatar. Defaults to no restriction.
+#
+# Note that user avatar changes will not work if this is set without
+# using Synapse's media repository.
+#
+#max_avatar_size: 10M
+
+# The MIME types allowed for user avatars. Defaults to no restriction.
+#
+# Note that user avatar changes will not work if this is set without
+# using Synapse's media repository.
+# +#allowed_avatar_mimetypes: ["image/png", "image/jpeg", "image/gif"] + # How long to keep redacted events in unredacted form in the database. After # this period redacted events get replaced with their redacted form in the DB. # diff --git a/synapse/config/server.py b/synapse/config/server.py index f200d0c1f1cf..a460cf25b42f 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -489,6 +489,19 @@ def read_config(self, config, **kwargs): # events with profile information that differ from the target's global profile. self.allow_per_room_profiles = config.get("allow_per_room_profiles", True) + # The maximum size an avatar can have, in bytes. + self.max_avatar_size = config.get("max_avatar_size") + if self.max_avatar_size is not None: + self.max_avatar_size = self.parse_size(self.max_avatar_size) + + # The MIME types allowed for an avatar. + self.allowed_avatar_mimetypes = config.get("allowed_avatar_mimetypes") + if self.allowed_avatar_mimetypes and not isinstance( + self.allowed_avatar_mimetypes, + list, + ): + raise ConfigError("allowed_avatar_mimetypes must be a list") + self.listeners = [parse_listener_def(x) for x in config.get("listeners", [])] # no_tls is not really supported any more, but let's grandfather it in @@ -1168,6 +1181,20 @@ def generate_config_section( # #allow_per_room_profiles: false + # The largest allowed file size for a user avatar. Defaults to no restriction. + # + # Note that user avatar changes will not work if this is set without + # using Synapse's media repository. + # + #max_avatar_size: 10M + + # The MIME types allowed for user avatars. Defaults to no restriction. + # + # Note that user avatar changes will not work if this is set without + # using Synapse's media repository. + # + #allowed_avatar_mimetypes: ["image/png", "image/jpeg", "image/gif"] + # How long to keep redacted events in unredacted form in the database. After # this period redacted events get replaced with their redacted form in the DB. # diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 6b5a6ded8b29..36e3ad2ba971 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -31,6 +31,8 @@ create_requester, get_domain_from_id, ) +from synapse.util.caches.descriptors import cached +from synapse.util.stringutils import parse_and_validate_mxc_uri if TYPE_CHECKING: from synapse.server import HomeServer @@ -64,6 +66,11 @@ def __init__(self, hs: "HomeServer"): self.user_directory_handler = hs.get_user_directory_handler() self.request_ratelimiter = hs.get_request_ratelimiter() + self.max_avatar_size = hs.config.server.max_avatar_size + self.allowed_avatar_mimetypes = hs.config.server.allowed_avatar_mimetypes + + self.server_name = hs.config.server.server_name + if hs.config.worker.run_background_tasks: self.clock.looping_call( self._update_remote_profile_cache, self.PROFILE_UPDATE_MS @@ -286,6 +293,9 @@ async def set_avatar_url( 400, "Avatar URL is too long (max %i)" % (MAX_AVATAR_URL_LEN,) ) + if not await self.check_avatar_size_and_mime_type(new_avatar_url): + raise SynapseError(403, "This avatar is not allowed", Codes.FORBIDDEN) + avatar_url_to_set: Optional[str] = new_avatar_url if new_avatar_url == "": avatar_url_to_set = None @@ -307,6 +317,63 @@ async def set_avatar_url( await self._update_join_states(requester, target_user) + @cached() + async def check_avatar_size_and_mime_type(self, mxc: str) -> bool: + """Check that the size and content type of the avatar at the given MXC URI are + within the configured limits. 
+
+        Args:
+            mxc: The MXC URI at which the avatar can be found.
+
+        Returns:
+            A boolean indicating whether the file can be allowed to be set as an avatar.
+        """
+        if not self.max_avatar_size and not self.allowed_avatar_mimetypes:
+            return True
+
+        server_name, _, media_id = parse_and_validate_mxc_uri(mxc)
+
+        if server_name == self.server_name:
+            media_info = await self.store.get_local_media(media_id)
+        else:
+            media_info = await self.store.get_cached_remote_media(server_name, media_id)
+
+        if media_info is None:
+            # Both configuration options need to access the file's metadata, and
+            # retrieving remote avatars just for this becomes a bit of a faff, especially
+            # if e.g. the file is too big. It's also generally safe to assume most files
+            # used as avatars are uploaded locally, or, if the upload didn't happen as
+            # part of a PUT request on /avatar_url, that the file was at least previewed
+            # by the user locally (and therefore downloaded to the remote media cache).
+            logger.warning("Forbidding avatar change to %s: avatar not on server", mxc)
+            return False
+
+        if self.max_avatar_size:
+            # Ensure avatar does not exceed max allowed avatar size
+            if media_info["media_length"] > self.max_avatar_size:
+                logger.warning(
+                    "Forbidding avatar change to %s: %d bytes is above the allowed size "
+                    "limit",
+                    mxc,
+                    media_info["media_length"],
+                )
+                return False
+
+        if self.allowed_avatar_mimetypes:
+            # Ensure the avatar's file type is allowed
+            if (
+                self.allowed_avatar_mimetypes
+                and media_info["media_type"] not in self.allowed_avatar_mimetypes
+            ):
+                logger.warning(
+                    "Forbidding avatar change to %s: mimetype %s not allowed",
+                    mxc,
+                    media_info["media_type"],
+                )
+                return False
+
+        return True
+
     async def on_profile_query(self, args: JsonDict) -> JsonDict:
         """Handles federation profile query requests."""
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 6aa910dd10f9..3dd5e1b6e4dc 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -590,6 +590,12 @@ async def update_membership_locked(
                 errcode=Codes.BAD_JSON,
             )

+        if "avatar_url" in content:
+            if not await self.profile_handler.check_avatar_size_and_mime_type(
+                content["avatar_url"],
+            ):
+                raise SynapseError(403, "This avatar is not allowed", Codes.FORBIDDEN)
+
         # The event content should *not* include the authorising user as
         # it won't be properly signed. Strip it out since it might come
         # back from a client updating a display name / avatar.
diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py
index c153018fd81b..60235e569987 100644
--- a/tests/handlers/test_profile.py
+++ b/tests/handlers/test_profile.py
@@ -11,12 +11,13 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
+from typing import Any, Dict
 from unittest.mock import Mock

 import synapse.types
 from synapse.api.errors import AuthError, SynapseError
 from synapse.rest import admin
+from synapse.server import HomeServer
 from synapse.types import UserID

 from tests import unittest

@@ -46,7 +47,7 @@ def register_query_handler(query_type, handler):
         )
         return hs

-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor, clock, hs: HomeServer):
         self.store = hs.get_datastore()

         self.frank = UserID.from_string("@1234abcd:test")
@@ -248,3 +249,92 @@ def test_set_my_avatar_if_disabled(self):
             ),
             SynapseError,
         )
+
+    def test_avatar_constraints_no_config(self):
+        """Tests that the method to check an avatar against configured constraints skips
+        all of its checks if no constraint is configured.
+        """
+        # The first check that's done by this method is whether the file exists; if we
+        # don't get an error on a non-existing file then it means all of the checks were
+        # successfully skipped.
+        res = self.get_success(
+            self.handler.check_avatar_size_and_mime_type("mxc://test/unknown_file")
+        )
+        self.assertTrue(res)
+
+    @unittest.override_config({"max_avatar_size": 50})
+    def test_avatar_constraints_missing(self):
+        """Tests that an avatar isn't allowed if the file at the given MXC URI couldn't
+        be found.
+        """
+        res = self.get_success(
+            self.handler.check_avatar_size_and_mime_type("mxc://test/unknown_file")
+        )
+        self.assertFalse(res)
+
+    @unittest.override_config({"max_avatar_size": 50})
+    def test_avatar_constraints_file_size(self):
+        """Tests that a file that's above the allowed file size is forbidden but one
+        that's below it is allowed.
+        """
+        self._setup_local_files(
+            {
+                "small": {"size": 40},
+                "big": {"size": 60},
+            }
+        )
+
+        res = self.get_success(
+            self.handler.check_avatar_size_and_mime_type("mxc://test/small")
+        )
+        self.assertTrue(res)
+
+        res = self.get_success(
+            self.handler.check_avatar_size_and_mime_type("mxc://test/big")
+        )
+        self.assertFalse(res)
+
+    @unittest.override_config({"allowed_avatar_mimetypes": ["image/png"]})
+    def test_avatar_constraint_mime_type(self):
+        """Tests that a file with an unauthorised MIME type is forbidden but one with
+        an authorised content type is allowed.
+        """
+        self._setup_local_files(
+            {
+                "good": {"mimetype": "image/png"},
+                "bad": {"mimetype": "application/octet-stream"},
+            }
+        )
+
+        res = self.get_success(
+            self.handler.check_avatar_size_and_mime_type("mxc://test/good")
+        )
+        self.assertTrue(res)
+
+        res = self.get_success(
+            self.handler.check_avatar_size_and_mime_type("mxc://test/bad")
+        )
+        self.assertFalse(res)
+
+    def _setup_local_files(self, names_and_props: Dict[str, Dict[str, Any]]):
+        """Stores metadata about files in the database.
+
+        Args:
+            names_and_props: A dictionary with one entry per file, with the key being the
+                file's name, and the value being a dictionary of properties. Supported
+                properties are "mimetype" (for the file's type) and "size" (for the
+                file's size).
+ """ + store = self.hs.get_datastore() + + for name, props in names_and_props.items(): + self.get_success( + store.store_local_media( + media_id=name, + media_type=props.get("mimetype", "image/png"), + time_now_ms=self.clock.time_msec(), + upload_name=None, + media_length=props.get("size", 50), + user_id=UserID.from_string("@rin:test"), + ) + ) diff --git a/tests/rest/client/test_profile.py b/tests/rest/client/test_profile.py index 2860579c2e54..ead883ded886 100644 --- a/tests/rest/client/test_profile.py +++ b/tests/rest/client/test_profile.py @@ -13,8 +13,12 @@ # limitations under the License. """Tests REST events for /profile paths.""" +from typing import Any, Dict + +from synapse.api.errors import Codes from synapse.rest import admin from synapse.rest.client import login, profile, room +from synapse.types import UserID from tests import unittest @@ -25,6 +29,7 @@ class ProfileTestCase(unittest.HomeserverTestCase): admin.register_servlets_for_client_rest_resource, login.register_servlets, profile.register_servlets, + room.register_servlets, ] def make_homeserver(self, reactor, clock): @@ -150,6 +155,157 @@ def _get_avatar_url(self, name=None): self.assertEqual(channel.code, 200, channel.result) return channel.json_body.get("avatar_url") + @unittest.override_config({"max_avatar_size": 50}) + def test_avatar_size_limit_global(self): + """Tests that the maximum size limit for avatars is enforced when updating a + global profile. + """ + self._setup_local_files( + { + "small": {"size": 40}, + "big": {"size": 60}, + } + ) + + channel = self.make_request( + "PUT", + f"/profile/{self.owner}/avatar_url", + content={"avatar_url": "mxc://test/big"}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 403, channel.result) + self.assertEqual( + channel.json_body["errcode"], Codes.FORBIDDEN, channel.json_body + ) + + channel = self.make_request( + "PUT", + f"/profile/{self.owner}/avatar_url", + content={"avatar_url": "mxc://test/small"}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + @unittest.override_config({"max_avatar_size": 50}) + def test_avatar_size_limit_per_room(self): + """Tests that the maximum size limit for avatars is enforced when updating a + per-room profile. + """ + self._setup_local_files( + { + "small": {"size": 40}, + "big": {"size": 60}, + } + ) + + room_id = self.helper.create_room_as(tok=self.owner_tok) + + channel = self.make_request( + "PUT", + f"/rooms/{room_id}/state/m.room.member/{self.owner}", + content={"membership": "join", "avatar_url": "mxc://test/big"}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 403, channel.result) + self.assertEqual( + channel.json_body["errcode"], Codes.FORBIDDEN, channel.json_body + ) + + channel = self.make_request( + "PUT", + f"/rooms/{room_id}/state/m.room.member/{self.owner}", + content={"membership": "join", "avatar_url": "mxc://test/small"}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + @unittest.override_config({"allowed_avatar_mimetypes": ["image/png"]}) + def test_avatar_allowed_mime_type_global(self): + """Tests that the MIME type whitelist for avatars is enforced when updating a + global profile. 
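+        The whitelist comes from the `allowed_avatar_mimetypes` homeserver
+        config option.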
+ """ + self._setup_local_files( + { + "good": {"mimetype": "image/png"}, + "bad": {"mimetype": "application/octet-stream"}, + } + ) + + channel = self.make_request( + "PUT", + f"/profile/{self.owner}/avatar_url", + content={"avatar_url": "mxc://test/bad"}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 403, channel.result) + self.assertEqual( + channel.json_body["errcode"], Codes.FORBIDDEN, channel.json_body + ) + + channel = self.make_request( + "PUT", + f"/profile/{self.owner}/avatar_url", + content={"avatar_url": "mxc://test/good"}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + @unittest.override_config({"allowed_avatar_mimetypes": ["image/png"]}) + def test_avatar_allowed_mime_type_per_room(self): + """Tests that the MIME type whitelist for avatars is enforced when updating a + per-room profile. + """ + self._setup_local_files( + { + "good": {"mimetype": "image/png"}, + "bad": {"mimetype": "application/octet-stream"}, + } + ) + + room_id = self.helper.create_room_as(tok=self.owner_tok) + + channel = self.make_request( + "PUT", + f"/rooms/{room_id}/state/m.room.member/{self.owner}", + content={"membership": "join", "avatar_url": "mxc://test/bad"}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 403, channel.result) + self.assertEqual( + channel.json_body["errcode"], Codes.FORBIDDEN, channel.json_body + ) + + channel = self.make_request( + "PUT", + f"/rooms/{room_id}/state/m.room.member/{self.owner}", + content={"membership": "join", "avatar_url": "mxc://test/good"}, + access_token=self.owner_tok, + ) + self.assertEqual(channel.code, 200, channel.result) + + def _setup_local_files(self, names_and_props: Dict[str, Dict[str, Any]]): + """Stores metadata about files in the database. + + Args: + names_and_props: A dictionary with one entry per file, with the key being the + file's name, and the value being a dictionary of properties. Supported + properties are "mimetype" (for the file's type) and "size" (for the + file's size). + """ + store = self.hs.get_datastore() + + for name, props in names_and_props.items(): + self.get_success( + store.store_local_media( + media_id=name, + media_type=props.get("mimetype", "image/png"), + time_now_ms=self.clock.time_msec(), + upload_name=None, + media_length=props.get("size", 50), + user_id=UserID.from_string("@rin:test"), + ) + ) + class ProfilesRestrictedTestCase(unittest.HomeserverTestCase): From 7eb198ddc8f6dd7c6aa4984656db995de4358158 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Mon, 31 Jan 2022 15:40:20 +0100 Subject: [PATCH 10/19] Remove not needed old table of contents in documentation (#11860) --- changelog.d/11860.doc | 1 + docs/MSC1711_certificates_FAQ.md | 21 --------------------- docs/admin_api/media_admin_api.md | 17 ----------------- docs/admin_api/rooms.md | 15 --------------- 4 files changed, 1 insertion(+), 53 deletions(-) create mode 100644 changelog.d/11860.doc diff --git a/changelog.d/11860.doc b/changelog.d/11860.doc new file mode 100644 index 000000000000..04b88c5f2c30 --- /dev/null +++ b/changelog.d/11860.doc @@ -0,0 +1 @@ +Remove not needed old table of contents in documentation. 
diff --git a/docs/MSC1711_certificates_FAQ.md b/docs/MSC1711_certificates_FAQ.md index 086899a9d836..32ba15652d1a 100644 --- a/docs/MSC1711_certificates_FAQ.md +++ b/docs/MSC1711_certificates_FAQ.md @@ -44,27 +44,6 @@ For more details and context on the release of the r0.1 Server/Server API and imminent Matrix 1.0 release, you can also see our [main talk from FOSDEM 2019](https://matrix.org/blog/2019/02/04/matrix-at-fosdem-2019/). -## Contents -* Timeline -* Configuring certificates for compatibility with Synapse 1.0 -* FAQ - * Synapse 0.99.0 has just been released, what do I need to do right now? - * How do I upgrade? - * What will happen if I do not set up a valid federation certificate - immediately? - * What will happen if I do nothing at all? - * When do I need a SRV record or .well-known URI? - * Can I still use an SRV record? - * I have created a .well-known URI. Do I still need an SRV record? - * It used to work just fine, why are you breaking everything? - * Can I manage my own certificates rather than having Synapse renew - certificates itself? - * Do you still recommend against using a reverse proxy on the federation port? - * Do I still need to give my TLS certificates to Synapse if I am using a - reverse proxy? - * Do I need the same certificate for the client and federation port? - * How do I tell Synapse to reload my keys/certificates after I replace them? - ## Timeline **5th Feb 2019 - Synapse 0.99.0 is released.** diff --git a/docs/admin_api/media_admin_api.md b/docs/admin_api/media_admin_api.md index 60b8bc7379a2..2c44e1f758d8 100644 --- a/docs/admin_api/media_admin_api.md +++ b/docs/admin_api/media_admin_api.md @@ -1,20 +1,3 @@ -# Contents -- [Querying media](#querying-media) - * [List all media in a room](#list-all-media-in-a-room) - * [List all media uploaded by a user](#list-all-media-uploaded-by-a-user) -- [Quarantine media](#quarantine-media) - * [Quarantining media by ID](#quarantining-media-by-id) - * [Remove media from quarantine by ID](#remove-media-from-quarantine-by-id) - * [Quarantining media in a room](#quarantining-media-in-a-room) - * [Quarantining all media of a user](#quarantining-all-media-of-a-user) - * [Protecting media from being quarantined](#protecting-media-from-being-quarantined) - * [Unprotecting media from being quarantined](#unprotecting-media-from-being-quarantined) -- [Delete local media](#delete-local-media) - * [Delete a specific local media](#delete-a-specific-local-media) - * [Delete local media by date or size](#delete-local-media-by-date-or-size) - * [Delete media uploaded by a user](#delete-media-uploaded-by-a-user) -- [Purge Remote Media API](#purge-remote-media-api) - # Querying media These APIs allow extracting media information from the homeserver. 
diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md index 0f1a74134ff9..daf2522f6ab9 100644 --- a/docs/admin_api/rooms.md +++ b/docs/admin_api/rooms.md @@ -1,18 +1,3 @@ -# Contents -- [List Room API](#list-room-api) -- [Room Details API](#room-details-api) -- [Room Members API](#room-members-api) -- [Room State API](#room-state-api) -- [Block Room API](#block-room-api) -- [Delete Room API](#delete-room-api) - * [Version 1 (old version)](#version-1-old-version) - * [Version 2 (new version)](#version-2-new-version) - * [Status of deleting rooms](#status-of-deleting-rooms) - * [Undoing room shutdowns](#undoing-room-shutdowns) -- [Make Room Admin API](#make-room-admin-api) -- [Forward Extremities Admin API](#forward-extremities-admin-api) -- [Event Context API](#event-context-api) - # List Room API The List Room admin API allows server admins to get a list of rooms on their From 02755c31882b75ae6e71d9033ed1e72d0fb7c00b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 31 Jan 2022 10:13:32 -0500 Subject: [PATCH 11/19] Remove the obsolete MSC1849 configuration flag. (#11843) MSC1849 was replaced by MSC2675, which was merged. The configuration flag, which defaulted to true, is no longer useful. --- changelog.d/11843.removal | 1 + synapse/config/experimental.py | 2 -- synapse/storage/databases/main/relations.py | 4 ---- 3 files changed, 1 insertion(+), 6 deletions(-) create mode 100644 changelog.d/11843.removal diff --git a/changelog.d/11843.removal b/changelog.d/11843.removal new file mode 100644 index 000000000000..d6d49b4ad50b --- /dev/null +++ b/changelog.d/11843.removal @@ -0,0 +1 @@ +Remove the `experimental_msc1849_support_enabled` flag as the features are now stable. diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index dbaeb1091861..65c807a19af7 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -24,8 +24,6 @@ class ExperimentalConfig(Config): def read_config(self, config: JsonDict, **kwargs): experimental = config.get("experimental_features") or {} - # Whether to enable experimental MSC1849 (aka relations) support - self.msc1849_enabled = config.get("experimental_msc1849_support_enabled", True) # MSC3440 (thread relation) self.msc3440_enabled: bool = experimental.get("msc3440_enabled", False) diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index a9a5dd5f03b6..37468a518381 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -75,7 +75,6 @@ def __init__( ): super().__init__(database, db_conn, hs) - self._msc1849_enabled = hs.config.experimental.msc1849_enabled self._msc3440_enabled = hs.config.experimental.msc3440_enabled @cached(tree=True) @@ -683,9 +682,6 @@ async def get_bundled_aggregations( A map of event ID to the bundled aggregation for the event. Not all events may have bundled aggregations in the results. """ - # If bundled aggregations are disabled, nothing to do. - if not self._msc1849_enabled: - return {} # TODO Parallelize. 
results = {} From 0da2301b21399b85a955d92dc355024a67b8cb40 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Mon, 31 Jan 2022 17:24:29 +0100 Subject: [PATCH 12/19] Consolidate the `access_token` information in the admin api (#11861) Co-authored-by: reivilibre --- changelog.d/11861.doc | 1 + docs/admin_api/account_validity.md | 3 ++ docs/admin_api/delete_group.md | 6 +-- docs/admin_api/event_reports.md | 7 ++- docs/admin_api/media_admin_api.md | 8 ++-- docs/admin_api/purge_history_api.md | 9 ++-- docs/admin_api/room_membership.md | 6 +-- docs/admin_api/rooms.md | 6 +-- docs/admin_api/statistics.md | 6 +-- docs/admin_api/user_admin_api.md | 73 ++--------------------------- 10 files changed, 30 insertions(+), 95 deletions(-) create mode 100644 changelog.d/11861.doc diff --git a/changelog.d/11861.doc b/changelog.d/11861.doc new file mode 100644 index 000000000000..53c75a088378 --- /dev/null +++ b/changelog.d/11861.doc @@ -0,0 +1 @@ +Consolidate the `access_token` information at the top of each relevant page in the Admin API documentation. diff --git a/docs/admin_api/account_validity.md b/docs/admin_api/account_validity.md index b74b5d0c1a96..d878bf7451e3 100644 --- a/docs/admin_api/account_validity.md +++ b/docs/admin_api/account_validity.md @@ -4,6 +4,9 @@ This API allows a server administrator to manage the validity of an account. To use it, you must enable the account validity feature (under `account_validity`) in Synapse's configuration. +To use it, you will need to authenticate by providing an `access_token` +for a server admin: see [Admin API](../usage/administration/admin_api). + ## Renew account This API extends the validity of an account by as much time as configured in the diff --git a/docs/admin_api/delete_group.md b/docs/admin_api/delete_group.md index 2e0a1d24741f..73a96842ac72 100644 --- a/docs/admin_api/delete_group.md +++ b/docs/admin_api/delete_group.md @@ -4,11 +4,11 @@ This API lets a server admin delete a local group. Doing so will kick all users out of the group so that their clients will correctly handle the group being deleted. +To use it, you will need to authenticate by providing an `access_token` +for a server admin: see [Admin API](../usage/administration/admin_api). + The API is: ``` POST /_synapse/admin/v1/delete_group/ ``` - -To use it, you will need to authenticate by providing an `access_token` for a -server admin: see [Admin API](../usage/administration/admin_api). diff --git a/docs/admin_api/event_reports.md b/docs/admin_api/event_reports.md index f523774ba8e3..be6f0961bfcb 100644 --- a/docs/admin_api/event_reports.md +++ b/docs/admin_api/event_reports.md @@ -2,12 +2,13 @@ This API returns information about reported events. +To use it, you will need to authenticate by providing an `access_token` +for a server admin: see [Admin API](../usage/administration/admin_api). + The api is: ``` GET /_synapse/admin/v1/event_reports?from=0&limit=10 ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: see [Admin API](../usage/administration/admin_api). It returns a JSON body like the following: @@ -94,8 +95,6 @@ The api is: ``` GET /_synapse/admin/v1/event_reports/ ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: see [Admin API](../usage/administration/admin_api). 
It returns a JSON body like the following: diff --git a/docs/admin_api/media_admin_api.md b/docs/admin_api/media_admin_api.md index 2c44e1f758d8..a8cdf1972726 100644 --- a/docs/admin_api/media_admin_api.md +++ b/docs/admin_api/media_admin_api.md @@ -2,6 +2,9 @@ These APIs allow extracting media information from the homeserver. +To use it, you will need to authenticate by providing an `access_token` +for a server admin: see [Admin API](../usage/administration/admin_api). + ## List all media in a room This API gets a list of known media in a room. @@ -11,8 +14,6 @@ The API is: ``` GET /_synapse/admin/v1/room//media ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: see [Admin API](../usage/administration/admin_api). The API returns a JSON body like the following: ```json @@ -300,8 +301,5 @@ The following fields are returned in the JSON response body: * `deleted`: integer - The number of media items successfully deleted -To use it, you will need to authenticate by providing an `access_token` for a -server admin: see [Admin API](../usage/administration/admin_api). - If the user re-requests purged remote media, synapse will re-request the media from the originating server. diff --git a/docs/admin_api/purge_history_api.md b/docs/admin_api/purge_history_api.md index 277e28d9cbcb..2527e2758ba3 100644 --- a/docs/admin_api/purge_history_api.md +++ b/docs/admin_api/purge_history_api.md @@ -10,15 +10,15 @@ paginate further back in the room from the point being purged from. Note that Synapse requires at least one message in each room, so it will never delete the last message in a room. +To use it, you will need to authenticate by providing an `access_token` +for a server admin: see [Admin API](../usage/administration/admin_api). + The API is: ``` POST /_synapse/admin/v1/purge_history/[/] ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - By default, events sent by local users are not deleted, as they may represent the only copies of this content in existence. (Events sent by remote users are deleted.) @@ -57,9 +57,6 @@ It is possible to poll for updates on recent purges with a second API; GET /_synapse/admin/v1/purge_history_status/ ``` -Again, you will need to authenticate by providing an `access_token` for a -server admin. - This API returns a JSON body like the following: ```json diff --git a/docs/admin_api/room_membership.md b/docs/admin_api/room_membership.md index 548b790a5c0c..310d6ae628fa 100644 --- a/docs/admin_api/room_membership.md +++ b/docs/admin_api/room_membership.md @@ -5,6 +5,9 @@ to a room with a given `room_id_or_alias`. You can only modify the membership of local users. The server administrator must be in the room and have permission to invite users. +To use it, you will need to authenticate by providing an `access_token` +for a server admin: see [Admin API](../usage/administration/admin_api). + ## Parameters The following parameters are available: @@ -23,9 +26,6 @@ POST /_synapse/admin/v1/join/ } ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: see [Admin API](../usage/administration/admin_api). - Response: ```json diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md index daf2522f6ab9..d4873f94905f 100644 --- a/docs/admin_api/rooms.md +++ b/docs/admin_api/rooms.md @@ -4,6 +4,9 @@ The List Room admin API allows server admins to get a list of rooms on their server. 
There are various parameters available that allow for filtering and sorting the returned list. This API supports pagination. +To use it, you will need to authenticate by providing an `access_token` +for a server admin: see [Admin API](../usage/administration/admin_api). + **Parameters** The following query parameters are available: @@ -478,9 +481,6 @@ several minutes or longer. The local server will only have the power to move local user and room aliases to the new room. Users on other servers will be unaffected. -To use it, you will need to authenticate by providing an ``access_token`` for a -server admin: see [Admin API](../usage/administration/admin_api). - ## Version 1 (old version) This version works synchronously. That means you only get the response once the server has diff --git a/docs/admin_api/statistics.md b/docs/admin_api/statistics.md index 1901f1eea025..a26c76f9f317 100644 --- a/docs/admin_api/statistics.md +++ b/docs/admin_api/statistics.md @@ -3,15 +3,15 @@ Returns information about all local media usage of users. Gives the possibility to filter them by time and user. +To use it, you will need to authenticate by providing an `access_token` +for a server admin: see [Admin API](../usage/administration/admin_api). + The API is: ``` GET /_synapse/admin/v1/statistics/users/media ``` -To use it, you will need to authenticate by providing an `access_token` -for a server admin: see [Admin API](../usage/administration/admin_api). - A response body like the following is returned: ```json diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md index fdc1f2d1cfd1..4f5f377b3808 100644 --- a/docs/admin_api/user_admin_api.md +++ b/docs/admin_api/user_admin_api.md @@ -1,5 +1,8 @@ # User Admin API +To use it, you will need to authenticate by providing an `access_token` +for a server admin: see [Admin API](../usage/administration/admin_api). + ## Query User Account This API returns information about a specific user account. @@ -10,9 +13,6 @@ The api is: GET /_synapse/admin/v2/users/ ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - It returns a JSON body like the following: ```jsonc @@ -104,9 +104,6 @@ with a body of: } ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - Returns HTTP status code: - `201` - When a new user object was created. - `200` - When a user was modified. @@ -156,9 +153,6 @@ By default, the response is ordered by ascending user ID. GET /_synapse/admin/v2/users?from=0&limit=10&guests=false ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - A response body like the following is returned: ```json @@ -278,9 +272,6 @@ GET /_matrix/client/r0/admin/whois/ See also: [Client Server API Whois](https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid). -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - It returns a JSON body like the following: ```json @@ -335,9 +326,6 @@ with a body of: } ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - The erase parameter is optional and defaults to `false`. An empty body may be passed for backwards compatibility. 
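Concretely, "providing an `access_token` for a server admin" — the instruction now stated once at the top of each page — amounts to attaching the admin's token to every request. A minimal sketch in Python, assuming the `requests` library is available; the base URL, user ID and token below are placeholders, not anything from this patch:

```python
import requests

BASE_URL = "https://synapse.example.com"  # placeholder homeserver
ADMIN_TOKEN = "syt_..."  # access token of a server admin (placeholder)

# Every admin endpoint follows the same pattern; here, querying a user account.
response = requests.get(
    f"{BASE_URL}/_synapse/admin/v2/users/@user:example.com",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
)
response.raise_for_status()
print(response.json())
```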
@@ -394,9 +382,6 @@ with a body of: } ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - The parameter `new_password` is required. The parameter `logout_devices` is optional and defaults to `true`. @@ -409,9 +394,6 @@ The api is: GET /_synapse/admin/v1/users//admin ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - A response body like the following is returned: ```json @@ -439,10 +421,6 @@ with a body of: } ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - - ## List room memberships of a user Gets a list of all `room_id` that a specific `user_id` is member. @@ -453,9 +431,6 @@ The API is: GET /_synapse/admin/v1/users//joined_rooms ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - A response body like the following is returned: ```json @@ -574,9 +549,6 @@ The API is: GET /_synapse/admin/v1/users//media ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - A response body like the following is returned: ```json @@ -691,9 +663,6 @@ The API is: DELETE /_synapse/admin/v1/users//media ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - A response body like the following is returned: ```json @@ -766,9 +735,6 @@ The API is: GET /_synapse/admin/v2/users//devices ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - A response body like the following is returned: ```json @@ -834,9 +800,6 @@ POST /_synapse/admin/v2/users//delete_devices } ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - An empty JSON dict is returned. **Parameters** @@ -858,9 +821,6 @@ The API is: GET /_synapse/admin/v2/users//devices/ ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - A response body like the following is returned: ```json @@ -906,9 +866,6 @@ PUT /_synapse/admin/v2/users//devices/ } ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - An empty JSON dict is returned. **Parameters** @@ -935,9 +892,6 @@ DELETE /_synapse/admin/v2/users//devices/ {} ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - An empty JSON dict is returned. 
**Parameters** @@ -956,9 +910,6 @@ The API is: GET /_synapse/admin/v1/users//pushers ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - A response body like the following is returned: ```json @@ -1053,9 +1004,6 @@ To un-shadow-ban a user the API is: DELETE /_synapse/admin/v1/users//shadow_ban ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - An empty JSON dict is returned in both cases. **Parameters** @@ -1078,9 +1026,6 @@ The API is: GET /_synapse/admin/v1/users//override_ratelimit ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - A response body like the following is returned: ```json @@ -1120,9 +1065,6 @@ The API is: POST /_synapse/admin/v1/users//override_ratelimit ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - A response body like the following is returned: ```json @@ -1165,9 +1107,6 @@ The API is: DELETE /_synapse/admin/v1/users//override_ratelimit ``` -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) - An empty JSON dict is returned. ```json @@ -1196,7 +1135,5 @@ The API is: GET /_synapse/admin/v1/username_available?username=$localpart ``` -The request and response format is the same as the [/_matrix/client/r0/register/available](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) API. - -To use it, you will need to authenticate by providing an `access_token` for a -server admin: [Admin API](../usage/administration/admin_api) +The request and response format is the same as the +[/_matrix/client/r0/register/available](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) API. From 901b264c0c88f39cbfb8b2229e0dc57968882658 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Mon, 31 Jan 2022 20:20:05 +0100 Subject: [PATCH 13/19] Add type hints to `tests/rest/admin` (#11851) --- changelog.d/11851.misc | 1 + mypy.ini | 3 - tests/rest/admin/test_admin.py | 134 +++------- tests/rest/admin/test_user.py | 262 ++++++++++---------- tests/rest/admin/test_username_available.py | 16 +- 5 files changed, 184 insertions(+), 232 deletions(-) create mode 100644 changelog.d/11851.misc diff --git a/changelog.d/11851.misc b/changelog.d/11851.misc new file mode 100644 index 000000000000..ccc3ec3482b1 --- /dev/null +++ b/changelog.d/11851.misc @@ -0,0 +1 @@ +Add type hints to `tests/rest/admin`. 
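Most of this patch is one mechanical pattern: `prepare` and `make_homeserver` gain annotations for the Twisted reactor and the Synapse `Clock`/`HomeServer` they receive, and every test method is annotated `-> None` so that mypy type-checks its body. A minimal sketch of the resulting shape (the class and test names here are illustrative, not from the patch):

```python
from twisted.test.proto_helpers import MemoryReactor

from synapse.server import HomeServer
from synapse.util import Clock

from tests import unittest


class ExampleTestCase(unittest.HomeserverTestCase):
    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        # With `hs` typed, mypy can check everything derived from it.
        self.store = hs.get_datastore()

    def test_example(self) -> None:
        # `-> None` opts the method body into type checking.
        ...
```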
diff --git a/mypy.ini b/mypy.ini index 85fa22d28f2b..2884078d0ad3 100644 --- a/mypy.ini +++ b/mypy.ini @@ -77,9 +77,6 @@ exclude = (?x) |tests/push/test_http.py |tests/push/test_presentable_names.py |tests/push/test_push_rule_evaluator.py - |tests/rest/admin/test_admin.py - |tests/rest/admin/test_user.py - |tests/rest/admin/test_username_available.py |tests/rest/client/test_account.py |tests/rest/client/test_events.py |tests/rest/client/test_filter.py diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index 3adadcb46bc4..849d00ab4d94 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -12,18 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. -import os import urllib.parse from http import HTTPStatus -from unittest.mock import Mock +from typing import List -from twisted.internet.defer import Deferred +from parameterized import parameterized + +from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin from synapse.http.server import JsonResource -from synapse.logging.context import make_deferred_yieldable from synapse.rest.admin import VersionServlet from synapse.rest.client import groups, login, room +from synapse.server import HomeServer +from synapse.util import Clock from tests import unittest from tests.server import FakeSite, make_request @@ -33,12 +35,12 @@ class VersionTestCase(unittest.HomeserverTestCase): url = "/_synapse/admin/v1/server_version" - def create_test_resource(self): + def create_test_resource(self) -> JsonResource: resource = JsonResource(self.hs) VersionServlet(self.hs).register(resource) return resource - def test_version_string(self): + def test_version_string(self) -> None: channel = self.make_request("GET", self.url, shorthand=False) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) @@ -54,14 +56,14 @@ class DeleteGroupTestCase(unittest.HomeserverTestCase): groups.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") self.other_user = self.register_user("user", "pass") self.other_user_token = self.login("user", "pass") - def test_delete_group(self): + def test_delete_group(self) -> None: # Create a new group channel = self.make_request( "POST", @@ -112,7 +114,7 @@ def test_delete_group(self): self.assertNotIn(group_id, self._get_groups_user_is_in(self.admin_user_tok)) self.assertNotIn(group_id, self._get_groups_user_is_in(self.other_user_token)) - def _check_group(self, group_id, expect_code): + def _check_group(self, group_id: str, expect_code: int) -> None: """Assert that trying to fetch the given group results in the given HTTP status code """ @@ -124,7 +126,7 @@ def _check_group(self, group_id, expect_code): self.assertEqual(expect_code, channel.code, msg=channel.json_body) - def _get_groups_user_is_in(self, access_token): + def _get_groups_user_is_in(self, access_token: str) -> List[str]: """Returns the list of groups the user is in (given their access token)""" channel = self.make_request("GET", b"/joined_groups", access_token=access_token) @@ -143,59 +145,15 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): room.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: # Allow for uploading and 
downloading to/from the media repo self.media_repo = hs.get_media_repository_resource() self.download_resource = self.media_repo.children[b"download"] self.upload_resource = self.media_repo.children[b"upload"] - def make_homeserver(self, reactor, clock): - - self.fetches = [] - - async def get_file(destination, path, output_stream, args=None, max_size=None): - """ - Returns tuple[int,dict,str,int] of file length, response headers, - absolute URI, and response code. - """ - - def write_to(r): - data, response = r - output_stream.write(data) - return response - - d = Deferred() - d.addCallback(write_to) - self.fetches.append((d, destination, path, args)) - return await make_deferred_yieldable(d) - - client = Mock() - client.get_file = get_file - - self.storage_path = self.mktemp() - self.media_store_path = self.mktemp() - os.mkdir(self.storage_path) - os.mkdir(self.media_store_path) - - config = self.default_config() - config["media_store_path"] = self.media_store_path - config["thumbnail_requirements"] = {} - config["max_image_pixels"] = 2000000 - - provider_config = { - "module": "synapse.rest.media.v1.storage_provider.FileStorageProviderBackend", - "store_local": True, - "store_synchronous": False, - "store_remote": True, - "config": {"directory": self.storage_path}, - } - config["media_storage_providers"] = [provider_config] - - hs = self.setup_test_homeserver(config=config, federation_http_client=client) - - return hs - - def _ensure_quarantined(self, admin_user_tok, server_and_media_id): + def _ensure_quarantined( + self, admin_user_tok: str, server_and_media_id: str + ) -> None: """Ensure a piece of media is quarantined when trying to access it.""" channel = make_request( self.reactor, @@ -216,12 +174,18 @@ def _ensure_quarantined(self, admin_user_tok, server_and_media_id): ), ) - def test_quarantine_media_requires_admin(self): + @parameterized.expand( + [ + # Attempt quarantine media APIs as non-admin + "/_synapse/admin/v1/media/quarantine/example.org/abcde12345", + # And the roomID/userID endpoint + "/_synapse/admin/v1/room/!room%3Aexample.com/media/quarantine", + ] + ) + def test_quarantine_media_requires_admin(self, url: str) -> None: self.register_user("nonadmin", "pass", admin=False) non_admin_user_tok = self.login("nonadmin", "pass") - # Attempt quarantine media APIs as non-admin - url = "/_synapse/admin/v1/media/quarantine/example.org/abcde12345" channel = self.make_request( "POST", url.encode("ascii"), @@ -235,22 +199,7 @@ def test_quarantine_media_requires_admin(self): msg="Expected forbidden on quarantining media as a non-admin", ) - # And the roomID/userID endpoint - url = "/_synapse/admin/v1/room/!room%3Aexample.com/media/quarantine" - channel = self.make_request( - "POST", - url.encode("ascii"), - access_token=non_admin_user_tok, - ) - - # Expect a forbidden error - self.assertEqual( - HTTPStatus.FORBIDDEN, - channel.code, - msg="Expected forbidden on quarantining media as a non-admin", - ) - - def test_quarantine_media_by_id(self): + def test_quarantine_media_by_id(self) -> None: self.register_user("id_admin", "pass", admin=True) admin_user_tok = self.login("id_admin", "pass") @@ -295,7 +244,15 @@ def test_quarantine_media_by_id(self): # Attempt to access the media self._ensure_quarantined(admin_user_tok, server_name_and_media_id) - def test_quarantine_all_media_in_room(self, override_url_template=None): + @parameterized.expand( + [ + # regular API path + "/_synapse/admin/v1/room/%s/media/quarantine", + # deprecated API path + "/_synapse/admin/v1/quarantine_media/%s", + 
] + ) + def test_quarantine_all_media_in_room(self, url: str) -> None: self.register_user("room_admin", "pass", admin=True) admin_user_tok = self.login("room_admin", "pass") @@ -333,16 +290,9 @@ def test_quarantine_all_media_in_room(self, override_url_template=None): tok=non_admin_user_tok, ) - # Quarantine all media in the room - if override_url_template: - url = override_url_template % urllib.parse.quote(room_id) - else: - url = "/_synapse/admin/v1/room/%s/media/quarantine" % urllib.parse.quote( - room_id - ) channel = self.make_request( "POST", - url, + url % urllib.parse.quote(room_id), access_token=admin_user_tok, ) self.pump(1.0) @@ -359,11 +309,7 @@ def test_quarantine_all_media_in_room(self, override_url_template=None): self._ensure_quarantined(admin_user_tok, server_and_media_id_1) self._ensure_quarantined(admin_user_tok, server_and_media_id_2) - def test_quarantine_all_media_in_room_deprecated_api_path(self): - # Perform the above test with the deprecated API path - self.test_quarantine_all_media_in_room("/_synapse/admin/v1/quarantine_media/%s") - - def test_quarantine_all_media_by_user(self): + def test_quarantine_all_media_by_user(self) -> None: self.register_user("user_admin", "pass", admin=True) admin_user_tok = self.login("user_admin", "pass") @@ -401,7 +347,7 @@ def test_quarantine_all_media_by_user(self): self._ensure_quarantined(admin_user_tok, server_and_media_id_1) self._ensure_quarantined(admin_user_tok, server_and_media_id_2) - def test_cannot_quarantine_safe_media(self): + def test_cannot_quarantine_safe_media(self) -> None: self.register_user("user_admin", "pass", admin=True) admin_user_tok = self.login("user_admin", "pass") @@ -475,7 +421,7 @@ class PurgeHistoryTestCase(unittest.HomeserverTestCase): room.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") @@ -488,7 +434,7 @@ def prepare(self, reactor, clock, hs): self.url = f"/_synapse/admin/v1/purge_history/{self.room_id}" self.url_status = "/_synapse/admin/v1/purge_history_status/" - def test_purge_history(self): + def test_purge_history(self) -> None: """ Simple test of purge history API. Test only that is is possible to call, get status HTTPStatus.OK and purge_id. 
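Alongside the annotations, the near-duplicate quarantine tests above are folded into a single test driven by `parameterized.expand`, which runs the method once per entry. A sketch of the pattern, reusing the two quarantine URLs from the diff (the login details are illustrative):

```python
from http import HTTPStatus

from parameterized import parameterized

from tests import unittest


class QuarantineExampleTestCase(unittest.HomeserverTestCase):
    @parameterized.expand(
        [
            "/_synapse/admin/v1/media/quarantine/example.org/abcde12345",
            "/_synapse/admin/v1/room/!room%3Aexample.com/media/quarantine",
        ]
    )
    def test_requires_admin(self, url: str) -> None:
        # Each URL becomes its own test case instead of a copy-pasted method.
        self.register_user("nonadmin", "pass", admin=False)
        token = self.login("nonadmin", "pass")

        channel = self.make_request("POST", url, access_token=token)
        self.assertEqual(HTTPStatus.FORBIDDEN, channel.code)
```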
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 9711405735db..272637e96575 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -23,13 +23,17 @@ from parameterized import parameterized, parameterized_class +from twisted.test.proto_helpers import MemoryReactor + import synapse.rest.admin from synapse.api.constants import UserTypes from synapse.api.errors import Codes, HttpResponseException, ResourceLimitError from synapse.api.room_versions import RoomVersions from synapse.rest.client import devices, login, logout, profile, room, sync from synapse.rest.media.v1.filepath import MediaFilePaths +from synapse.server import HomeServer from synapse.types import JsonDict, UserID +from synapse.util import Clock from tests import unittest from tests.server import FakeSite, make_request @@ -44,7 +48,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): profile.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.url = "/_synapse/admin/v1/register" @@ -61,12 +65,12 @@ def make_homeserver(self, reactor, clock): self.hs.config.registration.registration_shared_secret = "shared" - self.hs.get_media_repository = Mock() - self.hs.get_deactivate_account_handler = Mock() + self.hs.get_media_repository = Mock() # type: ignore[assignment] + self.hs.get_deactivate_account_handler = Mock() # type: ignore[assignment] return self.hs - def test_disabled(self): + def test_disabled(self) -> None: """ If there is no shared secret, registration through this method will be prevented. @@ -80,7 +84,7 @@ def test_disabled(self): "Shared secret registration is not enabled", channel.json_body["error"] ) - def test_get_nonce(self): + def test_get_nonce(self) -> None: """ Calling GET on the endpoint will return a randomised nonce, using the homeserver's secrets provider. @@ -93,7 +97,7 @@ def test_get_nonce(self): self.assertEqual(channel.json_body, {"nonce": "abcd"}) - def test_expired_nonce(self): + def test_expired_nonce(self) -> None: """ Calling GET on the endpoint will return a randomised nonce, which will only last for SALT_TIMEOUT (60s). @@ -118,7 +122,7 @@ def test_expired_nonce(self): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual("unrecognised nonce", channel.json_body["error"]) - def test_register_incorrect_nonce(self): + def test_register_incorrect_nonce(self) -> None: """ Only the provided nonce can be used, as it's checked in the MAC. """ @@ -141,7 +145,7 @@ def test_register_incorrect_nonce(self): self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) self.assertEqual("HMAC incorrect", channel.json_body["error"]) - def test_register_correct_nonce(self): + def test_register_correct_nonce(self) -> None: """ When the correct nonce is provided, and the right key is provided, the user is registered. @@ -168,7 +172,7 @@ def test_register_correct_nonce(self): self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertEqual("@bob:test", channel.json_body["user_id"]) - def test_nonce_reuse(self): + def test_nonce_reuse(self) -> None: """ A valid unrecognised nonce. 
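         Once a nonce has been used for a successful registration, reusing it
         must be rejected with "unrecognised nonce".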
""" @@ -197,14 +201,14 @@ def test_nonce_reuse(self): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual("unrecognised nonce", channel.json_body["error"]) - def test_missing_parts(self): + def test_missing_parts(self) -> None: """ Synapse will complain if you don't give nonce, username, password, and mac. Admin and user_types are optional. Additional checks are done for length and type. """ - def nonce(): + def nonce() -> str: channel = self.make_request("GET", self.url) return channel.json_body["nonce"] @@ -297,7 +301,7 @@ def nonce(): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual("Invalid user type", channel.json_body["error"]) - def test_displayname(self): + def test_displayname(self) -> None: """ Test that displayname of new user is set """ @@ -400,7 +404,7 @@ def test_displayname(self): @override_config( {"limit_usage_by_mau": True, "max_mau_value": 2, "mau_trial_days": 0} ) - def test_register_mau_limit_reached(self): + def test_register_mau_limit_reached(self) -> None: """ Check we can register a user via the shared secret registration API even if the MAU limit is reached. @@ -450,13 +454,13 @@ class UsersListTestCase(unittest.HomeserverTestCase): ] url = "/_synapse/admin/v2/users" - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastore() self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") - def test_no_auth(self): + def test_no_auth(self) -> None: """ Try to list users without authentication. """ @@ -465,7 +469,7 @@ def test_no_auth(self): self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) - def test_requester_is_no_admin(self): + def test_requester_is_no_admin(self) -> None: """ If the user is not a server admin, an error is returned. """ @@ -477,7 +481,7 @@ def test_requester_is_no_admin(self): self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) - def test_all_users(self): + def test_all_users(self) -> None: """ List all users, including deactivated users. """ @@ -497,7 +501,7 @@ def test_all_users(self): # Check that all fields are available self._check_fields(channel.json_body["users"]) - def test_search_term(self): + def test_search_term(self) -> None: """Test that searching for a users works correctly""" def _search_test( @@ -505,7 +509,7 @@ def _search_test( search_term: str, search_field: Optional[str] = "name", expected_http_code: Optional[int] = HTTPStatus.OK, - ): + ) -> None: """Search for a user and check that the returned user's id is a match Args: @@ -575,7 +579,7 @@ def _search_test( _search_test(None, "foo", "user_id") _search_test(None, "bar", "user_id") - def test_invalid_parameter(self): + def test_invalid_parameter(self) -> None: """ If parameters are invalid, an error is returned. 
""" @@ -640,7 +644,7 @@ def test_invalid_parameter(self): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) - def test_limit(self): + def test_limit(self) -> None: """ Testing list of users with limit """ @@ -661,7 +665,7 @@ def test_limit(self): self.assertEqual(channel.json_body["next_token"], "5") self._check_fields(channel.json_body["users"]) - def test_from(self): + def test_from(self) -> None: """ Testing list of users with a defined starting point (from) """ @@ -682,7 +686,7 @@ def test_from(self): self.assertNotIn("next_token", channel.json_body) self._check_fields(channel.json_body["users"]) - def test_limit_and_from(self): + def test_limit_and_from(self) -> None: """ Testing list of users with a defined starting point and limit """ @@ -703,7 +707,7 @@ def test_limit_and_from(self): self.assertEqual(len(channel.json_body["users"]), 10) self._check_fields(channel.json_body["users"]) - def test_next_token(self): + def test_next_token(self) -> None: """ Testing that `next_token` appears at the right place """ @@ -765,7 +769,7 @@ def test_next_token(self): self.assertEqual(len(channel.json_body["users"]), 1) self.assertNotIn("next_token", channel.json_body) - def test_order_by(self): + def test_order_by(self) -> None: """ Testing order list with parameter `order_by` """ @@ -843,7 +847,7 @@ def _order_test( expected_user_list: List[str], order_by: Optional[str], dir: Optional[str] = None, - ): + ) -> None: """Request the list of users in a certain order. Assert that order is what we expect Args: @@ -870,7 +874,7 @@ def _order_test( self.assertEqual(expected_user_list, returned_order) self._check_fields(channel.json_body["users"]) - def _check_fields(self, content: List[JsonDict]): + def _check_fields(self, content: List[JsonDict]) -> None: """Checks that the expected user attributes are present in content Args: content: List that is checked for content @@ -886,7 +890,7 @@ def _check_fields(self, content: List[JsonDict]): self.assertIn("avatar_url", u) self.assertIn("creation_ts", u) - def _create_users(self, number_users: int): + def _create_users(self, number_users: int) -> None: """ Create a number of users Args: @@ -908,7 +912,7 @@ class DeactivateAccountTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastore() self.admin_user = self.register_user("admin", "pass", admin=True) @@ -931,7 +935,7 @@ def prepare(self, reactor, clock, hs): self.store.user_add_threepid("@user:test", "email", "foo@bar.com", 0, 0) ) - def test_no_auth(self): + def test_no_auth(self) -> None: """ Try to deactivate users without authentication. """ @@ -940,7 +944,7 @@ def test_no_auth(self): self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) - def test_requester_is_not_admin(self): + def test_requester_is_not_admin(self) -> None: """ If the user is not a server admin, an error is returned. 
""" @@ -961,7 +965,7 @@ def test_requester_is_not_admin(self): self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) self.assertEqual("You are not a server admin", channel.json_body["error"]) - def test_user_does_not_exist(self): + def test_user_does_not_exist(self) -> None: """ Tests that deactivation for a user that does not exist returns a HTTPStatus.NOT_FOUND """ @@ -975,7 +979,7 @@ def test_user_does_not_exist(self): self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) - def test_erase_is_not_bool(self): + def test_erase_is_not_bool(self) -> None: """ If parameter `erase` is not boolean, return an error """ @@ -990,7 +994,7 @@ def test_erase_is_not_bool(self): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual(Codes.BAD_JSON, channel.json_body["errcode"]) - def test_user_is_not_local(self): + def test_user_is_not_local(self) -> None: """ Tests that deactivation for a user that is not a local returns a HTTPStatus.BAD_REQUEST """ @@ -1001,7 +1005,7 @@ def test_user_is_not_local(self): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual("Can only deactivate local users", channel.json_body["error"]) - def test_deactivate_user_erase_true(self): + def test_deactivate_user_erase_true(self) -> None: """ Test deactivating a user and set `erase` to `true` """ @@ -1046,7 +1050,7 @@ def test_deactivate_user_erase_true(self): self._is_erased("@user:test", True) - def test_deactivate_user_erase_false(self): + def test_deactivate_user_erase_false(self) -> None: """ Test deactivating a user and set `erase` to `false` """ @@ -1091,7 +1095,7 @@ def test_deactivate_user_erase_false(self): self._is_erased("@user:test", False) - def test_deactivate_user_erase_true_no_profile(self): + def test_deactivate_user_erase_true_no_profile(self) -> None: """ Test deactivating a user and set `erase` to `true` if user has no profile information (stored in the database table `profiles`). @@ -1162,7 +1166,7 @@ class UserRestTestCase(unittest.HomeserverTestCase): sync.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastore() self.auth_handler = hs.get_auth_handler() @@ -1185,7 +1189,7 @@ def prepare(self, reactor, clock, hs): self.url_prefix = "/_synapse/admin/v2/users/%s" self.url_other_user = self.url_prefix % self.other_user - def test_requester_is_no_admin(self): + def test_requester_is_no_admin(self) -> None: """ If the user is not a server admin, an error is returned. """ @@ -1210,7 +1214,7 @@ def test_requester_is_no_admin(self): self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) self.assertEqual("You are not a server admin", channel.json_body["error"]) - def test_user_does_not_exist(self): + def test_user_does_not_exist(self) -> None: """ Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND """ @@ -1224,7 +1228,7 @@ def test_user_does_not_exist(self): self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) self.assertEqual("M_NOT_FOUND", channel.json_body["errcode"]) - def test_invalid_parameter(self): + def test_invalid_parameter(self) -> None: """ If parameters are invalid, an error is returned. 
""" @@ -1319,7 +1323,7 @@ def test_invalid_parameter(self): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_PARAM, channel.json_body["errcode"]) - def test_get_user(self): + def test_get_user(self) -> None: """ Test a simple get of a user. """ @@ -1334,7 +1338,7 @@ def test_get_user(self): self.assertEqual("User", channel.json_body["displayname"]) self._check_fields(channel.json_body) - def test_create_server_admin(self): + def test_create_server_admin(self) -> None: """ Check that a new admin user is created successfully. """ @@ -1383,7 +1387,7 @@ def test_create_server_admin(self): self.assertEqual("mxc://fibble/wibble", channel.json_body["avatar_url"]) self._check_fields(channel.json_body) - def test_create_user(self): + def test_create_user(self) -> None: """ Check that a new regular user is created successfully. """ @@ -1450,7 +1454,7 @@ def test_create_user(self): @override_config( {"limit_usage_by_mau": True, "max_mau_value": 2, "mau_trial_days": 0} ) - def test_create_user_mau_limit_reached_active_admin(self): + def test_create_user_mau_limit_reached_active_admin(self) -> None: """ Check that an admin can register a new user via the admin API even if the MAU limit is reached. @@ -1496,7 +1500,7 @@ def test_create_user_mau_limit_reached_active_admin(self): @override_config( {"limit_usage_by_mau": True, "max_mau_value": 2, "mau_trial_days": 0} ) - def test_create_user_mau_limit_reached_passive_admin(self): + def test_create_user_mau_limit_reached_passive_admin(self) -> None: """ Check that an admin can register a new user via the admin API even if the MAU limit is reached. @@ -1541,7 +1545,7 @@ def test_create_user_mau_limit_reached_passive_admin(self): "public_baseurl": "https://example.com", } ) - def test_create_user_email_notif_for_new_users(self): + def test_create_user_email_notif_for_new_users(self) -> None: """ Check that a new regular user is created successfully and got an email pusher. @@ -1584,7 +1588,7 @@ def test_create_user_email_notif_for_new_users(self): "public_baseurl": "https://example.com", } ) - def test_create_user_email_no_notif_for_new_users(self): + def test_create_user_email_no_notif_for_new_users(self) -> None: """ Check that a new regular user is created successfully and got not an email pusher. @@ -1615,7 +1619,7 @@ def test_create_user_email_no_notif_for_new_users(self): pushers = list(pushers) self.assertEqual(len(pushers), 0) - def test_set_password(self): + def test_set_password(self) -> None: """ Test setting a new password for another user. """ @@ -1631,7 +1635,7 @@ def test_set_password(self): self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self._check_fields(channel.json_body) - def test_set_displayname(self): + def test_set_displayname(self) -> None: """ Test setting the displayname of another user. """ @@ -1659,7 +1663,7 @@ def test_set_displayname(self): self.assertEqual("@user:test", channel.json_body["name"]) self.assertEqual("foobar", channel.json_body["displayname"]) - def test_set_threepid(self): + def test_set_threepid(self) -> None: """ Test setting threepid for an other user. """ @@ -1740,7 +1744,7 @@ def test_set_threepid(self): self.assertEqual(0, len(channel.json_body["threepids"])) self._check_fields(channel.json_body) - def test_set_duplicate_threepid(self): + def test_set_duplicate_threepid(self) -> None: """ Test setting the same threepid for a second user. First user loses and second user gets mapping of this threepid. 
@@ -1827,7 +1831,7 @@ def test_set_duplicate_threepid(self): self.assertEqual(0, len(channel.json_body["threepids"])) self._check_fields(channel.json_body) - def test_set_external_id(self): + def test_set_external_id(self) -> None: """ Test setting external id for an other user. """ @@ -1925,7 +1929,7 @@ def test_set_external_id(self): self.assertEqual("@user:test", channel.json_body["name"]) self.assertEqual(0, len(channel.json_body["external_ids"])) - def test_set_duplicate_external_id(self): + def test_set_duplicate_external_id(self) -> None: """ Test that setting the same external id for a second user fails and external id from user must not be changed. @@ -2048,7 +2052,7 @@ def test_set_duplicate_external_id(self): ) self._check_fields(channel.json_body) - def test_deactivate_user(self): + def test_deactivate_user(self) -> None: """ Test deactivating another user. """ @@ -2113,7 +2117,7 @@ def test_deactivate_user(self): self.assertNotIn("password_hash", channel.json_body) @override_config({"user_directory": {"enabled": True, "search_all_users": True}}) - def test_change_name_deactivate_user_user_directory(self): + def test_change_name_deactivate_user_user_directory(self) -> None: """ Test change profile information of a deactivated user and check that it does not appear in user directory @@ -2156,7 +2160,7 @@ def test_change_name_deactivate_user_user_directory(self): profile = self.get_success(self.store.get_user_in_directory(self.other_user)) self.assertIsNone(profile) - def test_reactivate_user(self): + def test_reactivate_user(self) -> None: """ Test reactivating another user. """ @@ -2189,7 +2193,7 @@ def test_reactivate_user(self): self.assertNotIn("password_hash", channel.json_body) @override_config({"password_config": {"localdb_enabled": False}}) - def test_reactivate_user_localdb_disabled(self): + def test_reactivate_user_localdb_disabled(self) -> None: """ Test reactivating another user when using SSO. """ @@ -2223,7 +2227,7 @@ def test_reactivate_user_localdb_disabled(self): self.assertNotIn("password_hash", channel.json_body) @override_config({"password_config": {"enabled": False}}) - def test_reactivate_user_password_disabled(self): + def test_reactivate_user_password_disabled(self) -> None: """ Test reactivating another user when using SSO. """ @@ -2256,7 +2260,7 @@ def test_reactivate_user_password_disabled(self): # This key was removed intentionally. Ensure it is not accidentally re-included. self.assertNotIn("password_hash", channel.json_body) - def test_set_user_as_admin(self): + def test_set_user_as_admin(self) -> None: """ Test setting the admin flag on a user. """ @@ -2284,7 +2288,7 @@ def test_set_user_as_admin(self): self.assertEqual("@user:test", channel.json_body["name"]) self.assertTrue(channel.json_body["admin"]) - def test_set_user_type(self): + def test_set_user_type(self) -> None: """ Test changing user type. """ @@ -2335,7 +2339,7 @@ def test_set_user_type(self): self.assertEqual("@user:test", channel.json_body["name"]) self.assertIsNone(channel.json_body["user_type"]) - def test_accidental_deactivation_prevention(self): + def test_accidental_deactivation_prevention(self) -> None: """ Ensure an account can't accidentally be deactivated by using a str value for the deactivated body parameter @@ -2418,7 +2422,7 @@ def _deactivate_user(self, user_id: str) -> None: # This key was removed intentionally. Ensure it is not accidentally re-included. 
self.assertNotIn("password_hash", channel.json_body) - def _check_fields(self, content: JsonDict): + def _check_fields(self, content: JsonDict) -> None: """Checks that the expected user attributes are present in content Args: @@ -2448,7 +2452,7 @@ class UserMembershipRestTestCase(unittest.HomeserverTestCase): room.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") @@ -2457,7 +2461,7 @@ def prepare(self, reactor, clock, hs): self.other_user ) - def test_no_auth(self): + def test_no_auth(self) -> None: """ Try to list rooms of an user without authentication. """ @@ -2466,7 +2470,7 @@ def test_no_auth(self): self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) - def test_requester_is_no_admin(self): + def test_requester_is_no_admin(self) -> None: """ If the user is not a server admin, an error is returned. """ @@ -2481,7 +2485,7 @@ def test_requester_is_no_admin(self): self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) - def test_user_does_not_exist(self): + def test_user_does_not_exist(self) -> None: """ Tests that a lookup for a user that does not exist returns an empty list """ @@ -2496,7 +2500,7 @@ def test_user_does_not_exist(self): self.assertEqual(0, channel.json_body["total"]) self.assertEqual(0, len(channel.json_body["joined_rooms"])) - def test_user_is_not_local(self): + def test_user_is_not_local(self) -> None: """ Tests that a lookup for a user that is not a local and participates in no conversation returns an empty list """ @@ -2512,7 +2516,7 @@ def test_user_is_not_local(self): self.assertEqual(0, channel.json_body["total"]) self.assertEqual(0, len(channel.json_body["joined_rooms"])) - def test_no_memberships(self): + def test_no_memberships(self) -> None: """ Tests that a normal lookup for rooms is successfully if user has no memberships @@ -2528,7 +2532,7 @@ def test_no_memberships(self): self.assertEqual(0, channel.json_body["total"]) self.assertEqual(0, len(channel.json_body["joined_rooms"])) - def test_get_rooms(self): + def test_get_rooms(self) -> None: """ Tests that a normal lookup for rooms is successfully """ @@ -2549,7 +2553,7 @@ def test_get_rooms(self): self.assertEqual(number_rooms, channel.json_body["total"]) self.assertEqual(number_rooms, len(channel.json_body["joined_rooms"])) - def test_get_rooms_with_nonlocal_user(self): + def test_get_rooms_with_nonlocal_user(self) -> None: """ Tests that a normal lookup for rooms is successful with a non-local user """ @@ -2604,7 +2608,7 @@ class PushersRestTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastore() self.admin_user = self.register_user("admin", "pass", admin=True) @@ -2615,7 +2619,7 @@ def prepare(self, reactor, clock, hs): self.other_user ) - def test_no_auth(self): + def test_no_auth(self) -> None: """ Try to list pushers of an user without authentication. 
""" @@ -2624,7 +2628,7 @@ def test_no_auth(self): self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) - def test_requester_is_no_admin(self): + def test_requester_is_no_admin(self) -> None: """ If the user is not a server admin, an error is returned. """ @@ -2639,7 +2643,7 @@ def test_requester_is_no_admin(self): self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) - def test_user_does_not_exist(self): + def test_user_does_not_exist(self) -> None: """ Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND """ @@ -2653,7 +2657,7 @@ def test_user_does_not_exist(self): self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) - def test_user_is_not_local(self): + def test_user_is_not_local(self) -> None: """ Tests that a lookup for a user that is not a local returns a HTTPStatus.BAD_REQUEST """ @@ -2668,7 +2672,7 @@ def test_user_is_not_local(self): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual("Can only look up local users", channel.json_body["error"]) - def test_get_pushers(self): + def test_get_pushers(self) -> None: """ Tests that a normal lookup for pushers is successfully """ @@ -2732,7 +2736,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastore() self.media_repo = hs.get_media_repository_resource() self.filepaths = MediaFilePaths(hs.config.media.media_store_path) @@ -2746,7 +2750,7 @@ def prepare(self, reactor, clock, hs): ) @parameterized.expand(["GET", "DELETE"]) - def test_no_auth(self, method: str): + def test_no_auth(self, method: str) -> None: """Try to list media of an user without authentication.""" channel = self.make_request(method, self.url, {}) @@ -2754,7 +2758,7 @@ def test_no_auth(self, method: str): self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) @parameterized.expand(["GET", "DELETE"]) - def test_requester_is_no_admin(self, method: str): + def test_requester_is_no_admin(self, method: str) -> None: """If the user is not a server admin, an error is returned.""" other_user_token = self.login("user", "pass") @@ -2768,7 +2772,7 @@ def test_requester_is_no_admin(self, method: str): self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) @parameterized.expand(["GET", "DELETE"]) - def test_user_does_not_exist(self, method: str): + def test_user_does_not_exist(self, method: str) -> None: """Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND""" url = "/_synapse/admin/v1/users/@unknown_person:test/media" channel = self.make_request( @@ -2781,7 +2785,7 @@ def test_user_does_not_exist(self, method: str): self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) @parameterized.expand(["GET", "DELETE"]) - def test_user_is_not_local(self, method: str): + def test_user_is_not_local(self, method: str) -> None: """Tests that a lookup for a user that is not a local returns a HTTPStatus.BAD_REQUEST""" url = "/_synapse/admin/v1/users/@unknown_person:unknown_domain/media" @@ -2794,7 +2798,7 @@ def test_user_is_not_local(self, method: str): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, 
msg=channel.json_body) self.assertEqual("Can only look up local users", channel.json_body["error"]) - def test_limit_GET(self): + def test_limit_GET(self) -> None: """Testing list of media with limit""" number_media = 20 @@ -2813,7 +2817,7 @@ def test_limit_GET(self): self.assertEqual(channel.json_body["next_token"], 5) self._check_fields(channel.json_body["media"]) - def test_limit_DELETE(self): + def test_limit_DELETE(self) -> None: """Testing delete of media with limit""" number_media = 20 @@ -2830,7 +2834,7 @@ def test_limit_DELETE(self): self.assertEqual(channel.json_body["total"], 5) self.assertEqual(len(channel.json_body["deleted_media"]), 5) - def test_from_GET(self): + def test_from_GET(self) -> None: """Testing list of media with a defined starting point (from)""" number_media = 20 @@ -2849,7 +2853,7 @@ def test_from_GET(self): self.assertNotIn("next_token", channel.json_body) self._check_fields(channel.json_body["media"]) - def test_from_DELETE(self): + def test_from_DELETE(self) -> None: """Testing delete of media with a defined starting point (from)""" number_media = 20 @@ -2866,7 +2870,7 @@ def test_from_DELETE(self): self.assertEqual(channel.json_body["total"], 15) self.assertEqual(len(channel.json_body["deleted_media"]), 15) - def test_limit_and_from_GET(self): + def test_limit_and_from_GET(self) -> None: """Testing list of media with a defined starting point and limit""" number_media = 20 @@ -2885,7 +2889,7 @@ def test_limit_and_from_GET(self): self.assertEqual(len(channel.json_body["media"]), 10) self._check_fields(channel.json_body["media"]) - def test_limit_and_from_DELETE(self): + def test_limit_and_from_DELETE(self) -> None: """Testing delete of media with a defined starting point and limit""" number_media = 20 @@ -2903,7 +2907,7 @@ def test_limit_and_from_DELETE(self): self.assertEqual(len(channel.json_body["deleted_media"]), 10) @parameterized.expand(["GET", "DELETE"]) - def test_invalid_parameter(self, method: str): + def test_invalid_parameter(self, method: str) -> None: """If parameters are invalid, an error is returned.""" # unkown order_by channel = self.make_request( @@ -2945,7 +2949,7 @@ def test_invalid_parameter(self, method: str): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) - def test_next_token(self): + def test_next_token(self) -> None: """ Testing that `next_token` appears at the right place @@ -3010,7 +3014,7 @@ def test_next_token(self): self.assertEqual(len(channel.json_body["media"]), 1) self.assertNotIn("next_token", channel.json_body) - def test_user_has_no_media_GET(self): + def test_user_has_no_media_GET(self) -> None: """ Tests that a normal lookup for media is successfully if user has no media created @@ -3026,7 +3030,7 @@ def test_user_has_no_media_GET(self): self.assertEqual(0, channel.json_body["total"]) self.assertEqual(0, len(channel.json_body["media"])) - def test_user_has_no_media_DELETE(self): + def test_user_has_no_media_DELETE(self) -> None: """ Tests that a delete is successful if user has no media """ @@ -3041,7 +3045,7 @@ def test_user_has_no_media_DELETE(self): self.assertEqual(0, channel.json_body["total"]) self.assertEqual(0, len(channel.json_body["deleted_media"])) - def test_get_media(self): + def test_get_media(self) -> None: """Tests that a normal lookup for media is successful""" number_media = 5 @@ -3060,7 +3064,7 @@ def test_get_media(self): self.assertNotIn("next_token", channel.json_body) 
self._check_fields(channel.json_body["media"]) - def test_delete_media(self): + def test_delete_media(self) -> None: """Tests that a normal delete of media is successful""" number_media = 5 @@ -3089,7 +3093,7 @@ def test_delete_media(self): for local_path in local_paths: self.assertFalse(os.path.exists(local_path)) - def test_order_by(self): + def test_order_by(self) -> None: """ Testing order list with parameter `order_by` """ @@ -3252,7 +3256,7 @@ def _create_media_and_access( return media_id - def _check_fields(self, content: List[JsonDict]): + def _check_fields(self, content: List[JsonDict]) -> None: """Checks that the expected user attributes are present in content Args: content: List that is checked for content @@ -3272,7 +3276,7 @@ def _order_test( expected_media_list: List[str], order_by: Optional[str], dir: Optional[str] = None, - ): + ) -> None: """Request the list of media in a certain order. Assert that order is what we expect Args: @@ -3312,7 +3316,7 @@ class UserTokenRestTestCase(unittest.HomeserverTestCase): logout.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastore() self.admin_user = self.register_user("admin", "pass", admin=True) @@ -3331,14 +3335,14 @@ def _get_token(self) -> str: self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) return channel.json_body["access_token"] - def test_no_auth(self): + def test_no_auth(self) -> None: """Try to login as a user without authentication.""" channel = self.make_request("POST", self.url, b"{}") self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) - def test_not_admin(self): + def test_not_admin(self) -> None: """Try to login as a user as a non-admin user.""" channel = self.make_request( "POST", self.url, b"{}", access_token=self.other_user_tok @@ -3346,7 +3350,7 @@ def test_not_admin(self): self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) - def test_send_event(self): + def test_send_event(self) -> None: """Test that sending event as a user works.""" # Create a room. room_id = self.helper.create_room_as(self.other_user, tok=self.other_user_tok) @@ -3360,7 +3364,7 @@ def test_send_event(self): event = self.get_success(self.store.get_event(event_id)) self.assertEqual(event.sender, self.other_user) - def test_devices(self): + def test_devices(self) -> None: """Tests that logging in as a user doesn't create a new device for them.""" # Login in as the user self._get_token() @@ -3374,7 +3378,7 @@ def test_devices(self): # We should only see the one device (from the login in `prepare`) self.assertEqual(len(channel.json_body["devices"]), 1) - def test_logout(self): + def test_logout(self) -> None: """Test that calling `/logout` with the token works.""" # Login in as the user puppet_token = self._get_token() @@ -3397,7 +3401,7 @@ def test_logout(self): ) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) - def test_user_logout_all(self): + def test_user_logout_all(self) -> None: """Tests that the target user calling `/logout/all` does *not* expire the token. """ @@ -3424,7 +3428,7 @@ def test_user_logout_all(self): ) self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body) - def test_admin_logout_all(self): + def test_admin_logout_all(self) -> None: """Tests that the admin user calling `/logout/all` does expire the token. 
""" @@ -3464,7 +3468,7 @@ def test_admin_logout_all(self): "form_secret": "123secret", } ) - def test_consent(self): + def test_consent(self) -> None: """Test that sending a message is not subject to the privacy policies.""" # Have the admin user accept the terms. self.get_success(self.store.user_set_consent_version(self.admin_user, "1.0")) @@ -3492,7 +3496,7 @@ def test_consent(self): @override_config( {"limit_usage_by_mau": True, "max_mau_value": 1, "mau_trial_days": 0} ) - def test_mau_limit(self): + def test_mau_limit(self) -> None: # Create a room as the admin user. This will bump the monthly active users to 1. room_id = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok) @@ -3524,14 +3528,14 @@ class WhoisRestTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.admin_user = self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") self.other_user = self.register_user("user", "pass") - self.url = self.url_prefix % self.other_user + self.url = self.url_prefix % self.other_user # type: ignore[attr-defined] - def test_no_auth(self): + def test_no_auth(self) -> None: """ Try to get information of an user without authentication. """ @@ -3539,7 +3543,7 @@ def test_no_auth(self): self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) - def test_requester_is_not_admin(self): + def test_requester_is_not_admin(self) -> None: """ If the user is not a server admin, an error is returned. """ @@ -3554,11 +3558,11 @@ def test_requester_is_not_admin(self): self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) - def test_user_is_not_local(self): + def test_user_is_not_local(self) -> None: """ Tests that a lookup for a user that is not a local returns a HTTPStatus.BAD_REQUEST """ - url = self.url_prefix % "@unknown_person:unknown_domain" + url = self.url_prefix % "@unknown_person:unknown_domain" # type: ignore[attr-defined] channel = self.make_request( "GET", @@ -3568,7 +3572,7 @@ def test_user_is_not_local(self): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual("Can only whois a local user", channel.json_body["error"]) - def test_get_whois_admin(self): + def test_get_whois_admin(self) -> None: """ The lookup should succeed for an admin. """ @@ -3581,7 +3585,7 @@ def test_get_whois_admin(self): self.assertEqual(self.other_user, channel.json_body["user_id"]) self.assertIn("devices", channel.json_body) - def test_get_whois_user(self): + def test_get_whois_user(self) -> None: """ The lookup should succeed for a normal user looking up their own information. """ @@ -3604,7 +3608,7 @@ class ShadowBanRestTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastore() self.admin_user = self.register_user("admin", "pass", admin=True) @@ -3617,7 +3621,7 @@ def prepare(self, reactor, clock, hs): ) @parameterized.expand(["POST", "DELETE"]) - def test_no_auth(self, method: str): + def test_no_auth(self, method: str) -> None: """ Try to get information of an user without authentication. 
""" @@ -3626,7 +3630,7 @@ def test_no_auth(self, method: str): self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) @parameterized.expand(["POST", "DELETE"]) - def test_requester_is_not_admin(self, method: str): + def test_requester_is_not_admin(self, method: str) -> None: """ If the user is not a server admin, an error is returned. """ @@ -3637,7 +3641,7 @@ def test_requester_is_not_admin(self, method: str): self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) @parameterized.expand(["POST", "DELETE"]) - def test_user_is_not_local(self, method: str): + def test_user_is_not_local(self, method: str) -> None: """ Tests that shadow-banning for a user that is not a local returns a HTTPStatus.BAD_REQUEST """ @@ -3646,7 +3650,7 @@ def test_user_is_not_local(self, method: str): channel = self.make_request(method, url, access_token=self.admin_user_tok) self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) - def test_success(self): + def test_success(self) -> None: """ Shadow-banning should succeed for an admin. """ @@ -3682,7 +3686,7 @@ class RateLimitTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastore() self.admin_user = self.register_user("admin", "pass", admin=True) @@ -3695,7 +3699,7 @@ def prepare(self, reactor, clock, hs): ) @parameterized.expand(["GET", "POST", "DELETE"]) - def test_no_auth(self, method: str): + def test_no_auth(self, method: str) -> None: """ Try to get information of a user without authentication. """ @@ -3705,7 +3709,7 @@ def test_no_auth(self, method: str): self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) @parameterized.expand(["GET", "POST", "DELETE"]) - def test_requester_is_no_admin(self, method: str): + def test_requester_is_no_admin(self, method: str) -> None: """ If the user is not a server admin, an error is returned. """ @@ -3721,7 +3725,7 @@ def test_requester_is_no_admin(self, method: str): self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) @parameterized.expand(["GET", "POST", "DELETE"]) - def test_user_does_not_exist(self, method: str): + def test_user_does_not_exist(self, method: str) -> None: """ Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND """ @@ -3743,7 +3747,7 @@ def test_user_does_not_exist(self, method: str): ("DELETE", "Only local users can be ratelimited"), ] ) - def test_user_is_not_local(self, method: str, error_msg: str): + def test_user_is_not_local(self, method: str, error_msg: str) -> None: """ Tests that a lookup for a user that is not a local returns a HTTPStatus.BAD_REQUEST """ @@ -3760,7 +3764,7 @@ def test_user_is_not_local(self, method: str, error_msg: str): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual(error_msg, channel.json_body["error"]) - def test_invalid_parameter(self): + def test_invalid_parameter(self) -> None: """ If parameters are invalid, an error is returned. 
""" @@ -3808,7 +3812,7 @@ def test_invalid_parameter(self): self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) - def test_return_zero_when_null(self): + def test_return_zero_when_null(self) -> None: """ If values in database are `null` API should return an int `0` """ @@ -3834,7 +3838,7 @@ def test_return_zero_when_null(self): self.assertEqual(0, channel.json_body["messages_per_second"]) self.assertEqual(0, channel.json_body["burst_count"]) - def test_success(self): + def test_success(self) -> None: """ Rate-limiting (set/update/delete) should succeed for an admin. """ @@ -3908,7 +3912,7 @@ class AccountDataTestCase(unittest.HomeserverTestCase): login.register_servlets, ] - def prepare(self, reactor, clock, hs) -> None: + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastore() self.admin_user = self.register_user("admin", "pass", admin=True) diff --git a/tests/rest/admin/test_username_available.py b/tests/rest/admin/test_username_available.py index 7978626e7197..b21f6d4689f8 100644 --- a/tests/rest/admin/test_username_available.py +++ b/tests/rest/admin/test_username_available.py @@ -14,9 +14,13 @@ from http import HTTPStatus +from twisted.test.proto_helpers import MemoryReactor + import synapse.rest.admin from synapse.api.errors import Codes, SynapseError from synapse.rest.client import login +from synapse.server import HomeServer +from synapse.util import Clock from tests import unittest @@ -28,11 +32,11 @@ class UsernameAvailableTestCase(unittest.HomeserverTestCase): ] url = "/_synapse/admin/v1/username_available" - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") - async def check_username(username): + async def check_username(username: str) -> bool: if username == "allowed": return True raise SynapseError( @@ -44,24 +48,24 @@ async def check_username(username): handler = self.hs.get_registration_handler() handler.check_username = check_username - def test_username_available(self): + def test_username_available(self) -> None: """ The endpoint should return a HTTPStatus.OK response if the username does not exist """ url = "%s?username=%s" % (self.url, "allowed") - channel = self.make_request("GET", url, None, self.admin_user_tok) + channel = self.make_request("GET", url, access_token=self.admin_user_tok) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertTrue(channel.json_body["available"]) - def test_username_unavailable(self): + def test_username_unavailable(self) -> None: """ The endpoint should return a HTTPStatus.OK response if the username does not exist """ url = "%s?username=%s" % (self.url, "disallowed") - channel = self.make_request("GET", url, None, self.admin_user_tok) + channel = self.make_request("GET", url, access_token=self.admin_user_tok) self.assertEqual( HTTPStatus.BAD_REQUEST, From a35e9db9be8666d9ce1164bec3f30b7623ecdfb5 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 1 Feb 2022 11:04:17 +0000 Subject: [PATCH 14/19] 1.52.0rc1 --- CHANGES.md | 61 +++++++++++++++++++++++++++++++++++++++ changelog.d/11612.bugfix | 1 - changelog.d/11621.feature | 1 - changelog.d/11639.feature | 1 - changelog.d/11658.feature | 1 - changelog.d/11683.removal | 1 - changelog.d/11743.feature | 1 - changelog.d/11767.bugfix | 1 - 
changelog.d/11784.bugfix | 1 - changelog.d/11788.feature | 1 - changelog.d/11789.feature | 1 - changelog.d/11790.feature | 1 - changelog.d/11792.misc | 1 - changelog.d/11793.misc | 1 - changelog.d/11794.misc | 1 - changelog.d/11795.misc | 1 - changelog.d/11798.bugfix | 1 - changelog.d/11799.misc | 1 - changelog.d/11810.misc | 1 - changelog.d/11811.misc | 1 - changelog.d/11813.misc | 1 - changelog.d/11815.misc | 1 - changelog.d/11816.misc | 1 - changelog.d/11817.misc | 1 - changelog.d/11820.doc | 1 - changelog.d/11821.doc | 1 - changelog.d/11823.misc | 1 - changelog.d/11827.bugfix | 1 - changelog.d/11830.misc | 1 - changelog.d/11834.misc | 1 - changelog.d/11836.misc | 1 - changelog.d/11838.misc | 1 - changelog.d/11843.removal | 1 - changelog.d/11846.feature | 1 - changelog.d/11847.misc | 1 - changelog.d/11851.misc | 1 - changelog.d/11860.doc | 1 - changelog.d/11861.doc | 1 - debian/changelog | 6 ++++ synapse/__init__.py | 2 +- 40 files changed, 68 insertions(+), 38 deletions(-) delete mode 100644 changelog.d/11612.bugfix delete mode 100644 changelog.d/11621.feature delete mode 100644 changelog.d/11639.feature delete mode 100644 changelog.d/11658.feature delete mode 100644 changelog.d/11683.removal delete mode 100644 changelog.d/11743.feature delete mode 100644 changelog.d/11767.bugfix delete mode 100644 changelog.d/11784.bugfix delete mode 100644 changelog.d/11788.feature delete mode 100644 changelog.d/11789.feature delete mode 100644 changelog.d/11790.feature delete mode 100644 changelog.d/11792.misc delete mode 100644 changelog.d/11793.misc delete mode 100644 changelog.d/11794.misc delete mode 100644 changelog.d/11795.misc delete mode 100644 changelog.d/11798.bugfix delete mode 100644 changelog.d/11799.misc delete mode 100644 changelog.d/11810.misc delete mode 100644 changelog.d/11811.misc delete mode 100644 changelog.d/11813.misc delete mode 100644 changelog.d/11815.misc delete mode 100644 changelog.d/11816.misc delete mode 100644 changelog.d/11817.misc delete mode 100644 changelog.d/11820.doc delete mode 100644 changelog.d/11821.doc delete mode 100644 changelog.d/11823.misc delete mode 100644 changelog.d/11827.bugfix delete mode 100644 changelog.d/11830.misc delete mode 100644 changelog.d/11834.misc delete mode 100644 changelog.d/11836.misc delete mode 100644 changelog.d/11838.misc delete mode 100644 changelog.d/11843.removal delete mode 100644 changelog.d/11846.feature delete mode 100644 changelog.d/11847.misc delete mode 100644 changelog.d/11851.misc delete mode 100644 changelog.d/11860.doc delete mode 100644 changelog.d/11861.doc diff --git a/CHANGES.md b/CHANGES.md index 37b9e6bb9613..6a8ce170dc54 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,64 @@ +Synapse 1.52.0rc1 (2022-02-01) +============================== + +Features +-------- + +- Remove account data (including client config, push rules and ignored users) upon user deactivation. ([\#11621](https://github.com/matrix-org/synapse/issues/11621), [\#11788](https://github.com/matrix-org/synapse/issues/11788), [\#11789](https://github.com/matrix-org/synapse/issues/11789)) +- Add admin API to reset connection timeouts for remote server. ([\#11639](https://github.com/matrix-org/synapse/issues/11639)) +- Add an admin API to get a list of rooms that federate with a given remote homeserver. ([\#11658](https://github.com/matrix-org/synapse/issues/11658)) +- Add a config flag to inhibit M_USER_IN_USE during registration. 
([\#11743](https://github.com/matrix-org/synapse/issues/11743)) +- Add a module callback to set username at registration. ([\#11790](https://github.com/matrix-org/synapse/issues/11790)) +- Allow configuring a maximum file size as well as a list of allowed content types for avatars. ([\#11846](https://github.com/matrix-org/synapse/issues/11846)) + + +Bugfixes +-------- + +- Include the bundled aggregations in the `/sync` response, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). ([\#11612](https://github.com/matrix-org/synapse/issues/11612)) +- Fix a long-standing bug when previewing Reddit URLs which do not contain an image. ([\#11767](https://github.com/matrix-org/synapse/issues/11767)) +- Fix a long-standing bug that media streams could cause long-lived connections when generating URL previews. ([\#11784](https://github.com/matrix-org/synapse/issues/11784)) +- Include a `prev_content` field in state events sent to Application Services. Contributed by @totallynotvaishnav. ([\#11798](https://github.com/matrix-org/synapse/issues/11798)) +- Fix a bug introduced in Synapse 0.33.3 causing requests to sometimes log strings such as `HTTPStatus.OK` instead of integer status codes. ([\#11827](https://github.com/matrix-org/synapse/issues/11827)) + + +Improved Documentation +---------------------- + +- Update pypi installation docs to indicate that we now support Python 3.10. ([\#11820](https://github.com/matrix-org/synapse/issues/11820)) +- Add missing steps to the contribution submission process in the documentation. Contributed by @sequentialread. ([\#11821](https://github.com/matrix-org/synapse/issues/11821)) +- Remove not needed old table of contents in documentation. ([\#11860](https://github.com/matrix-org/synapse/issues/11860)) +- Consolidate the `access_token` information at the top of each relevant page in the Admin API documentation. ([\#11861](https://github.com/matrix-org/synapse/issues/11861)) + + +Deprecations and Removals +------------------------- + +- Drop support for Python 3.6, which is EOL. ([\#11683](https://github.com/matrix-org/synapse/issues/11683)) +- Remove the `experimental_msc1849_support_enabled` flag as the features are now stable. ([\#11843](https://github.com/matrix-org/synapse/issues/11843)) + + +Internal Changes +---------------- + +- Preparation for database schema simplifications: add `state_key` and `rejection_reason` columns to `events` table. ([\#11792](https://github.com/matrix-org/synapse/issues/11792)) +- Add `FrozenEvent.get_state_key` and use it in a couple of places. ([\#11793](https://github.com/matrix-org/synapse/issues/11793)) +- Preparation for database schema simplifications: stop reading from `event_reference_hashes`. ([\#11794](https://github.com/matrix-org/synapse/issues/11794)) +- Drop unused table `public_room_list_stream`. ([\#11795](https://github.com/matrix-org/synapse/issues/11795)) +- Preparation for reducing Postgres serialization errors: allow setting transaction isolation level. Contributed by Nick @ Beeper. ([\#11799](https://github.com/matrix-org/synapse/issues/11799), [\#11847](https://github.com/matrix-org/synapse/issues/11847)) +- Docker: skip the initial amd64-only build and go straight to multiarch. ([\#11810](https://github.com/matrix-org/synapse/issues/11810)) +- Run Complement on the Github Actions VM and not inside a Docker container. ([\#11811](https://github.com/matrix-org/synapse/issues/11811)) +- Log module names at startup. 
([\#11813](https://github.com/matrix-org/synapse/issues/11813)) +- Improve type safety of bundled aggregations code. ([\#11815](https://github.com/matrix-org/synapse/issues/11815)) +- Drop support for Python 3.6, which is EOL. ([\#11816](https://github.com/matrix-org/synapse/issues/11816)) +- Correct a type annotation in the event validation logic. ([\#11817](https://github.com/matrix-org/synapse/issues/11817), [\#11830](https://github.com/matrix-org/synapse/issues/11830)) +- Minor updates and documentation for database schema delta files. ([\#11823](https://github.com/matrix-org/synapse/issues/11823)) +- Workaround a type annotation problem in `prometheus_client` 0.13.0. ([\#11834](https://github.com/matrix-org/synapse/issues/11834)) +- Minor performance improvement in room state lookup. ([\#11836](https://github.com/matrix-org/synapse/issues/11836)) +- Fix some indentation inconsistencies in the sample config. ([\#11838](https://github.com/matrix-org/synapse/issues/11838)) +- Add type hints to `tests/rest/admin`. ([\#11851](https://github.com/matrix-org/synapse/issues/11851)) + + Synapse 1.51.0 (2022-01-25) =========================== diff --git a/changelog.d/11612.bugfix b/changelog.d/11612.bugfix deleted file mode 100644 index 842f6892fda3..000000000000 --- a/changelog.d/11612.bugfix +++ /dev/null @@ -1 +0,0 @@ -Include the bundled aggregations in the `/sync` response, per [MSC2675](https://github.com/matrix-org/matrix-doc/pull/2675). diff --git a/changelog.d/11621.feature b/changelog.d/11621.feature deleted file mode 100644 index dc426fb658ac..000000000000 --- a/changelog.d/11621.feature +++ /dev/null @@ -1 +0,0 @@ -Remove account data (including client config, push rules and ignored users) upon user deactivation. \ No newline at end of file diff --git a/changelog.d/11639.feature b/changelog.d/11639.feature deleted file mode 100644 index e9f6704f7a1f..000000000000 --- a/changelog.d/11639.feature +++ /dev/null @@ -1 +0,0 @@ -Add admin API to reset connection timeouts for remote server. \ No newline at end of file diff --git a/changelog.d/11658.feature b/changelog.d/11658.feature deleted file mode 100644 index 2ec9fb5eecf2..000000000000 --- a/changelog.d/11658.feature +++ /dev/null @@ -1 +0,0 @@ -Add an admin API to get a list of rooms that federate with a given remote homeserver. \ No newline at end of file diff --git a/changelog.d/11683.removal b/changelog.d/11683.removal deleted file mode 100644 index b1f048f7f52d..000000000000 --- a/changelog.d/11683.removal +++ /dev/null @@ -1 +0,0 @@ -Drop support for Python 3.6, which is EOL. \ No newline at end of file diff --git a/changelog.d/11743.feature b/changelog.d/11743.feature deleted file mode 100644 index 9809f48b9688..000000000000 --- a/changelog.d/11743.feature +++ /dev/null @@ -1 +0,0 @@ -Add a config flag to inhibit M_USER_IN_USE during registration. diff --git a/changelog.d/11767.bugfix b/changelog.d/11767.bugfix deleted file mode 100644 index 3e344747f4a2..000000000000 --- a/changelog.d/11767.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug when previewing Reddit URLs which do not contain an image. diff --git a/changelog.d/11784.bugfix b/changelog.d/11784.bugfix deleted file mode 100644 index 6569a8c2997a..000000000000 --- a/changelog.d/11784.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug that media streams could cause long-lived connections when generating URL previews. 
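[Editor's note: the long run of deletions that starts here is the other half of the release commit. Every file in `changelog.d/` is a one-line newsfragment named `<issue>.<type>`; cutting a release collates the fragments into the `CHANGES.md` sections shown above — merging identical texts (for example #11621/#11788/#11789, or #11799/#11847) into a single bullet with several issue links — and then deletes the fragment files. Synapse uses towncrier for this step; the following is only a rough, hypothetical sketch of the grouping logic, not the real tool:]

```python
# Illustrative approximation of the newsfragment collation step performed
# by this release commit. Not the actual tool (Synapse uses towncrier).
import os
from collections import defaultdict
from typing import Dict, List

# Section ordering mirrors the 1.52.0rc1 notes above.
SECTIONS = [
    ("feature", "Features"),
    ("bugfix", "Bugfixes"),
    ("doc", "Improved Documentation"),
    ("removal", "Deprecations and Removals"),
    ("misc", "Internal Changes"),
]


def collate(changelog_dir: str = "changelog.d") -> str:
    # suffix -> {fragment text -> issue numbers}, so that identical
    # fragments (e.g. 11621/11788/11789) merge into a single bullet.
    grouped: Dict[str, Dict[str, List[int]]] = defaultdict(
        lambda: defaultdict(list)
    )
    for name in os.listdir(changelog_dir):
        issue, _, suffix = name.partition(".")
        if issue.isdigit():
            with open(os.path.join(changelog_dir, name)) as f:
                grouped[suffix][f.read().strip()].append(int(issue))

    out: List[str] = []
    for suffix, title in SECTIONS:
        if suffix not in grouped:
            continue
        out += [title, "-" * len(title), ""]
        for text, issues in grouped[suffix].items():
            links = ", ".join(
                "[\\#%d](https://github.com/matrix-org/synapse/issues/%d)" % (n, n)
                for n in sorted(issues)
            )
            out.append("- %s (%s)" % (text, links))
        out.append("")
    return "\n".join(out)
```

[Running `collate()` over the fragments deleted below would approximately reproduce the section ordering and merged bullets of the 1.52.0rc1 notes above.]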
diff --git a/changelog.d/11788.feature b/changelog.d/11788.feature deleted file mode 100644 index dc426fb658ac..000000000000 --- a/changelog.d/11788.feature +++ /dev/null @@ -1 +0,0 @@ -Remove account data (including client config, push rules and ignored users) upon user deactivation. \ No newline at end of file diff --git a/changelog.d/11789.feature b/changelog.d/11789.feature deleted file mode 100644 index dc426fb658ac..000000000000 --- a/changelog.d/11789.feature +++ /dev/null @@ -1 +0,0 @@ -Remove account data (including client config, push rules and ignored users) upon user deactivation. \ No newline at end of file diff --git a/changelog.d/11790.feature b/changelog.d/11790.feature deleted file mode 100644 index 4a5cc8ec37db..000000000000 --- a/changelog.d/11790.feature +++ /dev/null @@ -1 +0,0 @@ -Add a module callback to set username at registration. diff --git a/changelog.d/11792.misc b/changelog.d/11792.misc deleted file mode 100644 index 6aa1cd61c354..000000000000 --- a/changelog.d/11792.misc +++ /dev/null @@ -1 +0,0 @@ -Preparation for database schema simplifications: add `state_key` and `rejection_reason` columns to `events` table. diff --git a/changelog.d/11793.misc b/changelog.d/11793.misc deleted file mode 100644 index fc0530bf2cb6..000000000000 --- a/changelog.d/11793.misc +++ /dev/null @@ -1 +0,0 @@ -Add `FrozenEvent.get_state_key` and use it in a couple of places. diff --git a/changelog.d/11794.misc b/changelog.d/11794.misc deleted file mode 100644 index 29826bc0e5ab..000000000000 --- a/changelog.d/11794.misc +++ /dev/null @@ -1 +0,0 @@ -Preparation for database schema simplifications: stop reading from `event_reference_hashes`. diff --git a/changelog.d/11795.misc b/changelog.d/11795.misc deleted file mode 100644 index aeba317670e9..000000000000 --- a/changelog.d/11795.misc +++ /dev/null @@ -1 +0,0 @@ -Drop unused table `public_room_list_stream`. diff --git a/changelog.d/11798.bugfix b/changelog.d/11798.bugfix deleted file mode 100644 index 2651fd11cff0..000000000000 --- a/changelog.d/11798.bugfix +++ /dev/null @@ -1 +0,0 @@ -Include a `prev_content` field in state events sent to Application Services. Contributed by @totallynotvaishnav. \ No newline at end of file diff --git a/changelog.d/11799.misc b/changelog.d/11799.misc deleted file mode 100644 index 5c3b2bcaf4e5..000000000000 --- a/changelog.d/11799.misc +++ /dev/null @@ -1 +0,0 @@ -Preparation for reducing Postgres serialization errors: allow setting transaction isolation level. Contributed by Nick @ Beeper. diff --git a/changelog.d/11810.misc b/changelog.d/11810.misc deleted file mode 100644 index 5579b85979b1..000000000000 --- a/changelog.d/11810.misc +++ /dev/null @@ -1 +0,0 @@ -Docker: skip the initial amd64-only build and go straight to multiarch. diff --git a/changelog.d/11811.misc b/changelog.d/11811.misc deleted file mode 100644 index b911a2d042d2..000000000000 --- a/changelog.d/11811.misc +++ /dev/null @@ -1 +0,0 @@ -Run Complement on the Github Actions VM and not inside a Docker container. \ No newline at end of file diff --git a/changelog.d/11813.misc b/changelog.d/11813.misc deleted file mode 100644 index f90d183b45cb..000000000000 --- a/changelog.d/11813.misc +++ /dev/null @@ -1 +0,0 @@ -Log module names at startup. diff --git a/changelog.d/11815.misc b/changelog.d/11815.misc deleted file mode 100644 index 83aa6d6eb046..000000000000 --- a/changelog.d/11815.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type safety of bundled aggregations code. 
diff --git a/changelog.d/11816.misc b/changelog.d/11816.misc deleted file mode 100644 index b1f048f7f52d..000000000000 --- a/changelog.d/11816.misc +++ /dev/null @@ -1 +0,0 @@ -Drop support for Python 3.6, which is EOL. \ No newline at end of file diff --git a/changelog.d/11817.misc b/changelog.d/11817.misc deleted file mode 100644 index 3d6b2ea4d485..000000000000 --- a/changelog.d/11817.misc +++ /dev/null @@ -1 +0,0 @@ -Correct a type annotation in the event validation logic. diff --git a/changelog.d/11820.doc b/changelog.d/11820.doc deleted file mode 100644 index 4f563b9b569b..000000000000 --- a/changelog.d/11820.doc +++ /dev/null @@ -1 +0,0 @@ -Update pypi installation docs to indicate that we now support Python 3.10. diff --git a/changelog.d/11821.doc b/changelog.d/11821.doc deleted file mode 100644 index a16a6ef956e2..000000000000 --- a/changelog.d/11821.doc +++ /dev/null @@ -1 +0,0 @@ -Add missing steps to the contribution submission process in the documentation. Contributed by @sequentialread. diff --git a/changelog.d/11823.misc b/changelog.d/11823.misc deleted file mode 100644 index 2d153eae4ade..000000000000 --- a/changelog.d/11823.misc +++ /dev/null @@ -1 +0,0 @@ -Minor updates and documentation for database schema delta files. diff --git a/changelog.d/11827.bugfix b/changelog.d/11827.bugfix deleted file mode 100644 index 30222dfb6269..000000000000 --- a/changelog.d/11827.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 0.33.3 causing requests to sometimes log strings such as `HTTPStatus.OK` instead of integer status codes. \ No newline at end of file diff --git a/changelog.d/11830.misc b/changelog.d/11830.misc deleted file mode 100644 index fe248d00ab94..000000000000 --- a/changelog.d/11830.misc +++ /dev/null @@ -1 +0,0 @@ -Correct a type annotation in the event validation logic. \ No newline at end of file diff --git a/changelog.d/11834.misc b/changelog.d/11834.misc deleted file mode 100644 index 29a5635f7a1e..000000000000 --- a/changelog.d/11834.misc +++ /dev/null @@ -1 +0,0 @@ -Workaround a type annotation problem in `prometheus_client` 0.13.0. \ No newline at end of file diff --git a/changelog.d/11836.misc b/changelog.d/11836.misc deleted file mode 100644 index be7e331c639c..000000000000 --- a/changelog.d/11836.misc +++ /dev/null @@ -1 +0,0 @@ -Minor performance improvement in room state lookup. diff --git a/changelog.d/11838.misc b/changelog.d/11838.misc deleted file mode 100644 index 4747bbe88b0d..000000000000 --- a/changelog.d/11838.misc +++ /dev/null @@ -1 +0,0 @@ -Fix some indentation inconsistencies in the sample config. \ No newline at end of file diff --git a/changelog.d/11843.removal b/changelog.d/11843.removal deleted file mode 100644 index d6d49b4ad50b..000000000000 --- a/changelog.d/11843.removal +++ /dev/null @@ -1 +0,0 @@ -Remove the `experimental_msc1849_support_enabled` flag as the features are now stable. diff --git a/changelog.d/11846.feature b/changelog.d/11846.feature deleted file mode 100644 index fcf6affdb53d..000000000000 --- a/changelog.d/11846.feature +++ /dev/null @@ -1 +0,0 @@ -Allow configuring a maximum file size as well as a list of allowed content types for avatars. diff --git a/changelog.d/11847.misc b/changelog.d/11847.misc deleted file mode 100644 index 5c3b2bcaf4e5..000000000000 --- a/changelog.d/11847.misc +++ /dev/null @@ -1 +0,0 @@ -Preparation for reducing Postgres serialization errors: allow setting transaction isolation level. Contributed by Nick @ Beeper. 
diff --git a/changelog.d/11851.misc b/changelog.d/11851.misc deleted file mode 100644 index ccc3ec3482b1..000000000000 --- a/changelog.d/11851.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to `tests/rest/admin`. diff --git a/changelog.d/11860.doc b/changelog.d/11860.doc deleted file mode 100644 index 04b88c5f2c30..000000000000 --- a/changelog.d/11860.doc +++ /dev/null @@ -1 +0,0 @@ -Remove not needed old table of contents in documentation. diff --git a/changelog.d/11861.doc b/changelog.d/11861.doc deleted file mode 100644 index 53c75a088378..000000000000 --- a/changelog.d/11861.doc +++ /dev/null @@ -1 +0,0 @@ -Consolidate the `access_token` information at the top of each relevant page in the Admin API documentation. diff --git a/debian/changelog b/debian/changelog index 3a598c4148c9..a45888565554 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.52.0~rc1) stable; urgency=medium + + * New synapse release 1.52.0~rc1. + + -- Synapse Packaging team Tue, 01 Feb 2022 11:04:09 +0000 + matrix-synapse-py3 (1.51.0) stable; urgency=medium * New synapse release 1.51.0. diff --git a/synapse/__init__.py b/synapse/__init__.py index 603dbb27e150..5e6503306101 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.51.0" +__version__ = "1.52.0rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From b7282fe7d17ec767a30725dcb1f66107ef0bd838 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 1 Feb 2022 11:07:12 +0000 Subject: [PATCH 15/19] Don't mention 3.6 EOL under misc It's already under deps & removals --- CHANGES.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 6a8ce170dc54..36707db03bf7 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,7 +5,7 @@ Features -------- - Remove account data (including client config, push rules and ignored users) upon user deactivation. ([\#11621](https://github.com/matrix-org/synapse/issues/11621), [\#11788](https://github.com/matrix-org/synapse/issues/11788), [\#11789](https://github.com/matrix-org/synapse/issues/11789)) -- Add admin API to reset connection timeouts for remote server. ([\#11639](https://github.com/matrix-org/synapse/issues/11639)) +- Add an admin API to reset connection timeouts for remote server. ([\#11639](https://github.com/matrix-org/synapse/issues/11639)) - Add an admin API to get a list of rooms that federate with a given remote homeserver. ([\#11658](https://github.com/matrix-org/synapse/issues/11658)) - Add a config flag to inhibit M_USER_IN_USE during registration. ([\#11743](https://github.com/matrix-org/synapse/issues/11743)) - Add a module callback to set username at registration. ([\#11790](https://github.com/matrix-org/synapse/issues/11790)) @@ -50,7 +50,6 @@ Internal Changes - Run Complement on the Github Actions VM and not inside a Docker container. ([\#11811](https://github.com/matrix-org/synapse/issues/11811)) - Log module names at startup. ([\#11813](https://github.com/matrix-org/synapse/issues/11813)) - Improve type safety of bundled aggregations code. ([\#11815](https://github.com/matrix-org/synapse/issues/11815)) -- Drop support for Python 3.6, which is EOL. ([\#11816](https://github.com/matrix-org/synapse/issues/11816)) - Correct a type annotation in the event validation logic. 
([\#11817](https://github.com/matrix-org/synapse/issues/11817), [\#11830](https://github.com/matrix-org/synapse/issues/11830))
- Minor updates and documentation for database schema delta files. ([\#11823](https://github.com/matrix-org/synapse/issues/11823))
- Workaround a type annotation problem in `prometheus_client` 0.13.0. ([\#11834](https://github.com/matrix-org/synapse/issues/11834))

From 64ec45fc1b0856dc7daacca7d3ab75d50bd89f84 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Tue, 1 Feb 2022 14:13:38 +0000
Subject: [PATCH 16/19] Send to-device messages to application services
 (#11215)

Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
---
 changelog.d/11215.feature                     |   1 +
 synapse/appservice/__init__.py                |   3 +
 synapse/appservice/api.py                     |  29 +-
 synapse/appservice/scheduler.py               |  97 ++++--
 synapse/config/experimental.py                |   7 +
 synapse/handlers/appservice.py                | 136 +++++++--
 synapse/handlers/sync.py                      |   4 +-
 synapse/notifier.py                           |   4 +-
 synapse/storage/databases/main/appservice.py  |  24 +-
 synapse/storage/databases/main/deviceinbox.py | 276 ++++++++++++++---
 ...9_add_device_id_appservice_stream_type.sql |  21 ++
 tests/appservice/test_scheduler.py            | 109 ++++---
 tests/handlers/test_appservice.py             | 281 +++++++++++++++++-
 tests/storage/test_appservice.py              |  26 +-
 14 files changed, 856 insertions(+), 162 deletions(-)
 create mode 100644 changelog.d/11215.feature
 create mode 100644 synapse/storage/schema/main/delta/68/02_msc2409_add_device_id_appservice_stream_type.sql

diff --git a/changelog.d/11215.feature b/changelog.d/11215.feature
new file mode 100644
index 000000000000..468020834b3d
--- /dev/null
+++ b/changelog.d/11215.feature
@@ -0,0 +1 @@
+Add experimental support for sending to-device messages to application services, as specified by [MSC2409](https://github.com/matrix-org/matrix-doc/pull/2409). Disabled by default.
diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py
index 8c9ff93b2c13..7dbebd97b5d3 100644
--- a/synapse/appservice/__init__.py
+++ b/synapse/appservice/__init__.py
@@ -351,11 +351,13 @@ def __init__(
         id: int,
         events: List[EventBase],
         ephemeral: List[JsonDict],
+        to_device_messages: List[JsonDict],
     ):
         self.service = service
         self.id = id
         self.events = events
         self.ephemeral = ephemeral
+        self.to_device_messages = to_device_messages

     async def send(self, as_api: "ApplicationServiceApi") -> bool:
         """Sends this transaction using the provided AS API interface.
@@ -369,6 +371,7 @@ async def send(self, as_api: "ApplicationServiceApi") -> bool:
             service=self.service,
             events=self.events,
             ephemeral=self.ephemeral,
+            to_device_messages=self.to_device_messages,
             txn_id=self.id,
         )

diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py
index def4424af0ee..73be7ff3d4a1 100644
--- a/synapse/appservice/api.py
+++ b/synapse/appservice/api.py
@@ -218,8 +218,23 @@ async def push_bulk(
         service: "ApplicationService",
         events: List[EventBase],
         ephemeral: List[JsonDict],
+        to_device_messages: List[JsonDict],
         txn_id: Optional[int] = None,
     ) -> bool:
+        """
+        Push data to an application service.
+
+        Args:
+            service: The application service to send to.
+            events: The persistent events to send.
+            ephemeral: The ephemeral events to send.
+            to_device_messages: The to-device messages to send.
+            txn_id: A unique ID to assign to this transaction. Application services should
+                deduplicate transactions received with identical IDs.
+
+        Returns:
+            True if the task succeeded, False if it failed.
+ """ if service.url is None: return True @@ -237,13 +252,15 @@ async def push_bulk( uri = service.url + ("/transactions/%s" % urllib.parse.quote(str(txn_id))) # Never send ephemeral events to appservices that do not support it + body: Dict[str, List[JsonDict]] = {"events": serialized_events} if service.supports_ephemeral: - body = { - "events": serialized_events, - "de.sorunome.msc2409.ephemeral": ephemeral, - } - else: - body = {"events": serialized_events} + body.update( + { + # TODO: Update to stable prefixes once MSC2409 completes FCP merge. + "de.sorunome.msc2409.ephemeral": ephemeral, + "de.sorunome.msc2409.to_device": to_device_messages, + } + ) try: await self.put_json( diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index 185e3a527815..c42fa32fff32 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -48,7 +48,16 @@ components. """ import logging -from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional, Set +from typing import ( + TYPE_CHECKING, + Awaitable, + Callable, + Collection, + Dict, + List, + Optional, + Set, +) from synapse.appservice import ApplicationService, ApplicationServiceState from synapse.appservice.api import ApplicationServiceApi @@ -71,6 +80,9 @@ # Maximum number of ephemeral events to provide in an AS transaction. MAX_EPHEMERAL_EVENTS_PER_TRANSACTION = 100 +# Maximum number of to-device messages to provide in an AS transaction. +MAX_TO_DEVICE_MESSAGES_PER_TRANSACTION = 100 + class ApplicationServiceScheduler: """Public facing API for this module. Does the required DI to tie the @@ -97,15 +109,40 @@ async def start(self) -> None: for service in services: self.txn_ctrl.start_recoverer(service) - def submit_event_for_as( - self, service: ApplicationService, event: EventBase + def enqueue_for_appservice( + self, + appservice: ApplicationService, + events: Optional[Collection[EventBase]] = None, + ephemeral: Optional[Collection[JsonDict]] = None, + to_device_messages: Optional[Collection[JsonDict]] = None, ) -> None: - self.queuer.enqueue_event(service, event) + """ + Enqueue some data to be sent off to an application service. - def submit_ephemeral_events_for_as( - self, service: ApplicationService, events: List[JsonDict] - ) -> None: - self.queuer.enqueue_ephemeral(service, events) + Args: + appservice: The application service to create and send a transaction to. + events: The persistent room events to send. + ephemeral: The ephemeral events to send. + to_device_messages: The to-device messages to send. These differ from normal + to-device messages sent to clients, as they have 'to_device_id' and + 'to_user_id' fields. + """ + # We purposefully allow this method to run with empty events/ephemeral + # collections, so that callers do not need to check iterable size themselves. 
+ if not events and not ephemeral and not to_device_messages: + return + + if events: + self.queuer.queued_events.setdefault(appservice.id, []).extend(events) + if ephemeral: + self.queuer.queued_ephemeral.setdefault(appservice.id, []).extend(ephemeral) + if to_device_messages: + self.queuer.queued_to_device_messages.setdefault(appservice.id, []).extend( + to_device_messages + ) + + # Kick off a new application service transaction + self.queuer.start_background_request(appservice) class _ServiceQueuer: @@ -121,13 +158,15 @@ def __init__(self, txn_ctrl: "_TransactionController", clock: Clock): self.queued_events: Dict[str, List[EventBase]] = {} # dict of {service_id: [events]} self.queued_ephemeral: Dict[str, List[JsonDict]] = {} + # dict of {service_id: [to_device_message_json]} + self.queued_to_device_messages: Dict[str, List[JsonDict]] = {} # the appservices which currently have a transaction in flight self.requests_in_flight: Set[str] = set() self.txn_ctrl = txn_ctrl self.clock = clock - def _start_background_request(self, service: ApplicationService) -> None: + def start_background_request(self, service: ApplicationService) -> None: # start a sender for this appservice if we don't already have one if service.id in self.requests_in_flight: return @@ -136,16 +175,6 @@ def _start_background_request(self, service: ApplicationService) -> None: "as-sender-%s" % (service.id,), self._send_request, service ) - def enqueue_event(self, service: ApplicationService, event: EventBase) -> None: - self.queued_events.setdefault(service.id, []).append(event) - self._start_background_request(service) - - def enqueue_ephemeral( - self, service: ApplicationService, events: List[JsonDict] - ) -> None: - self.queued_ephemeral.setdefault(service.id, []).extend(events) - self._start_background_request(service) - async def _send_request(self, service: ApplicationService) -> None: # sanity-check: we shouldn't get here if this service already has a sender # running. @@ -162,11 +191,21 @@ async def _send_request(self, service: ApplicationService) -> None: ephemeral = all_events_ephemeral[:MAX_EPHEMERAL_EVENTS_PER_TRANSACTION] del all_events_ephemeral[:MAX_EPHEMERAL_EVENTS_PER_TRANSACTION] - if not events and not ephemeral: + all_to_device_messages = self.queued_to_device_messages.get( + service.id, [] + ) + to_device_messages_to_send = all_to_device_messages[ + :MAX_TO_DEVICE_MESSAGES_PER_TRANSACTION + ] + del all_to_device_messages[:MAX_TO_DEVICE_MESSAGES_PER_TRANSACTION] + + if not events and not ephemeral and not to_device_messages_to_send: return try: - await self.txn_ctrl.send(service, events, ephemeral) + await self.txn_ctrl.send( + service, events, ephemeral, to_device_messages_to_send + ) except Exception: logger.exception("AS request failed") finally: @@ -198,10 +237,24 @@ async def send( service: ApplicationService, events: List[EventBase], ephemeral: Optional[List[JsonDict]] = None, + to_device_messages: Optional[List[JsonDict]] = None, ) -> None: + """ + Create a transaction with the given data and send to the provided + application service. + + Args: + service: The application service to send the transaction to. + events: The persistent events to include in the transaction. + ephemeral: The ephemeral events to include in the transaction. + to_device_messages: The to-device messages to include in the transaction. 
+ """ try: txn = await self.store.create_appservice_txn( - service=service, events=events, ephemeral=ephemeral or [] + service=service, + events=events, + ephemeral=ephemeral or [], + to_device_messages=to_device_messages or [], ) service_is_up = await self._is_service_up(service) if service_is_up: diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 65c807a19af7..e4719d19b857 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -52,3 +52,10 @@ def read_config(self, config: JsonDict, **kwargs): self.msc3202_device_masquerading_enabled: bool = experimental.get( "msc3202_device_masquerading", False ) + + # MSC2409 (this setting only relates to optionally sending to-device messages). + # Presence, typing and read receipt EDUs are already sent to application services that + # have opted in to receive them. If enabled, this adds to-device messages to that list. + self.msc2409_to_device_messages_enabled: bool = experimental.get( + "msc2409_to_device_messages_enabled", False + ) diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 7833e77e2b6b..0fb919acf672 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -55,6 +55,9 @@ def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() self.notify_appservices = hs.config.appservice.notify_appservices self.event_sources = hs.get_event_sources() + self._msc2409_to_device_messages_enabled = ( + hs.config.experimental.msc2409_to_device_messages_enabled + ) self.current_max = 0 self.is_processing = False @@ -132,7 +135,9 @@ async def start_scheduler() -> None: # Fork off pushes to these services for service in services: - self.scheduler.submit_event_for_as(service, event) + self.scheduler.enqueue_for_appservice( + service, events=[event] + ) now = self.clock.time_msec() ts = await self.store.get_received_ts(event.event_id) @@ -199,8 +204,9 @@ def notify_interested_services_ephemeral( Args: stream_key: The stream the event came from. - `stream_key` can be "typing_key", "receipt_key" or "presence_key". Any other - value for `stream_key` will cause this function to return early. + `stream_key` can be "typing_key", "receipt_key", "presence_key" or + "to_device_key". Any other value for `stream_key` will cause this function + to return early. Ephemeral events will only be pushed to appservices that have opted into receiving them by setting `push_ephemeral` to true in their registration @@ -216,8 +222,15 @@ def notify_interested_services_ephemeral( if not self.notify_appservices: return - # Ignore any unsupported streams - if stream_key not in ("typing_key", "receipt_key", "presence_key"): + # Notify appservices of updates in ephemeral event streams. + # Only the following streams are currently supported. + # FIXME: We should use constants for these values. + if stream_key not in ( + "typing_key", + "receipt_key", + "presence_key", + "to_device_key", + ): return # Assert that new_token is an integer (and not a RoomStreamToken). @@ -233,6 +246,13 @@ def notify_interested_services_ephemeral( # Additional context: https://github.com/matrix-org/synapse/pull/11137 assert isinstance(new_token, int) + # Ignore to-device messages if the feature flag is not enabled + if ( + stream_key == "to_device_key" + and not self._msc2409_to_device_messages_enabled + ): + return + # Check whether there are any appservices which have registered to receive # ephemeral events. 
# @@ -266,7 +286,7 @@ async def _notify_interested_services_ephemeral( with Measure(self.clock, "notify_interested_services_ephemeral"): for service in services: if stream_key == "typing_key": - # Note that we don't persist the token (via set_type_stream_id_for_appservice) + # Note that we don't persist the token (via set_appservice_stream_type_pos) # for typing_key due to performance reasons and due to their highly # ephemeral nature. # @@ -274,7 +294,7 @@ async def _notify_interested_services_ephemeral( # and, if they apply to this application service, send it off. events = await self._handle_typing(service, new_token) if events: - self.scheduler.submit_ephemeral_events_for_as(service, events) + self.scheduler.enqueue_for_appservice(service, ephemeral=events) continue # Since we read/update the stream position for this AS/stream @@ -285,28 +305,37 @@ async def _notify_interested_services_ephemeral( ): if stream_key == "receipt_key": events = await self._handle_receipts(service, new_token) - if events: - self.scheduler.submit_ephemeral_events_for_as( - service, events - ) + self.scheduler.enqueue_for_appservice(service, ephemeral=events) # Persist the latest handled stream token for this appservice - await self.store.set_type_stream_id_for_appservice( + await self.store.set_appservice_stream_type_pos( service, "read_receipt", new_token ) elif stream_key == "presence_key": events = await self._handle_presence(service, users, new_token) - if events: - self.scheduler.submit_ephemeral_events_for_as( - service, events - ) + self.scheduler.enqueue_for_appservice(service, ephemeral=events) # Persist the latest handled stream token for this appservice - await self.store.set_type_stream_id_for_appservice( + await self.store.set_appservice_stream_type_pos( service, "presence", new_token ) + elif stream_key == "to_device_key": + # Retrieve a list of to-device message events, as well as the + # maximum stream token of the messages we were able to retrieve. + to_device_messages = await self._get_to_device_messages( + service, new_token, users + ) + self.scheduler.enqueue_for_appservice( + service, to_device_messages=to_device_messages + ) + + # Persist the latest handled stream token for this appservice + await self.store.set_appservice_stream_type_pos( + service, "to_device", new_token + ) + async def _handle_typing( self, service: ApplicationService, new_token: int ) -> List[JsonDict]: @@ -440,6 +469,79 @@ async def _handle_presence( return events + async def _get_to_device_messages( + self, + service: ApplicationService, + new_token: int, + users: Collection[Union[str, UserID]], + ) -> List[JsonDict]: + """ + Given an application service, determine which events it should receive + from those between the last-recorded to-device message stream token for this + appservice and the given stream token. + + Args: + service: The application service to check for which events it should receive. + new_token: The latest to-device event stream token. + users: The users to be notified for the new to-device messages + (ie, the recipients of the messages). + + Returns: + A list of JSON dictionaries containing data derived from the to-device events + that should be sent to the given application service. 
+        """
+        # Get the stream token that this application service has processed up until
+        from_key = await self.store.get_type_stream_id_for_appservice(
+            service, "to_device"
+        )
+
+        # Filter out users that this appservice is not interested in
+        users_appservice_is_interested_in: List[str] = []
+        for user in users:
+            # FIXME: We should do this farther up the call stack. We currently repeat
+            # this operation in _handle_presence.
+            if isinstance(user, UserID):
+                user = user.to_string()
+
+            if service.is_interested_in_user(user):
+                users_appservice_is_interested_in.append(user)
+
+        if not users_appservice_is_interested_in:
+            # Return early if the AS was not interested in any of these users
+            return []
+
+        # Retrieve the to-device messages for each user
+        recipient_device_to_messages = await self.store.get_messages_for_user_devices(
+            users_appservice_is_interested_in,
+            from_key,
+            new_token,
+        )
+
+        # According to MSC2409, we'll need to add 'to_user_id' and 'to_device_id' fields
+        # to the event JSON so that the application service will know which user/device
+        # combination each message was intended for.
+        #
+        # So we mangle this dict into a flat list of to-device messages with the relevant
+        # user ID and device ID embedded inside each message dict.
+        message_payload: List[JsonDict] = []
+        for (
+            user_id,
+            device_id,
+        ), messages in recipient_device_to_messages.items():
+            for message_json in messages:
+                # Remove 'message_id' from the to-device message, as it's an internal ID
+                message_json.pop("message_id", None)
+
+                message_payload.append(
+                    {
+                        "to_user_id": user_id,
+                        "to_device_id": device_id,
+                        **message_json,
+                    }
+                )
+
+        return message_payload
+
     async def query_user_exists(self, user_id: str) -> bool:
         """Check if any application service knows this user_id exists.
 
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index c72ed7c2907a..aa9a76f8a968 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -1348,8 +1348,8 @@ async def _generate_sync_entry_for_to_device(
         if sync_result_builder.since_token is not None:
             since_stream_id = int(sync_result_builder.since_token.to_device_key)
 
-        if since_stream_id != int(now_token.to_device_key):
-            messages, stream_id = await self.store.get_new_messages_for_device(
+        if device_id is not None and since_stream_id != int(now_token.to_device_key):
+            messages, stream_id = await self.store.get_messages_for_device(
                 user_id, device_id, since_stream_id, now_token.to_device_key
             )
 
diff --git a/synapse/notifier.py b/synapse/notifier.py
index 632b2245ef55..5988c67d9076 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -461,7 +461,9 @@ def on_new_event(
                     users,
                 )
             except Exception:
-                logger.exception("Error notifying application services of event")
+                logger.exception(
+                    "Error notifying application services of ephemeral events"
+                )
 
     def on_new_replication_data(self) -> None:
         """Used to inform replication listeners that something has happened
diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py
index 2bb52884315e..304814af5dc5 100644
--- a/synapse/storage/databases/main/appservice.py
+++ b/synapse/storage/databases/main/appservice.py
@@ -198,6 +198,7 @@ async def create_appservice_txn(
         service: ApplicationService,
         events: List[EventBase],
         ephemeral: List[JsonDict],
+        to_device_messages: List[JsonDict],
     ) -> AppServiceTransaction:
         """Atomically creates a new transaction for this application service
         with the given list of events.
Ephemeral events are NOT persisted to the @@ -207,6 +208,7 @@ async def create_appservice_txn( service: The service who the transaction is for. events: A list of persistent events to put in the transaction. ephemeral: A list of ephemeral events to put in the transaction. + to_device_messages: A list of to-device messages to put in the transaction. Returns: A new transaction. @@ -237,7 +239,11 @@ def _create_appservice_txn(txn): (service.id, new_txn_id, event_ids), ) return AppServiceTransaction( - service=service, id=new_txn_id, events=events, ephemeral=ephemeral + service=service, + id=new_txn_id, + events=events, + ephemeral=ephemeral, + to_device_messages=to_device_messages, ) return await self.db_pool.runInteraction( @@ -330,7 +336,11 @@ def _get_oldest_unsent_txn(txn): events = await self.get_events_as_list(event_ids) return AppServiceTransaction( - service=service, id=entry["txn_id"], events=events, ephemeral=[] + service=service, + id=entry["txn_id"], + events=events, + ephemeral=[], + to_device_messages=[], ) def _get_last_txn(self, txn, service_id: Optional[str]) -> int: @@ -391,7 +401,7 @@ def get_new_events_for_appservice_txn(txn): async def get_type_stream_id_for_appservice( self, service: ApplicationService, type: str ) -> int: - if type not in ("read_receipt", "presence"): + if type not in ("read_receipt", "presence", "to_device"): raise ValueError( "Expected type to be a valid application stream id type, got %s" % (type,) @@ -415,16 +425,16 @@ def get_type_stream_id_for_appservice_txn(txn): "get_type_stream_id_for_appservice", get_type_stream_id_for_appservice_txn ) - async def set_type_stream_id_for_appservice( + async def set_appservice_stream_type_pos( self, service: ApplicationService, stream_type: str, pos: Optional[int] ) -> None: - if stream_type not in ("read_receipt", "presence"): + if stream_type not in ("read_receipt", "presence", "to_device"): raise ValueError( "Expected type to be a valid application stream id type, got %s" % (stream_type,) ) - def set_type_stream_id_for_appservice_txn(txn): + def set_appservice_stream_type_pos_txn(txn): stream_id_type = "%s_stream_id" % stream_type txn.execute( "UPDATE application_services_state SET %s = ? WHERE as_id=?" @@ -433,7 +443,7 @@ def set_type_stream_id_for_appservice_txn(txn): ) await self.db_pool.runInteraction( - "set_type_stream_id_for_appservice", set_type_stream_id_for_appservice_txn + "set_appservice_stream_type_pos", set_appservice_stream_type_pos_txn ) diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 4eca97189bef..8801b7b2dd83 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -14,7 +14,7 @@ # limitations under the License. 
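Before the deviceinbox changes below, note the bookkeeping contract that the storage methods above establish for the new "to_device" stream type. A hedged sketch of the read-then-advance loop, using only methods shown in this patch (`store`, `service` and `new_token` are assumed to be in scope; the helper itself is illustrative, not part of the patch):

    async def advance_to_device_stream(store, service, new_token: int) -> None:
        # The last stream id this appservice is known to have processed...
        from_key = await store.get_type_stream_id_for_appservice(service, "to_device")

        # ...bounds the (from_key, new_token] window of messages to hand off.
        # (Fetching and enqueuing is elided; see _get_to_device_messages above.)

        # Once enqueued, record that the appservice has caught up to new_token.
        await store.set_appservice_stream_type_pos(service, "to_device", new_token)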
import logging -from typing import TYPE_CHECKING, List, Optional, Tuple, cast +from typing import TYPE_CHECKING, Collection, Dict, List, Optional, Set, Tuple, cast from synapse.logging import issue9533_logger from synapse.logging.opentracing import log_kv, set_tag, trace @@ -24,6 +24,7 @@ DatabasePool, LoggingDatabaseConnection, LoggingTransaction, + make_in_list_sql_clause, ) from synapse.storage.engines import PostgresEngine from synapse.storage.util.id_generators import ( @@ -136,63 +137,260 @@ def process_replication_rows(self, stream_name, instance_name, token, rows): def get_to_device_stream_token(self): return self._device_inbox_id_gen.get_current_token() - async def get_new_messages_for_device( + async def get_messages_for_user_devices( + self, + user_ids: Collection[str], + from_stream_id: int, + to_stream_id: int, + ) -> Dict[Tuple[str, str], List[JsonDict]]: + """ + Retrieve to-device messages for a given set of users. + + Only to-device messages with stream ids between the given boundaries + (from < X <= to) are returned. + + Args: + user_ids: The users to retrieve to-device messages for. + from_stream_id: The lower boundary of stream id to filter with (exclusive). + to_stream_id: The upper boundary of stream id to filter with (inclusive). + + Returns: + A dictionary of (user id, device id) -> list of to-device messages. + """ + # We expect the stream ID returned by _get_device_messages to always + # be to_stream_id. So, no need to return it from this function. + ( + user_id_device_id_to_messages, + last_processed_stream_id, + ) = await self._get_device_messages( + user_ids=user_ids, + from_stream_id=from_stream_id, + to_stream_id=to_stream_id, + ) + + assert ( + last_processed_stream_id == to_stream_id + ), "Expected _get_device_messages to process all to-device messages up to `to_stream_id`" + + return user_id_device_id_to_messages + + async def get_messages_for_device( self, user_id: str, - device_id: Optional[str], - last_stream_id: int, - current_stream_id: int, + device_id: str, + from_stream_id: int, + to_stream_id: int, limit: int = 100, - ) -> Tuple[List[dict], int]: + ) -> Tuple[List[JsonDict], int]: """ + Retrieve to-device messages for a single user device. + + Only to-device messages with stream ids between the given boundaries + (from < X <= to) are returned. + Args: - user_id: The recipient user_id. - device_id: The recipient device_id. - last_stream_id: The last stream ID checked. - current_stream_id: The current position of the to device - message stream. - limit: The maximum number of messages to retrieve. + user_id: The ID of the user to retrieve messages for. + device_id: The ID of the device to retrieve to-device messages for. + from_stream_id: The lower boundary of stream id to filter with (exclusive). + to_stream_id: The upper boundary of stream id to filter with (inclusive). + limit: A limit on the number of to-device messages returned. Returns: A tuple containing: - * A list of messages for the device. - * The max stream token of these messages. There may be more to retrieve - if the given limit was reached. + * A list of to-device messages within the given stream id range intended for + the given user / device combo. + * The last-processed stream ID. Subsequent calls of this function with the + same device should pass this value as 'from_stream_id'. 
""" - has_changed = self._device_inbox_stream_cache.has_entity_changed( - user_id, last_stream_id + ( + user_id_device_id_to_messages, + last_processed_stream_id, + ) = await self._get_device_messages( + user_ids=[user_id], + device_id=device_id, + from_stream_id=from_stream_id, + to_stream_id=to_stream_id, + limit=limit, ) - if not has_changed: - return [], current_stream_id - def get_new_messages_for_device_txn(txn): - sql = ( - "SELECT stream_id, message_json FROM device_inbox" - " WHERE user_id = ? AND device_id = ?" - " AND ? < stream_id AND stream_id <= ?" - " ORDER BY stream_id ASC" - " LIMIT ?" + if not user_id_device_id_to_messages: + # There were no messages! + return [], to_stream_id + + # Extract the messages, no need to return the user and device ID again + to_device_messages = user_id_device_id_to_messages.get((user_id, device_id), []) + + return to_device_messages, last_processed_stream_id + + async def _get_device_messages( + self, + user_ids: Collection[str], + from_stream_id: int, + to_stream_id: int, + device_id: Optional[str] = None, + limit: Optional[int] = None, + ) -> Tuple[Dict[Tuple[str, str], List[JsonDict]], int]: + """ + Retrieve pending to-device messages for a collection of user devices. + + Only to-device messages with stream ids between the given boundaries + (from < X <= to) are returned. + + Note that a stream ID can be shared by multiple copies of the same message with + different recipient devices. Stream IDs are only unique in the context of a single + user ID / device ID pair. Thus, applying a limit (of messages to return) when working + with a sliding window of stream IDs is only possible when querying messages of a + single user device. + + Finally, note that device IDs are not unique across users. + + Args: + user_ids: The user IDs to filter device messages by. + from_stream_id: The lower boundary of stream id to filter with (exclusive). + to_stream_id: The upper boundary of stream id to filter with (inclusive). + device_id: A device ID to query to-device messages for. If not provided, to-device + messages from all device IDs for the given user IDs will be queried. May not be + provided if `user_ids` contains more than one entry. + limit: The maximum number of to-device messages to return. Can only be used when + passing a single user ID / device ID tuple. + + Returns: + A tuple containing: + * A dict of (user_id, device_id) -> list of to-device messages + * The last-processed stream ID. If this is less than `to_stream_id`, then + there may be more messages to retrieve. If `limit` is not set, then this + is always equal to 'to_stream_id'. + """ + if not user_ids: + logger.warning("No users provided upon querying for device IDs") + return {}, to_stream_id + + # Prevent a query for one user's device also retrieving another user's device with + # the same device ID (device IDs are not unique across users). + if len(user_ids) > 1 and device_id is not None: + raise AssertionError( + "Programming error: 'device_id' cannot be supplied to " + "_get_device_messages when >1 user_id has been provided" ) - txn.execute( - sql, (user_id, device_id, last_stream_id, current_stream_id, limit) + + # A limit can only be applied when querying for a single user ID / device ID tuple. + # See the docstring of this function for more details. 
+        if limit is not None and device_id is None:
+            raise AssertionError(
+                "Programming error: _get_device_messages was passed 'limit' "
+                "without a specific user_id/device_id"
             )
 
-            messages = []
-            stream_pos = current_stream_id
+        user_ids_to_query: Set[str] = set()
+        device_ids_to_query: Set[str] = set()
+
+        # Note that a device ID could be an empty str
+        if device_id is not None:
+            # If a device ID was passed, use it to filter results.
+            # Otherwise, device IDs will be derived from the given collection of user IDs.
+            device_ids_to_query.add(device_id)
+
+        # Determine which users have devices with pending messages
+        for user_id in user_ids:
+            if self._device_inbox_stream_cache.has_entity_changed(
+                user_id, from_stream_id
+            ):
+                # This user has new messages sent to them. Query messages for them
+                user_ids_to_query.add(user_id)
+
+        def get_device_messages_txn(txn: LoggingTransaction):
+            # Build a query to select messages from any of the given devices that
+            # are between the given stream id bounds.
+
+            # If a list of device IDs was not provided, retrieve all device IDs
+            # for the given users. We explicitly do not query hidden devices, as
+            # hidden devices should not receive to-device messages.
+            # Note that this is more efficient than just dropping `device_id` from the query,
+            # since device_inbox has an index on `(user_id, device_id, stream_id)`
+            if not device_ids_to_query:
+                user_device_dicts = self.db_pool.simple_select_many_txn(
+                    txn,
+                    table="devices",
+                    column="user_id",
+                    iterable=user_ids_to_query,
+                    keyvalues={"hidden": False},
+                    retcols=("device_id",),
+                )
 
-            for row in txn:
-                stream_pos = row[0]
-                messages.append(db_to_json(row[1]))
+                device_ids_to_query.update(
+                    {row["device_id"] for row in user_device_dicts}
+                )
 
-            # If the limit was not reached we know that there's no more data for this
-            # user/device pair up to current_stream_id.
-            if len(messages) < limit:
-                stream_pos = current_stream_id
+            if not device_ids_to_query:
+                # We've ended up with no devices to query.
+                return {}, to_stream_id
 
-            return messages, stream_pos
+            # We include both user IDs and device IDs in this query, as we have an index
+            # (device_inbox_user_stream_id) for them.
+            user_id_many_clause_sql, user_id_many_clause_args = make_in_list_sql_clause(
+                self.database_engine, "user_id", user_ids_to_query
+            )
+            (
+                device_id_many_clause_sql,
+                device_id_many_clause_args,
+            ) = make_in_list_sql_clause(
+                self.database_engine, "device_id", device_ids_to_query
+            )
+
+            sql = f"""
+                SELECT stream_id, user_id, device_id, message_json FROM device_inbox
+                WHERE {user_id_many_clause_sql}
+                AND {device_id_many_clause_sql}
+                AND ? < stream_id AND stream_id <= ?
+                ORDER BY stream_id ASC
+            """
+            sql_args = (
+                *user_id_many_clause_args,
+                *device_id_many_clause_args,
+                from_stream_id,
+                to_stream_id,
+            )
+
+            # If a limit was provided, limit the data retrieved from the database
+            if limit is not None:
+                sql += "LIMIT ?"
+                sql_args += (limit,)
+
+            txn.execute(sql, sql_args)
+
+            # Create and fill a dictionary of (user ID, device ID) -> list of messages
+            # intended for each device.
+            last_processed_stream_pos = to_stream_id
+            recipient_device_to_messages: Dict[Tuple[str, str], List[JsonDict]] = {}
+            for row in txn:
+                last_processed_stream_pos = row[0]
+                recipient_user_id = row[1]
+                recipient_device_id = row[2]
+                message_dict = db_to_json(row[3])
+
+                # Store the device details
+                recipient_device_to_messages.setdefault(
+                    (recipient_user_id, recipient_device_id), []
+                ).append(message_dict)
+
+            if limit is not None and txn.rowcount == limit:
+                # We ended up bumping up against the message limit. There may be more messages
+                # to retrieve. Return what we have, as well as the last stream position that
+                # was processed.
+                #
+                # The caller is expected to set this as the lower (exclusive) bound
+                # for the next query of this device.
+                return recipient_device_to_messages, last_processed_stream_pos
+
+            # The limit was not reached, thus we know that recipient_device_to_messages
+            # contains all to-device messages for the given device and stream id range.
+            #
+            # We return to_stream_id, which the caller should then provide as the lower
+            # (exclusive) bound on the next query of this device.
+            return recipient_device_to_messages, to_stream_id
 
         return await self.db_pool.runInteraction(
-            "get_new_messages_for_device", get_new_messages_for_device_txn
+            "get_device_messages", get_device_messages_txn
         )
 
     @trace
diff --git a/synapse/storage/schema/main/delta/68/02_msc2409_add_device_id_appservice_stream_type.sql b/synapse/storage/schema/main/delta/68/02_msc2409_add_device_id_appservice_stream_type.sql
new file mode 100644
index 000000000000..bbf0af53110f
--- /dev/null
+++ b/synapse/storage/schema/main/delta/68/02_msc2409_add_device_id_appservice_stream_type.sql
@@ -0,0 +1,21 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Add a column to track the to_device stream id that this application
+-- service has caught up to.
+
+-- NULL indicates that this appservice has never received any to_device messages. This
+-- can be used, for example, to avoid sending a huge dump of messages at startup.
+ALTER TABLE application_services_state ADD COLUMN to_device_stream_id BIGINT;
\ No newline at end of file
diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py
index 55f0899bae7d..8fb6687f89eb 100644
--- a/tests/appservice/test_scheduler.py
+++ b/tests/appservice/test_scheduler.py
@@ -11,23 +11,29 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
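To make the pagination contract of `get_messages_for_device` above concrete: a caller draining a device's inbox keeps feeding the returned stream id back in as the exclusive lower bound. A hedged sketch (the helper below is illustrative and not part of this patch; only the store method and its signature come from the diff above):

    async def drain_device_inbox(
        store, user_id: str, device_id: str, from_stream_id: int, to_stream_id: int
    ) -> list:
        # Fetch every pending message in the range (from_stream_id, to_stream_id].
        messages = []
        while from_stream_id < to_stream_id:
            # The second return value is the last-processed stream id; it equals
            # to_stream_id once the limit is no longer being hit.
            batch, from_stream_id = await store.get_messages_for_device(
                user_id, device_id, from_stream_id, to_stream_id, limit=100
            )
            messages.extend(batch)
        return messages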
+from typing import TYPE_CHECKING from unittest.mock import Mock from twisted.internet import defer from synapse.appservice import ApplicationServiceState from synapse.appservice.scheduler import ( + ApplicationServiceScheduler, _Recoverer, - _ServiceQueuer, _TransactionController, ) from synapse.logging.context import make_deferred_yieldable +from synapse.server import HomeServer +from synapse.util import Clock from tests import unittest from tests.test_utils import simple_async_mock from ..utils import MockClock +if TYPE_CHECKING: + from twisted.internet.testing import MemoryReactor + class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase): def setUp(self): @@ -58,7 +64,10 @@ def test_single_service_up_txn_sent(self): self.successResultOf(defer.ensureDeferred(self.txnctrl.send(service, events))) self.store.create_appservice_txn.assert_called_once_with( - service=service, events=events, ephemeral=[] # txn made and saved + service=service, + events=events, + ephemeral=[], + to_device_messages=[], # txn made and saved ) self.assertEquals(0, len(self.txnctrl.recoverers)) # no recoverer made txn.complete.assert_called_once_with(self.store) # txn completed @@ -79,7 +88,10 @@ def test_single_service_down(self): self.successResultOf(defer.ensureDeferred(self.txnctrl.send(service, events))) self.store.create_appservice_txn.assert_called_once_with( - service=service, events=events, ephemeral=[] # txn made and saved + service=service, + events=events, + ephemeral=[], + to_device_messages=[], # txn made and saved ) self.assertEquals(0, txn.send.call_count) # txn not sent though self.assertEquals(0, txn.complete.call_count) # or completed @@ -102,7 +114,7 @@ def test_single_service_up_txn_not_sent(self): self.successResultOf(defer.ensureDeferred(self.txnctrl.send(service, events))) self.store.create_appservice_txn.assert_called_once_with( - service=service, events=events, ephemeral=[] + service=service, events=events, ephemeral=[], to_device_messages=[] ) self.assertEquals(1, self.recoverer_fn.call_count) # recoverer made self.assertEquals(1, self.recoverer.recover.call_count) # and invoked @@ -189,38 +201,41 @@ def take_txn(*args, **kwargs): self.callback.assert_called_once_with(self.recoverer) -class ApplicationServiceSchedulerQueuerTestCase(unittest.TestCase): - def setUp(self): +class ApplicationServiceSchedulerQueuerTestCase(unittest.HomeserverTestCase): + def prepare(self, reactor: "MemoryReactor", clock: Clock, hs: HomeServer): + self.scheduler = ApplicationServiceScheduler(hs) self.txn_ctrl = Mock() self.txn_ctrl.send = simple_async_mock() - self.queuer = _ServiceQueuer(self.txn_ctrl, MockClock()) + + # Replace instantiated _TransactionController instances with our Mock + self.scheduler.txn_ctrl = self.txn_ctrl + self.scheduler.queuer.txn_ctrl = self.txn_ctrl def test_send_single_event_no_queue(self): # Expect the event to be sent immediately. 
service = Mock(id=4) event = Mock() - self.queuer.enqueue_event(service, event) - self.txn_ctrl.send.assert_called_once_with(service, [event], []) + self.scheduler.enqueue_for_appservice(service, events=[event]) + self.txn_ctrl.send.assert_called_once_with(service, [event], [], []) def test_send_single_event_with_queue(self): d = defer.Deferred() - self.txn_ctrl.send = Mock( - side_effect=lambda x, y, z: make_deferred_yieldable(d) - ) + self.txn_ctrl.send = Mock(return_value=make_deferred_yieldable(d)) service = Mock(id=4) event = Mock(event_id="first") event2 = Mock(event_id="second") event3 = Mock(event_id="third") # Send an event and don't resolve it just yet. - self.queuer.enqueue_event(service, event) + self.scheduler.enqueue_for_appservice(service, events=[event]) # Send more events: expect send() to NOT be called multiple times. - self.queuer.enqueue_event(service, event2) - self.queuer.enqueue_event(service, event3) - self.txn_ctrl.send.assert_called_with(service, [event], []) + # (call enqueue_for_appservice multiple times deliberately) + self.scheduler.enqueue_for_appservice(service, events=[event2]) + self.scheduler.enqueue_for_appservice(service, events=[event3]) + self.txn_ctrl.send.assert_called_with(service, [event], [], []) self.assertEquals(1, self.txn_ctrl.send.call_count) # Resolve the send event: expect the queued events to be sent d.callback(service) - self.txn_ctrl.send.assert_called_with(service, [event2, event3], []) + self.txn_ctrl.send.assert_called_with(service, [event2, event3], [], []) self.assertEquals(2, self.txn_ctrl.send.call_count) def test_multiple_service_queues(self): @@ -238,23 +253,23 @@ def test_multiple_service_queues(self): send_return_list = [srv_1_defer, srv_2_defer] - def do_send(x, y, z): + def do_send(*args, **kwargs): return make_deferred_yieldable(send_return_list.pop(0)) self.txn_ctrl.send = Mock(side_effect=do_send) # send events for different ASes and make sure they are sent - self.queuer.enqueue_event(srv1, srv_1_event) - self.queuer.enqueue_event(srv1, srv_1_event2) - self.txn_ctrl.send.assert_called_with(srv1, [srv_1_event], []) - self.queuer.enqueue_event(srv2, srv_2_event) - self.queuer.enqueue_event(srv2, srv_2_event2) - self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event], []) + self.scheduler.enqueue_for_appservice(srv1, events=[srv_1_event]) + self.scheduler.enqueue_for_appservice(srv1, events=[srv_1_event2]) + self.txn_ctrl.send.assert_called_with(srv1, [srv_1_event], [], []) + self.scheduler.enqueue_for_appservice(srv2, events=[srv_2_event]) + self.scheduler.enqueue_for_appservice(srv2, events=[srv_2_event2]) + self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event], [], []) # make sure callbacks for a service only send queued events for THAT # service srv_2_defer.callback(srv2) - self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event2], []) + self.txn_ctrl.send.assert_called_with(srv2, [srv_2_event2], [], []) self.assertEquals(3, self.txn_ctrl.send.call_count) def test_send_large_txns(self): @@ -262,7 +277,7 @@ def test_send_large_txns(self): srv_2_defer = defer.Deferred() send_return_list = [srv_1_defer, srv_2_defer] - def do_send(x, y, z): + def do_send(*args, **kwargs): return make_deferred_yieldable(send_return_list.pop(0)) self.txn_ctrl.send = Mock(side_effect=do_send) @@ -270,67 +285,65 @@ def do_send(x, y, z): service = Mock(id=4, name="service") event_list = [Mock(name="event%i" % (i + 1)) for i in range(200)] for event in event_list: - self.queuer.enqueue_event(service, event) + 
self.scheduler.enqueue_for_appservice(service, [event], []) # Expect the first event to be sent immediately. - self.txn_ctrl.send.assert_called_with(service, [event_list[0]], []) + self.txn_ctrl.send.assert_called_with(service, [event_list[0]], [], []) srv_1_defer.callback(service) # Then send the next 100 events - self.txn_ctrl.send.assert_called_with(service, event_list[1:101], []) + self.txn_ctrl.send.assert_called_with(service, event_list[1:101], [], []) srv_2_defer.callback(service) # Then the final 99 events - self.txn_ctrl.send.assert_called_with(service, event_list[101:], []) + self.txn_ctrl.send.assert_called_with(service, event_list[101:], [], []) self.assertEquals(3, self.txn_ctrl.send.call_count) def test_send_single_ephemeral_no_queue(self): # Expect the event to be sent immediately. service = Mock(id=4, name="service") event_list = [Mock(name="event")] - self.queuer.enqueue_ephemeral(service, event_list) - self.txn_ctrl.send.assert_called_once_with(service, [], event_list) + self.scheduler.enqueue_for_appservice(service, ephemeral=event_list) + self.txn_ctrl.send.assert_called_once_with(service, [], event_list, []) def test_send_multiple_ephemeral_no_queue(self): # Expect the event to be sent immediately. service = Mock(id=4, name="service") event_list = [Mock(name="event1"), Mock(name="event2"), Mock(name="event3")] - self.queuer.enqueue_ephemeral(service, event_list) - self.txn_ctrl.send.assert_called_once_with(service, [], event_list) + self.scheduler.enqueue_for_appservice(service, ephemeral=event_list) + self.txn_ctrl.send.assert_called_once_with(service, [], event_list, []) def test_send_single_ephemeral_with_queue(self): d = defer.Deferred() - self.txn_ctrl.send = Mock( - side_effect=lambda x, y, z: make_deferred_yieldable(d) - ) + self.txn_ctrl.send = Mock(return_value=make_deferred_yieldable(d)) service = Mock(id=4) event_list_1 = [Mock(event_id="event1"), Mock(event_id="event2")] event_list_2 = [Mock(event_id="event3"), Mock(event_id="event4")] event_list_3 = [Mock(event_id="event5"), Mock(event_id="event6")] # Send an event and don't resolve it just yet. - self.queuer.enqueue_ephemeral(service, event_list_1) + self.scheduler.enqueue_for_appservice(service, ephemeral=event_list_1) # Send more events: expect send() to NOT be called multiple times. - self.queuer.enqueue_ephemeral(service, event_list_2) - self.queuer.enqueue_ephemeral(service, event_list_3) - self.txn_ctrl.send.assert_called_with(service, [], event_list_1) + self.scheduler.enqueue_for_appservice(service, ephemeral=event_list_2) + self.scheduler.enqueue_for_appservice(service, ephemeral=event_list_3) + self.txn_ctrl.send.assert_called_with(service, [], event_list_1, []) self.assertEquals(1, self.txn_ctrl.send.call_count) # Resolve txn_ctrl.send d.callback(service) # Expect the queued events to be sent - self.txn_ctrl.send.assert_called_with(service, [], event_list_2 + event_list_3) + self.txn_ctrl.send.assert_called_with( + service, [], event_list_2 + event_list_3, [] + ) self.assertEquals(2, self.txn_ctrl.send.call_count) def test_send_large_txns_ephemeral(self): d = defer.Deferred() - self.txn_ctrl.send = Mock( - side_effect=lambda x, y, z: make_deferred_yieldable(d) - ) + self.txn_ctrl.send = Mock(return_value=make_deferred_yieldable(d)) # Expect the event to be sent immediately. 
service = Mock(id=4, name="service") first_chunk = [Mock(name="event%i" % (i + 1)) for i in range(100)] second_chunk = [Mock(name="event%i" % (i + 101)) for i in range(50)] event_list = first_chunk + second_chunk - self.queuer.enqueue_ephemeral(service, event_list) - self.txn_ctrl.send.assert_called_once_with(service, [], first_chunk) + self.scheduler.enqueue_for_appservice(service, ephemeral=event_list) + self.txn_ctrl.send.assert_called_once_with(service, [], first_chunk, []) d.callback(service) - self.txn_ctrl.send.assert_called_with(service, [], second_chunk) + self.txn_ctrl.send.assert_called_with(service, [], second_chunk, []) self.assertEquals(2, self.txn_ctrl.send.call_count) diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index d6f14e2dba79..fe57ff267119 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -1,4 +1,4 @@ -# Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2015-2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,18 +12,23 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Dict, Iterable, List, Optional from unittest.mock import Mock from twisted.internet import defer +import synapse.rest.admin +import synapse.storage +from synapse.appservice import ApplicationService from synapse.handlers.appservice import ApplicationServicesHandler +from synapse.rest.client import login, receipts, room, sendtodevice from synapse.types import RoomStreamToken +from synapse.util.stringutils import random_string -from tests.test_utils import make_awaitable +from tests import unittest +from tests.test_utils import make_awaitable, simple_async_mock from tests.utils import MockClock -from .. 
import unittest - class AppServiceHandlerTestCase(unittest.TestCase): """Tests the ApplicationServicesHandler.""" @@ -36,6 +41,9 @@ def setUp(self): hs.get_datastore.return_value = self.mock_store self.mock_store.get_received_ts.return_value = make_awaitable(0) self.mock_store.set_appservice_last_pos.return_value = make_awaitable(None) + self.mock_store.set_appservice_stream_type_pos.return_value = make_awaitable( + None + ) hs.get_application_service_api.return_value = self.mock_as_api hs.get_application_service_scheduler.return_value = self.mock_scheduler hs.get_clock.return_value = MockClock() @@ -63,8 +71,8 @@ def test_notify_interested_services(self): ] self.handler.notify_interested_services(RoomStreamToken(None, 1)) - self.mock_scheduler.submit_event_for_as.assert_called_once_with( - interested_service, event + self.mock_scheduler.enqueue_for_appservice.assert_called_once_with( + interested_service, events=[event] ) def test_query_user_exists_unknown_user(self): @@ -261,7 +269,6 @@ def test_notify_interested_services_ephemeral(self): """ interested_service = self._mkservice(is_interested=True) services = [interested_service] - self.mock_store.get_app_services.return_value = services self.mock_store.get_type_stream_id_for_appservice.return_value = make_awaitable( 579 @@ -275,10 +282,10 @@ def test_notify_interested_services_ephemeral(self): self.handler.notify_interested_services_ephemeral( "receipt_key", 580, ["@fakerecipient:example.com"] ) - self.mock_scheduler.submit_ephemeral_events_for_as.assert_called_once_with( - interested_service, [event] + self.mock_scheduler.enqueue_for_appservice.assert_called_once_with( + interested_service, ephemeral=[event] ) - self.mock_store.set_type_stream_id_for_appservice.assert_called_once_with( + self.mock_store.set_appservice_stream_type_pos.assert_called_once_with( interested_service, "read_receipt", 580, @@ -305,7 +312,10 @@ def test_notify_interested_services_ephemeral_out_of_order(self): self.handler.notify_interested_services_ephemeral( "receipt_key", 580, ["@fakerecipient:example.com"] ) - self.mock_scheduler.submit_ephemeral_events_for_as.assert_not_called() + # This method will be called, but with an empty list of events + self.mock_scheduler.enqueue_for_appservice.assert_called_once_with( + interested_service, ephemeral=[] + ) def _mkservice(self, is_interested, protocols=None): service = Mock() @@ -321,3 +331,252 @@ def _mkservice_alias(self, is_interested_in_alias): service.token = "mock_service_token" service.url = "mock_service_url" return service + + +class ApplicationServicesHandlerSendEventsTestCase(unittest.HomeserverTestCase): + """ + Tests that the ApplicationServicesHandler sends events to application + services correctly. + """ + + servlets = [ + synapse.rest.admin.register_servlets_for_client_rest_resource, + login.register_servlets, + room.register_servlets, + sendtodevice.register_servlets, + receipts.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + # Mock the ApplicationServiceScheduler's _TransactionController's send method so that + # we can track any outgoing ephemeral events + self.send_mock = simple_async_mock() + hs.get_application_service_handler().scheduler.txn_ctrl.send = self.send_mock + + # Mock out application services, and allow defining our own in tests + self._services: List[ApplicationService] = [] + self.hs.get_datastore().get_app_services = Mock(return_value=self._services) + + # A user on the homeserver. 
+        self.local_user_device_id = "local_device"
+        self.local_user = self.register_user("local_user", "password")
+        self.local_user_token = self.login(
+            "local_user", "password", self.local_user_device_id
+        )
+
+        # A user on the homeserver which lies within an appservice's exclusive user namespace.
+        self.exclusive_as_user_device_id = "exclusive_as_device"
+        self.exclusive_as_user = self.register_user("exclusive_as_user", "password")
+        self.exclusive_as_user_token = self.login(
+            "exclusive_as_user", "password", self.exclusive_as_user_device_id
+        )
+
+    @unittest.override_config(
+        {"experimental_features": {"msc2409_to_device_messages_enabled": True}}
+    )
+    def test_application_services_receive_local_to_device(self):
+        """
+        Test that when a user sends a to-device message to another user
+        that is within an application service's exclusive user namespace,
+        the application service will receive it.
+        """
+        interested_appservice = self._register_application_service(
+            namespaces={
+                ApplicationService.NS_USERS: [
+                    {
+                        "regex": "@exclusive_as_user:.+",
+                        "exclusive": True,
+                    }
+                ],
+            },
+        )
+
+        # Have local_user send a to-device message to exclusive_as_user
+        message_content = {"some_key": "some really interesting value"}
+        chan = self.make_request(
+            "PUT",
+            "/_matrix/client/r0/sendToDevice/m.room_key_request/3",
+            content={
+                "messages": {
+                    self.exclusive_as_user: {
+                        self.exclusive_as_user_device_id: message_content
+                    }
+                }
+            },
+            access_token=self.local_user_token,
+        )
+        self.assertEqual(chan.code, 200, chan.result)
+
+        # Have exclusive_as_user send a to-device message to local_user
+        chan = self.make_request(
+            "PUT",
+            "/_matrix/client/r0/sendToDevice/m.room_key_request/4",
+            content={
+                "messages": {
+                    self.local_user: {self.local_user_device_id: message_content}
+                }
+            },
+            access_token=self.exclusive_as_user_token,
+        )
+        self.assertEqual(chan.code, 200, chan.result)
+
+        # Check that our application service - the one interested in exclusive_as_user - received
+        # the to-device message as part of an AS transaction.
+        # Only the local_user -> exclusive_as_user to-device message should have been forwarded to the AS.
+        #
+        # The uninterested application service should not have been notified at all.
+        self.send_mock.assert_called_once()
+        service, _events, _ephemeral, to_device_messages = self.send_mock.call_args[0]
+
+        # Assert that this was the same to-device message that local_user sent
+        self.assertEqual(service, interested_appservice)
+        self.assertEqual(to_device_messages[0]["type"], "m.room_key_request")
+        self.assertEqual(to_device_messages[0]["sender"], self.local_user)
+
+        # Additional fields 'to_user_id' and 'to_device_id' specifically for
+        # to-device messages via the AS API
+        self.assertEqual(to_device_messages[0]["to_user_id"], self.exclusive_as_user)
+        self.assertEqual(
+            to_device_messages[0]["to_device_id"], self.exclusive_as_user_device_id
+        )
+        self.assertEqual(to_device_messages[0]["content"], message_content)
+
+    @unittest.override_config(
+        {"experimental_features": {"msc2409_to_device_messages_enabled": True}}
+    )
+    def test_application_services_receive_bursts_of_to_device(self):
+        """
+        Test that when a user sends >100 to-device messages at once, any
+        interested application services will receive them in separate transactions.
+
+        Also tests that uninterested application services do not receive messages.
+ """ + # Register two application services with exclusive interest in a user + interested_appservices = [] + for _ in range(2): + appservice = self._register_application_service( + namespaces={ + ApplicationService.NS_USERS: [ + { + "regex": "@exclusive_as_user:.+", + "exclusive": True, + } + ], + }, + ) + interested_appservices.append(appservice) + + # ...and an application service which does not have any user interest. + self._register_application_service() + + to_device_message_content = { + "some key": "some interesting value", + } + + # We need to send a large burst of to-device messages. We also would like to + # include them all in the same application service transaction so that we can + # test large transactions. + # + # To do this, we can send a single to-device message to many user devices at + # once. + # + # We insert number_of_messages - 1 messages into the database directly. We'll then + # send a final to-device message to the real device, which will also kick off + # an AS transaction (as just inserting messages into the DB won't). + number_of_messages = 150 + fake_device_ids = [f"device_{num}" for num in range(number_of_messages - 1)] + messages = { + self.exclusive_as_user: { + device_id: to_device_message_content for device_id in fake_device_ids + } + } + + # Create a fake device per message. We can't send to-device messages to + # a device that doesn't exist. + self.get_success( + self.hs.get_datastore().db_pool.simple_insert_many( + desc="test_application_services_receive_burst_of_to_device", + table="devices", + keys=("user_id", "device_id"), + values=[ + ( + self.exclusive_as_user, + device_id, + ) + for device_id in fake_device_ids + ], + ) + ) + + # Seed the device_inbox table with our fake messages + self.get_success( + self.hs.get_datastore().add_messages_to_device_inbox(messages, {}) + ) + + # Now have local_user send a final to-device message to exclusive_as_user. All unsent + # to-device messages should be sent to any application services + # interested in exclusive_as_user. + chan = self.make_request( + "PUT", + "/_matrix/client/r0/sendToDevice/m.room_key_request/4", + content={ + "messages": { + self.exclusive_as_user: { + self.exclusive_as_user_device_id: to_device_message_content + } + } + }, + access_token=self.local_user_token, + ) + self.assertEqual(chan.code, 200, chan.result) + + self.send_mock.assert_called() + + # Count the total number of to-device messages that were sent out per-service. + # Ensure that we only sent to-device messages to interested services, and that + # each interested service received the full count of to-device messages. + service_id_to_message_count: Dict[str, int] = {} + + for call in self.send_mock.call_args_list: + service, _events, _ephemeral, to_device_messages = call[0] + + # Check that this was made to an interested service + self.assertIn(service, interested_appservices) + + # Add to the count of messages for this application service + service_id_to_message_count.setdefault(service.id, 0) + service_id_to_message_count[service.id] += len(to_device_messages) + + # Assert that each interested service received the full count of messages + for count in service_id_to_message_count.values(): + self.assertEqual(count, number_of_messages) + + def _register_application_service( + self, + namespaces: Optional[Dict[str, Iterable[Dict]]] = None, + ) -> ApplicationService: + """ + Register a new application service, with the given namespaces of interest. 
+ + Args: + namespaces: A dictionary containing any user, room or alias namespaces that + the application service is interested in. + + Returns: + The registered application service. + """ + # Create an application service + appservice = ApplicationService( + token=random_string(10), + hostname="example.com", + id=random_string(10), + sender="@as:example.com", + rate_limited=False, + namespaces=namespaces, + supports_ephemeral=True, + ) + + # Register the application service + self._services.append(appservice) + + return appservice diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index 329490caad53..ddcb7f554918 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -266,7 +266,9 @@ def test_create_appservice_txn_first( service = Mock(id=self.as_list[0]["id"]) events = cast(List[EventBase], [Mock(event_id="e1"), Mock(event_id="e2")]) txn = self.get_success( - defer.ensureDeferred(self.store.create_appservice_txn(service, events, [])) + defer.ensureDeferred( + self.store.create_appservice_txn(service, events, [], []) + ) ) self.assertEquals(txn.id, 1) self.assertEquals(txn.events, events) @@ -280,7 +282,9 @@ def test_create_appservice_txn_older_last_txn( self.get_success(self._set_last_txn(service.id, 9643)) # AS is falling behind self.get_success(self._insert_txn(service.id, 9644, events)) self.get_success(self._insert_txn(service.id, 9645, events)) - txn = self.get_success(self.store.create_appservice_txn(service, events, [])) + txn = self.get_success( + self.store.create_appservice_txn(service, events, [], []) + ) self.assertEquals(txn.id, 9646) self.assertEquals(txn.events, events) self.assertEquals(txn.service, service) @@ -291,7 +295,9 @@ def test_create_appservice_txn_up_to_date_last_txn( service = Mock(id=self.as_list[0]["id"]) events = cast(List[EventBase], [Mock(event_id="e1"), Mock(event_id="e2")]) self.get_success(self._set_last_txn(service.id, 9643)) - txn = self.get_success(self.store.create_appservice_txn(service, events, [])) + txn = self.get_success( + self.store.create_appservice_txn(service, events, [], []) + ) self.assertEquals(txn.id, 9644) self.assertEquals(txn.events, events) self.assertEquals(txn.service, service) @@ -313,7 +319,9 @@ def test_create_appservice_txn_up_fuzzing( self.get_success(self._insert_txn(self.as_list[2]["id"], 10, events)) self.get_success(self._insert_txn(self.as_list[3]["id"], 9643, events)) - txn = self.get_success(self.store.create_appservice_txn(service, events, [])) + txn = self.get_success( + self.store.create_appservice_txn(service, events, [], []) + ) self.assertEquals(txn.id, 9644) self.assertEquals(txn.events, events) self.assertEquals(txn.service, service) @@ -481,10 +489,10 @@ def test_get_type_stream_id_for_appservice_invalid_type(self) -> None: ValueError, ) - def test_set_type_stream_id_for_appservice(self) -> None: + def test_set_appservice_stream_type_pos(self) -> None: read_receipt_value = 1024 self.get_success( - self.store.set_type_stream_id_for_appservice( + self.store.set_appservice_stream_type_pos( self.service, "read_receipt", read_receipt_value ) ) @@ -494,7 +502,7 @@ def test_set_type_stream_id_for_appservice(self) -> None: self.assertEqual(result, read_receipt_value) self.get_success( - self.store.set_type_stream_id_for_appservice( + self.store.set_appservice_stream_type_pos( self.service, "presence", read_receipt_value ) ) @@ -503,9 +511,9 @@ def test_set_type_stream_id_for_appservice(self) -> None: ) self.assertEqual(result, read_receipt_value) - def 
test_set_type_stream_id_for_appservice_invalid_type(self) -> None: + def test_set_appservice_stream_type_pos_invalid_type(self) -> None: self.get_failure( - self.store.set_type_stream_id_for_appservice(self.service, "foobar", 1024), + self.store.set_appservice_stream_type_pos(self.service, "foobar", 1024), ValueError, ) From 5c16c3302125f2a5d8a006ad42792f22b320c737 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 1 Feb 2022 16:23:55 +0100 Subject: [PATCH 17/19] Allow modules to retrieve server and worker names (#11868) Fixes #10701 --- changelog.d/11868.feature | 1 + synapse/module_api/__init__.py | 26 ++++++++++++++++++++++++++ 2 files changed, 27 insertions(+) create mode 100644 changelog.d/11868.feature diff --git a/changelog.d/11868.feature b/changelog.d/11868.feature new file mode 100644 index 000000000000..3723dac4ea10 --- /dev/null +++ b/changelog.d/11868.feature @@ -0,0 +1 @@ +Allow modules to retrieve the current instance's server name and worker name. diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 788b2e47d523..29fbc73c971d 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -401,6 +401,32 @@ def email_app_name(self) -> str: """ return self._hs.config.email.email_app_name + @property + def server_name(self) -> str: + """The server name for the local homeserver. + + Added in Synapse v1.53.0. + """ + return self._server_name + + @property + def worker_name(self) -> Optional[str]: + """The name of the worker this specific instance is running as per the + "worker_name" configuration setting, or None if it's the main process. + + Added in Synapse v1.53.0. + """ + return self._hs.config.worker.worker_name + + @property + def worker_app(self) -> Optional[str]: + """The name of the worker app this specific instance is running as per the + "worker_app" configuration setting, or None if it's the main process. + + Added in Synapse v1.53.0. + """ + return self._hs.config.worker.worker_app + async def get_userinfo_by_id(self, user_id: str) -> Optional[UserInfo]: """Get user info by user_id From 3f72c2a322af0d9d45452aa9c78723793bea2432 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 1 Feb 2022 17:45:13 +0000 Subject: [PATCH 18/19] Convert `ApplicationServiceTestCase` to use `simple_async_mock` (#11880) --- changelog.d/11880.misc | 1 + tests/appservice/test_appservice.py | 19 +++++++++---------- 2 files changed, 10 insertions(+), 10 deletions(-) create mode 100644 changelog.d/11880.misc diff --git a/changelog.d/11880.misc b/changelog.d/11880.misc new file mode 100644 index 000000000000..8125947b2a17 --- /dev/null +++ b/changelog.d/11880.misc @@ -0,0 +1 @@ +Convert `ApplicationServiceTestCase` to use `simple_async_mock`. 
\ No newline at end of file diff --git a/tests/appservice/test_appservice.py b/tests/appservice/test_appservice.py index ba2a2bfd64ad..07d8105f41d6 100644 --- a/tests/appservice/test_appservice.py +++ b/tests/appservice/test_appservice.py @@ -19,6 +19,7 @@ from synapse.appservice import ApplicationService, Namespace from tests import unittest +from tests.test_utils import simple_async_mock def _regex(regex: str, exclusive: bool = True) -> Namespace: @@ -91,10 +92,10 @@ def test_regex_alias_match(self): self.service.namespaces[ApplicationService.NS_ALIASES].append( _regex("#irc_.*:matrix.org") ) - self.store.get_aliases_for_room.return_value = defer.succeed( + self.store.get_aliases_for_room = simple_async_mock( ["#irc_foobar:matrix.org", "#athing:matrix.org"] ) - self.store.get_users_in_room.return_value = defer.succeed([]) + self.store.get_users_in_room = simple_async_mock([]) self.assertTrue( ( yield defer.ensureDeferred( @@ -144,10 +145,10 @@ def test_regex_alias_no_match(self): self.service.namespaces[ApplicationService.NS_ALIASES].append( _regex("#irc_.*:matrix.org") ) - self.store.get_aliases_for_room.return_value = defer.succeed( + self.store.get_aliases_for_room = simple_async_mock( ["#xmpp_foobar:matrix.org", "#athing:matrix.org"] ) - self.store.get_users_in_room.return_value = defer.succeed([]) + self.store.get_users_in_room = simple_async_mock([]) self.assertFalse( ( yield defer.ensureDeferred( @@ -163,10 +164,8 @@ def test_regex_multiple_matches(self): ) self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) self.event.sender = "@irc_foobar:matrix.org" - self.store.get_aliases_for_room.return_value = defer.succeed( - ["#irc_barfoo:matrix.org"] - ) - self.store.get_users_in_room.return_value = defer.succeed([]) + self.store.get_aliases_for_room = simple_async_mock(["#irc_barfoo:matrix.org"]) + self.store.get_users_in_room = simple_async_mock([]) self.assertTrue( ( yield defer.ensureDeferred( @@ -191,10 +190,10 @@ def test_interested_in_self(self): def test_member_list_match(self): self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) # Note that @irc_fo:here is the AS user. - self.store.get_users_in_room.return_value = defer.succeed( + self.store.get_users_in_room = simple_async_mock( ["@alice:here", "@irc_fo:here", "@bob:here"] ) - self.store.get_aliases_for_room.return_value = defer.succeed([]) + self.store.get_aliases_for_room = simple_async_mock([]) self.event.sender = "@xmpp_foobar:matrix.org" self.assertTrue( From 513913cc6ba77833082178b7d2b2f0565daf3c99 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 2 Feb 2022 09:59:55 +0000 Subject: [PATCH 19/19] Expose the registered device ID from the `register_appservice_user` test helper. (#11615) --- changelog.d/11615.misc | 1 + tests/handlers/test_user_directory.py | 6 ++++-- tests/rest/client/test_room_batch.py | 2 +- tests/storage/test_user_directory.py | 4 +++- tests/unittest.py | 9 +++++---- 5 files changed, 14 insertions(+), 8 deletions(-) create mode 100644 changelog.d/11615.misc diff --git a/changelog.d/11615.misc b/changelog.d/11615.misc new file mode 100644 index 000000000000..0aa953650493 --- /dev/null +++ b/changelog.d/11615.misc @@ -0,0 +1 @@ +Expose the registered device ID from the `register_appservice_user` test helper. 
\ No newline at end of file
diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py
index 70c621b825f4..482c90ef68c5 100644
--- a/tests/handlers/test_user_directory.py
+++ b/tests/handlers/test_user_directory.py
@@ -169,7 +169,9 @@ def test_excludes_appservices_user(self) -> None:
         # Register an AS user.
         user = self.register_user("user", "pass")
         token = self.login(user, "pass")
-        as_user = self.register_appservice_user("as_user_potato", self.appservice.token)
+        as_user, _ = self.register_appservice_user(
+            "as_user_potato", self.appservice.token
+        )
 
         # Join the AS user to rooms owned by the normal user.
         public, private = self._create_rooms_and_inject_memberships(
@@ -388,7 +390,7 @@ def test_handle_local_profile_change_with_deactivated_user(self) -> None:
 
     def test_handle_local_profile_change_with_appservice_user(self) -> None:
         # create user
-        as_user_id = self.register_appservice_user(
+        as_user_id, _ = self.register_appservice_user(
             "as_user_alice", self.appservice.token
         )
 
diff --git a/tests/rest/client/test_room_batch.py b/tests/rest/client/test_room_batch.py
index 721454c1875f..e9f870403536 100644
--- a/tests/rest/client/test_room_batch.py
+++ b/tests/rest/client/test_room_batch.py
@@ -89,7 +89,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.clock = clock
         self.storage = hs.get_storage()
 
-        self.virtual_user_id = self.register_appservice_user(
+        self.virtual_user_id, _ = self.register_appservice_user(
             "as_user_potato", self.appservice.token
         )
 
diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py
index 7f5b28aed8c4..48f1e9d8411b 100644
--- a/tests/storage/test_user_directory.py
+++ b/tests/storage/test_user_directory.py
@@ -341,7 +341,9 @@ def test_population_excludes_appservice_user(self) -> None:
         # Register an AS user.
         user = self.register_user("user", "pass")
         token = self.login(user, "pass")
-        as_user = self.register_appservice_user("as_user_potato", self.appservice.token)
+        as_user, _ = self.register_appservice_user(
+            "as_user_potato", self.appservice.token
+        )
 
         # Join the AS user to rooms owned by the normal user.
         public, private = self._create_rooms_and_inject_memberships(
diff --git a/tests/unittest.py b/tests/unittest.py
index 14318483674e..6fc617601a40 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -620,18 +620,19 @@ def register_appservice_user(
         self,
         username: str,
         appservice_token: str,
-    ) -> str:
+    ) -> Tuple[str, str]:
         """Register an appservice user as an application service.
         Requires the client-facing registration API be registered.
 
         Args:
             username: the user to be registered by an application service.
-                Should be a full username, i.e. ""@localpart:hostname" as opposed to just "localpart"
+                Should NOT be a full username, i.e. just "localpart" as opposed to "@localpart:hostname"
             appservice_token: the access token for that application service.
 
         Raises: if the request to '/register' does not return 200 OK.
 
-        Returns: the MXID of the new user.
+        Returns:
+            The MXID of the new user and the device ID of the new user's first device.
         """
         channel = self.make_request(
             "POST",
@@ -643,7 +644,7 @@ def register_appservice_user(
             access_token=appservice_token,
         )
         self.assertEqual(channel.code, 200, channel.json_body)
-        return channel.json_body["user_id"]
+        return channel.json_body["user_id"], channel.json_body["device_id"]
 
     def login(
         self,