This repository has been archived by the owner on Apr 26, 2024. It is now read-only.

Precompute joined hosts and store in Redis #9198

Merged

merged 20 commits on Jan 26, 2021
Changes from 7 commits
1 change: 1 addition & 0 deletions changelog.d/9198.misc
@@ -0,0 +1 @@
Precompute joined hosts and store in Redis.
12 changes: 11 additions & 1 deletion stubs/txredisapi.pyi
@@ -16,10 +16,20 @@
"""Contains *incomplete* type hints for txredisapi.
"""

from typing import List, Optional, Type, Union
from typing import Any, Awaitable, List, Optional, Type, Union

class RedisProtocol:
def publish(self, channel: str, message: bytes): ...
def set(
self,
key: str,
value: Any,
expire: Optional[int] = None,
pexpire: Optional[int] = None,
only_if_not_exists: bool = False,
only_if_exists: bool = False,
) -> Awaitable[None]: ...
def get(self, key: str) -> Awaitable[Any]: ...

class SubscriberProtocol:
def __init__(self, *args, **kwargs): ...
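For orientation, a minimal sketch of how the stubbed methods above are awaited in practice; conn stands in for an already-connected txredisapi connection (hypothetical setup, not part of this diff):

# Sketch only: `conn` is assumed to be a connected txredisapi connection
# whose methods match the stub above (set/get/publish return awaitables).
async def demo(conn) -> None:
    await conn.set("cache_v1:demo:key", "value", expire=3600)  # SET with a 1h expiry
    cached = await conn.get("cache_v1:demo:key")               # -> "value"
    await conn.publish("demo_channel", b"hello")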
2 changes: 2 additions & 0 deletions synapse/config/_base.pyi
@@ -18,6 +18,7 @@ from synapse.config import (
    password_auth_providers,
    push,
    ratelimiting,
    redis,
    registration,
    repository,
    room_directory,
@@ -79,6 +80,7 @@ class RootConfig:
    roomdirectory: room_directory.RoomDirectoryConfig
    thirdpartyrules: third_party_event_rules.ThirdPartyRulesConfig
    tracer: tracer.TracerConfig
    redis: redis.RedisConfig

    config_classes: List = ...
    def __init__(self) -> None: ...
45 changes: 30 additions & 15 deletions synapse/federation/sender/__init__.py
@@ -71,6 +71,7 @@ def __init__(self, hs: "synapse.server.HomeServer"):
        self.server_name = hs.hostname

        self.store = hs.get_datastore()
        self.state_store = hs.get_datastores().state
        self.state = hs.get_state_handler()

        self.clock = hs.get_clock()
@@ -142,6 +143,8 @@ def __init__(self, hs: "synapse.server.HomeServer"):
            self._wake_destinations_needing_catchup,
        )

        self._external_cache = hs.get_external_cache()

    def _get_per_destination_queue(self, destination: str) -> PerDestinationQueue:
        """Get or create a PerDestinationQueue for the given destination

@@ -197,22 +200,34 @@ async def handle_event(event: EventBase) -> None:
            if not event.internal_metadata.should_proactively_send():
                return

-            try:
-                # Get the state from before the event.
-                # We need to make sure that this is the state from before
-                # the event and not from after it.
-                # Otherwise if the last member on a server in a room is
-                # banned then it won't receive the event because it won't
-                # be in the room after the ban.
-                destinations = await self.state.get_hosts_in_room_at_events(
-                    event.room_id, event_ids=event.prev_event_ids()
-                )
-            except Exception:
-                logger.exception(
-                    "Failed to calculate hosts in room for event: %s",
-                    event.event_id,
-                )
-                return
+            # We check the external cache for the destinations, which is
+            # stored per state group.
+            destinations = None
+            sg = await self._external_cache.get_cache(
+                "event_to_prev_state_group", event.event_id
+            )
+            if sg:
+                destinations = await self._external_cache.get_cache(
+                    "get_joined_hosts", str(sg)
+                )
+
+            if destinations is None:
+                try:
+                    # Get the state from before the event.
+                    # We need to make sure that this is the state from before
+                    # the event and not from after it.
+                    # Otherwise if the last member on a server in a room is
+                    # banned then it won't receive the event because it won't
+                    # be in the room after the ban.
+                    destinations = await self.state.get_hosts_in_room_at_events(
+                        event.room_id, event_ids=event.prev_event_ids()
+                    )
+                except Exception:
+                    logger.exception(
+                        "Failed to calculate hosts in room for event: %s",
+                        event.event_id,
+                    )
+                    return

            destinations = {
                d
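Condensed, the new lookup order in handle_event is: event ID -> prev state group -> joined hosts, with the old state computation kept as a fallback. A rough sketch of that flow, using hypothetical stand-ins for the Synapse objects:

from typing import Optional, Set

async def lookup_destinations(cache, state, event) -> Optional[Set[str]]:
    # Fast path: two cheap external-cache lookups, event -> state group -> hosts.
    sg = await cache.get_cache("event_to_prev_state_group", event.event_id)
    if sg:
        hosts = await cache.get_cache("get_joined_hosts", str(sg))
        if hosts is not None:
            return set(hosts)
    # Slow path: compute hosts from the state *before* the event, so a server
    # whose last member was just banned still receives the ban event.
    return await state.get_hosts_in_room_at_events(
        event.room_id, event_ids=event.prev_event_ids()
    )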
40 changes: 40 additions & 0 deletions synapse/handlers/message.py
@@ -432,6 +432,9 @@ def __init__(self, hs: "HomeServer"):

        self._ephemeral_events_enabled = hs.config.enable_ephemeral_messages

        self._external_cache = hs.get_external_cache()
        self._use_external_cache = hs.config.redis.redis_enabled
Review comment (Member): I wonder if this should be "Redis enabled and more than one federation sender"?

Review comment (Member): Or, at the very least, if the federation sending is done by the main process it probably doesn't make sense to use this?

Reply (Member, author): Maybe, though it is worth noting that it's a) probably cheaper to calculate it on the event creator, as you'll already have the state in the cache, and b) means that you can run multiple event creators to split up the work without having to run multiple federation senders.

Review comment (Member): Ah, right, that makes sense. I guess it is a reasonable assumption that anyone running workers would be running at least one of an event creator or federation sender?

Reply (Member, author): Yeah, and it's really not the end of the world if they aren't.


    async def create_event(
        self,
        requester: Requester,
@@ -939,6 +942,43 @@ async def handle_new_client_event(

        await self.action_generator.handle_push_actions_for_event(event, context)

        if self._use_external_cache:
            # We precalculate the joined hosts at the event, when using Redis,
            # so that external federation senders don't have to recalculate it
            # themselves.
            #
            # We actually store two mappings, event ID -> prev state group,
            # state group -> joined hosts, which is much more space efficient
            # than event ID -> joined hosts.
            #
            # Note: We have to cache event ID -> prev state group, as we don't
            # store that in the DB.
            #
            # Note: We always set the state group -> joined hosts cache, even if
            # we already set it, so that the expiry time is reset.

            state_entry = await self.state.resolve_state_groups_for_events(
                event.room_id, event_ids=event.prev_event_ids()
            )

            if state_entry.state_group:
                joined_hosts = await self.store.get_joined_hosts(
                    event.room_id, state_entry
                )

                await self._external_cache.set_cache(
                    "event_to_prev_state_group",
                    event.event_id,
                    state_entry.state_group,
                    expire_seconds=60 * 60,
                )
                await self._external_cache.set_cache(
                    "get_joined_hosts",
                    str(state_entry.state_group),
                    list(joined_hosts),
                    expire_seconds=60 * 60,
                )

        try:
            # If we're a worker we need to hit out to the master.
            writer_instance = self._events_shard_config.get_instance(event.room_id)
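To make the space-efficiency note above concrete: consecutive events in a room usually share a prev state group, so the joined-hosts list is stored once per state group instead of once per event. Illustrative cache contents only (hypothetical values):

# Three events created off the same state group share one joined-hosts entry.
event_to_prev_state_group = {
    "$event_a": 1234,
    "$event_b": 1234,
    "$event_c": 1234,
}
get_joined_hosts = {
    "1234": ["matrix.org", "example.com", "chat.example.org"],  # stored once
}

# A federation sender then needs only two lookups per event:
sg = event_to_prev_state_group["$event_b"]
hosts = get_joined_hosts[str(sg)]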
112 changes: 112 additions & 0 deletions synapse/replication/tcp/external_cache.py
@@ -0,0 +1,112 @@
# -*- coding: utf-8 -*-
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from typing import TYPE_CHECKING, Any, Optional

from prometheus_client import Counter

from synapse.util import json_decoder, json_encoder

if TYPE_CHECKING:
    from synapse.server import HomeServer

set_counter = Counter(
    "synapse_external_cache_set",
    "Number of times we set a cache",
    labelnames=["cache_name"],
)

get_counter = Counter(
    "synapse_external_cache_get",
    "Number of times we get a cache",
    labelnames=["cache_name", "hit"],
)


logger = logging.getLogger(__name__)


class ExternalCache:
    """A cache backed by an external Redis. Does nothing if no Redis is
    configured.
    """

    def __init__(self, hs: "HomeServer"):
        self._redis_connection = None

        if hs.config.redis.redis_enabled:
            from synapse.replication.tcp.redis import lazyConnection

            logger.info(
                "Connecting to redis (host=%r port=%r) for external cache",
                hs.config.redis_host,
                hs.config.redis_port,
            )

            # First create the connection for sending commands.
            self._redis_connection = lazyConnection(
                reactor=hs.get_reactor(),
                host=hs.config.redis_host,
                port=hs.config.redis_port,
                password=hs.config.redis.redis_password,
                reconnect=True,
            )

    def _get_redis_key(self, cache_name: str, key: str) -> str:
        return "cache_v1:%s:%s" % (cache_name, key)

    async def set_cache(
Review comment (Member): Our other caches seem to name these methods simply set and get.

I also wonder if instead of expire_seconds we should do expiry_ms, as the ExpiringCache does.

Review comment (Member): Annoyingly the ExpiringCache uses expiry_ms (with a y). I don't know if it matters much.

Also, do you have thoughts on set/get vs. set_cache/get_cache?

Reply (Member, author): I KNEW THERE WAS SOMETHING I FORGOT TO DO

        self, cache_name: str, key: str, value: Any, expire_seconds: int
    ) -> None:
        """Add the key/value to the named cache, with the expiry time given.
        """

        if self._redis_connection is None:
            return

        set_counter.labels(cache_name).inc()

        # txredisapi requires the value to be string, bytes or numbers, so we
        # encode stuff in JSON.
        encoded_value = json_encoder.encode(value)

        logger.debug("Caching %s %s: %r", cache_name, key, encoded_value)

        return await self._redis_connection.set(
            self._get_redis_key(cache_name, key), encoded_value, expire=expire_seconds,
        )

    async def get_cache(self, cache_name: str, key: str) -> Optional[Any]:
        """Look up a key/value in the named cache.
        """

        if self._redis_connection is None:
            return None

        result = await self._redis_connection.get(self._get_redis_key(cache_name, key))

        logger.debug("Got cache result %s %s: %r", cache_name, key, result)

        get_counter.labels(cache_name, result is not None).inc()

        if not result:
            return None

        # For some reason the integers get magically converted back to integers
Review comment (Member): At a quick glance, txredisapi encodes integers as strings (starting with $<len>\r\n) rather than ints (starting with :) for the Redis protocol, so the JSON encoding of an int is the same as the int itself.

Possibly to counter that bit of hackery, txredisapi also has a convertNumbers setting, which is enabled for us, which will make it try to convert things-that-look-like-numbers back into numbers. That sounds like a CPU sink and a thing we should turn off...

Reply (Member, author): Oh wow, that's quite special. Yeah, I agree we should turn it off.

Reply (Member, author): I'll do it in a separate PR though, I think.

        if isinstance(result, int):
            return result

        return json_decoder.decode(result)
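The isinstance(result, int) check above guards against the txredisapi behaviour described in the review thread: the JSON encoding of a bare int is just its digits, and the client's convertNumbers option turns a number-like reply back into an int before get_cache ever sees it. A quick demonstration of that round trip:

import json

encoded = json.dumps(1234)   # "1234": the JSON form of an int is its own digits
# With convertNumbers enabled, the Redis client hands the reply back as an
# int again, so get_cache receives 1234 rather than the string "1234".
assert int(encoded) == 1234  # effectively what the client's conversion does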
5 changes: 5 additions & 0 deletions synapse/server.py
@@ -103,6 +103,7 @@
from synapse.push.action_generator import ActionGenerator
from synapse.push.pusherpool import PusherPool
from synapse.replication.tcp.client import ReplicationDataHandler
from synapse.replication.tcp.external_cache import ExternalCache
from synapse.replication.tcp.handler import ReplicationCommandHandler
from synapse.replication.tcp.resource import ReplicationStreamer
from synapse.replication.tcp.streams import STREAMS_MAP, Stream
@@ -716,6 +717,10 @@ def get_module_api(self) -> ModuleApi:
    def get_account_data_handler(self) -> AccountDataHandler:
        return AccountDataHandler(self)

    @cache_in_self
    def get_external_cache(self) -> ExternalCache:
        return ExternalCache(self)

    async def remove_pusher(self, app_id: str, push_key: str, user_id: str):
        return await self.get_pusherpool().remove_pusher(app_id, push_key, user_id)

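The @cache_in_self decorator memoizes the getter, so every caller shares a single ExternalCache per HomeServer. A simplified sketch of the pattern; Synapse's real decorator also guards against re-entrant construction, so this is just the core idea:

import functools

def cache_in_self(getter):
    """Memoize a zero-argument getter on its instance (simplified sketch)."""
    attr = "_cached" + getter.__name__

    @functools.wraps(getter)
    def wrapper(self):
        if not hasattr(self, attr):
            setattr(self, attr, getter(self))
        return getattr(self, attr)

    return wrapper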
41 changes: 25 additions & 16 deletions tests/replication/_base.py
@@ -212,6 +212,9 @@ def setUp(self):
        # Fake in-memory Redis server that servers can connect to.
        self._redis_server = FakeRedisPubSubServer()

        # We may have an attempt to connect to redis for the external cache already.
        self.connect_any_redis_attempts()

        store = self.hs.get_datastore()
        self.database_pool = store.db_pool

@@ -401,25 +404,23 @@ def connect_any_redis_attempts(self):
        fake one.
        """
        clients = self.reactor.tcpClients
-        self.assertEqual(len(clients), 1)
-        (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
-        self.assertEqual(host, "localhost")
-        self.assertEqual(port, 6379)
+        while clients:
+            (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
+            self.assertEqual(host, "localhost")
+            self.assertEqual(port, 6379)

-        client_protocol = client_factory.buildProtocol(None)
-        server_protocol = self._redis_server.buildProtocol(None)
+            client_protocol = client_factory.buildProtocol(None)
+            server_protocol = self._redis_server.buildProtocol(None)

-        client_to_server_transport = FakeTransport(
-            server_protocol, self.reactor, client_protocol
-        )
-        client_protocol.makeConnection(client_to_server_transport)
+            client_to_server_transport = FakeTransport(
+                server_protocol, self.reactor, client_protocol
+            )
+            client_protocol.makeConnection(client_to_server_transport)

-        server_to_client_transport = FakeTransport(
-            client_protocol, self.reactor, server_protocol
-        )
-        server_protocol.makeConnection(server_to_client_transport)
-
-        return client_to_server_transport, server_to_client_transport
+            server_to_client_transport = FakeTransport(
+                client_protocol, self.reactor, server_protocol
+            )
+            server_protocol.makeConnection(server_to_client_transport)


class TestReplicationDataHandler(GenericWorkerReplicationHandler):
@@ -624,6 +625,12 @@ def handle_command(self, command, *args):
            (channel,) = args
            self._server.add_subscriber(self)
            self.send(["subscribe", channel, 1])

        # Since we use SET/GET to cache things we can safely no-op them.
        elif command == b"SET":
            self.send("OK")
        elif command == b"GET":
            self.send(None)
        else:
            raise Exception("Unknown command")

@@ -645,6 +652,8 @@ def encode(self, obj):
            # We assume bytes are just unicode strings.
            obj = obj.decode("utf-8")

        if obj is None:
            return "$-1\r\n"
        if isinstance(obj, str):
            return "${len}\r\n{str}\r\n".format(len=len(obj), str=obj)
        if isinstance(obj, int):
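For reference, the replies above follow RESP, the Redis serialization protocol: a bulk string is framed as $<len>\r\n<payload>\r\n, a missing value is the null bulk string $-1\r\n, and integers are framed as :<n>\r\n. A small sketch of the framing rules the fake server relies on:

# RESP framing (sketch) matching the fake server's encode() above.
def encode_bulk(s: str) -> str:
    return "${}\r\n{}\r\n".format(len(s), s)

assert encode_bulk("OK") == "$2\r\nOK\r\n"  # reply sent for a no-op'd SET
NULL_BULK = "$-1\r\n"                       # reply sent for a no-op'd GET (cache miss)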