diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md index aab7be7f329..5f73753758d 100644 --- a/RELEASE_NOTES.md +++ b/RELEASE_NOTES.md @@ -1,3 +1,18 @@ +#### 1.4.10 August 20th 2020 #### +**Maintenance Release for Akka.NET 1.4** + +Akka.NET v1.4.10 includes some minor bug fixes and some major feature additions to Akka.Persistence.Query: + +* [Fixed: Akka.Persistence.Sql SqlJournal caching all Persistence Ids in memory does not scale](https://github.com/akkadotnet/akka.net/issues/4524) +* [Fixed: Akka.Persistence.Query PersistenceIds queries now work across all nodes, rather than only the local node](https://github.com/akkadotnet/akka.net/pull/4531) +* [Akka.Actor: Akka.Pattern: Pass in clearer error message into OpenCircuitException](https://github.com/akkadotnet/akka.net/issues/4314) +* [Akka.Persistence: Allow persistence plugins to customize JournalPerfSpec's default values](https://github.com/akkadotnet/akka.net/pull/4544) +* [Akka.Remote: Racy RemotingTerminator actor crash in Akka.Remote initialization](https://github.com/akkadotnet/akka.net/issues/4530) + +To see the [full set of fixes in Akka.NET v1.4.10, please see the milestone on Github](https://github.com/akkadotnet/akka.net/milestone/41). + + + #### 1.4.9 July 21 2020 #### **Maintenance Release for Akka.NET 1.4** @@ -10,7 +25,7 @@ Akka.NET v1.4.9 features some important bug fixes for Akka.NET v1.4: * [Akka.Cluster: Cluster event listener that logs all events](https://github.com/akkadotnet/akka.net/pull/4502) * [Akka.Cluster.Tools.Singleton.ClusterSingletonManager bug: An element with the same key but a different value already exists](https://github.com/akkadotnet/akka.net/issues/4474) -To see the [full set of fixes in Akka.NET v1.4.8, please see the milestone on Github](https://github.com/akkadotnet/akka.net/milestone/40). +To see the [full set of fixes in Akka.NET v1.4.9, please see the milestone on Github](https://github.com/akkadotnet/akka.net/milestone/40). | COMMITS | LOC+ | LOC- | AUTHOR | | --- | --- | --- | --- | diff --git a/docs/articles/actors/routers.md b/docs/articles/actors/routers.md index 0ae2fe33b05..2eb5e78ebc7 100644 --- a/docs/articles/actors/routers.md +++ b/docs/articles/actors/routers.md @@ -606,6 +606,15 @@ As with the `PoisonPill` messasge, there is a distinction between killing a rout See [Noisy on Purpose: Kill the Actor](xref:receive-actor-api#killing-an-actor) for more details on how the `Kill` message works. +### Management Messages + +The following messages can be sent to a router to manage its routees; a short usage sketch follows this list. + +- `Akka.Routing.GetRoutees` The router actor will respond with an `Akka.Routing.Routees` message, which contains a list of the routees it currently uses. +- `Akka.Routing.AddRoutee` The router actor will add the provided routee to its collection of routees. +- `Akka.Routing.RemoveRoutee` The router actor will remove the provided routee from its collection of routees. +- `Akka.Routing.AdjustPoolSize` The pool router actor will add or remove the given number of routees to or from its collection of routees.
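As a rough usage sketch (not part of this PR), the management messages above could be exercised against a pool router as follows; the `system` instance and the `Worker` actor class are assumed to exist already:

```csharp
using Akka.Actor;
using Akka.Routing;

// a minimal sketch, assuming an existing ActorSystem `system` and a
// user-defined actor class `Worker`; run inside an async method
var router = system.ActorOf(
    Props.Create<Worker>().WithRouter(new RoundRobinPool(5)), "workers");

// ask the router which routees it currently uses
var routees = await router.Ask<Routees>(new GetRoutees());

// grow the pool by two routees at runtime
router.Tell(new AdjustPoolSize(2));
```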
+ ## Advanced ### How Routing is Designed within Akka.NET diff --git a/docs/articles/clustering/cluster-overview.md b/docs/articles/clustering/cluster-overview.md index d020da19222..ce6b4b79116 100644 --- a/docs/articles/clustering/cluster-overview.md +++ b/docs/articles/clustering/cluster-overview.md @@ -13,7 +13,7 @@ The best way to begin introducing Akka.Cluster is with brief overview of what it - Makes it easy to create peer-to-peer networks of Akka.NET applications - Allows peers to automatically discover new nodes and removed dead ones automatically with no configuration changes - Allows user-defined classes to subscribe to notifications about changes in the availability of nodes in the cluster -- Introduces the concept of "roles" to distinguish different Akka.NET applications within a cluster; and +- Introduces the concept of "roles" to distinguish different Akka.NET applications within a cluster - Allows you to create clustered routers, which are an extension of the built-in Akka.NET routers, except that clustered routers automatically adjust their routees list based on node availability. ## Benefits of Akka.Cluster @@ -38,7 +38,7 @@ Akka.Cluster lends itself naturally to [high availability](https://en.wikipedia. To put it bluntly, you should use clustering in any scenario where you have some or all of the following conditions: - A sizable traffic load -- Non-trivial to perform +- A non-trivial task to perform - An expectation of fast response times - The need for elastic scaling (e.g. bursty workloads) - A microservices architecture diff --git a/docs/articles/clustering/cluster-sharding.md b/docs/articles/clustering/cluster-sharding.md index 931190ddb40..e8b9b337a7f 100644 --- a/docs/articles/clustering/cluster-sharding.md +++ b/docs/articles/clustering/cluster-sharding.md @@ -82,7 +82,7 @@ To reduce memory consumption, you may decide to stop entities after some period ### Automatic Passivation -The entities can be configured to be automatically passivated if they haven't received a message for a while using the `akka.cluster.sharding.passivate-idle-entity-after` setting, or by explicitly setting `ClusterShardingSettings.PassivateIdleEntityAfter` to a suitable time to keep the actor alive. Note that only messages sent through sharding are counted, so direct messages to the `ActorRef` of the actor or messages that it sends to itself are not counted as activity. Passivation can be disabled by setting `akka.cluster.sharding.passivate-idle-entity-after = off`. It is always disabled if @ref:[Remembering Entities](#remembering-entities) is enabled. +The entities can be configured to be automatically passivated if they haven't received a message for a while using the `akka.cluster.sharding.passivate-idle-entity-after` setting, or by explicitly setting `ClusterShardingSettings.PassivateIdleEntityAfter` to a suitable time to keep the actor alive. Note that only messages sent through sharding are counted, so direct messages to the `ActorRef` of the actor or messages that it sends to itself are not counted as activity. Passivation can be disabled by setting `akka.cluster.sharding.passivate-idle-entity-after = off`. It is always disabled if [Remembering Entities](#remembering-entities) is enabled. 
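A minimal sketch of wiring the passivation setting above into an `ActorSystem`; the system name and the two-hour timeout are arbitrary examples, not values taken from this PR:

```csharp
using Akka.Actor;
using Akka.Configuration;

// passivate entities that have not received a message through sharding for 2 hours;
// use `off` instead of a duration to disable automatic passivation
var config = ConfigurationFactory.ParseString(@"
    akka.cluster.sharding.passivate-idle-entity-after = 2h
");

var system = ActorSystem.Create("sharded-system", config);
```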
## Remembering entities diff --git a/docs/articles/persistence/persistence-query.md b/docs/articles/persistence/persistence-query.md index f181d7e6127..d466e80f8c1 100644 --- a/docs/articles/persistence/persistence-query.md +++ b/docs/articles/persistence/persistence-query.md @@ -48,9 +48,9 @@ Akka persistence query comes with a number of query interfaces built in and sugg The predefined queries are: -**AllPersistenceIdsQuery and CurrentPersistenceIdsQuery** +**AllPersistenceIdsQuery (PersistenceIds) and CurrentPersistenceIdsQuery** -`AllPersistenceIds` is used for retrieving all persistenceIds of all persistent actors. +`AllPersistenceIds`, or `PersistenceIds` in `IPersistenceIdsQuery`, is used to retrieve all cached persistenceIds of all persistent actors inside the `ActorSystem` where the journal actor is instantiated. Note that since this is a cached value, this query will only report persistenceIds that have been passed to the journal since the journal was created (a local cache). ```csharp var queries = PersistenceQuery.Get(actorSystem) @@ -157,6 +157,42 @@ As you can see, we can use all the usual stream combinators available from Akka If your usage does not require a live stream, you can use the `CurrentEventsByTag` query. +**AllEvents and CurrentAllEvents** + +`AllEvents` allows replaying and monitoring all events regardless of which `PersistenceId` they are associated with. The goal of this query is to allow replaying and monitoring of all events stored inside a journal, regardless of their source. Please refer to your read journal plugin's documentation to find out if and how it is supported. + +The stream is not completed when it reaches the last recorded event; it continues to push new events as they are persisted. The corresponding query that completes once it reaches the last event persisted at the time the query is called is provided by `CurrentAllEvents`. + +The write journal notifies the query side as soon as new events are created; there is no periodic polling or batching involved in this query. + +> [!NOTE] +> A very important thing to keep in mind when using queries spanning multiple `PersistenceIds`, such as `AllEvents`, is that the order in which the events appear in the stream is rarely guaranteed (or stable between materializations). + +Journals may choose to opt for strict ordering of the events, and should then document explicitly what kind of ordering guarantee they provide - for example "ordered by timestamp ascending, independently of `PersistenceId`" is easy to achieve on relational databases, yet may be hard to implement efficiently on plain key-value datastores. + +In the example below we query all events which have been stored inside the journal. + +```csharp +// assuming journal is able to work with numeric offsets we can: +Source<EventEnvelope, NotUsed> allEvents = readJournal.AllEvents(offset: 0L); + +// replay the first 10 things stored: +Task<ImmutableHashSet<object>> first10Things = allEvents + .Select(c => c.Event) + .Take(10) // cancels the query stream after pulling 10 elements + .RunAggregate( + ImmutableHashSet<object>.Empty, + (acc, c) => acc.Add(c), + mat); + +// start another query, from the known offset +var next10Things = readJournal.AllEvents(offset: 10); +``` + +As you can see, we can use all the usual stream combinators available from Akka Streams on the resulting query stream, including for example taking the first 10 and cancelling the stream.
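If only the events already stored are needed, a `CurrentAllEvents` sketch could look like the following, reusing the `readJournal` and `mat` values from the example above:

```csharp
// completes after delivering every event persisted before the query was issued,
// instead of staying live like AllEvents
var storedEvents = readJournal.CurrentAllEvents(Offset.NoOffset())
    .Select(e => e.Event)
    .RunForeach(Console.WriteLine, mat);
```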
It is worth pointing out that the built-in `AllEvents` query has an optionally supported offset parameter (of type `long`) which the journals can use to implement resumable streams. For example, a journal may be able to use a WHERE clause to begin the read starting from a specific row, or in a datastore that is able to order events by insertion time it could treat the `long` as a timestamp and select only older events. + +If your usage does not require a live stream, you can use the `CurrentAllEvents` query. + ### Materialized values of queries Journals are able to provide additional information related to a query by exposing materialized values, which are a feature of Akka Streams that allows to expose additional values at stream materialization time. diff --git a/src/Akka.sln.DotSettings b/src/Akka.sln.DotSettings index 41a06c2ddfc..c5ab510a684 100644 --- a/src/Akka.sln.DotSettings +++ b/src/Akka.sln.DotSettings @@ -24,4 +24,5 @@ True True True + \ No newline at end of file diff --git a/src/common.props b/src/common.props index 12cf95ec503..87e938eb2a1 100644 --- a/src/common.props +++ b/src/common.props @@ -10,11 +10,11 @@ 2.4.1 - 16.6.1 + 16.7.0 0.9.16 12.0.3 2.0.1 - 3.12.3 + 3.13.0 netcoreapp3.1 net461 netstandard2.0 diff --git a/src/contrib/cluster/Akka.DistributedData.LightningDB/Akka.DistributedData.LightningDB.csproj b/src/contrib/cluster/Akka.DistributedData.LightningDB/Akka.DistributedData.LightningDB.csproj index 8d4691f4650..4b61c74aa43 100644 --- a/src/contrib/cluster/Akka.DistributedData.LightningDB/Akka.DistributedData.LightningDB.csproj +++ b/src/contrib/cluster/Akka.DistributedData.LightningDB/Akka.DistributedData.LightningDB.csproj @@ -16,7 +16,7 @@ - + diff --git a/src/contrib/cluster/Akka.DistributedData.LightningDB/LmdbDurableStore.cs b/src/contrib/cluster/Akka.DistributedData.LightningDB/LmdbDurableStore.cs index 60453f1722b..d32adc707fd 100644 --- a/src/contrib/cluster/Akka.DistributedData.LightningDB/LmdbDurableStore.cs +++ b/src/contrib/cluster/Akka.DistributedData.LightningDB/LmdbDurableStore.cs @@ -18,6 +18,7 @@ using Akka.DistributedData.Internal; using LightningDB; using System.Diagnostics; +using System.Linq; namespace Akka.DistributedData.LightningDB { @@ -145,8 +146,8 @@ protected override void PostStop() if(IsDbInitialized) { var (env, db, _) = Lmdb; - try { db.Dispose(); } catch { } - try { env.Dispose(); } catch { } + try { db?.Dispose(); } catch { } + try { env?.Dispose(); } catch { } } } @@ -196,26 +197,24 @@ private void Init() return; } - var l = Lmdb; + var (environment, db, _) = Lmdb; var t0 = Stopwatch.StartNew(); - using (var tx = l.env.BeginTransaction(TransactionBeginFlags.ReadOnly)) - using (var cursor = tx.CreateCursor(l.db)) + using (var tx = environment.BeginTransaction(TransactionBeginFlags.ReadOnly)) + using (var cursor = tx.CreateCursor(db)) { try { - var n = 0; - var builder = ImmutableDictionary.CreateBuilder(); - foreach (var entry in cursor) + var data = cursor.AsEnumerable().Select((x, i) + => { + var (key, value) = x; + return new KeyValuePair( + Encoding.UTF8.GetString(key.CopyToNewArray()), + (DurableDataEnvelope)_serializer.FromBinary(value.CopyToNewArray(), _manifest)); + }).ToImmutableDictionary(); + + if (data.Count > 0) { - n++; - var key = Encoding.UTF8.GetString(entry.Key.CopyToNewArray()); - var envelope = (DurableDataEnvelope)_serializer.FromBinary(entry.Value.CopyToNewArray(), _manifest); - builder.Add(key, envelope); - } - - if (builder.Count > 0) - { - var loadData = new LoadData(builder.ToImmutable()); + var loadData = 
new LoadData(data); Sender.Tell(loadData); } @@ -223,7 +222,7 @@ private void Init() t0.Stop(); if (_log.IsDebugEnabled) - _log.Debug($"Load all of [{n}] entries took [{t0.ElapsedMilliseconds}]"); + _log.Debug($"Load all of [{data.Count}] entries took [{t0.ElapsedMilliseconds}]"); Become(Active); } diff --git a/src/contrib/persistence/Akka.Persistence.Query.Sql/AllEventsPublisher.cs b/src/contrib/persistence/Akka.Persistence.Query.Sql/AllEventsPublisher.cs index 316e8729e50..ea6fd7ce3df 100644 --- a/src/contrib/persistence/Akka.Persistence.Query.Sql/AllEventsPublisher.cs +++ b/src/contrib/persistence/Akka.Persistence.Query.Sql/AllEventsPublisher.cs @@ -8,7 +8,7 @@ namespace Akka.Persistence.Query.Sql { - internal sealed class AllEventsPublisher : ActorPublisher + internal static class AllEventsPublisher { [Serializable] public sealed class Continue @@ -18,52 +18,63 @@ public sealed class Continue private Continue() { } } - public static Props Props(long fromOffset, int maxBufferSize, string writeJournalPluginId) - => Actor.Props.Create(() => new AllEventsPublisher(fromOffset, maxBufferSize, writeJournalPluginId)); - - private readonly ILoggingAdapter _log; + public static Props Props(long fromOffset, TimeSpan? refreshInterval, int maxBufferSize, string writeJournalPluginId) + { + return refreshInterval.HasValue ? + Actor.Props.Create(() => new LiveAllEventsPublisher(fromOffset, refreshInterval.Value, maxBufferSize, writeJournalPluginId)) : + Actor.Props.Create(() => new CurrentAllEventsPublisher(fromOffset, maxBufferSize, writeJournalPluginId)); + } + } - private readonly DeliveryBuffer _buffer; - private readonly IActorRef _journalRef; - private readonly int _maxBufferSize; - private bool _completed = false; + internal abstract class AbstractAllEventsPublisher : ActorPublisher + { - private readonly long _fromOffset; - private long _currentOffset; + private ILoggingAdapter _log; + protected long CurrentOffset; - public AllEventsPublisher(long fromOffset, int maxBufferSize, string writeJournalPluginId) + protected AbstractAllEventsPublisher(long fromOffset, int maxBufferSize, string writeJournalPluginId) { - _currentOffset = _fromOffset = fromOffset; - _maxBufferSize = maxBufferSize; - _buffer = new DeliveryBuffer(OnNext); - _journalRef = Persistence.Instance.Apply(Context.System).JournalFor(writeJournalPluginId); - - _log = Context.GetLogger(); + CurrentOffset = FromOffset = fromOffset; + MaxBufferSize = maxBufferSize; + Buffer = new DeliveryBuffer(OnNext); + JournalRef = Persistence.Instance.Apply(Context.System).JournalFor(writeJournalPluginId); } - private bool IsTimeForReplay => (_buffer.IsEmpty || _buffer.Length <= _maxBufferSize / 2) && !_completed; + protected ILoggingAdapter Log => _log ?? 
(_log = Context.GetLogger()); + protected IActorRef JournalRef { get; } + protected DeliveryBuffer Buffer { get; } + protected long FromOffset { get; } + protected abstract long ToOffset { get; } + protected int MaxBufferSize { get; } + protected bool IsTimeForReplay => (Buffer.IsEmpty || Buffer.Length <= MaxBufferSize / 2) && (CurrentOffset <= ToOffset); + + protected abstract void ReceiveInitialRequest(); + protected abstract void ReceiveIdleRequest(); + protected abstract void ReceiveRecoverySuccess(long highestSequenceNr); protected override bool Receive(object message) { switch (message) { case Request _: - Replay(); - return true; - case Continue _: + ReceiveInitialRequest(); return true; case Cancel _: Context.Stop(Self); return true; + case AllEventsPublisher.Continue _: + return true; default: return false; } } - private bool Idle(object message) + + protected bool Idle(object message) { switch (message) { - case Continue _: + case AllEventsPublisher.Continue _: + case NewEventAppended _: if (IsTimeForReplay) Replay(); return true; case Request _: @@ -77,76 +88,129 @@ private bool Idle(object message) } } - private void Replay() + protected void Replay() { - var limit = _maxBufferSize - _buffer.Length; - _log.Debug("replay all events request from [{0}], limit [{1}]", _currentOffset, limit); - _journalRef.Tell(new ReplayAllEvents(_currentOffset, limit, Self)); + var limit = MaxBufferSize - Buffer.Length; + Log.Debug("replay all events request from [{0}] to [{1}], limit [{2}]", CurrentOffset, ToOffset, limit); + JournalRef.Tell(new ReplayAllEvents(CurrentOffset, ToOffset, limit, Self)); Context.Become(Replaying); } - private bool Replaying( object message ) + protected bool Replaying( object message ) { switch (message) { case ReplayedEvent replayed: - _buffer.Add(new EventEnvelope( + Buffer.Add(new EventEnvelope( offset: new Sequence(replayed.Offset), persistenceId: replayed.Persistent.PersistenceId, sequenceNr: replayed.Persistent.SequenceNr, @event: replayed.Persistent.Payload)); - _currentOffset = replayed.Offset; - _buffer.DeliverBuffer(TotalDemand); + CurrentOffset = replayed.Offset; + Buffer.DeliverBuffer(TotalDemand); return true; case EventReplaySuccess success: - _log.Debug("event replay completed, currOffset [{0}]", _currentOffset); + Log.Debug("event replay completed, currOffset [{0}], highestSequenceNr [{1}]", CurrentOffset, success.HighestSequenceNr); ReceiveRecoverySuccess(success.HighestSequenceNr); return true; case EventReplayFailure failure: - _log.Debug("event replay failed, due to [{0}]", failure.Cause.Message); - _buffer.DeliverBuffer(TotalDemand); + Log.Debug("event replay failed, due to [{0}]", failure.Cause.Message); + Buffer.DeliverBuffer(TotalDemand); OnErrorThenStop(failure.Cause); return true; - case ReplayedAllEvents _: - _completed = true; - if (_buffer.IsEmpty) - OnCompleteThenStop(); - - _buffer.DeliverBuffer(TotalDemand); - return true; case Request _: - _buffer.DeliverBuffer(TotalDemand); - return true; - case Continue _: + Buffer.DeliverBuffer(TotalDemand); return true; case Cancel _: Context.Stop(Self); return true; + case AllEventsPublisher.Continue _: + case NewEventAppended _: + return true; default: return false; } } - private void ReceiveIdleRequest() + } + + internal sealed class LiveAllEventsPublisher : AbstractAllEventsPublisher + { + private readonly ICancelable _tickCancelable; + public LiveAllEventsPublisher(long fromOffset, TimeSpan refreshInterval, int maxBufferSize, string writeJournalPluginId) + : base(fromOffset, maxBufferSize, 
writeJournalPluginId) + { + _tickCancelable = Context.System.Scheduler.ScheduleTellRepeatedlyCancelable(refreshInterval, refreshInterval, Self, AllEventsPublisher.Continue.Instance, Self); + } + + protected override long ToOffset => long.MaxValue; + + protected override void PostStop() + { + _tickCancelable.Cancel(); + base.PostStop(); + } + + protected override void ReceiveInitialRequest() { - _buffer.DeliverBuffer(TotalDemand); - if (_buffer.IsEmpty && _completed) + JournalRef.Tell(SubscribeNewEvents.Instance); + Replay(); + } + + protected override void ReceiveIdleRequest() + { + Buffer.DeliverBuffer(TotalDemand); + if (Buffer.IsEmpty && CurrentOffset > ToOffset) + OnCompleteThenStop(); + } + + protected override void ReceiveRecoverySuccess(long highestSequenceNr) + { + Buffer.DeliverBuffer(TotalDemand); + if (Buffer.IsEmpty && CurrentOffset > ToOffset) + OnCompleteThenStop(); + + Context.Become(Idle); + } + } + + internal sealed class CurrentAllEventsPublisher : AbstractAllEventsPublisher + { + public CurrentAllEventsPublisher(long fromOffset, int maxBufferSize, string writeJournalPluginId) + : base(fromOffset, maxBufferSize, writeJournalPluginId) + { } + + private long _toOffset = long.MaxValue; + protected override long ToOffset => _toOffset; + + protected override void ReceiveInitialRequest() + { + Replay(); + } + + protected override void ReceiveIdleRequest() + { + Buffer.DeliverBuffer(TotalDemand); + if (Buffer.IsEmpty && CurrentOffset > ToOffset) OnCompleteThenStop(); else - Self.Tell(Continue.Instance); + Self.Tell(AllEventsPublisher.Continue.Instance); } - private void ReceiveRecoverySuccess(long highestSequenceNr) + protected override void ReceiveRecoverySuccess(long highestSequenceNr) { - _buffer.DeliverBuffer(TotalDemand); - if (_buffer.IsEmpty && _completed) + Buffer.DeliverBuffer(TotalDemand); + + if (highestSequenceNr < ToOffset) + _toOffset = highestSequenceNr; + + if (Buffer.IsEmpty && CurrentOffset >= ToOffset) OnCompleteThenStop(); else - Self.Tell(Continue.Instance); + Self.Tell(AllEventsPublisher.Continue.Instance); Context.Become(Idle); } } - } diff --git a/src/contrib/persistence/Akka.Persistence.Query.Sql/AllPersistenceIdsPublisher.cs b/src/contrib/persistence/Akka.Persistence.Query.Sql/AllPersistenceIdsPublisher.cs index d4e36826f3b..74a2eba3c8f 100644 --- a/src/contrib/persistence/Akka.Persistence.Query.Sql/AllPersistenceIdsPublisher.cs +++ b/src/contrib/persistence/Akka.Persistence.Query.Sql/AllPersistenceIdsPublisher.cs @@ -5,64 +5,193 @@ // //----------------------------------------------------------------------- +using System; using Akka.Actor; using Akka.Persistence.Sql.Common.Journal; using Akka.Streams.Actors; namespace Akka.Persistence.Query.Sql { - internal sealed class AllPersistenceIdsPublisher : ActorPublisher + internal sealed class CurrentPersistenceIdsPublisher : ActorPublisher, IWithUnboundedStash { - public static Props Props(bool liveQuery, string writeJournalPluginId) + public static Props Props(string writeJournalPluginId) { - return Actor.Props.Create(() => new AllPersistenceIdsPublisher(liveQuery, writeJournalPluginId)); + return Actor.Props.Create(() => new CurrentPersistenceIdsPublisher(writeJournalPluginId)); } - private readonly bool _liveQuery; private readonly IActorRef _journalRef; private readonly DeliveryBuffer _buffer; - public AllPersistenceIdsPublisher(bool liveQuery, string writeJournalPluginId) + public IStash Stash { get; set; } + + public CurrentPersistenceIdsPublisher(string writeJournalPluginId) { - _liveQuery = 
liveQuery; _buffer = new DeliveryBuffer(OnNext); _journalRef = Persistence.Instance.Apply(Context.System).JournalFor(writeJournalPluginId); } - protected override bool Receive(object message) => message.Match() - .With(_ => + protected override bool Receive(object message) + { + switch (message) { - _journalRef.Tell(SubscribeAllPersistenceIds.Instance); - Become(Active); - }) - .With(_ => Context.Stop(Self)) - .WasHandled; - - private bool Active(object message) => message.Match() - .With(current => + case Request _: + _journalRef.Tell(new SelectCurrentPersistenceIds(0, Self)); + Become(Initializing); + return true; + case Cancel _: + Context.Stop(Self); + return true; + default: + return false; + } + } + + private bool Initializing(object message) + { + switch (message) { - _buffer.AddRange(current.AllPersistenceIds); - _buffer.DeliverBuffer(TotalDemand); + case CurrentPersistenceIds current: + _buffer.AddRange(current.AllPersistenceIds); + _buffer.DeliverBuffer(TotalDemand); - if (!_liveQuery && _buffer.IsEmpty) - OnCompleteThenStop(); - }) - .With(added => + if (_buffer.IsEmpty) + { + OnCompleteThenStop(); + return true; + } + + Become(Active); + Stash.UnstashAll(); + return true; + case Cancel _: + Context.Stop(Self); + return true; + default: + Stash.Stash(); + return true; + } + } + + private bool Active(object message) + { + switch (message) { - if (_liveQuery) - { - _buffer.Add(added.PersistenceId); + case Request _: _buffer.DeliverBuffer(TotalDemand); - } - }) - .With(_ => + if (_buffer.IsEmpty) + OnCompleteThenStop(); + return true; + case Cancel _: + Context.Stop(Self); + return true; + default: + return false; + } + } + } + + internal sealed class LivePersistenceIdsPublisher : ActorPublisher, IWithUnboundedStash + { + private class Continue + { + public static readonly Continue Instance = new Continue(); + + private Continue() { } + } + + public static Props Props(TimeSpan refreshInterval, string writeJournalPluginId) + { + return Actor.Props.Create(() => new LivePersistenceIdsPublisher(refreshInterval, writeJournalPluginId)); + } + + private long _lastOrderingOffset; + private readonly ICancelable _tickCancelable; + private readonly IActorRef _journalRef; + private readonly DeliveryBuffer _buffer; + + public IStash Stash { get; set; } + + public LivePersistenceIdsPublisher(TimeSpan refreshInterval, string writeJournalPluginId) + { + _tickCancelable = Context.System.Scheduler.ScheduleTellRepeatedlyCancelable( + refreshInterval, + refreshInterval, + Self, + Continue.Instance, + Self); + _buffer = new DeliveryBuffer(OnNext); + _journalRef = Persistence.Instance.Apply(Context.System).JournalFor(writeJournalPluginId); + } + + protected override void PostStop() + { + _tickCancelable.Cancel(); + base.PostStop(); + } + + protected override bool Receive(object message) + { + switch (message) { - _buffer.DeliverBuffer(TotalDemand); - if (!_liveQuery && _buffer.IsEmpty) - OnCompleteThenStop(); - }) - .With(_ => Context.Stop(Self)) - .WasHandled; + case Request _: + _journalRef.Tell(new SelectCurrentPersistenceIds(0, Self)); + Become(Initializing); + return true; + case Continue _: + return true; + case Cancel _: + Context.Stop(Self); + return true; + default: + return false; + } + } + + private bool Initializing(object message) + { + switch (message) + { + case CurrentPersistenceIds current: + _lastOrderingOffset = current.HighestOrderingNumber; + _buffer.AddRange(current.AllPersistenceIds); + _buffer.DeliverBuffer(TotalDemand); + + Become(Active); + Stash.UnstashAll(); + return true; + 
case Continue _: + return true; + case Cancel _: + Context.Stop(Self); + return true; + default: + Stash.Stash(); + return true; + } + } + + private bool Active(object message) + { + switch (message) + { + case CurrentPersistenceIds added: + _lastOrderingOffset = added.HighestOrderingNumber; + _buffer.AddRange(added.AllPersistenceIds); + _buffer.DeliverBuffer(TotalDemand); + return true; + case Request _: + _buffer.DeliverBuffer(TotalDemand); + return true; + case Continue _: + _journalRef.Tell(new SelectCurrentPersistenceIds(_lastOrderingOffset, Self)); + return true; + case Cancel _: + Context.Stop(Self); + return true; + default: + return false; + } + } } } diff --git a/src/contrib/persistence/Akka.Persistence.Query.Sql/SqlReadJournal.cs b/src/contrib/persistence/Akka.Persistence.Query.Sql/SqlReadJournal.cs index 4a937f8b389..4bf46e0b5b2 100644 --- a/src/contrib/persistence/Akka.Persistence.Query.Sql/SqlReadJournal.cs +++ b/src/contrib/persistence/Akka.Persistence.Query.Sql/SqlReadJournal.cs @@ -6,24 +6,25 @@ //----------------------------------------------------------------------- using System; +using System.Threading; using Reactive.Streams; using Akka.Actor; using Akka.Configuration; using Akka.Persistence.Journal; using Akka.Streams.Dsl; -using Akka.Util.Internal; +using Akka.Streams; namespace Akka.Persistence.Query.Sql { public class SqlReadJournal : - IReadJournal, IPersistenceIdsQuery, ICurrentPersistenceIdsQuery, IEventsByPersistenceIdQuery, ICurrentEventsByPersistenceIdQuery, IEventsByTagQuery, ICurrentEventsByTagQuery, - IAllEventsQuery + IAllEventsQuery, + ICurrentAllEventsQuery { public static string Identifier = "akka.persistence.query.journal.sql"; @@ -39,12 +40,20 @@ public static Config DefaultConfiguration() private readonly TimeSpan _refreshInterval; private readonly string _writeJournalPluginId; private readonly int _maxBufferSize; + private readonly ExtendedActorSystem _system; + + private readonly object _lock = new object(); + private IPublisher _persistenceIdsPublisher; public SqlReadJournal(ExtendedActorSystem system, Config config) { _refreshInterval = config.GetTimeSpan("refresh-interval", null); _writeJournalPluginId = config.GetString("write-plugin", null); _maxBufferSize = config.GetInt("max-buffer-size", 0); + _system = system; + + _lock = new ReaderWriterLockSlim(); + _persistenceIdsPublisher = null; } /// @@ -67,20 +76,45 @@ public SqlReadJournal(ExtendedActorSystem system, Config config) /// backend journal. /// /// - public Source PersistenceIds() => - Source.ActorPublisher(AllPersistenceIdsPublisher.Props(true, _writeJournalPluginId)) - .MapMaterializedValue(_ => NotUsed.Instance) - .Named("AllPersistenceIds") as Source; + public Source PersistenceIds() + { + lock (_lock) + { + if (_persistenceIdsPublisher is null) + { + var graph = + Source.ActorPublisher( + LivePersistenceIdsPublisher.Props( + _refreshInterval, + _writeJournalPluginId)) + .ToMaterialized(Sink.DistinctRetainingFanOutPublisher(PersistenceIdsShutdownCallback), Keep.Right); + + _persistenceIdsPublisher = graph.Run(_system.Materializer()); + } + return Source.FromPublisher(_persistenceIdsPublisher) + .MapMaterializedValue(_ => NotUsed.Instance) + .Named("AllPersistenceIds"); + } + + } + + private void PersistenceIdsShutdownCallback() + { + lock (_lock) + { + _persistenceIdsPublisher = null; + } + } /// /// Same type of query as but the stream /// is completed immediately when it reaches the end of the "result set". 
Persistent /// actors that are created after the query is completed are not included in the stream. /// - public Source CurrentPersistenceIds() => - Source.ActorPublisher(AllPersistenceIdsPublisher.Props(false, _writeJournalPluginId)) - .MapMaterializedValue(_ => NotUsed.Instance) - .Named("CurrentPersistenceIds") as Source; + public Source CurrentPersistenceIds() + => Source.ActorPublisher(CurrentPersistenceIdsPublisher.Props(_writeJournalPluginId)) + .MapMaterializedValue(_ => NotUsed.Instance) + .Named("CurrentPersistenceIds"); /// /// is used for retrieving events for a specific @@ -111,7 +145,7 @@ public Source CurrentPersistenceIds() => public Source EventsByPersistenceId(string persistenceId, long fromSequenceNr, long toSequenceNr) => Source.ActorPublisher(EventsByPersistenceIdPublisher.Props(persistenceId, fromSequenceNr, toSequenceNr, _refreshInterval, _maxBufferSize, _writeJournalPluginId)) .MapMaterializedValue(_ => NotUsed.Instance) - .Named("EventsByPersistenceId-" + persistenceId) as Source; + .Named("EventsByPersistenceId-" + persistenceId); /// /// Same type of query as but the event stream @@ -121,7 +155,7 @@ public Source EventsByPersistenceId(string persistenceId public Source CurrentEventsByPersistenceId(string persistenceId, long fromSequenceNr, long toSequenceNr) => Source.ActorPublisher(EventsByPersistenceIdPublisher.Props(persistenceId, fromSequenceNr, toSequenceNr, null, _maxBufferSize, _writeJournalPluginId)) .MapMaterializedValue(_ => NotUsed.Instance) - .Named("CurrentEventsByPersistenceId-" + persistenceId) as Source; + .Named("CurrentEventsByPersistenceId-" + persistenceId); /// /// is used for retrieving events that were marked with @@ -215,9 +249,30 @@ public Source AllEvents(Offset offset = null) throw new ArgumentException($"SqlReadJournal does not support {offset.GetType().Name} offsets"); } - return Source.ActorPublisher(AllEventsPublisher.Props(seq.Value, _maxBufferSize, _writeJournalPluginId)) + return Source.ActorPublisher(AllEventsPublisher.Props(seq.Value, _refreshInterval, _maxBufferSize, _writeJournalPluginId)) .MapMaterializedValue(_ => NotUsed.Instance) .Named("AllEvents"); } + + public Source CurrentAllEvents(Offset offset) + { + Sequence seq; + switch (offset) + { + case null: + case NoOffset _: + seq = new Sequence(0L); + break; + case Sequence s: + seq = s; + break; + default: + throw new ArgumentException($"SqlReadJournal does not support {offset.GetType().Name} offsets"); + } + + return Source.ActorPublisher(AllEventsPublisher.Props(seq.Value, null, _maxBufferSize, _writeJournalPluginId)) + .MapMaterializedValue(_ => NotUsed.Instance) + .Named("CurrentAllEvents"); + } } } diff --git a/src/contrib/persistence/Akka.Persistence.Sql.Common/Journal/BatchingSqlJournal.cs b/src/contrib/persistence/Akka.Persistence.Sql.Common/Journal/BatchingSqlJournal.cs index 5ac68b4a1cf..eef79a89168 100644 --- a/src/contrib/persistence/Akka.Persistence.Sql.Common/Journal/BatchingSqlJournal.cs +++ b/src/contrib/persistence/Akka.Persistence.Sql.Common/Journal/BatchingSqlJournal.cs @@ -383,13 +383,6 @@ public BatchComplete(int chunkId, int operationCount, TimeSpan timeSpent, Except } } - // this little guy will be called only once, only by the current journal - private sealed class GetCurrentPersistenceIds - { - public static readonly GetCurrentPersistenceIds Instance = new GetCurrentPersistenceIds(); - private GetCurrentPersistenceIds() { } - } - private struct RequestChunk { public readonly int ChunkId; @@ -463,7 +456,7 @@ public RequestChunk(int chunkId, 
IJournalRequest[] requests) protected virtual string InsertEventSql { get; } /// - /// SQL query executed as result of request to journal. + /// SQL query executed as result of request to journal. /// It's a part of persistence query protocol. /// protected virtual string AllPersistenceIdsSql { get; } @@ -486,9 +479,16 @@ public RequestChunk(int chunkId, IJournalRequest[] requests) /// protected virtual string ByTagSql { get; } - protected virtual string AllEventsSql { get; set; } + /// + /// SQL query executed as result of request to journal. + /// It's a part of persistence query protocol. + /// + protected virtual string AllEventsSql { get; } - protected virtual string HighestOrderingSql { get; set; } + /// + /// TBD + /// + protected virtual string HighestOrderingSql { get; } /// /// A named collection of SQL statements to be executed once journal actor gets initialized @@ -512,10 +512,9 @@ public RequestChunk(int chunkId, IJournalRequest[] requests) protected bool HasTagSubscribers => _tagSubscribers.Count != 0; /// - /// Flag determining if current journal has any subscribers for and - /// messages. + /// Flag determining if current journal has any subscribers for and /// - protected bool HasAllIdsSubscribers => _allIdsSubscribers.Count != 0; + protected bool HasNewEventsSubscribers => _newEventSubscriber.Count != 0; /// /// Flag determining if incoming journal requests should be published in current actor system event stream. @@ -536,8 +535,7 @@ public RequestChunk(int chunkId, IJournalRequest[] requests) private readonly Dictionary> _persistenceIdSubscribers; private readonly Dictionary> _tagSubscribers; - private readonly HashSet _allIdsSubscribers; - private readonly HashSet _allPersistenceIds; + private readonly HashSet _newEventSubscriber; private readonly Akka.Serialization.Serialization _serialization; private readonly CircuitBreaker _circuitBreaker; @@ -554,8 +552,7 @@ protected BatchingSqlJournal(BatchingSqlJournalSetup setup) _persistenceIdSubscribers = new Dictionary>(); _tagSubscribers = new Dictionary>(); - _allIdsSubscribers = new HashSet(); - _allPersistenceIds = new HashSet(); + _newEventSubscriber = new HashSet(); _remainingOperations = Setup.MaxConcurrentOperations; Buffer = new Queue(Setup.MaxBatchSize); @@ -578,8 +575,15 @@ protected BatchingSqlJournal(BatchingSqlJournalSetup setup) e.{conventions.SerializerIdColumnName} as SerializerId"; AllPersistenceIdsSql = $@" - SELECT DISTINCT e.{conventions.PersistenceIdColumnName} as PersistenceId - FROM {conventions.FullJournalTableName} e;"; + SELECT DISTINCT u.Id as PersistenceId + FROM ( + SELECT DISTINCT e.{conventions.PersistenceIdColumnName} as Id + FROM {conventions.FullJournalTableName} e + WHERE e.{conventions.OrderingColumnName} > @Ordering + UNION + SELECT DISTINCT e.{conventions.PersistenceIdColumnName} as Id + FROM {conventions.FullMetaTableName} e + ) as u"; HighestSequenceNrSql = $@" SELECT MAX(u.SeqNr) as SequenceNr @@ -623,8 +627,7 @@ WHERE e.{conventions.OrderingColumnName} > @Ordering HighestOrderingSql = $@" SELECT MAX(e.{conventions.OrderingColumnName}) as Ordering - FROM {conventions.FullJournalTableName} e - WHERE e.{conventions.OrderingColumnName} > @Ordering"; + FROM {conventions.FullJournalTableName} e"; InsertEventSql = $@" INSERT INTO {conventions.FullJournalTableName} ( @@ -696,27 +699,24 @@ protected sealed override bool Receive(object message) case ReplayAllEvents msg: BatchRequest(msg); return true; + case SelectCurrentPersistenceIds msg: + BatchRequest(msg); + return true; case 
BatchComplete msg: CompleteBatch(msg); return true; case SubscribePersistenceId msg: AddPersistenceIdSubscriber(msg); return true; - case SubscribeAllPersistenceIds msg: - AddAllSubscriber(msg); - return true; case SubscribeTag msg: AddTagSubscriber(msg); return true; + case SubscribeNewEvents msg: + AddNewEventsSubscriber(msg); + return true; case Terminated msg: RemoveSubscriber(msg.ActorRef); return true; - case GetCurrentPersistenceIds _: - InitializePersistenceIds(); - return true; - case CurrentPersistenceIds msg: - SendCurrentPersistenceIds(msg); - return true; case ChunkExecutionFailure msg: FailChunkExecution(msg); return true; @@ -753,82 +753,25 @@ private void FailChunkExecution(ChunkExecutionFailure message) } } - private void SendCurrentPersistenceIds(CurrentPersistenceIds message) - { - foreach (var persistenceId in message.AllPersistenceIds) - { - _allPersistenceIds.Add(persistenceId); - } - - foreach (var subscriber in _allIdsSubscribers) - { - subscriber.Tell(message); - } - } - #region subscriptions - - private void InitializePersistenceIds() - { - var self = Self; - GetAllPersistenceIdsAsync() - .ContinueWith(task => - { - if (task.IsCanceled || task.IsFaulted) - { - var cause = (Exception)task.Exception ?? new OperationCanceledException("Cancellation occurred while trying to retrieve current persistence ids"); - Log.Error(cause, "Couldn't retrieve current persistence ids"); - } - else - { - self.Tell(new CurrentPersistenceIds(task.Result)); - } - }); - } - - private async Task> GetAllPersistenceIdsAsync() - { - var result = new List(256); - using (var connection = CreateConnection(Setup.ConnectionString)) - { - await connection.OpenAsync(); - using (var command = connection.CreateCommand()) - { - command.CommandText = AllPersistenceIdsSql; - - var reader = await command.ExecuteReaderAsync(); - while (await reader.ReadAsync()) - { - result.Add(reader.GetString(0)); - } - } - } - return result; - } - private void RemoveSubscriber(IActorRef subscriberRef) { - _allIdsSubscribers.Remove(subscriberRef); _persistenceIdSubscribers.RemoveItem(subscriberRef); _tagSubscribers.RemoveItem(subscriberRef); + _newEventSubscriber.Remove(subscriberRef); } - private void AddTagSubscriber(SubscribeTag message) + private void AddNewEventsSubscriber(SubscribeNewEvents message) { var subscriber = Sender; - _tagSubscribers.AddItem(message.Tag, subscriber); + _newEventSubscriber.Add(subscriber); Context.Watch(subscriber); } - private void AddAllSubscriber(SubscribeAllPersistenceIds message) + private void AddTagSubscriber(SubscribeTag message) { - if (!HasAllIdsSubscribers) - { - Self.Tell(GetCurrentPersistenceIds.Instance); - } - var subscriber = Sender; - _allIdsSubscribers.Add(subscriber); + _tagSubscribers.AddItem(message.Tag, subscriber); Context.Watch(subscriber); } @@ -839,6 +782,17 @@ private void AddPersistenceIdSubscriber(SubscribePersistenceId message) Context.Watch(subscriber); } + private void NotifyNewEventAppended() + { + if (HasNewEventsSubscribers) + { + foreach (var subscriber in _newEventSubscriber) + { + subscriber.Tell(NewEventAppended.Instance); + } + } + } + private void NotifyTagChanged(string tag) { if (_tagSubscribers.TryGetValue(tag, out var bucket)) @@ -859,18 +813,6 @@ private void NotifyPersistenceIdChanged(string persistenceId) } } - protected void NotifyNewPersistenceIdAdded(string persistenceId) - { - if (_allPersistenceIds.Add(persistenceId) && HasAllIdsSubscribers) - { - var added = new PersistenceIdAdded(persistenceId); - foreach (var subscriber in 
_allIdsSubscribers) - { - subscriber.Tell(added, ActorRefs.NoSender); - } - } - } - #endregion /// @@ -968,6 +910,9 @@ private async Task ExecuteChunk(RequestChunk chunk, IActorContext case ReplayAllEvents msg: await HandleReplayAllMessages(msg, command); break; + case SelectCurrentPersistenceIds msg: + await HandleSelectCurrentPersistenceIds(msg, command); + break; default: Unhandled(req); break; @@ -1004,8 +949,6 @@ protected virtual async Task HandleDeleteMessagesTo(DeleteMessagesTo req, TComma var toSequenceNr = req.ToSequenceNr; var persistenceId = req.PersistenceId; - NotifyNewPersistenceIdAdded(persistenceId); - try { var highestSequenceNr = await ReadHighestSequenceNr(persistenceId, command); @@ -1050,6 +993,34 @@ protected virtual async Task ReadHighestSequenceNr(string persistenceId, T return highestSequenceNr; } + protected virtual async Task ReadHighestSequenceNr(TCommand command) + { + command.CommandText = HighestOrderingSql; + command.Parameters.Clear(); + + var result = await command.ExecuteScalarAsync(); + var highestSequenceNr = result is long ? Convert.ToInt64(result) : 0L; + return highestSequenceNr; + } + + protected virtual async Task HandleSelectCurrentPersistenceIds(SelectCurrentPersistenceIds message, TCommand command) + { + long highestOrderingNumber = await ReadHighestSequenceNr(command); + + var result = new List(256); + command.CommandText = AllPersistenceIdsSql; + command.Parameters.Clear(); + AddParameter(command, "@Ordering", DbType.Int64, message.Offset); + + var reader = await command.ExecuteReaderAsync(); + while (await reader.ReadAsync()) + { + result.Add(reader.GetString(0)); + } + + message.ReplyTo.Tell(new CurrentPersistenceIds(result, highestOrderingNumber)); + } + protected virtual async Task HandleReplayTaggedMessages(ReplayTaggedMessages req, TCommand command) { var replyTo = req.ReplyTo; @@ -1109,10 +1080,8 @@ protected virtual async Task HandleReplayAllMessages(ReplayAllEvents req, TComma using (var reader = await command.ExecuteReaderAsync()) { - long rowCounter = 0; while (await reader.ReadAsync()) { - ++rowCounter; var persistent = ReadEvent(reader); var ordering = reader.GetInt64(OrderingIndex); maxSequenceNr = Math.Max(maxSequenceNr, persistent.SequenceNr); @@ -1122,8 +1091,6 @@ protected virtual async Task HandleReplayAllMessages(ReplayAllEvents req, TComma replyTo.Tell(new ReplayedEvent(adapted, ordering), ActorRefs.NoSender); } } - if (rowCounter < req.Max) - replyTo.Tell(ReplayedAllEvents.Instance); } replyTo.Tell(new EventReplaySuccess(maxSequenceNr)); @@ -1147,8 +1114,6 @@ protected virtual async Task HandleReplayMessages(ReplayMessages req, TCommand c : req.PersistentActor; var persistenceId = req.PersistenceId; - NotifyNewPersistenceIdAdded(persistenceId); - try { var highestSequenceNr = await ReadHighestSequenceNr(persistenceId, command); @@ -1236,8 +1201,6 @@ private async Task HandleWriteMessages(WriteMessages req, TCommand command) var response = (new WriteMessageSuccess(unadapted, actorInstanceId), unadapted.Sender); responses.Add(response); persistenceIds.Add(persistent.PersistenceId); - - NotifyNewPersistenceIdAdded(persistent.PersistenceId); } catch (DbException cause) { @@ -1280,6 +1243,11 @@ private async Task HandleWriteMessages(WriteMessages req, TCommand command) } } + if (HasNewEventsSubscribers) + { + NotifyNewEventAppended(); + } + summary = summary ?? 
WriteMessagesSuccessful.Instance; } catch (Exception cause) diff --git a/src/contrib/persistence/Akka.Persistence.Sql.Common/Journal/QueryApi.cs b/src/contrib/persistence/Akka.Persistence.Sql.Common/Journal/QueryApi.cs index c8c15f9c27b..5e993190323 100644 --- a/src/contrib/persistence/Akka.Persistence.Sql.Common/Journal/QueryApi.cs +++ b/src/contrib/persistence/Akka.Persistence.Sql.Common/Journal/QueryApi.cs @@ -64,20 +64,17 @@ public EventAppended(string persistenceId) } } - /// - /// Subscribe the `sender` to current and new persistenceIds. - /// Used by query-side. The journal will send one to the - /// subscriber followed by messages when new persistenceIds - /// are created. - /// [Serializable] - public sealed class SubscribeAllPersistenceIds : ISubscriptionCommand + public sealed class SelectCurrentPersistenceIds : IJournalRequest { - /// - /// TBD - /// - public static readonly SubscribeAllPersistenceIds Instance = new SubscribeAllPersistenceIds(); - private SubscribeAllPersistenceIds() { } + public IActorRef ReplyTo { get; } + public long Offset { get; } + + public SelectCurrentPersistenceIds(long offset, IActorRef replyTo) + { + Offset = offset; + ReplyTo = replyTo; + } } /// @@ -91,35 +88,39 @@ public sealed class CurrentPersistenceIds : IDeadLetterSuppression /// public readonly IEnumerable AllPersistenceIds; + public readonly long HighestOrderingNumber; + /// /// TBD /// /// TBD - public CurrentPersistenceIds(IEnumerable allPersistenceIds) + /// TBD + public CurrentPersistenceIds(IEnumerable allPersistenceIds, long highestOrderingNumber) { AllPersistenceIds = allPersistenceIds.ToImmutableHashSet(); + HighestOrderingNumber = highestOrderingNumber; } } /// - /// TBD + /// Subscribe the `sender` to new appended events. + /// Used by query-side. The journal will send messages to + /// the subscriber when `asyncWriteMessages` has been called. /// [Serializable] - public sealed class PersistenceIdAdded : IDeadLetterSuppression + public sealed class SubscribeNewEvents : ISubscriptionCommand { - /// - /// TBD - /// - public readonly string PersistenceId; + public static SubscribeNewEvents Instance = new SubscribeNewEvents(); - /// - /// TBD - /// - /// TBD - public PersistenceIdAdded(string persistenceId) - { - PersistenceId = persistenceId; - } + private SubscribeNewEvents() { } + } + + [Serializable] + public sealed class NewEventAppended : IDeadLetterSuppression + { + public static NewEventAppended Instance = new NewEventAppended(); + + private NewEventAppended() { } } /// @@ -181,6 +182,10 @@ public sealed class ReplayAllEvents : IJournalRequest /// /// TBD /// + public readonly long ToOffset; + /// + /// TBD + /// public readonly long Max; /// /// TBD @@ -188,12 +193,11 @@ public sealed class ReplayAllEvents : IJournalRequest public readonly IActorRef ReplyTo; /// - /// Initializes a new instance of the class. + /// Initializes a new instance of the class. /// /// TBD /// TBD /// TBD - /// TBD /// TBD /// /// This exception is thrown for a number of reasons. These include the following: @@ -203,27 +207,19 @@ public sealed class ReplayAllEvents : IJournalRequest ///
  • The specified is less than or equal to zero.
  • /// ///
    - /// - /// This exception is thrown when the specified is null or empty. - /// - public ReplayAllEvents(long fromOffset, long max, IActorRef replyTo) + public ReplayAllEvents(long fromOffset, long toOffset, long max, IActorRef replyTo) { if (fromOffset < 0) throw new ArgumentException("From offset may not be a negative number", nameof(fromOffset)); + if (toOffset <= 0) throw new ArgumentException("To offset must be a positive number", nameof(toOffset)); if (max <= 0) throw new ArgumentException("Maximum number of replayed messages must be a positive number", nameof(max)); FromOffset = fromOffset; + ToOffset = toOffset; Max = max; ReplyTo = replyTo; } } - public sealed class ReplayedAllEvents - { - public static ReplayedAllEvents Instance = new ReplayedAllEvents(); - - private ReplayedAllEvents() { } - } - /// /// TBD /// diff --git a/src/contrib/persistence/Akka.Persistence.Sql.Common/Journal/QueryExecutor.cs b/src/contrib/persistence/Akka.Persistence.Sql.Common/Journal/QueryExecutor.cs index e796d4b14d9..aa155b70483 100644 --- a/src/contrib/persistence/Akka.Persistence.Sql.Common/Journal/QueryExecutor.cs +++ b/src/contrib/persistence/Akka.Persistence.Sql.Common/Journal/QueryExecutor.cs @@ -6,6 +6,7 @@ //----------------------------------------------------------------------- using System; +using System.Collections.Generic; using System.Collections.Immutable; using System.Data; using System.Data.Common; @@ -34,8 +35,9 @@ public interface IJournalQueryExecutor ///
    /// TBD /// TBD + /// TBD /// TBD - Task> SelectAllPersistenceIdsAsync(DbConnection connection, CancellationToken cancellationToken); + Task> SelectAllPersistenceIdsAsync(DbConnection connection, CancellationToken cancellationToken, long offset); /// /// Asynchronously replays a on all selected events for provided @@ -69,12 +71,12 @@ public interface IJournalQueryExecutor Task SelectByTagAsync(DbConnection connection, CancellationToken cancellationToken, string tag, long fromOffset, long toOffset, long max, Action callback); Task SelectAllEventsAsync( - DbConnection connection, + DbConnection connection, CancellationToken cancellationToken, long fromOffset, + long toOffset, long max, - Action callback, - Action completeCallback); + Action callback); /// /// Asynchronously returns single number considered as the highest sequence number in current journal for the provided . @@ -84,7 +86,9 @@ Task SelectAllEventsAsync( /// TBD /// TBD Task SelectHighestSequenceNrAsync(DbConnection connection, CancellationToken cancellationToken, string persistenceId); - + + Task SelectHighestSequenceNrAsync(DbConnection connection, CancellationToken cancellationToken); + /// /// Asynchronously inserts a collection of events and theirs tags into a journal table. /// @@ -326,8 +330,15 @@ protected AbstractQueryExecutor(QueryConfiguration configuration, Akka.Serializa e.{Configuration.SerializerIdColumnName} as SerializerId"; AllPersistenceIdsSql = $@" - SELECT DISTINCT e.{Configuration.PersistenceIdColumnName} as PersistenceId - FROM {Configuration.FullJournalTableName} e;"; + SELECT DISTINCT u.Id as PersistenceId + FROM ( + SELECT DISTINCT e.{Configuration.PersistenceIdColumnName} as Id + FROM {Configuration.FullJournalTableName} e + WHERE e.{Configuration.OrderingColumnName} > @Ordering + UNION + SELECT DISTINCT e.{Configuration.PersistenceIdColumnName} as Id + FROM {Configuration.FullMetaTableName} e + ) as u"; HighestSequenceNrSql = $@" SELECT MAX(u.SeqNr) as SequenceNr @@ -377,8 +388,7 @@ WHERE e.{Configuration.OrderingColumnName} > @Ordering HighestOrderingSql = $@" SELECT MAX(e.{Configuration.OrderingColumnName}) as Ordering - FROM {Configuration.FullJournalTableName} e - WHERE e.{Configuration.OrderingColumnName} > @Ordering"; + FROM {Configuration.FullJournalTableName} e"; InsertEventSql = $@" INSERT INTO {Configuration.FullJournalTableName} ( @@ -474,19 +484,23 @@ SELECT MAX(e.{Configuration.OrderingColumnName}) as Ordering /// /// TBD /// TBD + /// TBD /// TBD - public virtual async Task> SelectAllPersistenceIdsAsync(DbConnection connection, CancellationToken cancellationToken) + public virtual async Task> SelectAllPersistenceIdsAsync(DbConnection connection, CancellationToken cancellationToken, long offset) { using (var command = GetCommand(connection, AllPersistenceIdsSql)) - using (var reader = await command.ExecuteReaderAsync(cancellationToken)) { - var builder = ImmutableArray.CreateBuilder(); - while (await reader.ReadAsync(cancellationToken)) + AddParameter(command, "@Ordering", DbType.Int64, offset); + + using (var reader = await command.ExecuteReaderAsync(cancellationToken)) { - builder.Add(reader.GetString(0)); + var builder = ImmutableArray.CreateBuilder(); + while (await reader.ReadAsync(cancellationToken)) + { + builder.Add(reader.GetString(0)); + } + return builder.ToImmutable(); } - - return builder.ToImmutable(); } } @@ -586,17 +600,25 @@ public virtual async Task SelectByTagAsync(DbConnection connection, Cancel } public async Task SelectAllEventsAsync( - DbConnection connection, 
+ DbConnection connection, CancellationToken cancellationToken, long fromOffset, + long toOffset, long max, - Action callback, - Action completeCallback) + Action callback) { + long maxOrdering; + using (var command = GetCommand(connection, HighestOrderingSql)) + { + maxOrdering = (await command.ExecuteScalarAsync(cancellationToken)) as long? ?? 0L; + } + using (var command = GetCommand(connection, AllEventsSql)) { + var take = Math.Min(toOffset - fromOffset, max); + AddParameter(command, "@Ordering", DbType.Int64, fromOffset); - AddParameter(command, "@Take", DbType.Int64, max); + AddParameter(command, "@Take", DbType.Int64, take); var commandBehavior = Configuration.UseSequentialAccess ? CommandBehavior.SequentialAccess : @@ -604,25 +626,16 @@ public async Task SelectAllEventsAsync( using (var reader = await command.ExecuteReaderAsync(commandBehavior, cancellationToken)) { - long rowCounter = 0; while (await reader.ReadAsync(cancellationToken)) { - ++rowCounter; var persistent = ReadEvent(reader); var ordering = reader.GetInt64(OrderingIndex); callback(new ReplayedEvent(persistent, ordering)); } - if(rowCounter < max) - completeCallback(ReplayedAllEvents.Instance); } } - using (var command = GetCommand(connection, HighestOrderingSql)) - { - AddParameter(command, "@Ordering", DbType.Int64, fromOffset); - var maxOrdering = (await command.ExecuteScalarAsync(cancellationToken)) as long? ?? 0L; - return maxOrdering; - } + return maxOrdering; } /// @@ -643,6 +656,15 @@ public virtual async Task SelectHighestSequenceNrAsync(DbConnection connec } } + public virtual async Task SelectHighestSequenceNrAsync(DbConnection connection, CancellationToken cancellationToken) + { + using (var command = GetCommand(connection, HighestOrderingSql)) + { + var result = await command.ExecuteScalarAsync(cancellationToken); + return result is long ? 
Convert.ToInt64(result) : 0L; + } + } + /// /// TBD /// diff --git a/src/contrib/persistence/Akka.Persistence.Sql.Common/Journal/SqlJournal.cs b/src/contrib/persistence/Akka.Persistence.Sql.Common/Journal/SqlJournal.cs index 98facd42f56..72c5ae0dff9 100644 --- a/src/contrib/persistence/Akka.Persistence.Sql.Common/Journal/SqlJournal.cs +++ b/src/contrib/persistence/Akka.Persistence.Sql.Common/Journal/SqlJournal.cs @@ -10,6 +10,7 @@ using System.Collections.Immutable; using System.Data.Common; using System.Linq; +using System.Reflection; using System.Threading; using System.Threading.Tasks; using Akka.Actor; @@ -26,13 +27,11 @@ public abstract class SqlJournal : AsyncWriteJournal, IWithUnboundedStash { private ImmutableDictionary> _persistenceIdSubscribers = ImmutableDictionary.Create>(); private ImmutableDictionary> _tagSubscribers = ImmutableDictionary.Create>(); - private readonly HashSet _allPersistenceIdSubscribers = new HashSet(); - private readonly ReaderWriterLockSlim _allPersistenceIdsLock = new ReaderWriterLockSlim(); - private HashSet _allPersistenceIds = new HashSet(); + private readonly HashSet _newEventsSubscriber = new HashSet(); private IImmutableDictionary _tagSequenceNr = ImmutableDictionary.Empty; private readonly CancellationTokenSource _pendingRequestsCancellation; - private JournalSettings _settings; + private readonly JournalSettings _settings; private ILoggingAdapter _log; @@ -46,16 +45,8 @@ protected SqlJournal(Config journalConfig) _pendingRequestsCancellation = new CancellationTokenSource(); } - /// - /// TBD - /// public IStash Stash { get; set; } - /// - /// TBD - /// - public IEnumerable AllPersistenceIds => _allPersistenceIds; - /// /// TBD /// @@ -67,7 +58,7 @@ protected SqlJournal(Config journalConfig) /// /// TBD /// - protected bool HasAllPersistenceIdSubscribers => _allPersistenceIdSubscribers.Count != 0; + protected bool HasNewEventSubscribers => _newEventsSubscriber.Count != 0; /// /// Returns a HOCON config path to associated journal. 
@@ -98,35 +89,39 @@ protected SqlJournal(Config journalConfig) /// TBD protected override bool ReceivePluginInternal(object message) { - return message.Match() - .With(replay => - { + switch (message) + { + case ReplayTaggedMessages replay: ReplayTaggedMessagesAsync(replay) - .PipeTo(replay.ReplyTo, success: h => new RecoverySuccess(h), failure: e => new ReplayMessagesFailure(e)); - }) - .With(replay => - { + .PipeTo(replay.ReplyTo, success: h => new RecoverySuccess(h), failure: e => new ReplayMessagesFailure(e)); + return true; + case ReplayAllEvents replay: ReplayAllEventsAsync(replay) .PipeTo(replay.ReplyTo, success: h => new EventReplaySuccess(h), failure: e => new EventReplayFailure(e)); - }) - .With(subscribe => - { + return true; + case SubscribePersistenceId subscribe: AddPersistenceIdSubscriber(Sender, subscribe.PersistenceId); Context.Watch(Sender); - }) - .With(subscribe => - { - AddAllPersistenceIdSubscriber(Sender); - Context.Watch(Sender); - }) - .With(subscribe => - { + return true; + case SelectCurrentPersistenceIds request: + SelectAllPersistenceIdsAsync(request.Offset) + .PipeTo(request.ReplyTo, success: result => new CurrentPersistenceIds(result.Ids, request.Offset)); + return true; + case SubscribeTag subscribe: AddTagSubscriber(Sender, subscribe.Tag); Context.Watch(Sender); - }) - .With(terminated => RemoveSubscriber(terminated.ActorRef)) - .WasHandled; + return true; + case SubscribeNewEvents _: + AddNewEventsSubscriber(Sender); + Context.Watch(Sender); + return true; + case Terminated terminated: + RemoveSubscriber(terminated.ActorRef); + return true; + default: + return false; + } } /// @@ -169,8 +164,6 @@ protected override async Task> WriteMessagesAsync(IEnu if (IsTagId(p.PersistenceId)) throw new InvalidOperationException($"Persistence Id {p.PersistenceId} must not start with {QueryExecutor.Configuration.TagsColumnName}"); - - NotifyNewPersistenceIdAdded(p.PersistenceId); } var batch = new WriteJournalBatch(eventToTags); @@ -200,6 +193,9 @@ protected override async Task> WriteMessagesAsync(IEnu } } + if (HasNewEventSubscribers) + NotifyNewEventAppended(); + return result; } @@ -234,17 +230,32 @@ protected virtual async Task ReplayAllEventsAsync(ReplayAllEvents replay) using (var cancellationToken = CancellationTokenSource.CreateLinkedTokenSource(_pendingRequestsCancellation.Token)) { return await QueryExecutor - .SelectAllEventsAsync(connection, + .SelectAllEventsAsync( + connection, cancellationToken.Token, replay.FromOffset, + replay.ToOffset, replay.Max, replayedEvent => { foreach (var adapted in AdaptFromJournal(replayedEvent.Persistent)) { replay.ReplyTo.Tell(new ReplayedEvent(adapted, replayedEvent.Offset), ActorRefs.NoSender); } - }, - complete => replay.ReplyTo.Tell(complete)); + }); + } + } + } + + protected virtual async Task<(IEnumerable Ids, long LastOrdering)> SelectAllPersistenceIdsAsync(long offset) + { + using (var connection = CreateDbConnection()) + { + await connection.OpenAsync(); + using (var cancellationToken = CancellationTokenSource.CreateLinkedTokenSource(_pendingRequestsCancellation.Token)) + { + var lastOrdering = await QueryExecutor.SelectHighestSequenceNrAsync(connection, cancellationToken.Token); + var ids = await QueryExecutor.SelectAllPersistenceIdsAsync(connection, cancellationToken.Token, offset); + return (ids, lastOrdering); } } } @@ -262,7 +273,6 @@ protected virtual async Task ReplayAllEventsAsync(ReplayAllEvents replay) public override async Task ReplayMessagesAsync(IActorContext context, string persistenceId, long 
fromSequenceNr, long toSequenceNr, long max, Action recoveryCallback) { - NotifyNewPersistenceIdAdded(persistenceId); using (var connection = CreateDbConnection()) { await connection.OpenAsync(); @@ -299,24 +309,27 @@ protected override void PostStop() /// TBD protected bool WaitingForInitialization(object message) { - return message.Match() - .With(all => - { - _allPersistenceIds = new HashSet(all.Ids); + switch (message) + { + case Status.Success _: UnbecomeStacked(); Stash.UnstashAll(); - }) - .With(fail => - { - Log.Error(fail.Exception, "Failure during {0} initialization.", Self); + return true; + case Status.Failure fail: + Log.Error(fail.Cause, "Failure during {0} initialization.", Self); Context.Stop(Self); - }) - .Default(_ => Stash.Stash()) - .WasHandled; + return true; + default: + Stash.Stash(); + return true; + } } private async Task Initialize() { + if (!_settings.AutoInitialize) + return new Status.Success(NotUsed.Instance); + try { using (var connection = CreateDbConnection()) @@ -324,20 +337,15 @@ private async Task Initialize() await connection.OpenAsync(); using (var cancellationToken = CancellationTokenSource.CreateLinkedTokenSource(_pendingRequestsCancellation.Token)) { - if (_settings.AutoInitialize) - { - await QueryExecutor.CreateTablesAsync(connection, cancellationToken.Token); - } - - var ids = await QueryExecutor.SelectAllPersistenceIdsAsync(connection, cancellationToken.Token); - return new AllPersistenceIds(ids); + await QueryExecutor.CreateTablesAsync(connection, cancellationToken.Token); } } } catch (Exception e) { - return new Failure {Exception = e}; + return new Status.Failure(e); } + return new Status.Success(NotUsed.Instance); } /// @@ -364,7 +372,12 @@ public void RemoveSubscriber(IActorRef subscriber) .Where(kv => kv.Value.Contains(subscriber)) .Select(kv => new KeyValuePair>(kv.Key, kv.Value.Remove(subscriber)))); - _allPersistenceIdSubscribers.Remove(subscriber); + _newEventsSubscriber.Remove(subscriber); + } + + public void AddNewEventsSubscriber(IActorRef subscriber) + { + _newEventsSubscriber.Add(subscriber); } /// @@ -384,16 +397,6 @@ public void AddTagSubscriber(IActorRef subscriber, string tag) } } - /// - /// TBD - /// - /// TBD - public void AddAllPersistenceIdSubscriber(IActorRef subscriber) - { - _allPersistenceIdSubscribers.Add(subscriber); - subscriber.Tell(new CurrentPersistenceIds(AllPersistenceIds)); - } - /// /// TBD /// @@ -423,44 +426,6 @@ private async Task NextTagSequenceNr(string tag) private string TagId(string tag) => QueryExecutor.Configuration.TagsColumnName + tag; - private void NotifyNewPersistenceIdAdded(string persistenceId) - { - var isNew = TryAddPersistenceId(persistenceId); - if (isNew && HasAllPersistenceIdSubscribers && !IsTagId(persistenceId)) - { - var added = new PersistenceIdAdded(persistenceId); - foreach (var subscriber in _allPersistenceIdSubscribers) - subscriber.Tell(added); - } - } - - private bool TryAddPersistenceId(string persistenceId) - { - try - { - _allPersistenceIdsLock.EnterUpgradeableReadLock(); - - if (_allPersistenceIds.Contains(persistenceId)) return false; - else - { - try - { - _allPersistenceIdsLock.EnterWriteLock(); - _allPersistenceIds.Add(persistenceId); - return true; - } - finally - { - _allPersistenceIdsLock.ExitWriteLock(); - } - } - } - finally - { - _allPersistenceIdsLock.ExitUpgradeableReadLock(); - } - } - private bool IsTagId(string persistenceId) { return persistenceId.StartsWith(QueryExecutor.Configuration.TagsColumnName); @@ -486,6 +451,17 @@ private void 
NotifyTagChange(string tag) } } + private void NotifyNewEventAppended() + { + if (HasNewEventSubscribers) + { + foreach (var subscriber in _newEventsSubscriber) + { + subscriber.Tell(NewEventAppended.Instance); + } + } + } + /// /// Asynchronously deletes all persisted messages identified by provided /// up to provided message sequence number (inclusive). @@ -495,7 +471,6 @@ private void NotifyTagChange(string tag) /// TBD protected override async Task DeleteMessagesToAsync(string persistenceId, long toSequenceNr) { - NotifyNewPersistenceIdAdded(persistenceId); using (var connection = CreateDbConnection()) { await connection.OpenAsync(); @@ -514,7 +489,6 @@ protected override async Task DeleteMessagesToAsync(string persistenceId, long t /// TBD public override async Task ReadHighestSequenceNrAsync(string persistenceId, long fromSequenceNr) { - NotifyNewPersistenceIdAdded(persistenceId); using (var connection = CreateDbConnection()) { await connection.OpenAsync(); @@ -553,14 +527,10 @@ protected virtual string GetConnectionString() protected ITimestampProvider GetTimestampProvider(string typeName) { var type = Type.GetType(typeName, true); - try - { - return (ITimestampProvider)Activator.CreateInstance(type, Context.System); - } - catch (Exception) - { - return (ITimestampProvider)Activator.CreateInstance(type); - } + var withSystem = type.GetConstructor(new[] { Context.System.GetType() }) != null; + return withSystem ? + (ITimestampProvider)Activator.CreateInstance(type, Context.System) : + (ITimestampProvider)Activator.CreateInstance(type); } #endregion } diff --git a/src/contrib/persistence/Akka.Persistence.Sql.TestKit/SqlJournalConnectionFailureSpec.cs b/src/contrib/persistence/Akka.Persistence.Sql.TestKit/SqlJournalConnectionFailureSpec.cs index b71cf98dbc9..45affcb0c9e 100644 --- a/src/contrib/persistence/Akka.Persistence.Sql.TestKit/SqlJournalConnectionFailureSpec.cs +++ b/src/contrib/persistence/Akka.Persistence.Sql.TestKit/SqlJournalConnectionFailureSpec.cs @@ -19,7 +19,7 @@ public abstract class SqlJournalConnectionFailureSpec : Akka.TestKit.Xunit2.Test protected static readonly string DefaultInvalidConnectionString = "INVALID_CONNECTION_STRING"; public SqlJournalConnectionFailureSpec(Config config = null, ITestOutputHelper output = null) - : base(config) + : base(config, null, output) { } diff --git a/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Akka.Persistence.Sqlite.Tests.csproj b/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Akka.Persistence.Sqlite.Tests.csproj index 2084f8515f3..69486b773af 100644 --- a/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Akka.Persistence.Sqlite.Tests.csproj +++ b/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Akka.Persistence.Sqlite.Tests.csproj @@ -9,6 +9,7 @@ + diff --git a/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Query/BatchingSqliteAllEventsSpec.cs b/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Batching/BatchingSqliteAllEventsSpec.cs similarity index 80% rename from src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Query/BatchingSqliteAllEventsSpec.cs rename to src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Batching/BatchingSqliteAllEventsSpec.cs index 878584f1b2f..567413d6374 100644 --- a/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Query/BatchingSqliteAllEventsSpec.cs +++ b/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Batching/BatchingSqliteAllEventsSpec.cs @@ -12,9 +12,7 @@ namespace Akka.Persistence.Sqlite.Tests.Query { public class 
BatchingSqliteAllEventsSpec : AllEventsSpec { - public static readonly AtomicCounter Counter = new AtomicCounter(0); - - public static Config Config(int id) => ConfigurationFactory.ParseString($@" + public static Config Config => ConfigurationFactory.ParseString($@" akka.loglevel = INFO akka.persistence.journal.plugin = ""akka.persistence.journal.sqlite"" akka.persistence.journal.sqlite {{ @@ -23,13 +21,13 @@ class = ""Akka.Persistence.Sqlite.Journal.BatchingSqliteJournal, Akka.Persistenc table-name = event_journal metadata-table-name = journal_metadata auto-initialize = on - connection-string = ""Filename=file:memdb-journal-eventsbytag-{id}.db;Mode=Memory;Cache=Shared"" + connection-string = ""Filename=file:memdb-journal-eventsbytag-{Guid.NewGuid()}.db;Mode=Memory;Cache=Shared"" refresh-interval = 1s }} akka.test.single-expect-default = 10s") .WithFallback(SqlReadJournal.DefaultConfiguration()); - public BatchingSqliteAllEventsSpec(ITestOutputHelper output) : base(Config(Counter.GetAndIncrement()), nameof(BatchingSqliteAllEventsSpec), output) + public BatchingSqliteAllEventsSpec(ITestOutputHelper output) : base(Config, nameof(BatchingSqliteAllEventsSpec), output) { ReadJournal = Sys.ReadJournalFor(SqlReadJournal.Identifier); } diff --git a/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Batching/BatchingSqliteCurrentAllEventsSpec.cs b/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Batching/BatchingSqliteCurrentAllEventsSpec.cs new file mode 100644 index 00000000000..cf8d1416e50 --- /dev/null +++ b/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Batching/BatchingSqliteCurrentAllEventsSpec.cs @@ -0,0 +1,37 @@ +using System; +using System.Collections.Generic; +using System.Text; +using Akka.Configuration; +using Akka.Persistence.Query; +using Akka.Persistence.Query.Sql; +using Akka.Persistence.TCK.Query; +using Akka.Util.Internal; +using Xunit.Abstractions; + +namespace Akka.Persistence.Sqlite.Tests.Batching +{ + public class BatchingCurrentSqliteAllEventsSpec : CurrentAllEventsSpec + { + public static readonly AtomicCounter Counter = new AtomicCounter(0); + + public static Config Config(int id) => ConfigurationFactory.ParseString($@" + akka.loglevel = INFO + akka.persistence.journal.plugin = ""akka.persistence.journal.sqlite"" + akka.persistence.journal.sqlite {{ + class = ""Akka.Persistence.Sqlite.Journal.BatchingSqliteJournal, Akka.Persistence.Sqlite"" + plugin-dispatcher = ""akka.actor.default-dispatcher"" + table-name = event_journal + metadata-table-name = journal_metadata + auto-initialize = on + connection-string = ""Filename=file:memdb-journal-eventsbytag-{id}.db;Mode=Memory;Cache=Shared"" + refresh-interval = 1s + }} + akka.test.single-expect-default = 10s") + .WithFallback(SqlReadJournal.DefaultConfiguration()); + + public BatchingCurrentSqliteAllEventsSpec(ITestOutputHelper output) : base(Config(Counter.GetAndIncrement()), nameof(BatchingCurrentSqliteAllEventsSpec), output) + { + ReadJournal = Sys.ReadJournalFor(SqlReadJournal.Identifier); + } + } +} diff --git a/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Batching/BatchingSqlitePersistenceIdSpec.cs b/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Batching/BatchingSqlitePersistenceIdSpec.cs index 9f9ffd335d3..abdd2f069bf 100644 --- a/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Batching/BatchingSqlitePersistenceIdSpec.cs +++ b/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Batching/BatchingSqlitePersistenceIdSpec.cs @@ -5,6 +5,7 @@ // 
//----------------------------------------------------------------------- +using System; using Akka.Configuration; using Akka.Persistence.Query; using Akka.Persistence.Query.Sql; @@ -16,24 +17,48 @@ namespace Akka.Persistence.Sqlite.Tests.Batching { public class BatchingSqlitePersistenceIdSpec : PersistenceIdsSpec { - public static readonly AtomicCounter Counter = new AtomicCounter(0); - public static Config Config(int id) => ConfigurationFactory.ParseString($@" + public static string ConnectionString(string type) => $"Filename=file:memdb-persistenceids-{type}-{Guid.NewGuid()}.db;Mode=Memory;Cache=Shared"; + + public static Config Config => ConfigurationFactory.ParseString($@" akka.loglevel = INFO - akka.persistence.journal.plugin = ""akka.persistence.journal.sqlite"" - akka.persistence.journal.sqlite {{ - class = ""Akka.Persistence.Sqlite.Journal.BatchingSqliteJournal, Akka.Persistence.Sqlite"" - plugin-dispatcher = ""akka.actor.default-dispatcher"" - table-name = event_journal - metadata-table-name = journal_metadata - auto-initialize = on - connection-string = ""Datasource=memdb-journal-batch-persistenceids-{id}.db;Mode=Memory;Cache=Shared"" - refresh-interval = 1s + akka.actor{{ + serializers{{ + persistence-tck-test=""Akka.Persistence.TCK.Serialization.TestSerializer,Akka.Persistence.TCK"" + }} + serialization-bindings {{ + ""Akka.Persistence.TCK.Serialization.TestPayload,Akka.Persistence.TCK"" = persistence-tck-test + }} + }} + akka.persistence {{ + publish-plugin-commands = on + journal {{ + plugin = ""akka.persistence.journal.sqlite"" + sqlite = {{ + class = ""Akka.Persistence.Sqlite.Journal.SqliteJournal, Akka.Persistence.Sqlite"" + plugin-dispatcher = ""akka.actor.default-dispatcher"" + table-name = event_journal + metadata-table-name = journal_metadata + auto-initialize = on + connection-string = ""{ConnectionString("journal")}"" + refresh-interval = 200ms + }} + }} + snapshot-store {{ + plugin = ""akka.persistence.snapshot-store.sqlite"" + sqlite {{ + class = ""Akka.Persistence.Sqlite.Snapshot.SqliteSnapshotStore, Akka.Persistence.Sqlite"" + plugin-dispatcher = ""akka.actor.default-dispatcher"" + table-name = snapshot_store + auto-initialize = on + connection-string = ""{ConnectionString("snapshot")}"" + }} + }} }} akka.test.single-expect-default = 10s") .WithFallback(SqlReadJournal.DefaultConfiguration()); - public BatchingSqlitePersistenceIdSpec(ITestOutputHelper output) : base(Config(Counter.GetAndIncrement()), nameof(BatchingSqlitePersistenceIdSpec), output) + public BatchingSqlitePersistenceIdSpec(ITestOutputHelper output) : base(Config, nameof(BatchingSqlitePersistenceIdSpec), output) { ReadJournal = Sys.ReadJournalFor(SqlReadJournal.Identifier); } diff --git a/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Query/SqliteAllEventsSpec.cs b/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Query/SqliteAllEventsSpec.cs index 1b7ebf08c95..186faae968b 100644 --- a/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Query/SqliteAllEventsSpec.cs +++ b/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Query/SqliteAllEventsSpec.cs @@ -12,9 +12,7 @@ namespace Akka.Persistence.Sqlite.Tests.Query { public class SqliteAllEventsSpec:AllEventsSpec { - public static readonly AtomicCounter Counter = new AtomicCounter(0); - - public static Config Config(int id) => ConfigurationFactory.ParseString($@" + public static Config Config => ConfigurationFactory.ParseString($@" akka.loglevel = INFO akka.persistence.journal.plugin = ""akka.persistence.journal.sqlite"" 
akka.persistence.journal.sqlite {{ @@ -23,13 +21,13 @@ class = ""Akka.Persistence.Sqlite.Journal.SqliteJournal, Akka.Persistence.Sqlite table-name = event_journal metadata-table-name = journal_metadata auto-initialize = on - connection-string = ""Filename=file:memdb-journal-eventsbytag-{id}.db;Mode=Memory;Cache=Shared"" + connection-string = ""Filename=file:memdb-journal-eventsbytag-{Guid.NewGuid()}.db;Mode=Memory;Cache=Shared"" refresh-interval = 1s }} akka.test.single-expect-default = 10s") .WithFallback(SqlReadJournal.DefaultConfiguration()); - public SqliteAllEventsSpec(ITestOutputHelper output) : base(Config(Counter.GetAndIncrement()), nameof(SqliteAllEventsSpec), output) + public SqliteAllEventsSpec(ITestOutputHelper output) : base(Config, nameof(SqliteAllEventsSpec), output) { ReadJournal = Sys.ReadJournalFor(SqlReadJournal.Identifier); } diff --git a/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Query/SqliteCurrentAllEventsSpec.cs b/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Query/SqliteCurrentAllEventsSpec.cs new file mode 100644 index 00000000000..c48d678dca8 --- /dev/null +++ b/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Query/SqliteCurrentAllEventsSpec.cs @@ -0,0 +1,38 @@ +using System; +using System.Collections.Generic; +using System.Text; +using Akka.Configuration; +using Akka.Persistence.Query; +using Akka.Persistence.Query.Sql; +using Akka.Persistence.TCK.Query; +using Akka.Util.Internal; +using Xunit.Abstractions; + +namespace Akka.Persistence.Sqlite.Tests.Query +{ + public class SqliteCurrentAllEventsSpec:CurrentAllEventsSpec + { + public static readonly AtomicCounter Counter = new AtomicCounter(0); + + public static Config Config(int id) => ConfigurationFactory.ParseString($@" + akka.loglevel = INFO + akka.persistence.journal.plugin = ""akka.persistence.journal.sqlite"" + akka.persistence.journal.sqlite {{ + class = ""Akka.Persistence.Sqlite.Journal.SqliteJournal, Akka.Persistence.Sqlite"" + plugin-dispatcher = ""akka.actor.default-dispatcher"" + table-name = event_journal + metadata-table-name = journal_metadata + auto-initialize = on + connection-string = ""Filename=file:memdb-journal-eventsbytag-{id}.db;Mode=Memory;Cache=Shared"" + refresh-interval = 1s + }} + akka.test.single-expect-default = 10s") + .WithFallback(SqlReadJournal.DefaultConfiguration()); + + public SqliteCurrentAllEventsSpec(ITestOutputHelper output) : base(Config(Counter.GetAndIncrement()), nameof(SqliteCurrentAllEventsSpec), output) + { + ReadJournal = Sys.ReadJournalFor(SqlReadJournal.Identifier); + } + + } +} diff --git a/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Query/SqliteCurrentPersistenceIdsSpec.cs b/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Query/SqliteCurrentPersistenceIdsSpec.cs index cf2ad4a878a..ca46a9f0dee 100644 --- a/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Query/SqliteCurrentPersistenceIdsSpec.cs +++ b/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Query/SqliteCurrentPersistenceIdsSpec.cs @@ -38,10 +38,5 @@ public SqliteCurrentPersistenceIdsSpec(ITestOutputHelper output) : base(Config(C { ReadJournal = Sys.ReadJournalFor(SqlReadJournal.Identifier); } - - [Fact(Skip = "Not implemented, due to bugs on NetCore")] - public override void ReadJournal_query_CurrentPersistenceIds_should_not_see_new_events_after_complete() - { - } } } diff --git a/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Query/SqlitePersistenceIdsSpec.cs 
b/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Query/SqlitePersistenceIdsSpec.cs index fdd5c91e611..43200b64713 100644 --- a/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Query/SqlitePersistenceIdsSpec.cs +++ b/src/contrib/persistence/Akka.Persistence.Sqlite.Tests/Query/SqlitePersistenceIdsSpec.cs @@ -5,6 +5,8 @@ // //----------------------------------------------------------------------- +using System; +using Akka.Actor; using Akka.Configuration; using Akka.Persistence.Query; using Akka.Persistence.Query.Sql; @@ -16,24 +18,47 @@ namespace Akka.Persistence.Sqlite.Tests.Query { public class SqlitePersistenceIdsSpec : PersistenceIdsSpec { - public static readonly AtomicCounter Counter = new AtomicCounter(0); + public static string ConnectionString(string type) => $"Filename=file:memdb-persistenceids-{type}-{Guid.NewGuid()}.db;Mode=Memory;Cache=Shared"; - public static Config Config(int id) => ConfigurationFactory.ParseString($@" + public static Config Config => ConfigurationFactory.ParseString($@" akka.loglevel = INFO - akka.persistence.journal.plugin = ""akka.persistence.journal.sqlite"" - akka.persistence.journal.sqlite {{ - class = ""Akka.Persistence.Sqlite.Journal.SqliteJournal, Akka.Persistence.Sqlite"" - plugin-dispatcher = ""akka.actor.default-dispatcher"" - table-name = event_journal - metadata-table-name = journal_metadata - auto-initialize = on - connection-string = ""Filename=file:memdb-journal-persistenceids-{id}.db;Mode=Memory;Cache=Shared"" - refresh-interval = 1s + akka.actor{{ + serializers{{ + persistence-tck-test=""Akka.Persistence.TCK.Serialization.TestSerializer,Akka.Persistence.TCK"" + }} + serialization-bindings {{ + ""Akka.Persistence.TCK.Serialization.TestPayload,Akka.Persistence.TCK"" = persistence-tck-test + }} + }} + akka.persistence {{ + publish-plugin-commands = on + journal {{ + plugin = ""akka.persistence.journal.sqlite"" + sqlite = {{ + class = ""Akka.Persistence.Sqlite.Journal.SqliteJournal, Akka.Persistence.Sqlite"" + plugin-dispatcher = ""akka.actor.default-dispatcher"" + table-name = event_journal + metadata-table-name = journal_metadata + auto-initialize = on + connection-string = ""{ConnectionString("journal")}"" + refresh-interval = 200ms + }} + }} + snapshot-store {{ + plugin = ""akka.persistence.snapshot-store.sqlite"" + sqlite {{ + class = ""Akka.Persistence.Sqlite.Snapshot.SqliteSnapshotStore, Akka.Persistence.Sqlite"" + plugin-dispatcher = ""akka.actor.default-dispatcher"" + table-name = snapshot_store + auto-initialize = on + connection-string = ""{ConnectionString("snapshot")}"" + }} + }} }} akka.test.single-expect-default = 10s") .WithFallback(SqlReadJournal.DefaultConfiguration()); - public SqlitePersistenceIdsSpec(ITestOutputHelper output) : base(Config(Counter.GetAndIncrement()), nameof(SqlitePersistenceIdsSpec), output) + public SqlitePersistenceIdsSpec(ITestOutputHelper output) : base(Config, nameof(SqlitePersistenceIdsSpec), output) { ReadJournal = Sys.ReadJournalFor(SqlReadJournal.Identifier); } diff --git a/src/contrib/persistence/Akka.Persistence.Sqlite/Akka.Persistence.Sqlite.csproj b/src/contrib/persistence/Akka.Persistence.Sqlite/Akka.Persistence.Sqlite.csproj index 61076863c98..21c09d3f850 100644 --- a/src/contrib/persistence/Akka.Persistence.Sqlite/Akka.Persistence.Sqlite.csproj +++ b/src/contrib/persistence/Akka.Persistence.Sqlite/Akka.Persistence.Sqlite.csproj @@ -16,7 +16,7 @@ - + diff --git a/src/core/Akka.API.Tests/CoreAPISpec.ApproveCore.approved.txt 
b/src/core/Akka.API.Tests/CoreAPISpec.ApproveCore.approved.txt index c020afa03b5..64588a5c5dc 100644 --- a/src/core/Akka.API.Tests/CoreAPISpec.ApproveCore.approved.txt +++ b/src/core/Akka.API.Tests/CoreAPISpec.ApproveCore.approved.txt @@ -3891,6 +3891,7 @@ namespace Akka.Pattern public bool IsClosed { get; } public bool IsHalfOpen { get; } public bool IsOpen { get; } + public System.Exception LastCaughtException { get; } public int MaxFailures { get; } public System.TimeSpan ResetTimeout { get; } public static Akka.Pattern.CircuitBreaker Create(int maxFailures, System.TimeSpan callTimeout, System.TimeSpan resetTimeout) { } @@ -3917,9 +3918,14 @@ namespace Akka.Pattern public class OpenCircuitException : Akka.Actor.AkkaException { public OpenCircuitException() { } + public OpenCircuitException(System.Exception cause) { } public OpenCircuitException(string message) { } public OpenCircuitException(string message, System.Exception cause) { } } + public class UserCalledFailException : Akka.Actor.AkkaException + { + public UserCalledFailException() { } + } } namespace Akka.Routing { diff --git a/src/core/Akka.API.Tests/CoreAPISpec.ApprovePersistenceQuery.approved.txt b/src/core/Akka.API.Tests/CoreAPISpec.ApprovePersistenceQuery.approved.txt index 7587ac7a224..abd84a1099e 100644 --- a/src/core/Akka.API.Tests/CoreAPISpec.ApprovePersistenceQuery.approved.txt +++ b/src/core/Akka.API.Tests/CoreAPISpec.ApprovePersistenceQuery.approved.txt @@ -19,6 +19,10 @@ namespace Akka.Persistence.Query { Akka.Streams.Dsl.Source AllEvents(Akka.Persistence.Query.Offset offset); } + public interface ICurrentAllEventsQuery : Akka.Persistence.Query.IReadJournal + { + Akka.Streams.Dsl.Source CurrentAllEvents(Akka.Persistence.Query.Offset offset); + } public interface ICurrentEventsByPersistenceIdQuery : Akka.Persistence.Query.IReadJournal { Akka.Streams.Dsl.Source CurrentEventsByPersistenceId(string persistenceId, long fromSequenceNr, long toSequenceNr); diff --git a/src/core/Akka.API.Tests/CoreAPISpec.ApprovePersistenceSqlCommon.approved.txt b/src/core/Akka.API.Tests/CoreAPISpec.ApprovePersistenceSqlCommon.approved.txt index 464f5d16b11..cd950333acd 100644 --- a/src/core/Akka.API.Tests/CoreAPISpec.ApprovePersistenceSqlCommon.approved.txt +++ b/src/core/Akka.API.Tests/CoreAPISpec.ApprovePersistenceSqlCommon.approved.txt @@ -39,11 +39,12 @@ namespace Akka.Persistence.Sql.Common.Journal protected System.Data.Common.DbCommand GetCommand(System.Data.Common.DbConnection connection, string sql) { } public virtual System.Threading.Tasks.Task InsertBatchAsync(System.Data.Common.DbConnection connection, System.Threading.CancellationToken cancellationToken, Akka.Persistence.Sql.Common.Journal.WriteJournalBatch write) { } protected virtual Akka.Persistence.IPersistentRepresentation ReadEvent(System.Data.Common.DbDataReader reader) { } - public System.Threading.Tasks.Task SelectAllEventsAsync(System.Data.Common.DbConnection connection, System.Threading.CancellationToken cancellationToken, long fromOffset, long max, System.Action callback, System.Action completeCallback) { } - public virtual System.Threading.Tasks.Task> SelectAllPersistenceIdsAsync(System.Data.Common.DbConnection connection, System.Threading.CancellationToken cancellationToken) { } + public System.Threading.Tasks.Task SelectAllEventsAsync(System.Data.Common.DbConnection connection, System.Threading.CancellationToken cancellationToken, long fromOffset, long toOffset, long max, System.Action callback) { } + public virtual System.Threading.Tasks.Task> 
SelectAllPersistenceIdsAsync(System.Data.Common.DbConnection connection, System.Threading.CancellationToken cancellationToken, long offset) { } public virtual System.Threading.Tasks.Task SelectByPersistenceIdAsync(System.Data.Common.DbConnection connection, System.Threading.CancellationToken cancellationToken, string persistenceId, long fromSequenceNr, long toSequenceNr, long max, System.Action callback) { } public virtual System.Threading.Tasks.Task SelectByTagAsync(System.Data.Common.DbConnection connection, System.Threading.CancellationToken cancellationToken, string tag, long fromOffset, long toOffset, long max, System.Action callback) { } public virtual System.Threading.Tasks.Task SelectHighestSequenceNrAsync(System.Data.Common.DbConnection connection, System.Threading.CancellationToken cancellationToken, string persistenceId) { } + public virtual System.Threading.Tasks.Task SelectHighestSequenceNrAsync(System.Data.Common.DbConnection connection, System.Threading.CancellationToken cancellationToken) { } protected virtual void WriteEvent(System.Data.Common.DbCommand command, Akka.Persistence.IPersistentRepresentation e, System.Collections.Immutable.IImmutableSet tags) { } } public sealed class AllPersistenceIds @@ -82,15 +83,15 @@ namespace Akka.Persistence.Sql.Common.Journal protected const int SequenceNrIndex = 1; protected const int SerializerIdIndex = 6; protected BatchingSqlJournal(Akka.Persistence.Sql.Common.Journal.BatchingSqlJournalSetup setup) { } - protected virtual string AllEventsSql { get; set; } + protected virtual string AllEventsSql { get; } protected virtual string AllPersistenceIdsSql { get; } protected virtual string ByPersistenceIdSql { get; } protected virtual string ByTagSql { get; } protected virtual string DeleteBatchSql { get; } - protected bool HasAllIdsSubscribers { get; } + protected bool HasNewEventsSubscribers { get; } protected bool HasPersistenceIdSubscribers { get; } protected bool HasTagSubscribers { get; } - protected virtual string HighestOrderingSql { get; set; } + protected virtual string HighestOrderingSql { get; } protected virtual string HighestSequenceNrSql { get; } protected abstract System.Collections.Immutable.ImmutableDictionary Initializers { get; } protected virtual string InsertEventSql { get; } @@ -103,11 +104,12 @@ namespace Akka.Persistence.Sql.Common.Journal protected virtual System.Threading.Tasks.Task HandleReplayAllMessages(Akka.Persistence.Sql.Common.Journal.ReplayAllEvents req, TCommand command) { } protected virtual System.Threading.Tasks.Task HandleReplayMessages(Akka.Persistence.ReplayMessages req, TCommand command, Akka.Actor.IActorContext context) { } protected virtual System.Threading.Tasks.Task HandleReplayTaggedMessages(Akka.Persistence.Sql.Common.Journal.ReplayTaggedMessages req, TCommand command) { } - protected void NotifyNewPersistenceIdAdded(string persistenceId) { } + protected virtual System.Threading.Tasks.Task HandleSelectCurrentPersistenceIds(Akka.Persistence.Sql.Common.Journal.SelectCurrentPersistenceIds message, TCommand command) { } protected virtual void OnBufferOverflow(Akka.Persistence.IJournalMessage request) { } protected override void PreStart() { } protected virtual Akka.Persistence.IPersistentRepresentation ReadEvent(System.Data.Common.DbDataReader reader) { } protected virtual System.Threading.Tasks.Task ReadHighestSequenceNr(string persistenceId, TCommand command) { } + protected virtual System.Threading.Tasks.Task ReadHighestSequenceNr(TCommand command) { } protected virtual bool Receive(object 
message) { } protected virtual void WriteEvent(TCommand command, Akka.Persistence.IPersistentRepresentation persistent, string tags = "") { } } @@ -122,7 +124,8 @@ namespace Akka.Persistence.Sql.Common.Journal public sealed class CurrentPersistenceIds : Akka.Event.IDeadLetterSuppression { public readonly System.Collections.Generic.IEnumerable AllPersistenceIds; - public CurrentPersistenceIds(System.Collections.Generic.IEnumerable allPersistenceIds) { } + public readonly long HighestOrderingNumber; + public CurrentPersistenceIds(System.Collections.Generic.IEnumerable allPersistenceIds, long highestOrderingNumber) { } } public sealed class DefaultTimestampProvider : Akka.Persistence.Sql.Common.Journal.ITimestampProvider { @@ -165,11 +168,12 @@ namespace Akka.Persistence.Sql.Common.Journal System.Threading.Tasks.Task CreateTablesAsync(System.Data.Common.DbConnection connection, System.Threading.CancellationToken cancellationToken); System.Threading.Tasks.Task DeleteBatchAsync(System.Data.Common.DbConnection connection, System.Threading.CancellationToken cancellationToken, string persistenceId, long toSequenceNr); System.Threading.Tasks.Task InsertBatchAsync(System.Data.Common.DbConnection connection, System.Threading.CancellationToken cancellationToken, Akka.Persistence.Sql.Common.Journal.WriteJournalBatch write); - System.Threading.Tasks.Task SelectAllEventsAsync(System.Data.Common.DbConnection connection, System.Threading.CancellationToken cancellationToken, long fromOffset, long max, System.Action callback, System.Action completeCallback); - System.Threading.Tasks.Task> SelectAllPersistenceIdsAsync(System.Data.Common.DbConnection connection, System.Threading.CancellationToken cancellationToken); + System.Threading.Tasks.Task SelectAllEventsAsync(System.Data.Common.DbConnection connection, System.Threading.CancellationToken cancellationToken, long fromOffset, long toOffset, long max, System.Action callback); + System.Threading.Tasks.Task> SelectAllPersistenceIdsAsync(System.Data.Common.DbConnection connection, System.Threading.CancellationToken cancellationToken, long offset); System.Threading.Tasks.Task SelectByPersistenceIdAsync(System.Data.Common.DbConnection connection, System.Threading.CancellationToken cancellationToken, string persistenceId, long fromSequenceNr, long toSequenceNr, long max, System.Action callback); System.Threading.Tasks.Task SelectByTagAsync(System.Data.Common.DbConnection connection, System.Threading.CancellationToken cancellationToken, string tag, long fromOffset, long toOffset, long max, System.Action callback); System.Threading.Tasks.Task SelectHighestSequenceNrAsync(System.Data.Common.DbConnection connection, System.Threading.CancellationToken cancellationToken, string persistenceId); + System.Threading.Tasks.Task SelectHighestSequenceNrAsync(System.Data.Common.DbConnection connection, System.Threading.CancellationToken cancellationToken); } public interface ISubscriptionCommand { } public interface ITimestampProvider @@ -191,10 +195,9 @@ namespace Akka.Persistence.Sql.Common.Journal public readonly System.DateTime Timestamp; public JournalEntry(string persistenceId, long sequenceNr, bool isDeleted, string manifest, System.DateTime timestamp, object payload) { } } - public sealed class PersistenceIdAdded : Akka.Event.IDeadLetterSuppression + public sealed class NewEventAppended : Akka.Event.IDeadLetterSuppression { - public readonly string PersistenceId; - public PersistenceIdAdded(string persistenceId) { } + public static 
Akka.Persistence.Sql.Common.Journal.NewEventAppended Instance; } public class QueryConfiguration { @@ -222,7 +225,8 @@ namespace Akka.Persistence.Sql.Common.Journal public readonly long FromOffset; public readonly long Max; public readonly Akka.Actor.IActorRef ReplyTo; - public ReplayAllEvents(long fromOffset, long max, Akka.Actor.IActorRef replyTo) { } + public readonly long ToOffset; + public ReplayAllEvents(long fromOffset, long toOffset, long max, Akka.Actor.IActorRef replyTo) { } } public sealed class ReplayFilterSettings { @@ -243,10 +247,6 @@ namespace Akka.Persistence.Sql.Common.Journal public readonly long ToOffset; public ReplayTaggedMessages(long fromOffset, long toOffset, long max, string tag, Akka.Actor.IActorRef replyTo) { } } - public sealed class ReplayedAllEvents - { - public static Akka.Persistence.Sql.Common.Journal.ReplayedAllEvents Instance; - } public sealed class ReplayedEvent : Akka.Actor.INoSerializationVerificationNeeded, Akka.Event.IDeadLetterSuppression { public readonly long Offset; @@ -260,18 +260,23 @@ namespace Akka.Persistence.Sql.Common.Journal public readonly string Tag; public ReplayedTaggedMessage(Akka.Persistence.IPersistentRepresentation persistent, string tag, long offset) { } } + public sealed class SelectCurrentPersistenceIds : Akka.Actor.INoSerializationVerificationNeeded, Akka.Persistence.IJournalMessage, Akka.Persistence.IJournalRequest, Akka.Persistence.IPersistenceMessage + { + public SelectCurrentPersistenceIds(long offset, Akka.Actor.IActorRef replyTo) { } + public long Offset { get; } + public Akka.Actor.IActorRef ReplyTo { get; } + } public abstract class SqlJournal : Akka.Persistence.Journal.AsyncWriteJournal, Akka.Actor.IActorStash, Akka.Actor.IWithUnboundedStash, Akka.Dispatch.IRequiresMessageQueue { protected SqlJournal(Akka.Configuration.Config journalConfig) { } - public System.Collections.Generic.IEnumerable AllPersistenceIds { get; } - protected bool HasAllPersistenceIdSubscribers { get; } + protected bool HasNewEventSubscribers { get; } protected bool HasPersistenceIdSubscribers { get; } protected bool HasTagSubscribers { get; } protected abstract string JournalConfigPath { get; } protected Akka.Event.ILoggingAdapter Log { get; } public abstract Akka.Persistence.Sql.Common.Journal.IJournalQueryExecutor QueryExecutor { get; } public Akka.Actor.IStash Stash { get; set; } - public void AddAllPersistenceIdSubscriber(Akka.Actor.IActorRef subscriber) { } + public void AddNewEventsSubscriber(Akka.Actor.IActorRef subscriber) { } public void AddPersistenceIdSubscriber(Akka.Actor.IActorRef subscriber, string persistenceId) { } public void AddTagSubscriber(Akka.Actor.IActorRef subscriber, string tag) { } protected abstract System.Data.Common.DbConnection CreateDbConnection(string connectionString); @@ -287,12 +292,16 @@ namespace Akka.Persistence.Sql.Common.Journal protected virtual System.Threading.Tasks.Task ReplayAllEventsAsync(Akka.Persistence.Sql.Common.Journal.ReplayAllEvents replay) { } public override System.Threading.Tasks.Task ReplayMessagesAsync(Akka.Actor.IActorContext context, string persistenceId, long fromSequenceNr, long toSequenceNr, long max, System.Action recoveryCallback) { } protected virtual System.Threading.Tasks.Task ReplayTaggedMessagesAsync(Akka.Persistence.Sql.Common.Journal.ReplayTaggedMessages replay) { } + [return: System.Runtime.CompilerServices.TupleElementNamesAttribute(new string[] { + "Ids", + "LastOrdering"})] + protected virtual System.Threading.Tasks.Task, long>> SelectAllPersistenceIdsAsync(long offset) 
{ } protected bool WaitingForInitialization(object message) { } protected override System.Threading.Tasks.Task> WriteMessagesAsync(System.Collections.Generic.IEnumerable messages) { } } - public sealed class SubscribeAllPersistenceIds : Akka.Persistence.Sql.Common.Journal.ISubscriptionCommand + public sealed class SubscribeNewEvents : Akka.Persistence.Sql.Common.Journal.ISubscriptionCommand { - public static readonly Akka.Persistence.Sql.Common.Journal.SubscribeAllPersistenceIds Instance; + public static Akka.Persistence.Sql.Common.Journal.SubscribeNewEvents Instance; } public sealed class SubscribePersistenceId : Akka.Persistence.Sql.Common.Journal.ISubscriptionCommand { diff --git a/src/core/Akka.API.Tests/CoreAPISpec.ApprovePersistenceSqlCommonQuery.approved.txt b/src/core/Akka.API.Tests/CoreAPISpec.ApprovePersistenceSqlCommonQuery.approved.txt index 1346c68f938..7b297cf6c9c 100644 --- a/src/core/Akka.API.Tests/CoreAPISpec.ApprovePersistenceSqlCommonQuery.approved.txt +++ b/src/core/Akka.API.Tests/CoreAPISpec.ApprovePersistenceSqlCommonQuery.approved.txt @@ -3,11 +3,12 @@ [assembly: System.Runtime.Versioning.TargetFrameworkAttribute(".NETStandard,Version=v2.0", FrameworkDisplayName="")] namespace Akka.Persistence.Query.Sql { - public class SqlReadJournal : Akka.Persistence.Query.IAllEventsQuery, Akka.Persistence.Query.ICurrentEventsByPersistenceIdQuery, Akka.Persistence.Query.ICurrentEventsByTagQuery, Akka.Persistence.Query.ICurrentPersistenceIdsQuery, Akka.Persistence.Query.IEventsByPersistenceIdQuery, Akka.Persistence.Query.IEventsByTagQuery, Akka.Persistence.Query.IPersistenceIdsQuery, Akka.Persistence.Query.IReadJournal + public class SqlReadJournal : Akka.Persistence.Query.IAllEventsQuery, Akka.Persistence.Query.ICurrentAllEventsQuery, Akka.Persistence.Query.ICurrentEventsByPersistenceIdQuery, Akka.Persistence.Query.ICurrentEventsByTagQuery, Akka.Persistence.Query.ICurrentPersistenceIdsQuery, Akka.Persistence.Query.IEventsByPersistenceIdQuery, Akka.Persistence.Query.IEventsByTagQuery, Akka.Persistence.Query.IPersistenceIdsQuery, Akka.Persistence.Query.IReadJournal { public static string Identifier; public SqlReadJournal(Akka.Actor.ExtendedActorSystem system, Akka.Configuration.Config config) { } public Akka.Streams.Dsl.Source AllEvents(Akka.Persistence.Query.Offset offset = null) { } + public Akka.Streams.Dsl.Source CurrentAllEvents(Akka.Persistence.Query.Offset offset) { } public Akka.Streams.Dsl.Source CurrentEventsByPersistenceId(string persistenceId, long fromSequenceNr, long toSequenceNr) { } public Akka.Streams.Dsl.Source CurrentEventsByTag(string tag, Akka.Persistence.Query.Offset offset = null) { } public Akka.Streams.Dsl.Source CurrentPersistenceIds() { } diff --git a/src/core/Akka.API.Tests/CoreAPISpec.ApproveStreams.approved.txt b/src/core/Akka.API.Tests/CoreAPISpec.ApproveStreams.approved.txt index 1400a378611..f446d828a43 100644 --- a/src/core/Akka.API.Tests/CoreAPISpec.ApproveStreams.approved.txt +++ b/src/core/Akka.API.Tests/CoreAPISpec.ApproveStreams.approved.txt @@ -1,4 +1,6 @@ [assembly: System.Runtime.CompilerServices.InternalsVisibleToAttribute("Akka.Benchmarks")] +[assembly: System.Runtime.CompilerServices.InternalsVisibleToAttribute("Akka.Persistence.Query.Sql")] +[assembly: System.Runtime.CompilerServices.InternalsVisibleToAttribute("Akka.Persistence.TCK")] [assembly: System.Runtime.CompilerServices.InternalsVisibleToAttribute("Akka.Streams.TestKit")] [assembly: System.Runtime.CompilerServices.InternalsVisibleToAttribute("Akka.Streams.Tests")] [assembly: 
System.Runtime.InteropServices.ComVisibleAttribute(false)] @@ -2632,7 +2634,7 @@ namespace Akka.Streams.Implementation public class ActorSubscriptionWithCursor : Akka.Streams.Implementation.ActorSubscription, Akka.Streams.Implementation.ICursor, Reactive.Streams.ISubscription { public ActorSubscriptionWithCursor(Akka.Actor.IActorRef implementor, Reactive.Streams.ISubscriber subscriber) { } - public int Cursor { get; } + public long Cursor { get; } public bool IsActive { get; } public long TotalDemand { get; } public void Dispatch(object element) { } @@ -2750,6 +2752,12 @@ namespace Akka.Streams.Implementation protected override Akka.Streams.Stage.GraphStageLogic CreateLogic(Akka.Streams.Attributes inheritedAttributes) { } public override string ToString() { } } + public class DistinctRetainingMultiReaderBuffer : Akka.Streams.Implementation.RetainingMultiReaderBuffer + { + public DistinctRetainingMultiReaderBuffer(long initialSize, long maxSize, Akka.Streams.Implementation.ICursors cursors) { } + public override string ToString() { } + public override bool Write(T value) { } + } public sealed class EmptyModule : Akka.Streams.Implementation.Module { public static readonly Akka.Streams.Implementation.EmptyModule Instance; @@ -2962,7 +2970,7 @@ namespace Akka.Streams.Implementation public interface IActorSubscription : Reactive.Streams.ISubscription { } public interface ICursor { - int Cursor { get; set; } + long Cursor { get; set; } } public interface ICursors { @@ -3320,20 +3328,37 @@ namespace Akka.Streams.Implementation public class ResizableMultiReaderRingBuffer { protected readonly Akka.Streams.Implementation.ICursors Cursors; - public ResizableMultiReaderRingBuffer(int initialSize, int maxSize, Akka.Streams.Implementation.ICursors cursors) { } - public int CapacityLeft { get; } - public int ImmediatelyAvailable { get; } + public ResizableMultiReaderRingBuffer(long initialSize, long maxSize, Akka.Streams.Implementation.ICursors cursors) { } + public long AvailableData { get; } + public long CapacityLeft { get; } + public long ImmediatelyAvailable { get; } public bool IsEmpty { get; } - public int Length { get; } + public long Length { get; } public bool NonEmpty { get; } - protected object[] UnderlyingArray { get; } - public int Count(Akka.Streams.Implementation.ICursor cursor) { } + protected T[] UnderlyingArray { get; } + public long Count(Akka.Streams.Implementation.ICursor cursor) { } public void InitCursor(Akka.Streams.Implementation.ICursor cursor) { } public void OnCursorRemoved(Akka.Streams.Implementation.ICursor cursor) { } public T Read(Akka.Streams.Implementation.ICursor cursor) { } public override string ToString() { } public bool Write(T value) { } } + public class RetainingMultiReaderBuffer + { + protected readonly Akka.Streams.Implementation.ICursors Cursors; + public RetainingMultiReaderBuffer(long initialSize, long maxSize, Akka.Streams.Implementation.ICursors cursors) { } + public long AvailableData { get; } + protected T[] Buffer { get; } + public long CapacityLeft { get; } + public bool IsEmpty { get; } + public long Length { get; } + public long Count(Akka.Streams.Implementation.ICursor cursor) { } + public void InitCursor(Akka.Streams.Implementation.ICursor cursor) { } + public void OnCursorRemoved(Akka.Streams.Implementation.ICursor cursor) { } + public T Read(Akka.Streams.Implementation.ICursor cursor) { } + public override string ToString() { } + public virtual bool Write(T value) { } + } [Akka.Annotations.InternalApiAttribute()] public sealed class SeqStage : 
Akka.Streams.Stage.GraphStageWithMaterializedValue, System.Threading.Tasks.Task>> { diff --git a/src/core/Akka.Persistence.Query/Interfaces.cs b/src/core/Akka.Persistence.Query/Interfaces.cs index 71ba7657f0e..bbe0862b575 100644 --- a/src/core/Akka.Persistence.Query/Interfaces.cs +++ b/src/core/Akka.Persistence.Query/Interfaces.cs @@ -74,6 +74,11 @@ public interface ICurrentEventsByTagQuery : IReadJournal Source CurrentEventsByTag(string tag, Offset offset); } + public interface ICurrentAllEventsQuery : IReadJournal + { + Source CurrentAllEvents(Offset offset); + } + public interface IAllEventsQuery : IReadJournal { Source AllEvents(Offset offset); diff --git a/src/core/Akka.Persistence.TCK/Performance/JournalPerfSpec.cs b/src/core/Akka.Persistence.TCK/Performance/JournalPerfSpec.cs index e9884b27239..4bb5429fbd8 100644 --- a/src/core/Akka.Persistence.TCK/Performance/JournalPerfSpec.cs +++ b/src/core/Akka.Persistence.TCK/Performance/JournalPerfSpec.cs @@ -18,20 +18,36 @@ namespace Akka.Persistence.TestKit.Performance { + /// + /// This spec measures execution times of the basic operations that a PersistentActor provides, + /// using the provided Journal (plugin). + /// + /// It is *NOT* meant to be a comprehensive benchmark, but rather aims to help plugin developers easily determine + /// if their plugin's performance is roughly as expected. It also validates the plugin still works under "more messages" scenarios. + /// + /// In case your journal plugin needs some kind of teardown, override the `AfterAll` method (don't forget to call `base` in your overridden methods). + /// public abstract class JournalPerfSpec : Akka.TestKit.Xunit2.TestKit { - private TestProbe testProbe; - - // Number of messages sent to the PersistentActor under test for each test iteration - private const int EventsCount = 10 * 1000; - - // Number of measurement iterations each test will be run. - private const int MeasurementIterations = 10; + private readonly TestProbe testProbe; + + /// + /// Number of messages sent to the PersistentActor under test for each test iteration + /// + protected int EventsCount = 10 * 1000; + + /// + /// Number of measurement iterations each test will be run. + /// + protected int MeasurementIterations = 10; + + /// + /// Override this to customize the timeout used for ExpectMsg, in order to tune the awaits to your journal's performance + /// + protected TimeSpan ExpectDuration = TimeSpan.FromSeconds(10); private IReadOnlyList Commands => Enumerable.Range(1, EventsCount).ToList(); - private TimeSpan ExpectDuration = TimeSpan.FromSeconds(10); - protected JournalPerfSpec(Config config, string actorSystem, ITestOutputHelper output) : base(config ??
Config.Empty, actorSystem, output) { @@ -49,6 +65,9 @@ internal void FeedAndExpectLast(IActorRef actor, string mode, IReadOnlyList testProbe.ExpectMsg(commands.Last(), ExpectDuration); } + /// + /// Executes a block of code multiple times (no warm-up) + /// internal void Measure(Func msg, Action block) { var measurements = new List(MeasurementIterations); diff --git a/src/core/Akka.Persistence.TCK/Query/AllEventsSpec.cs b/src/core/Akka.Persistence.TCK/Query/AllEventsSpec.cs index 87d4115e684..54438f1bd4c 100644 --- a/src/core/Akka.Persistence.TCK/Query/AllEventsSpec.cs +++ b/src/core/Akka.Persistence.TCK/Query/AllEventsSpec.cs @@ -13,8 +13,6 @@ namespace Akka.Persistence.TCK.Query { public class AllEventsSpec : Akka.TestKit.Xunit2.TestKit { - private static Config InternalConfig = "akka.persistence.query.journal.sql.max-buffer-size = 3"; - protected ActorMaterializer Materializer { get; } protected IReadJournal ReadJournal { get; set; } @@ -23,9 +21,7 @@ protected AllEventsSpec( Config config = null, string actorSystemName = null, ITestOutputHelper output = null) - : base( - config == null ? InternalConfig : InternalConfig.WithFallback(config), - actorSystemName, output) + : base(config ?? Config.Empty, actorSystemName, output) { Materializer = Sys.Materializer(); } @@ -37,7 +33,7 @@ public void ReadJournal_should_implement_IAllEventsQuery() } [Fact] - public virtual void ReadJournal_query_AllEvents_should_find_existing_events() + public virtual void ReadJournal_query_AllEvents_should_find_new_events() { var queries = ReadJournal as IAllEventsQuery; var a = Sys.ActorOf(Query.TestActor.Props("a")); @@ -45,34 +41,27 @@ public virtual void ReadJournal_query_AllEvents_should_find_existing_events() a.Tell("hello"); ExpectMsg("hello-done"); - a.Tell("world"); - ExpectMsg("world-done"); - b.Tell("test"); - ExpectMsg("test-done"); var eventSrc = queries.AllEvents(NoOffset.Instance); var probe = eventSrc.RunWith(this.SinkProbe(), Materializer); - probe.Request(2); probe.ExpectNext(p => p.PersistenceId == "a" && p.SequenceNr == 1L && p.Event.Equals("hello")); + probe.ExpectNoMsg(TimeSpan.FromMilliseconds(100)); + + a.Tell("world"); + ExpectMsg("world-done"); + b.Tell("test"); + ExpectMsg("test-done"); + probe.ExpectNext(p => p.PersistenceId == "a" && p.SequenceNr == 2L && p.Event.Equals("world")); - probe.ExpectNoMsg(TimeSpan.FromMilliseconds(500)); + probe.ExpectNoMsg(TimeSpan.FromMilliseconds(100)); - probe.Request(2); + probe.Request(10); probe.ExpectNext(p => p.PersistenceId == "b" && p.SequenceNr == 1L && p.Event.Equals("test")); - probe.ExpectComplete(); + probe.Cancel(); } - [Fact] - public virtual void ReadJournal_query_AllEvents_should_complete_when_no_events() - { - var queries = ReadJournal as IAllEventsQuery; - - var eventSrc = queries.AllEvents(NoOffset.Instance); - var probe = eventSrc.RunWith(this.SinkProbe(), Materializer); - probe.Request(2).ExpectComplete(); - } [Fact] public virtual void ReadJournal_query_AllEvents_should_find_events_from_offset_exclusive() @@ -81,54 +70,42 @@ public virtual void ReadJournal_query_AllEvents_should_find_events_from_offset_e var a = Sys.ActorOf(Query.TestActor.Props("a")); var b = Sys.ActorOf(Query.TestActor.Props("b")); - - a.Tell("hello"); - ExpectMsg("hello-done"); - a.Tell("world"); - ExpectMsg("world-done"); - b.Tell("test"); - ExpectMsg("test-done"); + var c = Sys.ActorOf(Query.TestActor.Props("c")); + + a.Tell("keep"); + ExpectMsg("keep-done"); + a.Tell("calm"); + ExpectMsg("calm-done"); + b.Tell("and"); + ExpectMsg("and-done"); + 
a.Tell("keep"); + ExpectMsg("keep-done"); + a.Tell("streaming"); + ExpectMsg("streaming-done"); var eventSrc1 = queries.AllEvents(NoOffset.Instance); var probe1 = eventSrc1.RunWith(this.SinkProbe(), Materializer); - probe1.Request(2); - probe1.ExpectNext(p => p.PersistenceId == "a" && p.SequenceNr == 1L && p.Event.Equals("hello")); - var offs = probe1.ExpectNext(p => p.PersistenceId == "a" && p.SequenceNr == 2L && p.Event.Equals("world")).Offset; + probe1.Request(4); + probe1.ExpectNext(p => p.PersistenceId == "a" && p.SequenceNr == 1L && p.Event.Equals("keep")); + probe1.ExpectNext(p => p.PersistenceId == "a" && p.SequenceNr == 2L && p.Event.Equals("calm")); + probe1.ExpectNext(p => p.PersistenceId == "b" && p.SequenceNr == 1L && p.Event.Equals("and")); + var offs = probe1.ExpectNext(p => p.PersistenceId == "a" && p.SequenceNr == 3L && p.Event.Equals("keep")).Offset; probe1.Cancel(); var eventSrc2 = queries.AllEvents(offs); var probe2 = eventSrc2.RunWith(this.SinkProbe(), Materializer); probe2.Request(10); - // hello and world is not included, since exclusive offset - probe2.ExpectNext(p => p.PersistenceId == "b" && p.SequenceNr == 1L && p.Event.Equals("test")); - probe2.Cancel(); - } - [Fact] - public virtual void ReadJournal_query_AllEvents_should_see_all_150_events() - { - var queries = ReadJournal as IAllEventsQuery; - var a = Sys.ActorOf(Query.TestActor.Props("a")); + b.Tell("new"); + ExpectMsg("new-done"); + c.Tell("events"); + ExpectMsg("events-done"); - for (var i = 0; i < 150; ++i) - { - a.Tell("a green apple"); - ExpectMsg("a green apple-done"); - } - - var greenSrc = queries.AllEvents(NoOffset.Instance); - var probe = greenSrc.RunWith(this.SinkProbe(), Materializer); - probe.Request(150); - for (var i = 0; i < 150; ++i) - { - var idx = i + 1; - probe.ExpectNext(p => - p.PersistenceId == "a" && p.SequenceNr == idx && p.Event.Equals("a green apple")); - Output.WriteLine(idx.ToString()); - } - - probe.ExpectComplete(); - probe.ExpectNoMsg(TimeSpan.FromMilliseconds(500)); + // everything before "streaming" are not included, since exclusive offset + probe2.ExpectNext(p => p.PersistenceId == "a" && p.SequenceNr == 4L && p.Event.Equals("streaming")); + probe2.ExpectNext(p => p.PersistenceId == "b" && p.SequenceNr == 2L && p.Event.Equals("new")); + probe2.ExpectNext(p => p.PersistenceId == "c" && p.SequenceNr == 1L && p.Event.Equals("events")); + probe2.Cancel(); } } } diff --git a/src/core/Akka.Persistence.TCK/Query/CurrentAllEventsSpec.cs b/src/core/Akka.Persistence.TCK/Query/CurrentAllEventsSpec.cs new file mode 100644 index 00000000000..fb07f87fca7 --- /dev/null +++ b/src/core/Akka.Persistence.TCK/Query/CurrentAllEventsSpec.cs @@ -0,0 +1,157 @@ +using System; +using System.Collections.Generic; +using System.Text; +using Akka.Actor; +using Akka.Configuration; +using Akka.Persistence.Query; +using Akka.Streams; +using Akka.Streams.TestKit; +using Xunit; +using Xunit.Abstractions; + +namespace Akka.Persistence.TCK.Query +{ + public class CurrentAllEventsSpec : Akka.TestKit.Xunit2.TestKit + { + private static readonly Config InternalConfig = "akka.persistence.query.journal.sql.max-buffer-size = 3"; + + protected ActorMaterializer Materializer { get; } + + protected IReadJournal ReadJournal { get; set; } + + protected CurrentAllEventsSpec( + Config config = null, + string actorSystemName = null, + ITestOutputHelper output = null) + : base( + config == null ? 
InternalConfig : InternalConfig.WithFallback(config), + actorSystemName, output) + { + Materializer = Sys.Materializer(); + } + + [Fact] + public void ReadJournal_should_implement_ICurrentAllEventsQuery() + { + Assert.IsAssignableFrom(ReadJournal); + } + + [Fact] + public virtual void ReadJournal_query_CurrentAllEvents_should_find_existing_events() + { + var queries = ReadJournal as ICurrentAllEventsQuery; + var a = Sys.ActorOf(Query.TestActor.Props("a")); + var b = Sys.ActorOf(Query.TestActor.Props("b")); + + a.Tell("hello"); + ExpectMsg("hello-done"); + a.Tell("world"); + ExpectMsg("world-done"); + b.Tell("test"); + ExpectMsg("test-done"); + + var eventSrc = queries.CurrentAllEvents(NoOffset.Instance); + var probe = eventSrc.RunWith(this.SinkProbe(), Materializer); + + probe.Request(2); + probe.ExpectNext(p => p.PersistenceId == "a" && p.SequenceNr == 1L && p.Event.Equals("hello")); + probe.ExpectNext(p => p.PersistenceId == "a" && p.SequenceNr == 2L && p.Event.Equals("world")); + + probe.ExpectNoMsg(TimeSpan.FromMilliseconds(500)); + + probe.Request(2); + probe.ExpectNext(p => p.PersistenceId == "b" && p.SequenceNr == 1L && p.Event.Equals("test")); + probe.ExpectComplete(); + } + + [Fact] + public virtual void ReadJournal_query_AllEvents_should_complete_when_no_events() + { + var queries = ReadJournal as ICurrentAllEventsQuery; + + var eventSrc = queries.CurrentAllEvents(NoOffset.Instance); + var probe = eventSrc.RunWith(this.SinkProbe(), Materializer); + probe.Request(2).ExpectComplete(); + } + + [Fact] + public virtual void ReadJournal_query_CurrentAllEvents_should_not_see_new_events_after_complete() + { + var queries = ReadJournal as ICurrentAllEventsQuery; + ReadJournal_query_CurrentAllEvents_should_find_existing_events(); + + var c = Sys.ActorOf(Query.TestActor.Props("c")); + + var eventSrc = queries.CurrentAllEvents(NoOffset.Instance); + var probe = eventSrc.RunWith(this.SinkProbe(), Materializer); + probe.Request(2); + probe.ExpectNext(p => p.PersistenceId == "a" && p.SequenceNr == 1L && p.Event.Equals("hello")); + probe.ExpectNext(p => p.PersistenceId == "a" && p.SequenceNr == 2L && p.Event.Equals("world")); + probe.ExpectNoMsg(TimeSpan.FromMilliseconds(100)); + + c.Tell("a green cucumber"); + ExpectMsg("a green cucumber-done"); + + probe.ExpectNoMsg(TimeSpan.FromMilliseconds(100)); + probe.Request(5); + probe.ExpectNext(p => p.PersistenceId == "b" && p.SequenceNr == 1L && p.Event.Equals("test")); + probe.ExpectComplete(); // green cucumber not seen + } + + [Fact] + public virtual void ReadJournal_query_CurrentAllEvents_should_find_events_from_offset_exclusive() + { + var queries = ReadJournal as ICurrentAllEventsQuery; + + var a = Sys.ActorOf(Query.TestActor.Props("a")); + var b = Sys.ActorOf(Query.TestActor.Props("b")); + + a.Tell("hello"); + ExpectMsg("hello-done"); + a.Tell("world"); + ExpectMsg("world-done"); + b.Tell("test"); + ExpectMsg("test-done"); + + var eventSrc1 = queries.CurrentAllEvents(NoOffset.Instance); + var probe1 = eventSrc1.RunWith(this.SinkProbe(), Materializer); + probe1.Request(2); + probe1.ExpectNext(p => p.PersistenceId == "a" && p.SequenceNr == 1L && p.Event.Equals("hello")); + var offs = probe1.ExpectNext(p => p.PersistenceId == "a" && p.SequenceNr == 2L && p.Event.Equals("world")).Offset; + probe1.Cancel(); + + var eventSrc2 = queries.CurrentAllEvents(offs); + var probe2 = eventSrc2.RunWith(this.SinkProbe(), Materializer); + probe2.Request(10); + // hello and world is not included, since exclusive offset + probe2.ExpectNext(p => p.PersistenceId == 
"b" && p.SequenceNr == 1L && p.Event.Equals("test")); + probe2.Cancel(); + } + + [Fact] + public virtual void ReadJournal_query_CurrentAllEvents_should_see_all_150_events() + { + var queries = ReadJournal as ICurrentAllEventsQuery; + var a = Sys.ActorOf(Query.TestActor.Props("a")); + + for (var i = 0; i < 150; ++i) + { + a.Tell("a green apple"); + ExpectMsg("a green apple-done"); + } + + var greenSrc = queries.CurrentAllEvents(NoOffset.Instance); + var probe = greenSrc.RunWith(this.SinkProbe(), Materializer); + probe.Request(150); + for (var i = 0; i < 150; ++i) + { + var idx = i + 1; + probe.ExpectNext(p => + p.PersistenceId == "a" && p.SequenceNr == idx && p.Event.Equals("a green apple")); + } + + probe.ExpectComplete(); + probe.ExpectNoMsg(TimeSpan.FromMilliseconds(500)); + } + } +} diff --git a/src/core/Akka.Persistence.TCK/Query/CurrentPersistenceIdsSpec.cs b/src/core/Akka.Persistence.TCK/Query/CurrentPersistenceIdsSpec.cs index e0014a27375..9d2b432839d 100644 --- a/src/core/Akka.Persistence.TCK/Query/CurrentPersistenceIdsSpec.cs +++ b/src/core/Akka.Persistence.TCK/Query/CurrentPersistenceIdsSpec.cs @@ -6,6 +6,7 @@ //----------------------------------------------------------------------- using System; +using System.Collections.Generic; using Akka.Actor; using Akka.Configuration; using Akka.Persistence.Query; @@ -78,16 +79,17 @@ public virtual void ReadJournal_query_CurrentPersistenceIds_should_not_see_new_e var greenSrc = queries.CurrentPersistenceIds(); var probe = greenSrc.RunWith(this.SinkProbe(), Materializer); + var set = new List { "a", "b", "c" }; probe.Request(2) - .ExpectNext("a") - .ExpectNext("c") + .ExpectNextWithinSet(set) + .ExpectNextWithinSet(set) .ExpectNoMsg(TimeSpan.FromMilliseconds(100)); Setup("d", 1); probe.ExpectNoMsg(TimeSpan.FromMilliseconds(100)); probe.Request(5) - .ExpectNext("b") + .ExpectNextWithinSet(set) .ExpectComplete(); } diff --git a/src/core/Akka.Persistence.TCK/Query/PersistenceIdsSpec.cs b/src/core/Akka.Persistence.TCK/Query/PersistenceIdsSpec.cs index 1a288c9c012..7017557cc0c 100644 --- a/src/core/Akka.Persistence.TCK/Query/PersistenceIdsSpec.cs +++ b/src/core/Akka.Persistence.TCK/Query/PersistenceIdsSpec.cs @@ -6,12 +6,17 @@ //----------------------------------------------------------------------- using System; +using System.Collections.Generic; +using System.Reflection; +using System.Threading.Tasks; using Akka.Actor; using Akka.Configuration; using Akka.Persistence.Query; using Akka.Streams; using Akka.Streams.TestKit; +using Akka.TestKit; using Akka.Util.Internal; +using Reactive.Streams; using Xunit; using Xunit.Abstractions; @@ -22,11 +27,17 @@ public abstract class PersistenceIdsSpec : Akka.TestKit.Xunit2.TestKit protected ActorMaterializer Materializer { get; } protected IReadJournal ReadJournal { get; set; } + protected IActorRef SnapshotStore => Extension.SnapshotStoreFor(null); + protected PersistenceExtension Extension { get; } + + private readonly TestProbe _senderProbe; protected PersistenceIdsSpec(Config config = null, string actorSystemName = null, ITestOutputHelper output = null) : base(config ?? 
Config.Empty, actorSystemName, output) { Materializer = Sys.Materializer(); + Extension = Persistence.Instance.Apply(Sys as ExtendedActorSystem); + _senderProbe = CreateTestProbe(); } [Fact] @@ -77,6 +88,36 @@ public virtual void ReadJournal_AllPersistenceIds_should_find_new_events_after_d }); } + [Fact] + public virtual void ReadJournal_AllPersistenceIds_should_find_events_on_both_journal_and_snapshot_store() + { + var queries = ReadJournal.AsInstanceOf(); + + WriteSnapshot("a", 2); + WriteSnapshot("b", 2); + WriteSnapshot("c", 2); + Setup("d", 2); + Setup("e", 2); + Setup("f", 2); + + var source = queries.PersistenceIds(); + var probe = source.RunWith(this.SinkProbe(), Materializer); + + var expectedUniqueList = new List(){"a", "b", "c", "d", "e", "f"}; + + probe.Within(TimeSpan.FromSeconds(10), () => probe.Request(3) + .ExpectNextWithinSet(expectedUniqueList) + .ExpectNextWithinSet(expectedUniqueList) + .ExpectNextWithinSet(expectedUniqueList) + .ExpectNoMsg(TimeSpan.FromMilliseconds(200))); + + probe.Within(TimeSpan.FromSeconds(10), () => probe.Request(3) + .ExpectNextWithinSet(expectedUniqueList) + .ExpectNextWithinSet(expectedUniqueList) + .ExpectNextWithinSet(expectedUniqueList) + .ExpectNoMsg(TimeSpan.FromMilliseconds(200))); + } + [Fact] public virtual void ReadJournal_AllPersistenceIds_should_only_deliver_what_requested_if_there_is_more_in_the_buffer() { @@ -112,7 +153,7 @@ public virtual void ReadJournal_AllPersistenceIds_should_deliver_persistenceId_o { var queries = ReadJournal.AsInstanceOf(); - Setup("p", 1000); + Setup("p", 10); var source = queries.PersistenceIds(); var probe = source.RunWith(this.SinkProbe(), Materializer); @@ -121,20 +162,55 @@ public virtual void ReadJournal_AllPersistenceIds_should_deliver_persistenceId_o { return probe.Request(10) .ExpectNext("p") - .ExpectNoMsg(TimeSpan.FromMilliseconds(1000)); + .ExpectNoMsg(TimeSpan.FromMilliseconds(200)); }); - Setup("q", 1000); + Setup("q", 10); probe.Within(TimeSpan.FromSeconds(10), () => { return probe.Request(10) .ExpectNext("q") - .ExpectNoMsg(TimeSpan.FromMilliseconds(1000)); + .ExpectNoMsg(TimeSpan.FromMilliseconds(200)); }); } - private IActorRef Setup(string persistenceId, int n) + [Fact] + public virtual async Task ReadJournal_should_deallocate_AllPersistenceIds_publisher_when_the_last_subscriber_left() + { + var journal = ReadJournal.AsInstanceOf(); + + Setup("a", 1); + Setup("b", 1); + + var source = journal.PersistenceIds(); + var probe = source.RunWith(this.SinkProbe(), Materializer); + var probe2 = source.RunWith(this.SinkProbe(), Materializer); + + var fieldInfo = journal.GetType().GetField("_persistenceIdsPublisher", BindingFlags.NonPublic | BindingFlags.Instance); + Assert.True(fieldInfo != null); + + // Assert that publisher is running. 
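(Editor's aside on the feature these TCK specs verify.) The new PersistenceIds tests exercise the live query that applications consume through Akka.Persistence.Query. A minimal application-side sketch, assuming the SQL read journal plugin and its default configuration path (the system name and printed output are placeholders), might look like this:

using System;
using Akka.Actor;
using Akka.Persistence.Query;
using Akka.Persistence.Query.Sql;
using Akka.Streams;

public static class PersistenceIdsQueryExample
{
    public static void Run()
    {
        var system = ActorSystem.Create("example");
        var materializer = system.Materializer();

        // Resolve the read journal through its configuration path.
        var readJournal = PersistenceQuery.Get(system)
            .ReadJournalFor<SqlReadJournal>(SqlReadJournal.Identifier);

        // The live PersistenceIds query now reflects ids written by any node, not only the local one.
        readJournal.PersistenceIds()
            .RunForeach(id => Console.WriteLine($"persistence id: {id}"), materializer);
    }
}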
+ probe.Within(TimeSpan.FromSeconds(10), () => probe.Request(10) + .ExpectNextUnordered("a", "b") + .ExpectNoMsg(TimeSpan.FromMilliseconds(200))); + + probe.Cancel(); + + // Assert that publisher is still alive when it still have a subscriber + Assert.True(fieldInfo.GetValue(journal) is IPublisher); + + probe2.Within(TimeSpan.FromSeconds(10), () => probe2.Request(4) + .ExpectNextUnordered("a", "b") + .ExpectNoMsg(TimeSpan.FromMilliseconds(200))); + + // Assert that publisher is de-allocated when the last subscriber left + probe2.Cancel(); + await Task.Delay(400); + Assert.True(fieldInfo.GetValue(journal) is null); + } + + protected IActorRef Setup(string persistenceId, int n) { var pref = Sys.ActorOf(Query.TestActor.Props(persistenceId)); for (int i = 1; i <= n; i++) @@ -146,6 +222,23 @@ private IActorRef Setup(string persistenceId, int n) return pref; } + protected IActorRef WriteSnapshot(string persistenceId, int n) + { + var pref = Sys.ActorOf(Query.TestActor.Props(persistenceId)); + for (var i = 1; i <= n; i++) + { + pref.Tell($"{persistenceId}-{i}"); + ExpectMsg($"{persistenceId}-{i}-done"); + } + + var metadata = new SnapshotMetadata(persistenceId, n + 10); + SnapshotStore.Tell(new SaveSnapshot(metadata, $"s-{n}"), _senderProbe.Ref); + _senderProbe.ExpectMsg(); + + return pref; + } + + protected override void Dispose(bool disposing) { Materializer.Dispose(); diff --git a/src/core/Akka.Remote/RemoteActorRefProvider.cs b/src/core/Akka.Remote/RemoteActorRefProvider.cs index 11cab8191db..54e8e7938de 100644 --- a/src/core/Akka.Remote/RemoteActorRefProvider.cs +++ b/src/core/Akka.Remote/RemoteActorRefProvider.cs @@ -698,7 +698,9 @@ private class RemotingTerminator : FSM, IRequiresMes public RemotingTerminator(IActorRef systemGuardian) { _systemGuardian = systemGuardian; - _log = Context.GetLogger(); + + // can't use normal Logger.GetLogger(this IActorContext) here due to https://github.com/akkadotnet/akka.net/issues/4530 + _log = Logging.GetLogger(Context.System.EventStream, "remoting-terminator"); InitFSM(); } diff --git a/src/core/Akka.Streams.TestKit/TestSubscriber.cs b/src/core/Akka.Streams.TestKit/TestSubscriber.cs index 62ce71f513e..dd2c1ecdef2 100644 --- a/src/core/Akka.Streams.TestKit/TestSubscriber.cs +++ b/src/core/Akka.Streams.TestKit/TestSubscriber.cs @@ -148,9 +148,9 @@ public T ExpectNext(TimeSpan timeout) /// /// Fluent DSL. Expect a stream element. /// - public ManualProbe ExpectNext(T element) + public ManualProbe ExpectNext(T element, TimeSpan? timeout = null) { - _probe.ExpectMsg>(x => AssertEquals(x.Element, element, "Expected '{0}', but got '{1}'", element, x.Element)); + _probe.ExpectMsg>(x => AssertEquals(x.Element, element, "Expected '{0}', but got '{1}'", element, x.Element), timeout); return this; } @@ -176,9 +176,12 @@ public ManualProbe ExpectNext(T element, TimeSpan timeout) /// Fluent DSL. Expect multiple stream elements. /// public ManualProbe ExpectNext(T e1, T e2, params T[] elems) + => ExpectNext(null, e1, e2, elems); + + public ManualProbe ExpectNext(TimeSpan? timeout, T e1, T e2, params T[] elems) { var len = elems.Length + 2; - var e = ExpectNextN(len).ToArray(); + var e = ExpectNextN(len, timeout).ToArray(); AssertEquals(e.Length, len, "expected to get {0} events, but got {1}", len, e.Length); AssertEquals(e[0], e1, "expected [0] element to be {0} but found {1}", e1, e[0]); AssertEquals(e[1], e2, "expected [1] element to be {0} but found {1}", e2, e[1]); @@ -195,27 +198,42 @@ public ManualProbe ExpectNext(T e1, T e2, params T[] elems) /// FluentDSL. 
Expect multiple stream elements in arbitrary order. /// public ManualProbe ExpectNextUnordered(T e1, T e2, params T[] elems) + { + return ExpectNextUnordered(null, e1, e2, elems); + } + + public ManualProbe ExpectNextUnordered(TimeSpan? timeout, T e1, T e2, params T[] elems) { var len = elems.Length + 2; - var e = ExpectNextN(len).ToArray(); + var e = ExpectNextN(len, timeout).ToArray(); AssertEquals(e.Length, len, "expected to get {0} events, but got {1}", len, e.Length); - var expectedSet = new HashSet(elems) {e1, e2}; + var expectedSet = new HashSet(elems) { e1, e2 }; expectedSet.ExceptWith(e); Assert(expectedSet.Count == 0, "unexpected elements [{0}] found in the result", string.Join(", ", expectedSet)); return this; } + public ManualProbe ExpectNextWithinSet(List elems) + { + var next = _probe.ExpectMsg>(); + if(!elems.Contains(next.Element)) + Assert(false, "unexpected elements [{0}] found in the result", next.Element); + elems.Remove(next.Element); + _probe.Log.Info($"Received '{next.Element}' within OnNext()."); + return this; + } + /// /// Expect and return the next stream elements. /// - public IEnumerable ExpectNextN(long n) + public IEnumerable ExpectNextN(long n, TimeSpan? timeout = null) { var res = new List((int)n); for (int i = 0; i < n; i++) { - var next = _probe.ExpectMsg>(); + var next = _probe.ExpectMsg>(timeout); res.Add(next.Element); } return res; @@ -224,10 +242,10 @@ public IEnumerable ExpectNextN(long n) /// /// Fluent DSL. Expect the given elements to be signalled in order. /// - public ManualProbe ExpectNextN(IEnumerable all) + public ManualProbe ExpectNextN(IEnumerable all, TimeSpan? timeout = null) { foreach (var x in all) - _probe.ExpectMsg>(y => AssertEquals(y.Element, x, "Expected one of ({0}), but got '{1}'", string.Join(", ", all), y.Element)); + _probe.ExpectMsg>(y => AssertEquals(y.Element, x, "Expected one of ({0}), but got '{1}'", string.Join(", ", all), y.Element), timeout); return this; } @@ -235,12 +253,12 @@ public ManualProbe ExpectNextN(IEnumerable all) /// /// Fluent DSL. Expect the given elements to be signalled in any order. /// - public ManualProbe ExpectNextUnorderedN(IEnumerable all) + public ManualProbe ExpectNextUnorderedN(IEnumerable all, TimeSpan? timeout = null) { var collection = new HashSet(all); while (collection.Count > 0) { - var next = ExpectNext(); + var next = timeout.HasValue ? 
ExpectNext(timeout.Value) : ExpectNext(); Assert(collection.Contains(next), $"expected one of (${string.Join(", ", collection)}), but received {next}"); collection.Remove(next); } @@ -480,6 +498,11 @@ private void Assert(bool predicate, string format, params object[] args) if (!predicate) throw new Exception(string.Format(format, args)); } + private void Assert(Func predicate, string format, params object[] args) + { + if (!predicate()) throw new Exception(string.Format(format, args)); + } + private void AssertEquals(T1 x, T2 y, string format, params object[] args) { if (!Equals(x, y)) throw new Exception(string.Format(format, args)); diff --git a/src/core/Akka.Streams.Tests/Implementation/DistinctRetainingMultiReaderBufferSpec.cs b/src/core/Akka.Streams.Tests/Implementation/DistinctRetainingMultiReaderBufferSpec.cs new file mode 100644 index 00000000000..d56d8d42a82 --- /dev/null +++ b/src/core/Akka.Streams.Tests/Implementation/DistinctRetainingMultiReaderBufferSpec.cs @@ -0,0 +1,149 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; +using Akka.Streams.Implementation; +using FluentAssertions; +using FluentAssertions.Execution; +using Xunit; + +namespace Akka.Streams.Tests.Implementation +{ + public class DistinctRetainingMultiReaderBufferSpec + { + // The rest of the tests are covered by ResizableMultiReaderRingBufferSpec + + [Fact] + public void A_DistinctRetainingMultiReaderBuffer_should_store_distinct_values_only() + { + var test = new Test(4, 4, 3); + test.Write(1).Should().BeTrue(); + test.Write(2).Should().BeTrue(); + test.Write(3).Should().BeTrue(); + test.Write(2).Should().BeTrue(); + test.Write(2).Should().BeTrue(); + test.Write(1).Should().BeTrue(); + test.Inspect().Should().Be("1 2 3 0 (size=3, cursors=3)"); + test.Read(0).Should().Be(1); + test.Read(0).Should().Be(2); + test.Read(1).Should().Be(1); + test.Inspect().Should().Be("1 2 3 0 (size=3, cursors=3)"); + test.Read(0).Should().Be(3); + test.Read(0).Should().Be(null); + test.Read(1).Should().Be(2); + test.Inspect().Should().Be("1 2 3 0 (size=3, cursors=3)"); + test.Read(2).Should().Be(1); + test.Inspect().Should().Be("1 2 3 0 (size=3, cursors=3)"); + test.Read(1).Should().Be(3); + test.Read(1).Should().Be(null); + test.Read(2).Should().Be(2); + test.Read(2).Should().Be(3); + test.Inspect().Should().Be("1 2 3 0 (size=3, cursors=3)"); + } + + private class TestBuffer : DistinctRetainingMultiReaderBuffer + { + public ICursors UnderlyingCursors { get; } + + public TestBuffer(int initialSize, int maxSize, ICursors cursors) : base(initialSize, maxSize, cursors) + { + UnderlyingCursors = cursors; + } + + public string Inspect() + { + return Buffer.Select(x => x ?? 0).Aggregate("", (s, i) => s + i + " ") + + ToString().SkipWhile(c => c != '(').Aggregate("", (s, c) => s + c); + } + } + + private class Test : TestBuffer + { + public Test(int initialSize, int maxSize, int cursorCount) : base(initialSize, maxSize, new SimpleCursors(cursorCount)) + { + } + + public int? 
Read(int cursorIx) + { + try + { + return Read(Cursors.Cursors.ElementAt(cursorIx)); + } + catch (NothingToReadException) + { + return null; + } + } + } + + private class SimpleCursors : ICursors + { + public SimpleCursors(IEnumerable cursors) + { + Cursors = cursors; + } + + public SimpleCursors(int cursorCount) + { + Cursors = Enumerable.Range(0, cursorCount).Select(_ => new SimpleCursor()).ToList(); + } + + public IEnumerable Cursors { get; } + } + + private class SimpleCursor : ICursor + { + public long Cursor { get; set; } + } + + private class StressTestCursor : ICursor + { + private readonly int _cursorNr; + private readonly int _run; + private readonly Action _log; + private readonly int _counterLimit; + private readonly StringBuilder _sb; + private int _counter = 1; + + public StressTestCursor(int cursorNr, int run, Action log, int counterLimit, StringBuilder sb) + { + _cursorNr = cursorNr; + _run = run; + _log = log; + _counterLimit = counterLimit; + _sb = sb; + } + + public bool TryReadAndReturnTrueIfDone(TestBuffer buf) + { + _log($" Try reading of {this}: "); + try + { + var x = buf.Read(this); + _log("OK\n"); + if (x != _counter) + { + throw new AssertionFailedException( + $@"|Run {_run}, cursorNr {_cursorNr}, counter {_counter}: got unexpected {x} + | Buf: {buf.Inspect()} + | Cursors: {buf.UnderlyingCursors.Cursors.Aggregate(" ", (s, cursor) => s + cursor + "\n ")} + |Log: {_sb} + "); + } + _counter++; + return _counter == _counterLimit; + } + catch (NothingToReadException) + { + _log("FAILED\n"); + return false; // ok, we currently can't read, try again later + } + } + + public long Cursor { get; set; } + + public override string ToString() => $"cursorNr {_cursorNr}, ix {Cursor}, counter {_counter}"; + } + } +} diff --git a/src/core/Akka.Streams.Tests/Implementation/ResizableMultiReaderRingBufferSpec.cs b/src/core/Akka.Streams.Tests/Implementation/ResizableMultiReaderRingBufferSpec.cs index bdaebbdf75b..f04ac789ace 100644 --- a/src/core/Akka.Streams.Tests/Implementation/ResizableMultiReaderRingBufferSpec.cs +++ b/src/core/Akka.Streams.Tests/Implementation/ResizableMultiReaderRingBufferSpec.cs @@ -244,7 +244,7 @@ public SimpleCursors(int cursorCount) private class SimpleCursor : ICursor { - public int Cursor { get; set; } + public long Cursor { get; set; } } private class StressTestCursor : ICursor @@ -291,7 +291,7 @@ public bool TryReadAndReturnTrueIfDone(TestBuffer buf) } } - public int Cursor { get; set; } + public long Cursor { get; set; } public override string ToString() => $"cursorNr {_cursorNr}, ix {Cursor}, counter {_counter}"; } diff --git a/src/core/Akka.Streams.Tests/Implementation/RetainingMultiReaderBufferSpec.cs b/src/core/Akka.Streams.Tests/Implementation/RetainingMultiReaderBufferSpec.cs new file mode 100644 index 00000000000..64a9e6b9761 --- /dev/null +++ b/src/core/Akka.Streams.Tests/Implementation/RetainingMultiReaderBufferSpec.cs @@ -0,0 +1,249 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; +using Akka.Streams.Implementation; +using FluentAssertions; +using FluentAssertions.Execution; +using Xunit; + +namespace Akka.Streams.Tests.Implementation +{ + public class RetainingMultiReaderBufferSpec + { + [Theory] + [InlineData(2, 4, 1, "0 0 (size=0, cursors=1)")] + [InlineData(4, 4, 3, "0 0 0 0 (size=0, cursors=3)")] + public void A_RetainingMultiReaderBufferSpec_should_initially_be_empty(int iSize, int mSize, int cursorCount, string expected) + { + var test = new Test(iSize, mSize, 
cursorCount); + test.Inspect().Should().Be(expected); + } + + [Fact] + public void A_RetainingMultiReaderBufferSpec_should_fail_reads_if_nothing_can_be_read() + { + var test = new Test(4, 4, 3); + test.Write(1).Should().BeTrue(); + test.Write(2).Should().BeTrue(); + test.Write(3).Should().BeTrue(); + test.Inspect().Should().Be("1 2 3 0 (size=3, cursors=3)"); + test.Read(0).Should().Be(1); + test.Read(0).Should().Be(2); + test.Read(1).Should().Be(1); + test.Inspect().Should().Be("1 2 3 0 (size=3, cursors=3)"); + test.Read(0).Should().Be(3); + test.Read(0).Should().Be(null); + test.Read(1).Should().Be(2); + test.Inspect().Should().Be("1 2 3 0 (size=3, cursors=3)"); + test.Read(2).Should().Be(1); + test.Inspect().Should().Be("1 2 3 0 (size=3, cursors=3)"); + test.Read(1).Should().Be(3); + test.Read(1).Should().Be(null); + test.Read(2).Should().Be(2); + test.Read(2).Should().Be(3); + test.Inspect().Should().Be("1 2 3 0 (size=3, cursors=3)"); + } + + [Fact] + public void A_RetainingMultiReaderBufferSpec_should_automatically_grow_if_possible() + { + var test = new Test(2, 8, 2); + test.Write(1).Should().BeTrue(); + test.Inspect().Should().Be("1 0 (size=1, cursors=2)"); + test.Write(2).Should().BeTrue(); + test.Inspect().Should().Be("1 2 (size=2, cursors=2)"); + test.Write(3).Should().BeTrue(); + test.Inspect().Should().Be("1 2 3 0 (size=3, cursors=2)"); + test.Write(4).Should().BeTrue(); + test.Inspect().Should().Be("1 2 3 4 (size=4, cursors=2)"); + test.Read(0).Should().Be(1); + test.Read(0).Should().Be(2); + test.Read(0).Should().Be(3); + test.Read(1).Should().Be(1); + test.Read(1).Should().Be(2); + test.Write(5).Should().BeTrue(); + test.Inspect().Should().Be("1 2 3 4 5 0 0 0 (size=5, cursors=2)"); + test.Write(6).Should().BeTrue(); + test.Inspect().Should().Be("1 2 3 4 5 6 0 0 (size=6, cursors=2)"); + test.Write(7).Should().BeTrue(); + test.Inspect().Should().Be("1 2 3 4 5 6 7 0 (size=7, cursors=2)"); + test.Read(0).Should().Be(4); + test.Read(0).Should().Be(5); + test.Read(0).Should().Be(6); + test.Read(0).Should().Be(7); + test.Read(0).Should().Be(null); + test.Read(1).Should().Be(3); + test.Read(1).Should().Be(4); + test.Read(1).Should().Be(5); + test.Read(1).Should().Be(6); + test.Read(1).Should().Be(7); + test.Read(1).Should().Be(null); + test.Inspect().Should().Be("1 2 3 4 5 6 7 0 (size=7, cursors=2)"); + } + + [Fact] + public void A_RetainingMultiReaderBufferSpec_should_pass_the_stress_test() + { + // create 100 buffers with an initialSize of 1 and a maxSize of 1 to 64, + // for each one attach 1 to 8 cursors and randomly try reading and writing to the buffer; + // in total 200 elements need to be written to the buffer and read in the correct order by each cursor + var MAXSIZEBIT_LIMIT = 6; // 2 ^ (this number) + var COUNTER_LIMIT = 200; + var LOG = false; + var sb = new StringBuilder(); + var log = new Action(s => + { + if (LOG) + sb.Append(s); + }); + + var random = new Random(); + for (var bit = 1; bit <= MAXSIZEBIT_LIMIT; bit++) + for (var n = 1; n <= 2; n++) + { + var counter = 1; + var activeCoursors = + Enumerable.Range(0, random.Next(8) + 1) + .Select(i => new StressTestCursor(i, 1 << bit, log, COUNTER_LIMIT, sb)) + .ToList(); + var stillWriting = 2;// give writing a slight bias, so as to somewhat "stretch" the buffer + var buf = new TestBuffer(1, 1 << bit, new SimpleCursors(activeCoursors)); + sb.Clear(); + + while (activeCoursors.Count != 0) + { + log($"Buf: {buf.Inspect()}\n"); + var activeCursorCount = activeCoursors.Count; + var index = random.Next(activeCursorCount + 
stillWriting); + if (index >= activeCursorCount) + { + log($" Writing {counter}: "); + if (buf.Write(counter)) + { + log("OK\n"); + counter++; + } + else + { + log("FAILED\n"); + if (counter == COUNTER_LIMIT) + stillWriting = 0; + } + } + else + { + var cursor = activeCoursors[index]; + if (cursor.TryReadAndReturnTrueIfDone(buf)) + activeCoursors = activeCoursors.Where(c => c != cursor).ToList(); + } + } + } + } + + private class TestBuffer : RetainingMultiReaderBuffer + { + public ICursors UnderlyingCursors { get; } + + public TestBuffer(int initialSize, int maxSize, ICursors cursors) : base(initialSize, maxSize, cursors) + { + UnderlyingCursors = cursors; + } + + public string Inspect() + { + return Buffer.Select(x => x ?? 0).Aggregate("", (s, i) => s + i + " ") + + ToString().SkipWhile(c => c != '(').Aggregate("", (s, c) => s + c); + } + } + + private class Test : TestBuffer + { + public Test(int initialSize, int maxSize, int cursorCount) : base(initialSize, maxSize, new SimpleCursors(cursorCount)) + { + } + + public int? Read(int cursorIx) + { + try + { + return Read(Cursors.Cursors.ElementAt(cursorIx)); + } + catch (NothingToReadException) + { + return null; + } + } + } + + private class SimpleCursors : ICursors + { + public SimpleCursors(IEnumerable cursors) + { + Cursors = cursors; + } + + public SimpleCursors(int cursorCount) + { + Cursors = Enumerable.Range(0, cursorCount).Select(_ => new SimpleCursor()).ToList(); + } + + public IEnumerable Cursors { get; } + } + + private class SimpleCursor : ICursor + { + public long Cursor { get; set; } + } + + private class StressTestCursor : ICursor + { + private readonly int _cursorNr; + private readonly int _run; + private readonly Action _log; + private readonly int _counterLimit; + private readonly StringBuilder _sb; + private int _counter = 1; + + public StressTestCursor(int cursorNr, int run, Action log, int counterLimit, StringBuilder sb) + { + _cursorNr = cursorNr; + _run = run; + _log = log; + _counterLimit = counterLimit; + _sb = sb; + } + + public bool TryReadAndReturnTrueIfDone(TestBuffer buf) + { + _log($" Try reading of {this}: "); + try + { + var x = buf.Read(this); + _log("OK\n"); + if (x != _counter) + { + throw new AssertionFailedException( + $@"|Run {_run}, cursorNr {_cursorNr}, counter {_counter}: got unexpected {x} + | Buf: {buf.Inspect()} + | Cursors: {buf.UnderlyingCursors.Cursors.Aggregate(" ", (s, cursor) => s + cursor + "\n ")} + |Log: {_sb} + "); + } + _counter++; + return _counter == _counterLimit; + } + catch (NothingToReadException) + { + _log("FAILED\n"); + return false; // ok, we currently can't read, try again later + } + } + + public long Cursor { get; set; } + + public override string ToString() => $"cursorNr {_cursorNr}, ix {Cursor}, counter {_counter}"; + } + } +} diff --git a/src/core/Akka.Streams/Dsl/Sink.cs b/src/core/Akka.Streams/Dsl/Sink.cs index a7b400a8530..54925ccf2b4 100644 --- a/src/core/Akka.Streams/Dsl/Sink.cs +++ b/src/core/Akka.Streams/Dsl/Sink.cs @@ -282,7 +282,10 @@ public static Sink> Publisher() /// TBD /// TBD public static Sink> FanoutPublisher() - => new Sink>(new FanoutPublisherSink(DefaultAttributes.FanoutPublisherSink, Shape("FanoutPublisherSink"))); + => new Sink>(new FanoutPublisherSink>(DefaultAttributes.FanoutPublisherSink, Shape("FanoutPublisherSink"))); + + internal static Sink> DistinctRetainingFanOutPublisher(Action onTerminated = null) + => new Sink>(new FanoutPublisherSink>(DefaultAttributes.FanoutPublisherSink, Shape("DistinctRetainingFanOutPublisherSink"), 
onTerminated)); /// /// A that will consume the stream and discard the elements. @@ -592,7 +595,7 @@ public static Sink> AsPublisher(bool fanout) { SinkModule> publisherSink; if (fanout) - publisherSink = new FanoutPublisherSink(DefaultAttributes.FanoutPublisherSink, Shape("FanoutPublisherSink")); + publisherSink = new FanoutPublisherSink>(DefaultAttributes.FanoutPublisherSink, Shape("FanoutPublisherSink")); else publisherSink = new PublisherSink(DefaultAttributes.PublisherSink, Shape("PublisherSink")); diff --git a/src/core/Akka.Streams/Implementation/ActorPublisher.cs b/src/core/Akka.Streams/Implementation/ActorPublisher.cs index f6b6e9dfbac..b8a273e64b1 100644 --- a/src/core/Akka.Streams/Implementation/ActorPublisher.cs +++ b/src/core/Akka.Streams/Implementation/ActorPublisher.cs @@ -390,7 +390,7 @@ bool ISubscriptionWithCursor.IsActive /// /// TBD /// - public int Cursor { get; private set; } + public long Cursor { get; private set; } long ISubscriptionWithCursor.TotalDemand { @@ -409,7 +409,7 @@ long ISubscriptionWithCursor.TotalDemand /// TBD public void Dispatch(TIn element) => ReactiveStreamsCompliance.TryOnNext(Subscriber, element); - int ICursor.Cursor + long ICursor.Cursor { get { return Cursor; } set { Cursor = value; } diff --git a/src/core/Akka.Streams/Implementation/FanoutProcessorImpl.cs b/src/core/Akka.Streams/Implementation/FanoutProcessorImpl.cs index 4f7312f341c..c7da56b2adc 100644 --- a/src/core/Akka.Streams/Implementation/FanoutProcessorImpl.cs +++ b/src/core/Akka.Streams/Implementation/FanoutProcessorImpl.cs @@ -17,7 +17,8 @@ namespace Akka.Streams.Implementation /// TBD /// /// TBD - internal class FanoutOutputs : SubscriberManagement, IOutputs + /// TBD + internal class FanoutOutputs : SubscriberManagement, IOutputs where TStreamBuffer : IStreamBuffer { private long _downstreamBufferSpace; private bool _downstreamCompleted; @@ -87,8 +88,7 @@ public FanoutOutputs(int maxBufferSize, int initialBufferSize, IActorRef self, I NeedsDemandOrCancel = DefaultOutputTransferStates.NeedsDemandOrCancel(this); SubReceive = new SubReceive(message => { - var publisher = message as ExposedPublisher; - if (publisher == null) + if (!(message is ExposedPublisher publisher)) throw new IllegalStateException($"The first message must be ExposedPublisher but was {message}"); ExposedPublisher = publisher.Publisher; @@ -112,24 +112,22 @@ protected override ISubscriptionWithCursor CreateSubscription(ISubscriber /// TBD protected bool DownstreamRunning(object message) { - if (message is SubscribePending) - SubscribePending(); - else if (message is RequestMore) + switch (message) { - var requestMore = (RequestMore) message; - MoreRequested((ActorSubscriptionWithCursor) requestMore.Subscription, requestMore.Demand); - _pump.Pump(); + case SubscribePending _: + SubscribePending(); + return true; + case RequestMore requestMore: + MoreRequested((ActorSubscriptionWithCursor) requestMore.Subscription, requestMore.Demand); + _pump.Pump(); + return true; + case Cancel cancel: + UnregisterSubscription((ActorSubscriptionWithCursor) cancel.Subscription); + _pump.Pump(); + return true; + default: + return false; } - else if (message is Cancel) - { - var cancel = (Cancel) message; - UnregisterSubscription((ActorSubscriptionWithCursor) cancel.Subscription); - _pump.Pump(); - } - else - return false; - - return true; } /// @@ -217,15 +215,19 @@ public void Error(Exception e) /// TBD /// /// TBD - internal sealed class FanoutProcessorImpl : ActorProcessorImpl + /// TBD + internal sealed class 
FanoutProcessorImpl : ActorProcessorImpl where TStreamBuffer : IStreamBuffer { + private readonly Action _onTerminated; + /// /// TBD /// /// TBD + /// TBD /// TBD - public static Props Props(ActorMaterializerSettings settings) - => Actor.Props.Create(() => new FanoutProcessorImpl(settings)).WithDeploy(Deploy.Local); + public static Props Props(ActorMaterializerSettings settings, Action onTerminated = null) + => Actor.Props.Create(() => new FanoutProcessorImpl(settings, onTerminated)).WithDeploy(Deploy.Local); /// /// TBD @@ -236,11 +238,14 @@ public static Props Props(ActorMaterializerSettings settings) /// TBD /// /// TBD - public FanoutProcessorImpl(ActorMaterializerSettings settings) : base(settings) + /// TBD + public FanoutProcessorImpl(ActorMaterializerSettings settings, Action onTerminated) : base(settings) { - PrimaryOutputs = new FanoutOutputs(settings.MaxInputBufferSize, + PrimaryOutputs = new FanoutOutputs(settings.MaxInputBufferSize, settings.InitialInputBufferSize, Self, this, AfterFlush); + _onTerminated = onTerminated; + var running = new TransferPhase(PrimaryInputs.NeedsInput.And(PrimaryOutputs.NeedsDemand), () => PrimaryOutputs.EnqueueOutputElement(PrimaryInputs.DequeueInputElement())); InitialPhase(1, running); @@ -269,6 +274,10 @@ public override void PumpFinished() PrimaryOutputs.Complete(); } - private void AfterFlush() => Context.Stop(Self); + private void AfterFlush() + { + _onTerminated?.Invoke(); + Context.Stop(Self); + } } } diff --git a/src/core/Akka.Streams/Implementation/ResizableMultiReaderRingBuffer.cs b/src/core/Akka.Streams/Implementation/ResizableMultiReaderRingBuffer.cs index ccdf061e711..7e885623213 100644 --- a/src/core/Akka.Streams/Implementation/ResizableMultiReaderRingBuffer.cs +++ b/src/core/Akka.Streams/Implementation/ResizableMultiReaderRingBuffer.cs @@ -8,7 +8,6 @@ using System; using System.Collections.Generic; using System.Linq; -using System.Runtime.Serialization; using Akka.Annotations; using Akka.Streams.Util; @@ -61,7 +60,149 @@ public interface ICursor /// /// TBD /// - int Cursor { get; set; } + long Cursor { get; set; } + } + + internal interface IStreamBuffer + { + bool IsEmpty { get; } + + long Length { get; } + + long AvailableData { get; } + + long CapacityLeft { get; } + + long Count(ICursor cursor); + + T Read(ICursor cursor); + + bool Write(T value); + + void InitCursor(ICursor cursor); + + void OnCursorRemoved(ICursor cursor); + } + + public class DistinctRetainingMultiReaderBuffer : RetainingMultiReaderBuffer + { + public DistinctRetainingMultiReaderBuffer(long initialSize, long maxSize, ICursors cursors) : base(initialSize, maxSize, cursors) + { } + + public override bool Write(T value) + { + return Buffer.Contains(value) || base.Write(value); + } + + /// + /// TBD + /// + /// TBD + public override string ToString() => $"DistinctRetainingMultiReaderBuffer(size={Length}, cursors={Cursors.Cursors.Count()})"; + } + + public class RetainingMultiReaderBuffer : IStreamBuffer + { + /// + /// TBD + /// + protected readonly ICursors Cursors; + + protected T[] Buffer { get; private set; } + + /// + /// The number of elements currently in the buffer. + /// + public long Length { get; private set; } + + public bool IsEmpty => Buffer.LongLength == 0; + + /// + /// The maximum number of elements the buffer can still take. 
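The only behavioural difference between the DistinctRetainingMultiReaderBuffer above and the plain retaining buffer is the Write override: a value that is already stored is acknowledged but not appended, so every cursor, including one attached late, replays each element exactly once and in first-write order. A stand-alone model of that rule (not the internal class itself) is sketched below:

using System;
using System.Collections.Generic;

public static class DistinctRetainingWriteModel
{
    private static readonly List<string> Stored = new List<string>();

    // Mirrors `Buffer.Contains(value) || base.Write(value)`: duplicates are accepted
    // but never stored, so readers see each value once, in first-write order.
    public static bool Write(string value)
    {
        if (Stored.Contains(value))
            return true;
        Stored.Add(value);
        return true;
    }

    public static void Demo()
    {
        foreach (var id in new[] { "a", "b", "a", "c", "b" })
            Write(id);
        Console.WriteLine(string.Join(" ", Stored)); // a b c
    }
}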
+ /// + public long CapacityLeft => long.MaxValue - Length; + + // DO NOT REMOVE maxSize parameter, the parameters are fixed and passed through reflection + public RetainingMultiReaderBuffer(long initialSize, long maxSize, ICursors cursors) + { + Cursors = cursors; + + if ((initialSize & (initialSize - 1)) != 0 || initialSize <= 0) + throw new ArgumentException("initialSize must be a power of 2 that is > 0"); + + // We don't care about the maximum size + Buffer = new T[initialSize]; + } + + /// + /// Returns the number of elements that the buffer currently contains for the given cursor. + /// + /// TBD + /// TBD + public long Count(ICursor cursor) => Length - cursor.Cursor; + + public long AvailableData + { + get + { + var lowest = 0L; + foreach (var cursor in Cursors.Cursors) + lowest = Math.Max(cursor.Cursor, lowest); + + return Length - lowest; + } + } + + public T Read(ICursor cursor) + { + var c = cursor.Cursor; + if (c < Length) + { + cursor.Cursor++; + return Buffer[c]; + } + + throw NothingToReadException.Instance; + } + + public virtual bool Write(T value) + { + if (Length < Buffer.Length) + { + // if we have space left we can simply write and be done + Buffer[Length] = value; + Length++; + return true; + } + + if (Buffer.LongLength >= long.MaxValue) return false; + + // if we are full but can grow we do so + // Array.Resize() does not work here, because it is limited to int.MaxValue + var newLength = unchecked(Buffer.LongLength << 1); + if (newLength < 0) + newLength = long.MaxValue; + var newArray = new T[newLength]; + + Array.Copy(Buffer, newArray, Buffer.LongLength); + Buffer = newArray; + Buffer[Length] = value; + Length++; + return true; + } + + public void InitCursor(ICursor cursor) => cursor.Cursor = 0; + + public void OnCursorRemoved(ICursor cursor) + { + // no op + } + + /// + /// TBD + /// + /// TBD + public override string ToString() => $"RetainingMultiReaderBuffer(size={Length}, cursors={Cursors.Cursors.Count()})"; } /// @@ -72,27 +213,27 @@ public interface ICursor /// /// TBD [InternalApi] - public class ResizableMultiReaderRingBuffer + public class ResizableMultiReaderRingBuffer : IStreamBuffer { private readonly int _maxSizeBit; - private object[] _array; + private T[] _array; /// /// Two counters counting the number of elements ever written and read; wrap-around is /// handled by always looking at differences or masked values /// - private int _writeIndex; + private long _writeIndex; + + private long _readIndex; // the "oldest" of all read cursor indices, i.e. the one that is most behind - private int _readIndex; // the "oldest" of all read cursor indices, i.e. 
the one that is most behind - /// /// Current array.length log2, we don't keep it as an extra field because /// is a JVM intrinsic compiling down to a `BSF` instruction on x86, which is very fast on modern CPUs /// - private int LengthBit => _array.Length.NumberOfTrailingZeros(); + private int LengthBit => BitOperations.TrailingZeroCount(_array.LongLength); // bit mask for converting a cursor into an array index - private int Mask => int.MaxValue >> (31 - LengthBit); + private long Mask => long.MaxValue >> (63 - LengthBit); /// /// TBD @@ -101,7 +242,7 @@ public class ResizableMultiReaderRingBuffer /// TBD /// TBD /// TBD - public ResizableMultiReaderRingBuffer(int initialSize, int maxSize, ICursors cursors) + public ResizableMultiReaderRingBuffer(long initialSize, long maxSize, ICursors cursors) { Cursors = cursors; if ((initialSize & (initialSize - 1)) != 0 || initialSize <= 0 || initialSize > maxSize) @@ -111,8 +252,8 @@ public ResizableMultiReaderRingBuffer(int initialSize, int maxSize, ICursors cur if ((maxSize & (maxSize - 1)) != 0 || maxSize <= 0 || maxSize > int.MaxValue / 2) throw new ArgumentException("maxSize must be a power of 2 that is > 0 and < Int.MaxValue/2"); - _array = new object[initialSize]; - _maxSizeBit = maxSize.NumberOfTrailingZeros(); + _array = new T[initialSize]; + _maxSizeBit = BitOperations.TrailingZeroCount(maxSize); } /// @@ -123,12 +264,14 @@ public ResizableMultiReaderRingBuffer(int initialSize, int maxSize, ICursors cur /// /// TBD /// - protected object[] UnderlyingArray => _array; + protected T[] UnderlyingArray => _array; /// /// The number of elements currently in the buffer. /// - public int Length => _writeIndex - _readIndex; + public long Length => _writeIndex - _readIndex; + + public long AvailableData => Length; /// /// TBD @@ -143,19 +286,19 @@ public ResizableMultiReaderRingBuffer(int initialSize, int maxSize, ICursors cur /// /// The number of elements the buffer can still take without having to be resized. /// - public int ImmediatelyAvailable => _array.Length - Length; + public long ImmediatelyAvailable => _array.Length - Length; /// /// The maximum number of elements the buffer can still take. /// - public int CapacityLeft => (1 << _maxSizeBit) - Length; + public long CapacityLeft => (1 << _maxSizeBit) - Length; /// /// Returns the number of elements that the buffer currently contains for the given cursor. /// /// TBD /// TBD - public int Count(ICursor cursor) => _writeIndex - cursor.Cursor; + public long Count(ICursor cursor) => _writeIndex - cursor.Cursor; /// /// Initializes the given Cursor to the oldest buffer entry that is still available. 
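Because the buffer length is kept at a power of two, the Mask defined above reduces the wrap-around computation to a single AND over the ever-growing cursor. A small arithmetic sketch of that indexing (stand-alone, not the internal class):

using System;

public static class RingBufferIndexSketch
{
    public static void Demo()
    {
        const int lengthBit = 3;                              // TrailingZeroCount(8) for a buffer of length 8
        const long mask = long.MaxValue >> (63 - lengthBit);  // == 7, i.e. length - 1

        for (long cursor = 5; cursor <= 12; cursor++)
            Console.WriteLine($"cursor {cursor} -> slot {cursor & mask}");
        // cursor 7 -> slot 7, cursor 8 -> slot 0, cursor 9 -> slot 1, ...
    }
}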
@@ -184,7 +327,12 @@ public bool Write(T value) // the growing logic is quite simple: we assemble all current buffer entries in the new array // in their natural order (removing potential wrap around) and rebase all indices to zero var r = _readIndex & Mask; - var newArray = new object[_array.Length << 1]; + + var newLength = unchecked(_array.LongLength << 1); + if (newLength < 0) + newLength = long.MaxValue; + var newArray = new T[newLength]; + Array.Copy(_array, r, newArray, 0, _array.Length - r); Array.Copy(_array, 0, newArray, _array.Length - r, r); RebaseCursors(Cursors.Cursors); @@ -219,7 +367,7 @@ public T Read(ICursor cursor) if (c - _writeIndex < 0) { cursor.Cursor += 1; - var ret = (T)_array[c & Mask]; + var ret = _array[c & Mask]; if(c == _readIndex) UpdateReadIndex(); return ret; @@ -243,12 +391,12 @@ private void UpdateReadIndex() var newReadIx = _writeIndex + MinCursor(Cursors.Cursors, 0); while (_readIndex != newReadIx) { - _array[_readIndex & Mask] = null; + _array[_readIndex & Mask] = default; _readIndex++; } } - private int MinCursor(IEnumerable remaining, int result) + private long MinCursor(IEnumerable remaining, long result) { foreach (var cursor in remaining) result = Math.Min(cursor.Cursor - _writeIndex, result); diff --git a/src/core/Akka.Streams/Implementation/Sinks.cs b/src/core/Akka.Streams/Implementation/Sinks.cs index 46e585325b5..e3ba7b8e030 100644 --- a/src/core/Akka.Streams/Implementation/Sinks.cs +++ b/src/core/Akka.Streams/Implementation/Sinks.cs @@ -205,16 +205,21 @@ public override object Create(MaterializationContext context, out IPublisher /// TBD - internal sealed class FanoutPublisherSink : SinkModule> + /// TBD + internal sealed class FanoutPublisherSink : SinkModule> where TStreamBuffer : IStreamBuffer { + private readonly Action _onTerminated; + /// /// TBD /// /// TBD /// TBD - public FanoutPublisherSink(Attributes attributes, SinkShape shape) : base(shape) + /// TBD + public FanoutPublisherSink(Attributes attributes, SinkShape shape, Action onTerminated = null) : base(shape) { Attributes = attributes; + _onTerminated = onTerminated; } /// @@ -228,7 +233,7 @@ public FanoutPublisherSink(Attributes attributes, SinkShape shape) : base(s /// TBD /// TBD public override IModule WithAttributes(Attributes attributes) - => new FanoutPublisherSink(attributes, AmendShape(attributes)); + => new FanoutPublisherSink(attributes, AmendShape(attributes), _onTerminated); /// /// TBD @@ -236,7 +241,7 @@ public override IModule WithAttributes(Attributes attributes) /// TBD /// TBD protected override SinkModule> NewInstance(SinkShape shape) - => new FanoutPublisherSink(Attributes, shape); + => new FanoutPublisherSink(Attributes, shape, _onTerminated); /// /// TBD @@ -248,7 +253,7 @@ public override object Create(MaterializationContext context, out IPublisher.Props(settings)); + var impl = actorMaterializer.ActorOf(context, FanoutProcessorImpl.Props(settings, _onTerminated)); var fanoutProcessor = new ActorProcessor(impl); impl.Tell(new ExposedPublisher(fanoutProcessor)); // Resolve cyclic dependency with actor. This MUST be the first message no matter what. 
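These sink changes are what the cluster-wide PersistenceIds publisher is built on: the fan-out sink is now parameterized by its stream buffer, and DistinctRetainingFanOutPublisher replays every distinct element to late subscribers and invokes onTerminated once the last subscriber cancels. A rough internal-only sketch follows; the sink is internal (see the InternalsVisibleTo additions below), the generic parameter is restored by hand, and the cached field is a placeholder:

using Akka.Streams;
using Akka.Streams.Dsl;
using Reactive.Streams;

internal static class DistinctFanOutSketch
{
    private static IPublisher<string> _cached;

    public static IPublisher<string> Materialize(IMaterializer materializer)
    {
        _cached = Source.From(new[] { "a", "b", "a", "c" })
            .RunWith(
                Sink.DistinctRetainingFanOutPublisher<string>(onTerminated: () => _cached = null),
                materializer);

        // Every subscriber - even one attaching after "c" was emitted - receives a, b, c exactly
        // once; when the last subscriber cancels, onTerminated clears the cached reference.
        return _cached;
    }
}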
diff --git a/src/core/Akka.Streams/Implementation/SubscriberManagement.cs b/src/core/Akka.Streams/Implementation/SubscriberManagement.cs index 7bdeaca776f..1976c2ef71c 100644 --- a/src/core/Akka.Streams/Implementation/SubscriberManagement.cs +++ b/src/core/Akka.Streams/Implementation/SubscriberManagement.cs @@ -37,7 +37,7 @@ internal interface ISubscriptionWithCursor : ISubscription, ICursor bool IsActive { get; set; } /// - /// Do not increment directly, use instead (it provides overflow protection)! + /// Do not increment directly, use instead (it provides overflow protection)! /// long TotalDemand { get; set; } // number of requested but not yet dispatched elements } @@ -143,9 +143,10 @@ public ErrorCompleted(Exception cause) /// TBD /// /// TBD - internal abstract class SubscriberManagement : ICursors + /// TBD + internal abstract class SubscriberManagement : ICursors where TStreamBuffer : IStreamBuffer { - private readonly Lazy> _buffer; + private readonly Lazy> _buffer; // optimize for small numbers of subscribers by keeping subscribers in a plain list private ICollection> _subscriptions = new List>(); @@ -161,8 +162,8 @@ internal abstract class SubscriberManagement : ICursors /// protected SubscriberManagement() { - _buffer = new Lazy>(() => - new ResizableMultiReaderRingBuffer(InitialBufferSize, MaxBufferSize, this)); + _buffer = new Lazy>(() + => (IStreamBuffer) Activator.CreateInstance(typeof(TStreamBuffer), InitialBufferSize, MaxBufferSize, this)); } /// @@ -213,40 +214,39 @@ protected SubscriberManagement() /// TBD protected void MoreRequested(ISubscriptionWithCursor subscription, long elements) { - if (subscription.IsActive) + if (!subscription.IsActive) return; + + // check for illegal demand See 3.9 + if (elements < 1) { - // check for illegal demand See 3.9 - if (elements < 1) + try { - try - { - ReactiveStreamsCompliance.TryOnError(subscription.Subscriber, ReactiveStreamsCompliance.NumberOfElementsInRequestMustBePositiveException); - } - finally + ReactiveStreamsCompliance.TryOnError(subscription.Subscriber, ReactiveStreamsCompliance.NumberOfElementsInRequestMustBePositiveException); + } + finally + { + UnregisterSubscriptionInternal(subscription); + } + } + else + { + if (_endOfStream is SubscriberManagement.NotReached || _endOfStream is SubscriberManagement.Completed) + { + var d = subscription.TotalDemand + elements; + // Long overflow, Reactive Streams Spec 3:17: effectively unbounded + var demand = d < 1 ? long.MaxValue : d; + subscription.TotalDemand = demand; + // returns Long.MinValue if the subscription is to be terminated + var remainingRequested = DispatchFromBufferAndReturnRemainingRequested(demand, subscription, _endOfStream); + if (remainingRequested == long.MinValue) { + _endOfStream.Apply(subscription.Subscriber); UnregisterSubscriptionInternal(subscription); } - } - else - { - if (_endOfStream is SubscriberManagement.NotReached || _endOfStream is SubscriberManagement.Completed) + else { - var d = subscription.TotalDemand + elements; - // Long overflow, Reactive Streams Spec 3:17: effectively unbounded - var demand = d < 1 ? 
long.MaxValue : d; - subscription.TotalDemand = demand; - // returns Long.MinValue if the subscription is to be terminated - var remainingRequested = DispatchFromBufferAndReturnRemainingRequested(demand, subscription, _endOfStream); - if (remainingRequested == long.MinValue) - { - _endOfStream.Apply(subscription.Subscriber); - UnregisterSubscriptionInternal(subscription); - } - else - { - subscription.TotalDemand = remainingRequested; - RequestFromUpstreamIfRequired(); - } + subscription.TotalDemand = remainingRequested; + RequestFromUpstreamIfRequired(); } } } @@ -315,7 +315,7 @@ protected void PushToDownstream(T value) _pendingFromUpstream--; if (!_buffer.Value.Write(value)) throw new IllegalStateException("Output buffer overflow"); - if (Dispatch(_subscriptions)) + if (_buffer.Value.AvailableData > 0 && Dispatch(_subscriptions)) RequestFromUpstreamIfRequired(); } else throw new IllegalStateException("PushToDownStream(...) after CompleteDownstream() or AbortDownstream(...)"); diff --git a/src/core/Akka.Streams/Properties/AssemblyInfo.cs b/src/core/Akka.Streams/Properties/AssemblyInfo.cs index ff5be28a670..394606e42b5 100644 --- a/src/core/Akka.Streams/Properties/AssemblyInfo.cs +++ b/src/core/Akka.Streams/Properties/AssemblyInfo.cs @@ -12,6 +12,8 @@ // General Information about an assembly is controlled through the following // set of attributes. Change these attribute values to modify the information // associated with an assembly. +[assembly: InternalsVisibleTo("Akka.Persistence.Query.Sql")] +[assembly: InternalsVisibleTo("Akka.Persistence.TCK")] [assembly: InternalsVisibleTo("Akka.Streams.Tests")] [assembly: InternalsVisibleTo("Akka.Streams.TestKit")] [assembly: InternalsVisibleTo("Akka.Benchmarks")] diff --git a/src/core/Akka.Streams/Util/BitOperations.cs b/src/core/Akka.Streams/Util/BitOperations.cs new file mode 100644 index 00000000000..47cb3510d90 --- /dev/null +++ b/src/core/Akka.Streams/Util/BitOperations.cs @@ -0,0 +1,279 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; + +// Some routines inspired by the Stanford Bit Twiddling Hacks by Sean Eron Anderson: +// http://graphics.stanford.edu/~seander/bithacks.html +namespace Akka.Streams.Util +{ + // TODO: replace this with the official System.Numerics.BitOperations when we move on to .NET Core 3.0 + /// + /// Utility methods for intrinsic bit-twiddling operations. + /// + /// A copy of Microsoft .NET core 3.0 implementation, without the hardware optimization + /// + internal static class BitOperations + { + // C# no-alloc optimization that directly wraps the data section of the dll (similar to string constants) + // https://github.com/dotnet/roslyn/pull/24621 + + private static ReadOnlySpan TrailingZeroCountDeBruijn => new byte[32] + { + 00, 01, 28, 02, 29, 14, 24, 03, + 30, 22, 20, 15, 25, 17, 04, 08, + 31, 27, 13, 23, 21, 19, 16, 07, + 26, 12, 18, 06, 11, 05, 10, 09 + }; + + private static ReadOnlySpan Log2DeBruijn => new byte[32] + { + 00, 09, 01, 10, 13, 21, 02, 29, + 11, 14, 16, 18, 22, 25, 03, 30, + 08, 12, 20, 28, 15, 17, 24, 07, + 19, 27, 23, 06, 26, 05, 04, 31 + }; + + /// + /// Count the number of leading zero bits in a mask. + /// Similar in behavior to the x86 instruction LZCNT. + /// + /// The value. 
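Since this class is a software-only copy of the .NET Core intrinsics, its contracts are easy to pin down. The following xunit fact is a hypothetical illustration (not part of this change) of the LeadingZeroCount fallback contract; it assumes access to the internal Akka.Streams.Util namespace, which Akka.Streams.Tests already has via InternalsVisibleTo:

using Akka.Streams.Util;
using FluentAssertions;
using Xunit;

public class BitOperationsLeadingZeroCountExample
{
    [Fact]
    public void LeadingZeroCount_should_match_the_LZCNT_contract()
    {
        BitOperations.LeadingZeroCount(0u).Should().Be(32);           // all bits clear
        BitOperations.LeadingZeroCount(1u).Should().Be(31);
        BitOperations.LeadingZeroCount(0x80000000u).Should().Be(0);   // top bit set
        BitOperations.LeadingZeroCount(1UL).Should().Be(63);          // ulong overload: 32 + 31
    }
}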
+ [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static int LeadingZeroCount(uint value) + { + // Unguarded fallback contract is 0->31, BSR contract is 0->undefined + if (value == 0) + return 32; + + return 31 ^ Log2SoftwareFallback(value); + } + + /// + /// Count the number of leading zero bits in a mask. + /// Similar in behavior to the x86 instruction LZCNT. + /// + /// The value. + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static int LeadingZeroCount(ulong value) + { + var hi = (uint)(value >> 32); + + if (hi == 0) + return 32 + LeadingZeroCount((uint)value); + + return LeadingZeroCount(hi); + } + + /// + /// Returns the integer (floor) log of the specified value, base 2. + /// Note that by convention, input value 0 returns 0 since log(0) is undefined. + /// + /// The value. + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static int Log2(uint value) + { + // The 0->0 contract is fulfilled by setting the LSB to 1. + // Log(1) is 0, and setting the LSB for values > 1 does not change the log2 result. + value |= 1; + + // value lzcnt actual expected + // ..0001 31 31-31 0 + // ..0010 30 31-30 1 + // 0010.. 2 31-2 29 + // 0100.. 1 31-1 30 + // 1000.. 0 31-0 31 + + // Fallback contract is 0->0 + return Log2SoftwareFallback(value); + } + + /// + /// Returns the integer (floor) log of the specified value, base 2. + /// Note that by convention, input value 0 returns 0 since log(0) is undefined. + /// + /// The value. + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static int Log2(ulong value) + { + value |= 1; + + var hi = (uint)(value >> 32); + + if (hi == 0) + return Log2((uint)value); + + return 32 + Log2(hi); + } + + /// + /// Returns the integer (floor) log of the specified value, base 2. + /// Note that by convention, input value 0 returns 0 since Log(0) is undefined. + /// Does not directly use any hardware intrinsics, nor does it incur branching. + /// + /// The value. + private static int Log2SoftwareFallback(uint value) + { + // No AggressiveInlining due to large method size + // Has conventional contract 0->0 (Log(0) is undefined) + + // Fill trailing zeros with ones, eg 00010010 becomes 00011111 + value |= value >> 01; + value |= value >> 02; + value |= value >> 04; + value |= value >> 08; + value |= value >> 16; + + // uint.MaxValue >> 27 is always in range [0 - 31] so we use Unsafe.AddByteOffset to avoid bounds check + return Unsafe.AddByteOffset( + // Using deBruijn sequence, k=2, n=5 (2^5=32) : 0b_0000_0111_1100_0100_1010_1100_1101_1101u + ref MemoryMarshal.GetReference(Log2DeBruijn), + // uint|long -> IntPtr cast on 32-bit platforms does expensive overflow checks not needed here + (IntPtr)(int)((value * 0x07C4ACDDu) >> 27)); + } + + /// + /// Returns the population count (number of bits set) of a mask. + /// Similar in behavior to the x86 instruction POPCNT. + /// + /// The value. + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static int PopCount(uint value) + { + const uint c1 = 0x_55555555u; + const uint c2 = 0x_33333333u; + const uint c3 = 0x_0F0F0F0Fu; + const uint c4 = 0x_01010101u; + + value -= (value >> 1) & c1; + value = (value & c2) + ((value >> 2) & c2); + value = (((value + (value >> 4)) & c3) * c4) >> 24; + + return (int)value; + } + + /// + /// Returns the population count (number of bits set) of a mask. + /// Similar in behavior to the x86 instruction POPCNT. + /// + /// The value. 
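Similarly, Log2 is a floor log with the 0 -> 0 convention called out in the comments above, and PopCount counts set bits. Another hypothetical, illustrative fact (same assumptions as the previous sketch):

using Akka.Streams.Util;
using FluentAssertions;
using Xunit;

public class BitOperationsLog2PopCountExample
{
    [Fact]
    public void Log2_is_a_floor_log_and_PopCount_counts_set_bits()
    {
        BitOperations.Log2(0u).Should().Be(0);    // by convention, since log(0) is undefined
        BitOperations.Log2(1u).Should().Be(0);
        BitOperations.Log2(32u).Should().Be(5);
        BitOperations.Log2(33u).Should().Be(5);   // floor
        BitOperations.PopCount(0b1011u).Should().Be(3);
    }
}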
+ [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static int PopCount(ulong value) + { + const ulong c1 = 0x_55555555_55555555ul; + const ulong c2 = 0x_33333333_33333333ul; + const ulong c3 = 0x_0F0F0F0F_0F0F0F0Ful; + const ulong c4 = 0x_01010101_01010101ul; + + value -= (value >> 1) & c1; + value = (value & c2) + ((value >> 2) & c2); + value = (((value + (value >> 4)) & c3) * c4) >> 56; + + return (int)value; + } + + /// + /// Count the number of trailing zero bits in an integer value. + /// Similar in behavior to the x86 instruction TZCNT. + /// + /// The value. + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static int TrailingZeroCount(int value) + => TrailingZeroCount((uint)value); + + /// + /// Count the number of trailing zero bits in an integer value. + /// Similar in behavior to the x86 instruction TZCNT. + /// + /// The value. + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static int TrailingZeroCount(uint value) + { + // Unguarded fallback contract is 0->0, BSF contract is 0->undefined + if (value == 0) + return 32; + + // uint.MaxValue >> 27 is always in range [0 - 31] so we use Unsafe.AddByteOffset to avoid bounds check + return Unsafe.AddByteOffset( + // Using deBruijn sequence, k=2, n=5 (2^5=32) : 0b_0000_0111_0111_1100_1011_0101_0011_0001u + ref MemoryMarshal.GetReference(TrailingZeroCountDeBruijn), + // uint|long -> IntPtr cast on 32-bit platforms does expensive overflow checks not needed here + (IntPtr)(int)(((value & (uint)-(int)value) * 0x077CB531u) >> 27)); // Multi-cast mitigates redundant conv.u8 + } + + /// + /// Count the number of trailing zero bits in a mask. + /// Similar in behavior to the x86 instruction TZCNT. + /// + /// The value. + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static int TrailingZeroCount(long value) + => TrailingZeroCount((ulong)value); + + /// + /// Count the number of trailing zero bits in a mask. + /// Similar in behavior to the x86 instruction TZCNT. + /// + /// The value. + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static int TrailingZeroCount(ulong value) + { + var lo = (uint)value; + + if (lo == 0) + return 32 + TrailingZeroCount((uint)(value >> 32)); + + return TrailingZeroCount(lo); + } + + /// + /// Rotates the specified value left by the specified number of bits. + /// Similar in behavior to the x86 instruction ROL. + /// + /// The value to rotate. + /// The number of bits to rotate by. + /// Any value outside the range [0..31] is treated as congruent mod 32. + /// The rotated value. + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static uint RotateLeft(uint value, int offset) + => (value << offset) | (value >> (32 - offset)); + + /// + /// Rotates the specified value left by the specified number of bits. + /// Similar in behavior to the x86 instruction ROL. + /// + /// The value to rotate. + /// The number of bits to rotate by. + /// Any value outside the range [0..63] is treated as congruent mod 64. + /// The rotated value. + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static ulong RotateLeft(ulong value, int offset) + => (value << offset) | (value >> (64 - offset)); + + /// + /// Rotates the specified value right by the specified number of bits. + /// Similar in behavior to the x86 instruction ROR. + /// + /// The value to rotate. + /// The number of bits to rotate by. + /// Any value outside the range [0..31] is treated as congruent mod 32. + /// The rotated value. 
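TrailingZeroCount is the member the ring buffer above actually depends on (it replaces the former NumberOfTrailingZeros helper when computing LengthBit from a power-of-two length), and the rotate helpers behave like ROL/ROR. One more hypothetical fact to illustrate:

using Akka.Streams.Util;
using FluentAssertions;
using Xunit;

public class BitOperationsTrailingZeroCountExample
{
    [Fact]
    public void TrailingZeroCount_yields_log2_for_powers_of_two()
    {
        BitOperations.TrailingZeroCount(8).Should().Be(3);        // LengthBit for a buffer of length 8
        BitOperations.TrailingZeroCount(0).Should().Be(32);
        BitOperations.TrailingZeroCount(0x10UL).Should().Be(4);
        BitOperations.RotateLeft(0x80000001u, 1).Should().Be(0x00000003u);
    }
}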
+ [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static uint RotateRight(uint value, int offset) + => (value >> offset) | (value << (32 - offset)); + + /// + /// Rotates the specified value right by the specified number of bits. + /// Similar in behavior to the x86 instruction ROR. + /// + /// The value to rotate. + /// The number of bits to rotate by. + /// Any value outside the range [0..63] is treated as congruent mod 64. + /// The rotated value. + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static ulong RotateRight(ulong value, int offset) + => (value >> offset) | (value << (64 - offset)); + } +} diff --git a/src/core/Akka.Tests/Pattern/CircuitBreakerSpec.cs b/src/core/Akka.Tests/Pattern/CircuitBreakerSpec.cs index 0ad0e4eb044..ffc16ffc878 100644 --- a/src/core/Akka.Tests/Pattern/CircuitBreakerSpec.cs +++ b/src/core/Akka.Tests/Pattern/CircuitBreakerSpec.cs @@ -74,15 +74,17 @@ public void Must_increment_failure_count_on_fail_method() Assert.True(breaker.Instance.CurrentFailureCount == 1); } - [Fact(DisplayName = "A synchronous circuit breaker that is closed must reset failure count after success method")] + [Fact(DisplayName = "A synchronous circuit breaker that is closed must reset failure count and clears cached last exception after success method")] public void Must_reset_failure_count_after_success_method() { var breaker = MultiFailureCb(); Assert.True(breaker.Instance.CurrentFailureCount == 0); Assert.True(InterceptExceptionType(() => breaker.Instance.WithSyncCircuitBreaker(ThrowException))); Assert.True(breaker.Instance.CurrentFailureCount == 1); + Assert.True(breaker.Instance.LastCaughtException is TestException); breaker.Instance.Succeed(); Assert.True(breaker.Instance.CurrentFailureCount == 0); + Assert.True(breaker.Instance.LastCaughtException is null); } } @@ -101,6 +103,45 @@ public void Should_Pass_Call_And_Transition_To_Close_On_Success( ) Assert.Equal( SayTest( ), result ); } + [Fact(DisplayName = "An asynchronous circuit breaker that is half open should pass only one call until it closes")] + public async Task Should_Pass_Only_One_Call_And_Transition_To_Close_On_Success() + { + var breaker = ShortResetTimeoutCb(); + InterceptExceptionType(() => breaker.Instance.WithSyncCircuitBreaker(ThrowException)); + Assert.True(CheckLatch(breaker.HalfOpenLatch)); + + var task1 = breaker.Instance.WithCircuitBreaker(() => DelayedSayTest(TimeSpan.FromSeconds(0.1))); + var task2 = breaker.Instance.WithCircuitBreaker(() => DelayedSayTest(TimeSpan.FromSeconds(0.1))); + var combined = Task.WhenAny(task1, task2).Unwrap(); + + // One of the 2 tasks will throw, because the circuit breaker is half open + Exception caughtException = null; + try + { + await combined; + } + catch (Exception e) + { + caughtException = e; + } + Assert.True(caughtException is OpenCircuitException); + Assert.StartsWith("Circuit breaker is half open", caughtException.Message); + + // Wait until one of task completes + await Task.Delay(TimeSpan.FromSeconds(0.25)); + Assert.True(CheckLatch(breaker.ClosedLatch)); + + // We don't know which one of the task got faulted + string result = null; + if (task1.IsCompleted && !task1.IsFaulted) + result = task1.Result; + else if (task2.IsCompleted && !task2.IsFaulted) + result = task2.Result; + + Assert.Equal(SayTest(), result); + } + + [Fact(DisplayName = "A synchronous circuit breaker that is half open should pass call and transition to open on exception")] public void Should_Pass_Call_And_Transition_To_Open_On_Exception( ) { @@ -239,6 +280,7 @@ 
public void Should_Increment_Failure_Count_When_Call_Times_Out( ) Assert.True( CheckLatch( breaker.OpenLatch ) ); Assert.Equal( 1, breaker.Instance.CurrentFailureCount ); + Assert.True(breaker.Instance.LastCaughtException is TimeoutException); } } @@ -320,6 +362,12 @@ public Task Delay( TimeSpan toDelay, CancellationToken? token ) return token.HasValue ? Task.Delay( toDelay, token.Value ) : Task.Delay( toDelay ); } + public async Task DelayedSayTest(TimeSpan delay) + { + await Task.Delay(delay); + return "Test"; + } + public void ThrowException( ) { throw new TestException( "Test Exception" ); @@ -340,26 +388,46 @@ public bool InterceptExceptionType( Action action ) where T : Exception } catch ( Exception ex ) { - var aggregate = ex as AggregateException; - if ( aggregate != null ) + if (ex is AggregateException aggregate) { - // ReSharper disable once UnusedVariable - foreach ( var temp in aggregate.InnerExceptions.Select( innerException => innerException as T ).Where( temp => temp == null ) ) + foreach (var temp in aggregate + .InnerExceptions + .Where(t => !(t is T))) { throw; } - } - else + } else if (!(ex is T)) { - var temp = ex as T; + throw; + } + } + return true; + } - if ( temp == null ) + public async Task InterceptExceptionTypeAsync(Task action) where T : Exception + { + try + { + await action; + return false; + } + catch (Exception ex) + { + if (ex is AggregateException aggregate) + { + // ReSharper disable once UnusedVariable + foreach (var temp in aggregate + .InnerExceptions + .Where(t => !(t is T))) { throw; } } - + else if (!(ex is T)) + { + throw; + } } return true; } diff --git a/src/core/Akka/Pattern/CircuitBreaker.cs b/src/core/Akka/Pattern/CircuitBreaker.cs index ddc6aba8362..016353e9512 100644 --- a/src/core/Akka/Pattern/CircuitBreaker.cs +++ b/src/core/Akka/Pattern/CircuitBreaker.cs @@ -132,6 +132,9 @@ public long CurrentFailureCount get { return Closed.Current; } } + public Exception LastCaughtException { get; private set; } + + /// /// Wraps invocation of asynchronous calls that need to be protected /// @@ -197,12 +200,16 @@ public T WithSyncCircuitBreaker(Func body) /// public void Succeed() => _currentState.CallSucceeds(); + internal void OnSuccess() => LastCaughtException = null; + /// /// Mark a failed call through CircuitBreaker. Sometimes the callee of CircuitBreaker sends back a message to the /// caller Actor. In such a case, it is convenient to mark a failed call instead of using Future /// via /// - public void Fail() => _currentState.CallFails(); + public void Fail() => _currentState.CallFails(new UserCalledFailException()); + + internal void OnFail(Exception cause) => LastCaughtException = cause; /// /// Return true if the internal state is Closed. WARNING: It is a "power API" call which you should use with care. diff --git a/src/core/Akka/Pattern/CircuitBreakerState.cs b/src/core/Akka/Pattern/CircuitBreakerState.cs index 53064bce917..9ca5470c186 100644 --- a/src/core/Akka/Pattern/CircuitBreakerState.cs +++ b/src/core/Akka/Pattern/CircuitBreakerState.cs @@ -6,6 +6,7 @@ //----------------------------------------------------------------------- using System; +using System.Diagnostics; using System.Globalization; using System.Threading.Tasks; using Akka.Util; @@ -37,9 +38,9 @@ public Open(CircuitBreaker breaker) /// N/A /// This exception is thrown automatically since the circuit is open. 
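The net effect of the circuit-breaker state changes that follow is that the exception which tripped the breaker now travels with it. A hedged sketch of consuming that information is shown below; the breaker, the logging adapter, and the protected call are placeholders supplied by the caller:

using System;
using Akka.Event;
using Akka.Pattern;

public static class BreakerDiagnostics
{
    public static void CallThrough(CircuitBreaker breaker, ILoggingAdapter log, Action protectedCall)
    {
        try
        {
            breaker.WithSyncCircuitBreaker(protectedCall);
        }
        catch (OpenCircuitException ex)
        {
            // The failure that originally tripped (or half-open-blocked) the breaker now travels
            // along as the cause instead of a bare "circuit breaker is open" message.
            log.Warning("Fast-failed; original failure: {0}", ex.InnerException);
        }

        // LastCaughtException can also be inspected directly; it is reset to null on success.
        if (breaker.LastCaughtException is TimeoutException)
            log.Warning("The protected call exceeded the configured call timeout");
    }
}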
         /// <returns>N/A</returns>
-        public override async Task<T> Invoke<T>(Func<Task<T>> body)
+        public override Task<T> Invoke<T>(Func<Task<T>> body)
         {
-            throw new OpenCircuitException();
+            throw new OpenCircuitException(_breaker.LastCaughtException);
         }
 
         /// <summary>
@@ -48,16 +49,19 @@ public override async Task<T> Invoke<T>(Func<Task<T>> body)
         /// <param name="body">N/A</param>
         /// <exception cref="OpenCircuitException">This exception is thrown automatically since the circuit is open.</exception>
         /// <returns>N/A</returns>
-        public override async Task Invoke(Func<Task> body)
+        public override Task Invoke(Func<Task> body)
         {
-            throw new OpenCircuitException();
+            throw new OpenCircuitException(_breaker.LastCaughtException);
         }
 
         /// <summary>
         /// No-op for open, calls are never executed so cannot succeed or fail
         /// </summary>
-        protected internal override void CallFails()
+        protected internal override void CallFails(Exception cause)
         {
+            // This is a no-op, but CallFails() can be called from CircuitBreaker
+            // (The function summary is a lie)
+            Debug.WriteLine($"Ignoring calls to [CallFails()] because {nameof(CircuitBreaker)} is in open state. Exception cause was: {cause}");
         }
 
         /// <summary>
@@ -65,6 +69,9 @@ protected internal override void CallFails()
         /// </summary>
         protected internal override void CallSucceeds()
         {
+            // This is a no-op, but CallSucceeds() can be called from CircuitBreaker
+            // (The function summary is a lie)
+            Debug.WriteLine($"Ignoring calls to [CallSucceeds()] because {nameof(CircuitBreaker)} is in open state.");
         }
 
         /// <summary>
@@ -113,7 +120,7 @@ public override async Task<T> Invoke<T>(Func<Task<T>> body)
         {
             if (!_lock.CompareAndSet(true, false))
             {
-                throw new OpenCircuitException();
+                throw new OpenCircuitException("Circuit breaker is half open, only one call is allowed; this call is failing fast.", _breaker.LastCaughtException);
             }
             return await CallThrough(body);
         }
@@ -129,7 +136,7 @@ public override async Task Invoke(Func<Task> body)
         {
             if (!_lock.CompareAndSet(true, false))
             {
-                throw new OpenCircuitException();
+                throw new OpenCircuitException("Circuit breaker is half open, only one call is allowed; this call is failing fast.", _breaker.LastCaughtException);
             }
             await CallThrough(body);
         }
@@ -137,8 +144,9 @@ public override async Task Invoke(Func<Task> body)
         /// <summary>
         /// Reopen breaker on failed call.
         /// </summary>
-        protected internal override void CallFails()
+        protected internal override void CallFails(Exception cause)
         {
+            _breaker.OnFail(cause);
             _breaker.TripBreaker(this);
         }
 
@@ -147,6 +155,7 @@ protected internal override void CallFails()
         /// </summary>
         protected internal override void CallSucceeds()
         {
+            _breaker.OnSuccess();
             _breaker.ResetBreaker();
         }
 
@@ -210,8 +219,9 @@ public override Task Invoke(Func<Task> body)
         /// On failed call, the failure count is incremented. The count is checked against the configured maxFailures, and
         /// the breaker is tripped if we have reached maxFailures.
         /// </summary>
-        protected internal override void CallFails()
+        protected internal override void CallFails(Exception cause)
         {
+            _breaker.OnFail(cause);
             if (IncrementAndGet() == _breaker.MaxFailures)
             {
                 _breaker.TripBreaker(this);
@@ -223,6 +233,7 @@ protected internal override void CallFails()
         /// </summary>
         protected internal override void CallSucceeds()
         {
+            _breaker.OnSuccess();
             Reset();
         }
 
diff --git a/src/core/Akka/Pattern/OpenCircuitException.cs b/src/core/Akka/Pattern/OpenCircuitException.cs
index cc687870096..6ca30b78863 100644
--- a/src/core/Akka/Pattern/OpenCircuitException.cs
+++ b/src/core/Akka/Pattern/OpenCircuitException.cs
@@ -21,6 +21,11 @@ public class OpenCircuitException : AkkaException
         /// </summary>
         public OpenCircuitException() : base("Circuit Breaker is open; calls are failing fast") { }
 
+        public OpenCircuitException(Exception cause)
+            : base("Circuit Breaker is open; calls are failing fast", cause)
+        {
+        }
+
         /// <summary>
         /// Initializes a new instance of the <see cref="OpenCircuitException"/> class.
         /// </summary>
diff --git a/src/core/Akka/Pattern/UserCalledFailException.cs b/src/core/Akka/Pattern/UserCalledFailException.cs
new file mode 100644
index 00000000000..1d2dfd841cf
--- /dev/null
+++ b/src/core/Akka/Pattern/UserCalledFailException.cs
@@ -0,0 +1,13 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using Akka.Actor;
+
+namespace Akka.Pattern
+{
+    public class UserCalledFailException : AkkaException
+    {
+        public UserCalledFailException() : base($"User code caused [{nameof(CircuitBreaker)}] to fail because it calls the [{nameof(CircuitBreaker.Fail)}()] method.")
+        { }
+    }
+}
diff --git a/src/core/Akka/Util/Internal/AtomicState.cs b/src/core/Akka/Util/Internal/AtomicState.cs
index fc6673e82b1..87f9d2d92d8 100644
--- a/src/core/Akka/Util/Internal/AtomicState.cs
+++ b/src/core/Akka/Util/Internal/AtomicState.cs
@@ -96,12 +96,16 @@ public async Task<T> CallThrough<T>(Func<Task<T>> task)
                 capturedException = ExceptionDispatchInfo.Capture(ex);
             }
 
-            bool throwException = capturedException != null;
-            if (throwException || DateTime.UtcNow.CompareTo(deadline) >= 0)
+            // Need to make sure that timeouts are reported as timeouts
+            if (capturedException != null)
             {
-                CallFails();
-                if (throwException)
-                    capturedException.Throw();
+                CallFails(capturedException.SourceException);
+                capturedException.Throw();
+            }
+            else if (DateTime.UtcNow.CompareTo(deadline) >= 0)
+            {
+                CallFails(new TimeoutException(
+                    $"Execution did not complete within the time allotted {_callTimeout.TotalMilliseconds} ms"));
             }
             else
             {
@@ -135,11 +139,16 @@ public async Task CallThrough(Func<Task> task)
                 capturedException = ExceptionDispatchInfo.Capture(ex);
             }
 
-            bool throwException = capturedException != null;
-            if (throwException || DateTime.UtcNow.CompareTo(deadline) >= 0)
+            // Need to make sure that timeouts are reported as timeouts
+            if (capturedException != null)
+            {
+                CallFails(capturedException.SourceException);
+                capturedException.Throw();
+            }
+            else if (DateTime.UtcNow.CompareTo(deadline) >= 0)
             {
-                CallFails();
-                if (throwException) capturedException.Throw();
+                CallFails(new TimeoutException(
+                    $"Execution did not complete within the time allotted {_callTimeout.TotalMilliseconds} ms"));
             }
             else
             {
@@ -167,7 +176,7 @@ public async Task CallThrough(Func<Task> task)
         /// <summary>
         /// Invoked when call fails
         /// </summary>
-        protected internal abstract void CallFails();
+        protected internal abstract void CallFails(Exception cause);
 
         /// <summary>
         /// Invoked when call succeeds
diff --git a/src/xunit.runner.json b/src/xunit.runner.json
index cafdde412f1..4a73b1e56a4 100644
--- a/src/xunit.runner.json
+++ b/src/xunit.runner.json
@@ -1,4 +1,6 @@
 {
-    "$schema": "https://xunit.github.io/schema/current/xunit.runner.schema.json",
-    "parallelizeTestCollections": false
+    "$schema": "https://xunit.github.io/schema/current/xunit.runner.schema.json",
+    "longRunningTestSeconds": 60,
+    "parallelizeAssembly": false,
+    "parallelizeTestCollections": false
 }
\ No newline at end of file
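
As an aside on the `RotateRight` helpers added at the top of this section: they are pure bit-twiddling, so their behavior is easy to sanity-check in isolation. The snippet below is a standalone illustration and is not part of the change set; the local `RotateRight` copy simply repeats the 32-bit expression from the diff so the example compiles on its own.

```csharp
using System;

public static class RotateRightDemo
{
    // Same expression as the 32-bit helper in the diff above, copied here so the demo is self-contained.
    private static uint RotateRight(uint value, int offset)
        => (value >> offset) | (value << (32 - offset));

    public static void Main()
    {
        uint x = 0xDEADBEEF;

        // Rotating right by 8 moves the low byte (EF) to the top: EFDEADBE.
        Console.WriteLine(RotateRight(x, 8).ToString("X8"));

        // Four 8-bit rotations of a 32-bit value return the original: DEADBEEF.
        Console.WriteLine(RotateRight(RotateRight(RotateRight(RotateRight(x, 8), 8), 8), 8).ToString("X8"));
    }
}
```

The composition of a right shift with a left shift of the complementary width is what makes the bits "wrap around" instead of being discarded, which is why the helper behaves like the ROR instruction mentioned in its summary.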
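Taken together, the `CircuitBreaker` changes above make every failure path record a concrete cause (the thrown exception, a `TimeoutException` when the call overruns its deadline, or a `UserCalledFailException` when user code calls `Fail()` directly) and surface that cause as the `InnerException` of later fail-fast `OpenCircuitException`s. The sketch below is a minimal, self-contained illustration of that cause-propagation shape only; `NaiveBreaker`, all of its members, and the use of `InvalidOperationException` in place of Akka's `OpenCircuitException` are invented for the example and are not Akka.NET APIs.

```csharp
using System;
using System.Threading.Tasks;

// Illustrative only: a toy stand-in for the cause-tracking behavior added in this patch.
public sealed class NaiveBreaker
{
    private readonly int _maxFailures;
    private readonly TimeSpan _callTimeout;
    private int _failureCount;

    // Mirrors the CircuitBreaker.LastCaughtException property introduced above.
    public Exception LastCaughtException { get; private set; }

    public bool IsOpen => _failureCount >= _maxFailures;

    public NaiveBreaker(int maxFailures, TimeSpan callTimeout)
    {
        _maxFailures = maxFailures;
        _callTimeout = callTimeout;
    }

    public async Task<T> Invoke<T>(Func<Task<T>> body)
    {
        if (IsOpen)
        {
            // Like the Open state: fail fast and carry the original cause as InnerException.
            throw new InvalidOperationException("Circuit is open; failing fast", LastCaughtException);
        }

        var start = DateTime.UtcNow;
        try
        {
            var result = await body();
            if (DateTime.UtcNow - start > _callTimeout)
            {
                // Like AtomicState.CallThrough: a missed deadline is recorded as a TimeoutException,
                // even though the call itself eventually produced a result.
                OnFail(new TimeoutException($"Execution did not complete within {_callTimeout.TotalMilliseconds} ms"));
            }
            else
            {
                OnSuccess();
            }
            return result;
        }
        catch (Exception ex)
        {
            OnFail(ex);
            throw;
        }
    }

    private void OnSuccess()
    {
        _failureCount = 0;
        LastCaughtException = null; // success clears the cached cause
    }

    private void OnFail(Exception cause)
    {
        _failureCount++;
        LastCaughtException = cause; // every failure path records a concrete cause
    }
}

public static class NaiveBreakerDemo
{
    public static async Task Main()
    {
        var breaker = new NaiveBreaker(maxFailures: 1, callTimeout: TimeSpan.FromSeconds(1));

        try { await breaker.Invoke<int>(() => throw new ApplicationException("boom")); }
        catch (ApplicationException) { /* first failure trips the toy breaker */ }

        try { await breaker.Invoke(() => Task.FromResult(42)); }
        catch (InvalidOperationException open)
        {
            Console.WriteLine(open.InnerException?.Message); // prints "boom"
        }
    }
}
```

The real breaker differs substantially (atomic state transitions, half-open probing of a single call, scheduler-driven reset), but the contract to take away is the same: success clears the cached exception, every failure stores one, and the fail-fast exception carries it to the caller.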