From 208685a956e5020fb2ddcf0391d37778cd6fa480 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Tue, 9 Aug 2022 15:37:01 +0100 Subject: [PATCH 01/43] Add DynamoStore support --- .../Watchdog.Integration/CosmosConnector.fs | 31 ------------------- .../Watchdog.Integration/DynamoConnections.fs | 20 ++++++++++++ 2 files changed, 20 insertions(+), 31 deletions(-) delete mode 100644 equinox-shipping/Watchdog.Integration/CosmosConnector.fs create mode 100644 equinox-shipping/Watchdog.Integration/DynamoConnections.fs diff --git a/equinox-shipping/Watchdog.Integration/CosmosConnector.fs b/equinox-shipping/Watchdog.Integration/CosmosConnector.fs deleted file mode 100644 index dfa3a80ef..000000000 --- a/equinox-shipping/Watchdog.Integration/CosmosConnector.fs +++ /dev/null @@ -1,31 +0,0 @@ -namespace Shipping.Watchdog.Integration - -open Shipping.Infrastructure - -type CosmosConnector(connectionString, databaseId, containerId) = - - let discovery = connectionString |> Equinox.CosmosStore.Discovery.ConnectionString - let timeout = 5. |> System.TimeSpan.FromSeconds - let retries, maxRetryWaitTime = 5, 5. 
|> System.TimeSpan.FromSeconds - let connectionMode = Microsoft.Azure.Cosmos.ConnectionMode.Gateway - let connector = Equinox.CosmosStore.CosmosStoreConnector(discovery, timeout, retries, maxRetryWaitTime, connectionMode) - let leaseContainerId = containerId + "-aux" - let connectLeases () = connector.CreateUninitialized(databaseId, leaseContainerId) - - new (c : Shipping.Watchdog.SourceArgs.Configuration) = CosmosConnector(c.CosmosConnection, c.CosmosDatabase, c.CosmosContainer) - new () = CosmosConnector(Shipping.Watchdog.SourceArgs.Configuration EnvVar.tryGet) - - member val DumpStats = Equinox.CosmosStore.Core.Log.InternalMetrics.dump - member private _.ConnectStoreAndMonitored() = connector.ConnectStoreAndMonitored(databaseId, containerId) - member _.ConnectLeases() = - let leases : Microsoft.Azure.Cosmos.Container = connectLeases() - // Just as ConnectStoreAndMonitored references the global Logger, so do we -> see SerilogLogFixture, _dummy - Serilog.Log.Information("ChangeFeed Leases Database {db} Container {container}", leases.Database.Id, leases.Id) - leases - member x.Connect() = - let client, monitored = x.ConnectStoreAndMonitored() - let storeCfg = - let context = client |> CosmosStoreContext.create - let cache = Equinox.Cache("Tests", sizeMb = 10) - Shipping.Domain.Config.Store.Cosmos (context, cache) - storeCfg, monitored diff --git a/equinox-shipping/Watchdog.Integration/DynamoConnections.fs b/equinox-shipping/Watchdog.Integration/DynamoConnections.fs new file mode 100644 index 000000000..e0cf16258 --- /dev/null +++ b/equinox-shipping/Watchdog.Integration/DynamoConnections.fs @@ -0,0 +1,20 @@ +namespace Shipping.Watchdog.Integration + +open Equinox.DynamoStore +open Shipping.Watchdog.Infrastructure + +type DynamoConnections(serviceUrl, accessKey, secretKey, table, indexTable) = + let requestTimeout, retries = System.TimeSpan.FromSeconds 5., 5 + let connector = DynamoStoreConnector(serviceUrl, accessKey, secretKey, requestTimeout, retries) + let client 
= connector.CreateClient() + let storeClient = DynamoStoreClient(client, table) + let storeContext = storeClient |> DynamoStoreContext.create + let cache = Equinox.Cache ("Tests", sizeMb = 10) + + new (c : Shipping.Watchdog.Program.Configuration) = DynamoConnections(c.DynamoServiceUrl, c.DynamoAccessKey, c.DynamoSecretKey, c.DynamoTable, c.DynamoIndexTable) + new () = DynamoConnections(Shipping.Watchdog.Program.Configuration EnvVar.tryGet) + + member val IndexClient = DynamoStoreClient(client, match indexTable with Some x -> x | None -> table + "-index") + member val StoreContext = storeContext + member _.DynamoStore = (storeContext, cache) + member _.Store = Shipping.Domain.Config.Store.Dynamo (storeContext, cache) From e71b753fabc6765e00cbe7c466d97ba68ade1b87 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 9 Sep 2022 13:54:32 +0100 Subject: [PATCH 02/43] Port eqxPatterns to v4 --- .../Domain.Tests/Domain.Tests.fsproj | 12 +++---- equinox-patterns/Domain/Config.fs | 12 ++++--- equinox-patterns/Domain/Domain.fsproj | 6 ++-- equinox-patterns/Domain/ListEpoch.fs | 35 +++++++------------ equinox-patterns/Domain/ListSeries.fs | 25 +++++-------- equinox-patterns/Domain/Period.fs | 28 +++++++-------- 6 files changed, 49 insertions(+), 69 deletions(-) diff --git a/equinox-patterns/Domain.Tests/Domain.Tests.fsproj b/equinox-patterns/Domain.Tests/Domain.Tests.fsproj index 3bbea1578..c37687f3a 100644 --- a/equinox-patterns/Domain.Tests/Domain.Tests.fsproj +++ b/equinox-patterns/Domain.Tests/Domain.Tests.fsproj @@ -1,10 +1,8 @@ - net5.0 + net6.0 5 - false - Library @@ -13,12 +11,12 @@ - + - + - - + + diff --git a/equinox-patterns/Domain/Config.fs b/equinox-patterns/Domain/Config.fs index 17bd6fbaa..8dbc31f8a 100644 --- a/equinox-patterns/Domain/Config.fs +++ b/equinox-patterns/Domain/Config.fs @@ -1,20 +1,22 @@ module Patterns.Domain.Config let log = Serilog.Log.ForContext("isMetric", true) -let createDecider stream = Equinox.Decider(log, stream, maxAttempts = 3) 
+let resolveDecider cat = Equinox.Decider.resolve log cat module EventCodec = open FsCodec.SystemTextJson let private defaultOptions = Options.Create() - let create<'t when 't :> TypeShape.UnionContract.IUnionContract> () = - Codec.Create<'t>(options = defaultOptions).ToByteArrayCodec() + let gen<'t when 't :> TypeShape.UnionContract.IUnionContract> = + Codec.Create<'t>(options = defaultOptions) + let genJsonElement<'t when 't :> TypeShape.UnionContract.IUnionContract> = + CodecJsonElement.Create<'t>(options = defaultOptions) module Memory = - let create codec initial fold store = - Equinox.MemoryStore.MemoryStoreCategory(store, codec, fold, initial) + let create codec initial fold store : Equinox.Category<_, _, _> = + Equinox.MemoryStore.MemoryStoreCategory(store, FsCodec.Deflate.EncodeUncompressed codec, fold, initial) module Cosmos = diff --git a/equinox-patterns/Domain/Domain.fsproj b/equinox-patterns/Domain/Domain.fsproj index b623149e4..05be57bca 100644 --- a/equinox-patterns/Domain/Domain.fsproj +++ b/equinox-patterns/Domain/Domain.fsproj @@ -16,9 +16,9 @@ - - - + + + diff --git a/equinox-patterns/Domain/ListEpoch.fs b/equinox-patterns/Domain/ListEpoch.fs index 90b8d8aa4..9fbc0889e 100644 --- a/equinox-patterns/Domain/ListEpoch.fs +++ b/equinox-patterns/Domain/ListEpoch.fs @@ -1,7 +1,7 @@ module Patterns.Domain.ListEpoch let [] Category = "ListEpoch" -let streamName = ListEpochId.toString >> FsCodec.StreamName.create Category +let streamName id = struct (Category, ListEpochId.toString id) // NB - these types and the union case names reflect the actual storage formats and hence need to be versioned with care [] @@ -12,7 +12,7 @@ module Events = | Closed | Snapshotted of {| ids : ItemId[]; closed : bool |} interface TypeShape.UnionContract.IUnionContract - let codec = Config.EventCodec.create() + let codec, codecJe = Config.EventCodec.gen, Config.EventCodec.genJsonElement module Fold = @@ -49,18 +49,16 @@ let decide shouldClose candidateIds = function // NOTE 
see feedSource for example of separating Service logic into Ingestion and Read Services in order to vary the folding and/or state held type Service internal ( shouldClose : ItemId[] -> ItemId[] -> bool, // let outer layers decide whether ingestion should trigger closing of the batch - resolve_ : Equinox.ResolveOption option -> ListEpochId -> Equinox.Decider) = - let resolve = resolve_ None - let resolveStale = resolve_ (Some Equinox.AllowStale) + resolve : ListEpochId -> Equinox.Decider) = /// Ingest the supplied items. Yields relevant elements of the post-state to enable generation of stats /// and facilitate deduplication of incoming items in order to avoid null store round-trips where possible member _.Ingest(epochId, items) : Async> = - let decider = resolveStale epochId - /// NOTE decider which will initially transact against potentially stale cached state, which will trigger a - /// resync if another writer has gotten in before us. This is a conscious decision in this instance; the bulk - /// of writes are presumed to be coming from within this same process - decider.Transact(decide shouldClose items) + let decider = resolve epochId + // NOTE decider which will initially transact against potentially stale cached state, which will trigger a + // resync if another writer has gotten in before us. 
This is a conscious decision in this instance; the bulk + // of writes are presumed to be coming from within this same process + decider.Transact(decide shouldClose items, load = Equinox.AllowStale) /// Returns all the items currently held in the stream (Not using AllowStale on the assumption this needs to see updates from other apps) member _.Read epochId : Async = @@ -69,16 +67,9 @@ type Service internal module Config = - let private create_ shouldClose resolve = Service(shouldClose, resolve) - let private resolveStream opt = function - | Config.Store.Memory store -> - let cat = Config.Memory.create Events.codec Fold.initial Fold.fold store - fun sn -> cat.Resolve(sn, ?option = opt) - | Config.Store.Cosmos (context, cache) -> - let cat = Config.Cosmos.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) - fun sn -> cat.Resolve(sn, ?option = opt) - let private resolveDecider store opt = streamName >> resolveStream opt store >> Config.createDecider - let private create__ shouldClose = resolveDecider >> create_ shouldClose - let create maxItemsPerEpoch = + let private (|Category|) = function + | Config.Store.Memory store -> Config.Memory.create Events.codec Fold.initial Fold.fold store + | Config.Store.Cosmos (context, cache) -> Config.Cosmos.createSnapshotted Events.codecJe Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) + let create maxItemsPerEpoch (Category cat) = let shouldClose candidateItems currentItems = Array.length currentItems + Array.length candidateItems >= maxItemsPerEpoch - create__ shouldClose + Service(shouldClose, streamName >> Config.resolveDecider cat) diff --git a/equinox-patterns/Domain/ListSeries.fs b/equinox-patterns/Domain/ListSeries.fs index 43b38f7c4..4e01db3e6 100644 --- a/equinox-patterns/Domain/ListSeries.fs +++ b/equinox-patterns/Domain/ListSeries.fs @@ -6,7 +6,7 @@ module Patterns.Domain.ListSeries let [] Category = "ListSeries" // TOCONSIDER: if you need 
multiple lists series/epochs in a single system, the Series and Epoch streams should have a SeriesId in the stream name // See also the implementation in the feedSource template, where the Series aggregate also functions as an index of series held in the system -let streamName () = ListSeriesId.wellKnownId |> ListSeriesId.toString |> FsCodec.StreamName.create Category +let streamName () = struct (Category, ListSeriesId.toString ListSeriesId.wellKnownId) // NB - these types and the union case names reflect the actual storage formats and hence need to be versioned with care [] @@ -16,7 +16,7 @@ module Events = | Started of {| epochId : ListEpochId |} | Snapshotted of {| active : ListEpochId |} interface TypeShape.UnionContract.IUnionContract - let codec = Config.EventCodec.create() + let codec, codecJe = Config.EventCodec.gen, Config.EventCodec.genJsonElement module Fold = @@ -34,10 +34,7 @@ let interpret epochId (state : Fold.State) = [if state |> Option.forall (fun cur -> cur < epochId) && epochId >= ListEpochId.initial then yield Events.Started {| epochId = epochId |}] -type Service internal (resolve_ : Equinox.ResolveOption option -> unit -> Equinox.Decider) = - - let resolve = resolve_ None - let resolveStale = resolve_ (Some Equinox.AllowStale) +type Service internal (resolve : unit -> Equinox.Decider) = /// Determines the current active epoch /// Uses cached values as epoch transitions are rare, and caller needs to deal with the inherent race condition in any case @@ -48,17 +45,13 @@ type Service internal (resolve_ : Equinox.ResolveOption option -> unit -> Equino /// Mark specified `epochId` as live for the purposes of ingesting /// Writers are expected to react to having writes to an epoch denied (due to it being Closed) by anointing a successor via this member _.MarkIngestionEpochId epochId : Async = - let decider = resolveStale () - decider.Transact(interpret epochId) + let decider = resolve () + decider.Transact(interpret epochId, load = 
Equinox.AllowStale) module Config = - let private resolveStream opt = function - | Config.Store.Memory store -> - let cat = Config.Memory.create Events.codec Fold.initial Fold.fold store - fun sn -> cat.Resolve(sn, ?option = opt) + let private (|Category|) = function + | Config.Store.Memory store -> Config.Memory.create Events.codec Fold.initial Fold.fold store | Config.Store.Cosmos (context, cache) -> - let cat = Config.Cosmos.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) - fun sn -> cat.Resolve(sn, ?option = opt) - let private resolveDecider store opt = streamName >> resolveStream opt store >> Config.createDecider - let create = resolveDecider >> Service + Config.Cosmos.createSnapshotted Events.codecJe Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) + let create (Category cat) = Service(streamName >> Config.resolveDecider cat) diff --git a/equinox-patterns/Domain/Period.fs b/equinox-patterns/Domain/Period.fs index 3f454072e..561b6a37d 100644 --- a/equinox-patterns/Domain/Period.fs +++ b/equinox-patterns/Domain/Period.fs @@ -6,7 +6,7 @@ module Patterns.Domain.Period let [] Category = "Period" -let streamName periodId = FsCodec.StreamName.create Category (PeriodId.toString periodId) +let streamName periodId = struct (Category, PeriodId.toString periodId) // NOTE - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -18,7 +18,7 @@ module Events = | Added of ItemIds | CarriedForward of Balance interface TypeShape.UnionContract.IUnionContract - let codec = Config.EventCodec.create() + let codec, codecJe = Config.EventCodec.gen, Config.EventCodec.genJsonElement module Fold = @@ -89,7 +89,7 @@ let decideIngestWithCarryForward rules req s : Async * Eve } /// Manages Application of Requests to the Period's stream, including closing preceding periods as appropriate -type Service internal (resolve : 
Equinox.ResolveOption option -> PeriodId -> Equinox.Decider) = +type Service internal (resolve : PeriodId -> Equinox.Decider) = let calcBalance state = let createEventsBalance items : Events.Balance = { items = items } @@ -103,11 +103,11 @@ type Service internal (resolve : Equinox.ResolveOption option -> PeriodId -> Equ { getIncomingBalance = fun () -> close periodId decideIngestion = fun () _state -> (), (), [] decideCarryForward = fun () -> genBalance } // always close - let decider = resolve (Some Equinox.AllowStale) periodId + let decider = resolve periodId let decide' s = async { let! r, es = decideIngestWithCarryForward rules () s return Option.get r.carryForward, es } - decider.Transact(decide') + decider.TransactAsync(decide', load = Equinox.AllowStale) /// Runs the decision function on the specified Period, closing and bringing forward balances from preceding Periods if necessary let tryTransact periodId getIncoming (decide : 'request -> Fold.State -> 'request * 'result * Events.Event list) request shouldClose : Async> = @@ -115,8 +115,8 @@ type Service internal (resolve : Equinox.ResolveOption option -> PeriodId -> Equ { getIncomingBalance = getIncoming decideIngestion = fun request state -> let residual, result, events = decide request state in residual, result, events decideCarryForward = fun res state -> async { if shouldClose res then return! 
genBalance state else return None } } // also close, if we should - let decider = resolve (Some Equinox.AllowStale) periodId - decider.Transact(decideIngestWithCarryForward rules request) + let decider = resolve periodId + decider.TransactAsync(decideIngestWithCarryForward rules request, load = Equinox.AllowStale) /// Runs the decision function on the specified Period, closing and bringing forward balances from preceding Periods if necessary /// Processing completes when `decide` yields None for the residual of the 'request @@ -135,19 +135,15 @@ type Service internal (resolve : Equinox.ResolveOption option -> PeriodId -> Equ /// Exposes the full state to a reader (which is appropriate for a demo but is an anti-pattern in the general case) /// NOTE unlike for the Transact method, we do not supply ResolveOption.AllowStale, which means we'll see updates from other instances member _.Read periodId = - let decider = resolve None periodId + let decider = resolve periodId decider.Query id module Config = - let private resolveStream opt = function - | Config.Store.Memory store -> - let cat = Config.Memory.create Events.codec Fold.initial Fold.fold store - fun sn -> cat.Resolve(sn, ?option = opt) + let private (|Category|) = function + | Config.Store.Memory store -> Config.Memory.create Events.codec Fold.initial Fold.fold store | Config.Store.Cosmos (context, cache) -> // Not using snapshots, on the basis that the writes are all coming from this process, so the cache will be sufficient // to make reads cheap enough, with the benefit of writes being cheaper as you're not paying to maintain the snapshot - let cat = Config.Cosmos.createUnoptimized Events.codec Fold.initial Fold.fold (context, cache) - fun sn -> cat.Resolve(sn, ?option = opt) - let private resolveDecider store opt = streamName >> resolveStream opt store >> Config.createDecider - let create = resolveDecider >> Service + Config.Cosmos.createUnoptimized Events.codecJe Fold.initial Fold.fold (context, cache) + let 
create (Category cat) = Service(streamName >> Config.resolveDecider cat) From e7581518cfe1f226584f0b379426aac8245fc4c0 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 9 Sep 2022 14:43:40 +0100 Subject: [PATCH 03/43] Port equinox-web, adding Dynamo --- equinox-web/.template.config/template.json | 8 ++- equinox-web/Domain/Aggregate.fs | 24 ++++---- equinox-web/Domain/Config.fs | 41 +++++++++--- equinox-web/Domain/Domain.fsproj | 13 ++-- equinox-web/Domain/Infrastructure.fs | 9 --- equinox-web/Domain/Todo.fs | 24 ++++---- equinox-web/Web/Program.fs | 3 + equinox-web/Web/Startup.fs | 65 +++++++++++++------- equinox-web/Web/Web.fsproj | 8 +-- tests/Equinox.Templates.Tests/DotnetBuild.fs | 2 +- 10 files changed, 118 insertions(+), 79 deletions(-) diff --git a/equinox-web/.template.config/template.json b/equinox-web/.template.config/template.json index 86dc5361b..8c7f325cf 100644 --- a/equinox-web/.template.config/template.json +++ b/equinox-web/.template.config/template.json @@ -47,7 +47,13 @@ "type": "parameter", "dataType": "bool", "defaultValue": "false", - "description": "Store Events in an Azure CosmosDb Account" + "description": "Store Events in an Azure CosmosDb Container" + }, + "dynamo": { + "type": "parameter", + "dataType": "bool", + "defaultValue": "false", + "description": "Store Events in an AWS Dynamo Table" }, "cosmosSimulator": { "type": "parameter", diff --git a/equinox-web/Domain/Aggregate.fs b/equinox-web/Domain/Aggregate.fs index 8d2759c2b..fd6a014a6 100644 --- a/equinox-web/Domain/Aggregate.fs +++ b/equinox-web/Domain/Aggregate.fs @@ -1,7 +1,7 @@ module TodoBackendTemplate.Aggregate let [] Category = "Aggregate" -let streamName (id: string) = FsCodec.StreamName.create Category id +let streamName (id: string) = struct (Category, id) // NB - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -12,7 +12,7 @@ module Events = | Happened | Snapshotted of 
SnapshottedData interface TypeShape.UnionContract.IUnionContract - let codec = Config.EventCodec.create() + let codec, codecJe = Config.EventCodec.gen, Config.EventCodec.genJsonElement module Fold = @@ -45,22 +45,22 @@ type Service internal (resolve : string -> Equinox.Decider - let cat = Config.Memory.create Events.codec Fold.initial Fold.fold store - cat.Resolve + Config.Memory.create Events.codec Fold.initial Fold.fold store #endif //#endif //#if cosmos | Config.Store.Cosmos (context, cache) -> - let cat = Config.Cosmos.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) - cat.Resolve + Config.Cosmos.createSnapshotted Events.codecJe Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) +//#endif +//#if dynamo + | Config.Store.Dynamo (context, cache) -> + Config.Dynamo.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) //#endif //#if eventStore | Config.Store.Esdb (context, cache) -> - let cat = Config.Esdb.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) - cat.Resolve + Config.Esdb.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) //#endif - let private resolveDecider store = streamName >> resolveStream store >> Config.createDecider - let create = resolveDecider >> Service + let create (Category cat) = Service(streamName >> Config.resolveDecider cat) diff --git a/equinox-web/Domain/Config.fs b/equinox-web/Domain/Config.fs index 23709c5d6..8c00043eb 100644 --- a/equinox-web/Domain/Config.fs +++ b/equinox-web/Domain/Config.fs @@ -1,20 +1,22 @@ module TodoBackendTemplate.Config let log = Serilog.Log.ForContext("isMetric", true) -let createDecider stream = Equinox.Decider(log, stream, maxAttempts = 3) +let resolveDecider cat = Equinox.Decider.resolve log cat module EventCodec = open FsCodec.SystemTextJson let private defaultOptions = 
Options.Create() - let create<'t when 't :> TypeShape.UnionContract.IUnionContract> () = - Codec.Create<'t>(options = defaultOptions).ToByteArrayCodec() + let gen<'t when 't :> TypeShape.UnionContract.IUnionContract> = + Codec.Create<'t>(options = defaultOptions) + let genJsonElement<'t when 't :> TypeShape.UnionContract.IUnionContract> = + CodecJsonElement.Create<'t>(options = defaultOptions) -#if (memoryStore || (!cosmos && !eventStore)) +#if (memoryStore || (!cosmos && !dynamo && !eventStore)) module Memory = - let create _codec initial fold store = + let create _codec initial fold store : Equinox.Category<_, _, _> = // While the actual prod codec can be used, the Box codec allows one to stub out the decoding on the basis that // nothing will be proved beyond what a complete roundtripping test per `module Aggregate` would already cover Equinox.MemoryStore.MemoryStoreCategory(store, FsCodec.Box.Codec.Create(), fold, initial) @@ -35,18 +37,34 @@ module Cosmos = let accessStrategy = Equinox.CosmosStore.AccessStrategy.RollingState toSnapshot createCached codec initial fold accessStrategy (context, cache) +//#endif +//#if dynamo +module Dynamo = + + let private createCached codec initial fold accessStrategy (context, cache) = + let cacheStrategy = Equinox.DynamoStore.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) 
+ Equinox.DynamoStore.DynamoStoreCategory(context, FsCodec.Deflate.EncodeUncompressed codec, fold, initial, cacheStrategy, accessStrategy) + + let createSnapshotted codec initial fold (isOrigin, toSnapshot) (context, cache) = + let accessStrategy = Equinox.DynamoStore.AccessStrategy.Snapshot (isOrigin, toSnapshot) + createCached codec initial fold accessStrategy (context, cache) + + let createRollingState codec initial fold toSnapshot (context, cache) = + let accessStrategy = Equinox.DynamoStore.AccessStrategy.RollingState toSnapshot + createCached codec initial fold accessStrategy (context, cache) + //#endif //#if eventStore module Esdb = let createSnapshotted codec initial fold (isOrigin, toSnapshot) (context, cache) = - let cacheStrategy = Equinox.EventStore.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) - let accessStrategy = Equinox.EventStore.AccessStrategy.RollingSnapshots (isOrigin, toSnapshot) - Equinox.EventStore.EventStoreCategory(context, codec, fold, initial, cacheStrategy, accessStrategy) + let cacheStrategy = Equinox.EventStoreDb.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) 
+ let accessStrategy = Equinox.EventStoreDb.AccessStrategy.RollingSnapshots (isOrigin, toSnapshot) + Equinox.EventStoreDb.EventStoreCategory(context, codec, fold, initial, cacheStrategy, accessStrategy) //#endif [] -#if (memoryStore || (!cosmos && !eventStore)) +#if (memoryStore || (!cosmos && !dynamo && !eventStore)) type Store<'t> = | Memory of Equinox.MemoryStore.VolatileStore<'t> #else @@ -55,6 +73,9 @@ type Store = //#if cosmos | Cosmos of Equinox.CosmosStore.CosmosStoreContext * Equinox.Core.ICache //#endif +//#if dynamo + | Dynamo of Equinox.DynamoStore.DynamoStoreContext * Equinox.Core.ICache +//#endif //#if eventStore - | Esdb of Equinox.EventStore.EventStoreContext * Equinox.Core.ICache + | Esdb of Equinox.EventStoreDb.EventStoreContext * Equinox.Core.ICache //#endif diff --git a/equinox-web/Domain/Domain.fsproj b/equinox-web/Domain/Domain.fsproj index 0c8fa5636..324e36802 100644 --- a/equinox-web/Domain/Domain.fsproj +++ b/equinox-web/Domain/Domain.fsproj @@ -1,10 +1,8 @@  - netstandard2.1 + net6.0 5 - false - true @@ -19,10 +17,11 @@ - - - - + + + + + diff --git a/equinox-web/Domain/Infrastructure.fs b/equinox-web/Domain/Infrastructure.fs index 5c2b355c9..3a95ea9e6 100644 --- a/equinox-web/Domain/Infrastructure.fs +++ b/equinox-web/Domain/Infrastructure.fs @@ -11,12 +11,3 @@ type ClientId = Guid and [] clientId module ClientId = let toString (value : ClientId) : string = Guid.toStringN %value - -[] -module DeciderExtensions = - - type Equinox.Decider<'e, 's> with - - // see https://github.com/jet/equinox/pull/320 - member x.Transact(decide, mapResult) = - x.TransactEx((fun c -> async { let events = decide c.State in return (), events }), fun () c -> mapResult c.State) diff --git a/equinox-web/Domain/Todo.fs b/equinox-web/Domain/Todo.fs index 9d1be7bbe..870a81b8d 100644 --- a/equinox-web/Domain/Todo.fs +++ b/equinox-web/Domain/Todo.fs @@ -2,7 +2,7 @@ let [] Category = "Todos" /// Maps a ClientId to the StreamName where data for that client will be held 
-let streamName (clientId: ClientId) = FsCodec.StreamName.create Category (ClientId.toString clientId) +let streamName (clientId: ClientId) = struct (Category, ClientId.toString clientId) // NB - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -22,7 +22,7 @@ module Events = /// For EventStore, AccessStrategy.RollingSnapshots embeds these events every `batchSize` events | Snapshotted of SnapshotData interface TypeShape.UnionContract.IUnionContract - let codec = Config.EventCodec.create() + let codec, codecJe = Config.EventCodec.gen, Config.EventCodec.genJsonElement /// Types and mapping logic used maintain relevant State based on Events observed on the Todo List Stream module Fold = @@ -131,21 +131,21 @@ type Service internal (resolve : ClientId -> Equinox.Decider - let cat = Config.Memory.create Events.codec Fold.initial Fold.fold store - cat.Resolve + Config.Memory.create Events.codec Fold.initial Fold.fold store #endif //#if cosmos | Config.Store.Cosmos (context, cache) -> - let cat = Config.Cosmos.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) - cat.Resolve + Config.Cosmos.createSnapshotted Events.codecJe Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) +//#endif +//#if dynamo + | Config.Store.Dynamo (context, cache) -> + Config.Dynamo.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) //#endif //#if eventStore | Config.Store.Esdb (context, cache) -> - let cat = Config.Esdb.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) - cat.Resolve + Config.Esdb.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) //#endif - let private resolveDecider store = streamName >> resolveStream store >> Config.createDecider - let create = resolveDecider >> Service 
+ let create store = Service(fun id -> Config.resolveDecider (resolveCategory store) (streamName id)) diff --git a/equinox-web/Web/Program.fs b/equinox-web/Web/Program.fs index 48fdd9e23..eea2edf7c 100644 --- a/equinox-web/Web/Program.fs +++ b/equinox-web/Web/Program.fs @@ -15,6 +15,9 @@ type Logging() = .MinimumLevel.Override("Microsoft.AspNetCore", Serilog.Events.LogEventLevel.Warning) #if cosmos .WriteTo.Sink(Equinox.CosmosStore.Prometheus.LogSink(customTags)) +#endif +#if dynamo + .WriteTo.Sink(Equinox.DynamoStore.Prometheus.LogSink(customTags)) #endif .Enrich.FromLogContext() .WriteTo.Console() diff --git a/equinox-web/Web/Startup.fs b/equinox-web/Web/Startup.fs index 6b6197aa9..be1c270f8 100644 --- a/equinox-web/Web/Startup.fs +++ b/equinox-web/Web/Startup.fs @@ -15,17 +15,20 @@ module Storage = /// Specifies the store to be used, together with any relevant custom parameters [] type Store = -//#if (memoryStore || (!cosmos && !eventStore)) +//#if (memoryStore || (!cosmos && !dynamo && !eventStore)) | Memory //#endif //#if eventStore - | Esdb of host: string * username: string * password: string * cacheMb: int + | Esdb of connectionString: string * cacheMb: int //#endif //#if cosmos | Cosmos of mode: Microsoft.Azure.Cosmos.ConnectionMode * connectionStringWithUriAndKey: string * database: string * container: string * cacheMb: int //#endif +//#if dynamo + | Dynamo of region : string * tableName: string * cacheMb: int +//#endif -//#if (memoryStore || (!cosmos && !eventStore)) +//#if (memoryStore || (!cosmos && !dynamo && !eventStore)) /// MemoryStore 'wiring', uses Equinox.MemoryStore nuget package module private Memory = open Equinox.MemoryStore @@ -36,11 +39,10 @@ module Storage = //#if eventStore /// EventStore wiring, uses Equinox.EventStore nuget package module private ES = - open Equinox.EventStore - let connect host username password = - let log = Logger.SerilogNormal (Log.ForContext()) - let c = Connector(username, password, 
reqTimeout=TimeSpan.FromSeconds 5., reqRetries=1, log=log) - let conn = c.Establish ("Twin", Discovery.GossipDns host, ConnectionStrategy.ClusterTwinPreferSlaveReads) |> Async.RunSynchronously + open Equinox.EventStoreDb + let connect connectionString = + let c = EventStoreConnector(reqTimeout=TimeSpan.FromSeconds 5., reqRetries=1) + let conn = c.Establish("Twin", Discovery.ConnectionString connectionString, ConnectionStrategy.ClusterTwinPreferSlaveReads) EventStoreContext(conn, BatchingPolicy(maxBatchSize=500)) //#endif @@ -59,18 +61,34 @@ module Storage = let c = CosmosStoreConnector(discovery, operationTimeout, maxRetryForThrottling, maxRetryWait, mode) CosmosStoreClient.Connect(c.CreateAndInitialize, databaseId, containerId) |> Async.RunSynchronously |> CosmosStoreContext.create +//#endif +//#if dynamo + /// DynamoDB wiring, uses Equinox.DynamoStore nuget package + module private Dynamo = + open Equinox.DynamoStore + module DynamoStoreContext = + + /// Create with default packing and querying policies. 
Search for other `module DynamoStoreContext` impls for custom variations + let create (storeClient : DynamoStoreClient) = + let maxEvents = 256 + DynamoStoreContext(storeClient, tipMaxEvents = maxEvents) + + let connect (region, table) (timeout, retries) = + let c = DynamoStoreConnector(region, timeout, retries) + DynamoStoreClient.Connect(c.CreateClient(), table) |> Async.RunSynchronously |> DynamoStoreContext.create + //#endif /// Creates and/or connects to a specific store as dictated by the specified config let connect = function -//#if (memoryStore || (!cosmos && !eventStore)) +//#if (memoryStore || (!cosmos && !dynamo && !eventStore)) | Store.Memory -> let store = Memory.connect() Config.Store.Memory store //#endif //#if eventStore - | Store.Esdb (host, user, pass, cache) -> + | Store.Esdb (connectionString, cache) -> let cache = Equinox.Cache("ES", sizeMb = cache) - let conn = ES.connect host user pass + let conn = ES.connect connectionString Config.Store.Esdb (conn, cache) //#endif //#if cosmos @@ -81,6 +99,14 @@ module Storage = let context = Cosmos.connect (mode, Equinox.CosmosStore.Discovery.ConnectionString connectionString, database, container) (timeout, retriesOn429Throttling, timeout) Config.Store.Cosmos (context, cache) //#endif +//#if dynamo + | Store.Dynamo (region, table, cache) -> + let cache = Equinox.Cache("Dynamo", sizeMb = cache) + let retries = 1 // Number of retries before failing processing when provisioned RU/s limit in CosmosDb is breached + let timeout = TimeSpan.FromSeconds 5. 
// Timeout applied per request, including retry attempts + let context = Dynamo.connect (region, table) (timeout, retries) + Config.Store.Dynamo (context, cache) +//#endif /// Dependency Injection wiring for services using Equinox module Services = @@ -104,7 +130,6 @@ type Startup() = member _.ConfigureServices(services: IServiceCollection) : unit = services .AddMvc() - .SetCompatibilityVersion(CompatibilityVersion.Latest) .AddJsonOptions(fun options -> FsCodec.SystemTextJson.Options.Default.Converters |> Seq.iter options.JsonSerializerOptions.Converters.Add @@ -118,15 +143,9 @@ type Startup() = //#endif //#if eventStore - // EVENTSTORE: see https://eventstore.org/ - // Requires a Commercial HA Cluster, which can be simulated by 1) installing the OSS Edition from Chocolatey 2) running it in cluster mode - - //# requires admin privilege - //cinst eventstore-oss -y # where cinst is an invocation of the Chocolatey Package Installer on Windows - //# run as a single-node cluster to allow connection logic to use cluster mode as for a commercial cluster - //& $env:ProgramData\chocolatey\bin\EventStore.ClusterNode.exe --gossip-on-single-node --discover-via-dns 0 --ext-http-port=30778 - - let storeConfig = Storage.Store.Esdb ("localhost", "admin", "changeit", cacheMb) + // EVENTSTORE: See https://github.com/jet/equinox/blob/master/docker-compose.yml for the associated docker-compose configuration + + let storeConfig = Storage.Store.Esdb ("esdb://admin:changeit@localhost:2111,localhost:2112,localhost:2113?tls=true&tlsVerifyCert=false", cacheMb) //#endif //#if cosmos @@ -154,11 +173,11 @@ type Startup() = failwithf "Event Storage subsystem requires the following Environment Variables to be specified: %s, %s, %s" connectionVar databaseVar containerVar //#endif -#if (memoryStore && !cosmos && !eventStore) +#if (memoryStore && !cosmos && !dynamo && !eventStore) let storeConfig = Storage.Store.Memory #endif -//#if (!memoryStore && !cosmos && !eventStore) +//#if (!memoryStore && 
!cosmos && !dynamo && !eventStore) //let storeConfig = Storage.Store.Memory //#endif diff --git a/equinox-web/Web/Web.fsproj b/equinox-web/Web/Web.fsproj index 4259a6f10..5d2330d1e 100644 --- a/equinox-web/Web/Web.fsproj +++ b/equinox-web/Web/Web.fsproj @@ -1,7 +1,7 @@  - netcoreapp3.1 + net6.0 @@ -11,11 +11,11 @@ - + - - + + diff --git a/tests/Equinox.Templates.Tests/DotnetBuild.fs b/tests/Equinox.Templates.Tests/DotnetBuild.fs index 98acd3694..836f69b75 100644 --- a/tests/Equinox.Templates.Tests/DotnetBuild.fs +++ b/tests/Equinox.Templates.Tests/DotnetBuild.fs @@ -6,7 +6,7 @@ open Xunit.Abstractions type ProProjector() as this = inherit TheoryData() - do for source in ["cosmos"; (* <-default *) "eventStore"; "sqlStreamStore"] do + do for source in ["cosmos"; (* <-default *) "dynamo"; "eventStore"; "sqlStreamStore"] do let variants = if source <> "cosmos" then [ []; ["--kafka"] ] else From 3900ddda9bff3d8e27053148f8a2836867ae4f99 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Mon, 12 Sep 2022 18:26:49 +0100 Subject: [PATCH 04/43] Port eqxweb-cs --- equinox-web-csharp/Domain/Aggregate.cs | 96 ++++++----------- equinox-web-csharp/Domain/Domain.csproj | 11 +- equinox-web-csharp/Domain/Infrastructure.cs | 58 +++------- equinox-web-csharp/Domain/Todo.cs | 107 +++++++------------ equinox-web-csharp/Web/CosmosContext.cs | 32 ++---- equinox-web-csharp/Web/EquinoxContext.cs | 32 +++--- equinox-web-csharp/Web/EventStoreContext.cs | 41 +++---- equinox-web-csharp/Web/MemoryStoreContext.cs | 17 +-- equinox-web-csharp/Web/Startup.cs | 25 +++-- equinox-web-csharp/Web/Web.csproj | 11 +- 10 files changed, 162 insertions(+), 268 deletions(-) diff --git a/equinox-web-csharp/Domain/Aggregate.cs b/equinox-web-csharp/Domain/Aggregate.cs index fad6f5e35..afaa5a1b7 100755 --- a/equinox-web-csharp/Domain/Aggregate.cs +++ b/equinox-web-csharp/Domain/Aggregate.cs @@ -1,12 +1,7 @@ -using Equinox; -using Equinox.Core; -using Microsoft.FSharp.Core; -using Newtonsoft.Json; -using Serilog; 
+using Microsoft.FSharp.Core; using System; using System.Collections.Generic; using System.Threading.Tasks; -using FsCodec; namespace TodoBackendTemplate { @@ -21,45 +16,37 @@ public class Happened : Event public class Snapshotted : Event { - public new bool Happened { get; set; } + public bool HasHappened { get; set; } } - static readonly JsonNetUtf8Codec Codec = new JsonNetUtf8Codec(new JsonSerializerSettings()); + static readonly SystemTextJsonUtf8Codec Codec = new(new()); - public static Event TryDecode(string et, byte[] json) - { - switch (et) + public static FSharpValueOption TryDecode(string et, byte[] json) => + et switch { - case nameof(Happened): return Codec.Decode(json); - case nameof(Snapshotted): return Codec.Decode(json); - default: return null; - } - } + nameof(Happened) => Codec.Decode(json), + nameof(Snapshotted) => Codec.Decode(json), + _ => FSharpValueOption.None + }; - public static Tuple Encode(Event e) => Tuple.Create(e.GetType().Name, Codec.Encode(e)); - public static string For(ClientId id) => StreamNameModule.create("Aggregate", id.ToString()); + public static (string, ReadOnlyMemory) Encode(Event e) => (e.GetType().Name, Codec.Encode(e)); + public static (string, string) StreamIds(ClientId id) => ("Aggregate", id.ToString()); } public class State { public bool Happened { get; set; } - internal State(bool happened) { Happened = happened; } + State(bool happened) { Happened = happened; } - public static readonly State Initial = new State(false); + public static readonly State Initial = new (false); - static void Evolve(State s, Event x) - { - switch (x) + static void Evolve(State s, Event x) => + s.Happened = x switch { - case Event.Happened e: - s.Happened = true; - break; - case Event.Snapshotted e: - s.Happened = e.Happened; - break; - default: throw new ArgumentOutOfRangeException(nameof(x), x, "invalid"); - } - } + Event.Happened e => true, + Event.Snapshotted e => e.HasHappened, + _ => throw new ArgumentOutOfRangeException(nameof(x), 
x, "invalid") + }; public static State Fold(State origin, IEnumerable xs) { @@ -72,7 +59,7 @@ public static State Fold(State origin, IEnumerable xs) public static bool IsOrigin(Event e) => e is Event.Snapshotted; - public static Event Snapshot(State s) => new Event.Snapshotted {Happened = s.Happened}; + public static Event Snapshot(State s) => new Event.Snapshotted {HasHappened = s.Happened}; } /// Defines the decision process which maps from the intent of the `Command` to the `Event`s that represent that decision in the Stream @@ -82,56 +69,39 @@ public class MakeItSo : Command { } - public static IEnumerable Interpret(State s, Command x) + public IEnumerable Interpret(State s) { - switch (x) + switch (this) { case MakeItSo c: if (!s.Happened) yield return new Event.Happened(); break; - default: throw new ArgumentOutOfRangeException(nameof(x), x, "invalid"); + default: throw new ArgumentOutOfRangeException("this", this, "invalid"); } } } - class Handler - { - readonly EquinoxStream _stream; - - public Handler(ILogger log, IStream stream) => - _stream = new EquinoxStream(State.Fold, log, stream); - - /// Execute `command`, syncing any events decided upon - public Task Execute(Command c) => - _stream.Execute(s => Command.Interpret(s, c)); - - /// Establish the present state of the Stream, project from that as specified by `projection` - public Task Query(Func projection) => - _stream.Query(projection); - } - - public class View - { - public bool Sorted { get; set; } - } + public record View(bool Sorted); public class Service { /// Maps a ClientId to Handler for the relevant stream - readonly Func _stream; + readonly Func> _resolve; - public Service(ILogger handlerLog, Func> resolve) => - _stream = id => new Handler(handlerLog, resolve(Event.For(id))); + public Service(Func> resolve) => + _resolve = resolve; /// Execute the specified command public Task Execute(ClientId id, Command command) => - _stream(id).Execute(command); + 
_resolve(id).Transact(command.Interpret); /// Read the present state // TOCONSIDER: you should probably be separating this out per CQRS and reading from a denormalized/cached set of projections - public Task Read(ClientId id) => _stream(id).Query(Render); + public Task Read(ClientId id) => + _resolve(id).Query(Render); - static View Render(State s) => new View() {Sorted = s.Happened}; + static View Render(State s) => + new (Sorted: s.Happened); } } -} \ No newline at end of file +} diff --git a/equinox-web-csharp/Domain/Domain.csproj b/equinox-web-csharp/Domain/Domain.csproj index abb2bc644..ba2873ad1 100755 --- a/equinox-web-csharp/Domain/Domain.csproj +++ b/equinox-web-csharp/Domain/Domain.csproj @@ -1,14 +1,17 @@  - netstandard2.0 + net6.0 - - - + + + + + + diff --git a/equinox-web-csharp/Domain/Infrastructure.cs b/equinox-web-csharp/Domain/Infrastructure.cs index 7b22bd51a..de84dfe4f 100644 --- a/equinox-web-csharp/Domain/Infrastructure.cs +++ b/equinox-web-csharp/Domain/Infrastructure.cs @@ -1,13 +1,8 @@ using Equinox; using Equinox.Core; -using Microsoft.FSharp.Collections; -using Microsoft.FSharp.Control; using Microsoft.FSharp.Core; -using Newtonsoft.Json; -using Serilog; using System; using System.Collections.Generic; -using System.IO; using System.Threading.Tasks; namespace TodoBackendTemplate @@ -27,17 +22,16 @@ public Accumulator(Func,TState> fold, TState state) public TState State => _fold(_state,Accumulated); public void Execute(Func> f) => Accumulated.AddRange(f(State)); - } - public class EquinoxStream : Decider + public class EquinoxStream : DeciderCore { private readonly Func, TState> _fold; public EquinoxStream( Func, TState> fold, - ILogger log, IStream stream, int maxAttempts = 3) - : base(log, stream, maxAttempts) + IStream stream) + : base(stream) { _fold = fold; } @@ -45,55 +39,37 @@ public EquinoxStream( /// Run the decision method, letting it decide whether or not the Command's intent should manifest as Events public async Task 
Execute(Func> interpret) { - FSharpList decide_(TState state) - { - var a = new Accumulator(_fold, state); - a.Execute(interpret); - return ListModule.OfSeq(a.Accumulated); - } - return await FSharpAsync.StartAsTask(Transact(FuncConvert.FromFunc>(decide_)), null, null); + return await Transact(interpret); } /// Execute a command, as Decide(Action) does, but also yield an outcome from the decision public async Task Decide(Func, T> decide) { - Tuple> decide_(TState state) + (T, IEnumerable) decideWrapped(TState state) { var a = new Accumulator(_fold, state); var r = decide(a); - return Tuple.Create(r, ListModule.OfSeq(a.Accumulated)); + return (r, a.Accumulated); } - return await FSharpAsync.StartAsTask(Transact(FuncConvert.FromFunc>>(decide_)), null, null); + + return await Transact(decide: decideWrapped); } // Project from the synchronized state, without the possibility of adding events that Decide(Func) admits public async Task Query(Func project) => - await FSharpAsync.StartAsTask(Query(FuncConvert.FromFunc(project)), null, null); + await base.Query(project); } - /// Newtonsoft.Json implementation of IEncoder that encodes direct to a UTF-8 Buffer - public class JsonNetUtf8Codec + /// System.Text.Json implementation of IEncoder that encodes direct to a UTF-8 Buffer + public class SystemTextJsonUtf8Codec { - readonly JsonSerializer _serializer; + private readonly TypeShape.UnionContract.IEncoder> _codec; - public JsonNetUtf8Codec(JsonSerializerSettings settings) => - _serializer = JsonSerializer.Create(settings); + public SystemTextJsonUtf8Codec(System.Text.Json.JsonSerializerOptions options) => + _codec = new FsCodec.SystemTextJson.Core.ReadOnlyMemoryEncoder(options); - public byte[] Encode(T value) where T : class - { - using (var ms = new MemoryStream()) - { - using (var jsonWriter = new JsonTextWriter(new StreamWriter(ms))) - _serializer.Serialize(jsonWriter, value, typeof(T)); - return ms.ToArray(); - } - } + public ReadOnlyMemory Encode(T value) where T : 
class => _codec.Encode(value); - public T Decode(byte[] json) where T : class - { - using (var ms = new MemoryStream(json)) - using (var jsonReader = new JsonTextReader(new StreamReader(ms))) - return _serializer.Deserialize(jsonReader); - } + public T Decode(ReadOnlyMemory json) where T : class => _codec.Decode(json); } -} \ No newline at end of file +} diff --git a/equinox-web-csharp/Domain/Todo.cs b/equinox-web-csharp/Domain/Todo.cs index dc4d2b1ac..bf8a1d552 100755 --- a/equinox-web-csharp/Domain/Todo.cs +++ b/equinox-web-csharp/Domain/Todo.cs @@ -1,13 +1,9 @@ -using Equinox; -using Equinox.Core; -using Microsoft.FSharp.Core; -using Newtonsoft.Json; -using Serilog; +using Microsoft.FSharp.Core; using System; using System.Collections.Generic; using System.Linq; +using System.Text.Json; using System.Threading.Tasks; -using FsCodec; namespace TodoBackendTemplate { @@ -55,26 +51,26 @@ public class Snapshotted : Event public ItemData[] Items { get; set; } } - static readonly JsonNetUtf8Codec Codec = new JsonNetUtf8Codec(new JsonSerializerSettings()); + static readonly SystemTextJsonUtf8Codec Codec = + new (new JsonSerializerOptions()); - public static Event TryDecode(string et, byte[] json) - { - switch (et) + public static FSharpValueOption TryDecode(string et, ReadOnlyMemory json) => + et switch { - case nameof(Added): return Codec.Decode(json); - case nameof(Updated): return Codec.Decode(json); - case nameof(Deleted): return Codec.Decode(json); - case nameof(Cleared): return Codec.Decode(json); - case nameof(Snapshotted): return Codec.Decode(json); - default: return null; - } - } + nameof(Added) => Codec.Decode(json), + nameof(Updated) => Codec.Decode(json), + nameof(Deleted) => Codec.Decode(json), + nameof(Cleared) => Codec.Decode(json), + nameof(Snapshotted) => Codec.Decode(json), + _ => FSharpValueOption.None + }; - public static Tuple Encode(Event e) => Tuple.Create(e.GetType().Name, Codec.Encode(e)); + public static (string, ReadOnlyMemory) Encode(Event e) 
=> + (e.GetType().Name, Codec.Encode(e)); /// Maps a ClientId to the Target that specifies the Stream in which the data for that client will be held - public static string For(ClientId id) => - StreamNameModule.create("Todos", id?.ToString() ?? "1"); + public static (string, string) StreamIds(ClientId id) => + ("Todos", id?.ToString() ?? "1"); } /// Present state of the Todo List as inferred from the Events we've seen to date @@ -93,7 +89,7 @@ internal State(int nextId, Event.ItemData[] items) Items = items; } - public static State Initial = new State(0, new Event.ItemData[0]); + public static readonly State Initial = new State(0, Array.Empty()); /// Folds a set of events from the store into a given `state` public static State Fold(State origin, IEnumerable xs) @@ -172,9 +168,9 @@ public class Clear : Command } /// Defines the decision process which maps from the intent of the `Command` to the `Event`s that represent that decision in the Stream - public static IEnumerable Interpret(State s, Command x) + public IEnumerable Interpret(State s) { - switch (x) + switch (this) { case Add c: yield return Make(s.NextId, c.Props); @@ -198,7 +194,7 @@ bool IsEquivalent(Event.ItemData i) => break; default: - throw new ArgumentOutOfRangeException(nameof(x), x, "invalid"); + throw new ArgumentOutOfRangeException("this", this, "invalid"); } T Make(int id, Props value) where T : Event.ItemEvent, new() => @@ -206,31 +202,6 @@ bool IsEquivalent(Event.ItemData i) => } } - /// Defines low level stream operations relevant to the Todo Stream in terms of Command and Events - class Handler - { - readonly EquinoxStream _stream; - - public Handler(ILogger log, IStream stream) => - _stream = new EquinoxStream(State.Fold, log, stream); - - /// Execute `command`; does not emit the post state - public Task Execute(Command c) => - _stream.Execute(s => Command.Interpret(s, c)); - - /// Handle `command`, return the items after the command's intent has been applied to the stream - public Task 
Decide(Command c) => - _stream.Decide(ctx => - { - ctx.Execute(s => Command.Interpret(s, c)); - return ctx.State.Items; - }); - - /// Establish the present state of the Stream, project from that as specified by `projection` - public Task Query(Func projection) => - _stream.Query(projection); - } - /// A single Item in the Todo List public class View { @@ -244,10 +215,10 @@ public class View public class Service { /// Maps a ClientId to Handler for the relevant stream - readonly Func _stream; + readonly Func> _resolve; - public Service(ILogger handlerLog, Func> resolve) => - _stream = id => new Handler(handlerLog, resolve(Event.For(id))); + public Service(Func> resolve) => + _resolve = resolve; // // READ @@ -255,11 +226,11 @@ public Service(ILogger handlerLog, Func> resolve) /// List all open items public Task> List(ClientId clientId) => - _stream(clientId).Query(s => s.Items.Select(Render)); + _resolve(clientId).Query(s => s.Items.Select(Render)); /// Load details for a single specific item public Task TryGet(ClientId clientId, int id) => - _stream(clientId).Query(s => + _resolve(clientId).Query(s => { var i = s.Items.SingleOrDefault(x => x.Id == id); return i == null ? 
null : Render(i); @@ -271,28 +242,26 @@ public Task TryGet(ClientId clientId, int id) => /// Execute the specified (blind write) command public Task Execute(ClientId clientId, Command command) => - _stream(clientId).Execute(command); + _resolve(clientId).Transact(command.Interpret); // // WRITE-READ // /// Create a new ToDo List item; response contains the generated `id` - public async Task Create(ClientId clientId, Props template) - { - var state = await _stream(clientId).Decide(new Command.Add {Props = template}); - return Render(state.First()); - } + public Task Create(ClientId clientId, Props template) => + _resolve(clientId).Transact( + new Command.Add {Props = template}.Interpret, + s => Render(s.Items.First())); /// Update the specified item as referenced by the `item.id` - public async Task Patch(ClientId clientId, int id, Props value) - { - var state = await _stream(clientId).Decide(new Command.Update {Id = id, Props = value}); - return Render(state.Single(x => x.Id == id)); - } + public Task Patch(ClientId clientId, int id, Props value) => + _resolve(clientId).Transact( + new Command.Update {Id = id, Props = value}.Interpret, + s => Render(s.Items.Single(x => x.Id == id))); - static View Render(Event.ItemData i) => - new View {Id = i.Id, Order = i.Order, Title = i.Title, Completed = i.Completed}; + static View Render(Event.ItemData i) => + new View {Id = i.Id, Order = i.Order, Title = i.Title, Completed = i.Completed}; } } -} \ No newline at end of file +} diff --git a/equinox-web-csharp/Web/CosmosContext.cs b/equinox-web-csharp/Web/CosmosContext.cs index b03b72c1d..fdf678e4b 100644 --- a/equinox-web-csharp/Web/CosmosContext.cs +++ b/equinox-web-csharp/Web/CosmosContext.cs @@ -1,5 +1,6 @@ using Equinox; using Equinox.CosmosStore; +using FsCodec.SystemTextJson.Interop; using Microsoft.Azure.Cosmos; using Microsoft.FSharp.Control; using Microsoft.FSharp.Core; @@ -9,23 +10,7 @@ namespace TodoBackendTemplate { - public class CosmosConfig - { - public 
CosmosConfig(ConnectionMode mode, string connectionStringWithUriAndKey, string database, string container, int cacheMb) - { - Mode = mode; - ConnectionStringWithUriAndKey = connectionStringWithUriAndKey; - Database = database; - Container = container; - CacheMb = cacheMb; - } - - public ConnectionMode Mode { get; } - public string ConnectionStringWithUriAndKey { get; } - public string Database { get; } - public string Container { get; } - public int CacheMb { get; } - } + public record CosmosConfig(ConnectionMode Mode, string ConnectionStringWithUriAndKey, string Database, string Container, int CacheMb); public class CosmosContext : EquinoxContext { @@ -39,7 +24,7 @@ public CosmosContext(CosmosConfig config) _cache = new Cache("Cosmos", config.CacheMb); var retriesOn429Throttling = 1; // Number of retries before failing processing when provisioned RU/s limit in CosmosDb is breached var timeout = TimeSpan.FromSeconds(5); // Timeout applied per request to CosmosDb, including retry attempts - var discovery = Discovery.ConnectionString.NewConnectionString(config.ConnectionStringWithUriAndKey); + var discovery = Discovery.NewConnectionString(config.ConnectionStringWithUriAndKey); _connect = async () => { var connector = new CosmosStoreConnector(discovery, timeout, retriesOn429Throttling, timeout, config.Mode); @@ -62,8 +47,9 @@ await FSharpAsync.StartAsTask( return new CosmosStoreContext(storeClient, tipMaxEvents: 256); } - public override Func> Resolve( - FsCodec.IEventCodec codec, + public override Func<(string, string), DeciderCore> Resolve( + Serilog.ILogger handlerLog, + FsCodec.IEventCodec, Unit> codec, Func, TState> fold, TState initial, Func isOrigin = null, @@ -77,8 +63,8 @@ await FSharpAsync.StartAsTask( var cacheStrategy = _cache == null ? 
null : CachingStrategy.NewSlidingWindow(_cache, TimeSpan.FromMinutes(20)); - var cat = new CosmosStoreCategory(_store, codec, FuncConvert.FromFunc(fold), initial, cacheStrategy, accessStrategy, compressUnfolds:FSharpOption.None); - return t => cat.Resolve(t); + var cat = new CosmosStoreCategory(_store, codec.ToJsonElementCodec(), FuncConvert.FromFunc(fold), initial, cacheStrategy, accessStrategy, compressUnfolds:FSharpOption.None); + return cat.Resolve(handlerLog); } } -} \ No newline at end of file +} diff --git a/equinox-web-csharp/Web/EquinoxContext.cs b/equinox-web-csharp/Web/EquinoxContext.cs index 84c870799..4c872fa9c 100644 --- a/equinox-web-csharp/Web/EquinoxContext.cs +++ b/equinox-web-csharp/Web/EquinoxContext.cs @@ -1,16 +1,16 @@ using Microsoft.FSharp.Core; -using System.Text.Json; using System; using System.Collections.Generic; +using System.Text.Json; using System.Threading.Tasks; -using FsCodec.SystemTextJson; namespace TodoBackendTemplate { public abstract class EquinoxContext { - public abstract Func> Resolve( - FsCodec.IEventCodec codec, + public abstract Func<(string, string), Equinox.DeciderCore> Resolve( + Serilog.ILogger storeLog, + FsCodec.IEventCodec, Unit> codec, Func, TState> fold, TState initial, Func isOrigin = null, @@ -21,17 +21,21 @@ public abstract class EquinoxContext public static class EquinoxCodec { - public static FsCodec.IEventCodec Create( - Func> encode, - Func tryDecode) where TEvent: class - { - return FsCodec.Codec.Create( + public static FsCodec.IEventCodec, Unit> Create( + Func)> encode, + Func<(string, ReadOnlyMemory), FSharpValueOption> tryDecode) where TEvent: class => + + FsCodec.Codec.Create( FuncConvert.FromFunc(encode), - FuncConvert.FromFunc((Func, FSharpOption>) TryDecodeImpl)); - FSharpOption TryDecodeImpl(Tuple encoded) => OptionModule.OfObj(tryDecode(encoded.Item1, encoded.Item2)); - } + FuncConvert.FromFunc(tryDecode)); + + public static FsCodec.IEventCodec, Unit> Create( + Func)> encode, + Func, 
FSharpValueOption> tryDecode) where TEvent : class => - public static FsCodec.IEventCodec Create(JsonSerializerOptions options = null) where TEvent: TypeShape.UnionContract.IUnionContract => - FsCodec.SystemTextJson.Codec.Create(options).ToByteArrayCodec(); + Create(encode, tb => tryDecode(tb.Item1, tb.Item2)); + + public static FsCodec.IEventCodec, Unit> Create(JsonSerializerOptions options = null) where TEvent: TypeShape.UnionContract.IUnionContract => + FsCodec.SystemTextJson.Codec.Create(options); } } diff --git a/equinox-web-csharp/Web/EventStoreContext.cs b/equinox-web-csharp/Web/EventStoreContext.cs index 14bf38916..d9dcfb6ba 100644 --- a/equinox-web-csharp/Web/EventStoreContext.cs +++ b/equinox-web-csharp/Web/EventStoreContext.cs @@ -1,7 +1,5 @@ using Equinox; -using Equinox.EventStore; -using Equinox.Core; -using Microsoft.FSharp.Control; +using Equinox.EventStoreDb; using Microsoft.FSharp.Core; using System; using System.Collections.Generic; @@ -9,27 +7,13 @@ namespace TodoBackendTemplate { - public class EventStoreConfig - { - public EventStoreConfig(string host, string username, string password, int cacheMb) - { - Host = host; - Username = username; - Password = password; - CacheMb = cacheMb; - } - - public string Host { get; } - public string Username { get; } - public string Password { get; } - public int CacheMb { get; } - } + public record EventStoreConfig(string ConnectionString, int CacheMb); public class EventStoreContext : EquinoxContext { readonly Cache _cache; - Equinox.EventStore.EventStoreContext _connection; + Equinox.EventStoreDb.EventStoreContext _connection; readonly Func _connect; public EventStoreContext(EventStoreConfig config) @@ -40,18 +24,17 @@ public EventStoreContext(EventStoreConfig config) internal override async Task Connect() => await _connect(); - static async Task Connect(EventStoreConfig config) + static Task Connect(EventStoreConfig config) { - var c = new Connector(config.Username, config.Password, reqTimeout: 
TimeSpan.FromSeconds(5), reqRetries: 1); + var c = new EventStoreConnector(reqTimeout: TimeSpan.FromSeconds(5), reqRetries: 1); - var conn = await FSharpAsync.StartAsTask( - c.Establish("Twin", Discovery.NewGossipDns(config.Host), ConnectionStrategy.ClusterTwinPreferSlaveReads), - null, null); - return new Equinox.EventStore.EventStoreContext(conn, new BatchingPolicy(maxBatchSize: 500)); + var conn = c.Establish("Twin", Discovery.NewConnectionString(config.ConnectionString), ConnectionStrategy.ClusterTwinPreferSlaveReads); + return Task.FromResult(new Equinox.EventStoreDb.EventStoreContext(conn, new BatchingPolicy(maxBatchSize: 500))); } - public override Func> Resolve( - FsCodec.IEventCodec codec, + public override Func<(string, string), DeciderCore> Resolve( + Serilog.ILogger handlerLog, + FsCodec.IEventCodec, Unit> codec, Func, TState> fold, TState initial, Func isOrigin = null, @@ -64,9 +47,9 @@ public override Func> Resolve( var cacheStrategy = _cache == null ? null : CachingStrategy.NewSlidingWindow(_cache, TimeSpan.FromMinutes(20)); - var cat = new EventStoreCategory(_connection, codec, FuncConvert.FromFunc(fold), + var cat = new EventStoreCategory(_connection, codec, FuncConvert.FromFunc(fold), initial, cacheStrategy, accessStrategy); - return t => cat.Resolve(t); + return cat.Resolve(log: handlerLog); } } } diff --git a/equinox-web-csharp/Web/MemoryStoreContext.cs b/equinox-web-csharp/Web/MemoryStoreContext.cs index 64a50d8ee..12e00b952 100644 --- a/equinox-web-csharp/Web/MemoryStoreContext.cs +++ b/equinox-web-csharp/Web/MemoryStoreContext.cs @@ -1,5 +1,5 @@ +using Equinox; using Equinox.MemoryStore; -using Equinox.Core; using Microsoft.FSharp.Core; using System; using System.Collections.Generic; @@ -9,22 +9,23 @@ namespace TodoBackendTemplate { public class MemoryStoreContext : EquinoxContext { - readonly VolatileStore _store; + readonly VolatileStore> _store; - public MemoryStoreContext(VolatileStore store) => + public MemoryStoreContext(VolatileStore> 
store) => _store = store; - public override Func> Resolve( - FsCodec.IEventCodec codec, + public override Func<(string, string), DeciderCore> Resolve( + Serilog.ILogger handlerLog, + FsCodec.IEventCodec, Unit> codec, Func, TState> fold, TState initial, Func isOrigin = null, Func toSnapshot = null) { - var resolver = new MemoryStoreCategory(_store, codec, FuncConvert.FromFunc(fold), initial); - return target => resolver.Resolve(target); + var cat = new MemoryStoreCategory, Unit>(_store, codec, FuncConvert.FromFunc(fold), initial); + return cat.Resolve(log: handlerLog); } internal override Task Connect() => Task.CompletedTask; } -} \ No newline at end of file +} diff --git a/equinox-web-csharp/Web/Startup.cs b/equinox-web-csharp/Web/Startup.cs index 5f5babdfd..c73a111f7 100755 --- a/equinox-web-csharp/Web/Startup.cs +++ b/equinox-web-csharp/Web/Startup.cs @@ -40,7 +40,6 @@ public void ConfigureServices(IServiceCollection services) { services .AddMvc() - .SetCompatibilityVersion(CompatibilityVersion.Latest) .AddJsonOptions(o => { foreach(var c in FsCodec.SystemTextJson.Options.Default.Converters) @@ -106,10 +105,10 @@ static EquinoxContext ConfigureStore() var config = new CosmosConfig(connMode, conn, db, container, cacheMb); return new CosmosContext(config); #endif -#if (!cosmos && !eventStore) - return new MemoryStoreContext(new Equinox.MemoryStore.VolatileStore()); +#if (!cosmos && !dynamo && !eventStore) + return new MemoryStoreContext(new Equinox.MemoryStore.VolatileStore>()); #endif -#if (!memoryStore && !cosmos && !eventStore) +#if (!memoryStore && !cosmos && !dynamo && !eventStore) //return new MemoryStoreContext(new Equinox.MemoryStore.VolatileStore()); #endif } @@ -128,21 +127,25 @@ public ServiceBuilder(EquinoxContext context, ILogger handlerLog) } #if todos - public Todo.Service CreateTodoService() => - new Todo.Service( - _handlerLog, - _context.Resolve( + public Todo.Service CreateTodoService() + { + var resolve = + _context.Resolve( + _handlerLog, 
EquinoxCodec.Create(Todo.Event.Encode, Todo.Event.TryDecode), Todo.State.Fold, Todo.State.Initial, Todo.State.IsOrigin, - Todo.State.Snapshot)); + Todo.State.Snapshot); + return new Todo.Service(ids => resolve(Todo.Event.StreamIds(ids))); + } + #endif #if aggregate public Aggregate.Service CreateAggregateService() => new Aggregate.Service( - _handlerLog, _context.Resolve( + _handlerLog, EquinoxCodec.Create(Aggregate.Event.Encode, Aggregate.Event.TryDecode), Aggregate.State.Fold, Aggregate.State.Initial, @@ -152,8 +155,8 @@ public Aggregate.Service CreateAggregateService() => #if (!aggregate && !todos) // public Thing.Service CreateThingService() => // Thing.Service( -// _handlerLog, // _context.Resolve( +// _handlerLog, // EquinoxCodec.Create(), // Requires Union following IUnionContract pattern, see https://eiriktsarpalis.wordpress.com/2018/10/30/a-contract-pattern-for-schemaless-datastores/ // Thing.Fold.Fold, // Thing.Fold.Initial, diff --git a/equinox-web-csharp/Web/Web.csproj b/equinox-web-csharp/Web/Web.csproj index 55b903de9..fe689925d 100755 --- a/equinox-web-csharp/Web/Web.csproj +++ b/equinox-web-csharp/Web/Web.csproj @@ -1,15 +1,14 @@  - netcoreapp3.1 + net6.0 - - - - - + + + + From 5e7e40e36beee7545b56a0da0549b0d5b22e50ef Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Mon, 12 Sep 2022 18:28:33 +0100 Subject: [PATCH 05/43] Remove dead helpers --- equinox-web-csharp/Domain/Infrastructure.cs | 58 --------------------- 1 file changed, 58 deletions(-) diff --git a/equinox-web-csharp/Domain/Infrastructure.cs b/equinox-web-csharp/Domain/Infrastructure.cs index de84dfe4f..d9e171f15 100644 --- a/equinox-web-csharp/Domain/Infrastructure.cs +++ b/equinox-web-csharp/Domain/Infrastructure.cs @@ -1,65 +1,7 @@ -using Equinox; -using Equinox.Core; -using Microsoft.FSharp.Core; using System; -using System.Collections.Generic; -using System.Threading.Tasks; namespace TodoBackendTemplate { - public class Accumulator - { - readonly Func, TState> _fold; - readonly 
TState _state; - public List Accumulated { get; } = new List(); - - public Accumulator(Func,TState> fold, TState state) - { - _fold = fold; - _state = state; - } - - public TState State => _fold(_state,Accumulated); - - public void Execute(Func> f) => Accumulated.AddRange(f(State)); - } - - public class EquinoxStream : DeciderCore - { - private readonly Func, TState> _fold; - - public EquinoxStream( - Func, TState> fold, - IStream stream) - : base(stream) - { - _fold = fold; - } - - /// Run the decision method, letting it decide whether or not the Command's intent should manifest as Events - public async Task Execute(Func> interpret) - { - return await Transact(interpret); - } - - /// Execute a command, as Decide(Action) does, but also yield an outcome from the decision - public async Task Decide(Func, T> decide) - { - (T, IEnumerable) decideWrapped(TState state) - { - var a = new Accumulator(_fold, state); - var r = decide(a); - return (r, a.Accumulated); - } - - return await Transact(decide: decideWrapped); - } - - // Project from the synchronized state, without the possibility of adding events that Decide(Func) admits - public async Task Query(Func project) => - await base.Query(project); - } - /// System.Text.Json implementation of IEncoder that encodes direct to a UTF-8 Buffer public class SystemTextJsonUtf8Codec { From cec830b1e63d1b16413a19eecdf62bac423231dc Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Mon, 12 Sep 2022 19:01:50 +0100 Subject: [PATCH 06/43] Port feedSource --- feed-source/Domain.Tests/Domain.Tests.fsproj | 11 +++-- feed-source/Domain/Config.fs | 8 ++-- feed-source/Domain/Domain.fsproj | 8 ++-- feed-source/Domain/TicketsEpoch.fs | 42 ++++++++------------ feed-source/Domain/TicketsIngester.fs | 16 +++++--- feed-source/Domain/TicketsSeries.fs | 19 ++++----- feed-source/FeedApi/FeedApi.fsproj | 4 +- 7 files changed, 49 insertions(+), 59 deletions(-) diff --git a/feed-source/Domain.Tests/Domain.Tests.fsproj 
b/feed-source/Domain.Tests/Domain.Tests.fsproj index a205bcc57..2aa9ffe1b 100644 --- a/feed-source/Domain.Tests/Domain.Tests.fsproj +++ b/feed-source/Domain.Tests/Domain.Tests.fsproj @@ -1,10 +1,9 @@ - net5.0 + net6.0 5 false - Library @@ -12,12 +11,12 @@ - + - + - - + + diff --git a/feed-source/Domain/Config.fs b/feed-source/Domain/Config.fs index 254630785..b3cdd3d22 100644 --- a/feed-source/Domain/Config.fs +++ b/feed-source/Domain/Config.fs @@ -1,19 +1,19 @@ module FeedSourceTemplate.Domain.Config let log = Serilog.Log.ForContext("isMetric", true) -let createDecider stream = Equinox.Decider(log, stream, maxAttempts = 3) +let createDecider cat = Equinox.Decider.resolve log cat module EventCodec = open FsCodec.SystemTextJson let private defaultOptions = Options.Create() - let create<'t when 't :> TypeShape.UnionContract.IUnionContract> () = - Codec.Create<'t>(options = defaultOptions).ToByteArrayCodec() + let gen<'t when 't :> TypeShape.UnionContract.IUnionContract> = + CodecJsonElement.Create<'t>(options = defaultOptions) module Memory = - let create codec initial fold store = + let create codec initial fold store : Equinox.Category<_, _, _> = Equinox.MemoryStore.MemoryStoreCategory(store, codec, fold, initial) module Cosmos = diff --git a/feed-source/Domain/Domain.fsproj b/feed-source/Domain/Domain.fsproj index 74552dd9f..25cf2bb40 100644 --- a/feed-source/Domain/Domain.fsproj +++ b/feed-source/Domain/Domain.fsproj @@ -1,7 +1,7 @@  - netcoreapp3.1 + net6.0 5 @@ -14,9 +14,9 @@ - - - + + + diff --git a/feed-source/Domain/TicketsEpoch.fs b/feed-source/Domain/TicketsEpoch.fs index 7d960fa46..448745e1d 100644 --- a/feed-source/Domain/TicketsEpoch.fs +++ b/feed-source/Domain/TicketsEpoch.fs @@ -6,7 +6,7 @@ module FeedSourceTemplate.Domain.TicketsEpoch let [] Category = "TicketsEpoch" -let streamName (fcId, epochId) = FsCodec.StreamName.compose Category [FcId.toString fcId; TicketsEpochId.toString epochId] +let streamName (fcId, epochId) = struct (Category, 
FsCodec.StreamName.createStreamId [FcId.toString fcId; TicketsEpochId.toString epochId]) // NB - these types and the union case names reflect the actual storage formats and hence need to be versioned with care [] @@ -19,7 +19,7 @@ module Events = | Closed | Snapshotted of {| ids : TicketId[]; closed : bool |} interface TypeShape.UnionContract.IUnionContract - let codec = Config.EventCodec.create() + let codec = Config.EventCodec.gen let itemId (x : Events.Item) : TicketId = x.id let (|ItemIds|) : Events.Item[] -> TicketId[] = Array.map itemId @@ -73,29 +73,24 @@ type IngestionService internal (capacity, resolve : FcId * TicketsEpochId -> Equ /// Handles idempotent deduplicated insertion into the set of items held within the epoch member _.Ingest(fcId, epochId, ticketIds) : Async = let decider = resolve (fcId, epochId) - decider.Transact(decide capacity ticketIds) + // Accept whatever date is in the cache on the basis that we are doing most of the writing so will more often than not + // have the correct state already without a roundtrip. What if the data is actually stale? we'll end up needing to resync, + // but we we need to deal with that as a race condition anyway + decider.Transact(decide capacity ticketIds, Equinox.AllowStale) /// Obtains a complete list of all the tickets in the specified fcid/epochId member _.ReadTickets(fcId, epochId) : Async = let decider = resolve (fcId, epochId) - decider.Query fst + decider.Query(fst, Equinox.AllowStale) module Config = let private create_ capacity resolve = - // Accept whatever date is in the cache on the basis that we are doing most of the writing so will more often than not - // have the correct state already without a roundtrip. What if the data is actually stale? 
we'll end up needing to resync, - // but we we need to deal with that as a race condition anyway - IngestionService(capacity, resolve (Some Equinox.AllowStale)) - let private resolveStream opt = function - | Config.Store.Memory store -> - let cat = Config.Memory.create Events.codec Fold.initial Fold.fold store - fun sn -> cat.Resolve(sn, ?option = opt) - | Config.Store.Cosmos (context, cache) -> - let cat = Config.Cosmos.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) - fun sn -> cat.Resolve(sn, ?option = opt) - let private resolveDecider store opt = streamName >> resolveStream opt store >> Config.createDecider - let create capacity = resolveDecider >> create_ capacity + IngestionService(capacity, streamName >> resolve) + let private (|Category|) = function + | Config.Store.Memory store -> Config.Memory.create Events.codec Fold.initial Fold.fold store + | Config.Store.Cosmos (context, cache) -> Config.Cosmos.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) + let create capacity (Category cat) = Config.createDecider cat |> create_ capacity /// Custom Fold and caching logic compared to the IngesterService /// - When reading, we want the full Items @@ -122,12 +117,7 @@ module Reader = module Config = - let private resolveStream = function - | Config.Store.Memory store -> - let cat = Config.Memory.create Events.codec initial fold store - cat.Resolve - | Config.Store.Cosmos (context, cache) -> - let cat = Config.Cosmos.createUnoptimized Events.codec initial fold (context, cache) - cat.Resolve - let private resolveDecider store = streamName >> resolveStream store >> Config.createDecider - let create = resolveDecider >> Service + let private (|Category|) = function + | Config.Store.Memory store -> Config.Memory.create Events.codec initial fold store + | Config.Store.Cosmos (context, cache) -> Config.Cosmos.createUnoptimized Events.codec initial fold (context, 
cache) + let create (Category cat) = Service(streamName >> Config.createDecider cat) diff --git a/feed-source/Domain/TicketsIngester.fs b/feed-source/Domain/TicketsIngester.fs index 96a07f7cf..0baa30774 100644 --- a/feed-source/Domain/TicketsIngester.fs +++ b/feed-source/Domain/TicketsIngester.fs @@ -11,6 +11,7 @@ /// deterministic in nature module FeedSourceTemplate.Domain.TicketsIngester +open System.Threading open Equinox.Core open FSharp.UMX @@ -46,12 +47,15 @@ type ServiceForFc internal (log : Serilog.ILogger, fcId, epochs : TicketsEpoch.I return! Async.Parallel(seq { for epochId in (max 0 (%startingId - lookBack)) .. (%startingId - 1) -> readEpoch %epochId }, loadDop) } // Tickets cache - used to maintain a list of tickets that have already been ingested in order to avoid db round-trips - let previousTickets : AsyncCacheCell = AsyncCacheCell <| async { - let! batches = loadPreviousEpochs 4 - return IdsCache.Create(Seq.concat batches) } + let previousTickets : AsyncCacheCell = + let aux = async { + let! batches = loadPreviousEpochs 4 + return IdsCache.Create(Seq.concat batches) } + AsyncCacheCell(fun ct -> Async.StartAsTask(aux, cancellationToken = ct)) let tryIngest items = async { - let! previousTickets = previousTickets.AwaitValue() + let! ct = Async.CancellationToken + let! previousTickets = previousTickets.Await ct |> Async.AwaitTask let firstEpochId = effectiveEpochId () let rec aux epochId ingestedTickets items = async { @@ -94,7 +98,9 @@ type ServiceForFc internal (log : Serilog.ILogger, fcId, epochs : TicketsEpoch.I let batchedIngest = AsyncBatchingGate(tryIngest, linger) /// Upon startup, we initialize the Tickets cache from recent epochs; we want to kick that process off before our first ingest - member _.Initialize() = previousTickets.AwaitValue() |> Async.Ignore + member _.Initialize() = async { + let! ct = Async.CancellationToken + return! 
previousTickets.Await(ct) |> Async.AwaitTask |> Async.Ignore } /// Attempts to feed the items into the sequence of epochs. Returns the subset that actually got fed in this time around. member _.IngestMany(items : TicketsEpoch.Events.Item[]) : Async = async { diff --git a/feed-source/Domain/TicketsSeries.fs b/feed-source/Domain/TicketsSeries.fs index 28431e38d..b62429d87 100644 --- a/feed-source/Domain/TicketsSeries.fs +++ b/feed-source/Domain/TicketsSeries.fs @@ -5,7 +5,7 @@ module FeedSourceTemplate.Domain.TicketsSeries let [] Category = "Tickets" -let streamName seriesId = FsCodec.StreamName.create Category (TicketsSeriesId.toString seriesId) +let streamName seriesId = struct (Category, TicketsSeriesId.toString seriesId) // NB - these types and the union case names reflect the actual storage formats and hence need to be versioned with care [] @@ -15,7 +15,7 @@ module Events = | Started of {| fcId : FcId; epochId : TicketsEpochId |} | Snapshotted of {| active : Map |} interface TypeShape.UnionContract.IUnionContract - let codec = Config.EventCodec.create() + let codec = Config.EventCodec.gen module Fold = @@ -68,13 +68,8 @@ module Config = // For now we have a single global sequence. 
This provides us an extension point should we ever need to reprocess // NOTE we use a custom id in order to isolate data for acceptance tests let seriesId = defaultArg seriesId TicketsSeriesId.wellKnownId - Service(seriesId, resolve (Some Equinox.AllowStale)) - let private resolveStream opt = function - | Config.Store.Memory store -> - let cat = Config.Memory.create Events.codec Fold.initial Fold.fold store - fun sn -> cat.Resolve(sn, ?option = opt) - | Config.Store.Cosmos (context, cache) -> - let cat = Config.Cosmos.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) - fun sn -> cat.Resolve(sn, ?option = opt) - let private resolveDecider store opt = streamName >> resolveStream opt store >> Config.createDecider - let create seriesOverride = resolveDecider >> create_ seriesOverride + Service(seriesId, streamName >> resolve) + let private (|Category|) = function + | Config.Store.Memory store -> Config.Memory.create Events.codec Fold.initial Fold.fold store + | Config.Store.Cosmos (context, cache) -> Config.Cosmos.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) + let create seriesOverride (Category cat) = create_ seriesOverride (Config.createDecider cat) diff --git a/feed-source/FeedApi/FeedApi.fsproj b/feed-source/FeedApi/FeedApi.fsproj index d7c1b231f..11b262286 100644 --- a/feed-source/FeedApi/FeedApi.fsproj +++ b/feed-source/FeedApi/FeedApi.fsproj @@ -1,7 +1,7 @@ - netcoreapp3.1 + net6.0 True 5 @@ -20,7 +20,7 @@ - + From 28e14d6c7dfd2bfe5f060c82d6f50f0c30cfb892 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Mon, 12 Sep 2022 19:33:36 +0100 Subject: [PATCH 07/43] if eqx patterns --- equinox-patterns/Domain.Tests/Domain.Tests.fsproj | 1 + 1 file changed, 1 insertion(+) diff --git a/equinox-patterns/Domain.Tests/Domain.Tests.fsproj b/equinox-patterns/Domain.Tests/Domain.Tests.fsproj index c37687f3a..520b9e79f 100644 --- 
a/equinox-patterns/Domain.Tests/Domain.Tests.fsproj +++ b/equinox-patterns/Domain.Tests/Domain.Tests.fsproj @@ -3,6 +3,7 @@ net6.0 5 + false From 54ad370fb101e7b380dce6db7420a1038a026740 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Mon, 12 Sep 2022 23:40:36 +0100 Subject: [PATCH 08/43] Port periodicIngester --- periodic-ingester/ApiClient.fs | 9 +++--- periodic-ingester/Infrastructure.fs | 3 +- periodic-ingester/Ingester.fs | 14 ++++----- periodic-ingester/PeriodicIngester.fsproj | 13 ++++----- periodic-ingester/Program.fs | 35 +++++++++++------------ 5 files changed, 36 insertions(+), 38 deletions(-) diff --git a/periodic-ingester/ApiClient.fs b/periodic-ingester/ApiClient.fs index 1604aeecd..028e6f45b 100644 --- a/periodic-ingester/ApiClient.fs +++ b/periodic-ingester/ApiClient.fs @@ -16,11 +16,12 @@ type TicketsClient(client : HttpClient) = let basePath = "api/tickets" - member _.Crawl() : AsyncSeq = asyncSeq { + member _.Crawl() : AsyncSeq array)> = asyncSeq { let request = HttpReq.get () |> HttpReq.withPath basePath - let! response = client.Send request + let ts = System.Diagnostics.Stopwatch.StartNew() + let! response = client.Send2(request) let! 
basePage = response |> HttpRes.deserializeOkStj - yield + yield ts.Elapsed, [| for t in basePage.tickets -> let data : Ingester.TicketData = { lastUpdated = t.lastUpdated; body = t.body } Ingester.PipelineEvent.sourceItemOfTicketIdAndData (t.id, data) |] @@ -32,5 +33,5 @@ type TicketsFeed(baseUri) = let tickets = TicketsClient(client) // TODO add retries - consumer loop will abort if this throws - member _.Crawl(_trancheId): AsyncSeq = + member _.Crawl(_trancheId): AsyncSeq array)> = tickets.Crawl() diff --git a/periodic-ingester/Infrastructure.fs b/periodic-ingester/Infrastructure.fs index 3b47a69bb..998facd82 100644 --- a/periodic-ingester/Infrastructure.fs +++ b/periodic-ingester/Infrastructure.fs @@ -64,7 +64,6 @@ type Logging() = [] static member Configure(configuration : LoggerConfiguration, ?verbose) = configuration - .Destructure.FSharpTypes() .Enrich.FromLogContext() |> fun c -> if verbose = Some true then c.MinimumLevel.Debug() else c @@ -148,7 +147,7 @@ type HttpClient with /// Drop-in replacement for HttpClient.SendAsync which addresses known timeout issues /// /// HttpRequestMessage to be submitted. - member client.Send(msg : HttpRequestMessage) = async { + member client.Send2(msg : HttpRequestMessage) = async { let! ct = Async.CancellationToken try return! 
client.SendAsync(msg, ct) |> Async.AwaitTask // address https://github.com/dotnet/corefx/issues/20296 diff --git a/periodic-ingester/Ingester.fs b/periodic-ingester/Ingester.fs index bac21e618..ec117f84e 100644 --- a/periodic-ingester/Ingester.fs +++ b/periodic-ingester/Ingester.fs @@ -39,18 +39,18 @@ module PipelineEvent = (* Each item per stream is represented as an event; if multiple events have been found for a given stream, they are delivered together *) - let private dummyEventData = let dummyEventType, noBody = "eventType", null in FsCodec.Core.EventData.Create(dummyEventType, noBody) - let sourceItemOfTicketIdAndData (id : TicketId, data : TicketData) : Propulsion.Feed.SourceItem = + let private dummyEventData = let dummyEventType, noBody = "eventType", Unchecked.defaultof<_> in FsCodec.Core.EventData.Create(dummyEventType, noBody) + let sourceItemOfTicketIdAndData (id : TicketId, data : TicketData) : Propulsion.Feed.SourceItem = { streamName = streamName id; eventData = dummyEventData; context = box data } - let (|TicketEvents|_|) = function + let [] (|TicketEvents|_|) = function | StreamName ticketId, (s : Propulsion.Streams.StreamSpan<_>) -> - Some (ticketId, s.events |> Seq.map (fun e -> Unchecked.unbox e.Context)) - | _ -> None + ValueSome (ticketId, s |> Seq.map (fun e -> Unchecked.unbox e.Context)) + | _ -> ValueNone -let handle (stream, span) = async { +let handle struct (stream, span) = async { match stream, span with | PipelineEvent.TicketEvents (ticketId, items) -> // TODO : Ingest the data - return Propulsion.Streams.SpanResult.AllProcessed, IngestionOutcome.Unchanged + return struct (Propulsion.Streams.SpanResult.AllProcessed, IngestionOutcome.Unchanged) | x -> return failwithf "Unexpected stream %O" x } diff --git a/periodic-ingester/PeriodicIngester.fsproj b/periodic-ingester/PeriodicIngester.fsproj index 9f36b288e..28ac4b5fa 100644 --- a/periodic-ingester/PeriodicIngester.fsproj +++ b/periodic-ingester/PeriodicIngester.fsproj @@ -2,7 +2,7 @@ 
Exe - netcoreapp3.1 + net6.0 5 @@ -17,14 +17,13 @@ - - - + + - - + + - + diff --git a/periodic-ingester/Program.fs b/periodic-ingester/Program.fs index a5235c3e7..b052fb91b 100644 --- a/periodic-ingester/Program.fs +++ b/periodic-ingester/Program.fs @@ -4,13 +4,11 @@ open Serilog open System exception MissingArg of message : string with override this.Message = this.message +let missingArg message = raise (MissingArg message) type Configuration(tryGet) = - let get key = - match tryGet key with - | Some value -> value - | None -> raise (MissingArg (sprintf "Missing Argument/Environment Variable %s" key)) + let get key = match tryGet key with Some value -> value | None -> missingArg $"Missing Argument/Environment Variable %s{key}" member _.CosmosConnection = get "EQUINOX_COSMOS_CONNECTION" member _.CosmosDatabase = get "EQUINOX_COSMOS_DATABASE" @@ -28,6 +26,7 @@ module Args = type [] Parameters = | [] Verbose | [] PrometheusPort of int + | [] GroupId of string | [] MaxReadAhead of int | [] TicketsDop of int @@ -36,11 +35,14 @@ module Args = interface IArgParserTemplate with member a.Usage = a |> function | Verbose _ -> "request verbose logging." + | GroupId _ -> "consumer group name. Default: 'default'" | PrometheusPort _ -> "port from which to expose a Prometheus /metrics endpoint. Default: off (optional if environment variable PROMETHEUS_PORT specified)" | MaxReadAhead _ -> "maximum number of batches to let processing get ahead of completion. Default: 8." | TicketsDop _ -> "maximum number of Tickets to process in parallel. Default: 4" | Feed _ -> "Feed parameters." and Arguments(c : Configuration, a : ParseResults) = + member val GroupId = a.GetResult(GroupId, "default") + member val Verbose = a.Contains Parameters.Verbose member val PrometheusPort = a.TryGetResult PrometheusPort |> Option.orElseWith (fun () -> c.PrometheusPort) member val MaxReadAhead = a.GetResult(MaxReadAhead, 8) @@ -49,9 +51,9 @@ module Args = member val StateInterval = TimeSpan.FromMinutes 5. 
member val CheckpointInterval = TimeSpan.FromHours 1. member val Feed : FeedArguments = - match a.TryGetSubCommand() with - | Some (Feed feed) -> FeedArguments(c, feed) - | _ -> raise (MissingArg "Must specify feed") + match a.GetSubCommand() with + | Feed feed -> FeedArguments(c, feed) + | _ -> missingArg "Must specify feed" and [] FeedParameters = | [] Group of string | [] BaseUri of string @@ -66,9 +68,9 @@ module Args = member val BaseUri = a.TryGetResult BaseUri |> Option.defaultWith (fun () -> c.BaseUri) |> Uri member val RefreshInterval = TimeSpan.FromHours 1. member val Cosmos : CosmosArguments = - match a.TryGetSubCommand() with - | Some (Cosmos cosmos) -> CosmosArguments(c, cosmos) - | _ -> raise (MissingArg "Must specify cosmos") + match a.GetSubCommand() with + | Cosmos cosmos -> CosmosArguments(c, cosmos) + | _ -> missingArg "Must specify cosmos" and [] CosmosParameters = | [] Verbose | [] ConnectionMode of Microsoft.Azure.Cosmos.ConnectionMode @@ -115,15 +117,14 @@ let build (args : Args.Arguments) = let sink = let stats = Ingester.Stats(Log.Logger, args.StatsInterval, args.StateInterval) - Propulsion.Streams.StreamsProjector.Start(Log.Logger, args.MaxReadAhead, args.TicketsDop, Ingester.handle, stats, args.StatsInterval) + Propulsion.Streams.Default.Config.Start(Log.Logger, args.MaxReadAhead, args.TicketsDop, Ingester.handle, stats, args.StatsInterval) let pumpSource = - let checkpoints = Propulsion.Feed.ReaderCheckpoint.CosmosStore.create Log.Logger (context, cache) + let checkpoints = Propulsion.Feed.ReaderCheckpoint.CosmosStore.create Log.Logger (args.GroupId, args.CheckpointInterval) (context, cache) let client = ApiClient.TicketsFeed feed.BaseUri let source = Propulsion.Feed.PeriodicSource( Log.Logger, args.StatsInterval, feed.SourceId, - checkpoints, args.CheckpointInterval, - client.Crawl, feed.RefreshInterval, + client.Crawl, feed.RefreshInterval, checkpoints, sink) source.Pump() sink, pumpSource @@ -138,16 +139,14 @@ let startMetricsServer 
port : IDisposable = let run args = async { let sink, pumpSource = build args use _metricsServer : IDisposable = args.PrometheusPort |> Option.map startMetricsServer |> Option.toObj - do! Async.Parallel [ pumpSource; sink.AwaitWithStopOnCancellation() ] |> Async.Ignore - return if sink.RanToCompletion then 0 else 3 -} + return! Async.Parallel [ pumpSource; sink.AwaitWithStopOnCancellation() ] |> Async.Ignore } [] let main argv = try let args = Args.parse EnvVar.tryGet argv try let metrics = Sinks.equinoxAndPropulsionFeedConsumerMetrics (Sinks.tags AppName) args.Feed.SourceId Log.Logger <- LoggerConfiguration().Configure(args.Verbose).Sinks(metrics, args.Feed.Cosmos.Verbose).CreateLogger() - try run args |> Async.RunSynchronously + try run args |> Async.RunSynchronously; 0 with e when not (e :? MissingArg) -> Log.Fatal(e, "Exiting"); 2 finally Log.CloseAndFlush() with MissingArg msg -> eprintfn "%s" msg; 1 From b729343f27b2511d50b31453d0a502ea959ae42a Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Mon, 12 Sep 2022 23:59:37 +0100 Subject: [PATCH 09/43] feedConsumer --- feed-consumer/ApiClient.fs | 4 ++-- feed-consumer/FeedConsumer.fsproj | 15 +++++++-------- feed-consumer/Infrastructure.fs | 3 +-- feed-consumer/Ingester.fs | 14 +++++++------- feed-consumer/Program.fs | 11 +++++++---- 5 files changed, 24 insertions(+), 23 deletions(-) diff --git a/feed-consumer/ApiClient.fs b/feed-consumer/ApiClient.fs index f3b0fe462..60b0e137d 100644 --- a/feed-consumer/ApiClient.fs +++ b/feed-consumer/ApiClient.fs @@ -34,7 +34,7 @@ type SliceDto = { closed : bool; tickets : ItemDto[]; position : TicketsCheckpoi type Session(client: HttpClient) = member _.Send(req : HttpRequestMessage) : Async = - client.Send(req) + client.Send2(req) type TicketsClient(session: Session) = @@ -69,7 +69,7 @@ type TicketsFeed(baseUri) = let tickets = Session(client).Tickets // TODO add retries - consumer loop will abort if this throws - member _.Poll(trancheId, pos) : Async> = async { + member 
_.Poll(trancheId, pos) : Async> = async { let checkpoint = TicketsCheckpoint.ofPosition pos let! pg = tickets.Poll(TrancheId.toFcId trancheId, checkpoint) let baseIndex = TicketsCheckpoint.toStreamIndex pg.position diff --git a/feed-consumer/FeedConsumer.fsproj b/feed-consumer/FeedConsumer.fsproj index 5e4087145..7d79ffac4 100644 --- a/feed-consumer/FeedConsumer.fsproj +++ b/feed-consumer/FeedConsumer.fsproj @@ -2,7 +2,7 @@ Exe - netcoreapp3.1 + net6.0 5 @@ -16,14 +16,13 @@ - - - - - - + + + + + - + diff --git a/feed-consumer/Infrastructure.fs b/feed-consumer/Infrastructure.fs index dd4708e29..455b46748 100644 --- a/feed-consumer/Infrastructure.fs +++ b/feed-consumer/Infrastructure.fs @@ -68,7 +68,6 @@ type Logging() = [] static member Configure(configuration : LoggerConfiguration, ?verbose) = configuration - .Destructure.FSharpTypes() .Enrich.FromLogContext() |> fun c -> if verbose = Some true then c.MinimumLevel.Debug() else c @@ -152,7 +151,7 @@ type HttpClient with /// Drop-in replacement for HttpClient.SendAsync which addresses known timeout issues /// /// HttpRequestMessage to be submitted. - member client.Send(msg : HttpRequestMessage) = async { + member client.Send2(msg : HttpRequestMessage) = async { let! ct = Async.CancellationToken try return! 
client.SendAsync(msg, ct) |> Async.AwaitTask // address https://github.com/dotnet/corefx/issues/20296 diff --git a/feed-consumer/Ingester.fs b/feed-consumer/Ingester.fs index a10d83d94..906862019 100644 --- a/feed-consumer/Ingester.fs +++ b/feed-consumer/Ingester.fs @@ -32,14 +32,14 @@ module PipelineEvent = FsCodec.Core.TimelineEvent.Create( index, "eventType", - null, + Unchecked.defaultof<_>, context = item) - let (|ItemsForFc|_|) = function - | FsCodec.StreamName.CategoryAndIds (_,[|_ ; FcId.Parse fc|]), (s : Propulsion.Streams.StreamSpan<_>) -> - Some (fc, s.events |> Seq.map (fun e -> Unchecked.unbox e.Context)) - | _ -> None + let [] (|ItemsForFc|_|) = function + | FsCodec.StreamName.CategoryAndIds (_,[|_ ; FcId.Parse fc|]), (s : Propulsion.Streams.StreamSpan) -> + ValueSome (fc, s |> Seq.map (fun e -> Unchecked.unbox e.Context)) + | _ -> ValueNone -let handle maxDop (stream, span) = async { +let handle maxDop struct (stream, span) = async { match stream, span with | PipelineEvent.ItemsForFc (fc, items) -> // Take chunks of max 1000 in order to make handler latency be less 'lumpy' @@ -57,6 +57,6 @@ let handle maxDop (stream, span) = async { }) let! added = Async.Parallel(maybeAdd, maxDegreeOfParallelism=maxDop) let outcome = { added = Seq.length added; notReady = results.Length - ready.Length; dups = results.Length - ticketIds.Length } - return Propulsion.Streams.SpanResult.PartiallyProcessed ticketIds.Length, outcome + return struct (Propulsion.Streams.SpanResult.PartiallyProcessed ticketIds.Length, outcome) | x -> return failwithf "Unexpected stream %O" x } diff --git a/feed-consumer/Program.fs b/feed-consumer/Program.fs index e8ee41696..a9a6de6c2 100644 --- a/feed-consumer/Program.fs +++ b/feed-consumer/Program.fs @@ -27,6 +27,7 @@ module Args = | [] Verbose | [] Group of string + | [] SourceId of string | [] BaseUri of string | [] MaxReadAhead of int @@ -38,6 +39,7 @@ module Args = member a.Usage = a |> function | Verbose _ -> "request verbose logging." 
| Group _ -> "specify Api Consumer Group Id. (optional if environment variable API_CONSUMER_GROUP specified)" + | SourceId _ -> "specify Api SourceId. Default: 'default'" | BaseUri _ -> "specify Api endpoint. (optional if environment variable API_BASE_URI specified)" | MaxReadAhead _ -> "maximum number of batches to let processing get ahead of completion. Default: 8." | FcsDop _ -> "maximum number of FCs to process in parallel. Default: 4" @@ -45,7 +47,8 @@ module Args = | Cosmos _ -> "Cosmos Store parameters." and Arguments(c : Configuration, a : ParseResults) = member val Verbose = a.Contains Parameters.Verbose - member val SourceId = a.TryGetResult Group |> Option.defaultWith (fun () -> c.Group) |> Propulsion.Feed.SourceId.parse + member val GroupId = a.TryGetResult Group |> Option.defaultWith (fun () -> c.Group) + member val SourceId = a.GetResult(SourceId,"default") |> Propulsion.Feed.SourceId.parse member val BaseUri = a.TryGetResult BaseUri |> Option.defaultWith (fun () -> c.BaseUri) |> Uri member val MaxReadAhead = a.GetResult(MaxReadAhead,8) member val FcsDop = a.TryGetResult FcsDop |> Option.defaultValue 4 @@ -104,14 +107,14 @@ let build (args : Args.Arguments) = let sink = let handle = Ingester.handle args.TicketsDop let stats = Ingester.Stats(Log.Logger, args.StatsInterval, args.StateInterval) - Propulsion.Streams.StreamsProjector.Start(Log.Logger, args.MaxReadAhead, args.FcsDop, handle, stats, args.StatsInterval) + Propulsion.Streams.Default.Config.Start(Log.Logger, args.MaxReadAhead, args.FcsDop, handle, stats, args.StatsInterval) let pumpSource = - let checkpoints = Propulsion.Feed.ReaderCheckpoint.CosmosStore.create Config.log (context, cache) + let checkpoints = Propulsion.Feed.ReaderCheckpoint.CosmosStore.create Config.log (args.GroupId, args.CheckpointInterval) (context, cache) let feed = ApiClient.TicketsFeed args.BaseUri let source = Propulsion.Feed.FeedSource( Log.Logger, args.StatsInterval, args.SourceId, args.TailSleepInterval, - 
checkpoints, args.CheckpointInterval, feed.Poll, sink) + checkpoints, sink, feed.Poll) source.Pump feed.ReadTranches sink, pumpSource From 337ef2c02c82ccffada3c1c9ceb507b74759bae7 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Tue, 13 Sep 2022 00:23:10 +0100 Subject: [PATCH 10/43] summaryConsumer --- propulsion-summary-consumer/Config.fs | 10 +++++----- propulsion-summary-consumer/Infrastructure.fs | 9 ++++----- propulsion-summary-consumer/Ingester.fs | 14 ++++++++------ propulsion-summary-consumer/SummaryConsumer.fsproj | 12 ++++++------ propulsion-summary-consumer/TodoSummary.fs | 13 +++++-------- 5 files changed, 28 insertions(+), 30 deletions(-) diff --git a/propulsion-summary-consumer/Config.fs b/propulsion-summary-consumer/Config.fs index 55228b318..f46a7882e 100644 --- a/propulsion-summary-consumer/Config.fs +++ b/propulsion-summary-consumer/Config.fs @@ -1,20 +1,20 @@ module ConsumerTemplate.Config let log = Serilog.Log.ForContext("isMetric", true) -let createDecider stream = Equinox.Decider(log, stream, maxAttempts = 3) +let createDecider cat = Equinox.Decider.resolve log cat module EventCodec = open FsCodec.SystemTextJson let private defaultOptions = Options.Create() - let create<'t when 't :> TypeShape.UnionContract.IUnionContract> () = - Codec.Create<'t>(options = defaultOptions).ToByteArrayCodec() + let gen<'t when 't :> TypeShape.UnionContract.IUnionContract> = + CodecJsonElement.Create<'t>(options = defaultOptions) let private withUpconverter<'c, 'e when 'c :> TypeShape.UnionContract.IUnionContract> up : FsCodec.IEventCodec<'e, _, _> = let down (_ : 'e) = failwith "Unexpected" - Codec.Create<'e, 'c, _>(up, down, options = defaultOptions).ToByteArrayCodec() + Codec.Create<'e, 'c, _>(up, down, options = defaultOptions) let withIndex<'c when 'c :> TypeShape.UnionContract.IUnionContract> : FsCodec.IEventCodec = - let up (raw : FsCodec.ITimelineEvent<_>, e) = raw.Index, e + let up struct (raw : FsCodec.ITimelineEvent<_>, e) = raw.Index, e 
withUpconverter<'c, int64 * 'c> up module Cosmos = diff --git a/propulsion-summary-consumer/Infrastructure.fs b/propulsion-summary-consumer/Infrastructure.fs index ad08d2aaf..ae54fe291 100644 --- a/propulsion-summary-consumer/Infrastructure.fs +++ b/propulsion-summary-consumer/Infrastructure.fs @@ -24,13 +24,13 @@ module EnvVar = module EventCodec = /// Uses the supplied codec to decode the supplied event record `x` (iff at LogEventLevel.Debug, detail fails to `log` citing the `stream` and content) - let tryDecode (codec : FsCodec.IEventCodec<_, _, _>)streamName (x : FsCodec.ITimelineEvent) = + let tryDecode (codec : FsCodec.IEventCodec<_, _, _>) streamName (x : FsCodec.ITimelineEvent) = match codec.TryDecode x with - | None -> + | ValueNone -> if Log.IsEnabled Serilog.Events.LogEventLevel.Debug then - Log.ForContext("event", System.Text.Encoding.UTF8.GetString(x.Data), true) + Log.ForContext("event", System.Text.Encoding.UTF8.GetString(let d = x.Data in d.Span), true) .Debug("Codec {type} Could not decode {eventType} in {stream}", codec.GetType().FullName, x.EventType, streamName) - None + ValueNone | x -> x type Equinox.CosmosStore.CosmosStoreConnector with @@ -61,7 +61,6 @@ type Logging() = [] static member Configure(configuration : LoggerConfiguration, ?verbose) = configuration - .Destructure.FSharpTypes() .Enrich.FromLogContext() |> fun c -> if verbose = Some true then c.MinimumLevel.Debug() else c |> fun c -> let theme = Sinks.SystemConsole.Themes.AnsiConsoleTheme.Code diff --git a/propulsion-summary-consumer/Ingester.fs b/propulsion-summary-consumer/Ingester.fs index ae15f479e..67d77b0af 100644 --- a/propulsion-summary-consumer/Ingester.fs +++ b/propulsion-summary-consumer/Ingester.fs @@ -2,6 +2,8 @@ /// Due to this, we should ensure that writes only happen where the update is not redundant and/or a replay of a previous message module ConsumerTemplate.Ingester +open Propulsion.Internal + /// Defines the contract we share with the proReactor --'s published 
feed module Contract = @@ -19,8 +21,8 @@ module Contract = type VersionAndMessage = int64*Message // We also want the index (which is the Version of the Summary) whenever we're handling an event let private codec : FsCodec.IEventCodec = Config.EventCodec.withIndex - let (|DecodeNewest|_|) (stream, span : Propulsion.Streams.StreamSpan<_>) : VersionAndMessage option = - span.events |> Seq.rev |> Seq.tryPick (EventCodec.tryDecode codec stream) + let [] (|DecodeNewest|_|) (stream, span : Propulsion.Streams.StreamSpan<_>) : VersionAndMessage voption = + span |> Seq.rev |> Seq.tryPickV (EventCodec.tryDecode codec stream) let (|StreamName|_|) = function | FsCodec.StreamName.CategoryAndId (Category, ClientId.Parse clientId) -> Some clientId | _ -> None @@ -64,10 +66,10 @@ let map : Contract.Message -> TodoSummary.Events.SummaryData = function { id = x.id; order = x.order; title = x.title; completed = x.completed } |]} /// Ingest queued events per client - each time we handle all the incoming updates for a given stream as a single act -let ingest (service : TodoSummary.Service) (stream, span : Propulsion.Streams.StreamSpan<_>) = async { +let ingest (service : TodoSummary.Service) struct (stream, span : Propulsion.Streams.StreamSpan<_>) = async { match stream, span with | Contract.MatchNewest (clientId, version, update) -> match! 
service.TryIngest(clientId, version, map update) with - | true -> return Propulsion.Streams.SpanResult.AllProcessed, Outcome.Ok (1, span.events.Length - 1) - | false -> return Propulsion.Streams.SpanResult.AllProcessed, Outcome.Skipped span.events.Length - | _ -> return Propulsion.Streams.SpanResult.AllProcessed, Outcome.NotApplicable span.events.Length } + | true -> return struct (Propulsion.Streams.SpanResult.AllProcessed, Outcome.Ok (1, span.Length - 1)) + | false -> return Propulsion.Streams.SpanResult.AllProcessed, Outcome.Skipped span.Length + | _ -> return Propulsion.Streams.SpanResult.AllProcessed, Outcome.NotApplicable span.Length } diff --git a/propulsion-summary-consumer/SummaryConsumer.fsproj b/propulsion-summary-consumer/SummaryConsumer.fsproj index 57c6e7ccf..cf01e061d 100644 --- a/propulsion-summary-consumer/SummaryConsumer.fsproj +++ b/propulsion-summary-consumer/SummaryConsumer.fsproj @@ -2,7 +2,7 @@ Exe - netcoreapp3.1 + net6.0 5 @@ -17,11 +17,11 @@ - - - - - + + + + + diff --git a/propulsion-summary-consumer/TodoSummary.fs b/propulsion-summary-consumer/TodoSummary.fs index 46379c206..228919a57 100644 --- a/propulsion-summary-consumer/TodoSummary.fs +++ b/propulsion-summary-consumer/TodoSummary.fs @@ -1,7 +1,7 @@ module ConsumerTemplate.TodoSummary let [] Category = "TodoSummary" -let streamName (clientId: ClientId) = FsCodec.StreamName.create Category (ClientId.toString clientId) +let streamName (clientId: ClientId) = struct (Category, ClientId.toString clientId) // NB - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -12,7 +12,7 @@ module Events = type Event = | Ingested of IngestedData interface TypeShape.UnionContract.IUnionContract - let codec = Config.EventCodec.create() + let codec = Config.EventCodec.gen module Fold = @@ -50,9 +50,6 @@ type Service internal (resolve : ClientId -> Equinox.Decider - let cat = Config.Cosmos.createRollingState Events.codec 
Fold.initial Fold.fold Fold.toSnapshot (context, cache) - cat.Resolve - let private resolveDecider store = streamName >> resolveStream store >> Config.createDecider - let create = resolveDecider >> Service + let private (|Category|) = function + | Config.Store.Cosmos (context, cache) -> Config.Cosmos.createRollingState Events.codec Fold.initial Fold.fold Fold.toSnapshot (context, cache) + let create (Category cat) = Service(streamName >> Config.createDecider cat) From 98f6daa24c7d6a9a4d1bd5227bdf3bbd94c2f631 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Tue, 13 Sep 2022 00:35:16 +0100 Subject: [PATCH 11/43] trackingConsumer --- propulsion-tracking-consumer/Config.fs | 2 +- propulsion-tracking-consumer/Infrastructure.fs | 13 ++++++------- propulsion-tracking-consumer/Ingester.fs | 10 +++++----- propulsion-tracking-consumer/SkuSummary.fs | 13 +++++-------- .../TrackingConsumer.fsproj | 12 ++++++------ 5 files changed, 23 insertions(+), 27 deletions(-) diff --git a/propulsion-tracking-consumer/Config.fs b/propulsion-tracking-consumer/Config.fs index cf593fc82..3e1129098 100644 --- a/propulsion-tracking-consumer/Config.fs +++ b/propulsion-tracking-consumer/Config.fs @@ -1,7 +1,7 @@ module ConsumerTemplate.Config let log = Serilog.Log.ForContext("isMetric", true) -let createDecider stream = Equinox.Decider(log, stream, maxAttempts = 3) +let createDecider cat = Equinox.Decider.resolve log cat module Cosmos = diff --git a/propulsion-tracking-consumer/Infrastructure.fs b/propulsion-tracking-consumer/Infrastructure.fs index 9fa5fe0df..f70bdd5fd 100644 --- a/propulsion-tracking-consumer/Infrastructure.fs +++ b/propulsion-tracking-consumer/Infrastructure.fs @@ -20,20 +20,20 @@ module EnvVar = module EventCodec = /// Uses the supplied codec to decode the supplied event record `x` (iff at LogEventLevel.Debug, detail fails to `log` citing the `stream` and content) - let tryDecode (codec : FsCodec.IEventCodec<_, _, _>) streamName (x : FsCodec.ITimelineEvent) = + let 
tryDecode (codec : FsCodec.IEventCodec<_, _, _>) streamName (x : FsCodec.ITimelineEvent) = match codec.TryDecode x with - | None -> + | ValueNone -> if Log.IsEnabled Serilog.Events.LogEventLevel.Debug then - Log.ForContext("event", System.Text.Encoding.UTF8.GetString(x.Data), true) + Log.ForContext("event", System.Text.Encoding.UTF8.GetString(let d = x.Data in d.Span), true) .Debug("Codec {type} Could not decode {eventType} in {stream}", codec.GetType().FullName, x.EventType, streamName) - None + ValueNone | x -> x open FsCodec.SystemTextJson let private defaultOptions = Options.Create() - let create<'t when 't :> TypeShape.UnionContract.IUnionContract> () = - Codec.Create<'t>(options = defaultOptions).ToByteArrayCodec() + let gen<'t when 't :> TypeShape.UnionContract.IUnionContract> = + CodecJsonElement.Create<'t>(options = defaultOptions) type Equinox.CosmosStore.CosmosStoreConnector with @@ -63,7 +63,6 @@ type Logging() = [] static member Configure(configuration : LoggerConfiguration, ?verbose) = configuration - .Destructure.FSharpTypes() .Enrich.FromLogContext() |> fun c -> if verbose = Some true then c.MinimumLevel.Debug() else c |> fun c -> let theme = Sinks.SystemConsole.Themes.AnsiConsoleTheme.Code diff --git a/propulsion-tracking-consumer/Ingester.fs b/propulsion-tracking-consumer/Ingester.fs index 81f9344bd..49bbc7b92 100644 --- a/propulsion-tracking-consumer/Ingester.fs +++ b/propulsion-tracking-consumer/Ingester.fs @@ -13,9 +13,9 @@ module Contract = pickTicketId : string purchaseOrderInfo : OrderInfo[] } let serdes = FsCodec.SystemTextJson.Options.Create() |> FsCodec.SystemTextJson.Serdes - let parse (utf8 : byte[]) : Message = + let parse (utf8 : Propulsion.Streams.Default.EventBody) : Message = // NB see https://github.com/jet/FsCodec for details of the default serialization profile (TL;DR only has an `OptionConverter`) - System.Text.Encoding.UTF8.GetString(utf8) + System.Text.Encoding.UTF8.GetString(utf8.Span) |> serdes.Deserialize type Outcome = 
Completed of used : int * unused : int @@ -40,9 +40,9 @@ type Stats(log, statsInterval, stateInterval) = /// Ingest queued events per sku - each time we handle all the incoming updates for a given stream as a single act let ingest (service : SkuSummary.Service) - (FsCodec.StreamName.CategoryAndId (_, SkuId.Parse skuId), span : Propulsion.Streams.StreamSpan<_>) = async { + struct (FsCodec.StreamName.CategoryAndId (_, SkuId.Parse skuId), span : Propulsion.Streams.StreamSpan<_>) = async { let items = - [ for e in span.events do + [ for e in span do let x = Contract.parse e.Data for o in x.purchaseOrderInfo do let x : SkuSummary.Events.ItemData = @@ -53,4 +53,4 @@ let ingest reservedQuantity = o.reservedUnitQuantity } yield x ] let! used = service.Ingest(skuId, items) - return Propulsion.Streams.SpanResult.AllProcessed, Outcome.Completed(used, items.Length - used) } + return struct (Propulsion.Streams.SpanResult.AllProcessed, Outcome.Completed(used, items.Length - used)) } diff --git a/propulsion-tracking-consumer/SkuSummary.fs b/propulsion-tracking-consumer/SkuSummary.fs index bb014f527..20c349c37 100644 --- a/propulsion-tracking-consumer/SkuSummary.fs +++ b/propulsion-tracking-consumer/SkuSummary.fs @@ -1,7 +1,7 @@ module ConsumerTemplate.SkuSummary let [] Category = "SkuSummary" -let streamName (id : SkuId) = FsCodec.StreamName.create Category (SkuId.toString id) +let streamName (id : SkuId) = struct (Category, SkuId.toString id) // NB - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -16,7 +16,7 @@ module Events = | Ingested of ItemData | Snapshotted of ItemData[] interface TypeShape.UnionContract.IUnionContract - let codec = EventCodec.create() + let codec = EventCodec.gen module Fold = @@ -61,9 +61,6 @@ type Service internal (resolve : SkuId -> Equinox.Decider - let cat = Config.Cosmos.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, 
cache) - cat.Resolve - let private resolveDecider store = streamName >> resolveStream store >> Config.createDecider - let create = resolveDecider >> Service + let private (|Category|) = function + | Config.Store.Cosmos (context, cache) -> Config.Cosmos.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) + let create (Category cat) = Service(streamName >> Config.createDecider cat) diff --git a/propulsion-tracking-consumer/TrackingConsumer.fsproj b/propulsion-tracking-consumer/TrackingConsumer.fsproj index 641f8bd05..f3af02cf1 100644 --- a/propulsion-tracking-consumer/TrackingConsumer.fsproj +++ b/propulsion-tracking-consumer/TrackingConsumer.fsproj @@ -2,7 +2,7 @@ Exe - netcoreapp3.1 + net6.0 5 @@ -17,11 +17,11 @@ - - - - - + + + + + From aac71bc94b0770e01511ba34e963f6b53fd08d9e Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Tue, 13 Sep 2022 08:16:19 +0100 Subject: [PATCH 12/43] proProjector --- propulsion-projector/Handler.fs | 14 ++++++++----- propulsion-projector/Infrastructure.fs | 1 - propulsion-projector/Program.fs | 27 +++++++++----------------- propulsion-projector/Projector.fsproj | 20 +++++++++---------- 4 files changed, 28 insertions(+), 34 deletions(-) diff --git a/propulsion-projector/Handler.fs b/propulsion-projector/Handler.fs index 236f393b2..f21ecdb25 100644 --- a/propulsion-projector/Handler.fs +++ b/propulsion-projector/Handler.fs @@ -30,9 +30,9 @@ let mapToStreamItems docs : Propulsion.Streams.StreamEvent seq = //let hackDropBigBodies (e : Propulsion.Streams.StreamEvent<_>) : Propulsion.Streams.StreamEvent<_> = // { stream = e.stream; event = replaceLongDataWithNull e.event } -let mapToStreamItems docs : Propulsion.Streams.StreamEvent<_> seq = +let mapToStreamItems categoryFilter docs : Propulsion.Streams.StreamEvent<_> seq = docs - |> Seq.collect Propulsion.CosmosStore.EquinoxNewtonsoftParser.enumStreamEvents + |> Seq.collect 
(Propulsion.CosmosStore.EquinoxSystemTextJsonParser.enumStreamEvents categoryFilter) // TODO use Seq.filter and/or Seq.map to adjust what's being sent etc // |> Seq.map hackDropBigBodies #endif // cosmos && !parallelOnly && synthesizeSequence @@ -104,9 +104,13 @@ type Stats(log, statsInterval, stateInterval) = log.Information(" Total events processed {total}", totalCount) totalCount <- 0 -let handle (_stream, span: Propulsion.Streams.StreamSpan<_>) = async { +let categoryFilter = function + | "categoryA" + | _ -> true + +let handle struct (_stream, span: Propulsion.Streams.StreamSpan<_>) = async { let r = System.Random() - let ms = r.Next(1, span.events.Length) + let ms = r.Next(1, span.Length) do! Async.Sleep ms - return Propulsion.Streams.SpanResult.AllProcessed, span.events.Length } + return struct (Propulsion.Streams.SpanResult.AllProcessed, span.Length) } #endif // !kafka diff --git a/propulsion-projector/Infrastructure.fs b/propulsion-projector/Infrastructure.fs index 9be4bcca5..7499a9c20 100644 --- a/propulsion-projector/Infrastructure.fs +++ b/propulsion-projector/Infrastructure.fs @@ -49,7 +49,6 @@ type Logging() = [] static member Configure(configuration : LoggerConfiguration, ?verbose) = configuration - .Destructure.FSharpTypes() .Enrich.FromLogContext() |> fun c -> if verbose = Some true then c.MinimumLevel.Debug() else c |> fun c -> let t = "[{Timestamp:HH:mm:ss} {Level:u3}] {Message:lj} {NewLine}{Exception}" diff --git a/propulsion-projector/Program.fs b/propulsion-projector/Program.fs index f626b38f6..b0b173cc8 100644 --- a/propulsion-projector/Program.fs +++ b/propulsion-projector/Program.fs @@ -345,20 +345,12 @@ module Args = //#if esdb module Checkpoints = - open Equinox.CosmosStore - open Propulsion.EventStore - // In this implementation, we keep the checkpoints in Cosmos when consuming from EventStore module Cosmos = - let codec = FsCodec.NewtonsoftJson.Codec.Create() - let access = AccessStrategy.Custom (Checkpoint.Fold.isOrigin, 
Checkpoint.Fold.transmute) let create groupName (context, cache) = - let caching = CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) - let cat = CosmosStoreCategory(context, codec, Checkpoint.Fold.fold, Checkpoint.Fold.initial, caching, access) - let resolve streamName = cat.Resolve(streamName, Equinox.AllowStale) - Checkpoint.CheckpointSeries(groupName, resolve) - + Propulsion.Feed.ReaderCheckpoint.CosmosStore.create Log.Logger groupName (context, cache) + //#endif // esdb let [] AppName = "ProjectorTemplate" @@ -418,7 +410,9 @@ let build (args : Args.Arguments) = let monitored = srcSql.Connect() let connectionString = srcSql.BuildCheckpointsConnectionString() - let checkpoints = Propulsion.SqlStreamStore.ReaderCheckpoint.Service(connectionString) + let checkpointEventInterval = TimeSpan.FromHours 1. // Ignored when storing to Propulsion.SqlStreamStore.ReaderCheckpoint; relevant for Cosmos + let groupName = "default" + let checkpoints = Propulsion.SqlStreamStore.ReaderCheckpoint.Service(connectionString, groupName, checkpointEventInterval) #if kafka // sss && kafka let broker, topic = args.Target.BuildTargetParams() @@ -427,18 +421,15 @@ let build (args : Args.Arguments) = let sink = Propulsion.Kafka.StreamsProducerSink.Start(Log.Logger, maxReadAhead, maxConcurrentStreams, Handler.render, producer, stats, args.StatsInterval) #else // sss && !kafka let stats = Handler.Stats(Log.Logger, args.StatsInterval, args.StateInterval) - let sink = Propulsion.Streams.StreamsProjector.Start(Log.Logger, maxReadAhead, maxConcurrentStreams, Handler.handle, stats, args.StatsInterval) + let sink = Propulsion.Streams.Default.Config.Start(Log.Logger, maxReadAhead, maxConcurrentStreams, Handler.handle, stats, args.StatsInterval) #endif // sss && !kafka let pumpSource = - let sourceId = Propulsion.Feed.SourceId.parse "default" // (was hardwired as '$all' prior to v 2.12.0) - let checkpointEventInterval = TimeSpan.FromHours 1. 
// Ignored when storing to Propulsion.SqlStreamStore.ReaderCheckpoint; relevant for Cosmos let source = Propulsion.SqlStreamStore.SqlStreamStoreSource ( Log.Logger, args.StatsInterval, - sourceId, srcSql.MaxBatchSize, srcSql.TailInterval, - checkpoints, checkpointEventInterval, - monitored, sink) - source.Pump(args.ProcessorName) + monitored, srcSql.MaxBatchSize, srcSql.TailInterval, + checkpoints, sink, Handler.categoryFilter, hydrateBodies = true) + source.Pump [ pumpSource; sink.AwaitWithStopOnCancellation() ] //#endif // sss diff --git a/propulsion-projector/Projector.fsproj b/propulsion-projector/Projector.fsproj index 9d905a8f9..3d39d28a0 100644 --- a/propulsion-projector/Projector.fsproj +++ b/propulsion-projector/Projector.fsproj @@ -2,7 +2,7 @@ Exe - netcoreapp3.1 + net6.0 5 @@ -15,25 +15,25 @@ - - + - + - + - + - - + + - + + - + From 77baf6bf743535729fe5e851e409939841fa1d5a Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Tue, 13 Sep 2022 10:16:41 +0100 Subject: [PATCH 13/43] proArchiver --- propulsion-archiver/Archiver.fsproj | 9 +++--- propulsion-archiver/Handler.fs | 12 ++++---- propulsion-archiver/Infrastructure.fs | 1 - propulsion-archiver/Program.fs | 40 +++++++++++++-------------- 4 files changed, 30 insertions(+), 32 deletions(-) diff --git a/propulsion-archiver/Archiver.fsproj b/propulsion-archiver/Archiver.fsproj index 45caf5a1a..41b3691b3 100644 --- a/propulsion-archiver/Archiver.fsproj +++ b/propulsion-archiver/Archiver.fsproj @@ -2,7 +2,7 @@ Exe - netcoreapp3.1 + net6.0 5 @@ -14,12 +14,11 @@ - - - + + - + diff --git a/propulsion-archiver/Handler.fs b/propulsion-archiver/Handler.fs index 5da7c317e..7bfe56c93 100644 --- a/propulsion-archiver/Handler.fs +++ b/propulsion-archiver/Handler.fs @@ -7,6 +7,10 @@ type Stats(log, statsInterval, stateInterval) = override _.HandleExn(log, exn) = log.Information(exn, "Unhandled") +let categoryFilter = function + | "CategoryName" -> true + | _ -> false + let (|Archivable|NotArchivable|) = function // 
TODO define Categories that should be copied to the secondary Container | "CategoryName" -> @@ -14,11 +18,9 @@ let (|Archivable|NotArchivable|) = function | _ -> NotArchivable -let selectArchivable (changeFeedDocument : Newtonsoft.Json.Linq.JObject) : Propulsion.Streams.StreamEvent<_> seq = seq { - let s = changeFeedDocument.GetValue("p") |> string - if s.StartsWith("events-") then () else - for batch in Propulsion.CosmosStore.EquinoxNewtonsoftParser.enumStreamEvents changeFeedDocument do - let (FsCodec.StreamName.CategoryAndId (cat,_)) = batch.stream +let selectArchivable changeFeedDocument: Propulsion.Streams.StreamEvent<_> seq = seq { + for struct (s, _e) as batch in Propulsion.CosmosStore.EquinoxSystemTextJsonParser.enumStreamEvents categoryFilter changeFeedDocument do + let (FsCodec.StreamName.Category cat) = s match cat with | Archivable -> yield batch | NotArchivable -> () diff --git a/propulsion-archiver/Infrastructure.fs b/propulsion-archiver/Infrastructure.fs index 0908c50f0..4740bdbab 100644 --- a/propulsion-archiver/Infrastructure.fs +++ b/propulsion-archiver/Infrastructure.fs @@ -48,7 +48,6 @@ type Logging() = [] static member Configure(configuration : LoggerConfiguration, ?verbose) = configuration - .Destructure.FSharpTypes() .Enrich.FromLogContext() |> fun c -> if verbose = Some true then c.MinimumLevel.Debug() else c diff --git a/propulsion-archiver/Program.fs b/propulsion-archiver/Program.fs index cc0b6f56f..807874dc7 100644 --- a/propulsion-archiver/Program.fs +++ b/propulsion-archiver/Program.fs @@ -5,13 +5,11 @@ open Serilog open System exception MissingArg of message : string with override this.Message = this.message +let missingArg msg = raise (MissingArg msg) type Configuration(tryGet) = - let get key = - match tryGet key with - | Some value -> value - | None -> raise (MissingArg (sprintf "Missing Argument/Environment Variable %s" key)) + let get key = match tryGet key with Some value -> value | None -> missingArg $"Missing 
Argument/Environment Variable %s{key}" member _.CosmosConnection = get "EQUINOX_COSMOS_CONNECTION" member _.CosmosDatabase = get "EQUINOX_COSMOS_DATABASE" @@ -53,9 +51,9 @@ module Args = member val StatsInterval = TimeSpan.FromMinutes 1. member val StateInterval = TimeSpan.FromMinutes 5. member val Source : CosmosSourceArguments = - match a.TryGetSubCommand() with - | Some (SrcCosmos cosmos) -> CosmosSourceArguments (c, cosmos) - | _ -> raise (MissingArg "Must specify cosmos for SrcCosmos") + match a.GetSubCommand() with + | SrcCosmos cosmos -> CosmosSourceArguments(c, cosmos) + | _ -> missingArg "Must specify cosmos for SrcCosmos" member x.DestinationArchive = x.Source.Archive member x.MonitoringParams() = let srcC = x.Source @@ -64,7 +62,7 @@ module Args = match srcC.LeaseContainer, dstC.LeaseContainerId with | _, None -> srcC.ConnectLeases() | None, Some dc -> dstC.ConnectLeases dc - | Some _, Some _ -> raise (MissingArg "LeaseContainerSource and LeaseContainerDestination are mutually exclusive - can only store in one database") + | Some _, Some _ -> missingArg "LeaseContainerSource and LeaseContainerDestination are mutually exclusive - can only store in one database" Log.Information("Archiving... {dop} writers, max {maxReadAhead} batches read ahead, max write batch {maxKib} KiB", x.MaxWriters, x.MaxReadAhead, x.MaxBytes / 1024) Log.Information("ChangeFeed {processorName} Leases Database {db} Container {container}. 
MaxItems limited to {maxItems}", x.ProcessorName, leases.Database.Id, leases.Id, Option.toNullable srcC.MaxItems) @@ -127,9 +125,9 @@ module Args = | Some sc -> x.ConnectLeases(sc) member val Archive = - match a.TryGetSubCommand() with - | Some (DstCosmos cosmos) -> CosmosSinkArguments (c, cosmos) - | _ -> raise (MissingArg "Must specify cosmos for Sink") + match a.GetSubCommand() with + | DstCosmos cosmos -> CosmosSinkArguments(c, cosmos) + | _ -> missingArg "Must specify cosmos for Sink" and [] CosmosSinkParameters = | [] ConnectionMode of Microsoft.Azure.Cosmos.ConnectionMode | [] Connection of string @@ -183,11 +181,13 @@ let build (args : Args.Arguments, log) = let archiverSink = let context = args.DestinationArchive.Connect() |> Async.RunSynchronously |> CosmosStoreContext.create let eventsContext = Equinox.CosmosStore.Core.EventsContext(context, Config.log) - CosmosStoreSink.Start(log, args.MaxReadAhead, eventsContext, args.MaxWriters, args.StatsInterval, args.StateInterval, (*purgeInterval=TimeSpan.FromMinutes 10.,*) maxBytes = args.MaxBytes) + CosmosStoreSink.Start(log, args.MaxReadAhead, eventsContext, args.MaxWriters, args.StatsInterval, args.StateInterval, + purgeInterval=TimeSpan.FromMinutes 10., maxBytes = args.MaxBytes) let source = let observer = CosmosStoreSource.CreateObserver(log, archiverSink.StartIngester, Seq.collect Handler.selectArchivable) let monitored, leases, processorName, startFromTail, maxItems, lagFrequency = args.MonitoringParams() - CosmosStoreSource.Start(log, monitored, leases, processorName, observer, startFromTail, ?maxItems=maxItems, lagReportFreq=lagFrequency) + CosmosStoreSource.Start(log, monitored, leases, processorName, observer, + startFromTail = startFromTail, ?maxItems=maxItems, lagReportFreq=lagFrequency) archiverSink, source // A typical app will likely have health checks etc, implying the wireup would be via `endpoints.MapMetrics()` and thus not use this ugly code directly @@ -197,25 +197,23 @@ let 
startMetricsServer port : IDisposable = Log.Information("Prometheus /metrics endpoint on port {port}", port) { new IDisposable with member x.Dispose() = ms.Stop(); (metricsServer :> IDisposable).Dispose() } -open Propulsion.CosmosStore.Infrastructure // AwaitKeyboardInterruptAsTaskCancelledException +open Propulsion.Internal // AwaitKeyboardInterruptAsTaskCanceledException let run (args : Args.Arguments) = async { let log = (Log.forGroup args.ProcessorName).ForContext() let sink, source = build (args, log) use _metricsServer : IDisposable = args.PrometheusPort |> Option.map startMetricsServer |> Option.toObj - return! Async.Parallel [ - Async.AwaitKeyboardInterruptAsTaskCancelledException() - source.AwaitWithStopOnCancellation() - sink.AwaitWithStopOnCancellation() ] - |> Async.Ignore -} + return! [| Async.AwaitKeyboardInterruptAsTaskCanceledException() + source.AwaitWithStopOnCancellation() + sink.AwaitWithStopOnCancellation() + |] |> Async.Parallel |> Async.Ignore } [] let main argv = try let args = Args.parse EnvVar.tryGet argv try Log.Logger <- LoggerConfiguration().Configure(AppName, args.Verbose, args.SyncLogging).CreateLogger() try run args |> Async.RunSynchronously; 0 - with e when not (e :? MissingArg) -> Log.Fatal(e, "Exiting"); 2 + with e when not (e :? MissingArg) && not (e :? System.Threading.Tasks.TaskCanceledException) -> Log.Fatal(e, "Exiting"); 2 finally Log.CloseAndFlush() with MissingArg msg -> eprintfn "%s" msg; 1 | :? 
Argu.ArguParseException as e -> eprintfn "%s" e.Message; 1 From 9f55c24f86e4ceb95feb04f5b36019d802912de2 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Tue, 13 Sep 2022 11:29:55 +0100 Subject: [PATCH 14/43] proReactorCosmos --- propulsion-cosmos-reactor/Config.fs | 8 +++--- propulsion-cosmos-reactor/Infrastructure.fs | 9 +++---- propulsion-cosmos-reactor/Program.fs | 25 ++++++++++--------- propulsion-cosmos-reactor/Reactor.fs | 10 +++++--- propulsion-cosmos-reactor/Reactor.fsproj | 11 ++++----- propulsion-cosmos-reactor/Todo.fs | 27 +++++++++++---------- propulsion-cosmos-reactor/TodoSummary.fs | 13 ++++------ 7 files changed, 52 insertions(+), 51 deletions(-) diff --git a/propulsion-cosmos-reactor/Config.fs b/propulsion-cosmos-reactor/Config.fs index a83e82548..8f4a9f1bf 100644 --- a/propulsion-cosmos-reactor/Config.fs +++ b/propulsion-cosmos-reactor/Config.fs @@ -1,15 +1,17 @@ module ReactorTemplate.Config let log = Serilog.Log.ForContext("isMetric", true) -let createDecider stream = Equinox.Decider(log, stream, maxAttempts = 3) +let createDecider cat = Equinox.Decider.resolve log cat module EventCodec = open FsCodec.SystemTextJson let private defaultOptions = Options.Create() - let create<'t when 't :> TypeShape.UnionContract.IUnionContract> () = - Codec.Create<'t>(options = defaultOptions).ToByteArrayCodec() + let gen<'t when 't :> TypeShape.UnionContract.IUnionContract> = + Codec.Create<'t>(options = defaultOptions) + let genJe<'t when 't :> TypeShape.UnionContract.IUnionContract> = + CodecJsonElement.Create<'t>(options = defaultOptions) module Cosmos = diff --git a/propulsion-cosmos-reactor/Infrastructure.fs b/propulsion-cosmos-reactor/Infrastructure.fs index 1785c36cb..f1ee2152b 100644 --- a/propulsion-cosmos-reactor/Infrastructure.fs +++ b/propulsion-cosmos-reactor/Infrastructure.fs @@ -23,13 +23,13 @@ module EnvVar = module EventCodec = /// Uses the supplied codec to decode the supplied event record `x` (iff at LogEventLevel.Debug, detail fails to 
`log` citing the `stream` and content) - let tryDecode (codec : FsCodec.IEventCodec<_, _, _>) streamName (x : FsCodec.ITimelineEvent) = + let tryDecode (codec : FsCodec.IEventCodec<_, _, _>) streamName (x : FsCodec.ITimelineEvent) = match codec.TryDecode x with - | None -> + | ValueNone -> if Log.IsEnabled Serilog.Events.LogEventLevel.Debug then - Log.ForContext("event", System.Text.Encoding.UTF8.GetString(x.Data), true) + Log.ForContext("event", System.Text.Encoding.UTF8.GetString(let d = x.Data in d.Span), true) .Debug("Codec {type} Could not decode {eventType} in {stream}", codec.GetType().FullName, x.EventType, streamName) - None + ValueNone | x -> x module Log = @@ -100,7 +100,6 @@ type Logging() = [] static member Configure(configuration : LoggerConfiguration, ?verbose) = configuration - .Destructure.FSharpTypes() .Enrich.FromLogContext() |> fun c -> if verbose = Some true then c.MinimumLevel.Debug() else c diff --git a/propulsion-cosmos-reactor/Program.fs b/propulsion-cosmos-reactor/Program.fs index 01da30361..c42b0fa43 100644 --- a/propulsion-cosmos-reactor/Program.fs +++ b/propulsion-cosmos-reactor/Program.fs @@ -4,13 +4,11 @@ open Serilog open System exception MissingArg of message : string with override this.Message = this.message +let missingArg msg = raise (MissingArg msg) type Configuration(tryGet) = - let get key = - match tryGet key with - | Some value -> value - | None -> raise (MissingArg (sprintf "Missing Argument/Environment Variable %s" key)) + let get key = match tryGet key with Some value -> value | None -> missingArg $"Missing Argument/Environment Variable %s{key}" member _.CosmosConnection = get "EQUINOX_COSMOS_CONNECTION" member _.CosmosDatabase = get "EQUINOX_COSMOS_DATABASE" @@ -46,7 +44,7 @@ module Args = (x.ProcessorName, maxReadAhead, maxConcurrentProcessors) member val StatsInterval = TimeSpan.FromMinutes 1. member val StateInterval = TimeSpan.FromMinutes 5. 
- member val Cosmos = CosmosArguments (c, a.GetResult Cosmos) + member val Cosmos = CosmosArguments(c, a.GetResult Cosmos) and [] CosmosParameters = | [] ConnectionMode of Microsoft.Azure.Cosmos.ConnectionMode | [] Connection of string @@ -118,12 +116,13 @@ let build (args : Args.Arguments) = Config.Store.Cosmos (context, cache) let stats = Reactor.Stats(Log.Logger, args.StatsInterval, args.StateInterval) let handle = Reactor.Config.createHandler store - Propulsion.Streams.StreamsProjector.Start(Log.Logger, maxReadAhead, maxConcurrentStreams, handle, stats, args.StatsInterval) + Propulsion.Streams.Default.Config.Start(Log.Logger, maxReadAhead, maxConcurrentStreams, handle, stats, args.StatsInterval) let source = - let parseFeedDoc : _ -> Propulsion.Streams.StreamEvent<_> seq = Seq.collect Propulsion.CosmosStore.EquinoxNewtonsoftParser.enumStreamEvents + let parseFeedDoc : _ -> Propulsion.Streams.StreamEvent<_> seq = Seq.collect (Propulsion.CosmosStore.EquinoxSystemTextJsonParser.enumStreamEvents Reactor.categoryFilter) let observer = Propulsion.CosmosStore.CosmosStoreSource.CreateObserver(Log.Logger, sink.StartIngester, parseFeedDoc) let leases, startFromTail, maxItems, lagFrequency = args.Cosmos.MonitoringParams() - Propulsion.CosmosStore.CosmosStoreSource.Start(Log.Logger, monitored, leases, processorName, observer, startFromTail, ?maxItems=maxItems, lagReportFreq=lagFrequency) + Propulsion.CosmosStore.CosmosStoreSource.Start(Log.Logger, monitored, leases, processorName, observer, + startFromTail = startFromTail, ?maxItems=maxItems, lagReportFreq=lagFrequency) sink, source // A typical app will likely have health checks etc, implying the wireup would be via `endpoints.MapMetrics()` and thus not use this ugly code directly @@ -133,13 +132,15 @@ let startMetricsServer port : IDisposable = Log.Information("Prometheus /metrics endpoint on port {port}", port) { new IDisposable with member x.Dispose() = ms.Stop(); (metricsServer :> IDisposable).Dispose() } -open 
Propulsion.CosmosStore.Infrastructure // AwaitKeyboardInterruptAsTaskCancelledException +open Propulsion.Internal // AwaitKeyboardInterruptAsTaskCanceledException let run args = async { let sink, source = build args use _metricsServer : IDisposable = args.PrometheusPort |> Option.map startMetricsServer |> Option.toObj - return! Async.Parallel [ Async.AwaitKeyboardInterruptAsTaskCancelledException(); source.AwaitWithStopOnCancellation(); sink.AwaitWithStopOnCancellation() ] |> Async.Ignore -} + return! [| Async.AwaitKeyboardInterruptAsTaskCanceledException() + source.AwaitWithStopOnCancellation() + sink.AwaitWithStopOnCancellation() + |] |> Async.Parallel |> Async.Ignore } [] let main argv = @@ -147,7 +148,7 @@ let main argv = try let metrics = Sinks.equinoxAndPropulsionCosmosConsumerMetrics (Sinks.tags AppName) args.ProcessorName Log.Logger <- LoggerConfiguration().Configure(args.Verbose).Sinks(metrics, args.Cosmos.Verbose).CreateLogger() try run args |> Async.RunSynchronously; 0 - with e when not (e :? MissingArg) -> Log.Fatal(e, "Exiting"); 2 + with e when not (e :? MissingArg) && not (e :? System.Threading.Tasks.TaskCanceledException) -> Log.Fatal(e, "Exiting"); 2 finally Log.CloseAndFlush() with MissingArg msg -> eprintfn "%s" msg; 1 | :? 
Argu.ArguParseException as e -> eprintfn "%s" e.Message; 1 diff --git a/propulsion-cosmos-reactor/Reactor.fs b/propulsion-cosmos-reactor/Reactor.fs index d7be93e21..a1078ff5f 100644 --- a/propulsion-cosmos-reactor/Reactor.fs +++ b/propulsion-cosmos-reactor/Reactor.fs @@ -29,17 +29,19 @@ let toSummaryEventData ( x : Contract.SummaryInfo) : TodoSummary.Events.SummaryD [| for x in x.items -> { id = x.id; order = x.order; title = x.title; completed = x.completed } |] } +let categoryFilter = Todo.Reactions.categoryFilter + let handle (sourceService : Todo.Service) (summaryService : TodoSummary.Service) - (stream, span : Propulsion.Streams.StreamSpan<_>) = async { + struct (stream, span : Propulsion.Streams.StreamSpan<_>) = async { match stream, span with | Todo.Reactions.Parse (clientId, events) when events |> Seq.exists Todo.Reactions.impliesStateChange -> let! version', summary = sourceService.QueryWithVersion(clientId, Contract.ofState) match! summaryService.TryIngest(clientId, version', toSummaryEventData summary) with - | true -> return Propulsion.Streams.SpanResult.OverrideWritePosition version', Outcome.Ok (1, span.events.Length - 1) - | false -> return Propulsion.Streams.SpanResult.OverrideWritePosition version', Outcome.Skipped span.events.Length - | _ -> return Propulsion.Streams.SpanResult.AllProcessed, Outcome.NotApplicable span.events.Length } + | true -> return struct (Propulsion.Streams.SpanResult.OverrideWritePosition version', Outcome.Ok (1, span.Length - 1)) + | false -> return Propulsion.Streams.SpanResult.OverrideWritePosition version', Outcome.Skipped span.Length + | _ -> return Propulsion.Streams.SpanResult.AllProcessed, Outcome.NotApplicable span.Length } module Config = diff --git a/propulsion-cosmos-reactor/Reactor.fsproj b/propulsion-cosmos-reactor/Reactor.fsproj index 1953bb70a..a6c4ab687 100644 --- a/propulsion-cosmos-reactor/Reactor.fsproj +++ b/propulsion-cosmos-reactor/Reactor.fsproj @@ -2,7 +2,7 @@ Exe - netcoreapp3.1 + net6.0 5 @@ -20,12 
+20,11 @@ - - - + + - + - + diff --git a/propulsion-cosmos-reactor/Todo.fs b/propulsion-cosmos-reactor/Todo.fs index 5a55831e7..c9b2b5785 100644 --- a/propulsion-cosmos-reactor/Todo.fs +++ b/propulsion-cosmos-reactor/Todo.fs @@ -1,8 +1,10 @@ module ReactorTemplate.Todo +open Propulsion.Internal + let [] Category = "Todos" -let streamName (clientId: ClientId) = FsCodec.StreamName.create Category (ClientId.toString clientId) -let (|StreamName|_|) = function FsCodec.StreamName.CategoryAndId (Category, ClientId.Parse clientId) -> Some clientId | _ -> None +let streamName (clientId : ClientId) = struct (Category, ClientId.toString clientId) +let [] (|StreamName|_|) = function FsCodec.StreamName.CategoryAndId (Category, ClientId.Parse clientId) -> ValueSome clientId | _ -> ValueNone // NB - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -18,15 +20,16 @@ module Events = | Cleared of ClearedData | Snapshotted of SnapshotData interface TypeShape.UnionContract.IUnionContract - let codec = Config.EventCodec.create() + let codec, codecJe = Config.EventCodec.gen, Config.EventCodec.genJe module Reactions = + let categoryFilter = function Category -> true | _ -> false let (|Decode|) (stream, span : Propulsion.Streams.StreamSpan<_>) = - span.events |> Array.choose (EventCodec.tryDecode Events.codec stream) - let (|Parse|_|) = function - | (StreamName clientId, _) & Decode events -> Some (clientId, events) - | _ -> None + span |> Array.chooseV (EventCodec.tryDecode Events.codec stream) + let [] (|Parse|_|) = function + | (StreamName clientId, _) & Decode events -> ValueSome struct (clientId, events) + | _ -> ValueNone /// Allows us to skip producing summaries for events that we know won't result in an externally discernable change to the summary output let impliesStateChange = function Events.Snapshotted _ -> false | _ -> true @@ -62,9 +65,7 @@ type Service internal (resolve : ClientId -> 
Equinox.Decider - let cat = Config.Cosmos.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) - cat.Resolve - let private resolveDecider store = streamName >> resolveStream store >> Config.createDecider - let create = resolveDecider >> Service + let private (|Category|) = function + | Config.Store.Cosmos (context, cache) -> Config.Cosmos.createSnapshotted Events.codecJe Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) + let create (Category cat) = streamName >> Config.createDecider cat |> Service + diff --git a/propulsion-cosmos-reactor/TodoSummary.fs b/propulsion-cosmos-reactor/TodoSummary.fs index 3cf34bd6c..311aaf18a 100644 --- a/propulsion-cosmos-reactor/TodoSummary.fs +++ b/propulsion-cosmos-reactor/TodoSummary.fs @@ -1,7 +1,7 @@ module ReactorTemplate.TodoSummary let [] Category = "TodoSummary" -let streamName (clientId: ClientId) = FsCodec.StreamName.create Category (ClientId.toString clientId) +let streamName (clientId : ClientId) = struct (Category, ClientId.toString clientId) // NB - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -12,7 +12,7 @@ module Events = type Event = | Ingested of IngestedData interface TypeShape.UnionContract.IUnionContract - let codec = Config.EventCodec.create() + let codec, codecJe = Config.EventCodec.gen, Config.EventCodec.genJe module Fold = @@ -51,9 +51,6 @@ type Service internal (resolve : ClientId -> Equinox.Decider - let cat = Config.Cosmos.createRollingState Events.codec Fold.initial Fold.fold Fold.toSnapshot (context, cache) - cat.Resolve - let private resolveDecider store = streamName >> resolveStream store >> Config.createDecider - let create = resolveDecider >> Service + let private (|Category|) = function + | Config.Store.Cosmos (context, cache) -> Config.Cosmos.createRollingState Events.codecJe Fold.initial Fold.fold Fold.toSnapshot (context, cache) + let 
create (Category cat) = streamName >> Config.createDecider cat |> Service From 20f4b3494ae443adaa4bd2d99df5469cee1a7aa2 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Tue, 13 Sep 2022 11:30:58 +0100 Subject: [PATCH 15/43] f periodicIngester --- periodic-ingester/Ingester.fs | 6 +++--- periodic-ingester/Program.fs | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/periodic-ingester/Ingester.fs b/periodic-ingester/Ingester.fs index ec117f84e..5d7d98eec 100644 --- a/periodic-ingester/Ingester.fs +++ b/periodic-ingester/Ingester.fs @@ -33,9 +33,9 @@ module PipelineEvent = let [] Category = "Ticket" let streamName = TicketId.toString >> FsCodec.StreamName.create Category - let (|StreamName|_|) = function - | FsCodec.StreamName.CategoryAndId (Category, TicketId.Parse id) -> Some id - | _ -> None + let [] (|StreamName|_|) = function + | FsCodec.StreamName.CategoryAndId (Category, TicketId.Parse id) -> ValueSome id + | _ -> ValueNone (* Each item per stream is represented as an event; if multiple events have been found for a given stream, they are delivered together *) diff --git a/periodic-ingester/Program.fs b/periodic-ingester/Program.fs index b052fb91b..2485a76f9 100644 --- a/periodic-ingester/Program.fs +++ b/periodic-ingester/Program.fs @@ -4,7 +4,7 @@ open Serilog open System exception MissingArg of message : string with override this.Message = this.message -let missingArg message = raise (MissingArg message) +let missingArg msg = raise (MissingArg msg) type Configuration(tryGet) = From d66f5463bca84bd30c11618c68a50dfd00b8e120 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Tue, 13 Sep 2022 11:31:23 +0100 Subject: [PATCH 16/43] f feedApi --- feed-source/FeedApi/FeedApi.fsproj | 1 - feed-source/FeedApi/Infrastructure.fs | 1 - feed-source/FeedApi/Program.fs | 12 +++++------- 3 files changed, 5 insertions(+), 9 deletions(-) diff --git a/feed-source/FeedApi/FeedApi.fsproj b/feed-source/FeedApi/FeedApi.fsproj index 11b262286..ffa5c8aca 100644 
--- a/feed-source/FeedApi/FeedApi.fsproj +++ b/feed-source/FeedApi/FeedApi.fsproj @@ -19,7 +19,6 @@ - diff --git a/feed-source/FeedApi/Infrastructure.fs b/feed-source/FeedApi/Infrastructure.fs index 794a16497..13270c9f3 100644 --- a/feed-source/FeedApi/Infrastructure.fs +++ b/feed-source/FeedApi/Infrastructure.fs @@ -48,7 +48,6 @@ type Logging() = [] static member Configure(configuration : LoggerConfiguration, ?verbose) = configuration - .Destructure.FSharpTypes() .Enrich.FromLogContext() .MinimumLevel.Override("Microsoft.AspNetCore", Serilog.Events.LogEventLevel.Warning) |> fun c -> if verbose = Some true then c.MinimumLevel.Debug() else c diff --git a/feed-source/FeedApi/Program.fs b/feed-source/FeedApi/Program.fs index f372cacf1..496647583 100644 --- a/feed-source/FeedApi/Program.fs +++ b/feed-source/FeedApi/Program.fs @@ -4,13 +4,11 @@ open Serilog open System exception MissingArg of message : string with override this.Message = this.message +let missingArg msg = raise (MissingArg msg) type Configuration(tryGet) = - let get key = - match tryGet key with - | Some value -> value - | None -> raise (MissingArg (sprintf "Missing Argument/Environment Variable %s" key)) + let get key = match tryGet key with Some value -> value | None -> missingArg $"Missing Argument/Environment Variable %s{key}" member _.CosmosConnection = get "EQUINOX_COSMOS_CONNECTION" member _.CosmosDatabase = get "EQUINOX_COSMOS_DATABASE" @@ -31,9 +29,9 @@ module Args = and Arguments(config : Configuration, a : ParseResults) = member val Verbose = a.Contains Parameters.Verbose member val Cosmos : CosmosArguments = - match a.TryGetSubCommand() with - | Some (Parameters.Cosmos cosmos) -> CosmosArguments(config, cosmos) - | _ -> raise (MissingArg "Must specify cosmos") + match a.GetSubCommand() with + | Parameters.Cosmos cosmos -> CosmosArguments(config, cosmos) + | _ -> missingArg "Must specify cosmos" and [] CosmosParameters = | [] Verbose | [] Connection of string From 
fe5a01cb9ca823043e5f273c291630af19a194bc Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Tue, 13 Sep 2022 11:31:41 +0100 Subject: [PATCH 17/43] f feedConsumer --- feed-consumer/Program.fs | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/feed-consumer/Program.fs b/feed-consumer/Program.fs index a9a6de6c2..6b39e047e 100644 --- a/feed-consumer/Program.fs +++ b/feed-consumer/Program.fs @@ -4,13 +4,11 @@ open Serilog open System exception MissingArg of message : string with override this.Message = this.message +let missingArg msg = raise (MissingArg msg) type Configuration(tryGet) = - let get key = - match tryGet key with - | Some value -> value - | None -> raise (MissingArg (sprintf "Missing Argument/Environment Variable %s" key)) + let get key = match tryGet key with Some value -> value | None -> missingArg $"Missing Argument/Environment Variable %s{key}" member _.CosmosConnection = get "EQUINOX_COSMOS_CONNECTION" member _.CosmosDatabase = get "EQUINOX_COSMOS_DATABASE" @@ -58,9 +56,9 @@ module Args = member val CheckpointInterval = TimeSpan.FromHours 1. member val TailSleepInterval = TimeSpan.FromSeconds 1. 
member val Cosmos : CosmosArguments = - match a.TryGetSubCommand() with - | Some (Cosmos cosmos) -> CosmosArguments(c, cosmos) - | _ -> raise (MissingArg "Must specify cosmos") + match a.GetSubCommand() with + | Cosmos cosmos -> CosmosArguments(c, cosmos) + | _ -> missingArg "Must specify cosmos" and [] CosmosParameters = | [] Verbose | [] ConnectionMode of Microsoft.Azure.Cosmos.ConnectionMode From 0c80d622e91e8db5fe7b6af94253a73e337854ce Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Tue, 13 Sep 2022 11:32:15 +0100 Subject: [PATCH 18/43] f summaryConsumer --- propulsion-summary-consumer/Ingester.fs | 12 ++++++------ propulsion-summary-consumer/Program.fs | 6 ++---- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/propulsion-summary-consumer/Ingester.fs b/propulsion-summary-consumer/Ingester.fs index 67d77b0af..322c751b5 100644 --- a/propulsion-summary-consumer/Ingester.fs +++ b/propulsion-summary-consumer/Ingester.fs @@ -23,12 +23,12 @@ module Contract = let private codec : FsCodec.IEventCodec = Config.EventCodec.withIndex let [] (|DecodeNewest|_|) (stream, span : Propulsion.Streams.StreamSpan<_>) : VersionAndMessage voption = span |> Seq.rev |> Seq.tryPickV (EventCodec.tryDecode codec stream) - let (|StreamName|_|) = function - | FsCodec.StreamName.CategoryAndId (Category, ClientId.Parse clientId) -> Some clientId - | _ -> None - let (|MatchNewest|_|) = function - | (StreamName clientId, _) & DecodeNewest (version, update) -> Some (clientId, version, update) - | _ -> None + let [] (|StreamName|_|) = function + | FsCodec.StreamName.CategoryAndId (Category, ClientId.Parse clientId) -> ValueSome clientId + | _ -> ValueNone + let [] (|MatchNewest|_|) = function + | (StreamName clientId, _) & DecodeNewest (version, update) -> ValueSome struct (clientId, version, update) + | _ -> ValueNone [] type Outcome = diff --git a/propulsion-summary-consumer/Program.fs b/propulsion-summary-consumer/Program.fs index 9180e80d5..80d9b85b1 100644 --- 
a/propulsion-summary-consumer/Program.fs +++ b/propulsion-summary-consumer/Program.fs @@ -4,13 +4,11 @@ open Serilog open System exception MissingArg of message : string with override this.Message = this.message +let missingArg msg = raise (MissingArg msg) type Configuration(tryGet) = - let get key = - match tryGet key with - | Some value -> value - | None -> raise (MissingArg (sprintf "Missing Argument/Environment Variable %s" key)) + let get key = match tryGet key with Some value -> value | None -> missingArg $"Missing Argument/Environment Variable %s{key}" member _.CosmosConnection = get "EQUINOX_COSMOS_CONNECTION" member _.CosmosDatabase = get "EQUINOX_COSMOS_DATABASE" From a36df1b8ad3a24226799b18bf8869fc326137d35 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Tue, 13 Sep 2022 11:32:33 +0100 Subject: [PATCH 19/43] f trackingConsumer --- propulsion-tracking-consumer/Program.fs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/propulsion-tracking-consumer/Program.fs b/propulsion-tracking-consumer/Program.fs index 351710b79..58d03ad41 100644 --- a/propulsion-tracking-consumer/Program.fs +++ b/propulsion-tracking-consumer/Program.fs @@ -4,13 +4,11 @@ open Serilog open System exception MissingArg of message : string with override this.Message = this.message +let missingArg msg = raise (MissingArg msg) type Configuration(tryGet) = - let get key = - match tryGet key with - | Some value -> value - | None -> raise (MissingArg (sprintf "Missing Argument/Environment Variable %s" key)) + let get key = match tryGet key with Some value -> value | None -> missingArg $"Missing Argument/Environment Variable %s{key}" member _.CosmosConnection = get "EQUINOX_COSMOS_CONNECTION" member _.CosmosDatabase = get "EQUINOX_COSMOS_DATABASE" From 20a910e881e1b8ae541e56b6b2deb8a9d869249b Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Tue, 13 Sep 2022 11:34:10 +0100 Subject: [PATCH 20/43] proPruner --- propulsion-pruner/Handler.fs | 16 ++++++++----- 
propulsion-pruner/Infrastructure.fs | 1 - propulsion-pruner/Program.fs | 36 ++++++++++++++--------------- propulsion-pruner/Pruner.fsproj | 9 ++++---- 4 files changed, 31 insertions(+), 31 deletions(-) diff --git a/propulsion-pruner/Handler.fs b/propulsion-pruner/Handler.fs index 4327bb40f..d6288bacb 100644 --- a/propulsion-pruner/Handler.fs +++ b/propulsion-pruner/Handler.fs @@ -4,8 +4,12 @@ open System // As we're not looking at the bodies of the events in the course of the shouldPrune decision, we remove them // from the Event immediately in order to avoid consuming lots of memory without purpose while they're queued -let removeDataAndMeta (x : FsCodec.ITimelineEvent) : FsCodec.ITimelineEvent<_> = - FsCodec.Core.TimelineEvent.Create(x.Index, x.EventType, null, timestamp=x.Timestamp) +let removeDataAndMeta (x : FsCodec.ITimelineEvent) : FsCodec.ITimelineEvent<_> = + FsCodec.Core.TimelineEvent.Create(x.Index, x.EventType, Unchecked.defaultof, timestamp = x.Timestamp) + +let categoryFilter = function + | "CategoryName" -> true + | _ -> false // We prune events from the Primary Container as we reach the point where there's no benefit to them staying there. e.g. // 1. If a ChangeFeedProcessor (including new ones) needs to be able to walk those events @@ -22,9 +26,9 @@ let shouldPrune category (age : TimeSpan) = // NOTE - DANGEROUS - events submitted to the CosmosPruner get removed from the supplied Context! 
let selectPrunable changeFeedDocument : Propulsion.Streams.StreamEvent<_> seq = seq { let asOf = DateTimeOffset.UtcNow - for se in Propulsion.CosmosStore.EquinoxNewtonsoftParser.enumStreamEvents changeFeedDocument do - let (FsCodec.StreamName.CategoryAndId (cat,_)) = se.stream - let age = asOf - se.event.Timestamp + for s, e in Propulsion.CosmosStore.EquinoxSystemTextJsonParser.enumStreamEvents categoryFilter changeFeedDocument do + let (FsCodec.StreamName.Category cat) = s + let age = asOf - e.Timestamp if shouldPrune cat age then - yield { se with event = removeDataAndMeta se.event } + yield s, removeDataAndMeta e } diff --git a/propulsion-pruner/Infrastructure.fs b/propulsion-pruner/Infrastructure.fs index 053273340..8ccc38362 100644 --- a/propulsion-pruner/Infrastructure.fs +++ b/propulsion-pruner/Infrastructure.fs @@ -79,7 +79,6 @@ type Logging() = [] static member Configure(configuration : LoggerConfiguration, ?verbose) = configuration - .Destructure.FSharpTypes() .Enrich.FromLogContext() |> fun c -> if verbose = Some true then c.MinimumLevel.Debug() else c diff --git a/propulsion-pruner/Program.fs b/propulsion-pruner/Program.fs index d3533a166..6506fc47b 100644 --- a/propulsion-pruner/Program.fs +++ b/propulsion-pruner/Program.fs @@ -5,13 +5,11 @@ open Serilog open System exception MissingArg of message : string with override this.Message = this.message +let missingArg msg = raise (MissingArg msg) type Configuration(tryGet) = - let get key = - match tryGet key with - | Some value -> value - | None -> raise (MissingArg (sprintf "Missing Argument/Environment Variable %s" key)) + let get key = match tryGet key with Some value -> value | None -> missingArg $"Missing Argument/Environment Variable %s{key}" member _.CosmosConnection = get "EQUINOX_COSMOS_CONNECTION" member _.CosmosDatabase = get "EQUINOX_COSMOS_DATABASE" @@ -45,9 +43,9 @@ module Args = member val StatsInterval = TimeSpan.FromMinutes 1. member val StateInterval = TimeSpan.FromMinutes 5. 
member val Source : CosmosSourceArguments = - match a.TryGetSubCommand() with - | Some (SrcCosmos cosmos) -> (CosmosSourceArguments (c, cosmos)) - | _ -> raise (MissingArg "Must specify cosmos for Source") + match a.GetSubCommand() with + | SrcCosmos cosmos -> CosmosSourceArguments(c, cosmos) + | _ -> missingArg "Must specify cosmos for Source" member x.DeletionTarget = x.Source.Target member x.MonitoringParams() = let srcC = x.Source @@ -57,7 +55,7 @@ module Args = | None, None -> srcC.ConnectLeases(srcC.ContainerId + "-aux") | Some sc, None -> srcC.ConnectLeases(sc) | None, Some dc -> dstC.ConnectLeases(dc) - | Some _, Some _ -> raise (MissingArg "LeaseContainerSource and LeaseContainerDestination are mutually exclusive - can only store in one database") + | Some _, Some _ -> missingArg "LeaseContainerSource and LeaseContainerDestination are mutually exclusive - can only store in one database" Log.Information("Pruning... {dop} writers, max {maxReadAhead} batches read ahead", x.MaxWriters, x.MaxReadAhead) Log.Information("ChangeFeed {processorName} Leases Database {db} Container {container}. 
MaxItems limited to {maxItems}", x.ProcessorName, leases.Database.Id, leases.Id, Option.toNullable srcC.MaxItems) @@ -117,9 +115,9 @@ module Args = member x.ConnectLeases containerId = connector.CreateUninitialized(x.DatabaseId, containerId) member val Target = - match a.TryGetSubCommand() with - | Some (DstCosmos cosmos) -> CosmosSinkArguments (c, cosmos) - | _ -> raise (MissingArg "Must specify cosmos for Target") + match a.GetSubCommand() with + | DstCosmos cosmos -> CosmosSinkArguments(c, cosmos) + | _ -> missingArg "Must specify cosmos for Target" and [] CosmosSinkParameters = | [] ConnectionMode of Microsoft.Azure.Cosmos.ConnectionMode | [] Connection of string @@ -169,14 +167,15 @@ let build (args : Args.Arguments, log : ILogger) = let deletingEventsSink = let target = args.DeletionTarget if (target.DatabaseId, target.ContainerId) = (archive.DatabaseId, archive.ContainerId) then - raise (MissingArg "Danger! Can not prune a target based on itself") + missingArg "Danger! Can not prune a target based on itself" let context = target.Connect() |> Async.RunSynchronously |> CosmosStoreContext.create let eventsContext = Equinox.CosmosStore.Core.EventsContext(context, Config.log) CosmosStorePruner.Start(Log.Logger, args.MaxReadAhead, eventsContext, args.MaxWriters, args.StatsInterval, args.StateInterval) let source = let observer = CosmosStoreSource.CreateObserver(log.ForContext(), deletingEventsSink.StartIngester, Seq.collect Handler.selectPrunable) let monitored, leases, processorName, startFromTail, maxItems, lagFrequency = args.MonitoringParams() - CosmosStoreSource.Start(log, monitored, leases, processorName, observer, startFromTail, ?maxItems=maxItems, lagReportFreq=lagFrequency) + CosmosStoreSource.Start(log, monitored, leases, processorName, observer, + startFromTail = startFromTail, ?maxItems=maxItems, lagReportFreq=lagFrequency) deletingEventsSink, source // A typical app will likely have health checks etc, implying the wireup would be via 
`endpoints.MapMetrics()` and thus not use this ugly code directly @@ -186,17 +185,16 @@ let startMetricsServer port : IDisposable = Log.Information("Prometheus /metrics endpoint on port {port}", port) { new IDisposable with member x.Dispose() = ms.Stop(); (metricsServer :> IDisposable).Dispose() } -open Propulsion.CosmosStore.Infrastructure // AwaitKeyboardInterruptAsTaskCancelledException +open Propulsion.Internal // AwaitKeyboardInterruptAsTaskCanceledException let run (args : Args.Arguments) = async { let log = (Log.forGroup args.ProcessorName).ForContext() let sink, source = build (args, log) use _metricsServer : IDisposable = args.PrometheusPort |> Option.map startMetricsServer |> Option.toObj - return! Async.Parallel [ - Async.AwaitKeyboardInterruptAsTaskCancelledException() - source.AwaitWithStopOnCancellation() - sink.AwaitWithStopOnCancellation() - ] |> Async.Ignore + return! [| Async.AwaitKeyboardInterruptAsTaskCanceledException() + source.AwaitWithStopOnCancellation() + sink.AwaitWithStopOnCancellation() + |] |> Async.Parallel |> Async.Ignore } [] diff --git a/propulsion-pruner/Pruner.fsproj b/propulsion-pruner/Pruner.fsproj index 9ad1724e3..f37e1fd96 100644 --- a/propulsion-pruner/Pruner.fsproj +++ b/propulsion-pruner/Pruner.fsproj @@ -2,7 +2,7 @@ Exe - netcoreapp3.1 + net6.0 5 @@ -14,12 +14,11 @@ - - + - + - + From 4a9e4eb56e9325b3689e82e397495e9049113c62 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Tue, 13 Sep 2022 12:02:23 +0100 Subject: [PATCH 21/43] proConsumer --- propulsion-consumer/Consumer.fsproj | 2 +- propulsion-consumer/Examples.fs | 39 ++++++++++++++------------- propulsion-consumer/Infrastructure.fs | 5 ++-- propulsion-consumer/Program.fs | 6 ++--- 4 files changed, 25 insertions(+), 27 deletions(-) diff --git a/propulsion-consumer/Consumer.fsproj b/propulsion-consumer/Consumer.fsproj index d467a944f..630202bc0 100644 --- a/propulsion-consumer/Consumer.fsproj +++ b/propulsion-consumer/Consumer.fsproj @@ -2,7 +2,7 @@ Exe - 
netcoreapp3.1 + net6.0 5 diff --git a/propulsion-consumer/Examples.fs b/propulsion-consumer/Examples.fs index be883c96a..bd1d06bc3 100644 --- a/propulsion-consumer/Examples.fs +++ b/propulsion-consumer/Examples.fs @@ -1,6 +1,7 @@ namespace ConsumerTemplate open FsCodec +open Propulsion.Internal open Serilog open System open System.Collections.Concurrent @@ -10,13 +11,13 @@ open System.Threading module EventCodec = /// Uses the supplied codec to decode the supplied event record `x` (iff at LogEventLevel.Debug, detail fails to `log` citing the `stream` and content) - let tryDecode (codec : FsCodec.IEventCodec<_, _, _>) (log : ILogger) streamName (x : FsCodec.ITimelineEvent) = + let tryDecode (codec : IEventCodec<_, _, _>) (log : ILogger) streamName (x : ITimelineEvent) = match codec.TryDecode x with - | None -> + | ValueNone -> if log.IsEnabled Serilog.Events.LogEventLevel.Debug then - log.ForContext("event", System.Text.Encoding.UTF8.GetString(x.Data), true) + log.ForContext("event", System.Text.Encoding.UTF8.GetString(let d = x.Data in d.Span), true) .Debug("Codec {type} Could not decode {eventType} in {stream}", codec.GetType().FullName, x.EventType, streamName) - None + ValueNone | x -> x /// This more advanced sample shows processing >1 category of events, and maintaining derived state based on it @@ -49,7 +50,7 @@ module MultiStreams = /// Clearing of the list | Cleared interface TypeShape.UnionContract.IUnionContract - let codec = EventCodec.create() + let codec = EventCodec.gen let tryDecode = EventCodec.tryDecode codec // NB - these schemas reflect the actual storage formats and hence need to be versioned with care @@ -64,7 +65,7 @@ module MultiStreams = | Favorited of Favorited | Unfavorited of Unfavorited interface TypeShape.UnionContract.IUnionContract - let codec = EventCodec.create() + let codec = EventCodec.gen let tryDecode = EventCodec.tryDecode codec type Stat = Faves of int | Saves of int | OtherCategory of string * int @@ -77,22 +78,22 @@ module 
MultiStreams = let faves, saves = ConcurrentDictionary>(), ConcurrentDictionary() // The StreamProjector mechanism trims any events that have already been handled based on the in-memory state - let (|FavoritesEvents|SavedForLaterEvents|OtherCategory|) (streamName, span : Propulsion.Streams.StreamSpan) = - let decode tryDecode = span.events |> Seq.choose (tryDecode log streamName) |> Array.ofSeq + let (|FavoritesEvents|SavedForLaterEvents|OtherCategory|) (streamName, span : Propulsion.Streams.StreamSpan) = + let decode tryDecode = span |> Seq.chooseV (tryDecode log streamName) |> Array.ofSeq match streamName with - | FsCodec.StreamName.CategoryAndId (Favorites.Category, id) -> - let s = match faves.TryGetValue id with true, value -> value | false, _ -> new HashSet() + | StreamName.CategoryAndId (Favorites.Category, id) -> + let s = match faves.TryGetValue id with true, value -> value | false, _ -> HashSet() FavoritesEvents (id, s, decode Favorites.tryDecode) - | FsCodec.StreamName.CategoryAndId (SavedForLater.Category, id) -> + | StreamName.CategoryAndId (SavedForLater.Category, id) -> let s = match saves.TryGetValue id with true, value -> value | false, _ -> [] SavedForLaterEvents (id, s, decode SavedForLater.tryDecode) - | FsCodec.StreamName.CategoryAndId (categoryName, _) -> OtherCategory (categoryName, Seq.length span.events) + | StreamName.CategoryAndId (categoryName, _) -> OtherCategory (categoryName, Seq.length span) // each event is guaranteed to only be supplied once by virtue of having been passed through the Streams Scheduler - member _.Handle(streamName : StreamName, span : Propulsion.Streams.StreamSpan<_>) = async { + member _.Handle(struct (streamName : StreamName, span : Propulsion.Streams.StreamSpan<_>)) = async { match streamName, span with | OtherCategory (cat, count) -> - return Propulsion.Streams.SpanResult.AllProcessed, OtherCategory (cat, count) + return struct (Propulsion.Streams.SpanResult.AllProcessed, OtherCategory (cat, count)) | 
FavoritesEvents (id, s, xs) -> let folder (s : HashSet<_>) = function | Favorites.Favorited e -> s.Add(e.skuId) |> ignore; s @@ -123,7 +124,7 @@ module MultiStreams = inherit Propulsion.Streams.Stats(log, statsInterval, stateInterval) let mutable faves, saves = 0, 0 - let otherCats = Propulsion.Streams.Internal.CatStats() + let otherCats = Stats.CatStats() override _.HandleOk res = res |> function | Faves count -> faves <- faves + count @@ -142,7 +143,7 @@ module MultiStreams = log.Information(" Ignored Categories {ignoredCats}", Seq.truncate 5 otherCats.StatsDescending) otherCats.Clear() - let private parseStreamEvents(res : Confluent.Kafka.ConsumeResult<_, _>) : seq> = + let private parseStreamEvents(res : Confluent.Kafka.ConsumeResult<_, _>) : seq = Propulsion.Codec.NewtonsoftJson.RenderedSpan.parse res.Message.Value let start (config : FsKafka.KafkaConsumerConfig, degreeOfParallelism : int) = @@ -164,7 +165,7 @@ module MultiMessages = type Processor() = let log = Log.ForContext() let mutable favorited, unfavorited, saved, removed, cleared = 0, 0, 0, 0, 0 - let cats, keys = Propulsion.Streams.Internal.CatStats(), ConcurrentDictionary() + let cats, keys = Stats.CatStats(), ConcurrentDictionary() // `BatchedConsumer` holds a `Processor` instance per in-flight batch (there will likely be a batch in flight per partition assigned to this consumer) // and waits for the work to complete before calling this @@ -178,7 +179,7 @@ module MultiMessages = /// Handles various category / eventType / payload types as produced by Equinox.Tool member private _.Interpret(streamName : StreamName, spanJson) : seq = seq { let span = Propulsion.Codec.NewtonsoftJson.RenderedSpan.Parse spanJson - let decode tryDecode wrap = Propulsion.Codec.NewtonsoftJson.RenderedSpan.enum span |> Seq.choose (fun x -> x.event |> tryDecode log streamName |> Option.map wrap) + let decode tryDecode wrap = Propulsion.Codec.NewtonsoftJson.RenderedSpan.enum span |> Seq.chooseV (fun struct (_s, e) -> e |> 
tryDecode log streamName |> ValueOption.map wrap) match streamName with | StreamName.CategoryAndId (Favorites.Category, _) -> yield! decode Favorites.tryDecode Fave | StreamName.CategoryAndId (SavedForLater.Category, _) -> yield! decode SavedForLater.tryDecode Save @@ -193,7 +194,7 @@ module MultiMessages = | Save (SavedForLater.Added e) -> Interlocked.Add(&saved, e.skus.Length) |> ignore | Save (SavedForLater.Removed e) -> Interlocked.Add(&cleared, e.skus.Length) |> ignore | Save (SavedForLater.Merged e) -> Interlocked.Add(&saved, e.items.Length) |> ignore - | Save (SavedForLater.Cleared) -> Interlocked.Increment(&cleared) |> ignore + | Save SavedForLater.Cleared -> Interlocked.Increment(&cleared) |> ignore | OtherCat (cat, count) -> lock cats <| fun () -> cats.Ingest(cat, int64 count) | Unclassified messageKey -> keys.TryAdd(messageKey, ()) |> ignore diff --git a/propulsion-consumer/Infrastructure.fs b/propulsion-consumer/Infrastructure.fs index c41a61b9e..3d9d0178b 100644 --- a/propulsion-consumer/Infrastructure.fs +++ b/propulsion-consumer/Infrastructure.fs @@ -10,8 +10,8 @@ module EventCodec = open FsCodec.SystemTextJson let private defaultOptions = Options.Create() - let create<'t when 't :> TypeShape.UnionContract.IUnionContract> () = - Codec.Create<'t>(options = defaultOptions).ToByteArrayCodec() + let gen<'t when 't :> TypeShape.UnionContract.IUnionContract> = + Codec.Create<'t>(options = defaultOptions) module EnvVar = @@ -68,7 +68,6 @@ type Logging() = [] static member Configure(configuration : LoggerConfiguration, ?verbose) = configuration - .Destructure.FSharpTypes() .Enrich.FromLogContext() |> fun c -> if verbose = Some true then c.MinimumLevel.Debug() else c |> fun c -> let theme = Sinks.SystemConsole.Themes.AnsiConsoleTheme.Code diff --git a/propulsion-consumer/Program.fs b/propulsion-consumer/Program.fs index 1ec52fd45..586934796 100644 --- a/propulsion-consumer/Program.fs +++ b/propulsion-consumer/Program.fs @@ -4,13 +4,11 @@ open Serilog open 
System exception MissingArg of message : string with override this.Message = this.message +let missingArg msg = raise (MissingArg msg) type Configuration(tryGet) = - let get key = - match tryGet key with - | Some value -> value - | None -> raise (MissingArg (sprintf "Missing Argument/Environment Variable %s" key)) + let get key = match tryGet key with Some value -> value | None -> missingArg $"Missing Argument/Environment Variable %s{key}" member _.CosmosConnection = get "EQUINOX_COSMOS_CONNECTION" member _.CosmosDatabase = get "EQUINOX_COSMOS_DATABASE" From d5b250f4e3ffeb69174c6755ca360fbc51721693 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Tue, 13 Sep 2022 12:27:24 +0100 Subject: [PATCH 22/43] eqxTestbed --- equinox-testbed/Config.fs | 14 ++++++----- equinox-testbed/Program.fs | 26 +++++++++---------- equinox-testbed/Services.fs | 24 ++++++++---------- equinox-testbed/Storage.fs | 46 +++++++++++----------------------- equinox-testbed/Testbed.fsproj | 17 ++++++------- 5 files changed, 53 insertions(+), 74 deletions(-) diff --git a/equinox-testbed/Config.fs b/equinox-testbed/Config.fs index 123d1cfc2..ac07d5911 100644 --- a/equinox-testbed/Config.fs +++ b/equinox-testbed/Config.fs @@ -1,19 +1,21 @@ module TestbedTemplate.Config let log = Serilog.Log.ForContext("isMetric", true) -let createDecider stream = Equinox.Decider(log, stream, maxAttempts = 3) +let createDecider cat = Equinox.Decider.resolve log cat module EventCodec = open FsCodec.SystemTextJson let private defaultOptions = Options.Create() - let create<'t when 't :> TypeShape.UnionContract.IUnionContract> () = - Codec.Create<'t>(options = defaultOptions).ToByteArrayCodec() + let gen<'t when 't :> TypeShape.UnionContract.IUnionContract> = + Codec.Create<'t>(options = defaultOptions) + let genJe<'t when 't :> TypeShape.UnionContract.IUnionContract> = + CodecJsonElement.Create<'t>(options = defaultOptions) module Memory = - let create _codec initial fold store = + let create _codec initial fold store 
: Equinox.Category<_, _, _> = // While the actual prod codec can be used, the Box codec allows one to stub out the decoding on the basis that // nothing will be proved beyond what a complete roundtripping test per `module Aggregate` would already cover Equinox.MemoryStore.MemoryStoreCategory(store, FsCodec.Box.Codec.Create(), fold, initial) @@ -26,7 +28,7 @@ module Cosmos = module Esdb = let create codec initial fold cacheStrategy accessStrategy context = - Equinox.EventStore.EventStoreCategory(context, codec, fold, initial, ?caching = cacheStrategy, ?access = accessStrategy) + Equinox.EventStoreDb.EventStoreCategory(context, codec, fold, initial, ?caching = cacheStrategy, ?access = accessStrategy) [] type Store = @@ -37,5 +39,5 @@ type Store = | Cosmos of Equinox.CosmosStore.CosmosStoreContext * Equinox.CosmosStore.CachingStrategy * unfolds: bool //#endif //#if eventStore - | Esdb of Equinox.EventStore.EventStoreContext * Equinox.EventStore.CachingStrategy option * unfolds: bool + | Esdb of Equinox.EventStoreDb.EventStoreContext * Equinox.EventStoreDb.CachingStrategy option * unfolds: bool //#endif diff --git a/equinox-testbed/Program.fs b/equinox-testbed/Program.fs index d7eba8125..45d1e8e5a 100644 --- a/equinox-testbed/Program.fs +++ b/equinox-testbed/Program.fs @@ -80,40 +80,39 @@ module Args = | intervals -> seq { for i in intervals -> TimeSpan.FromSeconds(float i) } |> fun intervals -> [| yield duration; yield! 
intervals |] member x.ConfigureStore(log : ILogger, createStoreLog) = - match a.TryGetSubCommand() with + match a.GetSubCommand() with //#if memoryStore || (!cosmos && !eventStore) - | Some (Memory _) -> + | Memory _ -> log.Warning("Running transactions in-process against Volatile Store with storage options: {options:l}", x.Options) createStoreLog false, Storage.MemoryStore.config () //#endif //#if eventStore - | Some (Es sargs) -> + | Es sargs -> let storeLog = createStoreLog <| sargs.Contains Storage.EventStore.Parameters.VerboseStore log.Information("Running transactions in-process against EventStore with storage options: {options:l}", x.Options) storeLog, Storage.EventStore.config (log, storeLog) (x.Cache, x.Unfolds, x.BatchSize) sargs //#endif //#if cosmos - | Some (Cosmos sargs) -> + | Cosmos sargs -> let storeLog = createStoreLog <| sargs.Contains Storage.Cosmos.Parameters.VerboseStore log.Information("Running transactions in-process against CosmosDb with storage options: {options:l}", x.Options) - storeLog, Storage.Cosmos.config (x.Cache, x.Unfolds, x.BatchSize) (Storage.Cosmos.Arguments (c, sargs)) + storeLog, Storage.Cosmos.config (x.Cache, x.Unfolds, x.BatchSize) (Storage.Cosmos.Arguments(c, sargs)) //#endif #if ((!cosmos && !eventStore) || (cosmos && eventStore)) - | _ -> raise <| Storage.MissingArg (sprintf "Please identify a valid store: memory, es, cosmos") + | _ -> Storage.missingArg "Please identify a valid store: memory, es, cosmos" #endif #if eventStore - | _ -> raise <| Storage.MissingArg (sprintf "Please identify a valid store: memory, es") + | _ -> Storage.missingArg "Please identify a valid store: memory, es" #endif #if cosmos - | _ -> raise <| Storage.MissingArg (sprintf "Please identify a valid store: memory, cosmos") + | _ -> Storage.missingArg "Please identify a valid store: memory, cosmos" #endif let createStoreLog verbose verboseConsole maybeSeqEndpoint = let c = LoggerConfiguration() - .Destructure.FSharpTypes() let c = if verbose then 
c.MinimumLevel.Debug() else c //#if eventStore - let c = c.WriteTo.Sink(Equinox.EventStore.Log.InternalMetrics.Stats.LogSink()) + let c = c.WriteTo.Sink(Equinox.EventStoreDb.Log.InternalMetrics.Stats.LogSink()) //#endif //#if cosmos let c = c.WriteTo.Sink(Equinox.CosmosStore.Core.Log.InternalMetrics.Stats.LogSink()) @@ -169,7 +168,7 @@ module LoadTest = //#endif //#if eventStore | Config.Store.Esdb _ -> - Equinox.EventStore.Log.InternalMetrics.dump log + Equinox.EventStoreDb.Log.InternalMetrics.dump log //#endif //#if memory | _ -> () @@ -178,11 +177,10 @@ module LoadTest = let createDomainLog verbose verboseConsole maybeSeqEndpoint = let c = LoggerConfiguration() - .Destructure.FSharpTypes() .Enrich.FromLogContext() let c = if verbose then c.MinimumLevel.Debug() else c //#if eventStore - let c = c.WriteTo.Sink(Equinox.EventStore.Log.InternalMetrics.Stats.LogSink()) + let c = c.WriteTo.Sink(Equinox.EventStoreDb.Log.InternalMetrics.Stats.LogSink()) //#endif //#if cosmos let c = c.WriteTo.Sink(Equinox.CosmosStore.Core.Log.InternalMetrics.Stats.LogSink()) @@ -204,7 +202,7 @@ let main argv = let verbose = args.Contains Verbose let log = createDomainLog verbose verboseConsole maybeSeq let reportFilename = args.GetResult(LogFile, programName+".log") |> fun n -> System.IO.FileInfo(n).FullName - LoadTest.run log (verbose, verboseConsole, maybeSeq) reportFilename (TestArguments (Storage.Configuration EnvVar.tryGet, rargs)) + LoadTest.run log (verbose, verboseConsole, maybeSeq) reportFilename (TestArguments(Storage.Configuration EnvVar.tryGet, rargs)) | _ -> failwith "Please specify a valid subcommand :- run" 0 with :? 
Argu.ArguParseException as e -> eprintfn "%s" e.Message; 1 diff --git a/equinox-testbed/Services.fs b/equinox-testbed/Services.fs index c016bc542..f4ad0d818 100644 --- a/equinox-testbed/Services.fs +++ b/equinox-testbed/Services.fs @@ -5,7 +5,7 @@ open System module Domain = module Favorites = - let streamName (id : ClientId) = FsCodec.StreamName.create "Favorites" (ClientId.toString id) + let streamName (id : ClientId) = struct ("Favorites", ClientId.toString id) // NB - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -20,7 +20,7 @@ module Domain = | Favorited of Favorited | Unfavorited of Unfavorited interface TypeShape.UnionContract.IUnionContract - let codec = Config.EventCodec.create() + let codec, codecJe = Config.EventCodec.gen, Config.EventCodec.genJe module Fold = @@ -73,32 +73,28 @@ module Domain = let decider = resolve clientId decider.Query id - let create log resolve = - let resolve clientId = - let stream = resolve (streamName clientId) - Equinox.Decider(log, stream, maxAttempts=3) - Service(resolve) + let create cat = + streamName >> Config.createDecider cat |> Service module Config = let snapshot = Fold.isOrigin, Fold.toSnapshot - let private resolveStream = function + let private (|Category|) = function //#if memoryStore || (!cosmos && !eventStore) | Config.Store.Memory store -> - (Config.Memory.create Events.codec Fold.initial Fold.fold store).Resolve + Config.Memory.create Events.codec Fold.initial Fold.fold store //#endif //#if cosmos | Config.Store.Cosmos (context, caching, unfolds) -> let accessStrategy = if unfolds then Equinox.CosmosStore.AccessStrategy.Snapshot snapshot else Equinox.CosmosStore.AccessStrategy.Unoptimized - (Config.Cosmos.create Events.codec Fold.initial Fold.fold caching accessStrategy context).Resolve + Config.Cosmos.create Events.codecJe Fold.initial Fold.fold caching accessStrategy context //#endif //#if eventStore | Config.Store.Esdb 
(context, caching, unfolds) -> - let accessStrategy = if unfolds then Equinox.EventStore.AccessStrategy.RollingSnapshots snapshot |> Some else None - (Config.Esdb.create Events.codec Fold.initial Fold.fold caching accessStrategy context).Resolve + let accessStrategy = if unfolds then Equinox.EventStoreDb.AccessStrategy.RollingSnapshots snapshot |> Some else None + Config.Esdb.create Events.codec Fold.initial Fold.fold caching accessStrategy context //#endif - let private resolveDecider store = streamName >> resolveStream store >> Config.createDecider - let create = resolveDecider >> Service + let create (Category cat) = streamName >> Config.createDecider cat |> Service open Microsoft.Extensions.DependencyInjection diff --git a/equinox-testbed/Storage.fs b/equinox-testbed/Storage.fs index b9f13e559..13d60f40b 100644 --- a/equinox-testbed/Storage.fs +++ b/equinox-testbed/Storage.fs @@ -2,15 +2,14 @@ open Argu open System +open Equinox.CosmosStore.Core exception MissingArg of message : string with override this.Message = this.message +let missingArg msg = raise (MissingArg msg) type Configuration(tryGet : string -> string option) = - let get key = - match tryGet key with - | Some value -> value - | None -> raise (MissingArg (sprintf "Missing Argument/Environment Variable %s" key)) + let get key = match tryGet key with Some value -> value | None -> missingArg $"Missing Argument/Environment Variable %s{key}" member _.CosmosConnection = get "EQUINOX_COSMOS_CONNECTION" member _.CosmosDatabase = get "EQUINOX_COSMOS_DATABASE" @@ -87,50 +86,35 @@ module EventStore = | [] VerboseStore | [] Timeout of float | [] Retries of int - | [] Tcp - | [] Host of string - | [] Username of string - | [] Password of string - | [] ConcurrentOperationsLimit of int - | [] HeartbeatTimeout of float + | [] ConnectionString of string interface IArgParserTemplate with member a.Usage = a |> function | VerboseStore -> "include low level Store logging." 
| Timeout _ -> "specify operation timeout in seconds. Default: 5." | Retries _ -> "specify operation retries. Default: 1." - | Tcp -> "Request connecting direct to a TCP/IP endpoint. Default: Use Clustered mode with Gossip-driven discovery (unless environment variable EQUINOX_ES_TCP specifies 'true')." - | Host _ -> "TCP mode: specify a hostname to connect to directly. Clustered mode: use Gossip protocol against all A records returned from DNS query. (optional if environment variable EQUINOX_ES_HOST specified)" - | Username _ -> "specify a username. Default: admin." - | Password _ -> "specify a Password. Default: changeit." - | ConcurrentOperationsLimit _ -> "max concurrent operations in flight. Default: 5000." - | HeartbeatTimeout _ -> "specify heartbeat timeout in seconds. Default: 1.5." + | ConnectionString _ -> "esdb connection string" - open Equinox.EventStore + open Equinox.EventStoreDb type Arguments(a : ParseResults) = - member val Host = a.GetResult(Host, "localhost") - member val Credentials = a.GetResult(Username, "admin"), a.GetResult(Password, "changeit") + member val ConnectionString = a.GetResult(ConnectionString) member val Retries = a.GetResult(Retries, 1) member val Timeout = a.GetResult(Timeout, 5.) 
|> TimeSpan.FromSeconds - member val HeartbeatTimeout = a.GetResult(HeartbeatTimeout, 1.5) |> float |> TimeSpan.FromSeconds - member val ConcurrentOperationsLimit = a.GetResult(ConcurrentOperationsLimit, 5000) - let private connect (log: Serilog.ILogger) (dnsQuery, heartbeatTimeout, col) (username, password) (operationTimeout, operationRetries) = - Connector(username, password, reqTimeout=operationTimeout, reqRetries=operationRetries, - heartbeatTimeout=heartbeatTimeout, concurrentOperationsLimit=col, - log = (if log.IsEnabled(Serilog.Events.LogEventLevel.Debug) then Logger.SerilogVerbose log else Logger.SerilogNormal log), + let private connect (log: Serilog.ILogger) connectionString (operationTimeout, operationRetries) = + EventStoreConnector(reqTimeout=operationTimeout, reqRetries=operationRetries, + // heartbeatTimeout=heartbeatTimeout, concurrentOperationsLimit=col, + // log = (if log.IsEnabled(Serilog.Events.LogEventLevel.Debug) then Logger.SerilogVerbose log else Logger.SerilogNormal log), tags = ["M", Environment.MachineName; "I", Guid.NewGuid() |> string]) - .Establish("TestbedTemplate", Discovery.GossipDns dnsQuery, ConnectionStrategy.ClusterTwinPreferSlaveReads) + .Establish("TestbedTemplate", Discovery.ConnectionString connectionString, ConnectionStrategy.ClusterTwinPreferSlaveReads) let private createContext connection batchSize = EventStoreContext(connection, BatchingPolicy(maxBatchSize=batchSize)) let config (log: Serilog.ILogger, storeLog) (cache, unfolds, batchSize) (args : ParseResults) = let a = Arguments args let timeout, retries as operationThrottling = a.Timeout, a.Retries - let heartbeatTimeout = a.HeartbeatTimeout - let concurrentOperationsLimit = a.ConcurrentOperationsLimit - log.Information("EventStore {host} heartbeat: {heartbeat}s timeout: {timeout}s concurrent reqs: {concurrency} retries {retries}", - a.Host, heartbeatTimeout.TotalSeconds, timeout.TotalSeconds, concurrentOperationsLimit, retries) - let conn = connect storeLog (a.Host, 
heartbeatTimeout, concurrentOperationsLimit) a.Credentials operationThrottling |> Async.RunSynchronously + log.Information("EventStore {connectionString} timeout: {timeout}s retries {retries}", + a.ConnectionString, timeout.TotalSeconds, retries) + let conn = connect storeLog a.ConnectionString operationThrottling let cacheStrategy = if cache then let c = Equinox.Cache("TestbedTemplate", sizeMb = 50) diff --git a/equinox-testbed/Testbed.fsproj b/equinox-testbed/Testbed.fsproj index f476d52be..0bb1b001e 100644 --- a/equinox-testbed/Testbed.fsproj +++ b/equinox-testbed/Testbed.fsproj @@ -2,7 +2,7 @@ Exe - netcoreapp3.1 + net6.0 5 @@ -17,14 +17,13 @@ - - - - - - - - + + + + + + + From 247bc29322c48225c5cf45667127cea011bb17f6 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 16 Sep 2022 00:37:46 +0100 Subject: [PATCH 23/43] Eqx4rc1, Prp3b4.1 --- CHANGELOG.md | 7 + README.md | 8 +- equinox-patterns/Domain/Domain.fsproj | 4 +- equinox-testbed/Program.fs | 30 +- equinox-testbed/Storage.fs | 45 +- equinox-testbed/Testbed.fsproj | 8 +- equinox-web-csharp/Domain/Domain.csproj | 8 +- equinox-web-csharp/Web/EventStoreContext.cs | 2 +- equinox-web-csharp/Web/Web.csproj | 10 +- equinox-web/Domain/Domain.fsproj | 8 +- equinox-web/Web/Startup.fs | 2 +- equinox-web/Web/Web.fsproj | 2 +- feed-consumer/FeedConsumer.fsproj | 8 +- feed-consumer/Program.fs | 42 +- feed-source/Domain/Domain.fsproj | 4 +- feed-source/FeedApi/FeedApi.fsproj | 2 +- feed-source/FeedApi/Program.fs | 26 +- periodic-ingester/PeriodicIngester.fsproj | 6 +- periodic-ingester/Program.fs | 48 +- propulsion-archiver/Archiver.fsproj | 4 +- propulsion-archiver/Program.fs | 72 +- propulsion-consumer/Consumer.fsproj | 7 +- propulsion-consumer/Program.fs | 20 +- .../.template.config/template.json | 3 +- propulsion-cosmos-reactor/Program.fs | 54 +- propulsion-cosmos-reactor/Reactor.fsproj | 4 +- .../.template.config/template.json | 14 +- propulsion-projector/Args.fs | 217 +++++ propulsion-projector/Config.fs | 55 
++ propulsion-projector/Handler.fs | 58 +- propulsion-projector/Infrastructure.fs | 61 +- propulsion-projector/Program.fs | 506 +++-------- propulsion-projector/Projector.fsproj | 22 +- propulsion-projector/README.md | 26 +- propulsion-projector/SourceArgs.fs | 295 +++++++ propulsion-projector/SourceConfig.fs | 102 +++ propulsion-pruner/Program.fs | 68 +- propulsion-pruner/Pruner.fsproj | 4 +- .../.template.config/template.json | 32 +- propulsion-reactor/Args.fs | 224 +++++ propulsion-reactor/Config.fs | 50 +- propulsion-reactor/Contract.fs | 15 +- propulsion-reactor/Handler.fs | 55 +- propulsion-reactor/Infrastructure.fs | 83 +- propulsion-reactor/Ingester.fs | 21 +- propulsion-reactor/Program.fs | 791 +++++------------- propulsion-reactor/README.md | 31 +- propulsion-reactor/Reactor.fsproj | 32 +- propulsion-reactor/SourceArgs.fs | 396 +++++++++ propulsion-reactor/SourceConfig.fs | 102 +++ propulsion-reactor/Todo.fs | 36 +- propulsion-reactor/TodoSummary.fs | 23 +- propulsion-summary-consumer/Program.fs | 40 +- .../SummaryConsumer.fsproj | 5 +- propulsion-sync/Infrastructure.fs | 7 +- propulsion-sync/Program.fs | 223 ++--- propulsion-sync/Sync.fsproj | 11 +- propulsion-tracking-consumer/Program.fs | 40 +- .../TrackingConsumer.fsproj | 5 +- tests/Equinox.Templates.Tests/DotnetBuild.fs | 6 +- 60 files changed, 2456 insertions(+), 1634 deletions(-) create mode 100644 propulsion-projector/Args.fs create mode 100644 propulsion-projector/Config.fs create mode 100644 propulsion-projector/SourceArgs.fs create mode 100644 propulsion-projector/SourceConfig.fs create mode 100644 propulsion-reactor/Args.fs create mode 100644 propulsion-reactor/SourceArgs.fs create mode 100644 propulsion-reactor/SourceConfig.fs diff --git a/CHANGELOG.md b/CHANGELOG.md index c5cc04988..868951cf8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,7 +15,14 @@ The `Unreleased` section name is replaced by the expected version of next releas - `eqxShipping`: Use 
`Propulsion.DynamoStore`+`EventStoreDb`'s `AwaitCompletion` [#121](https://github.com/jet/dotnet-templates/pull/121) ### Changed + +- Target `Equinox` v `4.0.0`, `Propulsion` v `3.0.0`, `FsCodec` v `3.0.0`, `net6.0` [#122](https://github.com/jet/dotnet-templates/pull/122) + ### Removed + +- `eqxProjector --source cosmos --kafka --synthesizeSequence`: Removed custom mode [#122](https://github.com/jet/dotnet-templates/pull/122) +- `proReactor`: remove `--filter` (see `proSync`) and `--changeFeedOnly` (see `proReactorCosmos`) [#122](https://github.com/jet/dotnet-templates/pull/122) + ### Fixed diff --git a/README.md b/README.md index b75707e7a..fbd431a2a 100644 --- a/README.md +++ b/README.md @@ -25,8 +25,6 @@ The following templates focus specifically on the usage of `Propulsion` componen * `-k --parallelOnly` schedule kafka emission to operate in parallel at document (rather than accumulated span of events for a stream) level - * `-k --synthesizeSequence` parse documents, preserving input order as items are produced to Kafka - 2. `--source eventStore`: EventStoreDB's `$all` feed 3. `--source sqlStreamStore`: [`SqlStreamStore`](https://github.com/SQLStreamStore/SQLStreamStore)'s `$all` feed @@ -51,8 +49,7 @@ The specific behaviors carried out in reaction to incoming events often use `Equ Input options are: - 0. (default) dual mode CosmosDB ChangeFeed Processor and/or EventStore `$all` stream projector/reactor using `Propulsion.Cosmos`/`Propulsion.EventStore` depending on whether the program is run with `cosmos` or `es` arguments - 1. `--source changeFeedOnly`: removes `EventStore` wiring from commandline processing + 0. (default) `Propulsion.Cosmos`/`Propulsion.DynamoStore`/`Propulsion.EventStore` depending on whether the program is run with `cosmos`, `dynamo`, `es` arguments 2. 
`--source kafkaEventSpans`: changes source to be Kafka Event Spans, as emitted from `dotnet new proProjector --kafka` The reactive behavior template has the following options: @@ -62,9 +59,6 @@ The specific behaviors carried out in reaction to incoming events often use `Equ 2. `--kafka` (without `--blank`): adds Optional projection to Apache Kafka using [`Propulsion.Kafka`](https://github.com/jet/propulsion) (instead of ingesting into a local `Cosmos` store). Produces versioned [Summary Event](http://verraes.net/2019/05/patterns-for-decoupling-distsys-summary-event/) feed. 3. `--kafka --blank`: provides wiring for producing to Kafka, without summary reading logic etc - Miscellaneous options: - - `--filter` - include category filtering boilerplate - **NOTE At present, checkpoint storage when projecting from EventStore uses Azure CosmosDB - help wanted ;)** - [`feedSource`](feed-source/) - Boilerplate for an ASP.NET Core Web Api serving a feed of items stashed in an `Equinox.CosmosStore`. See `dotnet new feedConsumer` for the associated consumption logic diff --git a/equinox-patterns/Domain/Domain.fsproj b/equinox-patterns/Domain/Domain.fsproj index 05be57bca..c17ce5ab8 100644 --- a/equinox-patterns/Domain/Domain.fsproj +++ b/equinox-patterns/Domain/Domain.fsproj @@ -16,8 +16,8 @@ - - + + diff --git a/equinox-testbed/Program.fs b/equinox-testbed/Program.fs index 45d1e8e5a..80b4b0b52 100644 --- a/equinox-testbed/Program.fs +++ b/equinox-testbed/Program.fs @@ -18,7 +18,7 @@ module Args = | [] LogFile of string | [] Run of ParseResults interface IArgParserTemplate with - member a.Usage = a |> function + member p.Usage = p |> function | Verbose -> "Include low level logging regarding specific test runs." | VerboseConsole -> "Include low level test and store actions logging in on-screen output to console." 
| LocalSeq -> "Configures writing to a local Seq endpoint at http://localhost:5341, see https://getseq.net" @@ -45,7 +45,7 @@ module Args = | [] Cosmos of ParseResults //#endif interface IArgParserTemplate with - member a.Usage = a |> function + member p.Usage = p |> function | Name _ -> "specify which test to run. Default: Favorite." | Size _ -> "For `-t Todo`: specify random title length max size to use. Default 100." | Cached -> "employ a 50MB cache, wire in to Stream configuration." @@ -64,23 +64,23 @@ module Args = //#if cosmos | Cosmos _ -> "Run transactions in-process against CosmosDb." //#endif - and TestArguments(c : Storage.Configuration, a : ParseResults) = - let duration = a.GetResult(DurationM, 30.) |> TimeSpan.FromMinutes - member val Options = a.GetResults Cached @ a.GetResults Unfolds - member val Cache = a.Contains Cached - member val Unfolds = a.Contains Unfolds - member val BatchSize = a.GetResult(BatchSize, 500) - member val Test = a.GetResult(Name, Tests.Favorite) - member val ErrorCutoff = a.GetResult(ErrorCutoff, 10000L) - member val TestsPerSecond = a.GetResult(TestsPerSecond, 100) + and TestArguments(c : Storage.Configuration, p : ParseResults) = + let duration = p.GetResult(DurationM, 30.) |> TimeSpan.FromMinutes + member val Options = p.GetResults Cached @ p.GetResults Unfolds + member val Cache = p.Contains Cached + member val Unfolds = p.Contains Unfolds + member val BatchSize = p.GetResult(BatchSize, 500) + member val Test = p.GetResult(Name, Tests.Favorite) + member val ErrorCutoff = p.GetResult(ErrorCutoff, 10000L) + member val TestsPerSecond = p.GetResult(TestsPerSecond, 100) member val Duration = duration member val ReportingIntervals = - match a.GetResults(ReportIntervalS) with + match p.GetResults(ReportIntervalS) with | [] -> TimeSpan.FromSeconds 10.|> Seq.singleton | intervals -> seq { for i in intervals -> TimeSpan.FromSeconds(float i) } |> fun intervals -> [| yield duration; yield! 
intervals |] member x.ConfigureStore(log : ILogger, createStoreLog) = - match a.GetSubCommand() with + match p.GetSubCommand() with //#if memoryStore || (!cosmos && !eventStore) | Memory _ -> log.Warning("Running transactions in-process against Volatile Store with storage options: {options:l}", x.Options) @@ -88,13 +88,13 @@ module Args = //#endif //#if eventStore | Es sargs -> - let storeLog = createStoreLog <| sargs.Contains Storage.EventStore.Parameters.VerboseStore + let storeLog = createStoreLog <| sargs.Contains Storage.EventStore.Parameters.Verbose log.Information("Running transactions in-process against EventStore with storage options: {options:l}", x.Options) storeLog, Storage.EventStore.config (log, storeLog) (x.Cache, x.Unfolds, x.BatchSize) sargs //#endif //#if cosmos | Cosmos sargs -> - let storeLog = createStoreLog <| sargs.Contains Storage.Cosmos.Parameters.VerboseStore + let storeLog = createStoreLog <| sargs.Contains Storage.Cosmos.Parameters.Verbose log.Information("Running transactions in-process against CosmosDb with storage options: {options:l}", x.Options) storeLog, Storage.Cosmos.config (x.Cache, x.Unfolds, x.BatchSize) (Storage.Cosmos.Arguments(c, sargs)) //#endif diff --git a/equinox-testbed/Storage.fs b/equinox-testbed/Storage.fs index 13d60f40b..5faff203b 100644 --- a/equinox-testbed/Storage.fs +++ b/equinox-testbed/Storage.fs @@ -2,7 +2,6 @@ open Argu open System -open Equinox.CosmosStore.Core exception MissingArg of message : string with override this.Message = this.message let missingArg msg = raise (MissingArg msg) @@ -18,10 +17,10 @@ type Configuration(tryGet : string -> string option) = //#if (memoryStore || (!cosmos && !eventStore)) module MemoryStore = type [] Parameters = - | [] VerboseStore + | [] Verbose interface IArgParserTemplate with - member a.Usage = a |> function - | VerboseStore -> "Include low level Store logging." + member p.Usage = p |> function + | Verbose -> "Include low level Store logging." 
let config () = Config.Store.Memory (Equinox.MemoryStore.VolatileStore()) @@ -30,7 +29,7 @@ module MemoryStore = module Cosmos = type [] Parameters = - | [] VerboseStore + | [] Verbose | [] ConnectionMode of Microsoft.Azure.Cosmos.ConnectionMode | [] Timeout of float | [] Retries of int @@ -39,8 +38,8 @@ module Cosmos = | [] Database of string | [] Container of string interface IArgParserTemplate with - member a.Usage = a |> function - | VerboseStore -> "Include low level Store logging." + member p.Usage = p |> function + | Verbose -> "Include low level Store logging." | Timeout _ -> "specify operation timeout in seconds. Default: 5." | Retries _ -> "specify operation retries. Default: 1." | RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds. Default: 5." @@ -48,15 +47,15 @@ module Cosmos = | Connection _ -> "specify a connection string for a Cosmos account. (optional if environment variable EQUINOX_COSMOS_CONNECTION specified)" | Database _ -> "specify a database name for store. (optional if environment variable EQUINOX_COSMOS_DATABASE specified)" | Container _ -> "specify a container name for store. (optional if environment variable EQUINOX_COSMOS_CONTAINER specified)" - type Arguments(c : Configuration, a : ParseResults) = - let discovery = a.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString - let mode = a.TryGetResult ConnectionMode - let timeout = a.GetResult(Timeout, 5.) |> TimeSpan.FromSeconds - let retries = a.GetResult(Retries, 1) - let maxRetryWaitTime = a.GetResult(RetriesWaitTime, 5.) |> TimeSpan.FromSeconds + type Arguments(c : Configuration, p : ParseResults) = + let discovery = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString + let mode = p.TryGetResult ConnectionMode + let timeout = p.GetResult(Timeout, 5.) 
|> TimeSpan.FromSeconds + let retries = p.GetResult(Retries, 1) + let maxRetryWaitTime = p.GetResult(RetriesWaitTime, 5.) |> TimeSpan.FromSeconds let connector = Equinox.CosmosStore.CosmosStoreConnector(discovery, timeout, retries, maxRetryWaitTime, ?mode = mode) - let database = a.TryGetResult Database |> Option.defaultWith (fun () -> c.CosmosDatabase) - let container = a.TryGetResult Container |> Option.defaultWith (fun () -> c.CosmosContainer) + let database = p.TryGetResult Database |> Option.defaultWith (fun () -> c.CosmosDatabase) + let container = p.TryGetResult Container |> Option.defaultWith (fun () -> c.CosmosContainer) member _.Connect() = connector.ConnectStore("Main", database, container) @@ -83,24 +82,24 @@ module Cosmos = /// 2. & $env:ProgramData\chocolatey\bin\EventStore.ClusterNode.exe --gossip-on-single-node --discover-via-dns 0 --ext-http-port=30778 module EventStore = type [] Parameters = - | [] VerboseStore + | [] Verbose | [] Timeout of float | [] Retries of int | [] ConnectionString of string interface IArgParserTemplate with - member a.Usage = a |> function - | VerboseStore -> "include low level Store logging." + member p.Usage = p |> function + | Verbose -> "include low level Store logging." | Timeout _ -> "specify operation timeout in seconds. Default: 5." | Retries _ -> "specify operation retries. Default: 1." | ConnectionString _ -> "esdb connection string" open Equinox.EventStoreDb - type Arguments(a : ParseResults) = - member val ConnectionString = a.GetResult(ConnectionString) + type Arguments(p : ParseResults) = + member val ConnectionString = p.GetResult(ConnectionString) - member val Retries = a.GetResult(Retries, 1) - member val Timeout = a.GetResult(Timeout, 5.) |> TimeSpan.FromSeconds + member val Retries = p.GetResult(Retries, 1) + member val Timeout = p.GetResult(Timeout, 5.) 
|> TimeSpan.FromSeconds let private connect (log: Serilog.ILogger) connectionString (operationTimeout, operationRetries) = EventStoreConnector(reqTimeout=operationTimeout, reqRetries=operationRetries, @@ -108,7 +107,7 @@ module EventStore = // log = (if log.IsEnabled(Serilog.Events.LogEventLevel.Debug) then Logger.SerilogVerbose log else Logger.SerilogNormal log), tags = ["M", Environment.MachineName; "I", Guid.NewGuid() |> string]) .Establish("TestbedTemplate", Discovery.ConnectionString connectionString, ConnectionStrategy.ClusterTwinPreferSlaveReads) - let private createContext connection batchSize = EventStoreContext(connection, BatchingPolicy(maxBatchSize=batchSize)) + let private createContext connection batchSize = EventStoreContext(connection, batchSize = batchSize) let config (log: Serilog.ILogger, storeLog) (cache, unfolds, batchSize) (args : ParseResults) = let a = Arguments args let timeout, retries as operationThrottling = a.Timeout, a.Retries diff --git a/equinox-testbed/Testbed.fsproj b/equinox-testbed/Testbed.fsproj index 0bb1b001e..3e5072480 100644 --- a/equinox-testbed/Testbed.fsproj +++ b/equinox-testbed/Testbed.fsproj @@ -17,10 +17,10 @@ - - - - + + + + diff --git a/equinox-web-csharp/Domain/Domain.csproj b/equinox-web-csharp/Domain/Domain.csproj index ba2873ad1..729ac30ae 100755 --- a/equinox-web-csharp/Domain/Domain.csproj +++ b/equinox-web-csharp/Domain/Domain.csproj @@ -5,13 +5,13 @@ - + - - - + + + diff --git a/equinox-web-csharp/Web/EventStoreContext.cs b/equinox-web-csharp/Web/EventStoreContext.cs index d9dcfb6ba..1a2efec77 100644 --- a/equinox-web-csharp/Web/EventStoreContext.cs +++ b/equinox-web-csharp/Web/EventStoreContext.cs @@ -29,7 +29,7 @@ public EventStoreContext(EventStoreConfig config) var c = new EventStoreConnector(reqTimeout: TimeSpan.FromSeconds(5), reqRetries: 1); var conn = c.Establish("Twin", Discovery.NewConnectionString(config.ConnectionString), ConnectionStrategy.ClusterTwinPreferSlaveReads); - return 
Task.FromResult(new Equinox.EventStoreDb.EventStoreContext(conn, new BatchingPolicy(maxBatchSize: 500))); + return Task.FromResult(new Equinox.EventStoreDb.EventStoreContext(conn)); } public override Func<(string, string), DeciderCore> Resolve( diff --git a/equinox-web-csharp/Web/Web.csproj b/equinox-web-csharp/Web/Web.csproj index fe689925d..ceed8623c 100755 --- a/equinox-web-csharp/Web/Web.csproj +++ b/equinox-web-csharp/Web/Web.csproj @@ -5,13 +5,13 @@ - - - - + + + + - + diff --git a/equinox-web/Domain/Domain.fsproj b/equinox-web/Domain/Domain.fsproj index 324e36802..df8e447cd 100644 --- a/equinox-web/Domain/Domain.fsproj +++ b/equinox-web/Domain/Domain.fsproj @@ -17,10 +17,10 @@ - - - - + + + + diff --git a/equinox-web/Web/Startup.fs b/equinox-web/Web/Startup.fs index be1c270f8..390354df6 100644 --- a/equinox-web/Web/Startup.fs +++ b/equinox-web/Web/Startup.fs @@ -43,7 +43,7 @@ module Storage = let connect connectionString = let c = EventStoreConnector(reqTimeout=TimeSpan.FromSeconds 5., reqRetries=1) let conn = c.Establish("Twin", Discovery.ConnectionString connectionString, ConnectionStrategy.ClusterTwinPreferSlaveReads) - EventStoreContext(conn, BatchingPolicy(maxBatchSize=500)) + EventStoreContext(conn, batchSize = 500) //#endif //#if cosmos diff --git a/equinox-web/Web/Web.fsproj b/equinox-web/Web/Web.fsproj index 5d2330d1e..e58d363ca 100644 --- a/equinox-web/Web/Web.fsproj +++ b/equinox-web/Web/Web.fsproj @@ -11,7 +11,7 @@ - + diff --git a/feed-consumer/FeedConsumer.fsproj b/feed-consumer/FeedConsumer.fsproj index 7d79ffac4..e8cdba063 100644 --- a/feed-consumer/FeedConsumer.fsproj +++ b/feed-consumer/FeedConsumer.fsproj @@ -16,11 +16,11 @@ - - + + - - + + diff --git a/feed-consumer/Program.fs b/feed-consumer/Program.fs index 6b39e047e..98bc98695 100644 --- a/feed-consumer/Program.fs +++ b/feed-consumer/Program.fs @@ -34,7 +34,7 @@ module Args = | [] Cosmos of ParseResults interface IArgParserTemplate with - member a.Usage = a |> function + member p.Usage 
= p |> function | Verbose _ -> "request verbose logging." | Group _ -> "specify Api Consumer Group Id. (optional if environment variable API_CONSUMER_GROUP specified)" | SourceId _ -> "specify Api SourceId. Default: 'default'" @@ -43,20 +43,20 @@ module Args = | FcsDop _ -> "maximum number of FCs to process in parallel. Default: 4" | TicketsDop _ -> "maximum number of Tickets to process in parallel (per FC). Default: 4" | Cosmos _ -> "Cosmos Store parameters." - and Arguments(c : Configuration, a : ParseResults) = - member val Verbose = a.Contains Parameters.Verbose - member val GroupId = a.TryGetResult Group |> Option.defaultWith (fun () -> c.Group) - member val SourceId = a.GetResult(SourceId,"default") |> Propulsion.Feed.SourceId.parse - member val BaseUri = a.TryGetResult BaseUri |> Option.defaultWith (fun () -> c.BaseUri) |> Uri - member val MaxReadAhead = a.GetResult(MaxReadAhead,8) - member val FcsDop = a.TryGetResult FcsDop |> Option.defaultValue 4 - member val TicketsDop = a.TryGetResult TicketsDop |> Option.defaultValue 4 + and Arguments(c : Configuration, p : ParseResults) = + member val Verbose = p.Contains Parameters.Verbose + member val GroupId = p.TryGetResult Group |> Option.defaultWith (fun () -> c.Group) + member val SourceId = p.GetResult(SourceId,"default") |> Propulsion.Feed.SourceId.parse + member val BaseUri = p.TryGetResult BaseUri |> Option.defaultWith (fun () -> c.BaseUri) |> Uri + member val MaxReadAhead = p.GetResult(MaxReadAhead,8) + member val FcsDop = p.TryGetResult FcsDop |> Option.defaultValue 4 + member val TicketsDop = p.TryGetResult TicketsDop |> Option.defaultValue 4 member val StatsInterval = TimeSpan.FromMinutes 1. member val StateInterval = TimeSpan.FromMinutes 5. member val CheckpointInterval = TimeSpan.FromHours 1. member val TailSleepInterval = TimeSpan.FromSeconds 1. 
member val Cosmos : CosmosArguments = - match a.GetSubCommand() with + match p.GetSubCommand() with | Cosmos cosmos -> CosmosArguments(c, cosmos) | _ -> missingArg "Must specify cosmos" and [] CosmosParameters = @@ -69,7 +69,7 @@ module Args = | [] Retries of int | [] RetriesWaitTime of float interface IArgParserTemplate with - member a.Usage = a |> function + member p.Usage = p |> function | Verbose _ -> "request verbose logging." | ConnectionMode _ -> "override the connection mode. Default: Direct." | Connection _ -> "specify a connection string for a Cosmos account. (optional if environment variable EQUINOX_COSMOS_CONNECTION specified)" @@ -78,16 +78,16 @@ module Args = | Timeout _ -> "specify operation timeout in seconds (default: 30)." | Retries _ -> "specify operation retries (default: 9)." | RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds (default: 30)" - and CosmosArguments(c : Configuration, a : ParseResults) = - let discovery = a.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString - let mode = a.TryGetResult ConnectionMode - let timeout = a.GetResult(Timeout, 30.) |> TimeSpan.FromSeconds - let retries = a.GetResult(Retries, 9) - let maxRetryWaitTime = a.GetResult(RetriesWaitTime, 30.) |> TimeSpan.FromSeconds + and CosmosArguments(c : Configuration, p : ParseResults) = + let discovery = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString + let mode = p.TryGetResult ConnectionMode + let timeout = p.GetResult(Timeout, 30.) |> TimeSpan.FromSeconds + let retries = p.GetResult(Retries, 9) + let maxRetryWaitTime = p.GetResult(RetriesWaitTime, 30.) 
|> TimeSpan.FromSeconds let connector = Equinox.CosmosStore.CosmosStoreConnector(discovery, timeout, retries, maxRetryWaitTime, ?mode=mode) - let database = a.TryGetResult Database |> Option.defaultWith (fun () -> c.CosmosDatabase) - let container = a.TryGetResult Container |> Option.defaultWith (fun () -> c.CosmosContainer) - member val Verbose = a.Contains Verbose + let database = p.TryGetResult Database |> Option.defaultWith (fun () -> c.CosmosDatabase) + let container = p.TryGetResult Container |> Option.defaultWith (fun () -> c.CosmosContainer) + member val Verbose = p.Contains Verbose member _.Connect() = connector.ConnectStore("Main", database, container) /// Parse the commandline; can throw exceptions in response to missing arguments and/or `-h`/`--help` args @@ -99,7 +99,7 @@ module Args = let [] AppName = "FeedConsumerTemplate" let build (args : Args.Arguments) = - let cache = Equinox.Cache (AppName, sizeMb = 10) + let cache = Equinox.Cache(AppName, sizeMb = 10) let context = args.Cosmos.Connect() |> Async.RunSynchronously |> CosmosStoreContext.create let sink = diff --git a/feed-source/Domain/Domain.fsproj b/feed-source/Domain/Domain.fsproj index 25cf2bb40..38ca0b500 100644 --- a/feed-source/Domain/Domain.fsproj +++ b/feed-source/Domain/Domain.fsproj @@ -14,8 +14,8 @@ - - + + diff --git a/feed-source/FeedApi/FeedApi.fsproj b/feed-source/FeedApi/FeedApi.fsproj index ffa5c8aca..bf5396460 100644 --- a/feed-source/FeedApi/FeedApi.fsproj +++ b/feed-source/FeedApi/FeedApi.fsproj @@ -19,7 +19,7 @@ - + diff --git a/feed-source/FeedApi/Program.fs b/feed-source/FeedApi/Program.fs index 496647583..4f8eb9739 100644 --- a/feed-source/FeedApi/Program.fs +++ b/feed-source/FeedApi/Program.fs @@ -26,10 +26,10 @@ module Args = match a with | Verbose -> "request Verbose Logging. Default: off." | Cosmos _ -> "specify CosmosDB input parameters." 
- and Arguments(config : Configuration, a : ParseResults) = - member val Verbose = a.Contains Parameters.Verbose + and Arguments(config : Configuration, p : ParseResults) = + member val Verbose = p.Contains Parameters.Verbose member val Cosmos : CosmosArguments = - match a.GetSubCommand() with + match p.GetSubCommand() with | Parameters.Cosmos cosmos -> CosmosArguments(config, cosmos) | _ -> missingArg "Must specify cosmos" and [] CosmosParameters = @@ -42,7 +42,7 @@ module Args = | [] Retries of int | [] RetriesWaitTime of float interface IArgParserTemplate with - member a.Usage = a |> function + member p.Usage = p |> function | Verbose _ -> "request verbose logging." | ConnectionMode _ -> "override the connection mode. Default: Direct." | Connection _ -> "specify a connection string for a Cosmos account. (optional if environment variable EQUINOX_COSMOS_CONNECTION specified)" @@ -51,16 +51,16 @@ module Args = | Timeout _ -> "specify operation timeout in seconds. Default: 5." | Retries _ -> "specify operation retries. Default: 9." | RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds. Default: 30." - and CosmosArguments(c : Configuration, a : ParseResults) = - let discovery = a.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString - let mode = a.TryGetResult ConnectionMode - let timeout = a.GetResult(Timeout, 5.) |> TimeSpan.FromSeconds - let retries = a.GetResult(Retries, 9) - let maxRetryWaitTime = a.GetResult(RetriesWaitTime, 30.) |> TimeSpan.FromSeconds + and CosmosArguments(c : Configuration, p : ParseResults) = + let discovery = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString + let mode = p.TryGetResult ConnectionMode + let timeout = p.GetResult(Timeout, 5.) 
|> TimeSpan.FromSeconds + let retries = p.GetResult(Retries, 9) + let maxRetryWaitTime = p.GetResult(RetriesWaitTime, 30.) |> TimeSpan.FromSeconds let connector = Equinox.CosmosStore.CosmosStoreConnector(discovery, timeout, retries, maxRetryWaitTime, ?mode=mode) - let database = a.TryGetResult Database |> Option.defaultWith (fun () -> c.CosmosDatabase) - let container = a.TryGetResult Container |> Option.defaultWith (fun () -> c.CosmosContainer) - member val Verbose = a.Contains Verbose + let database = p.TryGetResult Database |> Option.defaultWith (fun () -> c.CosmosDatabase) + let container = p.TryGetResult Container |> Option.defaultWith (fun () -> c.CosmosContainer) + member val Verbose = p.Contains Verbose member _.Connect() = connector.ConnectStore("Main", database, container) /// Parse the commandline; can throw MissingArg or Argu.ArguParseException in response to missing arguments and/or `-h`/`--help` args diff --git a/periodic-ingester/PeriodicIngester.fsproj b/periodic-ingester/PeriodicIngester.fsproj index 28ac4b5fa..b720783c9 100644 --- a/periodic-ingester/PeriodicIngester.fsproj +++ b/periodic-ingester/PeriodicIngester.fsproj @@ -17,11 +17,11 @@ - + - - + + diff --git a/periodic-ingester/Program.fs b/periodic-ingester/Program.fs index 2485a76f9..08af485df 100644 --- a/periodic-ingester/Program.fs +++ b/periodic-ingester/Program.fs @@ -33,25 +33,25 @@ module Args = | [] Feed of ParseResults interface IArgParserTemplate with - member a.Usage = a |> function + member p.Usage = p |> function | Verbose _ -> "request verbose logging." | GroupId _ -> "consumer group name. Default: 'default'" | PrometheusPort _ -> "port from which to expose a Prometheus /metrics endpoint. Default: off (optional if environment variable PROMETHEUS_PORT specified)" | MaxReadAhead _ -> "maximum number of batches to let processing get ahead of completion. Default: 8." | TicketsDop _ -> "maximum number of Tickets to process in parallel. Default: 4" | Feed _ -> "Feed parameters." 
- and Arguments(c : Configuration, a : ParseResults) = - member val GroupId = a.GetResult(GroupId, "default") + and Arguments(c : Configuration, p : ParseResults) = + member val GroupId = p.GetResult(GroupId, "default") - member val Verbose = a.Contains Parameters.Verbose - member val PrometheusPort = a.TryGetResult PrometheusPort |> Option.orElseWith (fun () -> c.PrometheusPort) - member val MaxReadAhead = a.GetResult(MaxReadAhead, 8) - member val TicketsDop = a.GetResult(TicketsDop, 4) + member val Verbose = p.Contains Parameters.Verbose + member val PrometheusPort = p.TryGetResult PrometheusPort |> Option.orElseWith (fun () -> c.PrometheusPort) + member val MaxReadAhead = p.GetResult(MaxReadAhead, 8) + member val TicketsDop = p.GetResult(TicketsDop, 4) member val StatsInterval = TimeSpan.FromMinutes 1. member val StateInterval = TimeSpan.FromMinutes 5. member val CheckpointInterval = TimeSpan.FromHours 1. member val Feed : FeedArguments = - match a.GetSubCommand() with + match p.GetSubCommand() with | Feed feed -> FeedArguments(c, feed) | _ -> missingArg "Must specify feed" and [] FeedParameters = @@ -59,16 +59,16 @@ module Args = | [] BaseUri of string | [] Cosmos of ParseResults interface IArgParserTemplate with - member a.Usage = a |> function + member p.Usage = p |> function | Group _ -> "specify Api Consumer Group Id. (optional if environment variable API_CONSUMER_GROUP specified)" | BaseUri _ -> "specify Api endpoint. (optional if environment variable API_BASE_URI specified)" | Cosmos _ -> "Cosmos Store parameters." 
- and FeedArguments(c : Configuration, a : ParseResults) = - member val SourceId = a.TryGetResult Group |> Option.defaultWith (fun () -> c.Group) |> Propulsion.Feed.SourceId.parse - member val BaseUri = a.TryGetResult BaseUri |> Option.defaultWith (fun () -> c.BaseUri) |> Uri + and FeedArguments(c : Configuration, p : ParseResults) = + member val SourceId = p.TryGetResult Group |> Option.defaultWith (fun () -> c.Group) |> Propulsion.Feed.SourceId.parse + member val BaseUri = p.TryGetResult BaseUri |> Option.defaultWith (fun () -> c.BaseUri) |> Uri member val RefreshInterval = TimeSpan.FromHours 1. member val Cosmos : CosmosArguments = - match a.GetSubCommand() with + match p.GetSubCommand() with | Cosmos cosmos -> CosmosArguments(c, cosmos) | _ -> missingArg "Must specify cosmos" and [] CosmosParameters = @@ -81,7 +81,7 @@ module Args = | [] Retries of int | [] RetriesWaitTime of float interface IArgParserTemplate with - member a.Usage = a |> function + member p.Usage = p |> function | Verbose _ -> "request verbose logging." | ConnectionMode _ -> "override the connection mode. Default: Direct." | Connection _ -> "specify a connection string for a Cosmos account. (optional if environment variable EQUINOX_COSMOS_CONNECTION specified)" @@ -90,16 +90,16 @@ module Args = | Timeout _ -> "specify operation timeout in seconds (default: 30)." | Retries _ -> "specify operation retries (default: 9)." | RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds (default: 30)" - and CosmosArguments(c : Configuration, a : ParseResults) = - let discovery = a.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString - let mode = a.TryGetResult ConnectionMode - let timeout = a.GetResult(Timeout, 30.) |> TimeSpan.FromSeconds - let retries = a.GetResult(Retries, 9) - let maxRetryWaitTime = a.GetResult(RetriesWaitTime, 30.) 
|> TimeSpan.FromSeconds + and CosmosArguments(c : Configuration, p : ParseResults) = + let discovery = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString + let mode = p.TryGetResult ConnectionMode + let timeout = p.GetResult(Timeout, 30.) |> TimeSpan.FromSeconds + let retries = p.GetResult(Retries, 9) + let maxRetryWaitTime = p.GetResult(RetriesWaitTime, 30.) |> TimeSpan.FromSeconds let connector = Equinox.CosmosStore.CosmosStoreConnector(discovery, timeout, retries, maxRetryWaitTime, ?mode=mode) - let database = a.TryGetResult Database |> Option.defaultWith (fun () -> c.CosmosDatabase) - let container = a.TryGetResult Container |> Option.defaultWith (fun () -> c.CosmosContainer) - member val Verbose = a.Contains Verbose + let database = p.TryGetResult Database |> Option.defaultWith (fun () -> c.CosmosDatabase) + let container = p.TryGetResult Container |> Option.defaultWith (fun () -> c.CosmosContainer) + member val Verbose = p.Contains Verbose member _.Connect() = connector.ConnectStore("Main", database, container) /// Parse the commandline; can throw exceptions in response to missing arguments and/or `-h`/`--help` args @@ -111,7 +111,7 @@ module Args = let [] AppName = "PeriodicIngesterTemplate" let build (args : Args.Arguments) = - let cache = Equinox.Cache (AppName, sizeMb = 10) + let cache = Equinox.Cache(AppName, sizeMb = 10) let feed = args.Feed let context = feed.Cosmos.Connect() |> Async.RunSynchronously |> CosmosStoreContext.create diff --git a/propulsion-archiver/Archiver.fsproj b/propulsion-archiver/Archiver.fsproj index 41b3691b3..1df74569c 100644 --- a/propulsion-archiver/Archiver.fsproj +++ b/propulsion-archiver/Archiver.fsproj @@ -14,8 +14,8 @@ - - + + diff --git a/propulsion-archiver/Program.fs b/propulsion-archiver/Program.fs index 807874dc7..875e7e582 100644 --- a/propulsion-archiver/Program.fs +++ b/propulsion-archiver/Program.fs @@ -30,7 +30,7 @@ module Args = | [] 
MaxKib of int | [] SrcCosmos of ParseResults interface IArgParserTemplate with - member a.Usage = a |> function + member p.Usage = p |> function | Verbose -> "request Verbose Logging. Default: off" | SyncVerbose -> "request Logging for Sync operations (Writes). Default: off" | PrometheusPort _ -> "port from which to expose a Prometheus /metrics endpoint. Default: off" @@ -40,18 +40,18 @@ module Args = | RuThreshold _ -> "minimum request charge required to log. Default: 0" | MaxKib _ -> "max KiB to submit to Sync operation. Default: 512" | SrcCosmos _ -> "Cosmos input parameters." - and Arguments(c : Configuration, a : ParseResults) = - member val Verbose = a.Contains Parameters.Verbose - member val SyncLogging = a.Contains SyncVerbose, a.TryGetResult RuThreshold - member val PrometheusPort = a.TryGetResult PrometheusPort - member val ProcessorName = a.GetResult ProcessorName - member val MaxReadAhead = a.GetResult(MaxReadAhead, 32) - member val MaxWriters = a.GetResult(MaxWriters, 4) - member val MaxBytes = a.GetResult(MaxKib, 512) * 1024 + and Arguments(c : Configuration, p : ParseResults) = + member val Verbose = p.Contains Parameters.Verbose + member val SyncLogging = p.Contains SyncVerbose, p.TryGetResult RuThreshold + member val PrometheusPort = p.TryGetResult PrometheusPort + member val ProcessorName = p.GetResult ProcessorName + member val MaxReadAhead = p.GetResult(MaxReadAhead, 32) + member val MaxWriters = p.GetResult(MaxWriters, 4) + member val MaxBytes = p.GetResult(MaxKib, 512) * 1024 member val StatsInterval = TimeSpan.FromMinutes 1. member val StateInterval = TimeSpan.FromMinutes 5. 
member val Source : CosmosSourceArguments = - match a.GetSubCommand() with + match p.GetSubCommand() with | SrcCosmos cosmos -> CosmosSourceArguments(c, cosmos) | _ -> missingArg "Must specify cosmos for SrcCosmos" member x.DestinationArchive = x.Source.Archive @@ -73,7 +73,7 @@ module Args = and [] CosmosSourceParameters = | [] Verbose | [] FromTail - | [] MaxItems of int + | [] MaxItems of int | [] LagFreqM of float | [] LeaseContainer of string @@ -87,7 +87,7 @@ module Args = | [] DstCosmos of ParseResults interface IArgParserTemplate with - member a.Usage = a |> function + member p.Usage = p |> function | Verbose -> "request Verbose Change Feed Processor Logging. Default: off" | FromTail -> "(iff the Consumer Name is fresh) - force skip to present Position. Default: Never skip an event." | MaxItems _ -> "maximum item count to request from feed. Default: unlimited" @@ -103,29 +103,29 @@ module Args = | RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds. Default: 30." | DstCosmos _ -> "CosmosDb Sink parameters." - and CosmosSourceArguments(c : Configuration, a : ParseResults) = - let discovery = a.TryGetResult CosmosSourceParameters.Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString - let mode = a.TryGetResult CosmosSourceParameters.ConnectionMode - let timeout = a.GetResult(CosmosSourceParameters.Timeout, 5.) |> TimeSpan.FromSeconds - let retries = a.GetResult(CosmosSourceParameters.Retries, 5) - let maxRetryWaitTime = a.GetResult(CosmosSourceParameters.RetriesWaitTime, 30.) 
|> TimeSpan.FromSeconds + and CosmosSourceArguments(c : Configuration, p : ParseResults) = + let discovery = p.TryGetResult CosmosSourceParameters.Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString + let mode = p.TryGetResult CosmosSourceParameters.ConnectionMode + let timeout = p.GetResult(CosmosSourceParameters.Timeout, 5.) |> TimeSpan.FromSeconds + let retries = p.GetResult(CosmosSourceParameters.Retries, 5) + let maxRetryWaitTime = p.GetResult(CosmosSourceParameters.RetriesWaitTime, 30.) |> TimeSpan.FromSeconds let connector = Equinox.CosmosStore.CosmosStoreConnector(discovery, timeout, retries, maxRetryWaitTime, ?mode = mode) - let database = a.TryGetResult CosmosSourceParameters.Database |> Option.defaultWith (fun () -> c.CosmosDatabase) - member val ContainerId = a.GetResult CosmosSourceParameters.Container + let database = p.TryGetResult CosmosSourceParameters.Database |> Option.defaultWith (fun () -> c.CosmosDatabase) + member val ContainerId = p.GetResult CosmosSourceParameters.Container member x.MonitoredContainer() = connector.ConnectMonitored(database, x.ContainerId) - member val FromTail = a.Contains CosmosSourceParameters.FromTail - member val MaxItems = a.TryGetResult MaxItems - member val LagFrequency : TimeSpan = a.GetResult(LagFreqM, 1.) |> TimeSpan.FromMinutes - member val LeaseContainer = a.TryGetResult CosmosSourceParameters.LeaseContainer - member val Verbose = a.Contains Verbose + member val FromTail = p.Contains CosmosSourceParameters.FromTail + member val MaxItems = p.TryGetResult MaxItems + member val LagFrequency : TimeSpan = p.GetResult(LagFreqM, 1.) 
|> TimeSpan.FromMinutes + member val LeaseContainer = p.TryGetResult CosmosSourceParameters.LeaseContainer + member val Verbose = p.Contains Verbose member private _.ConnectLeases containerId = connector.CreateUninitialized(database, containerId) member x.ConnectLeases() = match x.LeaseContainer with | None -> x.ConnectLeases(x.ContainerId + "-aux") | Some sc -> x.ConnectLeases(sc) member val Archive = - match a.GetSubCommand() with + match p.GetSubCommand() with | DstCosmos cosmos -> CosmosSinkArguments(c, cosmos) | _ -> missingArg "Must specify cosmos for Sink" and [] CosmosSinkParameters = @@ -138,7 +138,7 @@ module Args = | [] Retries of int | [] RetriesWaitTime of float interface IArgParserTemplate with - member a.Usage = a |> function + member p.Usage = p |> function | ConnectionMode _ -> "override the connection mode. Default: Direct." | Connection _ -> "specify a connection string for a Cosmos account. (optional if environment variable EQUINOX_COSMOS_CONNECTION specified)" | Database _ -> "specify a database name for Cosmos account. (optional if environment variable EQUINOX_COSMOS_DATABASE specified)" @@ -147,18 +147,18 @@ module Args = | Timeout _ -> "specify operation timeout in seconds. Default: 5." | Retries _ -> "specify operation retries. Default: 0." | RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds. Default: 5." - and CosmosSinkArguments(c : Configuration, a : ParseResults) = - let discovery = a.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString - let mode = a.TryGetResult ConnectionMode - let timeout = a.GetResult(CosmosSinkParameters.Timeout, 5.) |> TimeSpan.FromSeconds - let retries = a.GetResult(CosmosSinkParameters.Retries, 0) - let maxRetryWaitTime = a.GetResult(RetriesWaitTime, 5.) 
|> TimeSpan.FromSeconds + and CosmosSinkArguments(c : Configuration, p : ParseResults) = + let discovery = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString + let mode = p.TryGetResult ConnectionMode + let timeout = p.GetResult(CosmosSinkParameters.Timeout, 5.) |> TimeSpan.FromSeconds + let retries = p.GetResult(CosmosSinkParameters.Retries, 0) + let maxRetryWaitTime = p.GetResult(RetriesWaitTime, 5.) |> TimeSpan.FromSeconds let connector = Equinox.CosmosStore.CosmosStoreConnector(discovery, timeout, retries, maxRetryWaitTime, ?mode = mode) - let database = a.TryGetResult Database |> Option.defaultWith (fun () -> c.CosmosDatabase) - let container = a.TryGetResult Container |> Option.defaultWith (fun () -> c.CosmosContainer) + let database = p.TryGetResult Database |> Option.defaultWith (fun () -> c.CosmosDatabase) + let container = p.TryGetResult Container |> Option.defaultWith (fun () -> c.CosmosContainer) member _.Connect() = connector.ConnectStore("Destination", database, container) - member val LeaseContainerId = a.TryGetResult LeaseContainer + member val LeaseContainerId = p.TryGetResult LeaseContainer member _.ConnectLeases containerId = connector.CreateUninitialized(database, containerId) /// Parse the commandline; can throw exceptions in response to missing arguments and/or `-h`/`--help` args diff --git a/propulsion-consumer/Consumer.fsproj b/propulsion-consumer/Consumer.fsproj index 630202bc0..0a842cde2 100644 --- a/propulsion-consumer/Consumer.fsproj +++ b/propulsion-consumer/Consumer.fsproj @@ -15,10 +15,9 @@ - - - - + + + diff --git a/propulsion-consumer/Program.fs b/propulsion-consumer/Program.fs index 586934796..77b97cfa3 100644 --- a/propulsion-consumer/Program.fs +++ b/propulsion-consumer/Program.fs @@ -32,7 +32,7 @@ module Args = | [] Verbose interface IArgParserTemplate with - member a.Usage = a |> function + member p.Usage = p |> function | Broker _ -> "specify Kafka 
Broker, in host:port format. (optional if environment variable PROPULSION_KAFKA_BROKER specified)." | Topic _ -> "specify Kafka Topic name. (optional if environment variable PROPULSION_KAFKA_TOPIC specified)." | Group _ -> "specify Kafka Consumer Group Id. (optional if environment variable PROPULSION_KAFKA_GROUP specified)." @@ -41,15 +41,15 @@ module Args = | MaxDop _ -> "maximum number of items to process in parallel. Default: 8" | Verbose _ -> "request verbose logging." - type Arguments(c : Configuration, a : ParseResults) = - member val Broker = a.TryGetResult Broker |> Option.defaultWith (fun () -> c.Broker) - member val Topic = a.TryGetResult Topic |> Option.defaultWith (fun () -> c.Topic) - member val Group = a.TryGetResult Group |> Option.defaultWith (fun () -> c.Group) - member val MaxInFlightBytes = a.GetResult(MaxInflightMb, 10.) * 1024. * 1024. |> int64 - member val LagFrequency = a.TryGetResult LagFreqM |> Option.map TimeSpan.FromMinutes - - member val MaxDop = a.GetResult(MaxDop, 8) - member val Verbose = a.Contains Verbose + type Arguments(c : Configuration, p : ParseResults) = + member val Broker = p.TryGetResult Broker |> Option.defaultWith (fun () -> c.Broker) + member val Topic = p.TryGetResult Topic |> Option.defaultWith (fun () -> c.Topic) + member val Group = p.TryGetResult Group |> Option.defaultWith (fun () -> c.Group) + member val MaxInFlightBytes = p.GetResult(MaxInflightMb, 10.) * 1024. * 1024. 
|> int64 + member val LagFrequency = p.TryGetResult LagFreqM |> Option.map TimeSpan.FromMinutes + + member val MaxDop = p.GetResult(MaxDop, 8) + member val Verbose = p.Contains Verbose /// Parse the commandline; can throw exceptions in response to missing arguments and/or `-h`/`--help` args let parse tryGetConfigValue argv : Arguments = diff --git a/propulsion-cosmos-reactor/.template.config/template.json b/propulsion-cosmos-reactor/.template.config/template.json index 0fe6f5bbe..e821f84a2 100644 --- a/propulsion-cosmos-reactor/.template.config/template.json +++ b/propulsion-cosmos-reactor/.template.config/template.json @@ -11,7 +11,8 @@ "Reactor" ], "tags": { - "language": "F#" + "language": "F#", + "type": "project" }, "identity": "Propulsion.Template.CosmosReactor", "name": "Propulsion Cosmos Reactor", diff --git a/propulsion-cosmos-reactor/Program.fs b/propulsion-cosmos-reactor/Program.fs index c42b0fa43..aa55ab01e 100644 --- a/propulsion-cosmos-reactor/Program.fs +++ b/propulsion-cosmos-reactor/Program.fs @@ -26,25 +26,25 @@ module Args = | [] MaxWriters of int | [] Cosmos of ParseResults interface IArgParserTemplate with - member a.Usage = a |> function + member p.Usage = p |> function | Verbose -> "request Verbose Logging. Default: off." | PrometheusPort _ -> "port from which to expose a Prometheus /metrics endpoint. Default: off." | ProcessorName _ -> "Projector consumer group name." | MaxReadAhead _ -> "maximum number of batches to let processing get ahead of completion. Default: 2." | MaxWriters _ -> "maximum number of concurrent streams on which to process at any time. Default: 8." 
| Cosmos _ -> "specify CosmosDB input parameters" - and Arguments(c : Configuration, a : ParseResults) = - let maxReadAhead = a.GetResult(MaxReadAhead, 2) - let maxConcurrentProcessors = a.GetResult(MaxWriters, 8) - member val Verbose = a.Contains Parameters.Verbose - member val PrometheusPort = a.TryGetResult PrometheusPort - member val ProcessorName = a.GetResult ProcessorName + and Arguments(c : Configuration, p : ParseResults) = + let maxReadAhead = p.GetResult(MaxReadAhead, 2) + let maxConcurrentProcessors = p.GetResult(MaxWriters, 8) + member val Verbose = p.Contains Parameters.Verbose + member val PrometheusPort = p.TryGetResult PrometheusPort + member val ProcessorName = p.GetResult ProcessorName member x.ProcessorParams() = Log.Information("Reacting... {processorName}, reading {maxReadAhead} ahead, {dop} writers", x.ProcessorName, maxReadAhead, maxConcurrentProcessors) (x.ProcessorName, maxReadAhead, maxConcurrentProcessors) member val StatsInterval = TimeSpan.FromMinutes 1. member val StateInterval = TimeSpan.FromMinutes 5. - member val Cosmos = CosmosArguments(c, a.GetResult Cosmos) + member val Cosmos = CosmosArguments(c, p.GetResult Cosmos) and [] CosmosParameters = | [] ConnectionMode of Microsoft.Azure.Cosmos.ConnectionMode | [] Connection of string @@ -57,10 +57,10 @@ module Args = | [] Verbose | [] LeaseContainer of string | [] FromTail - | [] MaxItems of int + | [] MaxItems of int | [] LagFreqM of float interface IArgParserTemplate with - member a.Usage = a |> function + member p.Usage = p |> function | ConnectionMode _ -> "override the connection mode. Default: Direct." | Connection _ -> "specify a connection string for a Cosmos account. (optional if environment variable EQUINOX_COSMOS_CONNECTION specified)" | Database _ -> "specify a database name for store. (optional if environment variable EQUINOX_COSMOS_DATABASE specified)" @@ -74,21 +74,21 @@ module Args = | FromTail _ -> "(iff the Consumer Name is fresh) - force skip to present Position. 
Default: Never skip an event." | MaxItems _ -> "maximum item count to request from the feed. Default: unlimited." | LagFreqM _ -> "specify frequency (minutes) to dump lag stats. Default: 1" - and CosmosArguments(c : Configuration, a : ParseResults) = - let discovery = a.TryGetResult CosmosParameters.Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString - let mode = a.TryGetResult ConnectionMode - let timeout = a.GetResult(Timeout, 5.) |> TimeSpan.FromSeconds - let retries = a.GetResult(Retries, 1) - let maxRetryWaitTime = a.GetResult(RetriesWaitTime, 5.) |> TimeSpan.FromSeconds + and CosmosArguments(c : Configuration, p : ParseResults) = + let discovery = p.TryGetResult CosmosParameters.Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString + let mode = p.TryGetResult ConnectionMode + let timeout = p.GetResult(Timeout, 5.) |> TimeSpan.FromSeconds + let retries = p.GetResult(Retries, 1) + let maxRetryWaitTime = p.GetResult(RetriesWaitTime, 5.) |> TimeSpan.FromSeconds let connector = Equinox.CosmosStore.CosmosStoreConnector(discovery, timeout, retries, maxRetryWaitTime, ?mode = mode) - let database = a.TryGetResult Database |> Option.defaultWith (fun () -> c.CosmosDatabase) - let containerId = a.TryGetResult Container |> Option.defaultWith (fun () -> c.CosmosContainer) - - let leaseContainerId = a.GetResult(LeaseContainer, containerId + "-aux") - let fromTail = a.Contains FromTail - let maxItems = a.TryGetResult MaxItems - let lagFrequency = a.GetResult(LagFreqM, 1.) 
|> TimeSpan.FromMinutes - member _.Verbose = a.Contains Verbose + let database = p.TryGetResult Database |> Option.defaultWith (fun () -> c.CosmosDatabase) + let containerId = p.TryGetResult Container |> Option.defaultWith (fun () -> c.CosmosContainer) + + let leaseContainerId = p.GetResult(LeaseContainer, containerId + "-aux") + let fromTail = p.Contains FromTail + let maxItems = p.TryGetResult MaxItems + let lagFrequency = p.GetResult(LagFreqM, 1.) |> TimeSpan.FromMinutes + member _.Verbose = p.Contains Verbose member private _.ConnectLeases() = connector.CreateUninitialized(database, leaseContainerId) member x.MonitoringParams() = let leases : Microsoft.Azure.Cosmos.Container = x.ConnectLeases() @@ -112,14 +112,14 @@ let build (args : Args.Arguments) = let sink = let store = let context = client |> CosmosStoreContext.create - let cache = Equinox.Cache (AppName, sizeMb = 10) + let cache = Equinox.Cache(AppName, sizeMb = 10) Config.Store.Cosmos (context, cache) let stats = Reactor.Stats(Log.Logger, args.StatsInterval, args.StateInterval) let handle = Reactor.Config.createHandler store Propulsion.Streams.Default.Config.Start(Log.Logger, maxReadAhead, maxConcurrentStreams, handle, stats, args.StatsInterval) let source = - let parseFeedDoc : _ -> Propulsion.Streams.StreamEvent<_> seq = Seq.collect (Propulsion.CosmosStore.EquinoxSystemTextJsonParser.enumStreamEvents Reactor.categoryFilter) - let observer = Propulsion.CosmosStore.CosmosStoreSource.CreateObserver(Log.Logger, sink.StartIngester, parseFeedDoc) + let parseFeedDoc = Propulsion.CosmosStore.EquinoxSystemTextJsonParser.enumStreamEvents Reactor.categoryFilter + let observer = Propulsion.CosmosStore.CosmosStoreSource.CreateObserver(Log.Logger, sink.StartIngester, Seq.collect parseFeedDoc) let leases, startFromTail, maxItems, lagFrequency = args.Cosmos.MonitoringParams() Propulsion.CosmosStore.CosmosStoreSource.Start(Log.Logger, monitored, leases, processorName, observer, startFromTail = startFromTail, 
?maxItems=maxItems, lagReportFreq=lagFrequency) diff --git a/propulsion-cosmos-reactor/Reactor.fsproj b/propulsion-cosmos-reactor/Reactor.fsproj index a6c4ab687..91170fb6d 100644 --- a/propulsion-cosmos-reactor/Reactor.fsproj +++ b/propulsion-cosmos-reactor/Reactor.fsproj @@ -20,10 +20,10 @@ - + - + diff --git a/propulsion-projector/.template.config/template.json b/propulsion-projector/.template.config/template.json index ce85311ff..2865dce99 100644 --- a/propulsion-projector/.template.config/template.json +++ b/propulsion-projector/.template.config/template.json @@ -6,6 +6,7 @@ "Equinox", "Propulsion", "CosmosDb", + "DynamoDb", "ChangeFeed", "ChangeFeedProcessor", "EventStore", @@ -32,6 +33,10 @@ "choice": "cosmos", "description": "Wire for CosmosDB ChangeFeedProcessor source" }, + { + "choice": "dynamo", + "description": "Wire for DynamoStoreSource" + }, { "choice": "eventStore", "description": "Wire for EventStoreDB $all source" @@ -54,12 +59,9 @@ "type": "computed", "value": "(source == \"cosmos\")" }, - "synthesizeSequence": { - "type": "parameter", - "datatype": "bool", - "isRequired": false, - "defaultValue": "false", - "description": "(--source=cosmos only) Include custom parsing / sequence generation logic for projecting arbitrary CosmosDB document changes, maintaining ordering." 
+ "dynamo": { + "type": "computed", + "value": "(source == \"dynamo\")" }, "kafka": { "type": "parameter", diff --git a/propulsion-projector/Args.fs b/propulsion-projector/Args.fs new file mode 100644 index 000000000..0f6b0f34c --- /dev/null +++ b/propulsion-projector/Args.fs @@ -0,0 +1,217 @@ +/// Commandline arguments and/or secrets loading specifications +module ProjectorTemplate.Args + +open System + +exception MissingArg of message : string with override this.Message = this.message +let missingArg msg = raise (MissingArg msg) + +let [] REGION = "EQUINOX_DYNAMO_REGION" +let [] SERVICE_URL = "EQUINOX_DYNAMO_SERVICE_URL" +let [] ACCESS_KEY = "EQUINOX_DYNAMO_ACCESS_KEY_ID" +let [] SECRET_KEY = "EQUINOX_DYNAMO_SECRET_ACCESS_KEY" +let [] TABLE = "EQUINOX_DYNAMO_TABLE" +let [] INDEX_TABLE = "EQUINOX_DYNAMO_TABLE_INDEX" + +type Configuration(tryGet : string -> string option) = + + member val tryGet = tryGet + member _.get key = match tryGet key with Some value -> value | None -> missingArg $"Missing Argument/Environment Variable %s{key}" + + member x.CosmosConnection = x.get "EQUINOX_COSMOS_CONNECTION" + member x.CosmosDatabase = x.get "EQUINOX_COSMOS_DATABASE" + member x.CosmosContainer = x.get "EQUINOX_COSMOS_CONTAINER" + + member x.DynamoServiceUrl = x.get SERVICE_URL + member x.DynamoAccessKey = x.get ACCESS_KEY + member x.DynamoSecretKey = x.get SECRET_KEY + member x.DynamoTable = x.get TABLE + member x.DynamoRegion = x.tryGet REGION + + member x.EventStoreConnection = x.get "EQUINOX_ES_CONNECTION" + // member x.EventStoreCredentials = x.get "EQUINOX_ES_CREDENTIALS" + member _.MaybeEventStoreConnection = tryGet "EQUINOX_ES_CONNECTION" + member _.MaybeEventStoreCredentials = tryGet "EQUINOX_ES_CREDENTIALS" + + member x.SqlStreamStoreConnection = x.get "SQLSTREAMSTORE_CONNECTION" + member x.SqlStreamStoreCredentials = tryGet "SQLSTREAMSTORE_CREDENTIALS" + member x.SqlStreamStoreCredentialsCheckpoints = tryGet "SQLSTREAMSTORE_CREDENTIALS_CHECKPOINTS" + member 
x.SqlStreamStoreDatabase = x.get "SQLSTREAMSTORE_DATABASE" + member x.SqlStreamStoreContainer = x.get "SQLSTREAMSTORE_CONTAINER" + +//#if kafka + member x.Broker = x.get "PROPULSION_KAFKA_BROKER" + member x.Topic = x.get "PROPULSION_KAFKA_TOPIC" +//#endif + + member x.PrometheusPort = tryGet "PROMETHEUS_PORT" |> Option.map int + +// Type used to represent where checkpoints (for either the FeedConsumer position, or for a Reactor's Event Store subscription position) will be stored +// In a typical app you don't have anything like this as you'll simply use your primary Event Store (see) +module Checkpoints = + + [] + type Store = + | Cosmos of Equinox.CosmosStore.CosmosStoreContext * Equinox.Core.ICache + | Dynamo of Equinox.DynamoStore.DynamoStoreContext * Equinox.Core.ICache + (* Propulsion.EventStoreDb does not implement a native checkpoint storage mechanism, + perhaps port https://github.com/absolutejam/Propulsion.EventStoreDB ? + or fork/finish https://github.com/jet/dotnet-templates/pull/81 + alternately one could use a SQL Server DB via Propulsion.SqlStreamStore + + For now, we store the Checkpoints in one of the above stores *) + + let create (consumerGroup, checkpointInterval) storeLog : Store -> Propulsion.Feed.IFeedCheckpointStore = function + | Store.Cosmos (context, cache) -> + Propulsion.Feed.ReaderCheckpoint.CosmosStore.create storeLog (consumerGroup, checkpointInterval) (context, cache) + | Store.Dynamo (context, cache) -> + Propulsion.Feed.ReaderCheckpoint.DynamoStore.create storeLog (consumerGroup, checkpointInterval) (context, cache) + let createCheckpointStore (group, checkpointInterval, store : Config.Store) : Propulsion.Feed.IFeedCheckpointStore = + let checkpointStore = + match store with + | Config.Store.Cosmos (context, cache) -> Store.Cosmos (context, cache) + | Config.Store.Dynamo (context, cache) -> Store.Dynamo (context, cache) + create (group, checkpointInterval) Config.log checkpointStore + +open Argu + +#if kafka + type [] 
KafkaSinkParameters = + | [] Broker of string + | [] Topic of string + interface IArgParserTemplate with + member p.Usage = p |> function + | Broker _ -> "specify Kafka Broker, in host:port format. (optional if environment variable PROPULSION_KAFKA_BROKER specified)" + | Topic _ -> "specify Kafka Topic Id. (optional if environment variable PROPULSION_KAFKA_TOPIC specified)" + +type KafkaSinkArguments(c : Configuration, p : ParseResults) = + member val Broker = p.TryGetResult Broker |> Option.defaultWith (fun () -> c.Broker) + member val Topic = p.TryGetResult Topic |> Option.defaultWith (fun () -> c.Topic) + member x.BuildTargetParams() = x.Broker, x.Topic + +#endif + +#if (esdb || sss || cosmos) +module Cosmos = + + type [] Parameters = + | [] Verbose + | [] ConnectionMode of Microsoft.Azure.Cosmos.ConnectionMode + | [] Connection of string + | [] Database of string + | [] Container of string + | [] Timeout of float + | [] Retries of int + | [] RetriesWaitTime of float +#if kafka + | [] Kafka of ParseResults +#endif + interface IArgParserTemplate with + member p.Usage = p |> function + | Verbose _ -> "request verbose logging." + | ConnectionMode _ -> "override the connection mode. Default: Direct." + | Connection _ -> "specify a connection string for a Cosmos account. (optional if environment variable EQUINOX_COSMOS_CONNECTION specified)" + | Database _ -> "specify a database name for Cosmos store. (optional if environment variable EQUINOX_COSMOS_DATABASE specified)" + | Container _ -> "specify a container name for Cosmos store. (optional if environment variable EQUINOX_COSMOS_CONTAINER specified)" + | Timeout _ -> "specify operation timeout in seconds (default: 5)." + | Retries _ -> "specify operation retries (default: 1)." + | RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds (default: 5)" +#if kafka + | Kafka _ -> "Kafka Sink parameters." 
+#endif + type Arguments(c : Configuration, p : ParseResults) = + let connection = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) + let discovery = Equinox.CosmosStore.Discovery.ConnectionString connection + let mode = p.TryGetResult ConnectionMode + let timeout = p.GetResult(Timeout, 5.) |> TimeSpan.FromSeconds + let retries = p.GetResult(Retries, 1) + let maxRetryWaitTime = p.GetResult(RetriesWaitTime, 5.) |> TimeSpan.FromSeconds + let connector = Equinox.CosmosStore.CosmosStoreConnector(discovery, timeout, retries, maxRetryWaitTime, ?mode = mode) + let database = p.TryGetResult Database |> Option.defaultWith (fun () -> c.CosmosDatabase) + let container = p.TryGetResult Container |> Option.defaultWith (fun () -> c.CosmosContainer) + member val Verbose = p.Contains Verbose + member _.Connect() = connector.ConnectStore("Target", database, container) +#if kafka + member val Kafka = + match p.GetSubCommand() with + | Parameters.Kafka kafka -> KafkaSinkArguments(c, kafka) + | _ -> missingArg "Must specify `kafka` arguments" +#endif + +#endif // cosmos +#if (esdb || sss || dynamo) +module Dynamo = + + type [] Parameters = + | [] Verbose + | [] RegionProfile of string + | [] ServiceUrl of string + | [] AccessKey of string + | [] SecretKey of string + | [] Table of string + | [] Retries of int + | [] RetriesTimeoutS of float +#if kafka + | [] Kafka of ParseResults +#endif + interface IArgParserTemplate with + member p.Usage = p |> function + | Verbose -> "Include low level Store logging." + | RegionProfile _ -> "specify an AWS Region (aka System Name, e.g. \"us-east-1\") to connect to using the implicit AWS SDK/tooling config and/or environment variables etc. 
Optional if:\n" + + "1) $" + REGION + " specified OR\n" + + "2) Explicit `ServiceUrl`/$" + SERVICE_URL + "+`AccessKey`/$" + ACCESS_KEY + "+`Secret Key`/$" + SECRET_KEY + " specified.\n" + + "See https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html for details" + | ServiceUrl _ -> "specify a server endpoint for a Dynamo account. (Not applicable if `ServiceRegion`/$" + REGION + " specified; Optional if $" + SERVICE_URL + " specified)" + | AccessKey _ -> "specify an access key id for a Dynamo account. (Not applicable if `ServiceRegion`/$" + REGION + " specified; Optional if $" + ACCESS_KEY + " specified)" + | SecretKey _ -> "specify a secret access key for a Dynamo account. (Not applicable if `ServiceRegion`/$" + REGION + " specified; Optional if $" + SECRET_KEY + " specified)" + | Table _ -> "specify a table name for the primary store. (optional if $" + TABLE + " specified)" + | Retries _ -> "specify operation retries (default: 1)." + | RetriesTimeoutS _ -> "specify max wait-time including retries in seconds (default: 5)" +#if kafka + | Kafka _ -> "Kafka Sink parameters." +#endif + type Arguments(c : Configuration, p : ParseResults) = + let conn = match p.TryGetResult RegionProfile |> Option.orElseWith (fun () -> c.DynamoRegion) with + | Some systemName -> + Choice1Of2 systemName + | None -> + let serviceUrl = p.TryGetResult ServiceUrl |> Option.defaultWith (fun () -> c.DynamoServiceUrl) + let accessKey = p.TryGetResult AccessKey |> Option.defaultWith (fun () -> c.DynamoAccessKey) + let secretKey = p.TryGetResult SecretKey |> Option.defaultWith (fun () -> c.DynamoSecretKey) + Choice2Of2 (serviceUrl, accessKey, secretKey) + let retries = p.GetResult(Retries, 1) + let timeout = p.GetResult(RetriesTimeoutS, 5.) 
|> TimeSpan.FromSeconds + let connector = match conn with + | Choice1Of2 systemName -> + Equinox.DynamoStore.DynamoStoreConnector(systemName, timeout, retries) + | Choice2Of2 (serviceUrl, accessKey, secretKey) -> + Equinox.DynamoStore.DynamoStoreConnector(serviceUrl, accessKey, secretKey, timeout, retries) + let table = p.TryGetResult Table |> Option.defaultWith (fun () -> c.DynamoTable) + member val Verbose = p.Contains Verbose + member _.Connect() = connector.LogConfiguration() + let client = connector.CreateClient() + client.ConnectStore("Main", table) +#if kafka + member val Kafka = + match p.GetSubCommand() with + | Kafka kafka -> KafkaSinkArguments(c, kafka) + | _ -> missingArg "Must specify `kafka` arguments" +#endif + +#endif // dynamo + +type [] + TargetStoreArgs = + | Cosmos of Cosmos.Arguments + | Dynamo of Dynamo.Arguments + +module TargetStoreArgs = + + let connectTarget targetStore cache: Config.Store = + match targetStore with + | TargetStoreArgs.Cosmos a -> + let context = a.Connect() |> Async.RunSynchronously |> CosmosStoreContext.create + Config.Store.Cosmos (context, cache) + | TargetStoreArgs.Dynamo a -> + let context = a.Connect() |> DynamoStoreContext.create + Config.Store.Dynamo (context, cache) diff --git a/propulsion-projector/Config.fs b/propulsion-projector/Config.fs new file mode 100644 index 000000000..e33d14b10 --- /dev/null +++ b/propulsion-projector/Config.fs @@ -0,0 +1,55 @@ +module ProjectorTemplate.Config + +let log = Serilog.Log.ForContext("isMetric", true) + +module Cosmos = + + let private createCached codec initial fold accessStrategy (context, cache) : Equinox.Category<_, _, _> = + let cacheStrategy = Equinox.CosmosStore.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) 
+ Equinox.CosmosStore.CosmosStoreCategory(context, codec, fold, initial, cacheStrategy, accessStrategy) + + let createSnapshotted codec initial fold (isOrigin, toSnapshot) (context, cache) = + let accessStrategy = Equinox.CosmosStore.AccessStrategy.Snapshot (isOrigin, toSnapshot) + createCached codec initial fold accessStrategy (context, cache) + + let createRollingState codec initial fold toSnapshot (context, cache) = + let accessStrategy = Equinox.CosmosStore.AccessStrategy.RollingState toSnapshot + createCached codec initial fold accessStrategy (context, cache) + +module Dynamo = + + let private createCached codec initial fold accessStrategy (context, cache) : Equinox.Category<_, _, _> = + let cacheStrategy = Equinox.DynamoStore.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) + Equinox.DynamoStore.DynamoStoreCategory(context, FsCodec.Deflate.EncodeUncompressed codec, fold, initial, cacheStrategy, accessStrategy) + + let createSnapshotted codec initial fold (isOrigin, toSnapshot) (context, cache) = + let accessStrategy = Equinox.DynamoStore.AccessStrategy.Snapshot (isOrigin, toSnapshot) + createCached codec initial fold accessStrategy (context, cache) + + let createRollingState codec initial fold toSnapshot (context, cache) = + let accessStrategy = Equinox.DynamoStore.AccessStrategy.RollingState toSnapshot + createCached codec initial fold accessStrategy (context, cache) + +#if !(sourceKafka && kafka) +module Esdb = + + let create codec initial fold (context, cache) = + let cacheStrategy = Equinox.EventStoreDb.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) + Equinox.EventStoreDb.EventStoreCategory(context, codec, fold, initial, cacheStrategy) + +module Sss = + + let create codec initial fold (context, cache) = + let cacheStrategy = Equinox.SqlStreamStore.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) 
+ Equinox.SqlStreamStore.SqlStreamStoreCategory(context, codec, fold, initial, cacheStrategy) + +#endif + +[] +type Store = +#if cosmos || esdb || sss + | Cosmos of Equinox.CosmosStore.CosmosStoreContext * Equinox.Core.ICache +#endif +#if dynamo || esdb || sss + | Dynamo of Equinox.DynamoStore.DynamoStoreContext * Equinox.Core.ICache +#endif diff --git a/propulsion-projector/Handler.fs b/propulsion-projector/Handler.fs index f21ecdb25..0137bc20a 100644 --- a/propulsion-projector/Handler.fs +++ b/propulsion-projector/Handler.fs @@ -5,49 +5,9 @@ module ProjectorTemplate.Handler // Here we pass the items directly through to the handler without parsing them let mapToStreamItems (x : System.Collections.Generic.IReadOnlyCollection<'a>) : seq<'a> = upcast x #else // cosmos && !parallelOnly -#if synthesizeSequence // cosmos && !parallelOnly && !synthesizeSequence -let indices = Propulsion.Kafka.StreamNameSequenceGenerator() - -let parseDocumentAsEvent (doc : Newtonsoft.Json.Linq.JObject) : Propulsion.Streams.StreamEvent = - let docId = doc.Value("id") - //let streamName = Propulsion.Streams.StreamName.internalParseSafe docId // if we're not sure there is a `-` in the id, this helper adds one - let streamName = FsCodec.StreamName.parse docId // throws if there's no `-` in the id - let ts = let raw = Propulsion.CosmosStore.EquinoxNewtonsoftParser.timestamp doc in raw.ToUniversalTime() |> System.DateTimeOffset - let docType = "DocumentTypeA" // each Event requires an EventType - enables the handler to route without having to parse the Data first - let data = string doc |> System.Text.Encoding.UTF8.GetBytes - // Ideally, we'd extract a monotonically incrementing index/version from the source and use that - // (Using this technique neuters the deduplication mechanism) - let streamIndex = indices.GenerateIndex streamName - { stream = streamName; event = FsCodec.Core.TimelineEvent.Create(streamIndex, docType, data, timestamp=ts) } - -let mapToStreamItems docs : 
Propulsion.Streams.StreamEvent seq = - docs |> Seq.map parseDocumentAsEvent -#else // cosmos && !parallelOnly && synthesizeSequence -//let replaceLongDataWithNull (x : FsCodec.ITimelineEvent) : FsCodec.ITimelineEvent<_> = -// if x.Data.Length < 900_000 then x -// else FsCodec.Core.TimelineEvent.Create(x.Index, x.EventType, null, x.Meta, timestamp=x.Timestamp) -// -//let hackDropBigBodies (e : Propulsion.Streams.StreamEvent<_>) : Propulsion.Streams.StreamEvent<_> = -// { stream = e.stream; event = replaceLongDataWithNull e.event } - -let mapToStreamItems categoryFilter docs : Propulsion.Streams.StreamEvent<_> seq = - docs - |> Seq.collect (Propulsion.CosmosStore.EquinoxSystemTextJsonParser.enumStreamEvents categoryFilter) - // TODO use Seq.filter and/or Seq.map to adjust what's being sent etc - // |> Seq.map hackDropBigBodies -#endif // cosmos && !parallelOnly && synthesizeSequence +let categoryFilter _ = true #endif // !parallelOnly //#endif // cosmos -#if esdb -open Propulsion.EventStore - -/// Responsible for inspecting and then either dropping or tweaking events coming from EventStore -// NB the `Index` needs to be contiguous with existing events - IOW filtering needs to be at stream (and not event) level -let tryMapEvent filterByStreamName (x : EventStore.ClientAPI.ResolvedEvent) = - match x.Event with - | e when not e.IsJson || e.EventStreamId.StartsWith "$" || not (filterByStreamName e.EventStreamId) -> None - | PropulsionStreamEvent e -> Some e -#endif // esdb #if kafka #if (cosmos && parallelOnly) // kafka && cosmos && parallelOnly @@ -76,12 +36,12 @@ type ProductionStats(log, statsInterval, stateInterval) = /// to preserve ordering at stream (key) level for messages produced to the topic) // TODO NOTE: The bulk of any manipulation should take place before events enter the scheduler, i.e. 
in program.fs // TODO NOTE: While filtering out entire categories is appropriate, you should not filter within a given stream (i.e., by event type) -let render (stream : FsCodec.StreamName, span : Propulsion.Streams.StreamSpan<_>) = async { +let render struct (stream : FsCodec.StreamName, span : Propulsion.Streams.Default.StreamSpan) = async { let value = span |> Propulsion.Codec.NewtonsoftJson.RenderedSpan.ofStreamSpan stream |> Propulsion.Codec.NewtonsoftJson.Serdes.Serialize - return FsCodec.StreamName.toString stream, value } + return struct (FsCodec.StreamName.toString stream, value) } #endif // kafka && !(cosmos && parallelOnly) #else // !kafka // Each outcome from `handle` is passed to `HandleOk` or `HandleExn` by the scheduler, DumpStats is called at `statsInterval` @@ -114,3 +74,15 @@ let handle struct (_stream, span: Propulsion.Streams.StreamSpan<_>) = async { do! Async.Sleep ms return struct (Propulsion.Streams.SpanResult.AllProcessed, span.Length) } #endif // !kafka + +type Config private () = + + static member StartSink(log : Serilog.ILogger, stats, + handle : struct (FsCodec.StreamName * Propulsion.Streams.Default.StreamSpan) + -> Async, + maxReadAhead : int, maxConcurrentStreams : int, ?wakeForResults, ?idleDelay, ?purgeInterval) = + Propulsion.Streams.Default.Config.Start(log, maxReadAhead, maxConcurrentStreams, handle, stats, stats.StatsInterval.Period, + ?wakeForResults = wakeForResults, ?idleDelay = idleDelay, ?purgeInterval = purgeInterval) + + static member StartSource(log, sink, sourceConfig) = + SourceConfig.start (log, Config.log) sink categoryFilter sourceConfig diff --git a/propulsion-projector/Infrastructure.fs b/propulsion-projector/Infrastructure.fs index 7499a9c20..579918765 100644 --- a/propulsion-projector/Infrastructure.fs +++ b/propulsion-projector/Infrastructure.fs @@ -8,7 +8,7 @@ module EnvVar = let tryGet varName : string option = Environment.GetEnvironmentVariable varName |> Option.ofObj -#if esdb +#if (cosmos || esdb || sss) 
module CosmosStoreContext = /// Create with default packing and querying policies. Search for other `module CosmosStoreContext` impls for custom variations @@ -16,8 +16,6 @@ module CosmosStoreContext = let maxEvents = 256 Equinox.CosmosStore.CosmosStoreContext(storeClient, tipMaxEvents=maxEvents) -#endif -//#if (cosmos || esdb) type Equinox.CosmosStore.CosmosStoreConnector with member private x.LogConfiguration(connectionName, databaseId, containerId) = @@ -42,7 +40,62 @@ type Equinox.CosmosStore.CosmosStoreConnector with x.LogConfiguration(defaultArg connectionName "Source", databaseId, containerId) x.CreateUninitialized(databaseId, containerId) -//#endif + /// Connects to a Store as both a ChangeFeedProcessor Monitored Container and a CosmosStoreClient + member x.ConnectStoreAndMonitored(databaseId, containerId) = + let monitored = x.ConnectMonitored(databaseId, containerId, "Main") + let storeClient = Equinox.CosmosStore.CosmosStoreClient(monitored.Database.Client, databaseId, containerId) + storeClient, monitored + +#endif +#if (dynamo || esdb || sss) +module Dynamo = + + open Equinox.DynamoStore + + let defaultCacheDuration = System.TimeSpan.FromMinutes 20. 
+ let private createCached codec initial fold accessStrategy (context, cache) = + let cacheStrategy = CachingStrategy.SlidingWindow (cache, defaultCacheDuration) + DynamoStoreCategory(context, FsCodec.Deflate.EncodeTryDeflate codec, fold, initial, cacheStrategy, accessStrategy) + + let createSnapshotted codec initial fold (isOrigin, toSnapshot) (context, cache) = + let accessStrategy = AccessStrategy.Snapshot (isOrigin, toSnapshot) + createCached codec initial fold accessStrategy (context, cache) + +type Equinox.DynamoStore.DynamoStoreConnector with + + member x.LogConfiguration() = + Log.Information("DynamoStore {endpoint} Timeout {timeoutS}s Retries {retries}", + x.Endpoint, (let t = x.Timeout in t.TotalSeconds), x.Retries) + +type Equinox.DynamoStore.DynamoStoreClient with + + member internal x.LogConfiguration(role, ?log) = + (defaultArg log Log.Logger).Information("DynamoStore {role:l} Table {table} Archive {archive}", role, x.TableName, Option.toObj x.ArchiveTableName) + member client.CreateCheckpointService(consumerGroupName, cache, log, ?checkpointInterval) = + let checkpointInterval = defaultArg checkpointInterval (TimeSpan.FromHours 1.) + let context = Equinox.DynamoStore.DynamoStoreContext(client) + Propulsion.Feed.ReaderCheckpoint.DynamoStore.create log (consumerGroupName, checkpointInterval) (context, cache) + +type Equinox.DynamoStore.DynamoStoreContext with + + member internal x.LogConfiguration(log : ILogger) = + log.Information("DynamoStore Tip thresholds: {maxTipBytes}b {maxTipEvents}e Query Paging {queryMaxItems} items", + x.TipOptions.MaxBytes, Option.toNullable x.TipOptions.MaxEvents, x.QueryOptions.MaxItems) + +type Amazon.DynamoDBv2.IAmazonDynamoDB with + + member x.ConnectStore(role, table) = + let storeClient = Equinox.DynamoStore.DynamoStoreClient(x, table) + storeClient.LogConfiguration(role) + storeClient + +module DynamoStoreContext = + + /// Create with default packing and querying policies. 
Search for other `module DynamoStoreContext` impls for custom variations + let create (storeClient : Equinox.DynamoStore.DynamoStoreClient) = + Equinox.DynamoStore.DynamoStoreContext(storeClient, queryMaxItems = 100) + +#endif [] type Logging() = diff --git a/propulsion-projector/Program.fs b/propulsion-projector/Program.fs index b0b173cc8..40001fd28 100644 --- a/propulsion-projector/Program.fs +++ b/propulsion-projector/Program.fs @@ -3,252 +3,9 @@ open Serilog open System -exception MissingArg of message : string with override this.Message = this.message - -type Configuration(tryGet) = - - let get key = - match tryGet key with - | Some value -> value - | None -> raise (MissingArg (sprintf "Missing Argument/Environment Variable %s" key)) -#if esdb - let isTrue varName = tryGet varName |> Option.exists (fun s -> String.Equals(s, bool.TrueString, StringComparison.OrdinalIgnoreCase)) -#endif -#if (cosmos || esdb) - member _.CosmosConnection = get "EQUINOX_COSMOS_CONNECTION" - member _.CosmosDatabase = get "EQUINOX_COSMOS_DATABASE" - member _.CosmosContainer = get "EQUINOX_COSMOS_CONTAINER" -#endif -//#if sss - member _.SqlStreamStoreConnection = get "SQLSTREAMSTORE_CONNECTION" - member _.SqlStreamStoreCredentials = tryGet "SQLSTREAMSTORE_CREDENTIALS" - member _.SqlStreamStoreCredentialsCheckpoints = tryGet "SQLSTREAMSTORE_CREDENTIALS_CHECKPOINTS" - member _.SqlStreamStoreDatabase = get "SQLSTREAMSTORE_DATABASE" - member _.SqlStreamStoreContainer = get "SQLSTREAMSTORE_CONTAINER" -//#endif -#if esdb - member _.EventStoreHost = get "EQUINOX_ES_HOST" - member _.EventStoreTcp = isTrue "EQUINOX_ES_TCP" - member _.EventStorePort = tryGet "EQUINOX_ES_PORT" |> Option.map int - member _.EventStoreUsername = get "EQUINOX_ES_USERNAME" - member _.EventStorePassword = get "EQUINOX_ES_PASSWORD" -#endif -//#if kafka - member _.Broker = get "PROPULSION_KAFKA_BROKER" - member _.Topic = get "PROPULSION_KAFKA_TOPIC" -//#endif - module Args = open Argu -#if cosmos - type [] 
CosmosParameters = - | [] FromTail - | [] MaxItems of int - | [] LagFreqM of float - - | [] ConnectionMode of Microsoft.Azure.Cosmos.ConnectionMode - | [] Connection of string - | [] Database of string - | [] Container of string - | [] LeaseContainer of string - | [] Timeout of float - | [] Retries of int - | [] RetriesWaitTime of float - interface IArgParserTemplate with - member a.Usage = a |> function - | FromTail _ -> "(iff the Consumer Name is fresh) - force skip to present Position. Default: Never skip an event." - | MaxItems _ -> "maximum item count to request from the feed. Default: unlimited." - | LagFreqM _ -> "specify frequency (minutes) to dump lag stats. Default: 1" - - | ConnectionMode _ -> "override the connection mode. Default: Direct." - | Connection _ -> "specify a connection string for a Cosmos account. (optional if environment variable EQUINOX_COSMOS_CONNECTION specified)" - | Database _ -> "specify a database name for store. (optional if environment variable EQUINOX_COSMOS_DATABASE specified)" - | Container _ -> "specify a container name for store. (optional if environment variable EQUINOX_COSMOS_CONTAINER specified)" - | LeaseContainer _ -> "specify Container Name (in this [target] Database) for Leases container. Default: `SourceContainer` + `-aux`." - | Timeout _ -> "specify operation timeout in seconds. Default: 5." - | Retries _ -> "specify operation retries. Default: 1." - | RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds. Default: 5." - type CosmosArguments(c : Configuration, a : ParseResults) = - let discovery = a.TryGetResult CosmosParameters.Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString - let mode = a.TryGetResult ConnectionMode - let timeout = a.GetResult(Timeout, 5.) |> TimeSpan.FromSeconds - let retries = a.GetResult(Retries, 1) - let maxRetryWaitTime = a.GetResult(RetriesWaitTime, 5.) 
|> TimeSpan.FromSeconds - let connector = Equinox.CosmosStore.CosmosStoreConnector(discovery, timeout, retries, maxRetryWaitTime, ?mode = mode) - let database = a.TryGetResult Database |> Option.defaultWith (fun () -> c.CosmosDatabase) - let containerId = a.GetResult Container - let leaseContainerId = a.GetResult(LeaseContainer, containerId + "-aux") - member _.ConnectLeases() = connector.CreateUninitialized(database, leaseContainerId) - member x.MonitoredContainer() = connector.ConnectMonitored(database, containerId) - - member val FromTail = a.Contains FromTail - member val MaxItems = a.TryGetResult MaxItems - member val LagFrequency = a.GetResult(LagFreqM, 1.) |> TimeSpan.FromMinutes - member _.Connect() = connector.ConnectStore("Main", database, containerId) -#endif -#if esdb - open Equinox.EventStore - open Propulsion.EventStore - type [] EsSourceParameters = - | [] FromTail - | [] Gorge of int - | [] Tail of intervalS: float - | [] ForceRestart - | [] BatchSize of int - | [] MinBatchSize of int - | [] Position of int64 - | [] Chunk of int - | [] Percent of float - - | [] Verbose - | [] Timeout of float - | [] Retries of int - | [] HeartbeatTimeout of float - | [] Tcp - | [] Host of string - | [] Port of int - | [] Username of string - | [] Password of string - - | [] Cosmos of ParseResults - interface IArgParserTemplate with - member a.Usage = a |> function - | FromTail -> "Start the processing from the Tail" - | Gorge _ -> "Request Parallel readers phase during initial catchup, running one chunk (256MB) apart. Default: off" - | Tail _ -> "attempt to read from tail at specified interval in Seconds. Default: 1" - | ForceRestart _ -> "Forget the current committed position; start from (and commit) specified position. Default: start from specified position or resume from committed." - | BatchSize _ -> "maximum item count to request from feed. Default: 4096" - | MinBatchSize _ -> "minimum item count to drop down to in reaction to read failures. 
Default: 512" - | Position _ -> "EventStore $all Stream Position to commence from" - | Chunk _ -> "EventStore $all Chunk to commence from" - | Percent _ -> "EventStore $all Stream Position to commence from (as a percentage of current tail position)" - - | Verbose -> "Include low level Store logging." - | Tcp -> "Request connecting EventStore direct to a TCP/IP endpoint. Default: Use Clustered mode with Gossip-driven discovery (unless environment variable EQUINOX_ES_TCP specifies 'true')." - | Host _ -> "TCP mode: specify EventStore hostname to connect to directly. Clustered mode: use Gossip protocol against all A records returned from DNS query. (optional if environment variable EQUINOX_ES_HOST specified)" - | Port _ -> "specify EventStore custom port. Uses value of environment variable EQUINOX_ES_PORT if specified. Defaults for Cluster and Direct TCP/IP mode are 30778 and 1113 respectively." - | Username _ -> "specify username for EventStore. (optional if environment variable EQUINOX_ES_USERNAME specified)" - | Password _ -> "specify Password for EventStore. (optional if environment variable EQUINOX_ES_PASSWORD specified)" - | Timeout _ -> "specify operation timeout in seconds. Default: 20." - | Retries _ -> "specify operation retries. Default: 3." - | HeartbeatTimeout _ -> "specify heartbeat timeout in seconds. Default: 1.5." - - | Cosmos _ -> "CosmosDB (Checkpoint) Store parameters." - and EsSourceArguments(c : Configuration, a : ParseResults) = - let discovery (host, port, tcp) = - match tcp, port with - | false, None -> Discovery.GossipDns host - | false, Some p -> Discovery.GossipDnsCustomPort (host, p) - | true, None -> Discovery.Uri (UriBuilder("tcp", host, 1113).Uri) - | true, Some p -> Discovery.Uri (UriBuilder("tcp", host, p).Uri) - member val Gorge = a.TryGetResult Gorge - member val TailInterval = a.GetResult(Tail, 1.) 
|> TimeSpan.FromSeconds - member val ForceRestart = a.Contains ForceRestart - member val StartingBatchSize = a.GetResult(BatchSize, 4096) - member val MinBatchSize = a.GetResult(MinBatchSize, 512) - member val StartPos = - match a.TryGetResult Position, a.TryGetResult Chunk, a.TryGetResult Percent, a.Contains EsSourceParameters.FromTail with - | Some p, _, _, _ -> Absolute p - | _, Some c, _, _ -> StartPos.Chunk c - | _, _, Some p, _ -> Percentage p - | None, None, None, true -> StartPos.TailOrCheckpoint - | None, None, None, _ -> StartPos.StartOrCheckpoint - member val Tcp = a.Contains Tcp || c.EventStoreTcp - member val Port = match a.TryGetResult Port with Some x -> Some x | None -> c.EventStorePort - member val Host = a.TryGetResult Host |> Option.defaultWith (fun () -> c.EventStoreHost) - member val User = a.TryGetResult Username |> Option.defaultWith (fun () -> c.EventStoreUsername) - member val Password = a.TryGetResult Password |> Option.defaultWith (fun () -> c.EventStorePassword) - member val Retries = a.GetResult(EsSourceParameters.Retries, 3) - member val Timeout = a.GetResult(EsSourceParameters.Timeout, 20.) 
|> TimeSpan.FromSeconds - member val Heartbeat = a.GetResult(HeartbeatTimeout, 1.5) |> TimeSpan.FromSeconds - - member x.Connect(log: ILogger, storeLog: ILogger, appName, nodePreference) = - let discovery = discovery (x.Host, x.Port, x.Tcp) - let ts (x : TimeSpan) = x.TotalSeconds - log.ForContext("host", x.Host).ForContext("port", x.Port) - .Information("EventStore {discovery} heartbeat: {heartbeat}s Timeout: {timeout}s Retries {retries}", - discovery, ts x.Heartbeat, ts x.Timeout, x.Retries) - let log=if storeLog.IsEnabled Serilog.Events.LogEventLevel.Debug then Logger.SerilogVerbose storeLog else Logger.SerilogNormal storeLog - let tags=["M", Environment.MachineName; "I", Guid.NewGuid() |> string] - Connector(x.User, x.Password, x.Timeout, x.Retries, log=log, heartbeatTimeout=x.Heartbeat, tags=tags) - .Connect(appName, discovery, nodePreference) |> Async.RunSynchronously - - member val CheckpointInterval = TimeSpan.FromHours 1. - member val Cosmos : CosmosArguments = - match a.TryGetSubCommand() with - | Some (EsSourceParameters.Cosmos cosmos) -> CosmosArguments (c, cosmos) - | _ -> raise (MissingArg "Must specify `cosmos` checkpoint store when source is `es`") - and [] CosmosParameters = - | [] Connection of string - | [] ConnectionMode of Microsoft.Azure.Cosmos.ConnectionMode - | [] Database of string - | [] Container of string - | [] Timeout of float - | [] Retries of int - | [] RetriesWaitTime of float - interface IArgParserTemplate with - member a.Usage = a |> function - | ConnectionMode _ -> "override the connection mode. Default: Direct." - | Connection _ -> "specify a connection string for a Cosmos account. (optional if environment variable EQUINOX_COSMOS_CONNECTION specified)" - | Database _ -> "specify a database name for Cosmos store. (optional if environment variable EQUINOX_COSMOS_DATABASE specified)" - | Container _ -> "specify a container name for Cosmos store. 
(optional if environment variable EQUINOX_COSMOS_CONTAINER specified)" - | Timeout _ -> "specify operation timeout in seconds. Default: 5." - | Retries _ -> "specify operation retries. Default: 1." - | RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds. Default: 5." - and CosmosArguments(c : Configuration, a : ParseResults) = - let discovery = a.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString - let mode = a.TryGetResult ConnectionMode - let timeout = a.GetResult(Timeout, 30.) |> TimeSpan.FromSeconds - let retries = a.GetResult(Retries, 9) - let maxRetryWaitTime = a.GetResult(RetriesWaitTime, 30.) |> TimeSpan.FromSeconds - let connector = Equinox.CosmosStore.CosmosStoreConnector(discovery, timeout, retries, maxRetryWaitTime, ?mode=mode) - let database = a.TryGetResult Database |> Option.defaultWith (fun () -> c.CosmosDatabase) - let container = a.TryGetResult Container |> Option.defaultWith (fun () -> c.CosmosContainer) - member _.Connect() = connector.ConnectStore("Main", database, container) -#endif -//#if sss - // TOCONSIDER: add DB connectors other than MsSql - type [] SqlStreamStoreSourceParameters = - | [] Tail of intervalS: float - | [] BatchSize of int - | [] Connection of string - | [] Credentials of string - | [] Schema of string - | [] CheckpointsConnection of string - | [] CheckpointsCredentials of string - interface IArgParserTemplate with - member a.Usage = a |> function - | Tail _ -> "Polling interval in Seconds. Default: 1" - | BatchSize _ -> "Maximum events to request from feed. Default: 512" - | Connection _ -> "Connection string for SqlStreamStore db. Optional if SQLSTREAMSTORE_CONNECTION specified" - | Credentials _ -> "Credentials string for SqlStreamStore db (used as part of connection string, but NOT logged). 
Default: use SQLSTREAMSTORE_CREDENTIALS environment variable (or assume no credentials)" - | Schema _ -> "Database schema name" - | CheckpointsConnection _ ->"Connection string for Checkpoints sql db. Optional if SQLSTREAMSTORE_CONNECTION_CHECKPOINTS specified. Default: same as `Connection`" - | CheckpointsCredentials _ ->"Credentials string for Checkpoints sql db. (used as part of checkpoints connection string, but NOT logged). Default (when no `CheckpointsConnection`: use `Credentials. Default (when `CheckpointsConnection` specified): use SQLSTREAMSTORE_CREDENTIALS_CHECKPOINTS environment variable (or assume no credentials)" - and SqlStreamStoreSourceArguments(c : Configuration, a : ParseResults) = - member val TailInterval = a.GetResult(Tail, 1.) |> TimeSpan.FromSeconds - member val MaxBatchSize = a.GetResult(BatchSize, 512) - member val private Connection = a.TryGetResult Connection |> Option.defaultWith (fun () -> c.SqlStreamStoreConnection) - member val private Credentials = a.TryGetResult Credentials |> Option.orElseWith (fun () -> c.SqlStreamStoreCredentials) |> Option.toObj - member val Schema = a.GetResult(Schema, null) - - member x.BuildCheckpointsConnectionString() = - let c, cs = - match a.TryGetResult CheckpointsConnection, a.TryGetResult CheckpointsCredentials with - | Some c, Some p -> c, String.Join(";", c, p) - | None, Some p -> let c = x.Connection in c, String.Join(";", c, p) - | None, None -> let c = x.Connection in c, String.Join(";", c, x.Credentials) - | Some cc, None -> let p = c.SqlStreamStoreCredentialsCheckpoints |> Option.toObj - cc, String.Join(";", cc, p) - Log.Information("Checkpoints MsSql Connection {connectionString}", c) - cs - member x.Connect() = - let conn, creds, schema, autoCreate = x.Connection, x.Credentials, x.Schema, false - let sssConnectionString = String.Join(";", conn, creds) - Log.Information("SqlStreamStore MsSql Connection {connectionString} Schema {schema} AutoCreate {autoCreate}", conn, schema, autoCreate) - 
Equinox.SqlStreamStore.MsSql.Connector(sssConnectionString, schema, autoCreate=autoCreate).Connect() |> Async.RunSynchronously -//#endif [] type Parameters = @@ -256,22 +13,25 @@ module Args = | [] ProcessorName of string | [] MaxReadAhead of int | [] MaxWriters of int -//#if kafka +#if kafka (* Kafka Args *) | [] Broker of string | [] Topic of string -//#endif +#endif #if cosmos - | [] Cosmos of ParseResults + | [] Cosmos of ParseResults +#endif +#if dynamo + | [] Dynamo of ParseResults #endif #if esdb - | [] Es of ParseResults + | [] Esdb of ParseResults +#endif +#if sss + | [] Sss of ParseResults #endif -//#if sss - | [] SqlMs of ParseResults -//#endif interface IArgParserTemplate with - member a.Usage = a |> function + member p.Usage = p |> function | Verbose -> "Request Verbose Logging. Default: off" | ProcessorName _ -> "Projector consumer group name." | MaxReadAhead _ -> "maximum number of batches to let processing get ahead of completion. Default: 64" @@ -283,156 +43,132 @@ module Args = #if cosmos | Cosmos _ -> "specify CosmosDb input parameters" #endif +#if dynamo + | Dynamo _ -> "specify DynamoDb input parameters" +#endif #if esdb - | Es _ -> "specify EventStore input parameters." + | Esdb _ -> "specify EventStore input parameters." #endif -//#if sss - | SqlMs _ -> "specify SqlStreamStore input parameters." -//#endif - and Arguments(c : Configuration, a : ParseResults) = - member val Verbose = a.Contains Parameters.Verbose - member val ProcessorName = a.GetResult ProcessorName - member val private MaxReadAhead = a.GetResult(MaxReadAhead, 64) - member val private MaxConcurrentProcessors =a.GetResult(MaxWriters, 1024) +#if sss + | Sss _ -> "specify SqlStreamStore.MsSql input parameters." 
+#endif + and Arguments(c : SourceArgs.Configuration, p : ParseResults) = + let processorName = p.GetResult ProcessorName + let maxReadAhead = p.GetResult(MaxReadAhead, 64) + let maxConcurrentProcessors = p.GetResult(MaxWriters, 1024) + member val Verbose = p.Contains Parameters.Verbose member val StatsInterval = TimeSpan.FromMinutes 1. - member val StateInterval = TimeSpan.FromMinutes 2. - member x.ProcessorParams() = Log.Information("Projecting... {dop} writers, max {maxReadAhead} batches read ahead", - x.MaxConcurrentProcessors, x.MaxReadAhead) - (x.MaxReadAhead, x.MaxConcurrentProcessors) -#if cosmos - member val Cosmos = CosmosArguments (c, a.GetResult Cosmos) - member x.MonitoringParams() = - let srcC = x.Cosmos - let leases : Microsoft.Azure.Cosmos.Container = srcC.ConnectLeases() - Log.Information("ChangeFeed {processorName} Leases Database {db} Container {container}. MaxItems limited to {maxItems}", - x.ProcessorName, leases.Database.Id, leases.Id, Option.toNullable srcC.MaxItems) - if srcC.FromTail then Log.Warning("(If new projector group) Skipping projection of all existing events.") - Log.Information("ChangeFeed Lag stats interval {lagS:n0}s", let f = srcC.LagFrequency in f.TotalSeconds) - let monitored = srcC.MonitoredContainer() - (monitored, leases, x.ProcessorName, srcC.FromTail, srcC.MaxItems, srcC.LagFrequency) + member val StateInterval = TimeSpan.FromMinutes 10. + member val CacheSizeMb = 10 + member _.ProcessorParams() = Log.Information("Projecting... 
{processorName}, reading {maxReadAhead} ahead, {dop} writers", + processorName, maxReadAhead, maxConcurrentProcessors) + (processorName, maxReadAhead, maxConcurrentProcessors) + member val Store : Choice = + match p.GetSubCommand() with + | Cosmos p -> Choice1Of4 <| SourceArgs.Cosmos.Arguments(c, p) + | Dynamo p -> Choice2Of4 <| SourceArgs.Dynamo.Arguments(c, p) + | Esdb p -> Choice3Of4 <| SourceArgs.Esdb.Arguments(c, p) + | Sss p -> Choice4Of4 <| SourceArgs.Sss.Arguments(c, p) + | p -> Args.missingArg $"Unexpected Store subcommand %A{p}" + member x.VerboseStore = match x.Store with + | Choice1Of4 p -> p.Verbose + | Choice2Of4 p -> p.Verbose + | Choice3Of4 p -> p.Verbose + | Choice4Of4 p -> p.Verbose + member x.ConnectSource(appName) : (ILogger -> string -> SourceConfig) * _ * (ILogger -> unit) = + let cache = Equinox.Cache (appName, sizeMb = x.CacheSizeMb) + match x.Store with + | Choice1Of4 a -> + let monitored = a.ConnectMonitored() + let buildSourceConfig log groupName = + let leases, startFromTail, maxItems, tailSleepInterval, lagFrequency = a.MonitoringParams(log) + let checkpointConfig = CosmosFeedConfig.Persistent (groupName, startFromTail, maxItems, lagFrequency) + SourceConfig.Cosmos (monitored, leases, checkpointConfig, tailSleepInterval) +#if kafka + let target = a.Kafka +#else + let target = () #endif -#if esdb - member val Es = EsSourceArguments (c, a.GetResult Es) - member x.BuildEventStoreParams() = - let srcE = x.Es - let startPos = srcE.StartPos - let context = srcE.Cosmos.Connect() |> Async.RunSynchronously |> CosmosStoreContext.create - Log.Information("Processing Consumer Group {groupName} from {startPos} (force: {forceRestart}) in Database {db} Container {container}", - x.ProcessorName, startPos, srcE.ForceRestart) - Log.Information("Ingesting in batches of [{minBatchSize}..{batchSize}]", srcE.MinBatchSize, srcE.StartingBatchSize) - srcE, context, - { groupName = x.ProcessorName; start = startPos; checkpointInterval = srcE.CheckpointInterval; 
tailInterval = srcE.TailInterval - forceRestart = srcE.ForceRestart - batchSize = srcE.StartingBatchSize; minBatchSize = srcE.MinBatchSize; gorge = srcE.Gorge; streamReaders = 0 } + buildSourceConfig, target, ignore + | Choice2Of4 a -> + let context = a.Connect() + let buildSourceConfig log groupName = + let indexStore, startFromTail, batchSizeCutoff, tailSleepInterval, streamsDop = a.MonitoringParams(log) + let checkpoints = a.CreateCheckpointStore(groupName, cache) + let load = DynamoLoadModeConfig.Hydrate (context, streamsDop) + SourceConfig.Dynamo (indexStore, checkpoints, load, startFromTail, batchSizeCutoff, tailSleepInterval, x.StatsInterval) +#if kafka + let target = a.Kafka +#else + let target = () #endif -//#if sss - member val SqlStreamStore = SqlStreamStoreSourceArguments(c, a.GetResult SqlMs) -//#endif -//#if kafka - member val Target = TargetInfo (c, a) - and TargetInfo(c : Configuration, a : ParseResults) = - member val Broker = a.TryGetResult Broker |> Option.defaultWith (fun () -> c.Broker) - member val Topic = a.TryGetResult Topic |> Option.defaultWith (fun () -> c.Topic) - member x.BuildTargetParams() = x.Broker, x.Topic -//#endif + buildSourceConfig, target, Equinox.DynamoStore.Core.Log.InternalMetrics.dump + | Choice3Of4 a -> + let connection = a.Connect(Log.Logger, appName, EventStore.Client.NodePreference.Leader) + let targetStore = a.ConnectTarget(cache) + let buildSourceConfig log groupName = + let startFromTail, maxItems, tailSleepInterval = a.MonitoringParams(log) + let checkpoints = a.CreateCheckpointStore(groupName, targetStore) + let hydrateBodies = true + SourceConfig.Esdb (connection.ReadConnection, checkpoints, hydrateBodies, startFromTail, maxItems, tailSleepInterval, x.StatsInterval) +#if kafka + let target = a.Kafka +#else + let target = () +#endif + buildSourceConfig, target, fun log -> + Equinox.CosmosStore.Core.Log.InternalMetrics.dump log + Equinox.DynamoStore.Core.Log.InternalMetrics.dump log + | Choice4Of4 a -> + let 
connection = a.Connect() + let buildSourceConfig log groupName = + let startFromTail, maxItems, tailSleepInterval = a.MonitoringParams(log) + let checkpoints = a.CreateCheckpointStoreSql(groupName) + let hydrateBodies = true + SourceConfig.Sss (connection.ReadConnection, checkpoints, hydrateBodies, startFromTail, maxItems, tailSleepInterval, x.StatsInterval) +#if kafka + let target = a.Kafka +#else + let target = () +#endif + buildSourceConfig, target, fun log -> + Equinox.SqlStreamStore.Log.InternalMetrics.dump log + Equinox.CosmosStore.Core.Log.InternalMetrics.dump log + Equinox.DynamoStore.Core.Log.InternalMetrics.dump log /// Parse the commandline; can throw exceptions in response to missing arguments and/or `-h`/`--help` args let parse tryGetConfigValue argv : Arguments = let programName = System.Reflection.Assembly.GetEntryAssembly().GetName().Name let parser = ArgumentParser.Create(programName=programName) - Arguments(Configuration tryGetConfigValue, parser.ParseCommandLine argv) - -//#if esdb -module Checkpoints = - - // In this implementation, we keep the checkpoints in Cosmos when consuming from EventStore - module Cosmos = + Arguments(SourceArgs.Configuration tryGetConfigValue, parser.ParseCommandLine argv) - let create groupName (context, cache) = - Propulsion.Feed.ReaderCheckpoint.CosmosStore.create Log.Logger groupName (context, cache) - -//#endif // esdb let [] AppName = "ProjectorTemplate" -#if cosmos // cosmos -open Propulsion.CosmosStore.Infrastructure // AwaitKeyboardInterruptAsTaskCancelledException +open Propulsion.Internal // AwaitKeyboardInterruptAsTaskCanceledException -#endif let build (args : Args.Arguments) = - let maxReadAhead, maxConcurrentStreams = args.ProcessorParams() -#if cosmos // cosmos -#if kafka // cosmos && kafka - let broker, topic = args.Target.BuildTargetParams() - let producer = Propulsion.Kafka.Producer(Log.Logger, AppName, broker, Confluent.Kafka.Acks.All, topic) -#if parallelOnly // cosmos && kafka && parallelOnly - let 
sink = Propulsion.Kafka.ParallelProducerSink.Start(maxReadAhead, maxConcurrentStreams, Handler.render, producer, args.StatsInterval) -#else // cosmos && kafka && !parallelOnly - let stats = Handler.ProductionStats(Log.Logger, args.StatsInterval, args.StateInterval) - let sink = Propulsion.Kafka.StreamsProducerSink.Start(Log.Logger, maxReadAhead, maxConcurrentStreams, Handler.render, producer, stats, args.StatsInterval) -#endif // cosmos && kafka && !parallelOnly -#else // cosmos && !kafka - let stats = Handler.Stats(Log.Logger, args.StatsInterval, args.StateInterval) - let sink = Propulsion.Streams.StreamsProjector.Start(Log.Logger, maxReadAhead, maxConcurrentStreams, Handler.handle, stats, args.StatsInterval) -#endif // cosmos && !kafka - let source = - let observer = Propulsion.CosmosStore.CosmosStoreSource.CreateObserver(Log.Logger, sink.StartIngester, Handler.mapToStreamItems) - let monitored, leases, processorName, startFromTail, maxItems, lagFrequency = args.MonitoringParams() - Propulsion.CosmosStore.CosmosStoreSource.Start(Log.Logger, monitored, leases, processorName, observer, startFromTail, ?maxItems=maxItems, lagReportFreq=lagFrequency) - [ Async.AwaitKeyboardInterruptAsTaskCancelledException(); source.AwaitWithStopOnCancellation(); sink.AwaitWithStopOnCancellation() ] -#endif // cosmos -#if esdb - let srcE, context, spec = args.BuildEventStoreParams() - - let connectEs () = srcE.Connect(Log.Logger, Log.Logger, AppName, Equinox.EventStore.NodePreference.Master) - let cache = Equinox.Cache(AppName, sizeMb = 10) - - let checkpoints = Checkpoints.Cosmos.create spec.groupName (context, cache) - -#if kafka // esdb && kafka - let broker, topic = args.Target.BuildTargetParams() + let consumerGroupName, maxReadAhead, maxConcurrentProcessors = args.ProcessorParams() + let buildSourceConfig, target, dumpMetrics = args.ConnectSource(AppName) +#if kafka // kafka + let broker, topic = target.BuildTargetParams() let producer = Propulsion.Kafka.Producer(Log.Logger, 
AppName, broker, Confluent.Kafka.Acks.All, topic) +#if parallelOnly // kafka && parallelOnly + let sink = Propulsion.Kafka.ParallelProducerSink.Start(maxReadAhead, maxConcurrentProcessors, Handler.render, producer, args.StatsInterval) +#else // kafka && !parallelOnly let stats = Handler.ProductionStats(Log.Logger, args.StatsInterval, args.StateInterval) - let sink = Propulsion.Kafka.StreamsProducerSink.Start(Log.Logger, maxReadAhead, maxConcurrentStreams, Handler.render, producer, stats, args.StatsInterval) -#else // esdb && !kafka + let sink = Propulsion.Kafka.StreamsProducerSink.Start(Log.Logger, maxReadAhead, maxConcurrentProcessors, Handler.render, producer, stats, statsInterval = args.StatsInterval) +#endif // kafka && !parallelOnly +#else // !kafka let stats = Handler.Stats(Log.Logger, args.StatsInterval, args.StateInterval) let sink = Propulsion.Streams.StreamsProjector.Start(Log.Logger, maxReadAhead, maxConcurrentStreams, Handler.handle, stats, args.StatsInterval) -#endif // esdb && !kafka - let pumpSource = - let filterByStreamName _ = true // see `dotnet new proReactor --filter` for an example of how to rig filtering arguments - Propulsion.EventStore.EventStoreSource.Run( - Log.Logger, sink, checkpoints, connectEs, spec, Handler.tryMapEvent filterByStreamName, - maxReadAhead, args.StatsInterval) - [ pumpSource; sink.AwaitWithStopOnCancellation() ] -#endif // esdb -//#if sss - let srcSql = args.SqlStreamStore - - let monitored = srcSql.Connect() - - let connectionString = srcSql.BuildCheckpointsConnectionString() - let checkpointEventInterval = TimeSpan.FromHours 1. 
// Ignored when storing to Propulsion.SqlStreamStore.ReaderCheckpoint; relevant for Cosmos - let groupName = "default" - let checkpoints = Propulsion.SqlStreamStore.ReaderCheckpoint.Service(connectionString, groupName, checkpointEventInterval) - -#if kafka // sss && kafka - let broker, topic = args.Target.BuildTargetParams() - let producer = Propulsion.Kafka.Producer(Log.Logger, AppName, broker, Confluent.Kafka.Acks.All, topic) - let stats = Handler.ProductionStats(Log.Logger, args.StatsInterval, args.StateInterval) - let sink = Propulsion.Kafka.StreamsProducerSink.Start(Log.Logger, maxReadAhead, maxConcurrentStreams, Handler.render, producer, stats, args.StatsInterval) -#else // sss && !kafka - let stats = Handler.Stats(Log.Logger, args.StatsInterval, args.StateInterval) - let sink = Propulsion.Streams.Default.Config.Start(Log.Logger, maxReadAhead, maxConcurrentStreams, Handler.handle, stats, args.StatsInterval) -#endif // sss && !kafka - let pumpSource = - let source = - Propulsion.SqlStreamStore.SqlStreamStoreSource - ( Log.Logger, args.StatsInterval, - monitored, srcSql.MaxBatchSize, srcSql.TailInterval, - checkpoints, sink, Handler.categoryFilter, hydrateBodies = true) - source.Pump - [ pumpSource; sink.AwaitWithStopOnCancellation() ] -//#endif // sss - +#endif // !kafka + let source, _awaitReactions = + let sourceConfig = buildSourceConfig Log.Logger consumerGroupName + Handler.Config.StartSource(Log.Logger, sink, sourceConfig) + [| Async.AwaitKeyboardInterruptAsTaskCanceledException() + source.AwaitWithStopOnCancellation() + sink.AwaitWithStopOnCancellation() |] + let run args = build args |> Async.Parallel |> Async.Ignore @@ -440,9 +176,9 @@ let run args = let main argv = try let args = Args.parse EnvVar.tryGet argv try Log.Logger <- LoggerConfiguration().Configure(verbose=args.Verbose).CreateLogger() - try run args |> Async.RunSynchronously; 0 - with e when not (e :? 
MissingArg) -> Log.Fatal(e, "Exiting"); 2 + try run args |> Async.RunSynchronously; 0 + with e when not (e :? Args.MissingArg) && not (e :? System.Threading.Tasks.TaskCanceledException) -> Log.Fatal(e, "Exiting"); 2 finally Log.CloseAndFlush() - with MissingArg msg -> eprintfn "%s" msg; 1 + with Args.MissingArg msg -> eprintfn "%s" msg; 1 | :? Argu.ArguParseException as e -> eprintfn "%s" e.Message; 1 | e -> eprintf "Exception %s" e.Message; 1 diff --git a/propulsion-projector/Projector.fsproj b/propulsion-projector/Projector.fsproj index 3d39d28a0..93763c63f 100644 --- a/propulsion-projector/Projector.fsproj +++ b/propulsion-projector/Projector.fsproj @@ -4,11 +4,16 @@ Exe net6.0 5 + esdb;sss;cosmos;dynamo;kafka + + + + @@ -16,22 +21,25 @@ - + + - + + + + - + - - + + - - + diff --git a/propulsion-projector/README.md b/propulsion-projector/README.md index c675b452f..1b3ea9d98 100644 --- a/propulsion-projector/README.md +++ b/propulsion-projector/README.md @@ -16,6 +16,24 @@ This project was generated using: dotnet new proProjector # use --help to see options //#endif // cosmos && !kafka //#endif // cosmos +//#if dynamo +//#if kafka // dynamo && kafka +# Propulsion DynamoDb -> Kafka Projector + +This project was generated using: + + dotnet new -i Equinox.Templates # just once, to install/update in the local templates store + dotnet new proProjector -s dynamo -k # -k => include Kafka projection logic +//#else // dynamo && !kafka +# Propulsion DynamoDb Projector (without Kafka emission) + +This project was generated using: + + dotnet new -i Equinox.Templates # just once, to install/update in the local templates store + # add -k to add Kafka Projection logic + dotnet new proProjector -s dynamo # use --help to see options +//#endif // dynamo && !kafka +//#endif // dynamo //#if esdb //#if kafka // esdb && kafka # Propulsion EventStoreDB -> Kafka Projector @@ -141,8 +159,8 @@ This project was generated using: # `-g default` defines the Projector Group identity - each has 
separated state in the Leases (`-aux`) Container (aka processorName) # `-t topic0` identifies the Kafka topic to which the Projector should write # cosmos specifies the source (if you have specified 3x EQUINOX_COSMOS_* environment vars, no arguments are needed) - # `-mi 1000` sets the change feed maximum item limit to 1000 - dotnet run -- -g default -t topic0 cosmos -mi 1000 + # `-b 1000` sets the change feed maximum item limit to 1000 + dotnet run -- -g default -t topic0 cosmos -b 1000 # (assuming you've scaled up enough to have >1 physical partition range, you can run a second instance [in a second console] with the same arguments) //#endif // kafka && cosmos @@ -167,8 +185,8 @@ This project was generated using: # `-g default` defines the Projector Group identity - each has separated state in the Leases (`-aux`) Container (aka processorName) # cosmos specifies the source (if you have specified 3x EQUINOX_COSMOS_* environment vars, no arguments are needed) - # `-mi 1000` sets the max batch size to 1000 - dotnet run -- -g default cosmos -mi 1000 + # `-b 1000` sets the max batch size to 1000 + dotnet run -- -g default cosmos -b 1000 # NB (assuming you've scaled up enough to have >1 physical partition range, you can run a second instance in a second console with the same arguments) //#endif // !kafka && cosmos diff --git a/propulsion-projector/SourceArgs.fs b/propulsion-projector/SourceArgs.fs new file mode 100644 index 000000000..42249bba3 --- /dev/null +++ b/propulsion-projector/SourceArgs.fs @@ -0,0 +1,295 @@ +module ProjectorTemplate.SourceArgs + +open Argu +open Serilog +open System + +type Configuration(tryGet) = + inherit Args.Configuration(tryGet) +#if dynamo + member _.DynamoIndexTable = tryGet Args.INDEX_TABLE +#endif + +#if cosmos +module Cosmos = + + type [] Parameters = + | [] Verbose + | [] ConnectionMode of Microsoft.Azure.Cosmos.ConnectionMode + | [] Connection of string + | [] Database of string + | [] Container of string + | [] Timeout of float + | 
[] Retries of int + | [] RetriesWaitTime of float + + | [] LeaseContainer of string + | [] FromTail + | [] MaxItems of int + | [] LagFreqM of float + +#if kafka + | [] Kafka of ParseResults +#endif + interface IArgParserTemplate with + member p.Usage = p |> function + | Verbose -> "request Verbose Logging from ChangeFeedProcessor and Store. Default: off" + | ConnectionMode _ -> "override the connection mode. Default: Direct." + | Connection _ -> "specify a connection string for a Cosmos account. (optional if environment variable EQUINOX_COSMOS_CONNECTION specified)" + | Database _ -> "specify a database name for store. (optional if environment variable EQUINOX_COSMOS_DATABASE specified)" + | Container _ -> "specify a container name for store. (optional if environment variable EQUINOX_COSMOS_CONTAINER specified)" + | Timeout _ -> "specify operation timeout in seconds. Default: 5." + | Retries _ -> "specify operation retries. Default: 9." + | RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds. Default: 30." + + | LeaseContainer _ -> "specify Container Name (in this [target] Database) for Leases container. Default: `SourceContainer` + `-aux`." + | FromTail _ -> "(iff the Consumer Name is fresh) - force skip to present Position. Default: Never skip an event." + | MaxItems _ -> "maximum item count to supply for the Change Feed query. Default: use response size limit" + | LagFreqM _ -> "specify frequency (minutes) to dump lag stats. Default: 1" + +#if kafka + | Kafka _ -> "Kafka Sink parameters." +#endif + type Arguments(c : Args.Configuration, p : ParseResults) = + let discovery = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString + let mode = p.TryGetResult ConnectionMode + let timeout = p.GetResult(Timeout, 5.) |> TimeSpan.FromSeconds + let retries = p.GetResult(Retries, 9) + let maxRetryWaitTime = p.GetResult(RetriesWaitTime, 30.) 
|> TimeSpan.FromSeconds + let connector = Equinox.CosmosStore.CosmosStoreConnector(discovery, timeout, retries, maxRetryWaitTime, ?mode = mode) + let database = p.TryGetResult Database |> Option.defaultWith (fun () -> c.CosmosDatabase) + let containerId = p.TryGetResult Container |> Option.defaultWith (fun () -> c.CosmosContainer) + let leaseContainerId = p.GetResult(LeaseContainer, containerId + "-aux") + let fromTail = p.Contains FromTail + let maxItems = p.TryGetResult MaxItems + let tailSleepInterval = TimeSpan.FromMilliseconds 500. + let lagFrequency = p.GetResult(LagFreqM, 1.) |> TimeSpan.FromMinutes + member _.Verbose = p.Contains Verbose + member private _.ConnectLeases() = connector.CreateUninitialized(database, leaseContainerId) + member x.MonitoringParams(log : ILogger) = + let leases : Microsoft.Azure.Cosmos.Container = x.ConnectLeases() + log.Information("ChangeFeed Leases Database {db} Container {container}. MaxItems limited to {maxItems}", + leases.Database.Id, leases.Id, Option.toNullable maxItems) + if fromTail then log.Warning("(If new projector group) Skipping projection of all existing events.") + (leases, fromTail, maxItems, tailSleepInterval, lagFrequency) + member x.ConnectMonitored() = + connector.ConnectMonitored(database, containerId) +#if kafka + member val Kafka = + match p.GetSubCommand() with + | Kafka kafka -> Args.KafkaSinkArguments(c, kafka) + | _ -> Args.missingArg "Must specify `kafka` arguments" +#endif + +#endif // cosmos +#if dynamo +module Dynamo = + + type [] Parameters = + | [] Verbose + | [] ServiceUrl of string + | [] AccessKey of string + | [] SecretKey of string + | [] Table of string + | [] Retries of int + | [] RetriesTimeoutS of float + | [] IndexTable of string + | [] IndexSuffix of string + | [] MaxItems of int + | [] FromTail + | [] StreamsDop of int +#if kafka + | [] Kafka of ParseResults +#endif + interface IArgParserTemplate with + member p.Usage = p |> function + | Verbose -> "Include low level Store logging." 
+ | ServiceUrl _ -> "specify a server endpoint for a Dynamo account. (optional if environment variable " + Args.SERVICE_URL + " specified)" + | AccessKey _ -> "specify an access key id for a Dynamo account. (optional if environment variable " + Args.ACCESS_KEY + " specified)" + | SecretKey _ -> "specify a secret access key for a Dynamo account. (optional if environment variable " + Args.SECRET_KEY + " specified)" + | Retries _ -> "specify operation retries (default: 9)." + | RetriesTimeoutS _ -> "specify max wait-time including retries in seconds (default: 60)" + | Table _ -> "specify a table name for the primary store. (optional if environment variable " + Args.TABLE + " specified)" + | IndexTable _ -> "specify a table name for the index store. (optional if environment variable " + Args.INDEX_TABLE + " specified. default: `Table`+`IndexSuffix`)" + | IndexSuffix _ -> "specify a suffix for the index store. (optional if environment variable " + Args.INDEX_TABLE + " specified. default: \"-index\")" + | MaxItems _ -> "maximum events to load in a batch. Default: 100" + | FromTail _ -> "(iff the Consumer Name is fresh) - force skip to present Position. Default: Never skip an event." + | StreamsDop _ -> "parallelism when loading events from Store Feed Source. Default 4" +#if kafka + | Kafka _ -> "Kafka Sink parameters." 
+#endif + + type Arguments(c : Configuration, p : ParseResults) = + let serviceUrl = p.TryGetResult ServiceUrl |> Option.defaultWith (fun () -> c.DynamoServiceUrl) + let accessKey = p.TryGetResult AccessKey |> Option.defaultWith (fun () -> c.DynamoAccessKey) + let secretKey = p.TryGetResult SecretKey |> Option.defaultWith (fun () -> c.DynamoSecretKey) + let table = p.TryGetResult Table |> Option.defaultWith (fun () -> c.DynamoTable) + let indexSuffix = p.GetResult(IndexSuffix, "-index") + let indexTable = p.TryGetResult IndexTable |> Option.orElseWith (fun () -> c.DynamoIndexTable) |> Option.defaultWith (fun () -> table + indexSuffix) + let fromTail = p.Contains FromTail + let tailSleepInterval = TimeSpan.FromMilliseconds 500. + let batchSizeCutoff = p.GetResult(MaxItems, 100) + let streamsDop = p.GetResult(StreamsDop, 4) + let timeout = p.GetResult(RetriesTimeoutS, 60.) |> TimeSpan.FromSeconds + let retries = p.GetResult(Retries, 9) + let connector = Equinox.DynamoStore.DynamoStoreConnector(serviceUrl, accessKey, secretKey, timeout, retries) + let client = connector.CreateClient() + let indexStoreClient = lazy client.ConnectStore("Index", indexTable) + member val Verbose = p.Contains Verbose + member _.Connect() = connector.LogConfiguration() + client.ConnectStore("Main", table) |> DynamoStoreContext.create + member _.MonitoringParams(log : ILogger) = + log.Information("DynamoStoreSource BatchSizeCutoff {batchSizeCutoff} Hydrater parallelism {streamsDop}", batchSizeCutoff, streamsDop) + let indexStoreClient = indexStoreClient.Value + if fromTail then log.Warning("(If new projector group) Skipping projection of all existing events.") + indexStoreClient, fromTail, batchSizeCutoff, tailSleepInterval, streamsDop + member _.CreateCheckpointStore(group, cache) = + let indexTable = indexStoreClient.Value + indexTable.CreateCheckpointService(group, cache, Config.log) +#if kafka + member val Kafka = + match p.GetSubCommand() with + | Kafka kafka -> 
Args.KafkaSinkArguments(c, kafka) + | _ -> Args.missingArg "Must specify `kafka` arguments" +#endif + +#endif // dynamo +#if esdb +module Esdb = + + type [] Parameters = + | [] Verbose + | [] BatchSize of int + | [] Connection of string + | [] Credentials of string + | [] Timeout of float + | [] Retries of int + + | [] FromTail + + | [] Cosmos of ParseResults + | [] Dynamo of ParseResults + interface IArgParserTemplate with + member p.Usage = p |> function + | Verbose -> "Include low level Store logging." + | BatchSize _ -> "maximum events to load in a batch. Default: 100" + | Connection _ -> "EventStore Connection String. (optional if environment variable EQUINOX_ES_CONNECTION specified)" + | Credentials _ -> "Credentials string for EventStore (used as part of connection string, but NOT logged). Default: use EQUINOX_ES_CREDENTIALS environment variable (or assume no credentials)" + | Timeout _ -> "specify operation timeout in seconds. Default: 20." + | Retries _ -> "specify operation retries. Default: 3." + + | FromTail -> "Start the processing from the Tail" + + | Cosmos _ -> "CosmosDB Target Store parameters (also used for checkpoint storage)." + | Dynamo _ -> "DynamoDB Target Store parameters (also used for checkpoint storage)." + type Arguments(c : Configuration, p : ParseResults) = + let startFromTail = p.Contains FromTail + let batchSize = p.GetResult(BatchSize, 100) + let tailSleepInterval = TimeSpan.FromSeconds 0.5 + let connectionStringLoggable = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.EventStoreConnection) + let credentials = p.TryGetResult Credentials |> Option.orElseWith (fun () -> c.MaybeEventStoreCredentials) + let discovery = match credentials with Some x -> String.Join(";", connectionStringLoggable, x) | None -> connectionStringLoggable + |> Equinox.EventStoreDb.Discovery.ConnectionString + let retries = p.GetResult(Retries, 3) + let timeout = p.GetResult(Timeout, 20.) 
|> TimeSpan.FromSeconds + let checkpointInterval = TimeSpan.FromHours 1. + member val Verbose = p.Contains Verbose + + member _.Connect(log : ILogger, appName, nodePreference) : Equinox.EventStoreDb.EventStoreConnection = + log.Information("EventStore {discovery}", connectionStringLoggable) + let tags=["M", Environment.MachineName; "I", Guid.NewGuid() |> string] + Equinox.EventStoreDb.EventStoreConnector(timeout, retries, tags = tags) + .Establish(appName, discovery, Equinox.EventStoreDb.ConnectionStrategy.ClusterSingle nodePreference) + + member _.MonitoringParams(log : ILogger) = + log.Information("EventStoreSource BatchSize {batchSize} ", batchSize) + startFromTail, batchSize, tailSleepInterval + member _.CreateCheckpointStore(group, store : Config.Store) : Propulsion.Feed.IFeedCheckpointStore = + Args.Checkpoints.createCheckpointStore (group, checkpointInterval, store) + member private _.TargetStoreArgs : Args.TargetStoreArgs = + match p.GetSubCommand() with + | Cosmos cosmos -> Args.TargetStoreArgs.Cosmos (Args.Cosmos.Arguments(c, cosmos)) + | Dynamo dynamo -> Args.TargetStoreArgs.Dynamo (Args.Dynamo.Arguments(c, dynamo)) + | _ -> Args.missingArg "Must specify `cosmos` or `dynamo` target store when source is `esdb`" +#if kafka + member x.Kafka = + match x.TargetStoreArgs with + | Args.TargetStoreArgs.Cosmos cosmos -> cosmos.Kafka + | Args.TargetStoreArgs.Dynamo dynamo -> dynamo.Kafka +#endif + member x.ConnectTarget(cache) : Config.Store = + Args.TargetStoreArgs.connectTarget x.TargetStoreArgs cache + +#endif // esdb +#if sss +module Sss = + + // TOCONSIDER: add DB connectors other than MsSql + type [] Parameters = + | [] Tail of intervalS: float + | [] Connection of string + | [] Credentials of string + | [] Schema of string + + | [] BatchSize of int + | [] FromTail + + | [] CheckpointsConnection of string + | [] CheckpointsCredentials of string +#if kafka + | [] Kafka of ParseResults +#endif + interface IArgParserTemplate with + member p.Usage = p |> 
function + | Tail _ -> "Polling interval in Seconds. Default: 1" + | BatchSize _ -> "Maximum events to request from feed. Default: 512" + | Connection _ -> "Connection string for SqlStreamStore db. Optional if SQLSTREAMSTORE_CONNECTION specified" + | Credentials _ -> "Credentials string for SqlStreamStore db (used as part of connection string, but NOT logged). Default: use SQLSTREAMSTORE_CREDENTIALS environment variable (or assume no credentials)" + | Schema _ -> "Database schema name" + | FromTail -> "Start the processing from the Tail" + | CheckpointsConnection _ ->"Connection string for Checkpoints sql db. Optional if SQLSTREAMSTORE_CONNECTION_CHECKPOINTS specified. Default: same as `Connection`" + | CheckpointsCredentials _ ->"Credentials string for Checkpoints sql db. (used as part of checkpoints connection string, but NOT logged). Default (when no `CheckpointsConnection`: use `Credentials. Default (when `CheckpointsConnection` specified): use SQLSTREAMSTORE_CREDENTIALS_CHECKPOINTS environment variable (or assume no credentials)" +#if kafka + | Kafka _ -> "Kafka Sink parameters." +#endif + + type Arguments(c : Configuration, p : ParseResults) = + let startFromTail = p.Contains FromTail + let tailSleepInterval = p.GetResult(Tail, 1.) |> TimeSpan.FromSeconds + let checkpointEventInterval = TimeSpan.FromHours 1. 
// Ignored when storing to Propulsion.SqlStreamStore.ReaderCheckpoint + let batchSize = p.GetResult(BatchSize, 512) + let connection = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.SqlStreamStoreConnection) + let credentials = p.TryGetResult Credentials |> Option.orElseWith (fun () -> c.SqlStreamStoreCredentials) |> Option.toObj + let schema = p.GetResult(Schema, null) + member val Verbose = false + + member x.BuildCheckpointsConnectionString() = + let c, cs = + match p.TryGetResult CheckpointsConnection, p.TryGetResult CheckpointsCredentials with + | Some c, Some p -> c, String.Join(";", c, p) + | None, Some p -> let c = connection in c, String.Join(";", c, p) + | None, None -> let c = connection in c, String.Join(";", c, credentials) + | Some cc, None -> let p = c.SqlStreamStoreCredentialsCheckpoints |> Option.toObj + cc, String.Join(";", cc, p) + Log.Information("Checkpoints MsSql Connection {connectionString}", c) + cs + member x.Connect() = + let conn, creds, schema, autoCreate = connection, credentials, schema, false + let sssConnectionString = String.Join(";", conn, creds) + Log.Information("SqlStreamStore MsSql Connection {connectionString} Schema {schema} AutoCreate {autoCreate}", conn, schema, autoCreate) + let rawStore = Equinox.SqlStreamStore.MsSql.Connector(sssConnectionString, schema, autoCreate=autoCreate).Connect() |> Async.RunSynchronously + Equinox.SqlStreamStore.SqlStreamStoreConnection rawStore + member _.MonitoringParams(log : ILogger) = + log.Information("SqlStreamStoreSource BatchSize {batchSize} ", batchSize) + startFromTail, batchSize, tailSleepInterval + member x.CreateCheckpointStoreSql(groupName) : Propulsion.Feed.IFeedCheckpointStore = + let connectionString = x.BuildCheckpointsConnectionString() + Propulsion.SqlStreamStore.ReaderCheckpoint.Service(connectionString, groupName, checkpointEventInterval) +#if kafka + member val Kafka = + match p.GetSubCommand() with + | Kafka kafka -> Args.KafkaSinkArguments(c, kafka) + | _ 
-> Args.missingArg "Must specify `kafka` arguments" +#endif + +#endif // sss diff --git a/propulsion-projector/SourceConfig.fs b/propulsion-projector/SourceConfig.fs new file mode 100644 index 000000000..284432e77 --- /dev/null +++ b/propulsion-projector/SourceConfig.fs @@ -0,0 +1,102 @@ +namespace ProjectorTemplate + +open System + +[] +type SourceConfig = + | Cosmos of monitoredContainer : Microsoft.Azure.Cosmos.Container + * leasesContainer : Microsoft.Azure.Cosmos.Container + * checkpoints : CosmosFeedConfig + * tailSleepInterval : TimeSpan + | Dynamo of indexStore : Equinox.DynamoStore.DynamoStoreClient + * checkpoints : Propulsion.Feed.IFeedCheckpointStore + * loading : DynamoLoadModeConfig + * startFromTail : bool + * batchSizeCutoff : int + * tailSleepInterval : TimeSpan + * statsInterval : TimeSpan + | Esdb of client : EventStore.Client.EventStoreClient + * checkpoints : Propulsion.Feed.IFeedCheckpointStore + * hydrateBodies : bool + * startFromTail : bool + * batchSize : int + * tailSleepInterval : TimeSpan + * statsInterval : TimeSpan + | Sss of client : SqlStreamStore.IStreamStore + * checkpoints : Propulsion.Feed.IFeedCheckpointStore + * hydrateBodies : bool + * startFromTail : bool + * batchSize : int + * tailSleepInterval : TimeSpan + * statsInterval : TimeSpan +and [] CosmosFeedConfig = + | Ephemeral of processorName : string + | Persistent of processorName : string * startFromTail : bool * maxItems : int option * lagFrequency : TimeSpan +and [] DynamoLoadModeConfig = + | Hydrate of monitoredContext : Equinox.DynamoStore.DynamoStoreContext * hydrationConcurrency : int + +module SourceConfig = + module Cosmos = + open Propulsion.CosmosStore + let start log (sink : Propulsion.Streams.Default.Sink) categoryFilter + (monitoredContainer, leasesContainer, checkpointConfig, tailSleepInterval) : Propulsion.Pipeline * (TimeSpan -> Async) option = + let parseFeedDoc = EquinoxSystemTextJsonParser.enumStreamEvents categoryFilter + let observer = 
CosmosStoreSource.CreateObserver(log, sink.StartIngester, Seq.collect parseFeedDoc) + let source = + match checkpointConfig with + | Ephemeral processorName -> + let withStartTime1sAgo (x : Microsoft.Azure.Cosmos.ChangeFeedProcessorBuilder) = + x.WithStartTime(let t = DateTime.UtcNow in t.AddSeconds -1.) + let lagFrequency = TimeSpan.FromMinutes 1. + CosmosStoreSource.Start(log, monitoredContainer, leasesContainer, processorName, observer, + startFromTail = true, customize = withStartTime1sAgo, tailSleepInterval = tailSleepInterval, + lagReportFreq = lagFrequency) + | Persistent (processorName, startFromTail, maxItems, lagFrequency) -> + CosmosStoreSource.Start(log, monitoredContainer, leasesContainer, processorName, observer, + startFromTail = startFromTail, ?maxItems = maxItems, tailSleepInterval = tailSleepInterval, + lagReportFreq = lagFrequency) + source, None + module Dynamo = + open Propulsion.DynamoStore + let start (log, storeLog) (sink : Propulsion.Streams.Default.Sink) categoryFilter + (indexStore, checkpoints, loadModeConfig, startFromTail, tailSleepInterval, batchSizeCutoff, statsInterval) : Propulsion.Pipeline * (TimeSpan -> Async) option = + let loadMode = + match loadModeConfig with + | Hydrate (monitoredContext, hydrationConcurrency) -> LoadMode.Hydrated (categoryFilter, hydrationConcurrency, monitoredContext) + let source = + DynamoStoreSource( + log, statsInterval, + indexStore, batchSizeCutoff, tailSleepInterval, + checkpoints, sink, loadMode, + startFromTail = startFromTail, storeLog = storeLog) + source.Start(), Some (fun propagationDelay -> source.Monitor.AwaitCompletion(propagationDelay, ignoreSubsequent = false)) + module Esdb = + open Propulsion.EventStoreDb + let start log (sink : Propulsion.Streams.Default.Sink) categoryFilter + (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval) : Propulsion.Pipeline * (TimeSpan -> Async) option = + let source = + EventStoreSource( + log, statsInterval, + 
client, batchSize, tailSleepInterval, + checkpoints, sink, categoryFilter, hydrateBodies = hydrateBodies, startFromTail = startFromTail) + source.Start(), Some (fun propagationDelay -> source.Monitor.AwaitCompletion(propagationDelay, ignoreSubsequent = false)) + module Sss = + open Propulsion.SqlStreamStore + let start log (sink : Propulsion.Streams.Default.Sink) categoryFilter + (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval) : Propulsion.Pipeline * (TimeSpan -> Async) option = + let source = + SqlStreamStoreSource( + log, statsInterval, + client, batchSize, tailSleepInterval, + checkpoints, sink, categoryFilter, hydrateBodies = hydrateBodies, startFromTail = startFromTail) + source.Start(), Some (fun propagationDelay -> source.Monitor.AwaitCompletion(propagationDelay, ignoreSubsequent = false)) + + let start (log, storeLog) sink categoryFilter : SourceConfig -> Propulsion.Pipeline * (TimeSpan -> Async) option = function + | SourceConfig.Cosmos (monitored, leases, checkpointConfig, tailSleepInterval) -> + Cosmos.start log sink categoryFilter (monitored, leases, checkpointConfig, tailSleepInterval) + | SourceConfig.Dynamo (indexStore, checkpoints, loading, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval) -> + Dynamo.start (log, storeLog) sink categoryFilter (indexStore, checkpoints, loading, startFromTail, tailSleepInterval, batchSizeCutoff, statsInterval) + | SourceConfig.Esdb (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval) -> + Esdb.start log sink categoryFilter (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval) + | SourceConfig.Sss (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval) -> + Sss.start log sink categoryFilter (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval) diff --git a/propulsion-pruner/Program.fs 
b/propulsion-pruner/Program.fs index 6506fc47b..d21c19641 100644 --- a/propulsion-pruner/Program.fs +++ b/propulsion-pruner/Program.fs @@ -27,23 +27,23 @@ module Args = | [] MaxWriters of int | [] SrcCosmos of ParseResults interface IArgParserTemplate with - member a.Usage = a |> function + member p.Usage = p |> function | Verbose -> "request Verbose Logging. Default: off" | PrometheusPort _ -> "port from which to expose a Prometheus /metrics endpoint. Default: off" | ProcessorName _ -> "Projector consumer group name." | MaxReadAhead _ -> "maximum number of batches to let processing get ahead of completion. Default: 8." | MaxWriters _ -> "maximum number of concurrent writes to target. Default: 4." | SrcCosmos _ -> "Cosmos Archive parameters." - and Arguments(c : Configuration, a : ParseResults) = - member val Verbose = a.Contains Parameters.Verbose - member val PrometheusPort = a.TryGetResult PrometheusPort - member val ProcessorName = a.GetResult ProcessorName - member val MaxReadAhead = a.GetResult(MaxReadAhead, 8) - member val MaxWriters = a.GetResult(MaxWriters, 4) + and Arguments(c : Configuration, p : ParseResults) = + member val Verbose = p.Contains Parameters.Verbose + member val PrometheusPort = p.TryGetResult PrometheusPort + member val ProcessorName = p.GetResult ProcessorName + member val MaxReadAhead = p.GetResult(MaxReadAhead, 8) + member val MaxWriters = p.GetResult(MaxWriters, 4) member val StatsInterval = TimeSpan.FromMinutes 1. member val StateInterval = TimeSpan.FromMinutes 5. 
member val Source : CosmosSourceArguments = - match a.GetSubCommand() with + match p.GetSubCommand() with | SrcCosmos cosmos -> CosmosSourceArguments(c, cosmos) | _ -> missingArg "Must specify cosmos for Source" member x.DeletionTarget = x.Source.Target @@ -66,7 +66,7 @@ module Args = and [] CosmosSourceParameters = | [] Verbose | [] FromTail - | [] MaxItems of int + | [] MaxItems of int | [] LagFreqM of float | [] LeaseContainer of string @@ -80,7 +80,7 @@ module Args = | [] DstCosmos of ParseResults interface IArgParserTemplate with - member a.Usage = a |> function + member p.Usage = p |> function | Verbose -> "request Verbose Change Feed Processor Logging. Default: off" | FromTail -> "(iff the Consumer Name is fresh) - force skip to present Position. Default: Never skip an event." | MaxItems _ -> "maximum item count to request from feed. Default: unlimited" @@ -96,26 +96,26 @@ module Args = | RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds. Default: 30." | DstCosmos _ -> "CosmosDb Pruning Target parameters." - and CosmosSourceArguments(c : Configuration, a : ParseResults) = - let discovery = a.TryGetResult CosmosSourceParameters.Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString - let mode = a.TryGetResult CosmosSourceParameters.ConnectionMode - let timeout = a.GetResult(CosmosSourceParameters.Timeout, 5.) |> TimeSpan.FromSeconds - let retries = a.GetResult(CosmosSourceParameters.Retries, 5) - let maxRetryWaitTime = a.GetResult(CosmosSourceParameters.RetriesWaitTime, 30.) 
|> TimeSpan.FromSeconds + and CosmosSourceArguments(c : Configuration, p : ParseResults) = + let discovery = p.TryGetResult CosmosSourceParameters.Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString + let mode = p.TryGetResult CosmosSourceParameters.ConnectionMode + let timeout = p.GetResult(CosmosSourceParameters.Timeout, 5.) |> TimeSpan.FromSeconds + let retries = p.GetResult(CosmosSourceParameters.Retries, 5) + let maxRetryWaitTime = p.GetResult(CosmosSourceParameters.RetriesWaitTime, 30.) |> TimeSpan.FromSeconds let connector = Equinox.CosmosStore.CosmosStoreConnector(discovery, timeout, retries, maxRetryWaitTime, ?mode = mode) - member val DatabaseId = a.TryGetResult CosmosSourceParameters.Database |> Option.defaultWith (fun () -> c.CosmosDatabase) - member val ContainerId = a.GetResult CosmosSourceParameters.Container + member val DatabaseId = p.TryGetResult CosmosSourceParameters.Database |> Option.defaultWith (fun () -> c.CosmosDatabase) + member val ContainerId = p.GetResult CosmosSourceParameters.Container member x.MonitoredContainer() = connector.ConnectMonitored(x.DatabaseId, x.ContainerId) - member val Verbose = a.Contains Verbose - member val FromTail = a.Contains CosmosSourceParameters.FromTail - member val MaxItems = a.TryGetResult MaxItems - member val LagFrequency : TimeSpan = a.GetResult(LagFreqM, 1.) |> TimeSpan.FromMinutes - member val LeaseContainerId = a.TryGetResult CosmosSourceParameters.LeaseContainer + member val Verbose = p.Contains Verbose + member val FromTail = p.Contains CosmosSourceParameters.FromTail + member val MaxItems = p.TryGetResult MaxItems + member val LagFrequency : TimeSpan = p.GetResult(LagFreqM, 1.) 
|> TimeSpan.FromMinutes + member val LeaseContainerId = p.TryGetResult CosmosSourceParameters.LeaseContainer member x.ConnectLeases containerId = connector.CreateUninitialized(x.DatabaseId, containerId) member val Target = - match a.GetSubCommand() with + match p.GetSubCommand() with | DstCosmos cosmos -> CosmosSinkArguments(c, cosmos) | _ -> missingArg "Must specify cosmos for Target" and [] CosmosSinkParameters = @@ -128,7 +128,7 @@ module Args = | [] Retries of int | [] RetriesWaitTime of float interface IArgParserTemplate with - member a.Usage = a |> function + member p.Usage = p |> function | ConnectionMode _ -> "override the connection mode. Default: Direct." | Connection _ -> "specify a connection string for a Cosmos account. (optional if environment variable EQUINOX_COSMOS_CONNECTION specified)" | Database _ -> "specify a database name for Cosmos account. (optional if environment variable EQUINOX_COSMOS_DATABASE specified)" @@ -137,19 +137,19 @@ module Args = | Timeout _ -> "specify operation timeout in seconds. Default: 5." | Retries _ -> "specify operation retries. Default: 0." | RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds. Default: 5." - and CosmosSinkArguments(c : Configuration, a : ParseResults) = - let discovery = a.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString - let mode = a.TryGetResult ConnectionMode - let timeout = a.GetResult(CosmosSinkParameters.Timeout, 5.) |> TimeSpan.FromSeconds - let retries = a.GetResult(CosmosSinkParameters.Retries, 0) - let maxRetryWaitTime = a.GetResult(RetriesWaitTime, 5.) 
|> TimeSpan.FromSeconds + and CosmosSinkArguments(c : Configuration, p : ParseResults) = + let discovery = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString + let mode = p.TryGetResult ConnectionMode + let timeout = p.GetResult(CosmosSinkParameters.Timeout, 5.) |> TimeSpan.FromSeconds + let retries = p.GetResult(CosmosSinkParameters.Retries, 0) + let maxRetryWaitTime = p.GetResult(RetriesWaitTime, 5.) |> TimeSpan.FromSeconds let connector = Equinox.CosmosStore.CosmosStoreConnector(discovery, timeout, retries, maxRetryWaitTime, ?mode = mode) - member val DatabaseId = a.TryGetResult Database |> Option.defaultWith (fun () -> c.CosmosDatabase) - member val ContainerId = a.TryGetResult Container |> Option.defaultWith (fun () -> c.CosmosContainer) + member val DatabaseId = p.TryGetResult Database |> Option.defaultWith (fun () -> c.CosmosDatabase) + member val ContainerId = p.TryGetResult Container |> Option.defaultWith (fun () -> c.CosmosContainer) member x.Connect() = connector.ConnectStore("DELETION Target", x.DatabaseId, x.ContainerId) - member val LeaseContainerId = a.TryGetResult LeaseContainer + member val LeaseContainerId = p.TryGetResult LeaseContainer member x.ConnectLeases containerId = connector.CreateUninitialized(x.DatabaseId, containerId) diff --git a/propulsion-pruner/Pruner.fsproj b/propulsion-pruner/Pruner.fsproj index f37e1fd96..5c5e918e4 100644 --- a/propulsion-pruner/Pruner.fsproj +++ b/propulsion-pruner/Pruner.fsproj @@ -14,9 +14,9 @@ - + - + diff --git a/propulsion-reactor/.template.config/template.json b/propulsion-reactor/.template.config/template.json index cdaef17bc..1afed92c4 100644 --- a/propulsion-reactor/.template.config/template.json +++ b/propulsion-reactor/.template.config/template.json @@ -13,7 +13,8 @@ "Reactor" ], "tags": { - "language": "F#" + "language": "F#", + "type": "project" }, "identity": "Propulsion.Template.Reactor", "name": "Propulsion 
EventStore/Cosmos/Kafka Reactor", @@ -31,35 +32,16 @@ "choice": "kafkaEventSpans", "description": "Use Kafka EventSpan feed as input" }, - { - "choice": "changeFeedOnly", - "description": "Only wire for CosmosDB ChangeFeedProcessor source" - }, { "choice": "multiSource", - "description": "Support EventStore $all or CosmosDB ChangeFeedProcessor" + "description": "Support EventStore $all, CosmosDB ChangeFeedProcessor or DynamoStore Index" } ] }, - "kafkaEventSpans": { + "sourceKafka": { "type": "computed", "value": "(source == \"kafkaEventSpans\")" }, - "changeFeedOnly": { - "type": "computed", - "value": "(source == \"changeFeedOnly\")" - }, - "multiSource": { - "type": "computed", - "value": "(source == \"multiSource\")" - }, - "filter": { - "type": "parameter", - "datatype": "bool", - "isRequired": false, - "defaultValue": "false", - "description": "Include logic and commandline handling relating to filtering based on stream names." - }, "blank": { "type": "parameter", "datatype": "bool", @@ -99,13 +81,13 @@ ] }, { - "condition": "!multiSource && !kafka", + "condition": "sourceKafka && !kafka", "exclude": [ "Handler.fs" ] }, { - "condition": "kafkaEventSpans", + "condition": "sourceKafka", "exclude": [ "README.md" ] @@ -113,4 +95,4 @@ ] } ] -} \ No newline at end of file +} diff --git a/propulsion-reactor/Args.fs b/propulsion-reactor/Args.fs new file mode 100644 index 000000000..d8c4136e2 --- /dev/null +++ b/propulsion-reactor/Args.fs @@ -0,0 +1,224 @@ +/// Commandline arguments and/or secrets loading specifications +module ReactorTemplate.Args + +open System + +exception MissingArg of message : string with override this.Message = this.message +let missingArg msg = raise (MissingArg msg) + +#if !(sourceKafka && blank && kafka) +let [] REGION = "EQUINOX_DYNAMO_REGION" +let [] SERVICE_URL = "EQUINOX_DYNAMO_SERVICE_URL" +let [] ACCESS_KEY = "EQUINOX_DYNAMO_ACCESS_KEY_ID" +let [] SECRET_KEY = "EQUINOX_DYNAMO_SECRET_ACCESS_KEY" +let [] TABLE = "EQUINOX_DYNAMO_TABLE" 
+let [] INDEX_TABLE = "EQUINOX_DYNAMO_TABLE_INDEX" +#endif + +type Configuration(tryGet : string -> string option) = + + member val tryGet = tryGet + member _.get key = match tryGet key with Some value -> value | None -> missingArg $"Missing Argument/Environment Variable %s{key}" + +#if !(sourceKafka && blank && kafka) + member x.CosmosConnection = x.get "EQUINOX_COSMOS_CONNECTION" + member x.CosmosDatabase = x.get "EQUINOX_COSMOS_DATABASE" + member x.CosmosContainer = x.get "EQUINOX_COSMOS_CONTAINER" + + member x.DynamoServiceUrl = x.get SERVICE_URL + member x.DynamoAccessKey = x.get ACCESS_KEY + member x.DynamoSecretKey = x.get SECRET_KEY + member x.DynamoTable = x.get TABLE + member x.DynamoRegion = x.tryGet REGION + + member x.EventStoreConnection = x.get "EQUINOX_ES_CONNECTION" + // member x.EventStoreCredentials = x.get "EQUINOX_ES_CREDENTIALS" + member _.MaybeEventStoreConnection = tryGet "EQUINOX_ES_CONNECTION" + member _.MaybeEventStoreCredentials = tryGet "EQUINOX_ES_CREDENTIALS" + + member x.SqlStreamStoreConnection = x.get "SQLSTREAMSTORE_CONNECTION" + member x.SqlStreamStoreCredentials = tryGet "SQLSTREAMSTORE_CREDENTIALS" + member x.SqlStreamStoreCredentialsCheckpoints = tryGet "SQLSTREAMSTORE_CREDENTIALS_CHECKPOINTS" + member x.SqlStreamStoreDatabase = x.get "SQLSTREAMSTORE_DATABASE" + member x.SqlStreamStoreContainer = x.get "SQLSTREAMSTORE_CONTAINER" + +#endif +//#if kafka + member x.Broker = x.get "PROPULSION_KAFKA_BROKER" + member x.Topic = x.get "PROPULSION_KAFKA_TOPIC" +//#endif + + member x.PrometheusPort = tryGet "PROMETHEUS_PORT" |> Option.map int + +#if !(sourceKafka && blank && kafka) +// Type used to represent where checkpoints (for either the FeedConsumer position, or for a Reactor's Event Store subscription position) will be stored +// In a typical app you don't have anything like this as you'll simply use your primary Event Store (see) +module Checkpoints = + + [] + type Store = + | Cosmos of Equinox.CosmosStore.CosmosStoreContext * 
Equinox.Core.ICache + | Dynamo of Equinox.DynamoStore.DynamoStoreContext * Equinox.Core.ICache + (* Propulsion.EventStoreDb does not implement a native checkpoint storage mechanism, + perhaps port https://github.com/absolutejam/Propulsion.EventStoreDB ? + or fork/finish https://github.com/jet/dotnet-templates/pull/81 + alternately one could use a SQL Server DB via Propulsion.SqlStreamStore + + For now, we store the Checkpoints in one of the above stores as this sample uses one for the read models anyway *) + + let create (consumerGroup, checkpointInterval) storeLog : Store -> Propulsion.Feed.IFeedCheckpointStore = function + | Store.Cosmos (context, cache) -> + Propulsion.Feed.ReaderCheckpoint.CosmosStore.create storeLog (consumerGroup, checkpointInterval) (context, cache) + | Store.Dynamo (context, cache) -> + Propulsion.Feed.ReaderCheckpoint.DynamoStore.create storeLog (consumerGroup, checkpointInterval) (context, cache) + let createCheckpointStore (group, checkpointInterval, store : Config.Store) : Propulsion.Feed.IFeedCheckpointStore = + let checkpointStore = + match store with + | Config.Store.Cosmos (context, cache) -> Store.Cosmos (context, cache) + | Config.Store.Dynamo (context, cache) -> Store.Dynamo (context, cache) +#if !(sourceKafka && kafka) + | Config.Store.Esdb _ + | Config.Store.Sss _ -> failwith "unexpected" +#endif + create (group, checkpointInterval) Config.log checkpointStore +#endif + +open Argu + +#if kafka + type [] KafkaSinkParameters = + | [] Broker of string + | [] Topic of string + interface IArgParserTemplate with + member p.Usage = p |> function + | Broker _ -> "specify Kafka Broker, in host:port format. (optional if environment variable PROPULSION_KAFKA_BROKER specified)" + | Topic _ -> "specify Kafka Topic Id. 
(optional if environment variable PROPULSION_KAFKA_TOPIC specified)" + +type KafkaSinkArguments(c : Configuration, p : ParseResults) = + member val Broker = p.TryGetResult Broker |> Option.defaultWith (fun () -> c.Broker) + member val Topic = p.TryGetResult Topic |> Option.defaultWith (fun () -> c.Topic) + member x.BuildTargetParams() = x.Broker, x.Topic + +#endif + +#if !(sourceKafka && blank && kafka) +module Cosmos = + + type [] Parameters = + | [] Verbose + | [] ConnectionMode of Microsoft.Azure.Cosmos.ConnectionMode + | [] Connection of string + | [] Database of string + | [] Container of string + | [] Timeout of float + | [] Retries of int + | [] RetriesWaitTime of float +#if kafka + | [] Kafka of ParseResults +#endif + interface IArgParserTemplate with + member p.Usage = p |> function + | Verbose _ -> "request verbose logging." + | ConnectionMode _ -> "override the connection mode. Default: Direct." + | Connection _ -> "specify a connection string for a Cosmos account. (optional if environment variable EQUINOX_COSMOS_CONNECTION specified)" + | Database _ -> "specify a database name for Cosmos store. (optional if environment variable EQUINOX_COSMOS_DATABASE specified)" + | Container _ -> "specify a container name for Cosmos store. (optional if environment variable EQUINOX_COSMOS_CONTAINER specified)" + | Timeout _ -> "specify operation timeout in seconds (default: 5)." + | Retries _ -> "specify operation retries (default: 1)." + | RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds (default: 5)" +#if kafka + | Kafka _ -> "Kafka Sink parameters." +#endif + type Arguments(c : Configuration, p : ParseResults) = + let connection = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) + let discovery = Equinox.CosmosStore.Discovery.ConnectionString connection + let mode = p.TryGetResult ConnectionMode + let timeout = p.GetResult(Timeout, 5.) 
|> TimeSpan.FromSeconds + let retries = p.GetResult(Retries, 1) + let maxRetryWaitTime = p.GetResult(RetriesWaitTime, 5.) |> TimeSpan.FromSeconds + let connector = Equinox.CosmosStore.CosmosStoreConnector(discovery, timeout, retries, maxRetryWaitTime, ?mode = mode) + let database = p.TryGetResult Database |> Option.defaultWith (fun () -> c.CosmosDatabase) + let container = p.TryGetResult Container |> Option.defaultWith (fun () -> c.CosmosContainer) + member val Verbose = p.Contains Verbose + member _.Connect() = connector.ConnectStore("Target", database, container) +#if kafka + member val Kafka = + match p.GetSubCommand() with + | Parameters.Kafka kafka -> KafkaSinkArguments(c, kafka) + | _ -> missingArg "Must specify `kafka` arguments" +#endif + +module Dynamo = + + type [] Parameters = + | [] Verbose + | [] RegionProfile of string + | [] ServiceUrl of string + | [] AccessKey of string + | [] SecretKey of string + | [] Table of string + | [] Retries of int + | [] RetriesTimeoutS of float +#if kafka + | [] Kafka of ParseResults +#endif + interface IArgParserTemplate with + member p.Usage = p |> function + | Verbose -> "Include low level Store logging." + | RegionProfile _ -> "specify an AWS Region (aka System Name, e.g. \"us-east-1\") to connect to using the implicit AWS SDK/tooling config and/or environment variables etc. Optional if:\n" + + "1) $" + REGION + " specified OR\n" + + "2) Explicit `ServiceUrl`/$" + SERVICE_URL + "+`AccessKey`/$" + ACCESS_KEY + "+`Secret Key`/$" + SECRET_KEY + " specified.\n" + + "See https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html for details" + | ServiceUrl _ -> "specify a server endpoint for a Dynamo account. (Not applicable if `ServiceRegion`/$" + REGION + " specified; Optional if $" + SERVICE_URL + " specified)" + | AccessKey _ -> "specify an access key id for a Dynamo account. 
(Not applicable if `ServiceRegion`/$" + REGION + " specified; Optional if $" + ACCESS_KEY + " specified)" + | SecretKey _ -> "specify a secret access key for a Dynamo account. (Not applicable if `ServiceRegion`/$" + REGION + " specified; Optional if $" + SECRET_KEY + " specified)" + | Table _ -> "specify a table name for the primary store. (optional if $" + TABLE + " specified)" + | Retries _ -> "specify operation retries (default: 1)." + | RetriesTimeoutS _ -> "specify max wait-time including retries in seconds (default: 5)" +#if kafka + | Kafka _ -> "Kafka Sink parameters." +#endif + type Arguments(c : Configuration, p : ParseResults) = + let conn = match p.TryGetResult RegionProfile |> Option.orElseWith (fun () -> c.DynamoRegion) with + | Some systemName -> + Choice1Of2 systemName + | None -> + let serviceUrl = p.TryGetResult ServiceUrl |> Option.defaultWith (fun () -> c.DynamoServiceUrl) + let accessKey = p.TryGetResult AccessKey |> Option.defaultWith (fun () -> c.DynamoAccessKey) + let secretKey = p.TryGetResult SecretKey |> Option.defaultWith (fun () -> c.DynamoSecretKey) + Choice2Of2 (serviceUrl, accessKey, secretKey) + let retries = p.GetResult(Retries, 1) + let timeout = p.GetResult(RetriesTimeoutS, 5.) 
|> TimeSpan.FromSeconds + let connector = match conn with + | Choice1Of2 systemName -> + Equinox.DynamoStore.DynamoStoreConnector(systemName, timeout, retries) + | Choice2Of2 (serviceUrl, accessKey, secretKey) -> + Equinox.DynamoStore.DynamoStoreConnector(serviceUrl, accessKey, secretKey, timeout, retries) + let table = p.TryGetResult Table |> Option.defaultWith (fun () -> c.DynamoTable) + member val Verbose = p.Contains Verbose + member _.Connect() = connector.LogConfiguration() + let client = connector.CreateClient() + client.ConnectStore("Main", table) +#if kafka + member val Kafka = + match p.GetSubCommand() with + | Kafka kafka -> KafkaSinkArguments(c, kafka) + | _ -> missingArg "Must specify `kafka` arguments" +#endif + +type [] + TargetStoreArgs = + | Cosmos of Cosmos.Arguments + | Dynamo of Dynamo.Arguments + +module TargetStoreArgs = + + let connectTarget targetStore cache: Config.Store = + match targetStore with + | TargetStoreArgs.Cosmos a -> + let context = a.Connect() |> Async.RunSynchronously |> CosmosStoreContext.create + Config.Store.Cosmos (context, cache) + | TargetStoreArgs.Dynamo a -> + let context = a.Connect() |> DynamoStoreContext.create + Config.Store.Dynamo (context, cache) +#endif diff --git a/propulsion-reactor/Config.fs b/propulsion-reactor/Config.fs index 73edc0d09..bbe338157 100644 --- a/propulsion-reactor/Config.fs +++ b/propulsion-reactor/Config.fs @@ -1,25 +1,27 @@ module ReactorTemplate.Config let log = Serilog.Log.ForContext("isMetric", true) -let createDecider stream = Equinox.Decider(log, stream, maxAttempts = 3) +let createDecider cat = Equinox.Decider.resolve log cat module EventCodec = open FsCodec.SystemTextJson let private defaultOptions = Options.Create() - let create<'t when 't :> TypeShape.UnionContract.IUnionContract> () = - Codec.Create<'t>(options = defaultOptions).ToByteArrayCodec() + let gen<'t when 't :> TypeShape.UnionContract.IUnionContract> = + Codec.Create<'t>(options = defaultOptions) + let genJe<'t when 't :> 
TypeShape.UnionContract.IUnionContract> = + CodecJsonElement.Create<'t>(options = defaultOptions) let private withUpconverter<'c, 'e when 'c :> TypeShape.UnionContract.IUnionContract> up : FsCodec.IEventCodec<'e, _, _> = let down (_ : 'e) = failwith "Unexpected" - Codec.Create<'e, 'c, _>(up, down, options = defaultOptions).ToByteArrayCodec() + Codec.Create<'e, 'c, _>(up, down, options = defaultOptions) let withIndex<'c when 'c :> TypeShape.UnionContract.IUnionContract> : FsCodec.IEventCodec = - let up (raw : FsCodec.ITimelineEvent<_>, e) = raw.Index, e + let up struct (raw : FsCodec.ITimelineEvent<_>, e) = raw.Index, e withUpconverter<'c, int64 * 'c> up module Cosmos = - let private createCached codec initial fold accessStrategy (context, cache) = + let private createCached codec initial fold accessStrategy (context, cache) : Equinox.Category<_, _, _> = let cacheStrategy = Equinox.CosmosStore.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) Equinox.CosmosStore.CosmosStoreCategory(context, codec, fold, initial, cacheStrategy, accessStrategy) @@ -31,17 +33,39 @@ module Cosmos = let accessStrategy = Equinox.CosmosStore.AccessStrategy.RollingState toSnapshot createCached codec initial fold accessStrategy (context, cache) -//#if multiSource +module Dynamo = + + let private createCached codec initial fold accessStrategy (context, cache) : Equinox.Category<_, _, _> = + let cacheStrategy = Equinox.DynamoStore.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) 
+ Equinox.DynamoStore.DynamoStoreCategory(context, FsCodec.Deflate.EncodeUncompressed codec, fold, initial, cacheStrategy, accessStrategy) + + let createSnapshotted codec initial fold (isOrigin, toSnapshot) (context, cache) = + let accessStrategy = Equinox.DynamoStore.AccessStrategy.Snapshot (isOrigin, toSnapshot) + createCached codec initial fold accessStrategy (context, cache) + + let createRollingState codec initial fold toSnapshot (context, cache) = + let accessStrategy = Equinox.DynamoStore.AccessStrategy.RollingState toSnapshot + createCached codec initial fold accessStrategy (context, cache) + +#if !(sourceKafka && kafka) module Esdb = let create codec initial fold (context, cache) = - let cacheStrategy = Equinox.EventStore.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) - Equinox.EventStore.EventStoreCategory(context, codec, fold, initial, cacheStrategy) + let cacheStrategy = Equinox.EventStoreDb.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) + Equinox.EventStoreDb.EventStoreCategory(context, codec, fold, initial, cacheStrategy) + +module Sss = + + let create codec initial fold (context, cache) = + let cacheStrategy = Equinox.SqlStreamStore.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) 
+ Equinox.SqlStreamStore.SqlStreamStoreCategory(context, codec, fold, initial, cacheStrategy) -//#endif +#endif [] type Store = | Cosmos of Equinox.CosmosStore.CosmosStoreContext * Equinox.Core.ICache -//#if multiSource - | Esdb of Equinox.EventStore.EventStoreContext * Equinox.Core.ICache -//#endif + | Dynamo of Equinox.DynamoStore.DynamoStoreContext * Equinox.Core.ICache +#if !(sourceKafka && kafka) + | Esdb of Equinox.EventStoreDb.EventStoreContext * Equinox.Core.ICache + | Sss of Equinox.SqlStreamStore.SqlStreamStoreContext * Equinox.Core.ICache +#endif diff --git a/propulsion-reactor/Contract.fs b/propulsion-reactor/Contract.fs index c133803cf..9c10891e2 100644 --- a/propulsion-reactor/Contract.fs +++ b/propulsion-reactor/Contract.fs @@ -27,12 +27,13 @@ module Input = interface TypeShape.UnionContract.IUnionContract let private codec : FsCodec.IEventCodec<_, _, _> = Config.EventCodec.withIndex + open Propulsion.Internal let (|Decode|) (stream, span : Propulsion.Streams.StreamSpan<_>) = - span.events |> Array.choose (EventCodec.tryDecode codec stream) - let (|StreamName|_|) = function FsCodec.StreamName.CategoryAndId (Category, ClientId.Parse clientId) -> Some clientId | _ -> None - let (|Parse|_|) = function - | (StreamName clientId, _) & Decode events -> Some (clientId, events) - | _ -> None + span |> Array.chooseV (EventCodec.tryDecode codec stream) + let [](|StreamName|_|) = function FsCodec.StreamName.CategoryAndId (Category, ClientId.Parse clientId) -> ValueSome clientId | _ -> ValueNone + let [] (|Parse|_|) = function + | (StreamName clientId, _) & Decode events -> ValueSome struct (clientId, events) + | _ -> ValueNone type Data = { value : int } type SummaryEvent = @@ -45,6 +46,6 @@ type SummaryEvent = | [] Summary of SummaryInfo interface TypeShape.UnionContract.IUnionContract #endif -let codec = Config.EventCodec.create() -let encode summary = codec.Encode(None, summary) +let codec = Config.EventCodec.gen +let encode summary = codec.Encode((), 
summary) //#endif diff --git a/propulsion-reactor/Handler.fs b/propulsion-reactor/Handler.fs index 626359243..c041287a6 100644 --- a/propulsion-reactor/Handler.fs +++ b/propulsion-reactor/Handler.fs @@ -1,16 +1,5 @@ module ReactorTemplate.Handler -//#if multiSource -open Propulsion.EventStore - -/// Responsible for inspecting and then either dropping or tweaking events coming from EventStore -// NB the `index` needs to be contiguous with existing events - IOW filtering needs to be at stream (and not event) level -let tryMapEvent filterByStreamName (x : EventStore.ClientAPI.ResolvedEvent) = - match x.Event with - | e when not e.IsJson || e.EventStreamId.StartsWith "$" || not (filterByStreamName e.EventStreamId) -> None - | PropulsionStreamEvent e -> Some e - -//#endif //#if kafka [] type Outcome = @@ -22,15 +11,11 @@ type Outcome = | NotApplicable of count : int /// Gathers stats based on the outcome of each Span processed for emission, at intervals controlled by `StreamsConsumer` -type Stats(log, statsInterval, stateInterval, ?logExternalStats) = -#if kafkaEventSpans - inherit Propulsion.Streams.Stats(log, statsInterval, stateInterval) -#else -#if blank +type Stats(log, statsInterval, stateInterval, verboseStore, ?logExternalStats) = +#if sourceKafka || blank inherit Propulsion.Streams.Stats(log, statsInterval, stateInterval) #else inherit Propulsion.Streams.Sync.Stats(log, statsInterval, stateInterval) -#endif #endif let mutable ok, skipped, na = 0, 0, 0 @@ -40,7 +25,7 @@ type Stats(log, statsInterval, stateInterval, ?logExternalStats) = | Outcome.Skipped count -> skipped <- skipped + count | Outcome.NotApplicable count -> na <- na + count override _.HandleExn(log, exn) = - log.Information(exn, "Unhandled") + Exception.dump verboseStore log exn override _.DumpStats() = base.DumpStats() @@ -54,11 +39,15 @@ let generate stream version summary = Propulsion.Codec.NewtonsoftJson.RenderedSummary.ofStreamEvent stream version event #if blank +let categoryFilter = function 
+ | Contract.Input.Category -> true + | _ -> false + let handle (produceSummary : Propulsion.Codec.NewtonsoftJson.RenderedSummary -> Async) - (stream, span : Propulsion.Streams.StreamSpan<_>) = async { + struct (stream, span : Propulsion.Streams.StreamSpan<_>) = async { match stream, span with - | Contract.Input.Parse (clientId, events) -> + | Contract.Input.Parse (_clientId, events) -> for version, event in events do let summary = match event with @@ -66,22 +55,38 @@ let handle | Contract.Input.EventB { field = x } -> Contract.EventB { value = x } let wrapped = generate stream version summary let! _ = produceSummary wrapped in () - return Propulsion.Streams.SpanResult.AllProcessed, Outcome.Ok (events.Length, 0) - | _ -> return Propulsion.Streams.SpanResult.AllProcessed, Outcome.NotApplicable span.events.Length } + return struct (Propulsion.Streams.SpanResult.AllProcessed, Outcome.Ok (events.Length, 0)) + | _ -> return Propulsion.Streams.SpanResult.AllProcessed, Outcome.NotApplicable span.Length } #else +let categoryFilter = function + | Todo.Reactions.Category -> true + | _ -> false + let handle (service : Todo.Service) (produceSummary : Propulsion.Codec.NewtonsoftJson.RenderedSummary -> Async) - (stream, span : Propulsion.Streams.StreamSpan<_>) = async { + struct (stream, span) = async { match stream, span with | Todo.Reactions.Parse (clientId, events) -> if events |> Seq.exists Todo.Reactions.impliesStateChange then let! version', summary = service.QueryWithVersion(clientId, Contract.ofState) let wrapped = generate stream version' (Contract.Summary summary) let! 
_ = produceSummary wrapped - return Propulsion.Streams.SpanResult.OverrideWritePosition version', Outcome.Ok (1, events.Length - 1) + return struct (Propulsion.Streams.SpanResult.OverrideWritePosition version', Outcome.Ok (1, events.Length - 1)) else return Propulsion.Streams.SpanResult.AllProcessed, Outcome.Skipped events.Length - | _ -> return Propulsion.Streams.SpanResult.AllProcessed, Outcome.NotApplicable span.events.Length } + | _ -> return Propulsion.Streams.SpanResult.AllProcessed, Outcome.NotApplicable span.Length } #endif //#endif + +type Config private () = + + static member StartSink(log : Serilog.ILogger, stats, + handle : struct (FsCodec.StreamName * Propulsion.Streams.Default.StreamSpan) + -> Async, + maxReadAhead : int, maxConcurrentStreams : int, ?wakeForResults, ?idleDelay, ?purgeInterval) = + Propulsion.Streams.Default.Config.Start(log, maxReadAhead, maxConcurrentStreams, handle, stats, stats.StatsInterval.Period, + ?wakeForResults = wakeForResults, ?idleDelay = idleDelay, ?purgeInterval = purgeInterval) + + static member StartSource(log, sink, sourceConfig) = + SourceConfig.start (log, Config.log) sink categoryFilter sourceConfig diff --git a/propulsion-reactor/Infrastructure.fs b/propulsion-reactor/Infrastructure.fs index e0e2c26cb..808a97492 100644 --- a/propulsion-reactor/Infrastructure.fs +++ b/propulsion-reactor/Infrastructure.fs @@ -1,13 +1,13 @@ [] module ReactorTemplate.Infrastructure -#if (kafka || !blank) +// #if (kafka || !blank) open FSharp.UMX // see https://github.com/fsprojects/FSharp.UMX - % operator and ability to apply units of measure to Guids+strings -#endif +// #endif open Serilog open System -#if (kafka || !blank) +// #if (kafka || !blank) module Guid = let inline toStringN (x : Guid) = x.ToString "N" @@ -20,25 +20,40 @@ module ClientId = let parse (value : string) : ClientId = let raw = Guid.Parse value in % raw let (|Parse|) = parse -#endif +// #endif module EnvVar = let tryGet varName : string option = 
Environment.GetEnvironmentVariable varName |> Option.ofObj -#if (kafka || !blank) +module Exception = + + let dump verboseStore (log : ILogger) (exn : exn) = + match exn with // TODO provide override option? + | :? Microsoft.Azure.Cosmos.CosmosException as e + when (e.StatusCode = System.Net.HttpStatusCode.TooManyRequests + || e.StatusCode = System.Net.HttpStatusCode.ServiceUnavailable) + && not verboseStore -> () + + | Equinox.DynamoStore.Exceptions.ProvisionedThroughputExceeded + | :? TimeoutException when not verboseStore -> () + + | _ -> + log.Information(exn, "Unhandled") + +// #if (kafka || !blank) module EventCodec = /// Uses the supplied codec to decode the supplied event record `x` (iff at LogEventLevel.Debug, detail fails to `log` citing the `stream` and content) - let tryDecode (codec : FsCodec.IEventCodec<_, _, _>) streamName (x : FsCodec.ITimelineEvent) = + let tryDecode (codec : FsCodec.IEventCodec<_, _, _>) streamName (x : FsCodec.ITimelineEvent) = match codec.TryDecode x with - | None -> + | ValueNone -> if Log.IsEnabled Serilog.Events.LogEventLevel.Debug then - Log.ForContext("event", System.Text.Encoding.UTF8.GetString(x.Data), true) + Log.ForContext("event", System.Text.Encoding.UTF8.GetString(let d = x.Data in d.Span), true) .Debug("Codec {type} Could not decode {eventType} in {stream}", codec.GetType().FullName, x.EventType, streamName) - None + ValueNone | x -> x -#endif +// #endif type Equinox.CosmosStore.CosmosStoreConnector with member private x.LogConfiguration(connectionName, databaseId, containerId) = @@ -75,6 +90,53 @@ module CosmosStoreContext = let create (storeClient : Equinox.CosmosStore.CosmosStoreClient) = let maxEvents = 256 Equinox.CosmosStore.CosmosStoreContext(storeClient, tipMaxEvents=maxEvents) + +module Dynamo = + + open Equinox.DynamoStore + + let defaultCacheDuration = System.TimeSpan.FromMinutes 20. 
+ let private createCached codec initial fold accessStrategy (context, cache) = + let cacheStrategy = CachingStrategy.SlidingWindow (cache, defaultCacheDuration) + DynamoStoreCategory(context, FsCodec.Deflate.EncodeTryDeflate codec, fold, initial, cacheStrategy, accessStrategy) + + let createSnapshotted codec initial fold (isOrigin, toSnapshot) (context, cache) = + let accessStrategy = AccessStrategy.Snapshot (isOrigin, toSnapshot) + createCached codec initial fold accessStrategy (context, cache) + +type Equinox.DynamoStore.DynamoStoreConnector with + + member x.LogConfiguration() = + Log.Information("DynamoStore {endpoint} Timeout {timeoutS}s Retries {retries}", + x.Endpoint, (let t = x.Timeout in t.TotalSeconds), x.Retries) + +type Equinox.DynamoStore.DynamoStoreClient with + + member internal x.LogConfiguration(role, ?log) = + (defaultArg log Log.Logger).Information("DynamoStore {role:l} Table {table} Archive {archive}", role, x.TableName, Option.toObj x.ArchiveTableName) + member client.CreateCheckpointService(consumerGroupName, cache, log, ?checkpointInterval) = + let checkpointInterval = defaultArg checkpointInterval (TimeSpan.FromHours 1.) + let context = Equinox.DynamoStore.DynamoStoreContext(client) + Propulsion.Feed.ReaderCheckpoint.DynamoStore.create log (consumerGroupName, checkpointInterval) (context, cache) + +type Equinox.DynamoStore.DynamoStoreContext with + + member internal x.LogConfiguration(log : ILogger) = + log.Information("DynamoStore Tip thresholds: {maxTipBytes}b {maxTipEvents}e Query Paging {queryMaxItems} items", + x.TipOptions.MaxBytes, Option.toNullable x.TipOptions.MaxEvents, x.QueryOptions.MaxItems) + +type Amazon.DynamoDBv2.IAmazonDynamoDB with + + member x.ConnectStore(role, table) = + let storeClient = Equinox.DynamoStore.DynamoStoreClient(x, table) + storeClient.LogConfiguration(role) + storeClient + +module DynamoStoreContext = + + /// Create with default packing and querying policies. 
Search for other `module DynamoStoreContext` impls for custom variations + let create (storeClient : Equinox.DynamoStore.DynamoStoreClient) = + Equinox.DynamoStore.DynamoStoreContext(storeClient, queryMaxItems = 100) [] type Logging() = @@ -82,7 +144,6 @@ type Logging() = [] static member Configure(configuration : LoggerConfiguration, ?verbose) = configuration - .Destructure.FSharpTypes() .Enrich.FromLogContext() |> fun c -> if verbose = Some true then c.MinimumLevel.Debug() else c |> fun c -> let t = "[{Timestamp:HH:mm:ss} {Level:u3}] {Message:lj} {NewLine}{Exception}" diff --git a/propulsion-reactor/Ingester.fs b/propulsion-reactor/Ingester.fs index b1725ec92..70fb927f1 100644 --- a/propulsion-reactor/Ingester.fs +++ b/propulsion-reactor/Ingester.fs @@ -11,7 +11,7 @@ type Outcome = | NotApplicable of count : int /// Gathers stats based on the outcome of each Span processed for emission, at intervals controlled by `StreamsConsumer` -type Stats(log, statsInterval, stateInterval) = +type Stats(log, statsInterval, stateInterval, verboseStore, ?logExternalStats) = inherit Propulsion.Streams.Stats(log, statsInterval, stateInterval) let mutable ok, skipped, na = 0, 0, 0 @@ -21,24 +21,25 @@ type Stats(log, statsInterval, stateInterval) = | Outcome.Skipped count -> skipped <- skipped + count | Outcome.NotApplicable count -> na <- na + count override _.HandleExn(log, exn) = - log.Information(exn, "Unhandled") + Exception.dump verboseStore log exn override _.DumpStats() = base.DumpStats() if ok <> 0 || skipped <> 0 || na <> 0 then log.Information(" used {ok} skipped {skipped} n/a {na}", ok, skipped, na) ok <- 0; skipped <- 0; na <- 0 + logExternalStats |> Option.iter (fun dumpTo -> dumpTo log) #if blank -let handle (stream, span : Propulsion.Streams.StreamSpan<_>) = async { +let handle struct (stream, span : Propulsion.Streams.StreamSpan<_>) = async { match stream, span with | FsCodec.StreamName.CategoryAndId ("Todos", id), _ -> let ok = true // "TODO: add handler code" 
match ok with - | true -> return Propulsion.Streams.SpanResult.AllProcessed, Outcome.Ok (1, span.events.Length - 1) - | false -> return Propulsion.Streams.SpanResult.AllProcessed, Outcome.Skipped span.events.Length - | _ -> return Propulsion.Streams.AllProcessed, Outcome.NotApplicable span.events.Length } + | true -> return struct (Propulsion.Streams.SpanResult.AllProcessed, Outcome.Ok (1, span.Length - 1)) + | false -> return Propulsion.Streams.SpanResult.AllProcessed, Outcome.Skipped span.Length + | _ -> return Propulsion.Streams.AllProcessed, Outcome.NotApplicable span.Length } #else // map from external contract to internal contract defined by the aggregate let toSummaryEventData ( x : Contract.SummaryInfo) : TodoSummary.Events.SummaryData = @@ -49,12 +50,12 @@ let toSummaryEventData ( x : Contract.SummaryInfo) : TodoSummary.Events.SummaryD let handle (sourceService : Todo.Service) (summaryService : TodoSummary.Service) - (stream, span : Propulsion.Streams.StreamSpan<_>) = async { + struct (stream, span : Propulsion.Streams.StreamSpan<_>) = async { match stream, span with | Todo.Reactions.Parse (clientId, events) when events |> Seq.exists Todo.Reactions.impliesStateChange -> let! version', summary = sourceService.QueryWithVersion(clientId, Contract.ofState) match! 
summaryService.TryIngest(clientId, version', toSummaryEventData summary) with - | true -> return Propulsion.Streams.SpanResult.OverrideWritePosition version', Outcome.Ok (1, span.events.Length - 1) - | false -> return Propulsion.Streams.SpanResult.OverrideWritePosition version', Outcome.Skipped span.events.Length - | _ -> return Propulsion.Streams.SpanResult.AllProcessed, Outcome.NotApplicable span.events.Length } + | true -> return struct (Propulsion.Streams.SpanResult.OverrideWritePosition version', Outcome.Ok (1, span.Length - 1)) + | false -> return Propulsion.Streams.SpanResult.OverrideWritePosition version', Outcome.Skipped span.Length + | _ -> return Propulsion.Streams.SpanResult.AllProcessed, Outcome.NotApplicable span.Length } #endif diff --git a/propulsion-reactor/Program.fs b/propulsion-reactor/Program.fs index eaf5af2cf..4dbda990e 100644 --- a/propulsion-reactor/Program.fs +++ b/propulsion-reactor/Program.fs @@ -1,460 +1,171 @@ module ReactorTemplate.Program -//#if (!kafkaEventSpans) -//#if multiSource -open Propulsion.EventStore -//#endif -//#endif +open Equinox.EventStoreDb +open Equinox.SqlStreamStore +open Infrastructure open Serilog open System -exception MissingArg of message : string with override this.Message = this.message - type Configuration(tryGet) = - - let get key = - match tryGet key with - | Some value -> value - | None -> raise (MissingArg (sprintf "Missing Argument/Environment Variable %s" key)) - let isTrue varName = tryGet varName |> Option.exists (fun s -> String.Equals(s, bool.TrueString, StringComparison.OrdinalIgnoreCase)) - - member _.CosmosConnection = get "EQUINOX_COSMOS_CONNECTION" - member _.CosmosDatabase = get "EQUINOX_COSMOS_DATABASE" - member _.CosmosContainer = get "EQUINOX_COSMOS_CONTAINER" -//#if multiSource - member _.EventStoreTcp = isTrue "EQUINOX_ES_TCP" - member _.EventStoreProjectionTcp = isTrue "EQUINOX_ES_PROJ_TCP" - member _.EventStorePort = tryGet "EQUINOX_ES_PORT" |> Option.map int - member 
_.EventStoreProjectionPort = tryGet "EQUINOX_ES_PROJ_PORT" |> Option.map int - member _.EventStoreHost = get "EQUINOX_ES_HOST" - member _.EventStoreProjectionHost = tryGet "EQUINOX_ES_PROJ_HOST" - member _.EventStoreUsername = get "EQUINOX_ES_USERNAME" - member _.EventStoreProjectionUsername = tryGet "EQUINOX_ES_PROJ_USERNAME" - member _.EventStorePassword = get "EQUINOX_ES_PASSWORD" - member _.EventStoreProjectionPassword = tryGet "EQUINOX_ES_PROJ_PASSWORD" -//#endif - member _.Broker = get "PROPULSION_KAFKA_BROKER" - member _.Topic = get "PROPULSION_KAFKA_TOPIC" + inherit SourceArgs.Configuration(tryGet) module Args = open Argu -//#if multiSource - open Equinox.EventStore -//#endif [] type Parameters = | [] Verbose | [] ProcessorName of string | [] MaxReadAhead of int | [] MaxWriters of int -//#if filter - - | [] CategoryBlacklist of string - | [] CategoryWhitelist of string -//#endif -#if kafkaEventSpans - | [] Kafka of ParseResults +#if sourceKafka + | [] Kafka of ParseResults #else - | [] Cosmos of ParseResults -//#if multiSource - | [] Es of ParseResults -//#endif + | [] Cosmos of ParseResults + | [] Dynamo of ParseResults + | [] Esdb of ParseResults + | [] SqlMs of ParseResults #endif interface IArgParserTemplate with - member a.Usage = a |> function + member p.Usage = p |> function | Verbose -> "request Verbose Logging. Default: off." | ProcessorName _ -> "Projector consumer group name." | MaxReadAhead _ -> "maximum number of batches to let processing get ahead of completion. Default: 16." | MaxWriters _ -> "maximum number of concurrent streams on which to process at any time. Default: 8." -//#if filter - | CategoryBlacklist _ -> "category whitelist" - | CategoryWhitelist _ -> "category blacklist" -//#endif -#if (!kafkaEventSpans) - | Cosmos _ -> "specify CosmosDB input parameters." -//#if multiSource - | Es _ -> "specify EventStore input parameters." -//#endif -#else +#if sourceKafka | Kafka _ -> "specify Kafka input parameters." 
+#else + | Cosmos _ -> "specify CosmosDB input parameters." + | Dynamo _ -> "specify DynamoDB input parameters." + | Esdb _ -> "specify EventStore input parameters." + | SqlMs _ -> "specify SqlStreamStore input parameters." #endif - and Arguments(c : Configuration, a : ParseResults) = - member val Verbose = a.Contains Parameters.Verbose - member val ProcessorName = a.GetResult ProcessorName - member val MaxReadAhead = a.GetResult(MaxReadAhead, 16) - member val MaxConcurrentStreams = a.GetResult(MaxWriters, 8) + and Arguments(c : Configuration, p : ParseResults) = + let processorName = p.GetResult ProcessorName + let maxReadAhead = p.GetResult(MaxReadAhead, 16) + let maxConcurrentStreams = p.GetResult(MaxWriters, 8) + let cacheSizeMb = 10 + member val Verbose = p.Contains Verbose member val StatsInterval = TimeSpan.FromMinutes 1. member val StateInterval = TimeSpan.FromMinutes 5. -//#if filter - member _.FilterFunction(?excludeLong, ?longOnly): string -> bool = - let isLong (streamName : string) = - streamName.StartsWith "Inventory-" // Too long - || streamName.StartsWith "InventoryCount-" // No Longer used - || streamName.StartsWith "InventoryLog" // 5GB, causes lopsided partitions, unused - let excludeLong = defaultArg excludeLong true - match a.GetResults CategoryBlacklist, a.GetResults CategoryWhitelist with - | [], [] when longOnly = Some true -> - Log.Information("Only including long streams") - isLong - | [], [] -> - let black = set [ - "SkuFileUpload-534e4362c641461ca27e3d23547f0852" - "SkuFileUpload-778f1efeab214f5bab2860d1f802ef24" - "PurchaseOrder-5791" ] - let isCheckpoint (streamName : string) = - streamName.EndsWith "_checkpoint" - || streamName.EndsWith "_checkpoints" - || streamName.StartsWith "#serial" - || streamName.StartsWith "marvel_bookmark" - Log.Information("Using well-known stream blacklist {black} excluding checkpoints and #serial streams, excluding long streams: {excludeLong}", black, excludeLong) - fun x -> not (black.Contains x) && (not 
<< isCheckpoint) x && (not excludeLong || (not << isLong) x) - | bad, [] -> let black = Set.ofList bad in Log.Warning("Excluding categories: {cats}", black); fun x -> not (black.Contains x) - | [], good -> let white = Set.ofList good in Log.Warning("Only copying categories: {cats}", white); fun x -> white.Contains x - | _, _ -> raise (MissingArg "BlackList and Whitelist are mutually exclusive; inclusions and exclusions cannot be mixed") -//#endif -#if changeFeedOnly - member val Source : CosmosSourceArguments = - match a.TryGetSubCommand() with - | Some (Parameters.Cosmos cosmos) -> CosmosSourceArguments (c, cosmos) - | _ -> raise (MissingArg "Must specify cosmos for Src") - member x.SourceParams() = - let srcC = x.Source -#endif -//#if (!kafkaEventSpans) -//#if (!changeFeedOnly) - member val Source : Choice = - match a.TryGetSubCommand() with - | Some (Es es) -> Choice1Of2 (EsSourceArguments (c, es)) - | Some (Parameters.Cosmos cosmos) -> Choice2Of2 (CosmosSourceArguments (c, cosmos)) - | _ -> raise (MissingArg "Must specify one of cosmos or es for Src") - member x.SourceParams() : Choice = - match x.Source with - | Choice1Of2 srcE -> - let startPos, cosmos = srcE.StartPos, srcE.Cosmos - Log.Information("Processing Consumer Group {groupName} from {startPos} (force: {forceRestart}) in Database {db} Container {container}", - x.ProcessorName, startPos, srcE.ForceRestart, cosmos.DatabaseId, cosmos.ContainerId) - Log.Information("Ingesting in batches of [{minBatchSize}..{batchSize}], reading up to {maxReadAhead} uncommitted batches ahead", - srcE.MinBatchSize, srcE.StartingBatchSize, x.MaxReadAhead) - let context = cosmos.Connect() |> Async.RunSynchronously |> CosmosStoreContext.create - Choice1Of2 (srcE, context, - { groupName = x.ProcessorName; start = startPos; checkpointInterval = srcE.CheckpointInterval; tailInterval = srcE.TailInterval - forceRestart = srcE.ForceRestart - batchSize = srcE.StartingBatchSize; minBatchSize = srcE.MinBatchSize; gorge = srcE.Gorge; 
streamReaders = 0 }) - | Choice2Of2 srcC -> -//#endif // !changeFeedOnly - let leases = srcC.ConnectLeases() - Log.Information("Reacting... {dop} writers, max {maxReadAhead} batches read ahead", x.MaxConcurrentStreams, x.MaxReadAhead) - Log.Information("ChangeFeed {processorName} Leases Database {db} Container {container}. MaxItems limited to {maxItems}", - x.ProcessorName, srcC.DatabaseId, srcC.ContainerId, Option.toNullable srcC.MaxItems) - if srcC.FromTail then Log.Warning("(If new projector group) Skipping projection of all existing events.") - Log.Information("ChangeFeed Lag stats interval {lagS:n0}s", let f = srcC.LagFrequency in f.TotalSeconds) - let storeClient, monitored = srcC.ConnectStoreAndMonitored() - let context = CosmosStoreContext.create storeClient -#if changeFeedOnly - (srcC, context, monitored, leases, x.ProcessorName, srcC.FromTail, srcC.MaxItems, srcC.LagFrequency) + member val PurgeInterval = TimeSpan.FromHours 1. + + member _.ProcessorParams() = Log.Information("Reacting... 
{processorName}, reading {maxReadAhead} ahead, {dop} streams", + processorName, maxReadAhead, maxConcurrentStreams) + (processorName, maxReadAhead, maxConcurrentStreams) +#if sourceKafka + member _.ConnectStoreAndSource(appName) : _ * _ * Args.KafkaSinkArguments * (string -> FsKafka.KafkaConsumerConfig) * (ILogger -> unit) = + let p = + match p.GetSubCommand() with + | Kafka p -> SourceArgs.Kafka.Arguments(c, p) + | p -> Args.missingArg $"Unexpected Source subcommand %A{p}" + let createConsumerConfig groupName = + FsKafka.KafkaConsumerConfig.Create( + appName, p.Broker, [p.Topic], groupName, Confluent.Kafka.AutoOffsetReset.Earliest, + maxInFlightBytes = p.MaxInFlightBytes, ?statisticsInterval = p.LagFrequency) +#if kafka && blank + let targetStore = () in targetStore, targetStore, p.Kafka, createConsumerConfig, ignore #else - Choice2Of2 (srcC, context, monitored, leases, x.ProcessorName, srcC.FromTail, srcC.MaxItems, srcC.LagFrequency) -#endif -//#endif // kafkaEventSpans -#if kafkaEventSpans - member val Source : KafkaSourceArguments = - match a.TryGetSubCommand() with - | Some (Parameters.Kafka kafka) -> KafkaSourceArguments (c, kafka) - | _ -> raise (MissingArg "Must specify kafka for Src") - and [] KafkaSourceParameters = - | [] Broker of string - | [] Topic of string - | [] MaxInflightMb of float - | [] LagFreqM of float -#if (kafka && blank) - | [] Kafka of ParseResults -#else - | [] Cosmos of ParseResults + let cache = Equinox.Cache (appName, sizeMb = cacheSizeMb) + let targetStore = p.ConnectTarget cache + targetStore, targetStore, p.Kafka, createConsumerConfig, fun log -> + Equinox.CosmosStore.Core.Log.InternalMetrics.dump log + Equinox.DynamoStore.Core.Log.InternalMetrics.dump log +#endif + member val VerboseStore = false +#else + member val Store : Choice = + match p.GetSubCommand() with + | Cosmos p -> Choice1Of4 <| SourceArgs.Cosmos.Arguments(c, p) + | Dynamo p -> Choice2Of4 <| SourceArgs.Dynamo.Arguments(c, p) + | Esdb p -> Choice3Of4 <| 
SourceArgs.Esdb.Arguments(c, p) + | SqlMs p -> Choice4Of4 <| SourceArgs.Sss.Arguments(c, p) + | p -> Args.missingArg $"Unexpected Store subcommand %A{p}" + member x.VerboseStore = match x.Store with + | Choice1Of4 s -> s.Verbose + | Choice2Of4 s -> s.Verbose + | Choice3Of4 s -> s.Verbose + | Choice4Of4 s -> false + member x.ConnectStoreAndSource(appName) : Config.Store * _ * _ * (ILogger -> string -> SourceConfig) * (ILogger -> unit) = + let cache = Equinox.Cache (appName, sizeMb = cacheSizeMb) + match x.Store with + | Choice1Of4 a -> + let client, monitored = a.ConnectStoreAndMonitored() + let buildSourceConfig log groupName = + let leases, startFromTail, maxItems, tailSleepInterval, lagFrequency = a.MonitoringParams(log) + let checkpointConfig = CosmosFeedConfig.Persistent (groupName, startFromTail, maxItems, lagFrequency) + SourceConfig.Cosmos (monitored, leases, checkpointConfig, tailSleepInterval) + let context = client |> CosmosStoreContext.create + let store = Config.Store.Cosmos (context, cache) +#if kafka + let kafka = a.Kafka +#if blank + let targetStore = store +#else + let targetStore = a.ConnectTarget(cache) #endif - interface IArgParserTemplate with - member a.Usage = a |> function - | Broker _ -> "specify Kafka Broker, in host:port format. (optional if environment variable PROPULSION_KAFKA_BROKER specified)" - | Topic _ -> "specify Kafka Topic Id. (optional if environment variable PROPULSION_KAFKA_TOPIC specified)" - | MaxInflightMb _ -> "maximum MiB of data to read ahead. Default: 10." - | LagFreqM _ -> "specify frequency (minutes) to dump lag stats. Default: 1." -#if (kafka && blank) - | Kafka _ -> "Kafka Source parameters." #else - | Cosmos _ -> "CosmosDb Sink parameters." 
+ let kafka, targetStore = (), a.ConnectTarget(cache) #endif - and KafkaSourceArguments(c : Configuration, a : ParseResults) = - member val Broker = a.TryGetResult KafkaSourceParameters.Broker |> Option.defaultWith (fun () -> c.Broker) - member val Topic = a.TryGetResult KafkaSourceParameters.Topic |> Option.defaultWith (fun () -> c.Topic) - member val MaxInFlightBytes = a.GetResult(MaxInflightMb, 10.) * 1024. * 1024. |> int64 - member val LagFrequency = a.TryGetResult LagFreqM |> Option.map System.TimeSpan.FromMinutes - member x.BuildSourceParams() = x.Broker, x.Topic -#if (kafka && blank) - member val Sink = - match a.TryGetSubCommand() with - | Some (KafkaSourceParameters.Kafka kafka) -> KafkaSinkArguments (c, kafka) - | _ -> raise (MissingArg "Must specify kafka arguments") -#else - member val Cosmos = - match a.TryGetSubCommand() with - | Some (KafkaSourceParameters.Cosmos cosmos) -> CosmosArguments (c, cosmos) - | _ -> raise (MissingArg "Must specify cosmos details") + store, targetStore, kafka, buildSourceConfig, Equinox.CosmosStore.Core.Log.InternalMetrics.dump + | Choice2Of4 a -> + let context = a.Connect() + let buildSourceConfig log groupName = + let indexStore, startFromTail, batchSizeCutoff, tailSleepInterval, streamsDop = a.MonitoringParams(log) + let checkpoints = a.CreateCheckpointStore(groupName, cache) + let load = DynamoLoadModeConfig.Hydrate (context, streamsDop) + SourceConfig.Dynamo (indexStore, checkpoints, load, startFromTail, batchSizeCutoff, tailSleepInterval, x.StatsInterval) + let store = Config.Store.Dynamo (context, cache) +#if kafka + let kafka = a.Kafka +#if blank + let targetStore = store +#else + let targetStore = a.ConnectTarget(cache) #endif #else - and [] CosmosSourceParameters = - | [] FromTail - | [] MaxItems of int - | [] LagFreqM of float - | [] LeaseContainer of string - - | [] ConnectionMode of Microsoft.Azure.Cosmos.ConnectionMode - | [] Connection of string - | [] Database of string - | [] Container of string // Actually 
Mandatory, but stating that is not supported - | [] Timeout of float - | [] Retries of int - | [] RetriesWaitTime of float - -#if (!multiSource && kafka && blank) - | [] Kafka of ParseResults -#else - | [] Cosmos of ParseResults + let kafka, targetStore = (), a.ConnectTarget(cache) #endif - interface IArgParserTemplate with - member a.Usage = a |> function - | FromTail -> "(iff the Consumer Name is fresh) - force skip to present Position. Default: Never skip an event." - | MaxItems _ -> "maximum item count to request from feed. Default: unlimited" - | LagFreqM _ -> "frequency (in minutes) to dump lag stats. Default: 1" - | LeaseContainer _ -> "specify Container Name for Leases container. Default: `sourceContainer` + `-aux`." - - | ConnectionMode _ -> "override the connection mode. Default: Direct." - | Connection _ -> "specify a connection string for a Cosmos account. (optional if environment variable EQUINOX_COSMOS_CONNECTION specified)" - | Database _ -> "specify a database name for Cosmos account. (optional if environment variable EQUINOX_COSMOS_DATABASE specified)" - | Container _ -> "specify a container name within `Database`" - | Timeout _ -> "specify operation timeout in seconds. Default: 5." - | Retries _ -> "specify operation retries. Default: 1." - | RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds. Default: 5." - -#if (!multiSource && kafka && blank) - | Kafka _ -> "Kafka Source parameters." 
+ store, targetStore, kafka, buildSourceConfig, Equinox.DynamoStore.Core.Log.InternalMetrics.dump + | Choice3Of4 a -> + let connection = a.Connect(Log.Logger, appName, EventStore.Client.NodePreference.Leader) + let context = EventStoreContext connection + let store = Config.Store.Esdb (context, cache) + let targetStore = a.ConnectTarget(cache) + let buildSourceConfig log groupName = + let startFromTail, maxItems, tailSleepInterval = a.MonitoringParams(log) + let checkpoints = a.CreateCheckpointStore(groupName, targetStore) + let hydrateBodies = true + SourceConfig.Esdb (connection.ReadConnection, checkpoints, hydrateBodies, startFromTail, maxItems, tailSleepInterval, x.StatsInterval) +#if kafka + let kafka = a.Kafka #else - | Cosmos _ -> "CosmosDb Sink parameters." + let kafka = () #endif - and CosmosSourceArguments(c : Configuration, a : ParseResults) = - let discovery = a.TryGetResult CosmosSourceParameters.Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString - let mode = a.TryGetResult CosmosSourceParameters.ConnectionMode - let timeout = a.GetResult(CosmosSourceParameters.Timeout, 5.) |> TimeSpan.FromSeconds - let retries = a.GetResult(CosmosSourceParameters.Retries, 1) - let maxRetryWaitTime = a.GetResult(CosmosSourceParameters.RetriesWaitTime, 5.) |> TimeSpan.FromSeconds - let connector = Equinox.CosmosStore.CosmosStoreConnector(discovery, timeout, retries, maxRetryWaitTime, ?mode = mode) - member val DatabaseId = a.TryGetResult CosmosSourceParameters.Database |> Option.defaultWith (fun () -> c.CosmosDatabase) - member val ContainerId = a.GetResult CosmosSourceParameters.Container - - member val FromTail = a.Contains CosmosSourceParameters.FromTail - member val MaxItems = a.TryGetResult MaxItems - member val LagFrequency : TimeSpan = a.GetResult(LagFreqM, 1.) 
|> TimeSpan.FromMinutes - member val private LeaseContainerId = a.TryGetResult CosmosSourceParameters.LeaseContainer - member private x.ConnectLeases containerId = connector.CreateUninitialized(x.DatabaseId, containerId) - member x.ConnectLeases() = match x.LeaseContainerId with - | None -> x.ConnectLeases(x.ContainerId + "-aux") - | Some sc -> x.ConnectLeases(sc) - member x.ConnectStoreAndMonitored() = connector.ConnectStoreAndMonitored(x.DatabaseId, x.ContainerId) -#if (!multiSource && kafka && blank) - member val Sink = - match a.TryGetSubCommand() with - | Some (CosmosSourceParameters.Kafka kafka) -> KafkaSinkArguments (c, kafka) - | _ -> raise (MissingArg "Must specify `kafka` arguments") + store, targetStore, kafka, buildSourceConfig, fun log -> + Equinox.EventStoreDb.Log.InternalMetrics.dump log + Equinox.CosmosStore.Core.Log.InternalMetrics.dump log + Equinox.DynamoStore.Core.Log.InternalMetrics.dump log + | Choice4Of4 a -> + let connection = a.Connect() + let context = SqlStreamStoreContext connection + let store = Config.Store.Sss (context, cache) + let targetStore = a.ConnectTarget(cache) + let buildSourceConfig log groupName = + let startFromTail, maxItems, tailSleepInterval = a.MonitoringParams(log) + let checkpoints = a.CreateCheckpointStoreSql(groupName) + let hydrateBodies = true + SourceConfig.Sss (connection.ReadConnection, checkpoints, hydrateBodies, startFromTail, maxItems, tailSleepInterval, x.StatsInterval) +#if kafka + let kafka = a.Kafka #else - member val Cosmos = - match a.TryGetSubCommand() with - | Some (CosmosSourceParameters.Cosmos cosmos) -> CosmosArguments (c, cosmos) - | _ -> raise (MissingArg "Must specify cosmos details") + let kafka = () #endif -//#if multiSource - and [] EsSourceParameters = - | [] FromTail - | [] Gorge of int - | [] Tail of intervalS: float - | [] ForceRestart - | [] BatchSize of int - - | [] MinBatchSize of int - | [] Position of int64 - | [] Chunk of int - | [] Percent of float - - | [] Verbose - | [] Timeout 
of float - | [] Retries of int - | [] HeartbeatTimeout of float - | [] Tcp - | [] Host of string - | [] Port of int - | [] Username of string - | [] Password of string - | [] ProjTcp - | [] ProjHost of string - | [] ProjPort of int - | [] ProjUsername of string - | [] ProjPassword of string - - | [] Cosmos of ParseResults - interface IArgParserTemplate with - member a.Usage = a |> function - | FromTail -> "Start the processing from the Tail" - | Gorge _ -> "Request Parallel readers phase during initial catchup, running one chunk (256MB) apart. Default: off" - | Tail _ -> "attempt to read from tail at specified interval in Seconds. Default: 1" - | ForceRestart _ -> "Forget the current committed position; start from (and commit) specified position. Default: start from specified position or resume from committed." - | BatchSize _ -> "maximum item count to request from feed. Default: 4096" - | MinBatchSize _ -> "minimum item count to drop down to in reaction to read failures. Default: 512" - | Position _ -> "EventStore $all Stream Position to commence from" - | Chunk _ -> "EventStore $all Chunk to commence from" - | Percent _ -> "EventStore $all Stream Position to commence from (as a percentage of current tail position)" - - | Verbose -> "Include low level Store logging." - | Tcp -> "Request connecting EventStore direct to a TCP/IP endpoint. Default: Use Clustered mode with Gossip-driven discovery (unless environment variable EQUINOX_ES_TCP specifies 'true')." - | Host _ -> "TCP mode: specify EventStore hostname to connect to directly. Clustered mode: use Gossip protocol against all A records returned from DNS query. (optional if environment variable EQUINOX_ES_HOST specified)" - | Port _ -> "specify EventStore custom port. Uses value of environment variable EQUINOX_ES_PORT if specified. Defaults for Cluster and Direct TCP/IP mode are 30778 and 1113 respectively." - | Username _ -> "specify username for EventStore. 
(optional if environment variable EQUINOX_ES_USERNAME specified)" - | Password _ -> "specify Password for EventStore. (optional if environment variable EQUINOX_ES_PASSWORD specified)" - | ProjTcp -> "Request connecting Projection EventStore direct to a TCP/IP endpoint. Default: Use Clustered mode with Gossip-driven discovery (unless environment variable EQUINOX_ES_PROJ_TCP specifies 'true')." - | ProjHost _ -> "TCP mode: specify Projection EventStore hostname to connect to directly. Clustered mode: use Gossip protocol against all A records returned from DNS query. Defaults to value of es host (-h) unless environment variable EQUINOX_ES_PROJ_HOST is specified." - | ProjPort _ -> "specify Projection EventStore custom port. Defaults to value of es port (-x) unless environment variable EQUINOX_ES_PROJ_PORT is specified." - | ProjUsername _ -> "specify username for Projection EventStore. Defaults to value of es user (-u) unless environment variable EQUINOX_ES_PROJ_USERNAME is specified." - | ProjPassword _ -> "specify Password for Projection EventStore. Defaults to value of es password (-p) unless environment variable EQUINOX_ES_PROJ_PASSWORD is specified." - | Timeout _ -> "specify operation timeout in seconds. Default: 20." - | Retries _ -> "specify operation retries. Default: 3." - | HeartbeatTimeout _ -> "specify heartbeat timeout in seconds. Default: 1.5." - - | Cosmos _ -> "CosmosDB (Checkpoint) Store parameters." 
- and EsSourceArguments(c : Configuration, a : ParseResults) = - let ts (x : TimeSpan) = x.TotalSeconds - let discovery (host, port, tcp) = - match tcp, port with - | false, None -> Discovery.GossipDns host - | false, Some p -> Discovery.GossipDnsCustomPort (host, p) - | true, None -> Discovery.Uri (UriBuilder("tcp", host, 1113).Uri) - | true, Some p -> Discovery.Uri (UriBuilder("tcp", host, p).Uri) - let tcp = a.Contains Tcp || c.EventStoreTcp - let host = a.TryGetResult Host |> Option.defaultWith (fun () -> c.EventStoreHost) - let port = a.TryGetResult Port |> Option.orElseWith (fun () -> c.EventStorePort) - let user = a.TryGetResult Username |> Option.defaultWith (fun () -> c.EventStoreUsername) - let password = a.TryGetResult Password |> Option.defaultWith (fun () -> c.EventStorePassword) - member val Gorge = a.TryGetResult Gorge - member val TailInterval = a.GetResult(Tail, 1.) |> TimeSpan.FromSeconds - member val ForceRestart = a.Contains ForceRestart - member val StartingBatchSize = a.GetResult(BatchSize, 4096) - member val MinBatchSize = a.GetResult(MinBatchSize, 512) - member val StartPos = - match a.TryGetResult Position, a.TryGetResult Chunk, a.TryGetResult Percent, a.Contains EsSourceParameters.FromTail with - | Some p, _, _, _ -> Absolute p - | _, Some c, _, _ -> StartPos.Chunk c - | _, _, Some p, _ -> Percentage p - | None, None, None, true -> StartPos.TailOrCheckpoint - | None, None, None, _ -> StartPos.StartOrCheckpoint - member val Tcp = tcp - member val Host = host - member val Port = port - member val User = user - member val Password = password - member val ProjTcp = a.Contains ProjTcp || c.EventStoreProjectionTcp - member val ProjPort = match a.TryGetResult ProjPort with - | Some x -> Some x - | None -> c.EventStoreProjectionPort |> Option.orElse port - member val ProjHost = match a.TryGetResult ProjHost with - | Some x -> x - | None -> c.EventStoreProjectionHost |> Option.defaultValue host - member val ProjUser = match a.TryGetResult 
ProjUsername with - | Some x -> x - | None -> c.EventStoreProjectionUsername |> Option.defaultValue user - member val ProjPassword = match a.TryGetResult ProjPassword with - | Some x -> x - | None -> c.EventStoreProjectionPassword |> Option.defaultValue password - member val Retries = a.GetResult(EsSourceParameters.Retries, 3) - member val Timeout = a.GetResult(EsSourceParameters.Timeout, 20.) |> TimeSpan.FromSeconds - member val Heartbeat = a.GetResult(HeartbeatTimeout, 1.5) |> TimeSpan.FromSeconds - - member x.ConnectProj(log: ILogger, storeLog: ILogger, appName, nodePreference) = - let discovery = discovery (x.ProjHost, x.ProjPort, x.ProjTcp) - log.ForContext("projHost", x.ProjHost).ForContext("projPort", x.ProjPort) - .Information("Projection EventStore {discovery} heartbeat: {heartbeat}s Timeout: {timeout}s Retries {retries}", - discovery, ts x.Heartbeat, ts x.Timeout, x.Retries) - let log=if storeLog.IsEnabled Serilog.Events.LogEventLevel.Debug then Logger.SerilogVerbose storeLog else Logger.SerilogNormal storeLog - let tags=["M", Environment.MachineName; "I", Guid.NewGuid() |> string] - Connector(x.ProjUser, x.ProjPassword, x.Timeout, x.Retries, log=log, heartbeatTimeout=x.Heartbeat, tags=tags) - .Connect(appName + "-Proj", discovery, nodePreference) |> Async.RunSynchronously - - member x.Connect(log: ILogger, storeLog: ILogger, appName, connectionStrategy) = - let discovery = discovery (x.Host, x.Port, x.Tcp) - log.ForContext("host", x.Host).ForContext("port", x.Port) - .Information("EventStore {discovery} heartbeat: {heartbeat}s Timeout: {timeout}s Retries {retries}", - discovery, ts x.Heartbeat, ts x.Timeout, x.Retries) - let log=if storeLog.IsEnabled Serilog.Events.LogEventLevel.Debug then Logger.SerilogVerbose storeLog else Logger.SerilogNormal storeLog - let tags=["M", Environment.MachineName; "I", Guid.NewGuid() |> string] - Connector(x.User, x.Password, x.Timeout, x.Retries, log=log, heartbeatTimeout=x.Heartbeat, tags=tags) - .Establish(appName, 
discovery, connectionStrategy) |> Async.RunSynchronously - - member val CheckpointInterval = TimeSpan.FromHours 1. - member val Cosmos : CosmosArguments = - match a.TryGetSubCommand() with - | Some (EsSourceParameters.Cosmos cosmos) -> CosmosArguments (c, cosmos) - | _ -> raise (MissingArg "Must specify `cosmos` checkpoint store when source is `es`") -//#endif + store, targetStore, kafka, buildSourceConfig, fun log -> + Equinox.SqlStreamStore.Log.InternalMetrics.dump log + Equinox.CosmosStore.Core.Log.InternalMetrics.dump log + Equinox.DynamoStore.Core.Log.InternalMetrics.dump log #endif -//#if (multiSource || !(blank && kafka)) - and [] CosmosParameters = - | [] Connection of string - | [] ConnectionMode of Microsoft.Azure.Cosmos.ConnectionMode - | [] Database of string - | [] Container of string - | [] Timeout of float - | [] Retries of int - | [] RetriesWaitTime of float -//#if kafka - | [] Kafka of ParseResults -//#endif - interface IArgParserTemplate with - member a.Usage = a |> function - | ConnectionMode _ -> "override the connection mode. Default: Direct." - | Connection _ -> "specify a connection string for a Cosmos account. (optional if environment variable EQUINOX_COSMOS_CONNECTION specified)" - | Database _ -> "specify a database name for Cosmos store. (optional if environment variable EQUINOX_COSMOS_DATABASE specified)" - | Container _ -> "specify a container name for Cosmos store. (optional if environment variable EQUINOX_COSMOS_CONTAINER specified)" - | Timeout _ -> "specify operation timeout in seconds. Default: 5." - | Retries _ -> "specify operation retries. Default: 1." - | RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds. Default: 5." -//#if kafka - | Kafka _ -> "Kafka Sink parameters." 
-//#endif - and CosmosArguments(c : Configuration, a : ParseResults) = - let discovery = a.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString - let mode = a.TryGetResult ConnectionMode - let timeout = a.GetResult(Timeout, 30.) |> TimeSpan.FromSeconds - let retries = a.GetResult(Retries, 9) - let maxRetryWaitTime = a.GetResult(RetriesWaitTime, 30.) |> TimeSpan.FromSeconds - let connector = Equinox.CosmosStore.CosmosStoreConnector(discovery, timeout, retries, maxRetryWaitTime, ?mode=mode) - member val DatabaseId = a.TryGetResult Database |> Option.defaultWith (fun () -> c.CosmosDatabase) - member val ContainerId = a.TryGetResult Container |> Option.defaultWith (fun () -> c.CosmosContainer) - member x.Connect() = connector.ConnectStore("Main", x.DatabaseId, x.ContainerId) -//#if kafka - member val Sink = - match a.TryGetSubCommand() with - | Some (CosmosParameters.Kafka kafka) -> KafkaSinkArguments (c, kafka) - | _ -> raise (MissingArg "Must specify `kafka` arguments") -//#endif -//#endif // (!(!multiSource && kafka && blank)) -//#if kafka - and [] KafkaSinkParameters = - | [] Broker of string - | [] Topic of string - interface IArgParserTemplate with - member a.Usage = a |> function - | Broker _ -> "specify Kafka Broker, in host:port format. (optional if environment variable PROPULSION_KAFKA_BROKER specified)" - | Topic _ -> "specify Kafka Topic Id. 
(optional if environment variable PROPULSION_KAFKA_TOPIC specified)" - and KafkaSinkArguments(c : Configuration, a : ParseResults) = - member val Broker = a.TryGetResult Broker |> Option.defaultWith (fun () -> c.Broker) - member val Topic = a.TryGetResult Topic |> Option.defaultWith (fun () -> c.Topic) - member x.BuildTargetParams() = x.Broker, x.Topic -//#endif /// Parse the commandline; can throw exceptions in response to missing arguments and/or `-h`/`--help` args let parse tryGetConfigValue argv : Arguments = @@ -464,201 +175,87 @@ module Args = let [] AppName = "ReactorTemplate" -//#if multiSource -#if (!kafkaEventSpans) -//#if (!changeFeedOnly) -module Checkpoints = - - open Equinox.CosmosStore - - // In this implementation, we keep the checkpoints in Cosmos when consuming from EventStore - module Cosmos = - - let codec = FsCodec.NewtonsoftJson.Codec.Create() - let access = AccessStrategy.Custom (Checkpoint.Fold.isOrigin, Checkpoint.Fold.transmute) - let create groupName (context, cache) = - let caching = CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) 
- let cat = CosmosStoreCategory(context, codec, Checkpoint.Fold.fold, Checkpoint.Fold.initial, caching, access) - let resolve streamName = cat.Resolve(streamName, Equinox.AllowStale) - Checkpoint.CheckpointSeries(groupName, resolve) - -//#endif -#endif -module EventStoreContext = - - let create connection = - Equinox.EventStore.EventStoreContext(connection, Equinox.EventStore.BatchingPolicy(maxBatchSize=500)) - -//#endif +open Propulsion.Internal // AwaitKeyboardInterruptAsTaskCanceledException -#if (!kafkaEventSpans) -open Propulsion.CosmosStore.Infrastructure // AwaitKeyboardInterruptAsTaskCancelledException - -#endif let build (args : Args.Arguments) = -#if (!kafkaEventSpans) -//#if (!changeFeedOnly) - match args.SourceParams() with - | Choice1Of2 (srcE, context, spec) -> - let connectEs () = srcE.Connect(Log.Logger, Log.Logger, AppName, Equinox.EventStore.ConnectionStrategy.ClusterSingle Equinox.EventStore.NodePreference.Master) - let connectProjEs () = srcE.ConnectProj(Log.Logger, Log.Logger, AppName, Equinox.EventStore.NodePreference.PreferSlave) - - let cache = Equinox.Cache(AppName, sizeMb = 10) - - let checkpoints = Checkpoints.Cosmos.create spec.groupName (context, cache) -#if kafka - let broker, topic = srcE.Cosmos.Sink.BuildTargetParams() - let producer = Propulsion.Kafka.Producer(Log.Logger, AppName, broker, Confluent.Kafka.Acks.All, topic) - let produceSummary (x : Propulsion.Codec.NewtonsoftJson.RenderedSummary) = - producer.Produce(x.s, Propulsion.Codec.NewtonsoftJson.Serdes.Serialize x) -#if blank - let handle = Handler.handle produceSummary -#else - let srcService = - let esStore = - let esConn = connectEs () - let srcCache = Equinox.Cache(AppName, sizeMb = 10) - Config.Store.Esdb (EventStoreContext.create esConn, srcCache) - Todo.Config.create esStore - let handle = Handler.handle srcService produceSummary -#endif - let stats = Handler.Stats(Log.Logger, args.StatsInterval, args.StateInterval, logExternalStats=producer.DumpStats) - let sink = -#if 
(kafka && !blank) - Propulsion.Streams.Sync.StreamsSync.Start( - Log.Logger, args.MaxReadAhead, args.MaxConcurrentStreams, handle, - stats, statsInterval=args.StatsInterval) + let consumerGroupName, maxReadAhead, maxConcurrentStreams = args.ProcessorParams() +#if sourceKafka + let store, targetStore, sinkParams, createConsumerConfig, dumpMetrics = args.ConnectStoreAndSource(AppName) #else - Propulsion.Streams.StreamsProjector.Start(Log.Logger, args.MaxReadAhead, args.MaxConcurrentStreams, handle, stats, args.StatsInterval) + let store, targetStore, sinkParams, buildSourceConfig, dumpMetrics = args.ConnectStoreAndSource(AppName) #endif -#else // !kafka -> ingestion -#if blank - // TODO: establish any relevant inputs, or re-run without `--blank` for example wiring code - let handle = Ingester.handle -#else // blank - let srcService = - let esStore = - let esConn = connectEs () - let srcCache = Equinox.Cache(AppName, sizeMb = 10) - Config.Store.Esdb (EventStoreContext.create esConn, srcCache) - Todo.Config.create esStore - let dstService = - let cosmosStore = Config.Store.Cosmos (context, cache) - TodoSummary.Config.create cosmosStore - let handle = Ingester.handle srcService dstService + let log = Log.Logger + + (* ESTABLISH stats, handle *) + +#if kafka // kafka + let broker, topic = sinkParams.BuildTargetParams() + let producer = Propulsion.Kafka.Producer(Log.Logger, AppName, broker, Confluent.Kafka.Acks.All, topic) + let produceSummary (x : Propulsion.Codec.NewtonsoftJson.RenderedSummary) = + producer.Produce(x.s, Propulsion.Codec.NewtonsoftJson.Serdes.Serialize x) + let dumpMetrics log = + dumpMetrics log + producer.DumpStats log + let stats = Handler.Stats(log, args.StatsInterval, args.StateInterval, args.VerboseStore, dumpMetrics) +#if blank // kafka && blank + let handle = Handler.handle produceSummary +#else // kafka && !blank + let srcService = Todo.Config.create store + let handle = Handler.handle srcService produceSummary +#endif // kafka && !blank +#else 
// !kafka (i.e., ingester) +#if blank // !kafka && blank + // TODO: establish any relevant inputs, or re-run without `--blank` for example wiring code + let handle = Ingester.handle + let stats = Ingester.Stats(log, args.StatsInterval, args.StateInterval, args.VerboseStore, dumpMetrics) +#else // !kafka && !blank + let srcService = Todo.Config.create store + let dstService = TodoSummary.Config.create targetStore + let handle = Ingester.handle srcService dstService + let stats = Ingester.Stats(log, args.StatsInterval, args.StateInterval, args.VerboseStore, dumpMetrics) #endif // blank - let stats = Ingester.Stats(Log.Logger, args.StatsInterval, args.StateInterval) - let sink = Propulsion.Streams.StreamsProjector.Start(Log.Logger, args.MaxReadAhead, args.MaxConcurrentStreams, handle, stats, args.StatsInterval) -#endif // !kafka -#if filter - let filterByStreamName = args.FilterFunction() -#else - let filterByStreamName _ = true #endif - let runPipeline = - EventStoreSource.Run( - Log.Logger, sink, checkpoints, connectProjEs, spec, Handler.tryMapEvent filterByStreamName, - args.MaxReadAhead, args.StatsInterval) - [ runPipeline; sink.AwaitWithStopOnCancellation() ] - | Choice2Of2 (source, context, monitored, leases, processorName, startFromTail, maxItems, lagFrequency) -> -//#endif // !changeFeedOnly -#if changeFeedOnly - let source, context, monitored, leases, processorName, startFromTail, maxItems, lagFrequency = args.SourceParams() -#endif -#else // kafkaEventSpans -> wire up consumption from Kafka, with auxiliary `cosmos` store - let source = args.Source - let consumerConfig = - FsKafka.KafkaConsumerConfig.Create( - AppName, source.Broker, [source.Topic], args.ProcessorName, Confluent.Kafka.AutoOffsetReset.Earliest, - maxInFlightBytes = source.MaxInFlightBytes, ?statisticsInterval = source.LagFrequency) -#endif // kafkaEventSpans -#if (!kafka) -#if (!blank) //!kafka && !blank -> wire up a cosmos context to an ingester -#if kafkaEventSpans - let context = 
source.Cosmos.Connect() |> Async.RunSynchronously |> CosmosStoreContext.create -#endif // kafkaEventSpans - let cache = Equinox.Cache(AppName, sizeMb = 10) - let cosmosStore = Config.Store.Cosmos (context, cache) - let srcService = Todo.Config.create cosmosStore - let dstService = TodoSummary.Config.create cosmosStore - let handle = Ingester.handle srcService dstService -#else // !kafka && blank -> no specific Ingester source/destination wire-up - // TODO: establish any relevant inputs, or re-run without `-blank` for example wiring code - let handle = Ingester.handle -#endif // !kafka && blank - let stats = Ingester.Stats(Log.Logger, args.StatsInterval, args.StateInterval) -#else // kafka -#if (blank && !multiSource) - let broker, topic = source.Sink.BuildTargetParams() -#else - let context = source.Cosmos.Connect() |> Async.RunSynchronously |> CosmosStoreContext.create - let broker, topic = source.Cosmos.Sink.BuildTargetParams() -#endif - let producer = Propulsion.Kafka.Producer(Log.Logger, AppName, broker, Confluent.Kafka.Acks.All, topic) - let produceSummary (x : Propulsion.Codec.NewtonsoftJson.RenderedSummary) = - producer.Produce(x.s, Propulsion.Codec.NewtonsoftJson.Serdes.Serialize x) -#if blank - let handle = Handler.handle produceSummary -#else - let cache = Equinox.Cache(AppName, sizeMb = 10) - let cosmosStore = Config.Store.Cosmos (context, cache) - let service = Todo.Config.create cosmosStore - let handle = Handler.handle service produceSummary -#endif - let stats = Handler.Stats(Log.Logger, args.StatsInterval, args.StateInterval, logExternalStats=producer.DumpStats) -#endif // kafka -#if filter - let filterByStreamName = args.FilterFunction() -#endif -#if kafkaEventSpans + (* ESTABLISH sink; AWAIT *) - let parseStreamEvents (res : Confluent.Kafka.ConsumeResult<_, _>) : seq> = - Propulsion.Codec.NewtonsoftJson.RenderedSpan.parse res.Message.Value -#if filter - |> Seq.filter (fun e -> e.stream |> FsCodec.StreamName.toString |> filterByStreamName) -#endif 
+#if sourceKafka + let parseStreamEvents (res : Confluent.Kafka.ConsumeResult<_, _>) : seq> = + Propulsion.Codec.NewtonsoftJson.RenderedSpan.parse res.Message.Value + let consumerConfig = createConsumerConfig consumerGroupName + let pipeline = Propulsion.Kafka.StreamsConsumer.Start - ( Log.Logger, consumerConfig, parseStreamEvents, handle, args.MaxConcurrentStreams, - stats=stats, statsInterval=args.StateInterval) -#else // !kafkaEventSpans => Default consumption, from CosmosDb -#if (kafka && !blank) - let sink = - Propulsion.Streams.Sync.StreamsSync.Start( - Log.Logger, args.MaxReadAhead, args.MaxConcurrentStreams, handle, - stats, statsInterval=args.StateInterval) -#else - let sink = Propulsion.Streams.StreamsProjector.Start(Log.Logger, args.MaxReadAhead, args.MaxConcurrentStreams, handle, stats, args.StatsInterval) -#endif - - let source = - let mapToStreamItems docs : Propulsion.Streams.StreamEvent<_> seq = - // TODO: customize parsing to events if source is not an Equinox Container - docs - |> Seq.collect Propulsion.CosmosStore.EquinoxNewtonsoftParser.enumStreamEvents -#if filter - |> Seq.filter (fun e -> e.stream |> FsCodec.StreamName.toString |> filterByStreamName) -#endif - let observer = Propulsion.CosmosStore.CosmosStoreSource.CreateObserver(Log.Logger, sink.StartIngester, mapToStreamItems) - Propulsion.CosmosStore.CosmosStoreSource.Start(Log.Logger, monitored, leases, processorName, observer, startFromTail, ?maxItems=maxItems, lagReportFreq=lagFrequency) - [ Async.AwaitKeyboardInterruptAsTaskCancelledException(); source.AwaitWithStopOnCancellation(); sink.AwaitWithStopOnCancellation() ] -#endif // !kafkaEventSpans - -let run args = -#if (!kafkaEventSpans) - build args |> Async.Parallel |> Async.Ignore + ( Log.Logger, consumerConfig, parseStreamEvents, handle, maxConcurrentStreams, + stats = stats, statsInterval = args.StateInterval) + [| pipeline.AwaitWithStopOnCancellation() #else - let sink = build args - sink.AwaitWithStopOnCancellation() + let sink = 
+#if kafka // kafka +#if blank // kafka && blank + Handler.Config.StartSink(log, stats, handle, maxReadAhead, maxConcurrentStreams, purgeInterval = args.PurgeInterval) +#else // kafka && !blank + Propulsion.Streams.Sync.StreamsSync.Start( + Log.Logger, maxReadAhead, maxConcurrentStreams, handle, stats, args.StatsInterval, + Propulsion.Streams.Default.jsonSize, Propulsion.Streams.Default.eventSize) +#endif // kafka && !blank +#else // !kafka (i.e., ingester) + Handler.Config.StartSink(log, stats, handle, maxReadAhead, maxConcurrentStreams, purgeInterval = args.PurgeInterval) +#endif // !kafka + let source, _awaitReactions = + let sourceConfig = buildSourceConfig log consumerGroupName + Handler.Config.StartSource(log, sink, sourceConfig) + + [| source.AwaitWithStopOnCancellation() + sink.AwaitWithStopOnCancellation() #endif + Async.AwaitKeyboardInterruptAsTaskCanceledException() |] [] let main argv = try let args = Args.parse EnvVar.tryGet argv try Log.Logger <- LoggerConfiguration().Configure(verbose=args.Verbose).CreateLogger() - try run args |> Async.RunSynchronously; 0 - with e when not (e :? MissingArg) -> Log.Fatal(e, "Exiting"); 2 + try build args |> Async.Parallel |> Async.Ignore |> Async.RunSynchronously; 0 + with e when not (e :? Args.MissingArg) && not (e :? System.Threading.Tasks.TaskCanceledException) -> Log.Fatal(e, "Exiting"); 2 finally Log.CloseAndFlush() - with MissingArg msg -> eprintfn "%s" msg; 1 + with Args.MissingArg msg -> eprintfn "%s" msg; 1 | :? 
Argu.ArguParseException as e -> eprintfn "%s" e.Message; 1 | e -> eprintf "Exception %s" e.Message; 1 diff --git a/propulsion-reactor/README.md b/propulsion-reactor/README.md index f782d050f..3a2b15f7e 100644 --- a/propulsion-reactor/README.md +++ b/propulsion-reactor/README.md @@ -1,30 +1,10 @@ //#if kafka -//#if changeFeedOnly -# Propulsion CosmosDb ChangeFeedProcessor -> Kafka Projector +# Propulsion EventStore $all/CosmosDb ChangeFeedProcessor/DynamoStoreSource -> Kafka Projector //#else -# Propulsion EventStore $all/CosmosDb ChangeFeedProcessor -> Kafka Projector -//#endif -//#else -//#if changeFeedOnly -# Propulsion CosmosDb ChangeFeedProcessor Projector (without Kafka emission) -//#else -# Propulsion EventStore $all/CosmosDb ChangeFeedProcessor Projector (without Kafka emission) -//#endif +# Propulsion EventStore $all/CosmosDb ChangeFeedProcessor/DynamoStoreSource Projector (without Kafka emission) //#endif This project was generated using: -//#if changeFeedOnly -//#if kafka - - dotnet new -i Equinox.Templates # just once, to install/update in the local templates store - dotnet new proReactor --source changeFeedOnly -k # -k => include Kafka projection logic -//#else - - dotnet new -i Equinox.Templates # just once, to install/update in the local templates store - # add -k to add Kafka Projection logic - dotnet new proReactor --source changeFeedOnly # use --help to see options -//#endif -//#else //#if kafka dotnet new -i Equinox.Templates # just once, to install/update in the local templates store @@ -35,7 +15,6 @@ This project was generated using: # add -k to add Kafka Projection logic dotnet new proReactor # use --help to see options //#endif -//#endif ## Usage instructions @@ -59,12 +38,10 @@ This project was generated using: # default name is "($EQUINOX_COSMOS_CONTAINER)-aux" propulsion init -ru 400 cosmos -//#if (!changeFeedOnly) - NOTE when projecting from EventStore, the current implementation stores the checkpoints within the CosmosDB store in order to 
remove feedback effects. + NOTE when projecting from EventStore, the current implementation stores the checkpoints within a CosmosStore or DynamoStore in order to remove feedback effects. (Yes, someone should do a PR to store the checkpoints in EventStore itself; this is extracted from working code, which can assume there's always a CosmosDB around) -//#endif 3. To run an instance of the Projector from a CosmosDb ChangeFeed //#if kafka @@ -107,4 +84,4 @@ This project was generated using: # NB running more than one projector will cause them to duel, and is hence not advised -5. To create a Consumer, use `dotnet new proConsumer` (see README therein for details) \ No newline at end of file +5. To create a Consumer, use `dotnet new proConsumer` (see README therein for details) diff --git a/propulsion-reactor/Reactor.fsproj b/propulsion-reactor/Reactor.fsproj index a12789fa3..3e68c22d5 100644 --- a/propulsion-reactor/Reactor.fsproj +++ b/propulsion-reactor/Reactor.fsproj @@ -2,12 +2,12 @@ Exe - netcoreapp3.1 + net6.0 5 - + @@ -24,27 +24,29 @@ - + + + - - - - + + + + - - - - - + + + + - - + + + - + diff --git a/propulsion-reactor/SourceArgs.fs b/propulsion-reactor/SourceArgs.fs new file mode 100644 index 000000000..a9c1a9b66 --- /dev/null +++ b/propulsion-reactor/SourceArgs.fs @@ -0,0 +1,396 @@ +module ReactorTemplate.SourceArgs + +open Argu +open Serilog +open System + +type Configuration(tryGet) = + inherit Args.Configuration(tryGet) +#if !sourceKafka + member _.DynamoIndexTable = tryGet Args.INDEX_TABLE +#endif + +#if !sourceKafka +module Cosmos = + + type [] Parameters = + | [] Verbose + | [] ConnectionMode of Microsoft.Azure.Cosmos.ConnectionMode + | [] Connection of string + | [] Database of string + | [] Container of string + | [] Timeout of float + | [] Retries of int + | [] RetriesWaitTime of float + + | [] LeaseContainer of string + | [] FromTail + | [] MaxItems of int + | [] LagFreqM of float + +#if (kafka && blank) + | [] Kafka of ParseResults +#else + | [] 
Cosmos of ParseResults + | [] Dynamo of ParseResults +#endif + interface IArgParserTemplate with + member p.Usage = p |> function + | Verbose -> "request Verbose Logging from ChangeFeedProcessor and Store. Default: off" + | ConnectionMode _ -> "override the connection mode. Default: Direct." + | Connection _ -> "specify a connection string for a Cosmos account. (optional if environment variable EQUINOX_COSMOS_CONNECTION specified)" + | Database _ -> "specify a database name for store. (optional if environment variable EQUINOX_COSMOS_DATABASE specified)" + | Container _ -> "specify a container name for store. (optional if environment variable EQUINOX_COSMOS_CONTAINER specified)" + | Timeout _ -> "specify operation timeout in seconds. Default: 5." + | Retries _ -> "specify operation retries. Default: 9." + | RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds. Default: 30." + + | LeaseContainer _ -> "specify Container Name (in this [target] Database) for Leases container. Default: `SourceContainer` + `-aux`." + | FromTail _ -> "(iff the Consumer Name is fresh) - force skip to present Position. Default: Never skip an event." + | MaxItems _ -> "maximum item count to supply for the Change Feed query. Default: use response size limit" + | LagFreqM _ -> "specify frequency (minutes) to dump lag stats. Default: 1" + +#if (kafka && blank) + | Kafka _ -> "Kafka Sink parameters." +#else + | Cosmos _ -> "CosmosDb Sink parameters." + | Dynamo _ -> "DynamoDb Sink parameters." +#endif + type Arguments(c : Args.Configuration, p : ParseResults) = + let discovery = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString + let mode = p.TryGetResult ConnectionMode + let timeout = p.GetResult(Timeout, 5.) |> TimeSpan.FromSeconds + let retries = p.GetResult(Retries, 9) + let maxRetryWaitTime = p.GetResult(RetriesWaitTime, 30.) 
|> TimeSpan.FromSeconds + let connector = Equinox.CosmosStore.CosmosStoreConnector(discovery, timeout, retries, maxRetryWaitTime, ?mode = mode) + let database = p.TryGetResult Database |> Option.defaultWith (fun () -> c.CosmosDatabase) + let containerId = p.TryGetResult Container |> Option.defaultWith (fun () -> c.CosmosContainer) + let leaseContainerId = p.GetResult(LeaseContainer, containerId + "-aux") + let fromTail = p.Contains FromTail + let maxItems = p.TryGetResult MaxItems + let tailSleepInterval = TimeSpan.FromMilliseconds 500. + let lagFrequency = p.GetResult(LagFreqM, 1.) |> TimeSpan.FromMinutes + member _.Verbose = p.Contains Verbose + member private _.ConnectLeases() = connector.CreateUninitialized(database, leaseContainerId) + member x.MonitoringParams(log : ILogger) = + let leases : Microsoft.Azure.Cosmos.Container = x.ConnectLeases() + log.Information("ChangeFeed Leases Database {db} Container {container}. MaxItems limited to {maxItems}", + leases.Database.Id, leases.Id, Option.toNullable maxItems) + if fromTail then log.Warning("(If new projector group) Skipping projection of all existing events.") + (leases, fromTail, maxItems, tailSleepInterval, lagFrequency) + member x.ConnectStoreAndMonitored() = + connector.ConnectStoreAndMonitored(database, containerId) +#if (kafka && blank) +#if kafka + member val Kafka = + match p.GetSubCommand() with + | Kafka kafka -> Args.KafkaSinkArguments(c, kafka) + | _ -> Args.missingArg "Must specify `kafka` arguments" +#endif + member x.ConnectTarget(_cache) = () +#else + member private _.TargetStoreArgs : Args.TargetStoreArgs = + match p.GetSubCommand() with + | Cosmos cosmos -> Args.TargetStoreArgs.Cosmos (Args.Cosmos.Arguments(c, cosmos)) + | Dynamo dynamo -> Args.TargetStoreArgs.Dynamo (Args.Dynamo.Arguments(c, dynamo)) + | _ -> Args.missingArg "Must specify `cosmos` or `dynamo` target store when source is `esdb`" + member x.ConnectTarget(cache) : Config.Store = + Args.TargetStoreArgs.connectTarget 
x.TargetStoreArgs cache +#if kafka + member x.Kafka = + match x.TargetStoreArgs with + | Args.TargetStoreArgs.Cosmos cosmos -> cosmos.Kafka + | Args.TargetStoreArgs.Dynamo dynamo -> dynamo.Kafka +#endif +#endif + +module Dynamo = + + type [] Parameters = + | [] Verbose + | [] ServiceUrl of string + | [] AccessKey of string + | [] SecretKey of string + | [] Table of string + | [] Retries of int + | [] RetriesTimeoutS of float + | [] IndexTable of string + | [] IndexSuffix of string + | [] MaxItems of int + | [] FromTail + | [] StreamsDop of int +#if (kafka && blank) + | [] Kafka of ParseResults +#else + | [] Cosmos of ParseResults + | [] Dynamo of ParseResults +#endif + interface IArgParserTemplate with + member p.Usage = p |> function + | Verbose -> "Include low level Store logging." + | ServiceUrl _ -> "specify a server endpoint for a Dynamo account. (optional if environment variable " + Args.SERVICE_URL + " specified)" + | AccessKey _ -> "specify an access key id for a Dynamo account. (optional if environment variable " + Args.ACCESS_KEY + " specified)" + | SecretKey _ -> "specify a secret access key for a Dynamo account. (optional if environment variable " + Args.SECRET_KEY + " specified)" + | Retries _ -> "specify operation retries (default: 9)." + | RetriesTimeoutS _ -> "specify max wait-time including retries in seconds (default: 60)" + | Table _ -> "specify a table name for the primary store. (optional if environment variable " + Args.TABLE + " specified)" + | IndexTable _ -> "specify a table name for the index store. (optional if environment variable " + Args.INDEX_TABLE + " specified. default: `Table`+`IndexSuffix`)" + | IndexSuffix _ -> "specify a suffix for the index store. (optional if environment variable " + Args.INDEX_TABLE + " specified. default: \"-index\")" + | MaxItems _ -> "maximum events to load in a batch. Default: 100" + | FromTail _ -> "(iff the Consumer Name is fresh) - force skip to present Position. Default: Never skip an event." 
+ | StreamsDop _ -> "parallelism when loading events from Store Feed Source. Default 4" +#if (kafka && blank) + | Kafka _ -> "Kafka Sink parameters." +#else + | Cosmos _ -> "CosmosDb Sink parameters." + | Dynamo _ -> "DynamoDb Sink parameters." +#endif + + type Arguments(c : Configuration, p : ParseResults) = + let serviceUrl = p.TryGetResult ServiceUrl |> Option.defaultWith (fun () -> c.DynamoServiceUrl) + let accessKey = p.TryGetResult AccessKey |> Option.defaultWith (fun () -> c.DynamoAccessKey) + let secretKey = p.TryGetResult SecretKey |> Option.defaultWith (fun () -> c.DynamoSecretKey) + let table = p.TryGetResult Table |> Option.defaultWith (fun () -> c.DynamoTable) + let indexSuffix = p.GetResult(IndexSuffix, "-index") + let indexTable = p.TryGetResult IndexTable |> Option.orElseWith (fun () -> c.DynamoIndexTable) |> Option.defaultWith (fun () -> table + indexSuffix) + let fromTail = p.Contains FromTail + let tailSleepInterval = TimeSpan.FromMilliseconds 500. + let batchSizeCutoff = p.GetResult(MaxItems, 100) + let streamsDop = p.GetResult(StreamsDop, 4) + let timeout = p.GetResult(RetriesTimeoutS, 60.) 
|> TimeSpan.FromSeconds + let retries = p.GetResult(Retries, 9) + let connector = Equinox.DynamoStore.DynamoStoreConnector(serviceUrl, accessKey, secretKey, timeout, retries) + let client = connector.CreateClient() + let indexStoreClient = lazy client.ConnectStore("Index", indexTable) + member val Verbose = p.Contains Verbose + member _.Connect() = connector.LogConfiguration() + client.ConnectStore("Main", table) |> DynamoStoreContext.create + member _.MonitoringParams(log : ILogger) = + log.Information("DynamoStoreSource BatchSizeCutoff {batchSizeCutoff} Hydrater parallelism {streamsDop}", batchSizeCutoff, streamsDop) + let indexStoreClient = indexStoreClient.Value + if fromTail then log.Warning("(If new projector group) Skipping projection of all existing events.") + indexStoreClient, fromTail, batchSizeCutoff, tailSleepInterval, streamsDop + member _.CreateCheckpointStore(group, cache) = + let indexTable = indexStoreClient.Value + indexTable.CreateCheckpointService(group, cache, Config.log) +#if (kafka && blank) + member x.ConnectTarget(_cache) = () +#if kafka + member val Kafka = + match p.GetSubCommand() with + | Kafka kafka -> Args.KafkaSinkArguments(c, kafka) + | _ -> Args.missingArg "Must specify `kafka` arguments" +#endif +#else + member private _.TargetStoreArgs : Args.TargetStoreArgs = + match p.GetSubCommand() with + | Cosmos cosmos -> Args.TargetStoreArgs.Cosmos (Args.Cosmos.Arguments(c, cosmos)) + | Dynamo dynamo -> Args.TargetStoreArgs.Dynamo (Args.Dynamo.Arguments(c, dynamo)) + | _ -> Args.missingArg "Must specify `cosmos` or `dynamo` target store when source is `esdb`" + member x.ConnectTarget(cache) : Config.Store = + Args.TargetStoreArgs.connectTarget x.TargetStoreArgs cache +#if kafka + member x.Kafka = + match x.TargetStoreArgs with + | Args.TargetStoreArgs.Cosmos cosmos -> cosmos.Kafka + | Args.TargetStoreArgs.Dynamo dynamo -> dynamo.Kafka +#endif +#endif + +module Esdb = + + type [] Parameters = + | [] Verbose + | [] Connection of string + | 
[] Credentials of string + | [] Timeout of float + | [] Retries of int + + | [] BatchSize of int + | [] FromTail + + | [] Cosmos of ParseResults + | [] Dynamo of ParseResults + interface IArgParserTemplate with + member p.Usage = p |> function + | Verbose -> "Include low level Store logging." + | Connection _ -> "EventStore Connection String. (optional if environment variable EQUINOX_ES_CONNECTION specified)" + | Credentials _ -> "Credentials string for EventStore (used as part of connection string, but NOT logged). Default: use EQUINOX_ES_CREDENTIALS environment variable (or assume no credentials)" + | Timeout _ -> "specify operation timeout in seconds. Default: 20." + | Retries _ -> "specify operation retries. Default: 3." + + | FromTail -> "Start the processing from the Tail" + | BatchSize _ -> "maximum events to load in a batch. Default: 100" + + | Cosmos _ -> "CosmosDB Target Store parameters (also used for checkpoint storage)." + | Dynamo _ -> "DynamoDB Target Store parameters (also used for checkpoint storage)." + type Arguments(c : Configuration, p : ParseResults) = + let startFromTail = p.Contains FromTail + let batchSize = p.GetResult(BatchSize, 100) + let tailSleepInterval = TimeSpan.FromSeconds 0.5 + let connectionStringLoggable = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.EventStoreConnection) + let credentials = p.TryGetResult Credentials |> Option.orElseWith (fun () -> c.MaybeEventStoreCredentials) + let discovery = match credentials with Some x -> String.Join(";", connectionStringLoggable, x) | None -> connectionStringLoggable + |> Equinox.EventStoreDb.Discovery.ConnectionString + let retries = p.GetResult(Retries, 3) + let timeout = p.GetResult(Timeout, 20.) |> TimeSpan.FromSeconds + let checkpointInterval = TimeSpan.FromHours 1. 
+ member val Verbose = p.Contains Verbose + + member _.Connect(log : ILogger, appName, nodePreference) : Equinox.EventStoreDb.EventStoreConnection = + log.Information("EventStore {discovery}", connectionStringLoggable) + let tags=["M", Environment.MachineName; "I", Guid.NewGuid() |> string] + Equinox.EventStoreDb.EventStoreConnector(timeout, retries, tags = tags) + .Establish(appName, discovery, Equinox.EventStoreDb.ConnectionStrategy.ClusterSingle nodePreference) + + member _.MonitoringParams(log : ILogger) = + log.Information("EventStoreSource BatchSize {batchSize} ", batchSize) + startFromTail, batchSize, tailSleepInterval + member _.CreateCheckpointStore(group, store : Config.Store) : Propulsion.Feed.IFeedCheckpointStore = + Args.Checkpoints.createCheckpointStore (group, checkpointInterval, store) + member private _.TargetStoreArgs : Args.TargetStoreArgs = + match p.GetSubCommand() with + | Cosmos cosmos -> Args.TargetStoreArgs.Cosmos (Args.Cosmos.Arguments(c, cosmos)) + | Dynamo dynamo -> Args.TargetStoreArgs.Dynamo (Args.Dynamo.Arguments(c, dynamo)) + | _ -> Args.missingArg "Must specify `cosmos` or `dynamo` target store when source is `esdb`" +#if kafka + member x.Kafka = + match x.TargetStoreArgs with + | Args.TargetStoreArgs.Cosmos cosmos -> cosmos.Kafka + | Args.TargetStoreArgs.Dynamo dynamo -> dynamo.Kafka +#endif + member x.ConnectTarget(cache) : Config.Store = + Args.TargetStoreArgs.connectTarget x.TargetStoreArgs cache + +module Sss = + + // TOCONSIDER: add DB connectors other than MsSql + type [] Parameters = + | [] Tail of intervalS: float + | [] Connection of string + | [] Credentials of string + | [] Schema of string + + | [] BatchSize of int + | [] FromTail + + | [] CheckpointsConnection of string + | [] CheckpointsCredentials of string + + | [] Cosmos of ParseResults + | [] Dynamo of ParseResults + interface IArgParserTemplate with + member p.Usage = p |> function + | Tail _ -> "Polling interval in Seconds. 
Default: 1" + | BatchSize _ -> "Maximum events to request from feed. Default: 512" + | Connection _ -> "Connection string for SqlStreamStore db. Optional if SQLSTREAMSTORE_CONNECTION specified" + | Credentials _ -> "Credentials string for SqlStreamStore db (used as part of connection string, but NOT logged). Default: use SQLSTREAMSTORE_CREDENTIALS environment variable (or assume no credentials)" + | Schema _ -> "Database schema name" + | FromTail -> "Start the processing from the Tail" + | CheckpointsConnection _ ->"Connection string for Checkpoints sql db. Optional if SQLSTREAMSTORE_CONNECTION_CHECKPOINTS specified. Default: same as `Connection`" + | CheckpointsCredentials _ ->"Credentials string for Checkpoints sql db. (used as part of checkpoints connection string, but NOT logged). Default (when no `CheckpointsConnection`: use `Credentials. Default (when `CheckpointsConnection` specified): use SQLSTREAMSTORE_CREDENTIALS_CHECKPOINTS environment variable (or assume no credentials)" + | Cosmos _ -> "CosmosDB Target Store parameters" + | Dynamo _ -> "DynamoDB Target Store parameters" + + type Arguments(c : Configuration, p : ParseResults) = + let startFromTail = p.Contains FromTail + let tailSleepInterval = p.GetResult(Tail, 1.) |> TimeSpan.FromSeconds + let checkpointEventInterval = TimeSpan.FromHours 1. 
// Ignored when storing to Propulsion.SqlStreamStore.ReaderCheckpoint + let batchSize = p.GetResult(BatchSize, 512) + let connection = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.SqlStreamStoreConnection) + let credentials = p.TryGetResult Credentials |> Option.orElseWith (fun () -> c.SqlStreamStoreCredentials) |> Option.toObj + let schema = p.GetResult(Schema, null) + + member x.BuildCheckpointsConnectionString() = + let c, cs = + match p.TryGetResult CheckpointsConnection, p.TryGetResult CheckpointsCredentials with + | Some c, Some p -> c, String.Join(";", c, p) + | None, Some p -> let c = connection in c, String.Join(";", c, p) + | None, None -> let c = connection in c, String.Join(";", c, credentials) + | Some cc, None -> let p = c.SqlStreamStoreCredentialsCheckpoints |> Option.toObj + cc, String.Join(";", cc, p) + Log.Information("Checkpoints MsSql Connection {connectionString}", c) + cs + member x.Connect() = + let conn, creds, schema, autoCreate = connection, credentials, schema, false + let sssConnectionString = String.Join(";", conn, creds) + Log.Information("SqlStreamStore MsSql Connection {connectionString} Schema {schema} AutoCreate {autoCreate}", conn, schema, autoCreate) + let rawStore = Equinox.SqlStreamStore.MsSql.Connector(sssConnectionString, schema, autoCreate=autoCreate).Connect() |> Async.RunSynchronously + Equinox.SqlStreamStore.SqlStreamStoreConnection rawStore + member _.MonitoringParams(log : ILogger) = + log.Information("SqlStreamStoreSource BatchSize {batchSize} ", batchSize) + startFromTail, batchSize, tailSleepInterval + member private _.TargetStoreArgs : Args.TargetStoreArgs = + match p.GetSubCommand() with + | Cosmos cosmos -> Args.TargetStoreArgs.Cosmos (Args.Cosmos.Arguments(c, cosmos)) + | Dynamo dynamo -> Args.TargetStoreArgs.Dynamo (Args.Dynamo.Arguments(c, dynamo)) + | _ -> Args.missingArg "Must specify `cosmos` or `dynamo` target store when source is `sss`" + member x.CreateCheckpointStoreSql(groupName) : 
Propulsion.Feed.IFeedCheckpointStore = + let connectionString = x.BuildCheckpointsConnectionString() + Propulsion.SqlStreamStore.ReaderCheckpoint.Service(connectionString, groupName, checkpointEventInterval) +#if kafka + member x.Kafka = + match x.TargetStoreArgs with + | Args.TargetStoreArgs.Cosmos cosmos -> cosmos.Kafka + | Args.TargetStoreArgs.Dynamo dynamo -> dynamo.Kafka +#endif + member x.ConnectTarget(cache) : Config.Store = + Args.TargetStoreArgs.connectTarget x.TargetStoreArgs cache + +#else // sourceKafka +module Kafka = + + type [] Parameters = + | [] Broker of string + | [] Topic of string + | [] MaxInflightMb of float + | [] LagFreqM of float +#if (kafka && blank) + | [] Kafka of ParseResults +#else + | [] Cosmos of ParseResults + | [] Dynamo of ParseResults +#endif + interface IArgParserTemplate with + member p.Usage = p |> function + | Broker _ -> "specify Kafka Broker, in host:port format. (optional if environment variable PROPULSION_KAFKA_BROKER specified)" + | Topic _ -> "specify Kafka Topic Id. (optional if environment variable PROPULSION_KAFKA_TOPIC specified)" + | MaxInflightMb _ -> "maximum MiB of data to read ahead. Default: 10." + | LagFreqM _ -> "specify frequency (minutes) to dump lag stats. Default: 1." +#if (kafka && blank) + | Kafka _ -> "Kafka Sink parameters." +#else + | Cosmos _ -> "CosmosDb Sink parameters." + | Dynamo _ -> "CosmosDb Sink parameters." +#endif + type Arguments(c : Configuration, p : ParseResults) = + member val Broker = p.TryGetResult Broker |> Option.defaultWith (fun () -> c.Broker) + member val Topic = p.TryGetResult Topic |> Option.defaultWith (fun () -> c.Topic) + member val MaxInFlightBytes = p.GetResult(MaxInflightMb, 10.) * 1024. * 1024. 
|> int64 + member val LagFrequency = p.TryGetResult LagFreqM |> Option.map TimeSpan.FromMinutes + member x.BuildSourceParams() = x.Broker, x.Topic + +#if (kafka && blank) + member x.ConnectTarget(_cache) = () +#if kafka + member val Kafka = + match p.GetSubCommand() with + | Kafka kafka -> Args.KafkaSinkArguments(c, kafka) + | _ -> Args.missingArg "Must specify kafka arguments" +#endif +#else + member private _.TargetStoreArgs : Args.TargetStoreArgs = + match p.GetSubCommand() with + | Cosmos cosmos -> Args.TargetStoreArgs.Cosmos (Args.Cosmos.Arguments(c, cosmos)) + | Dynamo dynamo -> Args.TargetStoreArgs.Dynamo (Args.Dynamo.Arguments(c, dynamo)) + | _ -> Args.missingArg "Must specify `cosmos` or `dynamo` target store when source is `kafka`" +#if kafka + member x.Kafka = + match x.TargetStoreArgs with + | Args.TargetStoreArgs.Cosmos cosmos -> cosmos.Sink + | Args.TargetStoreArgs.Dynamo dynamo -> dynamo.Sink +#endif + member x.ConnectTarget(cache) : Config.Store = + Args.TargetStoreArgs.connectTarget x.TargetStoreArgs cache +#endif +#endif diff --git a/propulsion-reactor/SourceConfig.fs b/propulsion-reactor/SourceConfig.fs new file mode 100644 index 000000000..e868b0d3d --- /dev/null +++ b/propulsion-reactor/SourceConfig.fs @@ -0,0 +1,102 @@ +namespace ReactorTemplate + +open System + +[] +type SourceConfig = + | Cosmos of monitoredContainer : Microsoft.Azure.Cosmos.Container + * leasesContainer : Microsoft.Azure.Cosmos.Container + * checkpoints : CosmosFeedConfig + * tailSleepInterval : TimeSpan + | Dynamo of indexStore : Equinox.DynamoStore.DynamoStoreClient + * checkpoints : Propulsion.Feed.IFeedCheckpointStore + * loading : DynamoLoadModeConfig + * startFromTail : bool + * batchSizeCutoff : int + * tailSleepInterval : TimeSpan + * statsInterval : TimeSpan + | Esdb of client : EventStore.Client.EventStoreClient + * checkpoints : Propulsion.Feed.IFeedCheckpointStore + * hydrateBodies : bool + * startFromTail : bool + * batchSize : int + * tailSleepInterval : 
TimeSpan + * statsInterval : TimeSpan + | Sss of client : SqlStreamStore.IStreamStore + * checkpoints : Propulsion.Feed.IFeedCheckpointStore + * hydrateBodies : bool + * startFromTail : bool + * batchSize : int + * tailSleepInterval : TimeSpan + * statsInterval : TimeSpan +and [] CosmosFeedConfig = + | Ephemeral of processorName : string + | Persistent of processorName : string * startFromTail : bool * maxItems : int option * lagFrequency : TimeSpan +and [] DynamoLoadModeConfig = + | Hydrate of monitoredContext : Equinox.DynamoStore.DynamoStoreContext * hydrationConcurrency : int + +module SourceConfig = + module Cosmos = + open Propulsion.CosmosStore + let start log (sink : Propulsion.Streams.Default.Sink) categoryFilter + (monitoredContainer, leasesContainer, checkpointConfig, tailSleepInterval) : Propulsion.Pipeline * (TimeSpan -> Async) option = + let parseFeedDoc = EquinoxSystemTextJsonParser.enumStreamEvents categoryFilter + let observer = CosmosStoreSource.CreateObserver(log, sink.StartIngester, Seq.collect parseFeedDoc) + let source = + match checkpointConfig with + | Ephemeral processorName -> + let withStartTime1sAgo (x : Microsoft.Azure.Cosmos.ChangeFeedProcessorBuilder) = + x.WithStartTime(let t = DateTime.UtcNow in t.AddSeconds -1.) + let lagFrequency = TimeSpan.FromMinutes 1. 
+ CosmosStoreSource.Start(log, monitoredContainer, leasesContainer, processorName, observer, + startFromTail = true, customize = withStartTime1sAgo, tailSleepInterval = tailSleepInterval, + lagReportFreq = lagFrequency) + | Persistent (processorName, startFromTail, maxItems, lagFrequency) -> + CosmosStoreSource.Start(log, monitoredContainer, leasesContainer, processorName, observer, + startFromTail = startFromTail, ?maxItems = maxItems, tailSleepInterval = tailSleepInterval, + lagReportFreq = lagFrequency) + source, None + module Dynamo = + open Propulsion.DynamoStore + let start (log, storeLog) (sink : Propulsion.Streams.Default.Sink) categoryFilter + (indexStore, checkpoints, loadModeConfig, startFromTail, tailSleepInterval, batchSizeCutoff, statsInterval) : Propulsion.Pipeline * (TimeSpan -> Async) option = + let loadMode = + match loadModeConfig with + | Hydrate (monitoredContext, hydrationConcurrency) -> LoadMode.Hydrated (categoryFilter, hydrationConcurrency, monitoredContext) + let source = + DynamoStoreSource( + log, statsInterval, + indexStore, batchSizeCutoff, tailSleepInterval, + checkpoints, sink, loadMode, + startFromTail = startFromTail, storeLog = storeLog) + source.Start(), Some (fun propagationDelay -> source.Monitor.AwaitCompletion(propagationDelay, ignoreSubsequent = false)) + module Esdb = + open Propulsion.EventStoreDb + let start log (sink : Propulsion.Streams.Default.Sink) categoryFilter + (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval) : Propulsion.Pipeline * (TimeSpan -> Async) option = + let source = + EventStoreSource( + log, statsInterval, + client, batchSize, tailSleepInterval, + checkpoints, sink, categoryFilter, hydrateBodies = hydrateBodies, startFromTail = startFromTail) + source.Start(), Some (fun propagationDelay -> source.Monitor.AwaitCompletion(propagationDelay, ignoreSubsequent = false)) + module Sss = + open Propulsion.SqlStreamStore + let start log (sink : 
Propulsion.Streams.Default.Sink) categoryFilter + (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval) : Propulsion.Pipeline * (TimeSpan -> Async) option = + let source = + SqlStreamStoreSource( + log, statsInterval, + client, batchSize, tailSleepInterval, + checkpoints, sink, categoryFilter, hydrateBodies = hydrateBodies, startFromTail = startFromTail) + source.Start(), Some (fun propagationDelay -> source.Monitor.AwaitCompletion(propagationDelay, ignoreSubsequent = false)) + + let start (log, storeLog) sink categoryFilter : SourceConfig -> Propulsion.Pipeline * (TimeSpan -> Async) option = function + | SourceConfig.Cosmos (monitored, leases, checkpointConfig, tailSleepInterval) -> + Cosmos.start log sink categoryFilter (monitored, leases, checkpointConfig, tailSleepInterval) + | SourceConfig.Dynamo (indexStore, checkpoints, loading, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval) -> + Dynamo.start (log, storeLog) sink categoryFilter (indexStore, checkpoints, loading, startFromTail, tailSleepInterval, batchSizeCutoff, statsInterval) + | SourceConfig.Esdb (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval) -> + Esdb.start log sink categoryFilter (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval) + | SourceConfig.Sss (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval) -> + Sss.start log sink categoryFilter (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval) diff --git a/propulsion-reactor/Todo.fs b/propulsion-reactor/Todo.fs index 5ed40156a..b54f901b7 100644 --- a/propulsion-reactor/Todo.fs +++ b/propulsion-reactor/Todo.fs @@ -1,8 +1,10 @@ module ReactorTemplate.Todo +open Propulsion.Internal + let [] Category = "Todos" -let streamName (clientId: ClientId) = FsCodec.StreamName.create Category (ClientId.toString clientId) -let 
(|StreamName|_|) = function FsCodec.StreamName.CategoryAndId (Category, ClientId.Parse clientId) -> Some clientId | _ -> None +let streamName (clientId : ClientId) = struct (Category, ClientId.toString clientId) +let [] (|StreamName|_|) = function FsCodec.StreamName.CategoryAndId (Category, ClientId.Parse clientId) -> ValueSome clientId | _ -> ValueNone // NB - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -18,15 +20,16 @@ module Events = | Cleared of ClearedData | Snapshotted of SnapshotData interface TypeShape.UnionContract.IUnionContract - let codec = Config.EventCodec.create() + let codec, codecJe = Config.EventCodec.gen, Config.EventCodec.genJe module Reactions = + let [] Category = Category let (|Decode|) (stream, span : Propulsion.Streams.StreamSpan<_>) = - span.events |> Array.choose (EventCodec.tryDecode Events.codec stream) - let (|Parse|_|) = function - | (StreamName clientId, _) & Decode events -> Some (clientId, events) - | _ -> None + span |> Array.chooseV (EventCodec.tryDecode Events.codec stream) + let [] (|Parse|_|) = function + | (StreamName clientId, _) & Decode events -> ValueSome struct (clientId, events) + | _ -> ValueNone /// Allows us to skip producing summaries for events that we know won't result in an externally discernable change to the summary output let impliesStateChange = function Events.Snapshotted _ -> false | _ -> true @@ -63,14 +66,11 @@ type Service internal (resolve : ClientId -> Equinox.Decider - let cat = Config.Cosmos.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) - cat.Resolve -//#if multiSource - | Config.Store.Esdb (context, cache) -> - let cat = Config.Esdb.create Events.codec Fold.initial Fold.fold (context, cache) - cat.Resolve -//#endif - let private resolveDecider store = streamName >> resolveStream store >> Config.createDecider - let create = resolveDecider >> Service + let 
private (|Category|) = function + | Config.Store.Dynamo (context, cache) -> Config.Dynamo.createSnapshotted Events.codec Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) + | Config.Store.Cosmos (context, cache) -> Config.Cosmos.createSnapshotted Events.codecJe Fold.initial Fold.fold (Fold.isOrigin, Fold.toSnapshot) (context, cache) +#if !(sourceKafka && kafka) + | Config.Store.Esdb (context, cache) -> Config.Esdb.create Events.codec Fold.initial Fold.fold (context, cache) + | Config.Store.Sss (context, cache) -> Config.Sss.create Events.codec Fold.initial Fold.fold (context, cache) +#endif + let create (Category cat) = Service(streamName >> Config.createDecider cat) diff --git a/propulsion-reactor/TodoSummary.fs b/propulsion-reactor/TodoSummary.fs index 8ca42bd4c..48f9baa0c 100644 --- a/propulsion-reactor/TodoSummary.fs +++ b/propulsion-reactor/TodoSummary.fs @@ -1,7 +1,7 @@ module ReactorTemplate.TodoSummary let [] Category = "TodoSummary" -let streamName (clientId: ClientId) = FsCodec.StreamName.create Category (ClientId.toString clientId) +let streamName (clientId : ClientId) = struct (Category, ClientId.toString clientId) // NB - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -12,7 +12,7 @@ module Events = type Event = | Ingested of IngestedData interface TypeShape.UnionContract.IUnionContract - let codec = Config.EventCodec.create() + let codec, codecJe = Config.EventCodec.gen, Config.EventCodec.genJe module Fold = @@ -51,14 +51,11 @@ type Service internal (resolve : ClientId -> Equinox.Decider - let cat = Config.Cosmos.createRollingState Events.codec Fold.initial Fold.fold Fold.toSnapshot (context, cache) - cat.Resolve -//#if multiSource - | Config.Store.Esdb (context, cache) -> - let cat = Config.Esdb.create Events.codec Fold.initial Fold.fold (context, cache) - cat.Resolve -//#endif - let private resolveDecider store = streamName >> resolveStream 
store >> Config.createDecider - let create = resolveDecider >> Service + let private (|Category|) = function + | Config.Store.Cosmos (context, cache) -> Config.Cosmos.createRollingState Events.codecJe Fold.initial Fold.fold Fold.toSnapshot (context, cache) + | Config.Store.Dynamo (context, cache) -> Config.Dynamo.createRollingState Events.codec Fold.initial Fold.fold Fold.toSnapshot (context, cache) +#if !(sourceKafka && kafka) + | Config.Store.Esdb (context, cache) -> Config.Esdb.create Events.codec Fold.initial Fold.fold (context, cache) + | Config.Store.Sss (context, cache) -> Config.Sss.create Events.codec Fold.initial Fold.fold (context, cache) +#endif + let create (Category cat) = streamName >> Config.createDecider cat |> Service diff --git a/propulsion-summary-consumer/Program.fs b/propulsion-summary-consumer/Program.fs index 80d9b85b1..a852fb761 100644 --- a/propulsion-summary-consumer/Program.fs +++ b/propulsion-summary-consumer/Program.fs @@ -33,7 +33,7 @@ module Args = | [] Cosmos of ParseResults interface IArgParserTemplate with - member a.Usage = a |> function + member p.Usage = p |> function | Broker _ -> "specify Kafka Broker, in host:port format. (optional if environment variable PROPULSION_KAFKA_BROKER specified)" | Topic _ -> "specify Kafka Topic name. (optional if environment variable PROPULSION_KAFKA_TOPIC specified)" | Group _ -> "specify Kafka Consumer Group Id. (optional if environment variable PROPULSION_KAFKA_GROUP specified)" @@ -43,17 +43,17 @@ module Args = | MaxWriters _ -> "maximum number of items to process in parallel. Default: 8" | Verbose _ -> "request verbose logging." 
| Cosmos _ -> "specify CosmosDb input parameters" - and Arguments(c : Configuration, a : ParseResults) = - member val Cosmos = CosmosArguments(c, a.GetResult Cosmos) - member val Broker = a.TryGetResult Broker |> Option.defaultWith (fun () -> c.Broker) - member val Topic = a.TryGetResult Topic |> Option.defaultWith (fun () -> c.Topic) - member val Group = a.TryGetResult Group |> Option.defaultWith (fun () -> c.Group) - member val MaxInFlightBytes = a.GetResult(MaxInflightMb, 10.) * 1024. * 1024. |> int64 - member val LagFrequency = a.TryGetResult LagFreqM |> Option.map TimeSpan.FromMinutes + and Arguments(c : Configuration, p : ParseResults) = + member val Cosmos = CosmosArguments(c, p.GetResult Cosmos) + member val Broker = p.TryGetResult Broker |> Option.defaultWith (fun () -> c.Broker) + member val Topic = p.TryGetResult Topic |> Option.defaultWith (fun () -> c.Topic) + member val Group = p.TryGetResult Group |> Option.defaultWith (fun () -> c.Group) + member val MaxInFlightBytes = p.GetResult(MaxInflightMb, 10.) * 1024. * 1024. |> int64 + member val LagFrequency = p.TryGetResult LagFreqM |> Option.map TimeSpan.FromMinutes - member val MaxConcurrentStreams = a.GetResult(MaxWriters, 8) + member val MaxConcurrentStreams = p.GetResult(MaxWriters, 8) - member val Verbose = a.Contains Verbose + member val Verbose = p.Contains Verbose member val StatsInterval = TimeSpan.FromMinutes 1. member val StateInterval = TimeSpan.FromMinutes 5. and [] CosmosParameters = @@ -65,7 +65,7 @@ module Args = | [] Retries of int | [] RetriesWaitTime of float interface IArgParserTemplate with - member a.Usage = a |> function + member p.Usage = p |> function | ConnectionMode _ -> "override the connection mode. Default: Direct." | Connection _ -> "specify a connection string for a Cosmos account. (optional if environment variable EQUINOX_COSMOS_CONNECTION specified)" | Database _ -> "specify a database name for Cosmos store. 
(optional if environment variable EQUINOX_COSMOS_DATABASE specified)" @@ -73,15 +73,15 @@ module Args = | Timeout _ -> "specify operation timeout in seconds. Default: 5." | Retries _ -> "specify operation retries. Default: 1." | RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds. Default: 5." - and CosmosArguments(c : Configuration, a : ParseResults) = - let discovery = a.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString - let mode = a.TryGetResult ConnectionMode - let timeout = a.GetResult(Timeout, 5.) |> TimeSpan.FromSeconds - let retries = a.GetResult(Retries, 1) - let maxRetryWaitTime = a.GetResult(RetriesWaitTime, 5.) |> TimeSpan.FromSeconds + and CosmosArguments(c : Configuration, p : ParseResults) = + let discovery = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString + let mode = p.TryGetResult ConnectionMode + let timeout = p.GetResult(Timeout, 5.) |> TimeSpan.FromSeconds + let retries = p.GetResult(Retries, 1) + let maxRetryWaitTime = p.GetResult(RetriesWaitTime, 5.) 
|> TimeSpan.FromSeconds let connector = Equinox.CosmosStore.CosmosStoreConnector(discovery, timeout, retries, maxRetryWaitTime, ?mode = mode) - let database = a.TryGetResult Database |> Option.defaultWith (fun () -> c.CosmosDatabase) - let container = a.GetResult Container + let database = p.TryGetResult Database |> Option.defaultWith (fun () -> c.CosmosDatabase) + let container = p.GetResult Container member _.Connect() = connector.ConnectStore("Main", database, container) /// Parse the commandline; can throw exceptions in response to missing arguments and/or `-h`/`--help` args @@ -96,7 +96,7 @@ let start (args : Args.Arguments) = let service = let store = let context = args.Cosmos.Connect() |> Async.RunSynchronously |> CosmosStoreContext.create - let cache = Equinox.Cache (AppName, sizeMb = 10) + let cache = Equinox.Cache(AppName, sizeMb = 10) Config.Store.Cosmos (context, cache) TodoSummary.Config.create store let config = diff --git a/propulsion-summary-consumer/SummaryConsumer.fsproj b/propulsion-summary-consumer/SummaryConsumer.fsproj index cf01e061d..f2e9ca200 100644 --- a/propulsion-summary-consumer/SummaryConsumer.fsproj +++ b/propulsion-summary-consumer/SummaryConsumer.fsproj @@ -17,10 +17,9 @@ - + - - + diff --git a/propulsion-sync/Infrastructure.fs b/propulsion-sync/Infrastructure.fs index ee5f45bef..04450f551 100644 --- a/propulsion-sync/Infrastructure.fs +++ b/propulsion-sync/Infrastructure.fs @@ -55,12 +55,11 @@ module CosmosStoreContext = type Logging() = [] - static member Configure(configuration : LoggerConfiguration, verbose, storeVerbose, ?maybeSeqEndpoint) = + static member Configure(configuration : LoggerConfiguration, verbose, verboseStore, ?maybeSeqEndpoint) = configuration - .Destructure.FSharpTypes() .Enrich.FromLogContext() |> fun c -> if verbose then c.MinimumLevel.Debug() else c - |> fun c -> let ingesterLevel = if storeVerbose then LogEventLevel.Debug else LogEventLevel.Information + |> fun c -> let ingesterLevel = if verboseStore then 
LogEventLevel.Debug else LogEventLevel.Information c.MinimumLevel.Override(typeof.FullName, ingesterLevel) |> fun c -> let generalLevel = if verbose then LogEventLevel.Information else LogEventLevel.Warning c.MinimumLevel.Override(typeof.FullName, generalLevel) @@ -74,7 +73,7 @@ type Logging() = let isWriterA = Filters.Matching.FromSource().Invoke let isWriterB = Filters.Matching.FromSource().Invoke let l = - if storeVerbose then l + if verboseStore then l else l.Filter.ByExcluding(fun x -> Log.isStoreMetrics x || isWriterA x || isWriterB x) l.WriteTo.Console(theme=Sinks.SystemConsole.Themes.AnsiConsoleTheme.Code, outputTemplate=t) |> ignore) |> ignore diff --git a/propulsion-sync/Program.fs b/propulsion-sync/Program.fs index 2b909c3a5..c0416103e 100644 --- a/propulsion-sync/Program.fs +++ b/propulsion-sync/Program.fs @@ -9,13 +9,11 @@ open Serilog open System exception MissingArg of message : string with override this.Message = this.message +let missingArg msg = raise (MissingArg msg) type Configuration(tryGet) = - let get key = - match tryGet key with - | Some value -> value - | None -> raise (MissingArg (sprintf "Missing Argument/Environment Variable %s" key)) + let get key = match tryGet key with Some value -> value | None -> missingArg $"Missing Argument/Environment Variable %s{key}" let isTrue varName = tryGet varName |> Option.exists (fun s -> String.Equals(s, bool.TrueString, StringComparison.OrdinalIgnoreCase)) member _.CosmosConnection = get "EQUINOX_COSMOS_CONNECTION" @@ -37,7 +35,7 @@ module Args = [] type Parameters = | [] Verbose - | [] StoreVerbose + | [] VerboseStore | [] LocalSeq | [] ProcessorName of string | [] MaxReadAhead of int @@ -49,9 +47,9 @@ module Args = | [] SrcEs of ParseResults | [] SrcCosmos of ParseResults interface IArgParserTemplate with - member a.Usage = a |> function + member p.Usage = p |> function | Verbose -> "request Verbose Logging. Default: off" - | StoreVerbose -> "request Verbose Ingester Logging. 
Default: off" + | VerboseStore -> "request Verbose Ingester Logging. Default: off" | LocalSeq -> "configures writing to a local Seq endpoint at http://localhost:5341, see https://getseq.net" | ProcessorName _ -> "Projector consumer group name." | MaxReadAhead _ -> "maximum number of batches to let processing get ahead of completion. Default: 16." @@ -63,21 +61,21 @@ module Args = | SrcCosmos _ -> "Cosmos input parameters." | SrcEs _ -> "EventStore input parameters." - and Arguments(c : Configuration, a : ParseResults) = - member val Verbose = a.Contains Parameters.Verbose - member val StoreVerbose = a.Contains StoreVerbose - member val MaybeSeqEndpoint = if a.Contains LocalSeq then Some "http://localhost:5341" else None - member val ProcessorName = a.GetResult ProcessorName - member val MaxReadAhead = a.GetResult(MaxReadAhead, 2048) - member val MaxWriters = a.GetResult(MaxWriters, 512) - member val MaxConnections = a.GetResult(MaxConnections, 1) - member val MaxSubmit = a.GetResult(MaxSubmit, 8) + and Arguments(c : Configuration, p : ParseResults) = + member val Verbose = p.Contains Parameters.Verbose + member val VerboseStore = p.Contains VerboseStore + member val MaybeSeqEndpoint = if p.Contains LocalSeq then Some "http://localhost:5341" else None + member val ProcessorName = p.GetResult ProcessorName + member val MaxReadAhead = p.GetResult(MaxReadAhead, 2048) + member val MaxWriters = p.GetResult(MaxWriters, 512) + member val MaxConnections = p.GetResult(MaxConnections, 1) + member val MaxSubmit = p.GetResult(MaxSubmit, 8) member val Source : Choice = - match a.TryGetSubCommand() with - | Some (SrcCosmos cosmos) -> Choice1Of2 (CosmosSourceArguments (c, cosmos)) - | Some (SrcEs es) -> Choice2Of2 (EsSourceArguments (c, es)) - | _ -> raise (MissingArg "Must specify one of cosmos or es for Src") + match p.GetSubCommand() with + | SrcCosmos cosmos -> Choice1Of2 (CosmosSourceArguments(c, cosmos)) + | SrcEs es -> Choice2Of2 (EsSourceArguments(c, es)) + | _ -> 
missingArg "Must specify one of cosmos or es for Src" member val StatsInterval = TimeSpan.FromMinutes 1. member val StateInterval = TimeSpan.FromMinutes 5. @@ -87,7 +85,7 @@ module Args = || streamName.StartsWith "InventoryCount-" // No Longer used || streamName.StartsWith "InventoryLog" // 5GB, causes lopsided partitions, unused let excludeLong = defaultArg excludeLong true - match a.GetResults CategoryBlacklist, a.GetResults CategoryWhitelist with + match p.GetResults CategoryBlacklist, p.GetResults CategoryWhitelist with | [], [] when longOnly = Some true -> Log.Information("Only including long streams") isLong @@ -105,7 +103,7 @@ module Args = fun x -> not (black.Contains x) && (not << isCheckpoint) x && (not excludeLong || (not << isLong) x) | bad, [] -> let black = Set.ofList bad in Log.Warning("Excluding categories: {cats}", black); fun x -> not (black.Contains x) | [], good -> let white = Set.ofList good in Log.Warning("Only copying categories: {cats}", white); fun x -> white.Contains x - | _, _ -> raise (MissingArg "BlackList and Whitelist are mutually exclusive; inclusions and exclusions cannot be mixed") + | _, _ -> missingArg "BlackList and Whitelist are mutually exclusive; inclusions and exclusions cannot be mixed" member x.Sink : Choice = match x.Source with @@ -120,7 +118,7 @@ module Args = match srcC.LeaseContainerId, dstC.LeaseContainerId with | _, None -> srcC.ConnectLeases() | None, Some dc -> dstC.ConnectLeases dc - | Some _, Some _ -> raise (MissingArg "LeaseContainerSource and LeaseContainerDestination are mutually exclusive - can only store in one database") + | Some _, Some _ -> missingArg "LeaseContainerSource and LeaseContainerDestination are mutually exclusive - can only store in one database" | Choice2Of2 _dstE -> srcC.ConnectLeases() Log.Information("Syncing... {dop} writers, max {maxReadAhead} batches read ahead", x.MaxWriters, x.MaxReadAhead) Log.Information("ChangeFeed {processorName} Leases Database {db} Container {container}. 
MaxItems limited to {maxItems}", @@ -142,7 +140,7 @@ module Args = batchSize = srcE.StartingBatchSize; minBatchSize = srcE.MinBatchSize; gorge = srcE.Gorge; streamReaders = srcE.StreamReaders }) and [] CosmosSourceParameters = | [] FromTail - | [] MaxItems of int + | [] MaxItems of int | [] LagFreqM of float | [] LeaseContainer of string @@ -157,7 +155,7 @@ module Args = | [] DstEs of ParseResults | [] DstCosmos of ParseResults interface IArgParserTemplate with - member a.Usage = a |> function + member p.Usage = p |> function | FromTail -> "(iff the Consumer Name is fresh) - force skip to present Position. Default: Never skip an event." | MaxItems _ -> "maximum item count to request from feed. Default: unlimited" | LagFreqM _ -> "frequency (in minutes) to dump lag stats. Default: 1" @@ -173,30 +171,30 @@ module Args = | DstEs _ -> "EventStore Sink parameters." | DstCosmos _ -> "CosmosDb Sink parameters." - and CosmosSourceArguments(c : Configuration, a : ParseResults) = - let discovery = a.TryGetResult CosmosSourceParameters.Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString - let mode = a.TryGetResult CosmosSourceParameters.ConnectionMode - let timeout = a.GetResult(CosmosSourceParameters.Timeout, 5.) |> TimeSpan.FromSeconds - let retries = a.GetResult(CosmosSourceParameters.Retries, 1) - let maxRetryWaitTime = a.GetResult(CosmosSourceParameters.RetriesWaitTime, 5.) |> TimeSpan.FromSeconds + and CosmosSourceArguments(c : Configuration, p : ParseResults) = + let discovery = p.TryGetResult CosmosSourceParameters.Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString + let mode = p.TryGetResult CosmosSourceParameters.ConnectionMode + let timeout = p.GetResult(CosmosSourceParameters.Timeout, 5.) 
|> TimeSpan.FromSeconds + let retries = p.GetResult(CosmosSourceParameters.Retries, 1) + let maxRetryWaitTime = p.GetResult(CosmosSourceParameters.RetriesWaitTime, 5.) |> TimeSpan.FromSeconds let connector = Equinox.CosmosStore.CosmosStoreConnector(discovery, timeout, retries, maxRetryWaitTime, ?mode = mode) - let database = a.TryGetResult CosmosSourceParameters.Database |> Option.defaultWith (fun () -> c.CosmosDatabase) - member val ContainerId : string = a.GetResult CosmosSourceParameters.Container + let database = p.TryGetResult CosmosSourceParameters.Database |> Option.defaultWith (fun () -> c.CosmosDatabase) + member val ContainerId : string = p.GetResult CosmosSourceParameters.Container member x.MonitoredContainer() = connector.ConnectMonitored(database, x.ContainerId) - member val FromTail = a.Contains CosmosSourceParameters.FromTail - member val MaxItems = a.TryGetResult MaxItems - member val LagFrequency : TimeSpan = a.GetResult(LagFreqM, 1.) |> TimeSpan.FromMinutes - member val LeaseContainerId = a.TryGetResult CosmosSourceParameters.LeaseContainer + member val FromTail = p.Contains CosmosSourceParameters.FromTail + member val MaxItems = p.TryGetResult MaxItems + member val LagFrequency : TimeSpan = p.GetResult(LagFreqM, 1.) 
|> TimeSpan.FromMinutes + member val LeaseContainerId = p.TryGetResult CosmosSourceParameters.LeaseContainer member private _.ConnectLeases containerId = connector.CreateUninitialized(database, containerId) member x.ConnectLeases() = match x.LeaseContainerId with | None -> x.ConnectLeases(x.ContainerId + "-aux") | Some sc -> x.ConnectLeases(sc) member val Sink = - match a.TryGetSubCommand() with - | Some (DstCosmos cosmos) -> Choice1Of2 (CosmosSinkArguments (c, cosmos)) - | Some (DstEs es) -> Choice2Of2 (EsSinkArguments (c, es)) - | _ -> raise (MissingArg "Must specify one of cosmos or es for Sink") + match p.GetSubCommand() with + | DstCosmos cosmos -> Choice1Of2 (CosmosSinkArguments(c, cosmos)) + | DstEs es -> Choice2Of2 (EsSinkArguments(c, es)) + | _ -> missingArg "Must specify one of cosmos or es for Sink" and [] EsSourceParameters = | [] FromTail | [] Gorge of int @@ -222,7 +220,7 @@ module Args = | [] Es of ParseResults | [] Cosmos of ParseResults interface IArgParserTemplate with - member a.Usage = a |> function + member p.Usage = p |> function | FromTail -> "Start the processing from the Tail" | Gorge _ -> "Request Parallel readers phase during initial catchup, running one chunk (256MB) apart. Default: off" | StreamReaders _ -> "number of concurrent readers that will fetch a missing stream when in tailing mode. Default: 1. TODO: IMPLEMENT!" @@ -246,15 +244,15 @@ module Args = | Cosmos _ -> "CosmosDb Sink parameters." | Es _ -> "EventStore Sink parameters." - and EsSourceArguments(c : Configuration, a : ParseResults) = - member val Gorge = a.TryGetResult Gorge - member val StreamReaders = a.GetResult(StreamReaders, 1) - member val TailInterval = a.GetResult(Tail, 1.) 
|> TimeSpan.FromSeconds - member val ForceRestart = a.Contains ForceRestart - member val StartingBatchSize = a.GetResult(BatchSize, 4096) - member val MinBatchSize = a.GetResult(MinBatchSize, 512) + and EsSourceArguments(c : Configuration, p : ParseResults) = + member val Gorge = p.TryGetResult Gorge + member val StreamReaders = p.GetResult(StreamReaders, 1) + member val TailInterval = p.GetResult(Tail, 1.) |> TimeSpan.FromSeconds + member val ForceRestart = p.Contains ForceRestart + member val StartingBatchSize = p.GetResult(BatchSize, 4096) + member val MinBatchSize = p.GetResult(MinBatchSize, 512) member val StartPos = - match a.TryGetResult Position, a.TryGetResult Chunk, a.TryGetResult Percent, a.Contains FromTail with + match p.TryGetResult Position, p.TryGetResult Chunk, p.TryGetResult Percent, p.Contains FromTail with | Some p, _, _, _ -> Absolute p | _, Some c, _, _ -> StartPos.Chunk c | _, _, Some p, _ -> Percentage p @@ -267,14 +265,14 @@ module Args = | false, Some p -> Discovery.GossipDnsCustomPort (x.Host, p) | true, None -> Discovery.Uri (UriBuilder("tcp", x.Host, 1113).Uri) | true, Some p -> Discovery.Uri (UriBuilder("tcp", x.Host, p).Uri) - member val Tcp = a.Contains EsSourceParameters.Tcp || c.EventStoreTcp - member val Port = match a.TryGetResult EsSourceParameters.Port with Some x -> Some x | None -> c.EventStorePort - member val Host = a.TryGetResult EsSourceParameters.Host |> Option.defaultWith (fun () -> c.EventStoreHost) - member val User = a.TryGetResult EsSourceParameters.Username |> Option.defaultWith (fun () -> c.EventStoreUsername) - member val Password = a.TryGetResult EsSourceParameters.Password |> Option.defaultWith (fun () -> c.EventStorePassword) - member val Retries = a.GetResult(EsSourceParameters.Retries, 3) - member val Timeout = a.GetResult(EsSourceParameters.Timeout, 20.) 
|> TimeSpan.FromSeconds - member val Heartbeat = a.GetResult(EsSourceParameters.HeartbeatTimeout, 1.5) |> TimeSpan.FromSeconds + member val Tcp = p.Contains EsSourceParameters.Tcp || c.EventStoreTcp + member val Port = match p.TryGetResult EsSourceParameters.Port with Some x -> Some x | None -> c.EventStorePort + member val Host = p.TryGetResult EsSourceParameters.Host |> Option.defaultWith (fun () -> c.EventStoreHost) + member val User = p.TryGetResult EsSourceParameters.Username |> Option.defaultWith (fun () -> c.EventStoreUsername) + member val Password = p.TryGetResult EsSourceParameters.Password |> Option.defaultWith (fun () -> c.EventStorePassword) + member val Retries = p.GetResult(EsSourceParameters.Retries, 3) + member val Timeout = p.GetResult(EsSourceParameters.Timeout, 20.) |> TimeSpan.FromSeconds + member val Heartbeat = p.GetResult(EsSourceParameters.HeartbeatTimeout, 1.5) |> TimeSpan.FromSeconds member x.Connect(log: ILogger, storeLog: ILogger, appName, connectionStrategy) = let discovery = x.Discovery let s (x : TimeSpan) = x.TotalSeconds @@ -283,14 +281,14 @@ module Args = discovery, s x.Heartbeat, s x.Timeout, x.Retries) let log=if storeLog.IsEnabled Serilog.Events.LogEventLevel.Debug then Logger.SerilogVerbose storeLog else Logger.SerilogNormal storeLog let tags=["M", Environment.MachineName; "I", Guid.NewGuid() |> string] - Connector(x.User, x.Password, x.Timeout, x.Retries, log=log, heartbeatTimeout=x.Heartbeat, tags=tags) + EventStoreConnector(x.User, x.Password, x.Timeout, x.Retries, log=log, heartbeatTimeout=x.Heartbeat, tags=tags) .Establish(appName, discovery, connectionStrategy) member _.CheckpointInterval = TimeSpan.FromHours 1. 
member val Sink = - match a.TryGetSubCommand() with - | Some (Cosmos cosmos) -> CosmosSinkArguments (c, cosmos) - | _ -> raise (MissingArg "Must specify cosmos for Sink if source is `es`") + match p.GetSubCommand() with + | Cosmos cosmos -> CosmosSinkArguments(c, cosmos) + | _ -> missingArg "Must specify cosmos for Sink if source is `es`" and [] CosmosSinkParameters = | [] ConnectionMode of Microsoft.Azure.Cosmos.ConnectionMode | [] Connection of string @@ -304,7 +302,7 @@ module Args = | [] Kafka of ParseResults #endif interface IArgParserTemplate with - member a.Usage = a |> function + member p.Usage = p |> function | ConnectionMode _ -> "override the connection mode. Default: Direct." | Connection _ -> "specify a connection string for a Cosmos account. (optional if environment variable EQUINOX_COSMOS_CONNECTION specified)" | Database _ -> "specify a database name for Cosmos account. (optional if environment variable EQUINOX_COSMOS_DATABASE specified)" @@ -316,23 +314,23 @@ module Args = #if kafka | Kafka _ -> "specify Kafka target for non-Synced categories. Default: None." #endif - and CosmosSinkArguments(c : Configuration, a : ParseResults) = - let discovery = a.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString - let mode = a.GetResult(ConnectionMode, Microsoft.Azure.Cosmos.ConnectionMode.Direct) - let timeout = a.GetResult(CosmosSinkParameters.Timeout, 5.) |> TimeSpan.FromSeconds - let retries = a.GetResult(CosmosSinkParameters.Retries, 0) - let maxRetryWaitTime = a.GetResult(RetriesWaitTime, 5.) |> TimeSpan.FromSeconds + and CosmosSinkArguments(c : Configuration, p : ParseResults) = + let discovery = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString + let mode = p.GetResult(ConnectionMode, Microsoft.Azure.Cosmos.ConnectionMode.Direct) + let timeout = p.GetResult(CosmosSinkParameters.Timeout, 5.) 
|> TimeSpan.FromSeconds + let retries = p.GetResult(CosmosSinkParameters.Retries, 0) + let maxRetryWaitTime = p.GetResult(RetriesWaitTime, 5.) |> TimeSpan.FromSeconds let connector = Equinox.CosmosStore.CosmosStoreConnector(discovery, timeout, retries, maxRetryWaitTime, mode) - let database = a.TryGetResult Database |> Option.defaultWith (fun () -> c.CosmosDatabase) - let container = a.TryGetResult Container |> Option.defaultWith (fun () -> c.CosmosContainer) + let database = p.TryGetResult Database |> Option.defaultWith (fun () -> c.CosmosDatabase) + let container = p.TryGetResult Container |> Option.defaultWith (fun () -> c.CosmosContainer) member _.Connect() = connector.ConnectStore("Destination", database, container) - member val LeaseContainerId = a.TryGetResult LeaseContainer + member val LeaseContainerId = p.TryGetResult LeaseContainer member _.ConnectLeases containerId = connector.CreateUninitialized(database, containerId) #if kafka member val KafkaSink = - match a.TryGetSubCommand() with - | Some (Kafka kafka) -> Some (KafkaSinkArguments (c, kafka)) + match a.GetSubCommand() with + | Kafka kafka -> Some (KafkaSinkArguments(c, kafka)) | _ -> None #endif and [] EsSinkParameters = @@ -346,7 +344,7 @@ module Args = | [] Retries of int | [] HeartbeatTimeout of float interface IArgParserTemplate with - member a.Usage = a |> function + member p.Usage = p |> function | Verbose -> "Include low level Store logging." | Tcp -> "Request connecting direct to a TCP/IP endpoint. Default: Use Clustered mode with Gossip-driven discovery (unless environment variable EQUINOX_ES_TCP specifies 'true')." | Host _ -> "TCP mode: specify a hostname to connect to directly. Clustered mode: use Gossip protocol against all A records returned from DNS query. (optional if environment variable EQUINOX_ES_HOST specified)" @@ -356,21 +354,21 @@ module Args = | Timeout _ -> "specify operation timeout in seconds. Default: 20." | Retries _ -> "specify operation retries. Default: 3." 
| HeartbeatTimeout _ -> "specify heartbeat timeout in seconds. Default: 1.5." - and EsSinkArguments(c : Configuration, a : ParseResults) = + and EsSinkArguments(c : Configuration, p : ParseResults) = member x.Discovery = match x.Tcp, x.Port with | false, None -> Discovery.GossipDns x.Host | false, Some p -> Discovery.GossipDnsCustomPort (x.Host, p) | true, None -> Discovery.Uri (UriBuilder("tcp", x.Host, 1113).Uri) | true, Some p -> Discovery.Uri (UriBuilder("tcp", x.Host, p).Uri) - member val Tcp = a.Contains EsSinkParameters.Tcp || c.EventStoreTcp - member val Port = match a.TryGetResult Port with Some x -> Some x | None -> c.EventStorePort - member val Host = a.TryGetResult Host |> Option.defaultWith (fun () -> c.EventStoreHost) - member val User = a.TryGetResult Username |> Option.defaultWith (fun () -> c.EventStoreUsername) - member val Password = a.TryGetResult Password |> Option.defaultWith (fun () -> c.EventStorePassword) - member val Retries = a.GetResult(Retries, 3) - member val Timeout = a.GetResult(Timeout, 20.) |> TimeSpan.FromSeconds - member val Heartbeat = a.GetResult(HeartbeatTimeout, 1.5) |> TimeSpan.FromSeconds + member val Tcp = p.Contains EsSinkParameters.Tcp || c.EventStoreTcp + member val Port = match p.TryGetResult Port with Some x -> Some x | None -> c.EventStorePort + member val Host = p.TryGetResult Host |> Option.defaultWith (fun () -> c.EventStoreHost) + member val User = p.TryGetResult Username |> Option.defaultWith (fun () -> c.EventStoreUsername) + member val Password = p.TryGetResult Password |> Option.defaultWith (fun () -> c.EventStorePassword) + member val Retries = p.GetResult(Retries, 3) + member val Timeout = p.GetResult(Timeout, 20.) 
|> TimeSpan.FromSeconds + member val Heartbeat = p.GetResult(HeartbeatTimeout, 1.5) |> TimeSpan.FromSeconds member x.Connect(log: ILogger, storeLog: ILogger, connectionStrategy, appName, connIndex) = let discovery = x.Discovery let s (x : TimeSpan) = x.TotalSeconds @@ -379,7 +377,7 @@ module Args = discovery, s x.Heartbeat, s x.Timeout, x.Retries) let log=if storeLog.IsEnabled Serilog.Events.LogEventLevel.Debug then Logger.SerilogVerbose storeLog else Logger.SerilogNormal storeLog let tags=["M", Environment.MachineName; "I", Guid.NewGuid() |> string] - Connector(x.User, x.Password, x.Timeout, x.Retries, log=log, heartbeatTimeout=x.Heartbeat, tags=tags) + EventStoreConnector(x.User, x.Password, x.Timeout, x.Retries, log=log, heartbeatTimeout=x.Heartbeat, tags=tags) .Establish(appName, discovery, connectionStrategy) #if kafka and [] KafkaSinkParameters = @@ -387,14 +385,14 @@ module Args = | [] Topic of string | [] Producers of int interface IArgParserTemplate with - member a.Usage = a |> function + member p.Usage = p |> function | Broker _ -> "specify Kafka Broker, in host:port format. (optional if environment variable PROPULSION_KAFKA_BROKER specified)" | Topic _ -> "specify Kafka Topic Id. (optional if environment variable PROPULSION_KAFKA_TOPIC specified)." | Producers _ -> "specify number of Kafka Producer instances to use. Default: 1." 
- and KafkaSinkArguments(c : Configuration, a : ParseResults) = - member val Broker = a.TryGetResult Broker |> Option.defaultWith (fun () -> c.Broker) - member val Topic = a.TryGetResult Topic |> Option.defaultWith (fun () -> c.Topic) - member val Producers = a.GetResult(Producers, 1) + and KafkaSinkArguments(c : Configuration, p : ParseResults) = + member val Broker = p.TryGetResult Broker |> Option.defaultWith (fun () -> c.Broker) + member val Topic = p.TryGetResult Topic |> Option.defaultWith (fun () -> c.Topic) + member val Producers = p.GetResult(Producers, 1) member x.BuildTargetParams() = x.Broker, x.Topic, x.Producers #endif @@ -404,7 +402,7 @@ module Args = let parser = ArgumentParser.Create(programName=programName) Arguments(Configuration tryGetConfigValue, parser.ParseCommandLine argv) - //#if marveleqx +//#if marveleqx [] module EventV0Parser = open Newtonsoft.Json @@ -430,6 +428,7 @@ module EventV0Parser = interface FsCodec.ITimelineEvent with member x.Index = x.i member x.IsUnfold = false + member x.Size = 0 member x.EventType = x.t member x.Data = x.d member _.Meta = null @@ -444,21 +443,21 @@ module EventV0Parser = document.ToObject<'T>() /// We assume all Documents represent Events laid out as above - let parse (d : Newtonsoft.Json.Linq.JObject) : Propulsion.Streams.StreamEvent<_> = + let parse (d : Newtonsoft.Json.Linq.JObject) : Propulsion.Streams.Default.StreamEvent = let e = d.Cast() - { stream = FsCodec.StreamName.parse e.s; event = e } : _ + FsCodec.StreamName.parse e.s, e |> FsCodec.Core.TimelineEvent.Map ReadOnlyMemory let transformV0 catFilter v0SchemaDocument : Propulsion.Streams.StreamEvent<_> seq = seq { let parsed = EventV0Parser.parse v0SchemaDocument - let (FsCodec.StreamName.CategoryAndId (cat, _)) = parsed.stream + let struct (FsCodec.StreamName.Category cat, _) = parsed if catFilter cat then yield parsed } //#else -let transformOrFilter catFilter changeFeedDocument : Propulsion.Streams.StreamEvent<_> seq = seq { - for { stream = 
FsCodec.StreamName.CategoryAndId (cat, _) } as e in Propulsion.CosmosStore.EquinoxNewtonsoftParser.enumStreamEvents changeFeedDocument do +let transformOrFilter catFilter changeFeedDocument : Propulsion.Streams.Default.StreamEvent seq = seq { + for FsCodec.StreamName.Category cat, e as x in Propulsion.CosmosStore.EquinoxSystemTextJsonParser.enumStreamEvents catFilter changeFeedDocument do // NB the `index` needs to be contiguous with existing events - IOW filtering needs to be at stream (and not event) level if catFilter cat then - yield e } + yield x } //#endif let [] AppName = "SyncTemplate" @@ -470,12 +469,13 @@ module Checkpoints = open Equinox.CosmosStore - let codec = FsCodec.NewtonsoftJson.Codec.Create() - let access = AccessStrategy.Custom (Checkpoint.Fold.isOrigin, Checkpoint.Fold.transmute) + let codec = FsCodec.SystemTextJson.CodecJsonElement.Create() + let transmute' xs s = let x, y = Checkpoint.Fold.transmute (Array.toList xs) s in (List.toArray x, List.toArray y) + let access = AccessStrategy.Custom (Checkpoint.Fold.isOrigin, transmute') let create groupName (context, cache) = let caching = CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) 
let cat = CosmosStoreCategory(context, codec, Checkpoint.Fold.fold, Checkpoint.Fold.initial, caching, access) - let resolve streamName = cat.Resolve(streamName, Equinox.AllowStale) + let resolve log () = Equinox.Decider.resolve log cat Checkpoint.CheckpointSeries(groupName, resolve) type Stats(log, statsInterval, stateInterval) = @@ -485,7 +485,7 @@ type Stats(log, statsInterval, stateInterval) = override _.HandleExn(log, exn) = log.Information(exn, "Unhandled") -open Propulsion.CosmosStore.Infrastructure // AwaitKeyboardInterruptAsTaskCancelledException +open Propulsion.Internal // AwaitKeyboardInterruptAsTaskCanceledException let build (args : Args.Arguments, log) = let maybeDstCosmos, sink, streamFilter = @@ -520,7 +520,7 @@ let build (args : Args.Arguments, log) = let connect connIndex = async { let lfc = Config.log.ForContext("ConnId", connIndex) let! c = es.Connect(log, lfc, ConnectionStrategy.ClusterSingle NodePreference.Master, AppName, connIndex) - return Context(c, BatchingPolicy(Int32.MaxValue)) } + return EventStoreContext(c, batchSize = Int32.MaxValue) } let targets = Array.init args.MaxConnections (string >> connect) |> Async.Parallel |> Async.RunSynchronously let sink = EventStoreSink.Start(log, Config.log, args.MaxReadAhead, targets, args.MaxWriters, args.StatsInterval, args.StateInterval, maxSubmissionsPerPartition=args.MaxSubmit) None, sink, args.CategoryFilterFunction() @@ -533,9 +533,9 @@ let build (args : Args.Arguments, log) = #endif let source = Propulsion.CosmosStore.CosmosStoreSource.Start( - Log.Logger, monitored, leases, processorName, observer, startFromTail, - ?maxItems=maxItems, lagReportFreq=lagFrequency) - [ Async.AwaitKeyboardInterruptAsTaskCancelledException(); source.AwaitWithStopOnCancellation(); sink.AwaitWithStopOnCancellation() ] + Log.Logger, monitored, leases, processorName, observer, startFromTail = startFromTail, + ?maxItems = maxItems, lagReportFreq = lagFrequency) + [ 
Async.AwaitKeyboardInterruptAsTaskCanceledException(); source.AwaitWithStopOnCancellation(); sink.AwaitWithStopOnCancellation() ] | Choice2Of2 (srcE, checkpointsContext, spec) -> match maybeDstCosmos with | None -> failwith "ES->ES checkpointing E_NOTIMPL" @@ -545,16 +545,17 @@ let build (args : Args.Arguments, log) = let checkpoints = Checkpoints.Cosmos.create spec.groupName (checkpointsContext, cache) let withNullData (e : FsCodec.ITimelineEvent<_>) : FsCodec.ITimelineEvent<_> = - FsCodec.Core.TimelineEvent.Create(e.Index, e.EventType, null, e.Meta, timestamp=e.Timestamp) :> _ + FsCodec.Core.TimelineEvent.Create(e.Index, e.EventType, ReadOnlyMemory.Empty, e.Meta, timestamp=e.Timestamp) :> _ let tryMapEvent streamFilter (x : EventStore.ClientAPI.ResolvedEvent) = match x.Event with | e when not e.IsJson || e.EventStreamId.StartsWith "$" || not (streamFilter e.EventStreamId) -> None | PropulsionStreamEvent e -> - if Propulsion.EventStore.Reader.payloadBytes x > 1_000_000 then - Log.Error("replacing {stream} event index {index} with `null` Data due to length of {len}MB", - e.stream, e.event.Index, Propulsion.EventStore.Reader.mb e.event.Data.Length) - Some { e with event = withNullData e.event } + let struct (stream, event) = e + if Reader.payloadBytes x > 1_000_000 then + Log.Error("replacing {stream} event index {index} with `null` Data due to length of {len}MiB", + stream, event.Index, Propulsion.Internal.Log.miB (let d = event.Data in d.Length)) + Some struct (stream, withNullData event) else Some e let connect () = let c = srcE.Connect(log, log, AppName, ConnectionStrategy.ClusterSingle NodePreference.PreferSlave) |> Async.RunSynchronously @@ -572,7 +573,7 @@ let run (args : Args.Arguments) = [] let main argv = try let args = Args.parse EnvVar.tryGet argv - try Log.Logger <- LoggerConfiguration().Configure(args.Verbose, args.StoreVerbose, ?maybeSeqEndpoint = args.MaybeSeqEndpoint).CreateLogger() + try Log.Logger <- LoggerConfiguration().Configure(args.Verbose, 
args.VerboseStore, ?maybeSeqEndpoint = args.MaybeSeqEndpoint).CreateLogger() try run args |> Async.RunSynchronously; 0 with e when not (e :? MissingArg) -> Log.Fatal(e, "Exiting"); 2 finally Log.CloseAndFlush() diff --git a/propulsion-sync/Sync.fsproj b/propulsion-sync/Sync.fsproj index 372fac038..6a3e6585a 100644 --- a/propulsion-sync/Sync.fsproj +++ b/propulsion-sync/Sync.fsproj @@ -2,7 +2,7 @@ Exe - netcoreapp3.1 + net6.0 5 @@ -14,14 +14,13 @@ - - - + + - + - + diff --git a/propulsion-tracking-consumer/Program.fs b/propulsion-tracking-consumer/Program.fs index 58d03ad41..5ff792def 100644 --- a/propulsion-tracking-consumer/Program.fs +++ b/propulsion-tracking-consumer/Program.fs @@ -32,7 +32,7 @@ module Args = | [] Cosmos of ParseResults interface IArgParserTemplate with - member a.Usage = a |> function + member p.Usage = p |> function | Verbose _ -> "request verbose logging." | Broker _ -> "specify Kafka Broker, in host:port format. (optional if environment variable PROPULSION_KAFKA_BROKER specified)" | Topic _ -> "specify Kafka Topic name. (optional if environment variable PROPULSION_KAFKA_TOPIC specified)" @@ -41,17 +41,17 @@ module Args = | LagFreqM _ -> "specify frequency (minutes) to dump lag stats. Default: off" | MaxWriters _ -> "maximum number of items to process in parallel. Default: 8" | Cosmos _ -> "specify CosmosDb input parameters" - and Arguments(c : Configuration, a : ParseResults) = - member val Verbose = a.Contains Verbose - member val Broker = a.TryGetResult Broker |> Option.defaultWith (fun () -> c.Broker) - member val Topic = a.TryGetResult Topic |> Option.defaultWith (fun () -> c.Topic) - member val Group = a.TryGetResult Group |> Option.defaultWith (fun () -> c.Group) - member val MaxInFlightBytes = a.GetResult(MaxInflightMb, 10.) * 1024. * 1024. 
|> int64 - member val LagFrequency = a.TryGetResult LagFreqM |> Option.map TimeSpan.FromMinutes - member val MaxConcurrentStreams = a.GetResult(MaxWriters, 8) + and Arguments(c : Configuration, p : ParseResults) = + member val Verbose = p.Contains Verbose + member val Broker = p.TryGetResult Broker |> Option.defaultWith (fun () -> c.Broker) + member val Topic = p.TryGetResult Topic |> Option.defaultWith (fun () -> c.Topic) + member val Group = p.TryGetResult Group |> Option.defaultWith (fun () -> c.Group) + member val MaxInFlightBytes = p.GetResult(MaxInflightMb, 10.) * 1024. * 1024. |> int64 + member val LagFrequency = p.TryGetResult LagFreqM |> Option.map TimeSpan.FromMinutes + member val MaxConcurrentStreams = p.GetResult(MaxWriters, 8) member val StatsInterval = TimeSpan.FromMinutes 1. member val StateInterval = TimeSpan.FromMinutes 5. - member val Cosmos = CosmosArguments(c, a.GetResult Cosmos) + member val Cosmos = CosmosArguments(c, p.GetResult Cosmos) and [] CosmosParameters = | [] ConnectionMode of Microsoft.Azure.Cosmos.ConnectionMode | [] Connection of string @@ -61,7 +61,7 @@ module Args = | [] Retries of int | [] RetriesWaitTime of float interface IArgParserTemplate with - member a.Usage = a |> function + member p.Usage = p |> function | ConnectionMode _ -> "override the connection mode. Default: Direct." | Connection _ -> "specify a connection string for a Cosmos account. (optional if environment variable EQUINOX_COSMOS_CONNECTION specified)" | Database _ -> "specify a database name for Cosmos store. (optional if environment variable EQUINOX_COSMOS_DATABASE specified)" @@ -69,15 +69,15 @@ module Args = | Timeout _ -> "specify operation timeout in seconds (default: 5)." | Retries _ -> "specify operation retries (default: 1)." 
| RetriesWaitTime _ -> "specify max wait-time for retry when being throttled by Cosmos in seconds (default: 5)" - and CosmosArguments(c : Configuration, a : ParseResults) = - let discovery = a.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString - let mode = a.TryGetResult ConnectionMode - let timeout = a.GetResult(Timeout, 5.) |> TimeSpan.FromSeconds - let retries = a.GetResult(Retries, 1) - let maxRetryWaitTime = a.GetResult(RetriesWaitTime, 5.) |> TimeSpan.FromSeconds + and CosmosArguments(c : Configuration, p : ParseResults) = + let discovery = p.TryGetResult Connection |> Option.defaultWith (fun () -> c.CosmosConnection) |> Equinox.CosmosStore.Discovery.ConnectionString + let mode = p.TryGetResult ConnectionMode + let timeout = p.GetResult(Timeout, 5.) |> TimeSpan.FromSeconds + let retries = p.GetResult(Retries, 1) + let maxRetryWaitTime = p.GetResult(RetriesWaitTime, 5.) |> TimeSpan.FromSeconds let connector = Equinox.CosmosStore.CosmosStoreConnector(discovery, timeout, retries, maxRetryWaitTime, ?mode = mode) - let database = a.TryGetResult Database |> Option.defaultWith (fun () -> c.CosmosDatabase) - let container = a.GetResult Container + let database = p.TryGetResult Database |> Option.defaultWith (fun () -> c.CosmosDatabase) + let container = p.GetResult Container member _.Connect() = connector.ConnectStore("Main", database, container) /// Parse the commandline; can throw exceptions in response to missing arguments and/or `-h`/`--help` args @@ -92,7 +92,7 @@ let start (args : Args.Arguments) = let service = let store = let context = args.Cosmos.Connect() |> Async.RunSynchronously |> CosmosStoreContext.create - let cache = Equinox.Cache (AppName, sizeMb = 10) + let cache = Equinox.Cache(AppName, sizeMb = 10) Config.Store.Cosmos (context, cache) SkuSummary.Config.create store let config = diff --git a/propulsion-tracking-consumer/TrackingConsumer.fsproj 
b/propulsion-tracking-consumer/TrackingConsumer.fsproj index f3af02cf1..9d3bf914c 100644 --- a/propulsion-tracking-consumer/TrackingConsumer.fsproj +++ b/propulsion-tracking-consumer/TrackingConsumer.fsproj @@ -17,10 +17,9 @@ - + - - + diff --git a/tests/Equinox.Templates.Tests/DotnetBuild.fs b/tests/Equinox.Templates.Tests/DotnetBuild.fs index 836f69b75..777b46cef 100644 --- a/tests/Equinox.Templates.Tests/DotnetBuild.fs +++ b/tests/Equinox.Templates.Tests/DotnetBuild.fs @@ -21,7 +21,7 @@ type ProProjector() as this = type ProReactor() as this = inherit TheoryData() - do for source in ["multiSource"; (* <-default *) "kafkaEventSpans"; "changeFeedOnly"] do + do for source in ["multiSource"; (* <-default *) "kafkaEventSpans"] do for opts in [ []; ["--blank"]; ["--kafka"]; ["--kafka"; "--blank"] ] do this.Add(["--source " + source] @ opts) @@ -44,7 +44,7 @@ type DotnetBuild(output : ITestOutputHelper, folder : EquinoxTemplatesFixture) = Dotnet.build [folder] #if DEBUG // Use this one to trigger an individual test - let [] ``*pending*`` () = run "proProjector" ["--source cosmos"; "--kafka"; "--synthesizeSequence"] + let [] ``*pending*`` () = run "proProjector" ["--source cosmos"; "--kafka"] #endif let [] eqxPatterns () = run "eqxPatterns" [] @@ -54,7 +54,6 @@ type DotnetBuild(output : ITestOutputHelper, folder : EquinoxTemplatesFixture) = let [] feedConsumer () = run "feedConsumer" [] [)>] let [] proProjector args = run "proProjector" args - let [] proProjectorSynth () = run "proProjector" ["--source cosmos"; "--kafka"; "--synthesizeSequence"] let [] proConsumer () = run "proConsumer" [] let [] trackingConsumer () = run "trackingConsumer" [] let [] summaryConsumer () = run "summaryConsumer" [] @@ -74,7 +73,6 @@ type DotnetBuild(output : ITestOutputHelper, folder : EquinoxTemplatesFixture) = [)>] let [] proReactor args = run "proReactor" args let [] proReactorDefault () = run "proReactor" [] - let [] proReactorFilter () = run "proReactor" ["--filter"] let [] 
proCosmosReactor () = run "proCosmosReactor" [] From 41d43b83608b5b199b91609d4df7bea06f423c10 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 16 Sep 2022 01:04:06 +0100 Subject: [PATCH 24/43] f --- propulsion-reactor/Reactor.fsproj | 2 ++ 1 file changed, 2 insertions(+) diff --git a/propulsion-reactor/Reactor.fsproj b/propulsion-reactor/Reactor.fsproj index 3e68c22d5..17768949b 100644 --- a/propulsion-reactor/Reactor.fsproj +++ b/propulsion-reactor/Reactor.fsproj @@ -27,7 +27,9 @@ + + From 1cba2f792c47289edb959180fcc3f6b2feb04dbf Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 16 Sep 2022 01:13:28 +0100 Subject: [PATCH 25/43] f --- propulsion-reactor/Program.fs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/propulsion-reactor/Program.fs b/propulsion-reactor/Program.fs index 4dbda990e..b67e3aa6c 100644 --- a/propulsion-reactor/Program.fs +++ b/propulsion-reactor/Program.fs @@ -1,7 +1,9 @@ module ReactorTemplate.Program +#if !sourceKafka open Equinox.EventStoreDb open Equinox.SqlStreamStore +#endif open Infrastructure open Serilog open System From 498a9c802358a41d6cf385a8fb18881e5fc32f63 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 16 Sep 2022 01:18:38 +0100 Subject: [PATCH 26/43] f --- propulsion-reactor/Reactor.fsproj | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/propulsion-reactor/Reactor.fsproj b/propulsion-reactor/Reactor.fsproj index 17768949b..63073371b 100644 --- a/propulsion-reactor/Reactor.fsproj +++ b/propulsion-reactor/Reactor.fsproj @@ -35,16 +35,16 @@ - + - + From d9347a5184f6f5d6b41ce974e141651f5b728cb9 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 16 Sep 2022 01:28:29 +0100 Subject: [PATCH 27/43] f --- propulsion-reactor/Reactor.fsproj | 1 + 1 file changed, 1 insertion(+) diff --git a/propulsion-reactor/Reactor.fsproj b/propulsion-reactor/Reactor.fsproj index 63073371b..26c5fd7b0 100644 --- a/propulsion-reactor/Reactor.fsproj +++ b/propulsion-reactor/Reactor.fsproj @@ -39,6 
+39,7 @@ + From 5cd7155134aaa9ad0b0ccc6b53a53e20f8486abe Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 16 Sep 2022 01:51:05 +0100 Subject: [PATCH 28/43] f --- propulsion-reactor/Handler.fs | 2 +- propulsion-reactor/Ingester.fs | 20 ++++++++++++++++++++ propulsion-reactor/Program.fs | 23 +++++++++++++---------- propulsion-reactor/Reactor.fsproj | 15 +++++++-------- 4 files changed, 41 insertions(+), 19 deletions(-) diff --git a/propulsion-reactor/Handler.fs b/propulsion-reactor/Handler.fs index c041287a6..d8aeebaec 100644 --- a/propulsion-reactor/Handler.fs +++ b/propulsion-reactor/Handler.fs @@ -87,6 +87,6 @@ type Config private () = maxReadAhead : int, maxConcurrentStreams : int, ?wakeForResults, ?idleDelay, ?purgeInterval) = Propulsion.Streams.Default.Config.Start(log, maxReadAhead, maxConcurrentStreams, handle, stats, stats.StatsInterval.Period, ?wakeForResults = wakeForResults, ?idleDelay = idleDelay, ?purgeInterval = purgeInterval) - + static member StartSource(log, sink, sourceConfig) = SourceConfig.start (log, Config.log) sink categoryFilter sourceConfig diff --git a/propulsion-reactor/Ingester.fs b/propulsion-reactor/Ingester.fs index 70fb927f1..c08b87b2a 100644 --- a/propulsion-reactor/Ingester.fs +++ b/propulsion-reactor/Ingester.fs @@ -31,6 +31,10 @@ type Stats(log, statsInterval, stateInterval, verboseStore, ?logExternalStats) = logExternalStats |> Option.iter (fun dumpTo -> dumpTo log) #if blank +let categoryFilter = function + | sn when sn = "Todos" -> true + | _ -> false + let handle struct (stream, span : Propulsion.Streams.StreamSpan<_>) = async { match stream, span with | FsCodec.StreamName.CategoryAndId ("Todos", id), _ -> @@ -47,6 +51,10 @@ let toSummaryEventData ( x : Contract.SummaryInfo) : TodoSummary.Events.SummaryD [| for x in x.items -> { id = x.id; order = x.order; title = x.title; completed = x.completed } |] } +let categoryFilter = function + | Todo.Reactions.Category -> true + | _ -> false + let handle (sourceService : 
Todo.Service) (summaryService : TodoSummary.Service) @@ -59,3 +67,15 @@ let handle | false -> return Propulsion.Streams.SpanResult.OverrideWritePosition version', Outcome.Skipped span.Length | _ -> return Propulsion.Streams.SpanResult.AllProcessed, Outcome.NotApplicable span.Length } #endif + +type Config private () = + + static member StartSink(log : Serilog.ILogger, stats, + handle : struct (FsCodec.StreamName * Propulsion.Streams.Default.StreamSpan) + -> Async, + maxReadAhead : int, maxConcurrentStreams : int, ?wakeForResults, ?idleDelay, ?purgeInterval) = + Propulsion.Streams.Default.Config.Start(log, maxReadAhead, maxConcurrentStreams, handle, stats, stats.StatsInterval.Period, + ?wakeForResults = wakeForResults, ?idleDelay = idleDelay, ?purgeInterval = purgeInterval) + + static member StartSource(log, sink, sourceConfig) = + SourceConfig.start (log, Config.log) sink categoryFilter sourceConfig diff --git a/propulsion-reactor/Program.fs b/propulsion-reactor/Program.fs index b67e3aa6c..f5df54001 100644 --- a/propulsion-reactor/Program.fs +++ b/propulsion-reactor/Program.fs @@ -229,26 +229,29 @@ let build (args : Args.Arguments) = ( Log.Logger, consumerConfig, parseStreamEvents, handle, maxConcurrentStreams, stats = stats, statsInterval = args.StateInterval) [| pipeline.AwaitWithStopOnCancellation() -#else +#else // !sourceKafka let sink = -#if kafka // kafka -#if blank // kafka && blank +#if kafka // !sourceKafka && kafka +#if blank // !sourceKafka && kafka && blank Handler.Config.StartSink(log, stats, handle, maxReadAhead, maxConcurrentStreams, purgeInterval = args.PurgeInterval) -#else // kafka && !blank +#else // !sourceKafka && kafka && !blank Propulsion.Streams.Sync.StreamsSync.Start( Log.Logger, maxReadAhead, maxConcurrentStreams, handle, stats, args.StatsInterval, Propulsion.Streams.Default.jsonSize, Propulsion.Streams.Default.eventSize) -#endif // kafka && !blank -#else // !kafka (i.e., ingester) - Handler.Config.StartSink(log, stats, handle, 
maxReadAhead, maxConcurrentStreams, purgeInterval = args.PurgeInterval) -#endif // !kafka +#endif // !sourceKafka && kafka && !blank +#else // !sourceKafka && !kafka (i.e., ingester) + Ingester.Config.StartSink(log, stats, handle, maxReadAhead, maxConcurrentStreams, purgeInterval = args.PurgeInterval) +#endif // !sourceKafka && !kafka let source, _awaitReactions = let sourceConfig = buildSourceConfig log consumerGroupName +#if kafka Handler.Config.StartSource(log, sink, sourceConfig) - +#else + Ingester.Config.StartSource(log, sink, sourceConfig) +#endif [| source.AwaitWithStopOnCancellation() sink.AwaitWithStopOnCancellation() -#endif +#endif // !sourceKafka Async.AwaitKeyboardInterruptAsTaskCanceledException() |] [] diff --git a/propulsion-reactor/Reactor.fsproj b/propulsion-reactor/Reactor.fsproj index 26c5fd7b0..ba8e0f16f 100644 --- a/propulsion-reactor/Reactor.fsproj +++ b/propulsion-reactor/Reactor.fsproj @@ -21,12 +21,12 @@ - - - + + + @@ -35,16 +35,15 @@ - - - - - + + + + From 4c07d4c3b2703fca1d84559f8974f641baebbca9 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 16 Sep 2022 02:04:34 +0100 Subject: [PATCH 29/43] f sync a marveleqx --- propulsion-sync/.template.config/template.json | 5 +++-- propulsion-sync/Program.fs | 2 +- propulsion-sync/Sync.fsproj | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/propulsion-sync/.template.config/template.json b/propulsion-sync/.template.config/template.json index e48130f47..4ee6a3897 100644 --- a/propulsion-sync/.template.config/template.json +++ b/propulsion-sync/.template.config/template.json @@ -11,7 +11,8 @@ "Kafka" ], "tags": { - "language": "F#" + "language": "F#", + "type": "project" }, "identity": "Propulsion.Template.Sync", "name": "Propulsion Sync tool", @@ -35,4 +36,4 @@ "description": "Include example code to transform from the Equinox V0 (internal) schema to that of `Equinox.Cosmos`" } } -} \ No newline at end of file +} diff --git a/propulsion-sync/Program.fs 
b/propulsion-sync/Program.fs index c0416103e..ba1656acf 100644 --- a/propulsion-sync/Program.fs +++ b/propulsion-sync/Program.fs @@ -329,7 +329,7 @@ module Args = member _.ConnectLeases containerId = connector.CreateUninitialized(database, containerId) #if kafka member val KafkaSink = - match a.GetSubCommand() with + match p.GetSubCommand() with | Kafka kafka -> Some (KafkaSinkArguments(c, kafka)) | _ -> None #endif diff --git a/propulsion-sync/Sync.fsproj b/propulsion-sync/Sync.fsproj index 6a3e6585a..caf099ec7 100644 --- a/propulsion-sync/Sync.fsproj +++ b/propulsion-sync/Sync.fsproj @@ -16,7 +16,7 @@ - + From 2fd4d7d50ffc362dd45e482c8e1e3714412746bf Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 16 Sep 2022 02:08:28 +0100 Subject: [PATCH 30/43] f reactor handler --- propulsion-reactor/Handler.fs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/propulsion-reactor/Handler.fs b/propulsion-reactor/Handler.fs index d8aeebaec..3cd1ebf60 100644 --- a/propulsion-reactor/Handler.fs +++ b/propulsion-reactor/Handler.fs @@ -77,7 +77,6 @@ let handle return Propulsion.Streams.SpanResult.AllProcessed, Outcome.Skipped events.Length | _ -> return Propulsion.Streams.SpanResult.AllProcessed, Outcome.NotApplicable span.Length } #endif -//#endif type Config private () = @@ -90,3 +89,4 @@ type Config private () = static member StartSource(log, sink, sourceConfig) = SourceConfig.start (log, Config.log) sink categoryFilter sourceConfig +//#endif From 2fa63b38c446ff8d86f5833fb3bd47c0895d8aee Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 16 Sep 2022 02:21:22 +0100 Subject: [PATCH 31/43] f --- propulsion-reactor/SourceArgs.fs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/propulsion-reactor/SourceArgs.fs b/propulsion-reactor/SourceArgs.fs index a9c1a9b66..6fd07320e 100644 --- a/propulsion-reactor/SourceArgs.fs +++ b/propulsion-reactor/SourceArgs.fs @@ -81,12 +81,10 @@ module Cosmos = member x.ConnectStoreAndMonitored() = 
connector.ConnectStoreAndMonitored(database, containerId) #if (kafka && blank) -#if kafka member val Kafka = match p.GetSubCommand() with | Kafka kafka -> Args.KafkaSinkArguments(c, kafka) | _ -> Args.missingArg "Must specify `kafka` arguments" -#endif member x.ConnectTarget(_cache) = () #else member private _.TargetStoreArgs : Args.TargetStoreArgs = @@ -175,12 +173,10 @@ module Dynamo = indexTable.CreateCheckpointService(group, cache, Config.log) #if (kafka && blank) member x.ConnectTarget(_cache) = () -#if kafka member val Kafka = match p.GetSubCommand() with | Kafka kafka -> Args.KafkaSinkArguments(c, kafka) | _ -> Args.missingArg "Must specify `kafka` arguments" -#endif #else member private _.TargetStoreArgs : Args.TargetStoreArgs = match p.GetSubCommand() with @@ -372,12 +368,10 @@ module Kafka = #if (kafka && blank) member x.ConnectTarget(_cache) = () -#if kafka member val Kafka = match p.GetSubCommand() with | Kafka kafka -> Args.KafkaSinkArguments(c, kafka) | _ -> Args.missingArg "Must specify kafka arguments" -#endif #else member private _.TargetStoreArgs : Args.TargetStoreArgs = match p.GetSubCommand() with From e0dc95ef8755e769720a12cec2503cc276b33526 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 16 Sep 2022 02:27:11 +0100 Subject: [PATCH 32/43] f sync kafka --- propulsion-sync/Program.fs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/propulsion-sync/Program.fs b/propulsion-sync/Program.fs index ba1656acf..86980e2ae 100644 --- a/propulsion-sync/Program.fs +++ b/propulsion-sync/Program.fs @@ -454,7 +454,7 @@ let transformV0 catFilter v0SchemaDocument : Propulsion.Streams.StreamEvent<_> s yield parsed } //#else let transformOrFilter catFilter changeFeedDocument : Propulsion.Streams.Default.StreamEvent seq = seq { - for FsCodec.StreamName.Category cat, e as x in Propulsion.CosmosStore.EquinoxSystemTextJsonParser.enumStreamEvents catFilter changeFeedDocument do + for FsCodec.StreamName.Category cat, _ as x in 
Propulsion.CosmosStore.EquinoxSystemTextJsonParser.enumStreamEvents catFilter changeFeedDocument do // NB the `index` needs to be contiguous with existing events - IOW filtering needs to be at stream (and not event) level if catFilter cat then yield x } @@ -498,16 +498,16 @@ let build (args : Args.Arguments, log) = match cosmos.KafkaSink with | Some kafka -> let broker, topic, producers = kafka.BuildTargetParams() - let render (stream: FsCodec.StreamName, span: Propulsion.Streams.StreamSpan<_>) = async { + let render struct (stream: FsCodec.StreamName, span: Propulsion.Streams.Default.StreamSpan) = async { let value = span |> Propulsion.Codec.NewtonsoftJson.RenderedSpan.ofStreamSpan stream |> Propulsion.Codec.NewtonsoftJson.Serdes.Serialize - return FsCodec.StreamName.toString stream, value } + return struct (FsCodec.StreamName.toString stream, value) } let producer = Propulsion.Kafka.Producer(Log.Logger, AppName, broker, Confluent.Kafka.Acks.All, topic, degreeOfParallelism=producers) let stats = Stats(Log.Logger, args.StatsInterval, args.StateInterval) StreamsProducerSink.Start( - Log.Logger, args.MaxReadAhead, args.MaxWriters, render, producer, stats, args.StatsInterval, maxBytes=maxBytes, maxEvents=maxEvents), + Log.Logger, args.MaxReadAhead, args.MaxWriters, render, producer, stats, statsInterval = args.StatsInterval, maxBytes = maxBytes, maxEvents = maxEvents), args.CategoryFilterFunction(longOnly=true) | None -> #endif From 96440538510cf2d4f0ef0c7e81bedd806c8634e8 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 16 Sep 2022 02:36:05 +0100 Subject: [PATCH 33/43] f sourceKafka --- propulsion-reactor/Program.fs | 9 +++++++-- propulsion-reactor/Reactor.fsproj | 1 + 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/propulsion-reactor/Program.fs b/propulsion-reactor/Program.fs index f5df54001..7ad75875a 100644 --- a/propulsion-reactor/Program.fs +++ b/propulsion-reactor/Program.fs @@ -56,7 +56,7 @@ module Args = processorName, maxReadAhead, 
maxConcurrentStreams) (processorName, maxReadAhead, maxConcurrentStreams) #if sourceKafka - member _.ConnectStoreAndSource(appName) : _ * _ * Args.KafkaSinkArguments * (string -> FsKafka.KafkaConsumerConfig) * (ILogger -> unit) = + member _.ConnectStoreAndSource(appName) : _ * _ * _ * (string -> FsKafka.KafkaConsumerConfig) * (ILogger -> unit) = let p = match p.GetSubCommand() with | Kafka p -> SourceArgs.Kafka.Arguments(c, p) @@ -70,7 +70,12 @@ module Args = #else let cache = Equinox.Cache (appName, sizeMb = cacheSizeMb) let targetStore = p.ConnectTarget cache - targetStore, targetStore, p.Kafka, createConsumerConfig, fun log -> +#if kafka + let kafka = a.Kafka +#else + let kafka = () +#endif + targetStore, targetStore, kafka, createConsumerConfig, fun log -> Equinox.CosmosStore.Core.Log.InternalMetrics.dump log Equinox.DynamoStore.Core.Log.InternalMetrics.dump log #endif diff --git a/propulsion-reactor/Reactor.fsproj b/propulsion-reactor/Reactor.fsproj index ba8e0f16f..eb8f3d975 100644 --- a/propulsion-reactor/Reactor.fsproj +++ b/propulsion-reactor/Reactor.fsproj @@ -4,6 +4,7 @@ Exe net6.0 5 + sourceKafka From e05a690a20aa0c91139b347e40dfd8aca3c7d713 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 16 Sep 2022 02:49:07 +0100 Subject: [PATCH 34/43] f reactor refs --- propulsion-reactor/Infrastructure.fs | 2 ++ propulsion-reactor/Reactor.fsproj | 7 +------ 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/propulsion-reactor/Infrastructure.fs b/propulsion-reactor/Infrastructure.fs index 808a97492..ad4a1f7c2 100644 --- a/propulsion-reactor/Infrastructure.fs +++ b/propulsion-reactor/Infrastructure.fs @@ -114,11 +114,13 @@ type Equinox.DynamoStore.DynamoStoreClient with member internal x.LogConfiguration(role, ?log) = (defaultArg log Log.Logger).Information("DynamoStore {role:l} Table {table} Archive {archive}", role, x.TableName, Option.toObj x.ArchiveTableName) +#if !sourceKafka member client.CreateCheckpointService(consumerGroupName, cache, 
log, ?checkpointInterval) = let checkpointInterval = defaultArg checkpointInterval (TimeSpan.FromHours 1.) let context = Equinox.DynamoStore.DynamoStoreContext(client) Propulsion.Feed.ReaderCheckpoint.DynamoStore.create log (consumerGroupName, checkpointInterval) (context, cache) +#endif type Equinox.DynamoStore.DynamoStoreContext with member internal x.LogConfiguration(log : ILogger) = diff --git a/propulsion-reactor/Reactor.fsproj b/propulsion-reactor/Reactor.fsproj index eb8f3d975..3cb01e878 100644 --- a/propulsion-reactor/Reactor.fsproj +++ b/propulsion-reactor/Reactor.fsproj @@ -36,16 +36,11 @@ - + - - - - - From a41cc09c025ed40bf01d4404bbea0924cd15246f Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 16 Sep 2022 08:33:44 +0100 Subject: [PATCH 35/43] f proReactor --- propulsion-reactor/Args.fs | 2 +- propulsion-reactor/Handler.fs | 2 +- propulsion-reactor/Program.fs | 12 ++++++------ propulsion-reactor/Reactor.fsproj | 1 - propulsion-reactor/SourceArgs.fs | 8 ++++---- 5 files changed, 12 insertions(+), 13 deletions(-) diff --git a/propulsion-reactor/Args.fs b/propulsion-reactor/Args.fs index d8c4136e2..f6c578b9b 100644 --- a/propulsion-reactor/Args.fs +++ b/propulsion-reactor/Args.fs @@ -43,7 +43,7 @@ type Configuration(tryGet : string -> string option) = member x.SqlStreamStoreContainer = x.get "SQLSTREAMSTORE_CONTAINER" #endif -//#if kafka +//#if (kafka || sourceKafka) member x.Broker = x.get "PROPULSION_KAFKA_BROKER" member x.Topic = x.get "PROPULSION_KAFKA_TOPIC" //#endif diff --git a/propulsion-reactor/Handler.fs b/propulsion-reactor/Handler.fs index 3cd1ebf60..40fad3082 100644 --- a/propulsion-reactor/Handler.fs +++ b/propulsion-reactor/Handler.fs @@ -12,7 +12,7 @@ type Outcome = /// Gathers stats based on the outcome of each Span processed for emission, at intervals controlled by `StreamsConsumer` type Stats(log, statsInterval, stateInterval, verboseStore, ?logExternalStats) = -#if sourceKafka || blank +#if (blank || sourceKafka) inherit 
Propulsion.Streams.Stats(log, statsInterval, stateInterval) #else inherit Propulsion.Streams.Sync.Stats(log, statsInterval, stateInterval) diff --git a/propulsion-reactor/Program.fs b/propulsion-reactor/Program.fs index 7ad75875a..7c58bef54 100644 --- a/propulsion-reactor/Program.fs +++ b/propulsion-reactor/Program.fs @@ -65,13 +65,13 @@ module Args = FsKafka.KafkaConsumerConfig.Create( appName, p.Broker, [p.Topic], groupName, Confluent.Kafka.AutoOffsetReset.Earliest, maxInFlightBytes = p.MaxInFlightBytes, ?statisticsInterval = p.LagFrequency) -#if kafka && blank +#if (kafka && blank) let targetStore = () in targetStore, targetStore, p.Kafka, createConsumerConfig, ignore #else let cache = Equinox.Cache (appName, sizeMb = cacheSizeMb) let targetStore = p.ConnectTarget cache #if kafka - let kafka = a.Kafka + let kafka = p.Kafka #else let kafka = () #endif @@ -105,7 +105,7 @@ module Args = let context = client |> CosmosStoreContext.create let store = Config.Store.Cosmos (context, cache) #if kafka - let kafka = a.Kafka + let kafka = p.Kafka #if blank let targetStore = store #else @@ -124,7 +124,7 @@ module Args = SourceConfig.Dynamo (indexStore, checkpoints, load, startFromTail, batchSizeCutoff, tailSleepInterval, x.StatsInterval) let store = Config.Store.Dynamo (context, cache) #if kafka - let kafka = a.Kafka + let kafka = p.Kafka #if blank let targetStore = store #else @@ -145,7 +145,7 @@ module Args = let hydrateBodies = true SourceConfig.Esdb (connection.ReadConnection, checkpoints, hydrateBodies, startFromTail, maxItems, tailSleepInterval, x.StatsInterval) #if kafka - let kafka = a.Kafka + let kafka = p.Kafka #else let kafka = () #endif @@ -164,7 +164,7 @@ module Args = let hydrateBodies = true SourceConfig.Sss (connection.ReadConnection, checkpoints, hydrateBodies, startFromTail, maxItems, tailSleepInterval, x.StatsInterval) #if kafka - let kafka = a.Kafka + let kafka = p.Kafka #else let kafka = () #endif diff --git a/propulsion-reactor/Reactor.fsproj 
b/propulsion-reactor/Reactor.fsproj index 3cb01e878..e57a983d0 100644 --- a/propulsion-reactor/Reactor.fsproj +++ b/propulsion-reactor/Reactor.fsproj @@ -4,7 +4,6 @@ Exe net6.0 5 - sourceKafka diff --git a/propulsion-reactor/SourceArgs.fs b/propulsion-reactor/SourceArgs.fs index 6fd07320e..bb54e2cb3 100644 --- a/propulsion-reactor/SourceArgs.fs +++ b/propulsion-reactor/SourceArgs.fs @@ -6,11 +6,11 @@ open System type Configuration(tryGet) = inherit Args.Configuration(tryGet) -#if !sourceKafka +#if (!sourceKafka) member _.DynamoIndexTable = tryGet Args.INDEX_TABLE #endif -#if !sourceKafka +#if (!sourceKafka) module Cosmos = type [] Parameters = @@ -381,8 +381,8 @@ module Kafka = #if kafka member x.Kafka = match x.TargetStoreArgs with - | Args.TargetStoreArgs.Cosmos cosmos -> cosmos.Sink - | Args.TargetStoreArgs.Dynamo dynamo -> dynamo.Sink + | Args.TargetStoreArgs.Cosmos cosmos -> cosmos.Kafka + | Args.TargetStoreArgs.Dynamo dynamo -> dynamo.Kafka #endif member x.ConnectTarget(cache) : Config.Store = Args.TargetStoreArgs.connectTarget x.TargetStoreArgs cache From 246a45a3b0f502a18b04e84b982b0bd02e87577c Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 16 Sep 2022 09:15:19 +0100 Subject: [PATCH 36/43] f many --- equinox-web-csharp/.template.config/template.json | 5 +++-- equinox-web-csharp/Domain/Aggregate.cs | 2 +- equinox-web-csharp/Domain/Domain.csproj | 4 ---- equinox-web-csharp/Web/Startup.cs | 13 ++++++++----- equinox-web/Web/Startup.fs | 11 +++++++++++ propulsion-projector/Projector.fsproj | 1 - tests/Equinox.Templates.Tests/DotnetBuild.fs | 9 +++++---- 7 files changed, 28 insertions(+), 17 deletions(-) diff --git a/equinox-web-csharp/.template.config/template.json b/equinox-web-csharp/.template.config/template.json index c3c383eb3..56c6935fc 100755 --- a/equinox-web-csharp/.template.config/template.json +++ b/equinox-web-csharp/.template.config/template.json @@ -7,7 +7,8 @@ "Web" ], "tags": { - "language": "C#" + "language": "C#", + "type": 
"solution" }, "identity": "Equinox.Template.CSharp", "name": "Equinox Web App", @@ -87,4 +88,4 @@ ] } ] -} \ No newline at end of file +} diff --git a/equinox-web-csharp/Domain/Aggregate.cs b/equinox-web-csharp/Domain/Aggregate.cs index afaa5a1b7..a4a4e7af4 100755 --- a/equinox-web-csharp/Domain/Aggregate.cs +++ b/equinox-web-csharp/Domain/Aggregate.cs @@ -21,7 +21,7 @@ public class Snapshotted : Event static readonly SystemTextJsonUtf8Codec Codec = new(new()); - public static FSharpValueOption TryDecode(string et, byte[] json) => + public static FSharpValueOption TryDecode(string et, ReadOnlyMemory json) => et switch { nameof(Happened) => Codec.Decode(json), diff --git a/equinox-web-csharp/Domain/Domain.csproj b/equinox-web-csharp/Domain/Domain.csproj index 729ac30ae..0d2c83223 100755 --- a/equinox-web-csharp/Domain/Domain.csproj +++ b/equinox-web-csharp/Domain/Domain.csproj @@ -10,8 +10,4 @@ - - - - diff --git a/equinox-web-csharp/Web/Startup.cs b/equinox-web-csharp/Web/Startup.cs index c73a111f7..d598d2e60 100755 --- a/equinox-web-csharp/Web/Startup.cs +++ b/equinox-web-csharp/Web/Startup.cs @@ -82,7 +82,7 @@ static EquinoxContext ConfigureStore() // # run as a single-node cluster to allow connection logic to use cluster mode as for a commercial cluster // & $env:ProgramData\chocolatey\bin\EventStore.ClusterNode.exe --gossip-on-single-node --discover-via-dns 0 --ext-http-port=30778 - var esConfig = new EventStoreConfig("localhost", "admin", "changeit", cacheMb); + var esConfig = new EventStoreConfig("esdb://admin:changeit@localhost:2111,localhost:2112,localhost:2113?tls=true&tlsVerifyCert=false", cacheMb); return new EventStoreContext(esConfig); #endif #if cosmos @@ -130,7 +130,7 @@ public ServiceBuilder(EquinoxContext context, ILogger handlerLog) public Todo.Service CreateTodoService() { var resolve = - _context.Resolve( + _context.Resolve( _handlerLog, EquinoxCodec.Create(Todo.Event.Encode, Todo.Event.TryDecode), Todo.State.Fold, @@ -142,15 +142,18 @@ public 
Todo.Service CreateTodoService() #endif #if aggregate - public Aggregate.Service CreateAggregateService() => - new Aggregate.Service( + public Aggregate.Service CreateAggregateService() + { + var resolve = _context.Resolve( _handlerLog, EquinoxCodec.Create(Aggregate.Event.Encode, Aggregate.Event.TryDecode), Aggregate.State.Fold, Aggregate.State.Initial, Aggregate.State.IsOrigin, - Aggregate.State.Snapshot)); + Aggregate.State.Snapshot); + return new Aggregate.Service(ids => resolve(Aggregate.Event.StreamIds(ids))); + } #endif #if (!aggregate && !todos) // public Thing.Service CreateThingService() => diff --git a/equinox-web/Web/Startup.fs b/equinox-web/Web/Startup.fs index 390354df6..f19419f2d 100644 --- a/equinox-web/Web/Startup.fs +++ b/equinox-web/Web/Startup.fs @@ -172,6 +172,17 @@ type Startup() = | _ -> failwithf "Event Storage subsystem requires the following Environment Variables to be specified: %s, %s, %s" connectionVar databaseVar containerVar +//#endif +//#if dynamo + let storeConfig = + let regionVar, tableVar = "EQUINOX_DYNAMO_REGION", "EQUINOX_DYNAMO_TABLE" + let read key = Environment.GetEnvironmentVariable key |> Option.ofObj + match read regionVar, read tableVar with + | Some region, Some table -> + Storage.Store.Dynamo (region, table, cacheMb) + | _ -> + failwithf "Event Storage subsystem requires the following Environment Variables to be specified: %s, %s" regionVar tableVar + //#endif #if (memoryStore && !cosmos && !dynamo && !eventStore) let storeConfig = Storage.Store.Memory diff --git a/propulsion-projector/Projector.fsproj b/propulsion-projector/Projector.fsproj index 93763c63f..982d81e58 100644 --- a/propulsion-projector/Projector.fsproj +++ b/propulsion-projector/Projector.fsproj @@ -4,7 +4,6 @@ Exe net6.0 5 - esdb;sss;cosmos;dynamo;kafka diff --git a/tests/Equinox.Templates.Tests/DotnetBuild.fs b/tests/Equinox.Templates.Tests/DotnetBuild.fs index 777b46cef..6da21d15c 100644 --- a/tests/Equinox.Templates.Tests/DotnetBuild.fs +++ 
b/tests/Equinox.Templates.Tests/DotnetBuild.fs @@ -29,11 +29,12 @@ type EqxWebs() as this = inherit TheoryData() do for t in ["eqxweb"; "eqxwebcs"] do - do this.Add(t, ["--todos"; "--cosmos"]) + do this.Add(t, ["--todos"; "--cosmos"]) #if !DEBUG - do this.Add(t, ["--todos"]) - do this.Add(t, ["--todos"; "--eventStore"]) + do this.Add(t, ["--todos"]) + do this.Add(t, ["--todos"; "--eventStore"]) #endif + do this.Add("eqxweb", ["--todos"; "--cosmos"; "--aggregate"; "--dynamo"]) type DotnetBuild(output : ITestOutputHelper, folder : EquinoxTemplatesFixture) = @@ -44,7 +45,7 @@ type DotnetBuild(output : ITestOutputHelper, folder : EquinoxTemplatesFixture) = Dotnet.build [folder] #if DEBUG // Use this one to trigger an individual test - let [] ``*pending*`` () = run "proProjector" ["--source cosmos"; "--kafka"] + let [] ``*pending*`` () = run "eqxwebcs" ["--todos"; "--eventStore"] #endif let [] eqxPatterns () = run "eqxPatterns" [] From e42b26af9dcb43a0d1306e1f52c990ca371c4263 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 16 Sep 2022 09:28:24 +0100 Subject: [PATCH 37/43] f eqxweb, sync --- equinox-web/Web/Web.fsproj | 1 + propulsion-sync/Program.fs | 6 +++--- tests/Equinox.Templates.Tests/DotnetBuild.fs | 4 ++-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/equinox-web/Web/Web.fsproj b/equinox-web/Web/Web.fsproj index e58d363ca..cfe29762a 100644 --- a/equinox-web/Web/Web.fsproj +++ b/equinox-web/Web/Web.fsproj @@ -12,6 +12,7 @@ + diff --git a/propulsion-sync/Program.fs b/propulsion-sync/Program.fs index 86980e2ae..5fefbc75e 100644 --- a/propulsion-sync/Program.fs +++ b/propulsion-sync/Program.fs @@ -438,12 +438,12 @@ module EventV0Parser = member _.CausationId = null member _.Context = null - type Newtonsoft.Json.Linq.JObject with + type System.Text.Json.JsonDocument with member document.Cast<'T>() = - document.ToObject<'T>() + System.Text.Json.JsonSerializer.Deserialize<'T>(document.RootElement) /// We assume all Documents represent Events 
laid out as above - let parse (d : Newtonsoft.Json.Linq.JObject) : Propulsion.Streams.Default.StreamEvent = + let parse (d : System.Text.Json.JsonDocument) : Propulsion.Streams.Default.StreamEvent = let e = d.Cast() FsCodec.StreamName.parse e.s, e |> FsCodec.Core.TimelineEvent.Map ReadOnlyMemory diff --git a/tests/Equinox.Templates.Tests/DotnetBuild.fs b/tests/Equinox.Templates.Tests/DotnetBuild.fs index 6da21d15c..baf0eed39 100644 --- a/tests/Equinox.Templates.Tests/DotnetBuild.fs +++ b/tests/Equinox.Templates.Tests/DotnetBuild.fs @@ -34,7 +34,7 @@ type EqxWebs() as this = do this.Add(t, ["--todos"]) do this.Add(t, ["--todos"; "--eventStore"]) #endif - do this.Add("eqxweb", ["--todos"; "--cosmos"; "--aggregate"; "--dynamo"]) + do this.Add("eqxweb", ["--todos"; "--aggregate"; "--dynamo"]) type DotnetBuild(output : ITestOutputHelper, folder : EquinoxTemplatesFixture) = @@ -45,7 +45,7 @@ type DotnetBuild(output : ITestOutputHelper, folder : EquinoxTemplatesFixture) = Dotnet.build [folder] #if DEBUG // Use this one to trigger an individual test - let [] ``*pending*`` () = run "eqxwebcs" ["--todos"; "--eventStore"] + let [] ``*pending*`` () = run "eqxwebcs" ["--todos"; "--cosmos"] #endif let [] eqxPatterns () = run "eqxPatterns" [] From 7bb209f8fdf1632c1857059ded53bad533299c69 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 16 Sep 2022 11:27:48 +0100 Subject: [PATCH 38/43] f proProjector --- equinox-web/Web/Startup.fs | 2 +- propulsion-projector/Args.fs | 19 +++++++--- propulsion-projector/Config.fs | 10 +++-- propulsion-projector/Handler.fs | 12 ++++-- propulsion-projector/Infrastructure.fs | 8 ++-- propulsion-projector/Program.fs | 51 +++++++++++++++----------- propulsion-projector/Projector.fsproj | 8 +--- propulsion-projector/SourceArgs.fs | 4 +- propulsion-projector/SourceConfig.fs | 19 +++++++++- 9 files changed, 85 insertions(+), 48 deletions(-) diff --git a/equinox-web/Web/Startup.fs b/equinox-web/Web/Startup.fs index f19419f2d..1319ac4f7 100644 --- 
a/equinox-web/Web/Startup.fs +++ b/equinox-web/Web/Startup.fs @@ -135,7 +135,7 @@ type Startup() = |> Seq.iter options.JsonSerializerOptions.Converters.Add ) |> ignore -//#if (cosmos || eventStore) +//#if (cosmos || eventStore || dynamo) // This is the allocation limit passed internally to a System.Caching.MemoryCache instance // The primary objects held in the cache are the Folded State of Event-sourced aggregates // see https://docs.microsoft.com/en-us/dotnet/framework/performance/caching-in-net-framework-applications for more information diff --git a/propulsion-projector/Args.fs b/propulsion-projector/Args.fs index 0f6b0f34c..dfeb06e78 100644 --- a/propulsion-projector/Args.fs +++ b/propulsion-projector/Args.fs @@ -46,13 +46,16 @@ type Configuration(tryGet : string -> string option) = member x.PrometheusPort = tryGet "PROMETHEUS_PORT" |> Option.map int +#if (esdb || sss || dynamo) // Type used to represent where checkpoints (for either the FeedConsumer position, or for a Reactor's Event Store subscription position) will be stored // In a typical app you don't have anything like this as you'll simply use your primary Event Store (see) module Checkpoints = [] type Store = +#if (!dynamo) | Cosmos of Equinox.CosmosStore.CosmosStoreContext * Equinox.Core.ICache +#endif | Dynamo of Equinox.DynamoStore.DynamoStoreContext * Equinox.Core.ICache (* Propulsion.EventStoreDb does not implement a native checkpoint storage mechanism, perhaps port https://github.com/absolutejam/Propulsion.EventStoreDB ? 
@@ -62,17 +65,22 @@ module Checkpoints = For now, we store the Checkpoints in one of the above stores *) let create (consumerGroup, checkpointInterval) storeLog : Store -> Propulsion.Feed.IFeedCheckpointStore = function +#if (!dynamo) | Store.Cosmos (context, cache) -> Propulsion.Feed.ReaderCheckpoint.CosmosStore.create storeLog (consumerGroup, checkpointInterval) (context, cache) +#endif | Store.Dynamo (context, cache) -> Propulsion.Feed.ReaderCheckpoint.DynamoStore.create storeLog (consumerGroup, checkpointInterval) (context, cache) let createCheckpointStore (group, checkpointInterval, store : Config.Store) : Propulsion.Feed.IFeedCheckpointStore = let checkpointStore = match store with +#if (!dynamo) | Config.Store.Cosmos (context, cache) -> Store.Cosmos (context, cache) +#endif | Config.Store.Dynamo (context, cache) -> Store.Dynamo (context, cache) create (group, checkpointInterval) Config.log checkpointStore +#endif open Argu #if kafka @@ -91,7 +99,7 @@ type KafkaSinkArguments(c : Configuration, p : ParseResults #endif -#if (esdb || sss || cosmos) +// #if (esdb || sss || cosmos) module Cosmos = type [] Parameters = @@ -138,8 +146,8 @@ module Cosmos = | _ -> missingArg "Must specify `kafka` arguments" #endif -#endif // cosmos -#if (esdb || sss || dynamo) +// #endif // cosmos +// #if (esdb || sss || dynamo) module Dynamo = type [] Parameters = @@ -198,13 +206,13 @@ module Dynamo = | _ -> missingArg "Must specify `kafka` arguments" #endif -#endif // dynamo +// #endif // dynamo +#if (esdb || sss) type [] TargetStoreArgs = | Cosmos of Cosmos.Arguments | Dynamo of Dynamo.Arguments - module TargetStoreArgs = let connectTarget targetStore cache: Config.Store = @@ -215,3 +223,4 @@ module TargetStoreArgs = | TargetStoreArgs.Dynamo a -> let context = a.Connect() |> DynamoStoreContext.create Config.Store.Dynamo (context, cache) +#endif diff --git a/propulsion-projector/Config.fs b/propulsion-projector/Config.fs index e33d14b10..2b99d9032 100644 --- 
a/propulsion-projector/Config.fs +++ b/propulsion-projector/Config.fs @@ -2,6 +2,7 @@ module ProjectorTemplate.Config let log = Serilog.Log.ForContext("isMetric", true) +// #if (esdb || sss || cosmos) module Cosmos = let private createCached codec initial fold accessStrategy (context, cache) : Equinox.Category<_, _, _> = @@ -16,6 +17,7 @@ module Cosmos = let accessStrategy = Equinox.CosmosStore.AccessStrategy.RollingState toSnapshot createCached codec initial fold accessStrategy (context, cache) +// #endif module Dynamo = let private createCached codec initial fold accessStrategy (context, cache) : Equinox.Category<_, _, _> = @@ -45,11 +47,11 @@ module Sss = #endif +// #if (esdb || sss || dynamo) [] type Store = -#if cosmos || esdb || sss +#if (esdb || sss) | Cosmos of Equinox.CosmosStore.CosmosStoreContext * Equinox.Core.ICache -#endif -#if dynamo || esdb || sss +#endif | Dynamo of Equinox.DynamoStore.DynamoStoreContext * Equinox.Core.ICache -#endif +// #endif diff --git a/propulsion-projector/Handler.fs b/propulsion-projector/Handler.fs index 0137bc20a..2400e2b1f 100644 --- a/propulsion-projector/Handler.fs +++ b/propulsion-projector/Handler.fs @@ -4,8 +4,8 @@ module ProjectorTemplate.Handler #if parallelOnly // Here we pass the items directly through to the handler without parsing them let mapToStreamItems (x : System.Collections.Generic.IReadOnlyCollection<'a>) : seq<'a> = upcast x -#else // cosmos && !parallelOnly let categoryFilter _ = true +#else // cosmos && !parallelOnly #endif // !parallelOnly //#endif // cosmos @@ -14,8 +14,10 @@ let categoryFilter _ = true type ExampleOutput = { id : string } let serdes = FsCodec.SystemTextJson.Options.Create() |> FsCodec.SystemTextJson.Serdes -let render (doc : Newtonsoft.Json.Linq.JObject) : string * string = - let equinoxPartition, itemId = doc.Value("p"), doc.Value("id") +let render (doc : System.Text.Json.JsonDocument) = + let r = doc.RootElement + let gs (name : string) = let x = r.GetProperty name in 
x.GetString() + let equinoxPartition, itemId = gs "p", gs "id" equinoxPartition, serdes.Serialize { id = itemId } #else // kafka && !(cosmos && parallelOnly) // Each outcome from `handle` is passed to `HandleOk` or `HandleExn` by the scheduler, DumpStats is called at `statsInterval` @@ -42,6 +44,10 @@ let render struct (stream : FsCodec.StreamName, span : Propulsion.Streams.Defaul |> Propulsion.Codec.NewtonsoftJson.RenderedSpan.ofStreamSpan stream |> Propulsion.Codec.NewtonsoftJson.Serdes.Serialize return struct (FsCodec.StreamName.toString stream, value) } + +let categoryFilter = function + | _ -> true // TODO filter categories to be rendered + #endif // kafka && !(cosmos && parallelOnly) #else // !kafka // Each outcome from `handle` is passed to `HandleOk` or `HandleExn` by the scheduler, DumpStats is called at `statsInterval` diff --git a/propulsion-projector/Infrastructure.fs b/propulsion-projector/Infrastructure.fs index 579918765..b30aef3e9 100644 --- a/propulsion-projector/Infrastructure.fs +++ b/propulsion-projector/Infrastructure.fs @@ -8,7 +8,7 @@ module EnvVar = let tryGet varName : string option = Environment.GetEnvironmentVariable varName |> Option.ofObj -#if (cosmos || esdb || sss) +// #if (cosmos || esdb || sss) module CosmosStoreContext = /// Create with default packing and querying policies. 
Search for other `module CosmosStoreContext` impls for custom variations @@ -46,8 +46,8 @@ type Equinox.CosmosStore.CosmosStoreConnector with let storeClient = Equinox.CosmosStore.CosmosStoreClient(monitored.Database.Client, databaseId, containerId) storeClient, monitored -#endif -#if (dynamo || esdb || sss) +// #endif +// #if (dynamo || esdb || sss) module Dynamo = open Equinox.DynamoStore @@ -95,7 +95,7 @@ module DynamoStoreContext = let create (storeClient : Equinox.DynamoStore.DynamoStoreClient) = Equinox.DynamoStore.DynamoStoreContext(storeClient, queryMaxItems = 100) -#endif +// #endif [] type Logging() = diff --git a/propulsion-projector/Program.fs b/propulsion-projector/Program.fs index 40001fd28..6aba1c20b 100644 --- a/propulsion-projector/Program.fs +++ b/propulsion-projector/Program.fs @@ -18,9 +18,9 @@ module Args = | [] Broker of string | [] Topic of string #endif -#if cosmos +// #if cosmos | [] Cosmos of ParseResults -#endif +// #endif #if dynamo | [] Dynamo of ParseResults #endif @@ -36,13 +36,13 @@ module Args = | ProcessorName _ -> "Projector consumer group name." | MaxReadAhead _ -> "maximum number of batches to let processing get ahead of completion. Default: 64" | MaxWriters _ -> "maximum number of concurrent streams on which to process at any time. Default: 1024" -//#if kafka +#if kafka | Broker _ -> "specify Kafka Broker, in host:port format. (optional if environment variable PROPULSION_KAFKA_BROKER specified)" | Topic _ -> "specify Kafka Topic Id. (optional if environment variable PROPULSION_KAFKA_TOPIC specified)" -//#endif -#if cosmos - | Cosmos _ -> "specify CosmosDb input parameters" #endif +// #if cosmos + | Cosmos _ -> "specify CosmosDb input parameters" +// #endif #if dynamo | Dynamo _ -> "specify DynamoDb input parameters" #endif @@ -63,22 +63,27 @@ module Args = member _.ProcessorParams() = Log.Information("Projecting... 
{processorName}, reading {maxReadAhead} ahead, {dop} writers", processorName, maxReadAhead, maxConcurrentProcessors) (processorName, maxReadAhead, maxConcurrentProcessors) - member val Store : Choice = + member val Store = match p.GetSubCommand() with - | Cosmos p -> Choice1Of4 <| SourceArgs.Cosmos.Arguments(c, p) - | Dynamo p -> Choice2Of4 <| SourceArgs.Dynamo.Arguments(c, p) - | Esdb p -> Choice3Of4 <| SourceArgs.Esdb.Arguments(c, p) - | Sss p -> Choice4Of4 <| SourceArgs.Sss.Arguments(c, p) +// #if cosmos + | Cosmos p -> SourceArgs.Cosmos.Arguments(c, p) +// #endif +#if dynamo + | Dynamo p -> SourceArgs.Dynamo.Arguments(c, p) +#endif +#if esdb + | Esdb p -> SourceArgs.Esdb.Arguments(c, p) +#endif +#if sss + | Sss p -> SourceArgs.Sss.Arguments(c, p) +#endif | p -> Args.missingArg $"Unexpected Store subcommand %A{p}" - member x.VerboseStore = match x.Store with - | Choice1Of4 p -> p.Verbose - | Choice2Of4 p -> p.Verbose - | Choice3Of4 p -> p.Verbose - | Choice4Of4 p -> p.Verbose + member x.VerboseStore = x.Store.Verbose member x.ConnectSource(appName) : (ILogger -> string -> SourceConfig) * _ * (ILogger -> unit) = let cache = Equinox.Cache (appName, sizeMb = x.CacheSizeMb) match x.Store with - | Choice1Of4 a -> + | a -> +//#if cosmos let monitored = a.ConnectMonitored() let buildSourceConfig log groupName = let leases, startFromTail, maxItems, tailSleepInterval, lagFrequency = a.MonitoringParams(log) @@ -90,7 +95,8 @@ module Args = let target = () #endif buildSourceConfig, target, ignore - | Choice2Of4 a -> +// #endif +#if dynamo let context = a.Connect() let buildSourceConfig log groupName = let indexStore, startFromTail, batchSizeCutoff, tailSleepInterval, streamsDop = a.MonitoringParams(log) @@ -103,7 +109,8 @@ module Args = let target = () #endif buildSourceConfig, target, Equinox.DynamoStore.Core.Log.InternalMetrics.dump - | Choice3Of4 a -> +#endif +#if esdb let connection = a.Connect(Log.Logger, appName, EventStore.Client.NodePreference.Leader) let 
targetStore = a.ConnectTarget(cache) let buildSourceConfig log groupName = @@ -119,7 +126,8 @@ module Args = buildSourceConfig, target, fun log -> Equinox.CosmosStore.Core.Log.InternalMetrics.dump log Equinox.DynamoStore.Core.Log.InternalMetrics.dump log - | Choice4Of4 a -> +#endif +#if sss let connection = a.Connect() let buildSourceConfig log groupName = let startFromTail, maxItems, tailSleepInterval = a.MonitoringParams(log) @@ -135,6 +143,7 @@ module Args = Equinox.SqlStreamStore.Log.InternalMetrics.dump log Equinox.CosmosStore.Core.Log.InternalMetrics.dump log Equinox.DynamoStore.Core.Log.InternalMetrics.dump log +#endif /// Parse the commandline; can throw exceptions in response to missing arguments and/or `-h`/`--help` args let parse tryGetConfigValue argv : Arguments = @@ -160,7 +169,7 @@ let build (args : Args.Arguments) = #endif // kafka && !parallelOnly #else // !kafka let stats = Handler.Stats(Log.Logger, args.StatsInterval, args.StateInterval) - let sink = Propulsion.Streams.StreamsProjector.Start(Log.Logger, maxReadAhead, maxConcurrentStreams, Handler.handle, stats, args.StatsInterval) + let sink = Propulsion.Streams.Default.Config.Start(Log.Logger, maxReadAhead, maxConcurrentProcessors, Handler.handle, stats, args.StatsInterval) #endif // !kafka let source, _awaitReactions = let sourceConfig = buildSourceConfig Log.Logger consumerGroupName diff --git a/propulsion-projector/Projector.fsproj b/propulsion-projector/Projector.fsproj index 982d81e58..c2e2cf14c 100644 --- a/propulsion-projector/Projector.fsproj +++ b/propulsion-projector/Projector.fsproj @@ -24,19 +24,13 @@ - + - - - - - - diff --git a/propulsion-projector/SourceArgs.fs b/propulsion-projector/SourceArgs.fs index 42249bba3..ac54436b2 100644 --- a/propulsion-projector/SourceArgs.fs +++ b/propulsion-projector/SourceArgs.fs @@ -10,7 +10,7 @@ type Configuration(tryGet) = member _.DynamoIndexTable = tryGet Args.INDEX_TABLE #endif -#if cosmos +// #if cosmos module Cosmos = type [] Parameters = @@ 
-81,7 +81,7 @@ module Cosmos = | _ -> Args.missingArg "Must specify `kafka` arguments" #endif -#endif // cosmos +// #endif // cosmos #if dynamo module Dynamo = diff --git a/propulsion-projector/SourceConfig.fs b/propulsion-projector/SourceConfig.fs index 284432e77..eaf8f4708 100644 --- a/propulsion-projector/SourceConfig.fs +++ b/propulsion-projector/SourceConfig.fs @@ -4,10 +4,12 @@ open System [] type SourceConfig = +#if (cosmos) | Cosmos of monitoredContainer : Microsoft.Azure.Cosmos.Container * leasesContainer : Microsoft.Azure.Cosmos.Container * checkpoints : CosmosFeedConfig * tailSleepInterval : TimeSpan +#endif | Dynamo of indexStore : Equinox.DynamoStore.DynamoStoreClient * checkpoints : Propulsion.Feed.IFeedCheckpointStore * loading : DynamoLoadModeConfig @@ -36,6 +38,7 @@ and [] DynamoLoadModeConfig = | Hydrate of monitoredContext : Equinox.DynamoStore.DynamoStoreContext * hydrationConcurrency : int module SourceConfig = +#if cosmos module Cosmos = open Propulsion.CosmosStore let start log (sink : Propulsion.Streams.Default.Sink) categoryFilter @@ -56,6 +59,8 @@ module SourceConfig = startFromTail = startFromTail, ?maxItems = maxItems, tailSleepInterval = tailSleepInterval, lagReportFreq = lagFrequency) source, None +#endif +#if dynamo module Dynamo = open Propulsion.DynamoStore let start (log, storeLog) (sink : Propulsion.Streams.Default.Sink) categoryFilter @@ -70,6 +75,8 @@ module SourceConfig = checkpoints, sink, loadMode, startFromTail = startFromTail, storeLog = storeLog) source.Start(), Some (fun propagationDelay -> source.Monitor.AwaitCompletion(propagationDelay, ignoreSubsequent = false)) +#endif +#if esdb module Esdb = open Propulsion.EventStoreDb let start log (sink : Propulsion.Streams.Default.Sink) categoryFilter @@ -80,6 +87,8 @@ module SourceConfig = client, batchSize, tailSleepInterval, checkpoints, sink, categoryFilter, hydrateBodies = hydrateBodies, startFromTail = startFromTail) source.Start(), Some (fun propagationDelay -> 
source.Monitor.AwaitCompletion(propagationDelay, ignoreSubsequent = false)) +#endif +#if sss module Sss = open Propulsion.SqlStreamStore let start log (sink : Propulsion.Streams.Default.Sink) categoryFilter @@ -90,13 +99,21 @@ module SourceConfig = client, batchSize, tailSleepInterval, checkpoints, sink, categoryFilter, hydrateBodies = hydrateBodies, startFromTail = startFromTail) source.Start(), Some (fun propagationDelay -> source.Monitor.AwaitCompletion(propagationDelay, ignoreSubsequent = false)) - +#endif let start (log, storeLog) sink categoryFilter : SourceConfig -> Propulsion.Pipeline * (TimeSpan -> Async) option = function +#if cosmos | SourceConfig.Cosmos (monitored, leases, checkpointConfig, tailSleepInterval) -> Cosmos.start log sink categoryFilter (monitored, leases, checkpointConfig, tailSleepInterval) +#endif +#if dynamo | SourceConfig.Dynamo (indexStore, checkpoints, loading, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval) -> Dynamo.start (log, storeLog) sink categoryFilter (indexStore, checkpoints, loading, startFromTail, tailSleepInterval, batchSizeCutoff, statsInterval) +#endif +#if esdb | SourceConfig.Esdb (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval) -> Esdb.start log sink categoryFilter (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval) +#endif +#if sss | SourceConfig.Sss (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval) -> Sss.start log sink categoryFilter (client, checkpoints, hydrateBodies, startFromTail, batchSize, tailSleepInterval, statsInterval) +#endif From bd2207854b9e2ed958f3fbdf31b7e15cf4ca6283 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 16 Sep 2022 11:56:28 +0100 Subject: [PATCH 39/43] f proReactor --- propulsion-reactor/Program.fs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/propulsion-reactor/Program.fs 
b/propulsion-reactor/Program.fs index 7c58bef54..7bdc491ad 100644 --- a/propulsion-reactor/Program.fs +++ b/propulsion-reactor/Program.fs @@ -71,7 +71,7 @@ module Args = let cache = Equinox.Cache (appName, sizeMb = cacheSizeMb) let targetStore = p.ConnectTarget cache #if kafka - let kafka = p.Kafka + let kafka = a.Kafka #else let kafka = () #endif @@ -105,7 +105,7 @@ module Args = let context = client |> CosmosStoreContext.create let store = Config.Store.Cosmos (context, cache) #if kafka - let kafka = p.Kafka + let kafka = a.Kafka #if blank let targetStore = store #else @@ -124,7 +124,7 @@ module Args = SourceConfig.Dynamo (indexStore, checkpoints, load, startFromTail, batchSizeCutoff, tailSleepInterval, x.StatsInterval) let store = Config.Store.Dynamo (context, cache) #if kafka - let kafka = p.Kafka + let kafka = a.Kafka #if blank let targetStore = store #else @@ -145,7 +145,7 @@ module Args = let hydrateBodies = true SourceConfig.Esdb (connection.ReadConnection, checkpoints, hydrateBodies, startFromTail, maxItems, tailSleepInterval, x.StatsInterval) #if kafka - let kafka = p.Kafka + let kafka = a.Kafka #else let kafka = () #endif @@ -164,7 +164,7 @@ module Args = let hydrateBodies = true SourceConfig.Sss (connection.ReadConnection, checkpoints, hydrateBodies, startFromTail, maxItems, tailSleepInterval, x.StatsInterval) #if kafka - let kafka = p.Kafka + let kafka = a.Kafka #else let kafka = () #endif From fd2c51946946fbcf005e00cdf50b1d46146f22d3 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 16 Sep 2022 12:30:21 +0100 Subject: [PATCH 40/43] Undo delete, fixes --- .../Watchdog.Integration/CosmosConnector.fs | 31 +++++++++++++++++++ propulsion-projector/SourceConfig.fs | 12 +++---- propulsion-reactor/Program.fs | 12 +++---- 3 files changed, 43 insertions(+), 12 deletions(-) create mode 100644 equinox-shipping/Watchdog.Integration/CosmosConnector.fs diff --git a/equinox-shipping/Watchdog.Integration/CosmosConnector.fs 
b/equinox-shipping/Watchdog.Integration/CosmosConnector.fs new file mode 100644 index 000000000..dfa3a80ef --- /dev/null +++ b/equinox-shipping/Watchdog.Integration/CosmosConnector.fs @@ -0,0 +1,31 @@ +namespace Shipping.Watchdog.Integration + +open Shipping.Infrastructure + +type CosmosConnector(connectionString, databaseId, containerId) = + + let discovery = connectionString |> Equinox.CosmosStore.Discovery.ConnectionString + let timeout = 5. |> System.TimeSpan.FromSeconds + let retries, maxRetryWaitTime = 5, 5. |> System.TimeSpan.FromSeconds + let connectionMode = Microsoft.Azure.Cosmos.ConnectionMode.Gateway + let connector = Equinox.CosmosStore.CosmosStoreConnector(discovery, timeout, retries, maxRetryWaitTime, connectionMode) + let leaseContainerId = containerId + "-aux" + let connectLeases () = connector.CreateUninitialized(databaseId, leaseContainerId) + + new (c : Shipping.Watchdog.SourceArgs.Configuration) = CosmosConnector(c.CosmosConnection, c.CosmosDatabase, c.CosmosContainer) + new () = CosmosConnector(Shipping.Watchdog.SourceArgs.Configuration EnvVar.tryGet) + + member val DumpStats = Equinox.CosmosStore.Core.Log.InternalMetrics.dump + member private _.ConnectStoreAndMonitored() = connector.ConnectStoreAndMonitored(databaseId, containerId) + member _.ConnectLeases() = + let leases : Microsoft.Azure.Cosmos.Container = connectLeases() + // Just as ConnectStoreAndMonitored references the global Logger, so do we -> see SerilogLogFixture, _dummy + Serilog.Log.Information("ChangeFeed Leases Database {db} Container {container}", leases.Database.Id, leases.Id) + leases + member x.Connect() = + let client, monitored = x.ConnectStoreAndMonitored() + let storeCfg = + let context = client |> CosmosStoreContext.create + let cache = Equinox.Cache("Tests", sizeMb = 10) + Shipping.Domain.Config.Store.Cosmos (context, cache) + storeCfg, monitored diff --git a/propulsion-projector/SourceConfig.fs b/propulsion-projector/SourceConfig.fs index eaf8f4708..a56b0bca7 100644 
--- a/propulsion-projector/SourceConfig.fs +++ b/propulsion-projector/SourceConfig.fs @@ -4,12 +4,12 @@ open System [] type SourceConfig = -#if (cosmos) +// #if (cosmos) | Cosmos of monitoredContainer : Microsoft.Azure.Cosmos.Container * leasesContainer : Microsoft.Azure.Cosmos.Container * checkpoints : CosmosFeedConfig * tailSleepInterval : TimeSpan -#endif +// #endif | Dynamo of indexStore : Equinox.DynamoStore.DynamoStoreClient * checkpoints : Propulsion.Feed.IFeedCheckpointStore * loading : DynamoLoadModeConfig @@ -38,7 +38,7 @@ and [] DynamoLoadModeConfig = | Hydrate of monitoredContext : Equinox.DynamoStore.DynamoStoreContext * hydrationConcurrency : int module SourceConfig = -#if cosmos +// #if cosmos module Cosmos = open Propulsion.CosmosStore let start log (sink : Propulsion.Streams.Default.Sink) categoryFilter @@ -59,7 +59,7 @@ module SourceConfig = startFromTail = startFromTail, ?maxItems = maxItems, tailSleepInterval = tailSleepInterval, lagReportFreq = lagFrequency) source, None -#endif +// #endif #if dynamo module Dynamo = open Propulsion.DynamoStore @@ -101,10 +101,10 @@ module SourceConfig = source.Start(), Some (fun propagationDelay -> source.Monitor.AwaitCompletion(propagationDelay, ignoreSubsequent = false)) #endif let start (log, storeLog) sink categoryFilter : SourceConfig -> Propulsion.Pipeline * (TimeSpan -> Async) option = function -#if cosmos +// #if cosmos | SourceConfig.Cosmos (monitored, leases, checkpointConfig, tailSleepInterval) -> Cosmos.start log sink categoryFilter (monitored, leases, checkpointConfig, tailSleepInterval) -#endif +// #endif #if dynamo | SourceConfig.Dynamo (indexStore, checkpoints, loading, startFromTail, batchSizeCutoff, tailSleepInterval, statsInterval) -> Dynamo.start (log, storeLog) sink categoryFilter (indexStore, checkpoints, loading, startFromTail, tailSleepInterval, batchSizeCutoff, statsInterval) diff --git a/propulsion-reactor/Program.fs b/propulsion-reactor/Program.fs index 7bdc491ad..31f2e328e 100644 --- 
a/propulsion-reactor/Program.fs +++ b/propulsion-reactor/Program.fs @@ -57,19 +57,19 @@ module Args = (processorName, maxReadAhead, maxConcurrentStreams) #if sourceKafka member _.ConnectStoreAndSource(appName) : _ * _ * _ * (string -> FsKafka.KafkaConsumerConfig) * (ILogger -> unit) = - let p = + let a = match p.GetSubCommand() with | Kafka p -> SourceArgs.Kafka.Arguments(c, p) | p -> Args.missingArg $"Unexpected Source subcommand %A{p}" let createConsumerConfig groupName = FsKafka.KafkaConsumerConfig.Create( - appName, p.Broker, [p.Topic], groupName, Confluent.Kafka.AutoOffsetReset.Earliest, - maxInFlightBytes = p.MaxInFlightBytes, ?statisticsInterval = p.LagFrequency) + appName, a.Broker, [a.Topic], groupName, Confluent.Kafka.AutoOffsetReset.Earliest, + maxInFlightBytes = a.MaxInFlightBytes, ?statisticsInterval = a.LagFrequency) #if (kafka && blank) - let targetStore = () in targetStore, targetStore, p.Kafka, createConsumerConfig, ignore + let targetStore = () in targetStore, targetStore, a.Kafka, createConsumerConfig, ignore #else let cache = Equinox.Cache (appName, sizeMb = cacheSizeMb) - let targetStore = p.ConnectTarget cache + let targetStore = a.ConnectTarget cache #if kafka let kafka = a.Kafka #else @@ -78,7 +78,7 @@ module Args = targetStore, targetStore, kafka, createConsumerConfig, fun log -> Equinox.CosmosStore.Core.Log.InternalMetrics.dump log Equinox.DynamoStore.Core.Log.InternalMetrics.dump log -#endif +#endif member val VerboseStore = false #else member val Store : Choice = From fa736208af1e39fc52d616613afb9f5ae3b597d8 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 16 Sep 2022 13:34:09 +0100 Subject: [PATCH 41/43] Fix parallelOnly cosmos --- propulsion-projector/Program.fs | 14 ++++++++++++++ propulsion-projector/SourceConfig.fs | 12 +++++++++++- tests/Equinox.Templates.Tests/DotnetBuild.fs | 1 - 3 files changed, 25 insertions(+), 2 deletions(-) diff --git a/propulsion-projector/Program.fs b/propulsion-projector/Program.fs index 
6aba1c20b..c55901a10 100644 --- a/propulsion-projector/Program.fs +++ b/propulsion-projector/Program.fs @@ -171,9 +171,23 @@ let build (args : Args.Arguments) = let stats = Handler.Stats(Log.Logger, args.StatsInterval, args.StateInterval) let sink = Propulsion.Streams.Default.Config.Start(Log.Logger, maxReadAhead, maxConcurrentProcessors, Handler.handle, stats, args.StatsInterval) #endif // !kafka +#if (cosmos && parallelOnly) + // Custom logic for establishing the source, as we're not projecting StreamEvents - TODO could probably be generalized + let source = + let mapToStreamItems (x : System.Collections.Generic.IReadOnlyCollection<'a>) : seq<'a> = upcast x + let observer = Propulsion.CosmosStore.CosmosStoreSource.CreateObserver(Log.Logger, sink.StartIngester, Handler.mapToStreamItems) + match buildSourceConfig Log.Logger consumerGroupName with SourceConfig.Cosmos (monitoredContainer, leasesContainer, checkpoints, tailSleepInterval : TimeSpan) -> + match checkpoints with + | Ephemeral _ -> failwith "Unexpected" + | Persistent (processorName, startFromTail, maxItems, lagFrequency) -> + + Propulsion.CosmosStore.CosmosStoreSource.Start(Log.Logger, monitoredContainer, leasesContainer, consumerGroupName, observer, + startFromTail = startFromTail, ?maxItems=maxItems, lagReportFreq=lagFrequency) +#else let source, _awaitReactions = let sourceConfig = buildSourceConfig Log.Logger consumerGroupName Handler.Config.StartSource(Log.Logger, sink, sourceConfig) +#endif [| Async.AwaitKeyboardInterruptAsTaskCanceledException() source.AwaitWithStopOnCancellation() sink.AwaitWithStopOnCancellation() |] diff --git a/propulsion-projector/SourceConfig.fs b/propulsion-projector/SourceConfig.fs index a56b0bca7..b1f6b56f5 100644 --- a/propulsion-projector/SourceConfig.fs +++ b/propulsion-projector/SourceConfig.fs @@ -9,7 +9,8 @@ type SourceConfig = * leasesContainer : Microsoft.Azure.Cosmos.Container * checkpoints : CosmosFeedConfig * tailSleepInterval : TimeSpan -// #endif +// #endif 
+#if dynamo | Dynamo of indexStore : Equinox.DynamoStore.DynamoStoreClient * checkpoints : Propulsion.Feed.IFeedCheckpointStore * loading : DynamoLoadModeConfig @@ -17,6 +18,8 @@ type SourceConfig = * batchSizeCutoff : int * tailSleepInterval : TimeSpan * statsInterval : TimeSpan +#endif +#if esdb | Esdb of client : EventStore.Client.EventStoreClient * checkpoints : Propulsion.Feed.IFeedCheckpointStore * hydrateBodies : bool @@ -24,6 +27,8 @@ type SourceConfig = * batchSize : int * tailSleepInterval : TimeSpan * statsInterval : TimeSpan +#endif +#if sss | Sss of client : SqlStreamStore.IStreamStore * checkpoints : Propulsion.Feed.IFeedCheckpointStore * hydrateBodies : bool @@ -31,11 +36,16 @@ type SourceConfig = * batchSize : int * tailSleepInterval : TimeSpan * statsInterval : TimeSpan +#endif +// #if cosmos and [] CosmosFeedConfig = | Ephemeral of processorName : string | Persistent of processorName : string * startFromTail : bool * maxItems : int option * lagFrequency : TimeSpan +// #endif +#if dynamo and [] DynamoLoadModeConfig = | Hydrate of monitoredContext : Equinox.DynamoStore.DynamoStoreContext * hydrationConcurrency : int +#endif module SourceConfig = // #if cosmos diff --git a/tests/Equinox.Templates.Tests/DotnetBuild.fs b/tests/Equinox.Templates.Tests/DotnetBuild.fs index baf0eed39..8b6d2823d 100644 --- a/tests/Equinox.Templates.Tests/DotnetBuild.fs +++ b/tests/Equinox.Templates.Tests/DotnetBuild.fs @@ -47,7 +47,6 @@ type DotnetBuild(output : ITestOutputHelper, folder : EquinoxTemplatesFixture) = #if DEBUG // Use this one to trigger an individual test let [] ``*pending*`` () = run "eqxwebcs" ["--todos"; "--cosmos"] #endif - let [] eqxPatterns () = run "eqxPatterns" [] let [] eqxTestbed () = run "eqxTestbed" [] let [] eqxShipping () = run "eqxShipping" ["--skipIntegrationTests"] From 06a6c38c3523a8cc32b6917e3d5bd854c28b7c51 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 16 Sep 2022 14:01:01 +0100 Subject: [PATCH 42/43] Fix? 
--- propulsion-projector/Config.fs | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/propulsion-projector/Config.fs b/propulsion-projector/Config.fs index 2b99d9032..3d7c78644 100644 --- a/propulsion-projector/Config.fs +++ b/propulsion-projector/Config.fs @@ -2,7 +2,7 @@ module ProjectorTemplate.Config let log = Serilog.Log.ForContext("isMetric", true) -// #if (esdb || sss || cosmos) +// #if (cosmos || esdb || sss) module Cosmos = let private createCached codec initial fold accessStrategy (context, cache) : Equinox.Category<_, _, _> = @@ -32,7 +32,6 @@ module Dynamo = let accessStrategy = Equinox.DynamoStore.AccessStrategy.RollingState toSnapshot createCached codec initial fold accessStrategy (context, cache) -#if !(sourceKafka && kafka) module Esdb = let create codec initial fold (context, cache) = @@ -45,13 +44,9 @@ module Sss = let cacheStrategy = Equinox.SqlStreamStore.CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) Equinox.SqlStreamStore.SqlStreamStoreCategory(context, codec, fold, initial, cacheStrategy) -#endif - -// #if (esdb || sss || dynamo) [] type Store = -#if (esdb || sss) +#if (esdb || sss || cosmos) | Cosmos of Equinox.CosmosStore.CosmosStoreContext * Equinox.Core.ICache #endif | Dynamo of Equinox.DynamoStore.DynamoStoreContext * Equinox.Core.ICache -// #endif From 4eb6658f43b1204f969b85fe7a6f57ae87db6694 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 16 Sep 2022 14:19:05 +0100 Subject: [PATCH 43/43] another? 
--- README.md | 2 +- propulsion-projector/Args.fs | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index fbd431a2a..73ce58642 100644 --- a/README.md +++ b/README.md @@ -186,7 +186,7 @@ There's [integration tests in the repo](https://github.com/jet/dotnet-templates/ dotnet build build.proj # build Equinox.Templates package, run tests \/ dotnet pack build.proj # build Equinox.Templates package only - dotnet test build.proj # Test aphabetically newest file in bin/nupkgs only + dotnet test build.proj -c Release # Test alphabetically newest file in bin/nupkgs only (-c Release to run full tests) One can also do it manually: diff --git a/propulsion-projector/Args.fs b/propulsion-projector/Args.fs index dfeb06e78..6f57a2ea7 100644 --- a/propulsion-projector/Args.fs +++ b/propulsion-projector/Args.fs @@ -99,7 +99,7 @@ type KafkaSinkArguments(c : Configuration, p : ParseResults #endif -// #if (esdb || sss || cosmos) +//#if (esdb || sss || cosmos) module Cosmos = type [] Parameters = @@ -146,8 +146,8 @@ module Cosmos = | _ -> missingArg "Must specify `kafka` arguments" #endif -// #endif // cosmos -// #if (esdb || sss || dynamo) +//#endif // cosmos +//#if (esdb || sss || dynamo) module Dynamo = type [] Parameters = @@ -206,9 +206,9 @@ module Dynamo = | _ -> missingArg "Must specify `kafka` arguments" #endif -// #endif // dynamo +//#endif // dynamo -#if (esdb || sss) +#if esdb type [] TargetStoreArgs = | Cosmos of Cosmos.Arguments