diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8d18969fc1ac..279cb349e997 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -48,7 +48,76 @@ Ref: https://keepachangelog.com/en/1.0.0/
 ### Bug Fixes
+<<<<<<< HEAD
 * (baseapp) [#14049](https://github.com/cosmos/cosmos-sdk/pull/14049) Fix state sync when interval is zero.
+=======
+* (modules) [#13850](https://github.com/cosmos/cosmos-sdk/pull/13850) and [#14046](https://github.com/cosmos/cosmos-sdk/pull/14046) Remove gogoproto stringer annotations. This removes the custom `String()` methods on all types that were using the annotations.
+* (x/auth) [#13850](https://github.com/cosmos/cosmos-sdk/pull/13850/) Remove `MarshalYAML` methods from module (`x/...`) types.
+* (x/auth) [#13877](https://github.com/cosmos/cosmos-sdk/pull/13877) Rename `AccountKeeper`'s `GetNextAccountNumber` to `NextAccountNumber`.
+* (x/evidence) [#13740](https://github.com/cosmos/cosmos-sdk/pull/13740) The `NewQueryEvidenceRequest` function now takes `hash` as a HEX-encoded `string`.
+* (server) [#13485](https://github.com/cosmos/cosmos-sdk/pull/13485) The `Application` service now requires the `RegisterNodeService` method to be implemented.
+* (x/slashing, x/staking) [#13122](https://github.com/cosmos/cosmos-sdk/pull/13122) Add the type of infraction a validator committed as an argument to the `Slash` keeper method.
+* [#13437](https://github.com/cosmos/cosmos-sdk/pull/13437) Add a list of modules to export as an argument to `ExportAppStateAndValidators`.
+* (x/slashing) [#13427](https://github.com/cosmos/cosmos-sdk/pull/13427) Move `x/slashing/testslashing` to `x/slashing/testutil` for consistency with other modules.
+* (x/staking) [#13427](https://github.com/cosmos/cosmos-sdk/pull/13427) Move `x/staking/teststaking` to `x/staking/testutil` for consistency with other modules.
+* (simapp) [#13402](https://github.com/cosmos/cosmos-sdk/pull/13402) Move simulation flags to `x/simulation/client/cli`.
+* (simapp) [#13402](https://github.com/cosmos/cosmos-sdk/pull/13402) Move simulation helper functions (`SetupSimulation`, `SimulationOperations`, `CheckExportSimulation`, `PrintStats`, `GetSimulationLog`) to `testutil/sims`.
+* (simapp) [#13402](https://github.com/cosmos/cosmos-sdk/pull/13402) Move the `testutil/rest` package to `testutil`.
+* (types) [#13380](https://github.com/cosmos/cosmos-sdk/pull/13380) Remove deprecated `sdk.NewLevelDB`.
+* (simapp) [#13378](https://github.com/cosmos/cosmos-sdk/pull/13378) Move `simapp.App` to `runtime.AppI`.
+* (tx) [#12659](https://github.com/cosmos/cosmos-sdk/pull/12659) Remove broadcast mode `block`.
+* (db) [#13370](https://github.com/cosmos/cosmos-sdk/pull/13370) Remove `storev2alpha1`; see also https://github.com/cosmos/cosmos-sdk/pull/13371
+* (x/bank) [#12706](https://github.com/cosmos/cosmos-sdk/pull/12706) Remove the `testutil` package from the `x/bank/client` package.
+* (simapp) [#12747](https://github.com/cosmos/cosmos-sdk/pull/12747) Remove `simapp.MakeTestEncodingConfig`. Please use `moduletestutil.MakeTestEncodingConfig` (`types/module/testutil`) in tests instead.
+* (x/bank) [#12648](https://github.com/cosmos/cosmos-sdk/pull/12648) `NewSendAuthorization` takes a new argument: an optional list of addresses allowed to receive bank assets via an authz `MsgSend` grant. You can pass `nil` for the same behavior as before, i.e. any recipient is allowed.
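As an aside for the `NewSendAuthorization` entry above (#12648), here is a minimal sketch of the two call shapes. It assumes the two-argument `banktypes.NewSendAuthorization(spendLimit, allowedAddrs)` form used in this release series; the recipient address bytes are hypothetical placeholders.

```go
package main

import (
	"fmt"

	sdk "github.com/cosmos/cosmos-sdk/types"
	banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
)

func main() {
	spendLimit := sdk.NewCoins(sdk.NewInt64Coin("stake", 1000))

	// As before: a nil allow-list means any recipient is allowed.
	anyRecipient := banktypes.NewSendAuthorization(spendLimit, nil)

	// New: restrict the grantee to sending only to the listed addresses.
	allowed := []sdk.AccAddress{sdk.AccAddress("hypothetical-recipient")} // placeholder address bytes
	restricted := banktypes.NewSendAuthorization(spendLimit, allowed)

	fmt.Println(anyRecipient.MsgTypeURL(), restricted.SpendLimit)
}
```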
+* (x/bank) [#12593](https://github.com/cosmos/cosmos-sdk/pull/12593) Add `SpendableCoin` method to `BaseViewKeeper`.
+* (x/slashing) [#12581](https://github.com/cosmos/cosmos-sdk/pull/12581) Remove the `x/slashing` legacy querier.
+* (types) [#12355](https://github.com/cosmos/cosmos-sdk/pull/12355) Remove the compile-time `types.DBbackend` variable and its usage in `server/util.go`.
+* (x/gov) [#12368](https://github.com/cosmos/cosmos-sdk/pull/12369) The gov keeper is now passed by reference instead of by copy, to make post-construction mutation of hooks and proposal handlers possible at a framework level.
+* (simapp) [#12270](https://github.com/cosmos/cosmos-sdk/pull/12270) Remove the `invCheckPeriod uint` attribute from the `SimApp` struct as part of the migration of `x/crisis` to app wiring.
+* (simapp) [#12334](https://github.com/cosmos/cosmos-sdk/pull/12334) Move `simapp.ConvertAddrsToValAddrs` and `simapp.CreateTestPubKeys` to `simtestutil.ConvertAddrsToValAddrs` and `simtestutil.CreateTestPubKeys` respectively (`testutil/sims`).
+* (simapp) [#12312](https://github.com/cosmos/cosmos-sdk/pull/12312) Move `simapp.EmptyAppOptions` to `simtestutil.EmptyAppOptions` (`testutil/sims`).
+* (simapp) [#12312](https://github.com/cosmos/cosmos-sdk/pull/12312) Remove `skipUpgradeHeights map[int64]bool` and `homePath string` from the `NewSimApp` constructor as part of the migration of `x/upgrade` to app wiring.
+* (testutil) [#12278](https://github.com/cosmos/cosmos-sdk/pull/12278) Move all functions from `simapp/helpers` to `testutil/sims`.
+* (testutil) [#12233](https://github.com/cosmos/cosmos-sdk/pull/12233) Move `simapp.TestAddr` to `simtestutil.TestAddr` (`testutil/sims`).
+* (x/staking) [#12102](https://github.com/cosmos/cosmos-sdk/pull/12102) The staking keeper is now passed by reference instead of by copy. The keeper's `SetHooks` no longer returns the keeper; it updates the keeper in place instead.
+* (linting) [#12141](https://github.com/cosmos/cosmos-sdk/pull/12141) Fix usability-related linting for the database package. This removes the `Prefix` infix from names such as `prefix.NewPrefixWriter` (now `prefix.NewWriter`) and renames types such as `db.DBConnection` to `db.Connection`.
+* (x/distribution) [#12434](https://github.com/cosmos/cosmos-sdk/pull/12434) The `x/distribution` module's `SetParams` keeper method now returns an `error`.
+* (x/staking) [#12409](https://github.com/cosmos/cosmos-sdk/pull/12409) The `x/staking` module's `SetParams` keeper method now returns an `error`.
+* (x/crisis) [#12445](https://github.com/cosmos/cosmos-sdk/pull/12445) The `x/crisis` module's `SetConstantFee` keeper method now returns an `error`.
+* (x/gov) [#12631](https://github.com/cosmos/cosmos-sdk/pull/12631) Refactor `x/gov` to use a single `Params` struct instead of `DepositParams`, `TallyParams` & `VotingParams`.
+* (x/gov) [#12631](https://github.com/cosmos/cosmos-sdk/pull/12631) Migrate `x/gov` to self-managed parameters and deprecate its usage of `x/params`.
+* (x/bank) [#12630](https://github.com/cosmos/cosmos-sdk/pull/12630) The `x/bank` module's `SetParams` keeper method now returns an `error`.
+* (x/bank) [#11859](https://github.com/cosmos/cosmos-sdk/pull/11859) Move the `SendEnabled` information out of the bank `Params` and directly into the state store.
+  The information can now be accessed using the `BankKeeper`.
+  It can be set via a `MsgSetSendEnabled` message in a governance proposal.
+  A `SendEnabled` query has been added to both gRPC and the CLI.
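Similarly, for the `SendEnabled` move in #11859 just above, here is a hedged sketch of reading the flag through the bank keeper and of the governance message that changes it. It assumes the v0.47-era `IsSendEnabledDenom` keeper method and the `MsgSetSendEnabled` fields (`Authority`, `SendEnabled`, `UseDefaultFor`); the authority address is a placeholder.

```go
package main

import (
	"fmt"

	sdk "github.com/cosmos/cosmos-sdk/types"
	bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
	banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
)

// checkSendEnabled reads the per-denom flag directly from bank state
// instead of from Params.SendEnabled.
func checkSendEnabled(ctx sdk.Context, k bankkeeper.Keeper, denom string) bool {
	return k.IsSendEnabledDenom(ctx, denom)
}

// disableSendsMsg builds the governance message that turns sends off for one denom.
// authority stands in for the x/gov module account address.
func disableSendsMsg(authority, denom string) *banktypes.MsgSetSendEnabled {
	return &banktypes.MsgSetSendEnabled{
		Authority:     authority,
		SendEnabled:   []*banktypes.SendEnabled{{Denom: denom, Enabled: false}},
		UseDefaultFor: nil, // no denoms reset to the default
	}
}

func main() {
	msg := disableSendsMsg("<gov module account address>", "stake")
	fmt.Println(msg.String())
}
```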
+* (appModule) Remove `Route`, `QuerierRoute` and `LegacyQuerierHandler` from the `AppModule` interface.
+* (x/modules) Remove all legacy queriers and related code from modules.
+* (store) [#11825](https://github.com/cosmos/cosmos-sdk/pull/11825) Make the extension snapshotter interface safer to use and rename the util function `WriteExtensionItem` to `WriteExtensionPayload`.
+* (x/genutil) [#12956](https://github.com/cosmos/cosmos-sdk/pull/12956) `genutil.AppModuleBasic` has a new attribute: a genesis transaction validation function. The existing validation logic is implemented in `genutiltypes.DefaultMessageValidator`. Use `genutil.NewAppModuleBasic` to create a new genutil `AppModuleBasic`.
+* (codec) [#12964](https://github.com/cosmos/cosmos-sdk/pull/12964) `ProtoCodec.MarshalInterface` now returns an error when serializing unregistered types, for which a subsequent `ProtoCodec.UnmarshalInterface` would fail.
+* (x/staking) [#12973](https://github.com/cosmos/cosmos-sdk/pull/12973) Remove `stakingkeeper.RandomValidator`. Use `testutil.RandSliceElem(r, sk.GetAllValidators(ctx))` instead.
+* (x/gov) [#13160](https://github.com/cosmos/cosmos-sdk/pull/13160) Remove custom marshaling of proposals and vote options.
+* (types) [#13430](https://github.com/cosmos/cosmos-sdk/pull/13430) Remove the unused `ResponseCheckTx` and `ResponseDeliverTx` code.
+* (store) [#13529](https://github.com/cosmos/cosmos-sdk/pull/13529) Add the `LatestVersion` method to the `MultiStore` interface and the `SetQueryMultiStore` method to baseapp to support an alternative `MultiStore` implementation for the query service.
+* (pruning) [#13609](https://github.com/cosmos/cosmos-sdk/pull/13609) Move the pruning package under the store package.
+* [#13794](https://github.com/cosmos/cosmos-sdk/pull/13794) Most methods on `types/module.AppModule` have been moved to extension interfaces. `module.Manager.Modules` is now of type `map[string]interface{}` to support the new `cosmossdk.io/core/appmodule.AppModule` API in parallel.
+* (signing) [#13701](https://github.com/cosmos/cosmos-sdk/pull/) Add `context.Context` as an argument to `x/auth/signing.VerifySignature`.
+* (x/group) [#13876](https://github.com/cosmos/cosmos-sdk/pull/13876) Add the `GetMinExecutionPeriod` method to the `DecisionPolicy` interface.
+* (x/auth) [#13780](https://github.com/cosmos/cosmos-sdk/pull/13780) Querying with `id` (of type `int64`) in the `AccountAddressByID` gRPC query now returns an error; use `account-id` (of type `uint64`) instead.
+* (snapshots) [#14048](https://github.com/cosmos/cosmos-sdk/pull/14048) Move the Snapshot package to the store package. This is done in an effort to group all storage-related logic under one package.
+* (baseapp) [#14050](https://github.com/cosmos/cosmos-sdk/pull/14050) Refactor the `ABCIListener` interface to accept Go contexts.
+* (store) [#13516](https://github.com/cosmos/cosmos-sdk/pull/13516) Update the State Streaming APIs:
+  * Add method `ListenCommit` to `ABCIListener`
+  * Move the `ListeningEnabled` and `AddListener` methods to `CommitMultiStore`
+  * Remove `CacheWrapWithListeners` from the `CacheWrap` and `CacheWrapper` interfaces
+  * Remove listening APIs from the caching layer (it should only listen to the `rootmulti.Store`)
+  * Add three new options to the file streaming service constructor.
+ * Modify `ABCIListener` such that any error from any method will always halt the app via `panic` +>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516)) ## [v0.46.6](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.46.6) - 2022-11-18 @@ -59,6 +128,19 @@ Ref: https://keepachangelog.com/en/1.0.0/ ### Bug Fixes * (x/gov) [#13918](https://github.com/cosmos/cosmos-sdk/pull/13918) Fix propagation of message errors when executing a proposal. +<<<<<<< HEAD +======= +* (baseapp)[#14049](https://github.com/cosmos/cosmos-sdk/pull/14049) Fix state sync when interval is zero. +* (store) [#13516](https://github.com/cosmos/cosmos-sdk/pull/13516) Fix state listener that was observing writes at wrong time. + +### Deprecated + +* (x/evidence) [#13740](https://github.com/cosmos/cosmos-sdk/pull/13740) The `evidence_hash` field of `QueryEvidenceRequest` has been deprecated and now contains a new field `hash` with type `string`. +* (x/bank) [#11859](https://github.com/cosmos/cosmos-sdk/pull/11859) The Params.SendEnabled field is deprecated and unusable. + The information can now be accessed using the BankKeeper. + Setting can be done using MsgSetSendEnabled as a governance proposal. + A SendEnabled query has been added to both GRPC and CLI. +>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516)) ## [v0.46.5](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.46.5) - 2022-11-17 diff --git a/api/cosmos/base/store/v1beta1/listening.pulsar.go b/api/cosmos/base/store/v1beta1/listening.pulsar.go new file mode 100644 index 000000000000..47eaf7b0fc13 --- /dev/null +++ b/api/cosmos/base/store/v1beta1/listening.pulsar.go @@ -0,0 +1,2399 @@ +// Code generated by protoc-gen-go-pulsar. DO NOT EDIT. +package storev1beta1 + +import ( + abci "cosmossdk.io/api/tendermint/abci" + fmt "fmt" + runtime "github.com/cosmos/cosmos-proto/runtime" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoiface "google.golang.org/protobuf/runtime/protoiface" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" + reflect "reflect" + sync "sync" +) + +var ( + md_StoreKVPair protoreflect.MessageDescriptor + fd_StoreKVPair_store_key protoreflect.FieldDescriptor + fd_StoreKVPair_delete protoreflect.FieldDescriptor + fd_StoreKVPair_key protoreflect.FieldDescriptor + fd_StoreKVPair_value protoreflect.FieldDescriptor +) + +func init() { + file_cosmos_base_store_v1beta1_listening_proto_init() + md_StoreKVPair = File_cosmos_base_store_v1beta1_listening_proto.Messages().ByName("StoreKVPair") + fd_StoreKVPair_store_key = md_StoreKVPair.Fields().ByName("store_key") + fd_StoreKVPair_delete = md_StoreKVPair.Fields().ByName("delete") + fd_StoreKVPair_key = md_StoreKVPair.Fields().ByName("key") + fd_StoreKVPair_value = md_StoreKVPair.Fields().ByName("value") +} + +var _ protoreflect.Message = (*fastReflection_StoreKVPair)(nil) + +type fastReflection_StoreKVPair StoreKVPair + +func (x *StoreKVPair) ProtoReflect() protoreflect.Message { + return (*fastReflection_StoreKVPair)(x) +} + +func (x *StoreKVPair) slowProtoReflect() protoreflect.Message { + mi := &file_cosmos_base_store_v1beta1_listening_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +var _fastReflection_StoreKVPair_messageType fastReflection_StoreKVPair_messageType +var _ protoreflect.MessageType = 
fastReflection_StoreKVPair_messageType{} + +type fastReflection_StoreKVPair_messageType struct{} + +func (x fastReflection_StoreKVPair_messageType) Zero() protoreflect.Message { + return (*fastReflection_StoreKVPair)(nil) +} +func (x fastReflection_StoreKVPair_messageType) New() protoreflect.Message { + return new(fastReflection_StoreKVPair) +} +func (x fastReflection_StoreKVPair_messageType) Descriptor() protoreflect.MessageDescriptor { + return md_StoreKVPair +} + +// Descriptor returns message descriptor, which contains only the protobuf +// type information for the message. +func (x *fastReflection_StoreKVPair) Descriptor() protoreflect.MessageDescriptor { + return md_StoreKVPair +} + +// Type returns the message type, which encapsulates both Go and protobuf +// type information. If the Go type information is not needed, +// it is recommended that the message descriptor be used instead. +func (x *fastReflection_StoreKVPair) Type() protoreflect.MessageType { + return _fastReflection_StoreKVPair_messageType +} + +// New returns a newly allocated and mutable empty message. +func (x *fastReflection_StoreKVPair) New() protoreflect.Message { + return new(fastReflection_StoreKVPair) +} + +// Interface unwraps the message reflection interface and +// returns the underlying ProtoMessage interface. +func (x *fastReflection_StoreKVPair) Interface() protoreflect.ProtoMessage { + return (*StoreKVPair)(x) +} + +// Range iterates over every populated field in an undefined order, +// calling f for each field descriptor and value encountered. +// Range returns immediately if f returns false. +// While iterating, mutating operations may only be performed +// on the current field descriptor. +func (x *fastReflection_StoreKVPair) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if x.StoreKey != "" { + value := protoreflect.ValueOfString(x.StoreKey) + if !f(fd_StoreKVPair_store_key, value) { + return + } + } + if x.Delete != false { + value := protoreflect.ValueOfBool(x.Delete) + if !f(fd_StoreKVPair_delete, value) { + return + } + } + if len(x.Key) != 0 { + value := protoreflect.ValueOfBytes(x.Key) + if !f(fd_StoreKVPair_key, value) { + return + } + } + if len(x.Value) != 0 { + value := protoreflect.ValueOfBytes(x.Value) + if !f(fd_StoreKVPair_value, value) { + return + } + } +} + +// Has reports whether a field is populated. +// +// Some fields have the property of nullability where it is possible to +// distinguish between the default value of a field and whether the field +// was explicitly populated with the default value. Singular message fields, +// member fields of a oneof, and proto2 scalar fields are nullable. Such +// fields are populated only if explicitly set. +// +// In other cases (aside from the nullable cases above), +// a proto3 scalar field is populated if it contains a non-zero value, and +// a repeated field is populated if it is non-empty. 
+func (x *fastReflection_StoreKVPair) Has(fd protoreflect.FieldDescriptor) bool { + switch fd.FullName() { + case "cosmos.base.store.v1beta1.StoreKVPair.store_key": + return x.StoreKey != "" + case "cosmos.base.store.v1beta1.StoreKVPair.delete": + return x.Delete != false + case "cosmos.base.store.v1beta1.StoreKVPair.key": + return len(x.Key) != 0 + case "cosmos.base.store.v1beta1.StoreKVPair.value": + return len(x.Value) != 0 + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.base.store.v1beta1.StoreKVPair")) + } + panic(fmt.Errorf("message cosmos.base.store.v1beta1.StoreKVPair does not contain field %s", fd.FullName())) + } +} + +// Clear clears the field such that a subsequent Has call reports false. +// +// Clearing an extension field clears both the extension type and value +// associated with the given field number. +// +// Clear is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_StoreKVPair) Clear(fd protoreflect.FieldDescriptor) { + switch fd.FullName() { + case "cosmos.base.store.v1beta1.StoreKVPair.store_key": + x.StoreKey = "" + case "cosmos.base.store.v1beta1.StoreKVPair.delete": + x.Delete = false + case "cosmos.base.store.v1beta1.StoreKVPair.key": + x.Key = nil + case "cosmos.base.store.v1beta1.StoreKVPair.value": + x.Value = nil + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.base.store.v1beta1.StoreKVPair")) + } + panic(fmt.Errorf("message cosmos.base.store.v1beta1.StoreKVPair does not contain field %s", fd.FullName())) + } +} + +// Get retrieves the value for a field. +// +// For unpopulated scalars, it returns the default value, where +// the default value of a bytes scalar is guaranteed to be a copy. +// For unpopulated composite types, it returns an empty, read-only view +// of the value; to obtain a mutable reference, use Mutable. +func (x *fastReflection_StoreKVPair) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value { + switch descriptor.FullName() { + case "cosmos.base.store.v1beta1.StoreKVPair.store_key": + value := x.StoreKey + return protoreflect.ValueOfString(value) + case "cosmos.base.store.v1beta1.StoreKVPair.delete": + value := x.Delete + return protoreflect.ValueOfBool(value) + case "cosmos.base.store.v1beta1.StoreKVPair.key": + value := x.Key + return protoreflect.ValueOfBytes(value) + case "cosmos.base.store.v1beta1.StoreKVPair.value": + value := x.Value + return protoreflect.ValueOfBytes(value) + default: + if descriptor.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.base.store.v1beta1.StoreKVPair")) + } + panic(fmt.Errorf("message cosmos.base.store.v1beta1.StoreKVPair does not contain field %s", descriptor.FullName())) + } +} + +// Set stores the value for a field. +// +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType. +// When setting a composite type, it is unspecified whether the stored value +// aliases the source's memory in any way. If the composite value is an +// empty, read-only value, then it panics. +// +// Set is a mutating operation and unsafe for concurrent use. 
+func (x *fastReflection_StoreKVPair) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) { + switch fd.FullName() { + case "cosmos.base.store.v1beta1.StoreKVPair.store_key": + x.StoreKey = value.Interface().(string) + case "cosmos.base.store.v1beta1.StoreKVPair.delete": + x.Delete = value.Bool() + case "cosmos.base.store.v1beta1.StoreKVPair.key": + x.Key = value.Bytes() + case "cosmos.base.store.v1beta1.StoreKVPair.value": + x.Value = value.Bytes() + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.base.store.v1beta1.StoreKVPair")) + } + panic(fmt.Errorf("message cosmos.base.store.v1beta1.StoreKVPair does not contain field %s", fd.FullName())) + } +} + +// Mutable returns a mutable reference to a composite type. +// +// If the field is unpopulated, it may allocate a composite value. +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType +// if not already stored. +// It panics if the field does not contain a composite type. +// +// Mutable is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_StoreKVPair) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "cosmos.base.store.v1beta1.StoreKVPair.store_key": + panic(fmt.Errorf("field store_key of message cosmos.base.store.v1beta1.StoreKVPair is not mutable")) + case "cosmos.base.store.v1beta1.StoreKVPair.delete": + panic(fmt.Errorf("field delete of message cosmos.base.store.v1beta1.StoreKVPair is not mutable")) + case "cosmos.base.store.v1beta1.StoreKVPair.key": + panic(fmt.Errorf("field key of message cosmos.base.store.v1beta1.StoreKVPair is not mutable")) + case "cosmos.base.store.v1beta1.StoreKVPair.value": + panic(fmt.Errorf("field value of message cosmos.base.store.v1beta1.StoreKVPair is not mutable")) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.base.store.v1beta1.StoreKVPair")) + } + panic(fmt.Errorf("message cosmos.base.store.v1beta1.StoreKVPair does not contain field %s", fd.FullName())) + } +} + +// NewField returns a new value that is assignable to the field +// for the given descriptor. For scalars, this returns the default value. +// For lists, maps, and messages, this returns a new, empty, mutable value. +func (x *fastReflection_StoreKVPair) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "cosmos.base.store.v1beta1.StoreKVPair.store_key": + return protoreflect.ValueOfString("") + case "cosmos.base.store.v1beta1.StoreKVPair.delete": + return protoreflect.ValueOfBool(false) + case "cosmos.base.store.v1beta1.StoreKVPair.key": + return protoreflect.ValueOfBytes(nil) + case "cosmos.base.store.v1beta1.StoreKVPair.value": + return protoreflect.ValueOfBytes(nil) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.base.store.v1beta1.StoreKVPair")) + } + panic(fmt.Errorf("message cosmos.base.store.v1beta1.StoreKVPair does not contain field %s", fd.FullName())) + } +} + +// WhichOneof reports which field within the oneof is populated, +// returning nil if none are populated. +// It panics if the oneof descriptor does not belong to this message. 
+func (x *fastReflection_StoreKVPair) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + switch d.FullName() { + default: + panic(fmt.Errorf("%s is not a oneof field in cosmos.base.store.v1beta1.StoreKVPair", d.FullName())) + } + panic("unreachable") +} + +// GetUnknown retrieves the entire list of unknown fields. +// The caller may only mutate the contents of the RawFields +// if the mutated bytes are stored back into the message with SetUnknown. +func (x *fastReflection_StoreKVPair) GetUnknown() protoreflect.RawFields { + return x.unknownFields +} + +// SetUnknown stores an entire list of unknown fields. +// The raw fields must be syntactically valid according to the wire format. +// An implementation may panic if this is not the case. +// Once stored, the caller must not mutate the content of the RawFields. +// An empty RawFields may be passed to clear the fields. +// +// SetUnknown is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_StoreKVPair) SetUnknown(fields protoreflect.RawFields) { + x.unknownFields = fields +} + +// IsValid reports whether the message is valid. +// +// An invalid message is an empty, read-only value. +// +// An invalid message often corresponds to a nil pointer of the concrete +// message type, but the details are implementation dependent. +// Validity is not part of the protobuf data model, and may not +// be preserved in marshaling or other operations. +func (x *fastReflection_StoreKVPair) IsValid() bool { + return x != nil +} + +// ProtoMethods returns optional fastReflectionFeature-path implementations of various operations. +// This method may return nil. +// +// The returned methods type is identical to +// "google.golang.org/protobuf/runtime/protoiface".Methods. +// Consult the protoiface package documentation for details. 
+func (x *fastReflection_StoreKVPair) ProtoMethods() *protoiface.Methods { + size := func(input protoiface.SizeInput) protoiface.SizeOutput { + x := input.Message.Interface().(*StoreKVPair) + if x == nil { + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: 0, + } + } + options := runtime.SizeInputToOptions(input) + _ = options + var n int + var l int + _ = l + l = len(x.StoreKey) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.Delete { + n += 2 + } + l = len(x.Key) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) + } + l = len(x.Value) + if l > 0 { + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.unknownFields != nil { + n += len(x.unknownFields) + } + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: n, + } + } + + marshal := func(input protoiface.MarshalInput) (protoiface.MarshalOutput, error) { + x := input.Message.Interface().(*StoreKVPair) + if x == nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + options := runtime.MarshalInputToOptions(input) + _ = options + size := options.Size(x) + dAtA := make([]byte, size) + i := len(dAtA) + _ = i + var l int + _ = l + if x.unknownFields != nil { + i -= len(x.unknownFields) + copy(dAtA[i:], x.unknownFields) + } + if len(x.Value) > 0 { + i -= len(x.Value) + copy(dAtA[i:], x.Value) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.Value))) + i-- + dAtA[i] = 0x22 + } + if len(x.Key) > 0 { + i -= len(x.Key) + copy(dAtA[i:], x.Key) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.Key))) + i-- + dAtA[i] = 0x1a + } + if x.Delete { + i-- + if x.Delete { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(x.StoreKey) > 0 { + i -= len(x.StoreKey) + copy(dAtA[i:], x.StoreKey) + i = runtime.EncodeVarint(dAtA, i, uint64(len(x.StoreKey))) + i-- + dAtA[i] = 0xa + } + if input.Buf != nil { + input.Buf = append(input.Buf, dAtA...) 
+ } else { + input.Buf = dAtA + } + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + unmarshal := func(input protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + x := input.Message.Interface().(*StoreKVPair) + if x == nil { + return protoiface.UnmarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Flags: input.Flags, + }, nil + } + options := runtime.UnmarshalInputToOptions(input) + _ = options + dAtA := input.Buf + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: StoreKVPair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: StoreKVPair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field StoreKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.StoreKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Delete", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + x.Delete = bool(v != 0) + case 3: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong 
wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.Key = append(x.Key[:0], dAtA[iNdEx:postIndex]...) + if x.Key == nil { + x.Key = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.Value = append(x.Value[:0], dAtA[iNdEx:postIndex]...) + if x.Value == nil { + x.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := runtime.Skip(dAtA[iNdEx:]) + if err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if !options.DiscardUnknown { + x.unknownFields = append(x.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ } + iNdEx += skippy + } + } + + if iNdEx > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, nil + } + return &protoiface.Methods{ + NoUnkeyedLiterals: struct{}{}, + Flags: protoiface.SupportMarshalDeterministic | protoiface.SupportUnmarshalDiscardUnknown, + Size: size, + Marshal: marshal, + Unmarshal: unmarshal, + Merge: nil, + CheckInitialized: nil, + } +} + +var _ protoreflect.List = (*_BlockMetadata_3_list)(nil) + +type _BlockMetadata_3_list struct { + list *[]*BlockMetadata_DeliverTx +} + +func (x *_BlockMetadata_3_list) Len() int { + if x.list == nil { + return 0 + } + return len(*x.list) +} + +func (x *_BlockMetadata_3_list) Get(i int) protoreflect.Value { + return protoreflect.ValueOfMessage((*x.list)[i].ProtoReflect()) +} + +func (x *_BlockMetadata_3_list) Set(i int, value protoreflect.Value) { + valueUnwrapped := value.Message() + concreteValue := valueUnwrapped.Interface().(*BlockMetadata_DeliverTx) + (*x.list)[i] = concreteValue +} + +func (x *_BlockMetadata_3_list) Append(value protoreflect.Value) { + valueUnwrapped := value.Message() + concreteValue := valueUnwrapped.Interface().(*BlockMetadata_DeliverTx) + *x.list = append(*x.list, concreteValue) +} + +func (x *_BlockMetadata_3_list) AppendMutable() protoreflect.Value { + v := new(BlockMetadata_DeliverTx) + *x.list = append(*x.list, v) + return protoreflect.ValueOfMessage(v.ProtoReflect()) +} + +func (x *_BlockMetadata_3_list) Truncate(n int) { + for i := n; i < len(*x.list); i++ { + (*x.list)[i] = nil + } + *x.list = (*x.list)[:n] +} + +func (x *_BlockMetadata_3_list) NewElement() protoreflect.Value { + v := new(BlockMetadata_DeliverTx) + return protoreflect.ValueOfMessage(v.ProtoReflect()) +} + +func (x *_BlockMetadata_3_list) IsValid() bool { + return x.list != nil +} + +var ( + md_BlockMetadata protoreflect.MessageDescriptor + fd_BlockMetadata_request_begin_block protoreflect.FieldDescriptor + fd_BlockMetadata_response_begin_block protoreflect.FieldDescriptor + fd_BlockMetadata_deliver_txs protoreflect.FieldDescriptor + fd_BlockMetadata_request_end_block protoreflect.FieldDescriptor + fd_BlockMetadata_response_end_block protoreflect.FieldDescriptor + fd_BlockMetadata_response_commit protoreflect.FieldDescriptor +) + +func init() { + file_cosmos_base_store_v1beta1_listening_proto_init() + md_BlockMetadata = File_cosmos_base_store_v1beta1_listening_proto.Messages().ByName("BlockMetadata") + fd_BlockMetadata_request_begin_block = md_BlockMetadata.Fields().ByName("request_begin_block") + fd_BlockMetadata_response_begin_block = md_BlockMetadata.Fields().ByName("response_begin_block") + fd_BlockMetadata_deliver_txs = md_BlockMetadata.Fields().ByName("deliver_txs") + fd_BlockMetadata_request_end_block = md_BlockMetadata.Fields().ByName("request_end_block") + fd_BlockMetadata_response_end_block = md_BlockMetadata.Fields().ByName("response_end_block") + fd_BlockMetadata_response_commit = md_BlockMetadata.Fields().ByName("response_commit") +} + +var _ protoreflect.Message = (*fastReflection_BlockMetadata)(nil) + +type fastReflection_BlockMetadata BlockMetadata + +func (x *BlockMetadata) ProtoReflect() protoreflect.Message { + return (*fastReflection_BlockMetadata)(x) +} + +func (x *BlockMetadata) slowProtoReflect() protoreflect.Message { + mi := &file_cosmos_base_store_v1beta1_listening_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms 
:= protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +var _fastReflection_BlockMetadata_messageType fastReflection_BlockMetadata_messageType +var _ protoreflect.MessageType = fastReflection_BlockMetadata_messageType{} + +type fastReflection_BlockMetadata_messageType struct{} + +func (x fastReflection_BlockMetadata_messageType) Zero() protoreflect.Message { + return (*fastReflection_BlockMetadata)(nil) +} +func (x fastReflection_BlockMetadata_messageType) New() protoreflect.Message { + return new(fastReflection_BlockMetadata) +} +func (x fastReflection_BlockMetadata_messageType) Descriptor() protoreflect.MessageDescriptor { + return md_BlockMetadata +} + +// Descriptor returns message descriptor, which contains only the protobuf +// type information for the message. +func (x *fastReflection_BlockMetadata) Descriptor() protoreflect.MessageDescriptor { + return md_BlockMetadata +} + +// Type returns the message type, which encapsulates both Go and protobuf +// type information. If the Go type information is not needed, +// it is recommended that the message descriptor be used instead. +func (x *fastReflection_BlockMetadata) Type() protoreflect.MessageType { + return _fastReflection_BlockMetadata_messageType +} + +// New returns a newly allocated and mutable empty message. +func (x *fastReflection_BlockMetadata) New() protoreflect.Message { + return new(fastReflection_BlockMetadata) +} + +// Interface unwraps the message reflection interface and +// returns the underlying ProtoMessage interface. +func (x *fastReflection_BlockMetadata) Interface() protoreflect.ProtoMessage { + return (*BlockMetadata)(x) +} + +// Range iterates over every populated field in an undefined order, +// calling f for each field descriptor and value encountered. +// Range returns immediately if f returns false. +// While iterating, mutating operations may only be performed +// on the current field descriptor. +func (x *fastReflection_BlockMetadata) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if x.RequestBeginBlock != nil { + value := protoreflect.ValueOfMessage(x.RequestBeginBlock.ProtoReflect()) + if !f(fd_BlockMetadata_request_begin_block, value) { + return + } + } + if x.ResponseBeginBlock != nil { + value := protoreflect.ValueOfMessage(x.ResponseBeginBlock.ProtoReflect()) + if !f(fd_BlockMetadata_response_begin_block, value) { + return + } + } + if len(x.DeliverTxs) != 0 { + value := protoreflect.ValueOfList(&_BlockMetadata_3_list{list: &x.DeliverTxs}) + if !f(fd_BlockMetadata_deliver_txs, value) { + return + } + } + if x.RequestEndBlock != nil { + value := protoreflect.ValueOfMessage(x.RequestEndBlock.ProtoReflect()) + if !f(fd_BlockMetadata_request_end_block, value) { + return + } + } + if x.ResponseEndBlock != nil { + value := protoreflect.ValueOfMessage(x.ResponseEndBlock.ProtoReflect()) + if !f(fd_BlockMetadata_response_end_block, value) { + return + } + } + if x.ResponseCommit != nil { + value := protoreflect.ValueOfMessage(x.ResponseCommit.ProtoReflect()) + if !f(fd_BlockMetadata_response_commit, value) { + return + } + } +} + +// Has reports whether a field is populated. +// +// Some fields have the property of nullability where it is possible to +// distinguish between the default value of a field and whether the field +// was explicitly populated with the default value. Singular message fields, +// member fields of a oneof, and proto2 scalar fields are nullable. 
Such +// fields are populated only if explicitly set. +// +// In other cases (aside from the nullable cases above), +// a proto3 scalar field is populated if it contains a non-zero value, and +// a repeated field is populated if it is non-empty. +func (x *fastReflection_BlockMetadata) Has(fd protoreflect.FieldDescriptor) bool { + switch fd.FullName() { + case "cosmos.base.store.v1beta1.BlockMetadata.request_begin_block": + return x.RequestBeginBlock != nil + case "cosmos.base.store.v1beta1.BlockMetadata.response_begin_block": + return x.ResponseBeginBlock != nil + case "cosmos.base.store.v1beta1.BlockMetadata.deliver_txs": + return len(x.DeliverTxs) != 0 + case "cosmos.base.store.v1beta1.BlockMetadata.request_end_block": + return x.RequestEndBlock != nil + case "cosmos.base.store.v1beta1.BlockMetadata.response_end_block": + return x.ResponseEndBlock != nil + case "cosmos.base.store.v1beta1.BlockMetadata.response_commit": + return x.ResponseCommit != nil + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.base.store.v1beta1.BlockMetadata")) + } + panic(fmt.Errorf("message cosmos.base.store.v1beta1.BlockMetadata does not contain field %s", fd.FullName())) + } +} + +// Clear clears the field such that a subsequent Has call reports false. +// +// Clearing an extension field clears both the extension type and value +// associated with the given field number. +// +// Clear is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_BlockMetadata) Clear(fd protoreflect.FieldDescriptor) { + switch fd.FullName() { + case "cosmos.base.store.v1beta1.BlockMetadata.request_begin_block": + x.RequestBeginBlock = nil + case "cosmos.base.store.v1beta1.BlockMetadata.response_begin_block": + x.ResponseBeginBlock = nil + case "cosmos.base.store.v1beta1.BlockMetadata.deliver_txs": + x.DeliverTxs = nil + case "cosmos.base.store.v1beta1.BlockMetadata.request_end_block": + x.RequestEndBlock = nil + case "cosmos.base.store.v1beta1.BlockMetadata.response_end_block": + x.ResponseEndBlock = nil + case "cosmos.base.store.v1beta1.BlockMetadata.response_commit": + x.ResponseCommit = nil + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.base.store.v1beta1.BlockMetadata")) + } + panic(fmt.Errorf("message cosmos.base.store.v1beta1.BlockMetadata does not contain field %s", fd.FullName())) + } +} + +// Get retrieves the value for a field. +// +// For unpopulated scalars, it returns the default value, where +// the default value of a bytes scalar is guaranteed to be a copy. +// For unpopulated composite types, it returns an empty, read-only view +// of the value; to obtain a mutable reference, use Mutable. 
+func (x *fastReflection_BlockMetadata) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value { + switch descriptor.FullName() { + case "cosmos.base.store.v1beta1.BlockMetadata.request_begin_block": + value := x.RequestBeginBlock + return protoreflect.ValueOfMessage(value.ProtoReflect()) + case "cosmos.base.store.v1beta1.BlockMetadata.response_begin_block": + value := x.ResponseBeginBlock + return protoreflect.ValueOfMessage(value.ProtoReflect()) + case "cosmos.base.store.v1beta1.BlockMetadata.deliver_txs": + if len(x.DeliverTxs) == 0 { + return protoreflect.ValueOfList(&_BlockMetadata_3_list{}) + } + listValue := &_BlockMetadata_3_list{list: &x.DeliverTxs} + return protoreflect.ValueOfList(listValue) + case "cosmos.base.store.v1beta1.BlockMetadata.request_end_block": + value := x.RequestEndBlock + return protoreflect.ValueOfMessage(value.ProtoReflect()) + case "cosmos.base.store.v1beta1.BlockMetadata.response_end_block": + value := x.ResponseEndBlock + return protoreflect.ValueOfMessage(value.ProtoReflect()) + case "cosmos.base.store.v1beta1.BlockMetadata.response_commit": + value := x.ResponseCommit + return protoreflect.ValueOfMessage(value.ProtoReflect()) + default: + if descriptor.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.base.store.v1beta1.BlockMetadata")) + } + panic(fmt.Errorf("message cosmos.base.store.v1beta1.BlockMetadata does not contain field %s", descriptor.FullName())) + } +} + +// Set stores the value for a field. +// +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType. +// When setting a composite type, it is unspecified whether the stored value +// aliases the source's memory in any way. If the composite value is an +// empty, read-only value, then it panics. +// +// Set is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_BlockMetadata) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) { + switch fd.FullName() { + case "cosmos.base.store.v1beta1.BlockMetadata.request_begin_block": + x.RequestBeginBlock = value.Message().Interface().(*abci.RequestBeginBlock) + case "cosmos.base.store.v1beta1.BlockMetadata.response_begin_block": + x.ResponseBeginBlock = value.Message().Interface().(*abci.ResponseBeginBlock) + case "cosmos.base.store.v1beta1.BlockMetadata.deliver_txs": + lv := value.List() + clv := lv.(*_BlockMetadata_3_list) + x.DeliverTxs = *clv.list + case "cosmos.base.store.v1beta1.BlockMetadata.request_end_block": + x.RequestEndBlock = value.Message().Interface().(*abci.RequestEndBlock) + case "cosmos.base.store.v1beta1.BlockMetadata.response_end_block": + x.ResponseEndBlock = value.Message().Interface().(*abci.ResponseEndBlock) + case "cosmos.base.store.v1beta1.BlockMetadata.response_commit": + x.ResponseCommit = value.Message().Interface().(*abci.ResponseCommit) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.base.store.v1beta1.BlockMetadata")) + } + panic(fmt.Errorf("message cosmos.base.store.v1beta1.BlockMetadata does not contain field %s", fd.FullName())) + } +} + +// Mutable returns a mutable reference to a composite type. +// +// If the field is unpopulated, it may allocate a composite value. +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. 
+// For extension fields, it implicitly stores the provided ExtensionType +// if not already stored. +// It panics if the field does not contain a composite type. +// +// Mutable is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_BlockMetadata) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "cosmos.base.store.v1beta1.BlockMetadata.request_begin_block": + if x.RequestBeginBlock == nil { + x.RequestBeginBlock = new(abci.RequestBeginBlock) + } + return protoreflect.ValueOfMessage(x.RequestBeginBlock.ProtoReflect()) + case "cosmos.base.store.v1beta1.BlockMetadata.response_begin_block": + if x.ResponseBeginBlock == nil { + x.ResponseBeginBlock = new(abci.ResponseBeginBlock) + } + return protoreflect.ValueOfMessage(x.ResponseBeginBlock.ProtoReflect()) + case "cosmos.base.store.v1beta1.BlockMetadata.deliver_txs": + if x.DeliverTxs == nil { + x.DeliverTxs = []*BlockMetadata_DeliverTx{} + } + value := &_BlockMetadata_3_list{list: &x.DeliverTxs} + return protoreflect.ValueOfList(value) + case "cosmos.base.store.v1beta1.BlockMetadata.request_end_block": + if x.RequestEndBlock == nil { + x.RequestEndBlock = new(abci.RequestEndBlock) + } + return protoreflect.ValueOfMessage(x.RequestEndBlock.ProtoReflect()) + case "cosmos.base.store.v1beta1.BlockMetadata.response_end_block": + if x.ResponseEndBlock == nil { + x.ResponseEndBlock = new(abci.ResponseEndBlock) + } + return protoreflect.ValueOfMessage(x.ResponseEndBlock.ProtoReflect()) + case "cosmos.base.store.v1beta1.BlockMetadata.response_commit": + if x.ResponseCommit == nil { + x.ResponseCommit = new(abci.ResponseCommit) + } + return protoreflect.ValueOfMessage(x.ResponseCommit.ProtoReflect()) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.base.store.v1beta1.BlockMetadata")) + } + panic(fmt.Errorf("message cosmos.base.store.v1beta1.BlockMetadata does not contain field %s", fd.FullName())) + } +} + +// NewField returns a new value that is assignable to the field +// for the given descriptor. For scalars, this returns the default value. +// For lists, maps, and messages, this returns a new, empty, mutable value. 
+func (x *fastReflection_BlockMetadata) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "cosmos.base.store.v1beta1.BlockMetadata.request_begin_block": + m := new(abci.RequestBeginBlock) + return protoreflect.ValueOfMessage(m.ProtoReflect()) + case "cosmos.base.store.v1beta1.BlockMetadata.response_begin_block": + m := new(abci.ResponseBeginBlock) + return protoreflect.ValueOfMessage(m.ProtoReflect()) + case "cosmos.base.store.v1beta1.BlockMetadata.deliver_txs": + list := []*BlockMetadata_DeliverTx{} + return protoreflect.ValueOfList(&_BlockMetadata_3_list{list: &list}) + case "cosmos.base.store.v1beta1.BlockMetadata.request_end_block": + m := new(abci.RequestEndBlock) + return protoreflect.ValueOfMessage(m.ProtoReflect()) + case "cosmos.base.store.v1beta1.BlockMetadata.response_end_block": + m := new(abci.ResponseEndBlock) + return protoreflect.ValueOfMessage(m.ProtoReflect()) + case "cosmos.base.store.v1beta1.BlockMetadata.response_commit": + m := new(abci.ResponseCommit) + return protoreflect.ValueOfMessage(m.ProtoReflect()) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.base.store.v1beta1.BlockMetadata")) + } + panic(fmt.Errorf("message cosmos.base.store.v1beta1.BlockMetadata does not contain field %s", fd.FullName())) + } +} + +// WhichOneof reports which field within the oneof is populated, +// returning nil if none are populated. +// It panics if the oneof descriptor does not belong to this message. +func (x *fastReflection_BlockMetadata) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + switch d.FullName() { + default: + panic(fmt.Errorf("%s is not a oneof field in cosmos.base.store.v1beta1.BlockMetadata", d.FullName())) + } + panic("unreachable") +} + +// GetUnknown retrieves the entire list of unknown fields. +// The caller may only mutate the contents of the RawFields +// if the mutated bytes are stored back into the message with SetUnknown. +func (x *fastReflection_BlockMetadata) GetUnknown() protoreflect.RawFields { + return x.unknownFields +} + +// SetUnknown stores an entire list of unknown fields. +// The raw fields must be syntactically valid according to the wire format. +// An implementation may panic if this is not the case. +// Once stored, the caller must not mutate the content of the RawFields. +// An empty RawFields may be passed to clear the fields. +// +// SetUnknown is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_BlockMetadata) SetUnknown(fields protoreflect.RawFields) { + x.unknownFields = fields +} + +// IsValid reports whether the message is valid. +// +// An invalid message is an empty, read-only value. +// +// An invalid message often corresponds to a nil pointer of the concrete +// message type, but the details are implementation dependent. +// Validity is not part of the protobuf data model, and may not +// be preserved in marshaling or other operations. +func (x *fastReflection_BlockMetadata) IsValid() bool { + return x != nil +} + +// ProtoMethods returns optional fastReflectionFeature-path implementations of various operations. +// This method may return nil. +// +// The returned methods type is identical to +// "google.golang.org/protobuf/runtime/protoiface".Methods. +// Consult the protoiface package documentation for details. 
+func (x *fastReflection_BlockMetadata) ProtoMethods() *protoiface.Methods { + size := func(input protoiface.SizeInput) protoiface.SizeOutput { + x := input.Message.Interface().(*BlockMetadata) + if x == nil { + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: 0, + } + } + options := runtime.SizeInputToOptions(input) + _ = options + var n int + var l int + _ = l + if x.RequestBeginBlock != nil { + l = options.Size(x.RequestBeginBlock) + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.ResponseBeginBlock != nil { + l = options.Size(x.ResponseBeginBlock) + n += 1 + l + runtime.Sov(uint64(l)) + } + if len(x.DeliverTxs) > 0 { + for _, e := range x.DeliverTxs { + l = options.Size(e) + n += 1 + l + runtime.Sov(uint64(l)) + } + } + if x.RequestEndBlock != nil { + l = options.Size(x.RequestEndBlock) + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.ResponseEndBlock != nil { + l = options.Size(x.ResponseEndBlock) + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.ResponseCommit != nil { + l = options.Size(x.ResponseCommit) + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.unknownFields != nil { + n += len(x.unknownFields) + } + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: n, + } + } + + marshal := func(input protoiface.MarshalInput) (protoiface.MarshalOutput, error) { + x := input.Message.Interface().(*BlockMetadata) + if x == nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + options := runtime.MarshalInputToOptions(input) + _ = options + size := options.Size(x) + dAtA := make([]byte, size) + i := len(dAtA) + _ = i + var l int + _ = l + if x.unknownFields != nil { + i -= len(x.unknownFields) + copy(dAtA[i:], x.unknownFields) + } + if x.ResponseCommit != nil { + encoded, err := options.Marshal(x.ResponseCommit) + if err != nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = runtime.EncodeVarint(dAtA, i, uint64(len(encoded))) + i-- + dAtA[i] = 0x32 + } + if x.ResponseEndBlock != nil { + encoded, err := options.Marshal(x.ResponseEndBlock) + if err != nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = runtime.EncodeVarint(dAtA, i, uint64(len(encoded))) + i-- + dAtA[i] = 0x2a + } + if x.RequestEndBlock != nil { + encoded, err := options.Marshal(x.RequestEndBlock) + if err != nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = runtime.EncodeVarint(dAtA, i, uint64(len(encoded))) + i-- + dAtA[i] = 0x22 + } + if len(x.DeliverTxs) > 0 { + for iNdEx := len(x.DeliverTxs) - 1; iNdEx >= 0; iNdEx-- { + encoded, err := options.Marshal(x.DeliverTxs[iNdEx]) + if err != nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = runtime.EncodeVarint(dAtA, i, uint64(len(encoded))) + i-- + dAtA[i] = 0x1a + } + } + if x.ResponseBeginBlock != nil { + encoded, err := options.Marshal(x.ResponseBeginBlock) + if err != nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = runtime.EncodeVarint(dAtA, i, 
uint64(len(encoded))) + i-- + dAtA[i] = 0x12 + } + if x.RequestBeginBlock != nil { + encoded, err := options.Marshal(x.RequestBeginBlock) + if err != nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = runtime.EncodeVarint(dAtA, i, uint64(len(encoded))) + i-- + dAtA[i] = 0xa + } + if input.Buf != nil { + input.Buf = append(input.Buf, dAtA...) + } else { + input.Buf = dAtA + } + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + unmarshal := func(input protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + x := input.Message.Interface().(*BlockMetadata) + if x == nil { + return protoiface.UnmarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Flags: input.Flags, + }, nil + } + options := runtime.UnmarshalInputToOptions(input) + _ = options + dAtA := input.Buf + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: BlockMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: BlockMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field RequestBeginBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if x.RequestBeginBlock == nil { + x.RequestBeginBlock = &abci.RequestBeginBlock{} + } + if err := options.Unmarshal(dAtA[iNdEx:postIndex], x.RequestBeginBlock); err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: 
wrong wireType = %d for field ResponseBeginBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if x.ResponseBeginBlock == nil { + x.ResponseBeginBlock = &abci.ResponseBeginBlock{} + } + if err := options.Unmarshal(dAtA[iNdEx:postIndex], x.ResponseBeginBlock); err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field DeliverTxs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + x.DeliverTxs = append(x.DeliverTxs, &BlockMetadata_DeliverTx{}) + if err := options.Unmarshal(dAtA[iNdEx:postIndex], x.DeliverTxs[len(x.DeliverTxs)-1]); err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field RequestEndBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex 
< 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if x.RequestEndBlock == nil { + x.RequestEndBlock = &abci.RequestEndBlock{} + } + if err := options.Unmarshal(dAtA[iNdEx:postIndex], x.RequestEndBlock); err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field ResponseEndBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if x.ResponseEndBlock == nil { + x.ResponseEndBlock = &abci.ResponseEndBlock{} + } + if err := options.Unmarshal(dAtA[iNdEx:postIndex], x.ResponseEndBlock); err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field ResponseCommit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if x.ResponseCommit == nil { + x.ResponseCommit = &abci.ResponseCommit{} + } + if err := options.Unmarshal(dAtA[iNdEx:postIndex], x.ResponseCommit); err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := runtime.Skip(dAtA[iNdEx:]) + if err != nil { + return 
protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if !options.DiscardUnknown { + x.unknownFields = append(x.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + } + iNdEx += skippy + } + } + + if iNdEx > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, nil + } + return &protoiface.Methods{ + NoUnkeyedLiterals: struct{}{}, + Flags: protoiface.SupportMarshalDeterministic | protoiface.SupportUnmarshalDiscardUnknown, + Size: size, + Marshal: marshal, + Unmarshal: unmarshal, + Merge: nil, + CheckInitialized: nil, + } +} + +var ( + md_BlockMetadata_DeliverTx protoreflect.MessageDescriptor + fd_BlockMetadata_DeliverTx_request protoreflect.FieldDescriptor + fd_BlockMetadata_DeliverTx_response protoreflect.FieldDescriptor +) + +func init() { + file_cosmos_base_store_v1beta1_listening_proto_init() + md_BlockMetadata_DeliverTx = File_cosmos_base_store_v1beta1_listening_proto.Messages().ByName("BlockMetadata").Messages().ByName("DeliverTx") + fd_BlockMetadata_DeliverTx_request = md_BlockMetadata_DeliverTx.Fields().ByName("request") + fd_BlockMetadata_DeliverTx_response = md_BlockMetadata_DeliverTx.Fields().ByName("response") +} + +var _ protoreflect.Message = (*fastReflection_BlockMetadata_DeliverTx)(nil) + +type fastReflection_BlockMetadata_DeliverTx BlockMetadata_DeliverTx + +func (x *BlockMetadata_DeliverTx) ProtoReflect() protoreflect.Message { + return (*fastReflection_BlockMetadata_DeliverTx)(x) +} + +func (x *BlockMetadata_DeliverTx) slowProtoReflect() protoreflect.Message { + mi := &file_cosmos_base_store_v1beta1_listening_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +var _fastReflection_BlockMetadata_DeliverTx_messageType fastReflection_BlockMetadata_DeliverTx_messageType +var _ protoreflect.MessageType = fastReflection_BlockMetadata_DeliverTx_messageType{} + +type fastReflection_BlockMetadata_DeliverTx_messageType struct{} + +func (x fastReflection_BlockMetadata_DeliverTx_messageType) Zero() protoreflect.Message { + return (*fastReflection_BlockMetadata_DeliverTx)(nil) +} +func (x fastReflection_BlockMetadata_DeliverTx_messageType) New() protoreflect.Message { + return new(fastReflection_BlockMetadata_DeliverTx) +} +func (x fastReflection_BlockMetadata_DeliverTx_messageType) Descriptor() protoreflect.MessageDescriptor { + return md_BlockMetadata_DeliverTx +} + +// Descriptor returns message descriptor, which contains only the protobuf +// type information for the message. +func (x *fastReflection_BlockMetadata_DeliverTx) Descriptor() protoreflect.MessageDescriptor { + return md_BlockMetadata_DeliverTx +} + +// Type returns the message type, which encapsulates both Go and protobuf +// type information. If the Go type information is not needed, +// it is recommended that the message descriptor be used instead. 
+func (x *fastReflection_BlockMetadata_DeliverTx) Type() protoreflect.MessageType { + return _fastReflection_BlockMetadata_DeliverTx_messageType +} + +// New returns a newly allocated and mutable empty message. +func (x *fastReflection_BlockMetadata_DeliverTx) New() protoreflect.Message { + return new(fastReflection_BlockMetadata_DeliverTx) +} + +// Interface unwraps the message reflection interface and +// returns the underlying ProtoMessage interface. +func (x *fastReflection_BlockMetadata_DeliverTx) Interface() protoreflect.ProtoMessage { + return (*BlockMetadata_DeliverTx)(x) +} + +// Range iterates over every populated field in an undefined order, +// calling f for each field descriptor and value encountered. +// Range returns immediately if f returns false. +// While iterating, mutating operations may only be performed +// on the current field descriptor. +func (x *fastReflection_BlockMetadata_DeliverTx) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if x.Request != nil { + value := protoreflect.ValueOfMessage(x.Request.ProtoReflect()) + if !f(fd_BlockMetadata_DeliverTx_request, value) { + return + } + } + if x.Response != nil { + value := protoreflect.ValueOfMessage(x.Response.ProtoReflect()) + if !f(fd_BlockMetadata_DeliverTx_response, value) { + return + } + } +} + +// Has reports whether a field is populated. +// +// Some fields have the property of nullability where it is possible to +// distinguish between the default value of a field and whether the field +// was explicitly populated with the default value. Singular message fields, +// member fields of a oneof, and proto2 scalar fields are nullable. Such +// fields are populated only if explicitly set. +// +// In other cases (aside from the nullable cases above), +// a proto3 scalar field is populated if it contains a non-zero value, and +// a repeated field is populated if it is non-empty. +func (x *fastReflection_BlockMetadata_DeliverTx) Has(fd protoreflect.FieldDescriptor) bool { + switch fd.FullName() { + case "cosmos.base.store.v1beta1.BlockMetadata.DeliverTx.request": + return x.Request != nil + case "cosmos.base.store.v1beta1.BlockMetadata.DeliverTx.response": + return x.Response != nil + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.base.store.v1beta1.BlockMetadata.DeliverTx")) + } + panic(fmt.Errorf("message cosmos.base.store.v1beta1.BlockMetadata.DeliverTx does not contain field %s", fd.FullName())) + } +} + +// Clear clears the field such that a subsequent Has call reports false. +// +// Clearing an extension field clears both the extension type and value +// associated with the given field number. +// +// Clear is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_BlockMetadata_DeliverTx) Clear(fd protoreflect.FieldDescriptor) { + switch fd.FullName() { + case "cosmos.base.store.v1beta1.BlockMetadata.DeliverTx.request": + x.Request = nil + case "cosmos.base.store.v1beta1.BlockMetadata.DeliverTx.response": + x.Response = nil + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.base.store.v1beta1.BlockMetadata.DeliverTx")) + } + panic(fmt.Errorf("message cosmos.base.store.v1beta1.BlockMetadata.DeliverTx does not contain field %s", fd.FullName())) + } +} + +// Get retrieves the value for a field. +// +// For unpopulated scalars, it returns the default value, where +// the default value of a bytes scalar is guaranteed to be a copy. 
+// For unpopulated composite types, it returns an empty, read-only view +// of the value; to obtain a mutable reference, use Mutable. +func (x *fastReflection_BlockMetadata_DeliverTx) Get(descriptor protoreflect.FieldDescriptor) protoreflect.Value { + switch descriptor.FullName() { + case "cosmos.base.store.v1beta1.BlockMetadata.DeliverTx.request": + value := x.Request + return protoreflect.ValueOfMessage(value.ProtoReflect()) + case "cosmos.base.store.v1beta1.BlockMetadata.DeliverTx.response": + value := x.Response + return protoreflect.ValueOfMessage(value.ProtoReflect()) + default: + if descriptor.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.base.store.v1beta1.BlockMetadata.DeliverTx")) + } + panic(fmt.Errorf("message cosmos.base.store.v1beta1.BlockMetadata.DeliverTx does not contain field %s", descriptor.FullName())) + } +} + +// Set stores the value for a field. +// +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType. +// When setting a composite type, it is unspecified whether the stored value +// aliases the source's memory in any way. If the composite value is an +// empty, read-only value, then it panics. +// +// Set is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_BlockMetadata_DeliverTx) Set(fd protoreflect.FieldDescriptor, value protoreflect.Value) { + switch fd.FullName() { + case "cosmos.base.store.v1beta1.BlockMetadata.DeliverTx.request": + x.Request = value.Message().Interface().(*abci.RequestDeliverTx) + case "cosmos.base.store.v1beta1.BlockMetadata.DeliverTx.response": + x.Response = value.Message().Interface().(*abci.ResponseDeliverTx) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.base.store.v1beta1.BlockMetadata.DeliverTx")) + } + panic(fmt.Errorf("message cosmos.base.store.v1beta1.BlockMetadata.DeliverTx does not contain field %s", fd.FullName())) + } +} + +// Mutable returns a mutable reference to a composite type. +// +// If the field is unpopulated, it may allocate a composite value. +// For a field belonging to a oneof, it implicitly clears any other field +// that may be currently set within the same oneof. +// For extension fields, it implicitly stores the provided ExtensionType +// if not already stored. +// It panics if the field does not contain a composite type. +// +// Mutable is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_BlockMetadata_DeliverTx) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "cosmos.base.store.v1beta1.BlockMetadata.DeliverTx.request": + if x.Request == nil { + x.Request = new(abci.RequestDeliverTx) + } + return protoreflect.ValueOfMessage(x.Request.ProtoReflect()) + case "cosmos.base.store.v1beta1.BlockMetadata.DeliverTx.response": + if x.Response == nil { + x.Response = new(abci.ResponseDeliverTx) + } + return protoreflect.ValueOfMessage(x.Response.ProtoReflect()) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.base.store.v1beta1.BlockMetadata.DeliverTx")) + } + panic(fmt.Errorf("message cosmos.base.store.v1beta1.BlockMetadata.DeliverTx does not contain field %s", fd.FullName())) + } +} + +// NewField returns a new value that is assignable to the field +// for the given descriptor. 
For scalars, this returns the default value. +// For lists, maps, and messages, this returns a new, empty, mutable value. +func (x *fastReflection_BlockMetadata_DeliverTx) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.FullName() { + case "cosmos.base.store.v1beta1.BlockMetadata.DeliverTx.request": + m := new(abci.RequestDeliverTx) + return protoreflect.ValueOfMessage(m.ProtoReflect()) + case "cosmos.base.store.v1beta1.BlockMetadata.DeliverTx.response": + m := new(abci.ResponseDeliverTx) + return protoreflect.ValueOfMessage(m.ProtoReflect()) + default: + if fd.IsExtension() { + panic(fmt.Errorf("proto3 declared messages do not support extensions: cosmos.base.store.v1beta1.BlockMetadata.DeliverTx")) + } + panic(fmt.Errorf("message cosmos.base.store.v1beta1.BlockMetadata.DeliverTx does not contain field %s", fd.FullName())) + } +} + +// WhichOneof reports which field within the oneof is populated, +// returning nil if none are populated. +// It panics if the oneof descriptor does not belong to this message. +func (x *fastReflection_BlockMetadata_DeliverTx) WhichOneof(d protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + switch d.FullName() { + default: + panic(fmt.Errorf("%s is not a oneof field in cosmos.base.store.v1beta1.BlockMetadata.DeliverTx", d.FullName())) + } + panic("unreachable") +} + +// GetUnknown retrieves the entire list of unknown fields. +// The caller may only mutate the contents of the RawFields +// if the mutated bytes are stored back into the message with SetUnknown. +func (x *fastReflection_BlockMetadata_DeliverTx) GetUnknown() protoreflect.RawFields { + return x.unknownFields +} + +// SetUnknown stores an entire list of unknown fields. +// The raw fields must be syntactically valid according to the wire format. +// An implementation may panic if this is not the case. +// Once stored, the caller must not mutate the content of the RawFields. +// An empty RawFields may be passed to clear the fields. +// +// SetUnknown is a mutating operation and unsafe for concurrent use. +func (x *fastReflection_BlockMetadata_DeliverTx) SetUnknown(fields protoreflect.RawFields) { + x.unknownFields = fields +} + +// IsValid reports whether the message is valid. +// +// An invalid message is an empty, read-only value. +// +// An invalid message often corresponds to a nil pointer of the concrete +// message type, but the details are implementation dependent. +// Validity is not part of the protobuf data model, and may not +// be preserved in marshaling or other operations. +func (x *fastReflection_BlockMetadata_DeliverTx) IsValid() bool { + return x != nil +} + +// ProtoMethods returns optional fastReflectionFeature-path implementations of various operations. +// This method may return nil. +// +// The returned methods type is identical to +// "google.golang.org/protobuf/runtime/protoiface".Methods. +// Consult the protoiface package documentation for details. 
+func (x *fastReflection_BlockMetadata_DeliverTx) ProtoMethods() *protoiface.Methods { + size := func(input protoiface.SizeInput) protoiface.SizeOutput { + x := input.Message.Interface().(*BlockMetadata_DeliverTx) + if x == nil { + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: 0, + } + } + options := runtime.SizeInputToOptions(input) + _ = options + var n int + var l int + _ = l + if x.Request != nil { + l = options.Size(x.Request) + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.Response != nil { + l = options.Size(x.Response) + n += 1 + l + runtime.Sov(uint64(l)) + } + if x.unknownFields != nil { + n += len(x.unknownFields) + } + return protoiface.SizeOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Size: n, + } + } + + marshal := func(input protoiface.MarshalInput) (protoiface.MarshalOutput, error) { + x := input.Message.Interface().(*BlockMetadata_DeliverTx) + if x == nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + options := runtime.MarshalInputToOptions(input) + _ = options + size := options.Size(x) + dAtA := make([]byte, size) + i := len(dAtA) + _ = i + var l int + _ = l + if x.unknownFields != nil { + i -= len(x.unknownFields) + copy(dAtA[i:], x.unknownFields) + } + if x.Response != nil { + encoded, err := options.Marshal(x.Response) + if err != nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = runtime.EncodeVarint(dAtA, i, uint64(len(encoded))) + i-- + dAtA[i] = 0x12 + } + if x.Request != nil { + encoded, err := options.Marshal(x.Request) + if err != nil { + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = runtime.EncodeVarint(dAtA, i, uint64(len(encoded))) + i-- + dAtA[i] = 0xa + } + if input.Buf != nil { + input.Buf = append(input.Buf, dAtA...) 
+ } else { + input.Buf = dAtA + } + return protoiface.MarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Buf: input.Buf, + }, nil + } + unmarshal := func(input protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + x := input.Message.Interface().(*BlockMetadata_DeliverTx) + if x == nil { + return protoiface.UnmarshalOutput{ + NoUnkeyedLiterals: input.NoUnkeyedLiterals, + Flags: input.Flags, + }, nil + } + options := runtime.UnmarshalInputToOptions(input) + _ = options + dAtA := input.Buf + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: BlockMetadata_DeliverTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: BlockMetadata_DeliverTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if x.Request == nil { + x.Request = &abci.RequestDeliverTx{} + } + if err := options.Unmarshal(dAtA[iNdEx:postIndex], x.Request); err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrIntOverflow + } + if iNdEx >= l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { 
+ break + } + } + if msglen < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if postIndex > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if x.Response == nil { + x.Response = &abci.ResponseDeliverTx{} + } + if err := options.Unmarshal(dAtA[iNdEx:postIndex], x.Response); err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := runtime.Skip(dAtA[iNdEx:]) + if err != nil { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, runtime.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + if !options.DiscardUnknown { + x.unknownFields = append(x.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + } + iNdEx += skippy + } + } + + if iNdEx > l { + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, io.ErrUnexpectedEOF + } + return protoiface.UnmarshalOutput{NoUnkeyedLiterals: input.NoUnkeyedLiterals, Flags: input.Flags}, nil + } + return &protoiface.Methods{ + NoUnkeyedLiterals: struct{}{}, + Flags: protoiface.SupportMarshalDeterministic | protoiface.SupportUnmarshalDiscardUnknown, + Size: size, + Marshal: marshal, + Unmarshal: unmarshal, + Merge: nil, + CheckInitialized: nil, + } +} + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.0 +// protoc (unknown) +// source: cosmos/base/store/v1beta1/listening.proto + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// StoreKVPair is a KVStore KVPair used for listening to state changes (Sets and Deletes) +// It optionally includes the StoreKey for the originating KVStore and a Boolean flag to distinguish between Sets and +// Deletes +// +// Since: cosmos-sdk 0.43 +type StoreKVPair struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + StoreKey string `protobuf:"bytes,1,opt,name=store_key,json=storeKey,proto3" json:"store_key,omitempty"` // the store key for the KVStore this pair originates from + Delete bool `protobuf:"varint,2,opt,name=delete,proto3" json:"delete,omitempty"` // true indicates a delete operation, false indicates a set operation + Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *StoreKVPair) Reset() { + *x = StoreKVPair{} + if protoimpl.UnsafeEnabled { + mi := &file_cosmos_base_store_v1beta1_listening_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StoreKVPair) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StoreKVPair) ProtoMessage() {} + +// Deprecated: Use StoreKVPair.ProtoReflect.Descriptor instead. +func (*StoreKVPair) Descriptor() ([]byte, []int) { + return file_cosmos_base_store_v1beta1_listening_proto_rawDescGZIP(), []int{0} +} + +func (x *StoreKVPair) GetStoreKey() string { + if x != nil { + return x.StoreKey + } + return "" +} + +func (x *StoreKVPair) GetDelete() bool { + if x != nil { + return x.Delete + } + return false +} + +func (x *StoreKVPair) GetKey() []byte { + if x != nil { + return x.Key + } + return nil +} + +func (x *StoreKVPair) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +// BlockMetadata contains all the abci event data of a block +// the file streamer dump them into files together with the state changes. +type BlockMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RequestBeginBlock *abci.RequestBeginBlock `protobuf:"bytes,1,opt,name=request_begin_block,json=requestBeginBlock,proto3" json:"request_begin_block,omitempty"` + ResponseBeginBlock *abci.ResponseBeginBlock `protobuf:"bytes,2,opt,name=response_begin_block,json=responseBeginBlock,proto3" json:"response_begin_block,omitempty"` + DeliverTxs []*BlockMetadata_DeliverTx `protobuf:"bytes,3,rep,name=deliver_txs,json=deliverTxs,proto3" json:"deliver_txs,omitempty"` + RequestEndBlock *abci.RequestEndBlock `protobuf:"bytes,4,opt,name=request_end_block,json=requestEndBlock,proto3" json:"request_end_block,omitempty"` + ResponseEndBlock *abci.ResponseEndBlock `protobuf:"bytes,5,opt,name=response_end_block,json=responseEndBlock,proto3" json:"response_end_block,omitempty"` + ResponseCommit *abci.ResponseCommit `protobuf:"bytes,6,opt,name=response_commit,json=responseCommit,proto3" json:"response_commit,omitempty"` +} + +func (x *BlockMetadata) Reset() { + *x = BlockMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_cosmos_base_store_v1beta1_listening_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlockMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlockMetadata) ProtoMessage() {} + +// Deprecated: Use BlockMetadata.ProtoReflect.Descriptor instead. 
+func (*BlockMetadata) Descriptor() ([]byte, []int) { + return file_cosmos_base_store_v1beta1_listening_proto_rawDescGZIP(), []int{1} +} + +func (x *BlockMetadata) GetRequestBeginBlock() *abci.RequestBeginBlock { + if x != nil { + return x.RequestBeginBlock + } + return nil +} + +func (x *BlockMetadata) GetResponseBeginBlock() *abci.ResponseBeginBlock { + if x != nil { + return x.ResponseBeginBlock + } + return nil +} + +func (x *BlockMetadata) GetDeliverTxs() []*BlockMetadata_DeliverTx { + if x != nil { + return x.DeliverTxs + } + return nil +} + +func (x *BlockMetadata) GetRequestEndBlock() *abci.RequestEndBlock { + if x != nil { + return x.RequestEndBlock + } + return nil +} + +func (x *BlockMetadata) GetResponseEndBlock() *abci.ResponseEndBlock { + if x != nil { + return x.ResponseEndBlock + } + return nil +} + +func (x *BlockMetadata) GetResponseCommit() *abci.ResponseCommit { + if x != nil { + return x.ResponseCommit + } + return nil +} + +// DeliverTx encapulate deliver tx request and response. +type BlockMetadata_DeliverTx struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Request *abci.RequestDeliverTx `protobuf:"bytes,1,opt,name=request,proto3" json:"request,omitempty"` + Response *abci.ResponseDeliverTx `protobuf:"bytes,2,opt,name=response,proto3" json:"response,omitempty"` +} + +func (x *BlockMetadata_DeliverTx) Reset() { + *x = BlockMetadata_DeliverTx{} + if protoimpl.UnsafeEnabled { + mi := &file_cosmos_base_store_v1beta1_listening_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlockMetadata_DeliverTx) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlockMetadata_DeliverTx) ProtoMessage() {} + +// Deprecated: Use BlockMetadata_DeliverTx.ProtoReflect.Descriptor instead. 
+func (*BlockMetadata_DeliverTx) Descriptor() ([]byte, []int) { + return file_cosmos_base_store_v1beta1_listening_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *BlockMetadata_DeliverTx) GetRequest() *abci.RequestDeliverTx { + if x != nil { + return x.Request + } + return nil +} + +func (x *BlockMetadata_DeliverTx) GetResponse() *abci.ResponseDeliverTx { + if x != nil { + return x.Response + } + return nil +} + +var File_cosmos_base_store_v1beta1_listening_proto protoreflect.FileDescriptor + +var file_cosmos_base_store_v1beta1_listening_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x6c, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x63, 0x6f, 0x73, + 0x6d, 0x6f, 0x73, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x1a, 0x1b, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x6d, 0x69, + 0x6e, 0x74, 0x2f, 0x61, 0x62, 0x63, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x6a, 0x0a, 0x0b, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x4b, 0x56, 0x50, 0x61, + 0x69, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4b, 0x65, 0x79, 0x12, + 0x16, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x83, 0x05, 0x0a, 0x0d, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x12, 0x52, 0x0a, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x65, 0x67, + 0x69, 0x6e, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x74, 0x2e, 0x61, 0x62, 0x63, 0x69, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x52, 0x11, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x65, 0x67, 0x69, 0x6e, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x55, 0x0a, 0x14, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x5f, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x74, + 0x2e, 0x61, 0x62, 0x63, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x65, + 0x67, 0x69, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x12, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x53, 0x0a, 0x0b, + 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x5f, 0x74, 0x78, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x32, 0x2e, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x69, + 0x76, 0x65, 0x72, 0x54, 0x78, 0x52, 0x0a, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x54, 0x78, + 0x73, 0x12, 0x4c, 0x0a, 0x11, 0x72, 0x65, 
0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x65, 0x6e, 0x64, + 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, + 0x65, 0x6e, 0x64, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x74, 0x2e, 0x61, 0x62, 0x63, 0x69, 0x2e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x45, 0x6e, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x0f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x45, 0x6e, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, + 0x4f, 0x0a, 0x12, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x5f, + 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x74, 0x65, + 0x6e, 0x64, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x74, 0x2e, 0x61, 0x62, 0x63, 0x69, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x45, 0x6e, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x10, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x45, 0x6e, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x12, 0x48, 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x6d, + 0x6d, 0x69, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x65, 0x6e, 0x64, + 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x74, 0x2e, 0x61, 0x62, 0x63, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x1a, 0x88, 0x01, 0x0a, 0x09, 0x44, + 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x54, 0x78, 0x12, 0x3b, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x74, 0x65, 0x6e, 0x64, + 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x74, 0x2e, 0x61, 0x62, 0x63, 0x69, 0x2e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x54, 0x78, 0x52, 0x07, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x72, + 0x6d, 0x69, 0x6e, 0x74, 0x2e, 0x61, 0x62, 0x63, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x54, 0x78, 0x52, 0x08, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0xef, 0x01, 0x0a, 0x1d, 0x63, 0x6f, 0x6d, 0x2e, 0x63, 0x6f, + 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x42, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x69, + 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x63, 0x6f, 0x73, 0x6d, 0x6f, + 0x73, 0x73, 0x64, 0x6b, 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6f, 0x73, 0x6d, + 0x6f, 0x73, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, + 0x62, 0x65, 0x74, 0x61, 0x31, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x76, 0x31, 0x62, 0x65, 0x74, + 0x61, 0x31, 0xa2, 0x02, 0x03, 0x43, 0x42, 0x53, 0xaa, 0x02, 0x19, 0x43, 0x6f, 0x73, 0x6d, 0x6f, + 0x73, 0x2e, 0x42, 0x61, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x56, 0x31, 0x62, + 0x65, 0x74, 0x61, 0x31, 0xca, 0x02, 0x19, 0x43, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x5c, 0x42, 0x61, + 0x73, 0x65, 0x5c, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x5c, 0x56, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, + 0xe2, 0x02, 0x25, 0x43, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x5c, 0x42, 0x61, 0x73, 0x65, 0x5c, 0x53, + 0x74, 0x6f, 0x72, 0x65, 0x5c, 0x56, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x5c, 0x47, 0x50, 0x42, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1c, 
0x43, 0x6f, 0x73, 0x6d, 0x6f, + 0x73, 0x3a, 0x3a, 0x42, 0x61, 0x73, 0x65, 0x3a, 0x3a, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, + 0x56, 0x31, 0x62, 0x65, 0x74, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_cosmos_base_store_v1beta1_listening_proto_rawDescOnce sync.Once + file_cosmos_base_store_v1beta1_listening_proto_rawDescData = file_cosmos_base_store_v1beta1_listening_proto_rawDesc +) + +func file_cosmos_base_store_v1beta1_listening_proto_rawDescGZIP() []byte { + file_cosmos_base_store_v1beta1_listening_proto_rawDescOnce.Do(func() { + file_cosmos_base_store_v1beta1_listening_proto_rawDescData = protoimpl.X.CompressGZIP(file_cosmos_base_store_v1beta1_listening_proto_rawDescData) + }) + return file_cosmos_base_store_v1beta1_listening_proto_rawDescData +} + +var file_cosmos_base_store_v1beta1_listening_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_cosmos_base_store_v1beta1_listening_proto_goTypes = []interface{}{ + (*StoreKVPair)(nil), // 0: cosmos.base.store.v1beta1.StoreKVPair + (*BlockMetadata)(nil), // 1: cosmos.base.store.v1beta1.BlockMetadata + (*BlockMetadata_DeliverTx)(nil), // 2: cosmos.base.store.v1beta1.BlockMetadata.DeliverTx + (*abci.RequestBeginBlock)(nil), // 3: tendermint.abci.RequestBeginBlock + (*abci.ResponseBeginBlock)(nil), // 4: tendermint.abci.ResponseBeginBlock + (*abci.RequestEndBlock)(nil), // 5: tendermint.abci.RequestEndBlock + (*abci.ResponseEndBlock)(nil), // 6: tendermint.abci.ResponseEndBlock + (*abci.ResponseCommit)(nil), // 7: tendermint.abci.ResponseCommit + (*abci.RequestDeliverTx)(nil), // 8: tendermint.abci.RequestDeliverTx + (*abci.ResponseDeliverTx)(nil), // 9: tendermint.abci.ResponseDeliverTx +} +var file_cosmos_base_store_v1beta1_listening_proto_depIdxs = []int32{ + 3, // 0: cosmos.base.store.v1beta1.BlockMetadata.request_begin_block:type_name -> tendermint.abci.RequestBeginBlock + 4, // 1: cosmos.base.store.v1beta1.BlockMetadata.response_begin_block:type_name -> tendermint.abci.ResponseBeginBlock + 2, // 2: cosmos.base.store.v1beta1.BlockMetadata.deliver_txs:type_name -> cosmos.base.store.v1beta1.BlockMetadata.DeliverTx + 5, // 3: cosmos.base.store.v1beta1.BlockMetadata.request_end_block:type_name -> tendermint.abci.RequestEndBlock + 6, // 4: cosmos.base.store.v1beta1.BlockMetadata.response_end_block:type_name -> tendermint.abci.ResponseEndBlock + 7, // 5: cosmos.base.store.v1beta1.BlockMetadata.response_commit:type_name -> tendermint.abci.ResponseCommit + 8, // 6: cosmos.base.store.v1beta1.BlockMetadata.DeliverTx.request:type_name -> tendermint.abci.RequestDeliverTx + 9, // 7: cosmos.base.store.v1beta1.BlockMetadata.DeliverTx.response:type_name -> tendermint.abci.ResponseDeliverTx + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_cosmos_base_store_v1beta1_listening_proto_init() } +func file_cosmos_base_store_v1beta1_listening_proto_init() { + if File_cosmos_base_store_v1beta1_listening_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_cosmos_base_store_v1beta1_listening_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StoreKVPair); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_cosmos_base_store_v1beta1_listening_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlockMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cosmos_base_store_v1beta1_listening_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlockMetadata_DeliverTx); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cosmos_base_store_v1beta1_listening_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cosmos_base_store_v1beta1_listening_proto_goTypes, + DependencyIndexes: file_cosmos_base_store_v1beta1_listening_proto_depIdxs, + MessageInfos: file_cosmos_base_store_v1beta1_listening_proto_msgTypes, + }.Build() + File_cosmos_base_store_v1beta1_listening_proto = out.File + file_cosmos_base_store_v1beta1_listening_proto_rawDesc = nil + file_cosmos_base_store_v1beta1_listening_proto_goTypes = nil + file_cosmos_base_store_v1beta1_listening_proto_depIdxs = nil +} diff --git a/baseapp/abci.go b/baseapp/abci.go index 74a5b04f51b6..028a0d1caf6e 100644 --- a/baseapp/abci.go +++ b/baseapp/abci.go @@ -189,7 +189,7 @@ func (app *BaseApp) BeginBlock(req abci.RequestBeginBlock) (res abci.ResponseBeg // call the hooks with the BeginBlock messages for _, streamingListener := range app.abciListeners { if err := streamingListener.ListenBeginBlock(app.deliverState.ctx, req, res); err != nil { - app.logger.Error("BeginBlock listening hook failed", "height", req.Header.Height, "err", err) + panic(fmt.Errorf("BeginBlock listening hook failed, height: %d, err: %w", req.Header.Height, err)) } } @@ -214,7 +214,7 @@ func (app *BaseApp) EndBlock(req abci.RequestEndBlock) (res abci.ResponseEndBloc // call the streaming service hooks with the EndBlock messages for _, streamingListener := range app.abciListeners { if err := streamingListener.ListenEndBlock(app.deliverState.ctx, req, res); err != nil { - app.logger.Error("EndBlock listening hook failed", "height", req.Height, "err", err) + panic(fmt.Errorf("EndBlock listening hook failed, height: %d, err: %w", req.Height, err)) } } @@ -268,7 +268,7 @@ func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) (res abci.ResponseDeliv defer func() { for _, streamingListener := range app.abciListeners { if err := streamingListener.ListenDeliverTx(app.deliverState.ctx, req, res); err != nil { - app.logger.Error("DeliverTx listening hook failed", "err", err) + panic(fmt.Errorf("DeliverTx listening hook failed: %w", err)) } } }() @@ -302,7 +302,7 @@ func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) (res abci.ResponseDeliv // defined in config, Commit will execute a deferred function call to check // against that height and gracefully halt if it matches the latest committed // height. -func (app *BaseApp) Commit() (res abci.ResponseCommit) { +func (app *BaseApp) Commit() abci.ResponseCommit { header := app.deliverState.ctx.BlockHeader() retainHeight := app.GetBlockRetentionHeight(header.Height) @@ -311,6 +311,19 @@ func (app *BaseApp) Commit() (res abci.ResponseCommit) { // MultiStore (app.cms) so when Commit() is called is persists those values. 
 	app.deliverState.ms.Write()
+	commitID := app.cms.Commit()
+
+	res := abci.ResponseCommit{
+		Data:         commitID.Hash,
+		RetainHeight: retainHeight,
+	}
+
+	// call the hooks with the Commit message
+	for _, streamingListener := range app.abciListeners {
+		if err := streamingListener.ListenCommit(app.deliverState.ctx, res); err != nil {
+			panic(fmt.Errorf("Commit listening hook failed, height: %d, err: %w", header.Height, err))
+		}
+	}
+
 	app.logger.Info("commit synced", "commit", fmt.Sprintf("%X", commitID))
 	// Reset the Check state to the latest committed.
@@ -342,10 +355,7 @@ func (app *BaseApp) Commit() (res abci.ResponseCommit) {
 	go app.snapshotManager.SnapshotIfApplicable(header.Height)
-	return abci.ResponseCommit{
-		Data:         commitID.Hash,
-		RetainHeight: retainHeight,
-	}
+	return res
 }
 // halt attempts to gracefully shutdown the node via SIGINT and SIGTERM falling
diff --git a/baseapp/streaming.go b/baseapp/streaming.go
index 39e0f1ca6e9b..4d70107d6c38 100644
--- a/baseapp/streaming.go
+++ b/baseapp/streaming.go
@@ -10,14 +10,22 @@ import (
 	"github.com/cosmos/cosmos-sdk/types"
 )
-// ABCIListener interface used to hook into the ABCI message processing of the BaseApp
+// ABCIListener interface used to hook into the ABCI message processing of the BaseApp.
+// The error results are propagated to the consensus state machine;
+// if you don't want to affect consensus, handle the errors internally and always return `nil` from these APIs.
 type ABCIListener interface {
 	// ListenBeginBlock updates the streaming service with the latest BeginBlock messages
 	ListenBeginBlock(ctx types.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) error
 	// ListenEndBlock updates the steaming service with the latest EndBlock messages
 	ListenEndBlock(ctx types.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) error
 	// ListenDeliverTx updates the steaming service with the latest DeliverTx messages
+<<<<<<< HEAD
 	ListenDeliverTx(ctx types.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error
+=======
+	ListenDeliverTx(ctx context.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error
+	// ListenCommit updates the streaming service with the latest Commit event
+	ListenCommit(ctx context.Context, res abci.ResponseCommit) error
+>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516))
 }
 // StreamingService interface for registering WriteListeners with the BaseApp and updating the service with the ABCI messages using the hooks
diff --git a/docs/architecture/adr-038-state-listening.md b/docs/architecture/adr-038-state-listening.md
index b9c209b4aa32..e2b8c23bed1f 100644
--- a/docs/architecture/adr-038-state-listening.md
+++ b/docs/architecture/adr-038-state-listening.md
@@ -3,6 +3,11 @@
 ## Changelog
 * 11/23/2020: Initial draft
+* 10/14/2022:
+    * Add `ListenCommit`, flatten the state writes in a block to a single batch.
+    * Remove listeners from cache stores; we should only listen to `rootmulti.Store`.
+    * Remove `HaltAppOnDeliveryError()`; the errors are propagated by default, and implementations should return nil if they don't want to propagate errors.
+
 ## Status
@@ -20,7 +25,7 @@ In addition to these request/response queries, it would be beneficial to have a
 ## Decision
-We will modify the `MultiStore` interface and its concrete (`rootmulti` and `cachemulti`) implementations and introduce a new `listenkv.Store` to allow listening to state changes in underlying KVStores.
+We will modify the `CommitMultiStore` interface and its concrete (`rootmulti`) implementations and introduce a new `listenkv.Store` to allow listening to state changes in underlying KVStores. We don't need to listen to cache stores, because we can't be sure that their writes will eventually be committed, and committed writes are duplicated in `rootmulti.Store` anyway, so we should only listen to `rootmulti.Store`.
 We will introduce a plugin system for configuring and running streaming services that write these state changes and their surrounding ABCI message context to different destinations.
 ### Listening interface
@@ -39,8 +44,8 @@ type WriteListener interface {
 ### Listener type
-We will create a concrete implementation of the `WriteListener` interface in `store/types/listening.go`, that writes out protobuf
-encoded KV pairs to an underlying `io.Writer`.
+We will create two concrete implementations of the `WriteListener` interface in `store/types/listening.go`: one that writes out protobuf
+encoded KV pairs to an underlying `io.Writer`, and one that simply accumulates them in memory.
 This will include defining a simple protobuf type for the KV pairs.
 In addition to the key and value fields this message will include the StoreKey for the originating KVStore so that we can write out from separate KVStores to the same stream/file
@@ -89,6 +94,42 @@ func (wl *StoreKVPairWriteListener) OnWrite(storeKey types.StoreKey, key []byte,
 }
 ```
+```golang
+// MemoryListener listens to the state writes and accumulates the records in memory.
+type MemoryListener struct {
+	key        StoreKey
+	stateCache []StoreKVPair
+}
+
+// NewMemoryListener creates a listener that accumulates the state writes in memory.
+func NewMemoryListener(key StoreKey) *MemoryListener {
+	return &MemoryListener{key: key}
+}
+
+// OnWrite implements the WriteListener interface
+func (fl *MemoryListener) OnWrite(storeKey StoreKey, key []byte, value []byte, delete bool) error {
+	fl.stateCache = append(fl.stateCache, StoreKVPair{
+		StoreKey: storeKey.Name(),
+		Delete:   delete,
+		Key:      key,
+		Value:    value,
+	})
+	return nil
+}
+
+// PopStateCache returns the current state cache and resets it to nil
+func (fl *MemoryListener) PopStateCache() []StoreKVPair {
+	res := fl.stateCache
+	fl.stateCache = nil
+	return res
+}
+
+// StoreKey returns the storeKey it listens to
+func (fl *MemoryListener) StoreKey() StoreKey {
+	return fl.key
+}
+```
+
 ### ListenKVStore
 We will create a new `Store` type `listenkv.Store` that the `MultiStore` wraps around a `KVStore` to enable state listening.
@@ -137,12 +178,16 @@ func (s *Store) onWrite(delete bool, key, value []byte) {
 ### MultiStore interface updates
-We will update the `MultiStore` interface to allow us to wrap a set of listeners around a specific `KVStore`.
-Additionally, we will update the `CacheWrap` and `CacheWrapper` interfaces to enable listening in the caching layer.
+We will update the `CommitMultiStore` interface to allow us to wrap a set of listeners around a specific `KVStore`.
 ```go
+<<<<<<< HEAD
 type MultiStore interface {
 	...
+=======
+type CommitMultiStore interface {
+	...
+>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516))
 	// ListeningEnabled returns if listening is enabled for the KVStore belonging the provided StoreKey
 	ListeningEnabled(key StoreKey) bool
@@ -153,6 +198,7 @@ type MultiStore interface {
 }
 ```
+<<<<<<< HEAD
 ```go
 type CacheWrap interface {
 	...
@@ -169,9 +215,11 @@ type CacheWrapper interface {
 }
 ```
+=======
+>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516))
 ### MultiStore implementation updates
-We will modify all of the `Store` and `MultiStore` implementations to satisfy these new interfaces, and adjust the `rootmulti` `GetKVStore` method
+We will modify all of the `CommitMultiStore` implementations to satisfy these new interfaces, and adjust the `rootmulti` `GetKVStore` method
 to wrap the returned `KVStore` with a `listenkv.Store` if listening is turned on for that `Store`.
 ```go
@@ -189,16 +237,27 @@ func (rs *Store) GetKVStore(key types.StoreKey) types.KVStore {
 }
-We will also adjust the `cachemulti` constructor methods and the `rootmulti` `CacheMultiStore` method to forward the listeners
-to and enable listening in the cache layer.
+We will also adjust the `rootmulti` `CacheMultiStore` method to wrap the stores with `listenkv.Store` to enable listening when the cache layer writes.
 ```go
 func (rs *Store) CacheMultiStore() types.CacheMultiStore {
 	stores := make(map[types.StoreKey]types.CacheWrapper)
 	for k, v := range rs.stores {
+<<<<<<< HEAD
 		stores[k] = v
 	}
 	return cachemulti.NewStore(rs.db, stores, rs.keysByName, rs.traceWriter, rs.traceContext, rs.listeners)
+=======
+		store := v.(types.KVStore)
+		// Wire the listenkv.Store to allow listeners to observe the writes from the cache store;
+		// setting the same listeners on the cache store would observe duplicated writes.
+		if rs.ListeningEnabled(k) {
+			store = listenkv.NewStore(store, k, rs.listeners[k])
+		}
+		stores[k] = store
+	}
+	return cachemulti.NewStore(rs.db, stores, rs.keysByName, rs.traceWriter, rs.getTracingContext())
+>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516))
 }
 ```
@@ -208,14 +267,14 @@ func (rs *Store) CacheMultiStore() types.CacheMultiStore {
 We will introduce a new `StreamingService` interface for exposing `WriteListener` data streams to external consumers.
 In addition to streaming state changes as `StoreKVPair`s, the interface satisfies an `ABCIListener` interface that plugs
-into the BaseApp and relays ABCI requests and responses so that the service can group the state changes with the ABCI
-requests that affected them and the ABCI responses they affected. The `ABCIListener` interface also exposes a
-`ListenSuccess` method which is (optionally) used by the `BaseApp` to await positive acknowledgement of message
-receipt from the `StreamingService`.
+into the BaseApp and relays ABCI requests and responses so that the service can observe the block metadata as well.
+
+The `WriteListener`s of `StreamingService` listen to the `rootmulti.Store`, which is only written into at commit time by the cache store of `deliverState`.
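As a rough, non-normative sketch of this flow (the `blockStateListener` type below is hypothetical and not part of this change; only `MemoryListener`, its `PopStateCache`, and the `rootmulti` listener wiring come from the proposal above), a streaming service could hold one `MemoryListener` per exposed store key, register them on the `CommitMultiStore`, and drain them only once `ListenCommit` fires. The `ABCIListener` interface it implements is shown next.

```go
package streaming

import (
	abci "github.com/tendermint/tendermint/abci/types"

	storetypes "github.com/cosmos/cosmos-sdk/store/types"
	sdk "github.com/cosmos/cosmos-sdk/types"
)

// blockStateListener is a hypothetical ABCIListener that flattens the state
// writes of a block into per-store batches and hands them off only at commit.
type blockStateListener struct {
	listeners []*storetypes.MemoryListener // one MemoryListener per exposed store key
}

// The BeginBlock/EndBlock/DeliverTx hooks only see ABCI metadata here; the
// state writes themselves are captured by the MemoryListeners registered on
// the rootmulti.Store.
func (l *blockStateListener) ListenBeginBlock(ctx sdk.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) error {
	return nil
}

func (l *blockStateListener) ListenEndBlock(ctx sdk.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) error {
	return nil
}

func (l *blockStateListener) ListenDeliverTx(ctx sdk.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error {
	return nil
}

// ListenCommit runs after deliverState has been written into rootmulti.Store,
// so at this point the MemoryListeners hold every state write of the block.
func (l *blockStateListener) ListenCommit(ctx sdk.Context, res abci.ResponseCommit) error {
	for _, ml := range l.listeners {
		pairs := ml.PopStateCache() // the flattened batch for this store key
		_ = pairs                   // hand the batch off to an external consumer here
	}
	return nil
}
```

Keeping the per-block work in `ListenCommit` keeps the other hooks cheap, since the cache store only flushes into `rootmulti.Store` at commit.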
```go // ABCIListener interface used to hook into the ABCI message processing of the BaseApp type ABCIListener interface { +<<<<<<< HEAD // ListenBeginBlock updates the streaming service with the latest BeginBlock messages ListenBeginBlock(ctx types.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) error // ListenEndBlock updates the steaming service with the latest EndBlock messages @@ -225,6 +284,17 @@ type ABCIListener interface { // ListenSuccess returns a chan that is used to acknowledge successful receipt of messages by the external service // after some configurable delay, `false` is sent to this channel from the service to signify failure of receipt ListenSuccess() <-chan bool +======= + // ListenBeginBlock updates the streaming service with the latest BeginBlock messages + ListenBeginBlock(ctx types.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) error + // ListenEndBlock updates the steaming service with the latest EndBlock messages + ListenEndBlock(ctx types.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) error + // ListenDeliverTx updates the steaming service with the latest DeliverTx messages + ListenDeliverTx(ctx types.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error + // ListenCommit updates the steaming service with the latest Commit message, + // All the state writes of current block should have notified before this message. + ListenCommit(ctx types.Context, res abci.ResponseCommit) error +>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516)) } // StreamingService interface for registering WriteListeners with the BaseApp and updating the service with the ABCI messages using the hooks @@ -274,10 +344,21 @@ func (app *BaseApp) BeginBlock(req abci.RequestBeginBlock) (res abci.ResponseBeg ... +<<<<<<< HEAD // Call the streaming service hooks with the BeginBlock messages for _, listener := range app.abciListeners { listener.ListenBeginBlock(app.deliverState.ctx, req, res) } +======= + defer func() { + // call the hooks with the BeginBlock messages + for _, streamingListener := range app.abciListeners { + if err := streamingListener.ListenBeginBlock(app.deliverState.ctx, req, res); err != nil { + panic(sdkerrors.Wrapf(err, "BeginBlock listening hook failed, height: %d", req.Header.Height)) + } + } + }() +>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516)) return res } @@ -288,16 +369,28 @@ func (app *BaseApp) EndBlock(req abci.RequestEndBlock) (res abci.ResponseEndBloc ... +<<<<<<< HEAD // Call the streaming service hooks with the EndBlock messages for _, listener := range app.abciListeners { listener.ListenEndBlock(app.deliverState.ctx, req, res) } +======= + defer func() { + // Call the streaming service hooks with the EndBlock messages + for _, streamingListener := range app.abciListeners { + if err := streamingListener.ListenEndBlock(app.deliverState.ctx, req, res); err != nil { + panic(sdkerrors.Wrapf(err, "EndBlock listening hook failed, height: %d", req.Height)) + } + } + }() +>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516)) return res } ``` ```go +<<<<<<< HEAD func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { ... 
@@ -360,11 +453,21 @@ func (app *BaseApp) Commit() (res abci.ResponseCommit) { if success == false { halt = true break +======= +func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) (res abci.ResponseDeliverTx) { + + defer func() { + // call the hooks with the DeliverTx messages + for _, streamingListener := range app.abciListeners { + if err := streamingListener.ListenDeliverTx(app.deliverState.ctx, req, res); err != nil { + panic(sdkerrors.Wrap(err, "DeliverTx listening hook failed")) +>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516)) } case <- maxWait.C: halt = true break } +<<<<<<< HEAD } if halt { @@ -374,12 +477,62 @@ func (app *BaseApp) Commit() (res abci.ResponseCommit) { // reset or moved to a more distant value. app.halt() } +======= + }() +>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516)) ... } ``` +```golang +func (app *BaseApp) Commit() abci.ResponseCommit { + header := app.deliverState.ctx.BlockHeader() + retainHeight := app.GetBlockRetentionHeight(header.Height) + + // Write the DeliverTx state into branched storage and commit the MultiStore. + // The write to the DeliverTx state writes all state transitions to the root + // MultiStore (app.cms) so when Commit() is called is persists those values. + app.deliverState.ms.Write() + commitID := app.cms.Commit() + + res := abci.ResponseCommit{ + Data: commitID.Hash, + RetainHeight: retainHeight, + } + + // call the hooks with the Commit message + for _, streamingListener := range app.abciListeners { + if err := streamingListener.ListenCommit(app.deliverState.ctx, res); err != nil { + panic(sdkerrors.Wrapf(err, "Commit listening hook failed, height: %d", header.Height)) + } + } + + app.logger.Info("commit synced", "commit", fmt.Sprintf("%X", commitID)) + ... +} +``` + +#### Error Handling And Async Consumers + +`ABCIListener`s are called synchronously inside the consensus state machine, the returned error causes panic which in turn halt the consensus state machine. The implementer should be careful not to break consensus unexpectedly or slow down it too much. + +For some async use cases, one can spawn a go-routine internanlly to avoid slow down consensus state machine, and handle the errors internally and always returns `nil` to avoid halting consensus state machine on error. + +Furthermore, for most of the cases, we only need to use the builtin file streamer to listen to state changes directly inside cosmos-sdk, the other consumers should subscribe to the file streamer output externally. + +#### File Streamer + +We provide a minimal filesystem based implementation inside cosmos-sdk, and provides options to write output files reliably, the output files can be further consumed by external consumers, so most of the state listeners actually don't need to live inside the sdk and node, which improves the node robustness and simplify sdk internals. + +The file streamer can be wired in app like this: +```golang +exposeStoreKeys := ... // decide the key list to listen +service, err := file.NewStreamingService(streamingDir, "", exposeStoreKeys, appCodec, logger) +bApp.SetStreamingService(service) +``` + #### Plugin system We propose a plugin architecture to load and run `StreamingService` implementations. 
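As an example of the kind of consumer such a plugin could load, the sketch below illustrates the asynchronous pattern recommended in the error-handling section above. It is illustrative only; `asyncDeliverTxListener`, the buffer size, and the `process` stub are assumptions rather than SDK API. The hook enqueues the message and always returns `nil`, so consensus is never halted or slowed down, while an internal goroutine does the slow work and handles its own errors; the other hooks would follow the same pattern.

```go
// asyncDeliverTxListener is a hypothetical listener that never blocks the
// consensus state machine: hooks only enqueue messages and always return nil,
// and a background goroutine does the real work, handling errors internally.
type asyncDeliverTxListener struct {
	ch     chan deliverTxMsg
	logger log.Logger
}

type deliverTxMsg struct {
	req abci.RequestDeliverTx
	res abci.ResponseDeliverTx
}

func newAsyncDeliverTxListener(logger log.Logger) *asyncDeliverTxListener {
	l := &asyncDeliverTxListener{ch: make(chan deliverTxMsg, 1024), logger: logger}
	go l.run()
	return l
}

func (l *asyncDeliverTxListener) run() {
	for msg := range l.ch {
		// ship msg to an external system (queue, database, ...); errors are
		// only logged, never propagated back into the state machine.
		if err := l.process(msg); err != nil {
			l.logger.Error("async state listener failed", "err", err)
		}
	}
}

// process is a stub standing in for the real (possibly slow) delivery logic.
func (l *asyncDeliverTxListener) process(deliverTxMsg) error { return nil }

// ListenDeliverTx enqueues the message without blocking and always returns nil.
func (l *asyncDeliverTxListener) ListenDeliverTx(ctx types.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error {
	select {
	case l.ch <- deliverTxMsg{req: req, res: res}:
	default:
		l.logger.Error("async state listener buffer full, dropping message")
	}
	return nil
}
```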
We will introduce a plugin @@ -535,7 +688,7 @@ These changes will provide a means of subscribing to KVStore state changes in re ### Backwards Compatibility -* This ADR changes the `MultiStore`, `CacheWrap`, and `CacheWrapper` interfaces, implementations supporting the previous version of these interfaces will not support the new ones +* This ADR changes the `CommitMultiStore` interface, implementations supporting the previous version of these interfaces will not support the new ones ### Positive @@ -543,7 +696,7 @@ These changes will provide a means of subscribing to KVStore state changes in re ### Negative -* Changes `MultiStore`, `CacheWrap`, and `CacheWrapper` interfaces +* Changes `CommitMultiStore`interface ### Neutral diff --git a/proto/cosmos/base/store/v1beta1/listening.proto b/proto/cosmos/base/store/v1beta1/listening.proto index 359997109c10..753f7c165512 100644 --- a/proto/cosmos/base/store/v1beta1/listening.proto +++ b/proto/cosmos/base/store/v1beta1/listening.proto @@ -1,6 +1,8 @@ syntax = "proto3"; package cosmos.base.store.v1beta1; +import "tendermint/abci/types.proto"; + option go_package = "github.com/cosmos/cosmos-sdk/store/types"; // StoreKVPair is a KVStore KVPair used for listening to state changes (Sets and Deletes) @@ -14,3 +16,19 @@ message StoreKVPair { bytes key = 3; bytes value = 4; } + +// BlockMetadata contains all the abci event data of a block +// the file streamer dump them into files together with the state changes. +message BlockMetadata { + // DeliverTx encapulate deliver tx request and response. + message DeliverTx { + tendermint.abci.RequestDeliverTx request = 1; + tendermint.abci.ResponseDeliverTx response = 2; + } + tendermint.abci.RequestBeginBlock request_begin_block = 1; + tendermint.abci.ResponseBeginBlock response_begin_block = 2; + repeated DeliverTx deliver_txs = 3; + tendermint.abci.RequestEndBlock request_end_block = 4; + tendermint.abci.ResponseEndBlock response_end_block = 5; + tendermint.abci.ResponseCommit response_commit = 6; +} diff --git a/server/config/config.go b/server/config/config.go index e34de76614d3..75b25df63ed5 100644 --- a/server/config/config.go +++ b/server/config/config.go @@ -218,6 +218,15 @@ type ( Keys []string `mapstructure:"keys"` WriteDir string `mapstructure:"write_dir"` Prefix string `mapstructure:"prefix"` + // OutputMetadata specifies if output the block metadata file which includes + // the abci requests/responses, otherwise only the data file is outputted. + OutputMetadata bool `mapstructure:"output-metadata"` + // StopNodeOnError specifies if propagate the streamer errors to the consensus + // state machine, it's nesserary for data integrity of output. + StopNodeOnError bool `mapstructure:"stop-node-on-error"` + // Fsync specifies if calling fsync after writing the files, it slows down + // the commit, but don't lose data in face of system crash. + Fsync bool `mapstructure:"fsync"` } ) @@ -320,7 +329,13 @@ func DefaultConfig() *Config { }, Streamers: StreamersConfig{ File: FileStreamerConfig{ - Keys: []string{"*"}, + Keys: []string{"*"}, + WriteDir: "data/file_streamer", + OutputMetadata: true, + StopNodeOnError: true, + // NOTICE: the default config don't protect the streamer data integrity + // in face of system crash. + Fsync: false, }, }, } diff --git a/server/config/toml.go b/server/config/toml.go index e69f3845222b..2d46af2a6ed2 100644 --- a/server/config/toml.go +++ b/server/config/toml.go @@ -248,6 +248,24 @@ streamers = [{{ range .Store.Streamers }}{{ printf "%q, " . 
}}{{end}}] keys = [{{ range .Streamers.File.Keys }}{{ printf "%q, " . }}{{end}}] write_dir = "{{ .Streamers.File.WriteDir }}" prefix = "{{ .Streamers.File.Prefix }}" +<<<<<<< HEAD +======= +# output-metadata specifies if output the metadata file which includes the abci request/responses +# during processing the block. +output-metadata = "{{ .Streamers.File.OutputMetadata }}" +# stop-node-on-error specifies if propagate the file streamer errors to consensus state machine. +stop-node-on-error = "{{ .Streamers.File.StopNodeOnError }}" +# fsync specifies if call fsync after writing the files. +fsync = "{{ .Streamers.File.Fsync }}" + +############################################################################### +### Mempool ### +############################################################################### + +[mempool] +max-txs = "{{ .Mempool.MaxTxs }}" + +>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516)) ` var configTemplate *template.Template diff --git a/simapp/app.go b/simapp/app.go index 5261b552827c..ada946b8bcf0 100644 --- a/simapp/app.go +++ b/simapp/app.go @@ -230,7 +230,7 @@ func NewSimApp( memKeys := sdk.NewMemoryStoreKeys(capabilitytypes.MemStoreKey, "testingkey") // load state streaming if enabled - if _, _, err := streaming.LoadStreamingServices(bApp, appOpts, appCodec, keys); err != nil { + if _, _, err := streaming.LoadStreamingServices(bApp, appOpts, appCodec, logger, keys); err != nil { fmt.Printf("failed to load state streaming: %s", err) os.Exit(1) } diff --git a/simapp/app_v2.go b/simapp/app_v2.go new file mode 100644 index 000000000000..9dcba3bb85e2 --- /dev/null +++ b/simapp/app_v2.go @@ -0,0 +1,440 @@ +//go:build !app_v1 + +package simapp + +import ( + _ "embed" + "fmt" + "io" + "os" + "path/filepath" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" + dbm "github.com/tendermint/tm-db" + + "cosmossdk.io/depinject" + + "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/runtime" + "github.com/cosmos/cosmos-sdk/server" + "github.com/cosmos/cosmos-sdk/server/api" + "github.com/cosmos/cosmos-sdk/server/config" + servertypes "github.com/cosmos/cosmos-sdk/server/types" + "github.com/cosmos/cosmos-sdk/store/streaming" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + "github.com/cosmos/cosmos-sdk/testutil/testdata_pulsar" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + "github.com/cosmos/cosmos-sdk/x/auth" + authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" + authsims "github.com/cosmos/cosmos-sdk/x/auth/simulation" + _ "github.com/cosmos/cosmos-sdk/x/auth/tx/config" // import for side-effects + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + "github.com/cosmos/cosmos-sdk/x/auth/vesting" + vestingtypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types" + "github.com/cosmos/cosmos-sdk/x/authz" + authzkeeper "github.com/cosmos/cosmos-sdk/x/authz/keeper" + authzmodule "github.com/cosmos/cosmos-sdk/x/authz/module" + "github.com/cosmos/cosmos-sdk/x/bank" + bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + "github.com/cosmos/cosmos-sdk/x/capability" + capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + consensus 
"github.com/cosmos/cosmos-sdk/x/consensus" + consensuskeeper "github.com/cosmos/cosmos-sdk/x/consensus/keeper" + consensustypes "github.com/cosmos/cosmos-sdk/x/consensus/types" + "github.com/cosmos/cosmos-sdk/x/crisis" + crisiskeeper "github.com/cosmos/cosmos-sdk/x/crisis/keeper" + crisistypes "github.com/cosmos/cosmos-sdk/x/crisis/types" + distr "github.com/cosmos/cosmos-sdk/x/distribution" + distrkeeper "github.com/cosmos/cosmos-sdk/x/distribution/keeper" + distrtypes "github.com/cosmos/cosmos-sdk/x/distribution/types" + "github.com/cosmos/cosmos-sdk/x/evidence" + evidencekeeper "github.com/cosmos/cosmos-sdk/x/evidence/keeper" + evidencetypes "github.com/cosmos/cosmos-sdk/x/evidence/types" + "github.com/cosmos/cosmos-sdk/x/feegrant" + feegrantkeeper "github.com/cosmos/cosmos-sdk/x/feegrant/keeper" + feegrantmodule "github.com/cosmos/cosmos-sdk/x/feegrant/module" + "github.com/cosmos/cosmos-sdk/x/genutil" + genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types" + "github.com/cosmos/cosmos-sdk/x/gov" + govclient "github.com/cosmos/cosmos-sdk/x/gov/client" + govkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" + "github.com/cosmos/cosmos-sdk/x/group" + groupkeeper "github.com/cosmos/cosmos-sdk/x/group/keeper" + groupmodule "github.com/cosmos/cosmos-sdk/x/group/module" + "github.com/cosmos/cosmos-sdk/x/mint" + mintkeeper "github.com/cosmos/cosmos-sdk/x/mint/keeper" + minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" + "github.com/cosmos/cosmos-sdk/x/nft" + nftkeeper "github.com/cosmos/cosmos-sdk/x/nft/keeper" + nftmodule "github.com/cosmos/cosmos-sdk/x/nft/module" + "github.com/cosmos/cosmos-sdk/x/params" + paramsclient "github.com/cosmos/cosmos-sdk/x/params/client" + paramskeeper "github.com/cosmos/cosmos-sdk/x/params/keeper" + paramstypes "github.com/cosmos/cosmos-sdk/x/params/types" + "github.com/cosmos/cosmos-sdk/x/slashing" + slashingkeeper "github.com/cosmos/cosmos-sdk/x/slashing/keeper" + slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" + "github.com/cosmos/cosmos-sdk/x/staking" + stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + "github.com/cosmos/cosmos-sdk/x/upgrade" + upgradeclient "github.com/cosmos/cosmos-sdk/x/upgrade/client" + upgradekeeper "github.com/cosmos/cosmos-sdk/x/upgrade/keeper" + upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" +) + +var ( + // DefaultNodeHome default home directories for the application daemon + DefaultNodeHome string + + // ModuleBasics defines the module BasicManager is in charge of setting up basic, + // non-dependant module elements, such as codec registration + // and genesis verification. 
+ ModuleBasics = module.NewBasicManager( + auth.AppModuleBasic{}, + genutil.NewAppModuleBasic(genutiltypes.DefaultMessageValidator), + bank.AppModuleBasic{}, + capability.AppModuleBasic{}, + staking.AppModuleBasic{}, + mint.AppModuleBasic{}, + distr.AppModuleBasic{}, + gov.NewAppModuleBasic( + []govclient.ProposalHandler{ + paramsclient.ProposalHandler, + upgradeclient.LegacyProposalHandler, + upgradeclient.LegacyCancelProposalHandler, + }, + ), + params.AppModuleBasic{}, + crisis.AppModuleBasic{}, + slashing.AppModuleBasic{}, + feegrantmodule.AppModuleBasic{}, + upgrade.AppModuleBasic{}, + evidence.AppModuleBasic{}, + authzmodule.AppModuleBasic{}, + groupmodule.AppModuleBasic{}, + vesting.AppModuleBasic{}, + nftmodule.AppModuleBasic{}, + consensus.AppModuleBasic{}, + ) +) + +var ( + _ runtime.AppI = (*SimApp)(nil) + _ servertypes.Application = (*SimApp)(nil) +) + +// SimApp extends an ABCI application, but with most of its parameters exported. +// They are exported for convenience in creating helper functions, as object +// capabilities aren't needed for testing. +type SimApp struct { + *runtime.App + legacyAmino *codec.LegacyAmino + appCodec codec.Codec + txConfig client.TxConfig + interfaceRegistry codectypes.InterfaceRegistry + + // keys to access the substores + keys map[string]*storetypes.KVStoreKey + + // keepers + AccountKeeper authkeeper.AccountKeeper + BankKeeper bankkeeper.Keeper + CapabilityKeeper *capabilitykeeper.Keeper + StakingKeeper *stakingkeeper.Keeper + SlashingKeeper slashingkeeper.Keeper + MintKeeper mintkeeper.Keeper + DistrKeeper distrkeeper.Keeper + GovKeeper *govkeeper.Keeper + CrisisKeeper *crisiskeeper.Keeper + UpgradeKeeper upgradekeeper.Keeper + ParamsKeeper paramskeeper.Keeper + AuthzKeeper authzkeeper.Keeper + EvidenceKeeper evidencekeeper.Keeper + FeeGrantKeeper feegrantkeeper.Keeper + GroupKeeper groupkeeper.Keeper + NFTKeeper nftkeeper.Keeper + ConsensusParamsKeeper consensuskeeper.Keeper + + // simulation manager + sm *module.SimulationManager +} + +func init() { + userHomeDir, err := os.UserHomeDir() + if err != nil { + panic(err) + } + + DefaultNodeHome = filepath.Join(userHomeDir, ".simapp") +} + +// NewSimApp returns a reference to an initialized SimApp. +func NewSimApp( + logger log.Logger, + db dbm.DB, + traceStore io.Writer, + loadLatest bool, + appOpts servertypes.AppOptions, + baseAppOptions ...func(*baseapp.BaseApp), +) *SimApp { + var ( + app = &SimApp{} + appBuilder *runtime.AppBuilder + // Below we could construct and set an application specific mempool and ABCI 1.0 Prepare and Process Proposal + // handlers. These defaults are already set in the SDK's BaseApp, this shows an example of how to override + // them. + // + //nonceMempool = mempool.NewNonceMempool() + //mempoolOpt = baseapp.SetMempool(nonceMempool) + //prepareOpt = func(app *baseapp.BaseApp) { + // app.SetPrepareProposal(app.DefaultPrepareProposal()) + //} + //processOpt = func(app *baseapp.BaseApp) { + // app.SetProcessProposal(app.DefaultProcessProposal()) + //} + // + // Further down we'd set the options in the AppBuilder like below. + //baseAppOptions = append(baseAppOptions, mempoolOpt, prepareOpt, processOpt) + + // merge the AppConfig and other configuration in one config + appConfig = depinject.Configs( + AppConfig, + depinject.Supply( + // supply the application options + appOpts, + + // ADVANCED CONFIGURATION + + // + // AUTH + // + // For providing a custom function required in auth to generate custom account types + // add it below. 
By default the auth module uses simulation.RandomGenesisAccounts. + // + // authtypes.RandomGenesisAccountsFn(simulation.RandomGenesisAccounts), + + // For providing a custom a base account type add it below. + // By default the auth module uses authtypes.ProtoBaseAccount(). + // + // func() authtypes.AccountI { return authtypes.ProtoBaseAccount() }, + + // + // MINT + // + + // For providing a custom inflation function for x/mint add here your + // custom function that implements the minttypes.InflationCalculationFn + // interface. + ), + ) + ) + + if err := depinject.Inject(appConfig, + &appBuilder, + &app.appCodec, + &app.legacyAmino, + &app.txConfig, + &app.interfaceRegistry, + &app.AccountKeeper, + &app.BankKeeper, + &app.CapabilityKeeper, + &app.StakingKeeper, + &app.SlashingKeeper, + &app.MintKeeper, + &app.DistrKeeper, + &app.GovKeeper, + &app.CrisisKeeper, + &app.UpgradeKeeper, + &app.ParamsKeeper, + &app.AuthzKeeper, + &app.EvidenceKeeper, + &app.FeeGrantKeeper, + &app.GroupKeeper, + &app.NFTKeeper, + &app.ConsensusParamsKeeper, + ); err != nil { + panic(err) + } + + app.App = appBuilder.Build(logger, db, traceStore, baseAppOptions...) + + // load state streaming if enabled + if _, _, err := streaming.LoadStreamingServices(app.App.BaseApp, appOpts, app.appCodec, logger, app.keys); err != nil { + fmt.Printf("failed to load state streaming: %s", err) + os.Exit(1) + } + + /**** Module Options ****/ + + // Sets the version setter for the upgrade module + app.UpgradeKeeper.SetVersionSetter(app.BaseApp) + + // NOTE: The genutils module must occur after staking so that pools are + // properly initialized with tokens from genesis accounts. + // NOTE: The genutils module must also occur after auth so that it can access the params from auth. + // NOTE: Capability module must occur first so that it can initialize any capabilities + // so that other modules that want to create or claim capabilities afterwards in InitChain + // can do so safely. + genesisModuleOrder := []string{ + capabilitytypes.ModuleName, authtypes.ModuleName, banktypes.ModuleName, + distrtypes.ModuleName, stakingtypes.ModuleName, slashingtypes.ModuleName, govtypes.ModuleName, + minttypes.ModuleName, crisistypes.ModuleName, genutiltypes.ModuleName, evidencetypes.ModuleName, authz.ModuleName, + feegrant.ModuleName, nft.ModuleName, group.ModuleName, paramstypes.ModuleName, upgradetypes.ModuleName, + vestingtypes.ModuleName, consensustypes.ModuleName, + } + app.ModuleManager.SetOrderInitGenesis(genesisModuleOrder...) + app.ModuleManager.SetOrderExportGenesis(genesisModuleOrder...) + + // Uncomment if you want to set a custom migration order here. + // app.ModuleManager.SetOrderMigrations(custom order) + + app.ModuleManager.RegisterInvariants(app.CrisisKeeper) + + // RegisterUpgradeHandlers is used for registering any on-chain upgrades. + // Make sure it's called after `app.ModuleManager` and `app.configurator` are set. 
+ app.RegisterUpgradeHandlers() + + // add test gRPC service for testing gRPC queries in isolation + testdata_pulsar.RegisterQueryServer(app.GRPCQueryRouter(), testdata_pulsar.QueryImpl{}) + + // create the simulation manager and define the order of the modules for deterministic simulations + // + // NOTE: this is not required apps that don't use the simulator for fuzz testing + // transactions + overrideModules := map[string]module.AppModuleSimulation{ + authtypes.ModuleName: auth.NewAppModule(app.appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts, app.GetSubspace(authtypes.ModuleName)), + } + app.sm = module.NewSimulationManagerFromAppModules(app.ModuleManager.Modules, overrideModules) + + app.sm.RegisterStoreDecoders() + + // initialize stores + app.MountKVStores(app.keys) + + // initialize BaseApp + app.SetInitChainer(app.InitChainer) + + if err := app.Load(loadLatest); err != nil { + panic(err) + } + + return app +} + +// Name returns the name of the App +func (app *SimApp) Name() string { return app.BaseApp.Name() } + +// InitChainer application update at chain initialization +func (app *SimApp) InitChainer(ctx sdk.Context, req abci.RequestInitChain) abci.ResponseInitChain { + app.UpgradeKeeper.SetModuleVersionMap(ctx, app.ModuleManager.GetVersionMap()) + return app.App.InitChainer(ctx, req) +} + +// LoadHeight loads a particular height +func (app *SimApp) LoadHeight(height int64) error { + return app.LoadVersion(height) +} + +// LegacyAmino returns SimApp's amino codec. +// +// NOTE: This is solely to be used for testing purposes as it may be desirable +// for modules to register their own custom testing types. +func (app *SimApp) LegacyAmino() *codec.LegacyAmino { + return app.legacyAmino +} + +// AppCodec returns SimApp's app codec. +// +// NOTE: This is solely to be used for testing purposes as it may be desirable +// for modules to register their own custom testing types. +func (app *SimApp) AppCodec() codec.Codec { + return app.appCodec +} + +// InterfaceRegistry returns SimApp's InterfaceRegistry +func (app *SimApp) InterfaceRegistry() codectypes.InterfaceRegistry { + return app.interfaceRegistry +} + +// TxConfig returns SimApp's TxConfig +func (app *SimApp) TxConfig() client.TxConfig { + return app.txConfig +} + +// GetKey returns the KVStoreKey for the provided store key. +// +// NOTE: This is solely to be used for testing purposes. +func (app *SimApp) GetKey(storeKey string) *storetypes.KVStoreKey { + kvsk := app.keys[storeKey] + if kvsk != nil { + return kvsk + } + + sk := app.UnsafeFindStoreKey(storeKey) + kvStoreKey, ok := sk.(*storetypes.KVStoreKey) + if !ok { + return nil + } + return kvStoreKey +} + +// GetSubspace returns a param subspace for a given module name. +// +// NOTE: This is solely to be used for testing purposes. +func (app *SimApp) GetSubspace(moduleName string) paramstypes.Subspace { + subspace, _ := app.ParamsKeeper.GetSubspace(moduleName) + return subspace +} + +// SimulationManager implements the SimulationApp interface +func (app *SimApp) SimulationManager() *module.SimulationManager { + return app.sm +} + +// RegisterAPIRoutes registers all application module routes with the provided +// API server. 
+func (app *SimApp) RegisterAPIRoutes(apiSvr *api.Server, apiConfig config.APIConfig) { + app.App.RegisterAPIRoutes(apiSvr, apiConfig) + // register swagger API from root so that other applications can override easily + if err := server.RegisterSwaggerAPI(apiSvr.ClientCtx, apiSvr.Router, apiConfig.Swagger); err != nil { + panic(err) + } +} + +// GetMaccPerms returns a copy of the module account permissions +// +// NOTE: This is solely to be used for testing purposes. +func GetMaccPerms() map[string][]string { + dup := make(map[string][]string) + for _, perms := range moduleAccPerms { + dup[perms.Account] = perms.Permissions + } + + return dup +} + +// BlockedAddresses returns all the app's blocked account addresses. +func BlockedAddresses() map[string]bool { + result := make(map[string]bool) + + if len(blockAccAddrs) > 0 { + for _, addr := range blockAccAddrs { + result[addr] = true + } + } else { + for addr := range GetMaccPerms() { + result[addr] = true + } + } + + return result +} diff --git a/store/cachekv/store.go b/store/cachekv/store.go index a70becf13587..0ebc52268548 100644 --- a/store/cachekv/store.go +++ b/store/cachekv/store.go @@ -9,7 +9,6 @@ import ( dbm "github.com/tendermint/tm-db" "github.com/cosmos/cosmos-sdk/internal/conv" - "github.com/cosmos/cosmos-sdk/store/listenkv" "github.com/cosmos/cosmos-sdk/store/tracekv" "github.com/cosmos/cosmos-sdk/store/types" "github.com/cosmos/cosmos-sdk/types/kv" @@ -158,11 +157,6 @@ func (store *Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types return NewStore(tracekv.NewStore(store, w, tc)) } -// CacheWrapWithListeners implements the CacheWrapper interface. -func (store *Store) CacheWrapWithListeners(storeKey types.StoreKey, listeners []types.WriteListener) types.CacheWrap { - return NewStore(listenkv.NewStore(store, storeKey, listeners)) -} - //---------------------------------------- // Iteration diff --git a/store/cachemulti/store.go b/store/cachemulti/store.go index deb1d46272dd..77e80e8d6900 100644 --- a/store/cachemulti/store.go +++ b/store/cachemulti/store.go @@ -8,7 +8,6 @@ import ( "github.com/cosmos/cosmos-sdk/store/cachekv" "github.com/cosmos/cosmos-sdk/store/dbadapter" - "github.com/cosmos/cosmos-sdk/store/listenkv" "github.com/cosmos/cosmos-sdk/store/tracekv" "github.com/cosmos/cosmos-sdk/store/types" ) @@ -31,8 +30,6 @@ type Store struct { traceWriter io.Writer traceContext types.TraceContext - - listeners map[types.StoreKey][]types.WriteListener } var _ types.CacheMultiStore = Store{} @@ -43,18 +40,13 @@ var _ types.CacheMultiStore = Store{} func NewFromKVStore( store types.KVStore, stores map[types.StoreKey]types.CacheWrapper, keys map[string]types.StoreKey, traceWriter io.Writer, traceContext types.TraceContext, - listeners map[types.StoreKey][]types.WriteListener, ) Store { - if listeners == nil { - listeners = make(map[types.StoreKey][]types.WriteListener) - } cms := Store{ db: cachekv.NewStore(store), stores: make(map[types.StoreKey]types.CacheWrap, len(stores)), keys: keys, traceWriter: traceWriter, traceContext: traceContext, - listeners: listeners, } for key, store := range stores { @@ -65,9 +57,6 @@ func NewFromKVStore( store = tracekv.NewStore(store.(types.KVStore), cms.traceWriter, tctx) } - if cms.ListeningEnabled(key) { - store = listenkv.NewStore(store.(types.KVStore), key, listeners[key]) - } cms.stores[key] = cachekv.NewStore(store.(types.KVStore)) } @@ -78,9 +67,9 @@ func NewFromKVStore( // CacheWrapper objects. Each CacheWrapper store is a branched store. 
func NewStore( db dbm.DB, stores map[types.StoreKey]types.CacheWrapper, keys map[string]types.StoreKey, - traceWriter io.Writer, traceContext types.TraceContext, listeners map[types.StoreKey][]types.WriteListener, + traceWriter io.Writer, traceContext types.TraceContext, ) Store { - return NewFromKVStore(dbadapter.Store{DB: db}, stores, keys, traceWriter, traceContext, listeners) + return NewFromKVStore(dbadapter.Store{DB: db}, stores, keys, traceWriter, traceContext) } func newCacheMultiStoreFromCMS(cms Store) Store { @@ -89,8 +78,7 @@ func newCacheMultiStoreFromCMS(cms Store) Store { stores[k] = v } - // don't pass listeners to nested cache store. - return NewFromKVStore(cms.db, stores, nil, cms.traceWriter, cms.traceContext, nil) + return NewFromKVStore(cms.db, stores, nil, cms.traceWriter, cms.traceContext) } // SetTracer sets the tracer for the MultiStore that the underlying @@ -121,6 +109,7 @@ func (cms Store) TracingEnabled() bool { return cms.traceWriter != nil } +<<<<<<< HEAD // AddListeners adds listeners for a specific KVStore func (cms Store) AddListeners(key types.StoreKey, listeners []types.WriteListener) { if ls, ok := cms.listeners[key]; ok { @@ -136,6 +125,11 @@ func (cms Store) ListeningEnabled(key types.StoreKey) bool { return len(ls) != 0 } return false +======= +// LatestVersion returns the branch version of the store +func (cms Store) LatestVersion() int64 { + panic("cannot get latest version from branch cached multi-store") +>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516)) } // GetStoreType returns the type of the store. @@ -161,11 +155,6 @@ func (cms Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.Cac return cms.CacheWrap() } -// CacheWrapWithListeners implements the CacheWrapper interface. -func (cms Store) CacheWrapWithListeners(_ types.StoreKey, _ []types.WriteListener) types.CacheWrap { - return cms.CacheWrap() -} - // Implements MultiStore. func (cms Store) CacheMultiStore() types.CacheMultiStore { return newCacheMultiStoreFromCMS(cms) diff --git a/store/dbadapter/store.go b/store/dbadapter/store.go index 2f0ceb5df54a..e9ea4f847d14 100644 --- a/store/dbadapter/store.go +++ b/store/dbadapter/store.go @@ -6,7 +6,6 @@ import ( dbm "github.com/tendermint/tm-db" "github.com/cosmos/cosmos-sdk/store/cachekv" - "github.com/cosmos/cosmos-sdk/store/listenkv" "github.com/cosmos/cosmos-sdk/store/tracekv" "github.com/cosmos/cosmos-sdk/store/types" ) @@ -86,10 +85,5 @@ func (dsa Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.Ca return cachekv.NewStore(tracekv.NewStore(dsa, w, tc)) } -// CacheWrapWithListeners implements the CacheWrapper interface. -func (dsa Store) CacheWrapWithListeners(storeKey types.StoreKey, listeners []types.WriteListener) types.CacheWrap { - return cachekv.NewStore(listenkv.NewStore(dsa, storeKey, listeners)) -} - // dbm.DB implements KVStore so we can CacheKVStore it. 
var _ types.KVStore = Store{} diff --git a/store/dbadapter/store_test.go b/store/dbadapter/store_test.go index 9f8ac71b25cf..658b9d1b999e 100644 --- a/store/dbadapter/store_test.go +++ b/store/dbadapter/store_test.go @@ -84,7 +84,4 @@ func TestCacheWraps(t *testing.T) { cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil) require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace) - - cacheWrappedWithListeners := store.CacheWrapWithListeners(nil, nil) - require.IsType(t, &cachekv.Store{}, cacheWrappedWithListeners) } diff --git a/store/gaskv/store.go b/store/gaskv/store.go index 845e59cf9363..85bf598d0626 100644 --- a/store/gaskv/store.go +++ b/store/gaskv/store.go @@ -92,11 +92,6 @@ func (gs *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.Cac panic("cannot CacheWrapWithTrace a GasKVStore") } -// CacheWrapWithListeners implements the CacheWrapper interface. -func (gs *Store) CacheWrapWithListeners(_ types.StoreKey, _ []types.WriteListener) types.CacheWrap { - panic("cannot CacheWrapWithListeners a GasKVStore") -} - func (gs *Store) iterator(start, end []byte, ascending bool) types.Iterator { var parent types.Iterator if ascending { diff --git a/store/gaskv/store_test.go b/store/gaskv/store_test.go index 2401a9805d94..701f44217921 100644 --- a/store/gaskv/store_test.go +++ b/store/gaskv/store_test.go @@ -25,7 +25,6 @@ func TestGasKVStoreBasic(t *testing.T) { require.Equal(t, types.StoreTypeDB, st.GetStoreType()) require.Panics(t, func() { st.CacheWrap() }) require.Panics(t, func() { st.CacheWrapWithTrace(nil, nil) }) - require.Panics(t, func() { st.CacheWrapWithListeners(nil, nil) }) require.Panics(t, func() { st.Set(nil, []byte("value")) }, "setting a nil key should panic") require.Panics(t, func() { st.Set([]byte(""), []byte("value")) }, "setting an empty key should panic") diff --git a/store/iavl/store.go b/store/iavl/store.go index 21b1e70069df..ac43234ea0d9 100644 --- a/store/iavl/store.go +++ b/store/iavl/store.go @@ -15,7 +15,11 @@ import ( pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types" "github.com/cosmos/cosmos-sdk/store/cachekv" +<<<<<<< HEAD "github.com/cosmos/cosmos-sdk/store/listenkv" +======= + pruningtypes "github.com/cosmos/cosmos-sdk/store/pruning/types" +>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516)) "github.com/cosmos/cosmos-sdk/store/tracekv" "github.com/cosmos/cosmos-sdk/store/types" "github.com/cosmos/cosmos-sdk/telemetry" @@ -191,11 +195,6 @@ func (st *Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.Ca return cachekv.NewStore(tracekv.NewStore(st, w, tc)) } -// CacheWrapWithListeners implements the CacheWrapper interface. -func (st *Store) CacheWrapWithListeners(storeKey types.StoreKey, listeners []types.WriteListener) types.CacheWrap { - return cachekv.NewStore(listenkv.NewStore(st, storeKey, listeners)) -} - // Implements types.KVStore. 
func (st *Store) Set(key, value []byte) { types.AssertValidKey(key) diff --git a/store/iavl/store_test.go b/store/iavl/store_test.go index f1cd586e36a0..dabe1e37f630 100644 --- a/store/iavl/store_test.go +++ b/store/iavl/store_test.go @@ -655,7 +655,4 @@ func TestCacheWraps(t *testing.T) { cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil) require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace) - - cacheWrappedWithListeners := store.CacheWrapWithListeners(nil, nil) - require.IsType(t, &cachekv.Store{}, cacheWrappedWithListeners) } diff --git a/store/listenkv/store.go b/store/listenkv/store.go index dfb6dea46c2f..4595d0fe56d1 100644 --- a/store/listenkv/store.go +++ b/store/listenkv/store.go @@ -141,12 +141,6 @@ func (s *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.Cach panic("cannot CacheWrapWithTrace a ListenKVStore") } -// CacheWrapWithListeners implements the KVStore interface. It panics as a -// Store cannot be cache wrapped. -func (s *Store) CacheWrapWithListeners(_ types.StoreKey, _ []types.WriteListener) types.CacheWrap { - panic("cannot CacheWrapWithListeners a ListenKVStore") -} - // onWrite writes a KVStore operation to all of the WriteListeners func (s *Store) onWrite(delete bool, key, value []byte) { for _, l := range s.listeners { diff --git a/store/listenkv/store_test.go b/store/listenkv/store_test.go index 8d0510ba49ce..44be4120427b 100644 --- a/store/listenkv/store_test.go +++ b/store/listenkv/store_test.go @@ -292,8 +292,3 @@ func TestListenKVStoreCacheWrapWithTrace(t *testing.T) { store := newEmptyListenKVStore(nil) require.Panics(t, func() { store.CacheWrapWithTrace(nil, nil) }) } - -func TestListenKVStoreCacheWrapWithListeners(t *testing.T) { - store := newEmptyListenKVStore(nil) - require.Panics(t, func() { store.CacheWrapWithListeners(nil, nil) }) -} diff --git a/store/mem/mem_test.go b/store/mem/mem_test.go index a2fc6add8a3e..893a4d286fbd 100644 --- a/store/mem/mem_test.go +++ b/store/mem/mem_test.go @@ -33,9 +33,6 @@ func TestStore(t *testing.T) { cacheWrappedWithTrace := db.CacheWrapWithTrace(nil, nil) require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace) - - cacheWrappedWithListeners := db.CacheWrapWithListeners(nil, nil) - require.IsType(t, &cachekv.Store{}, cacheWrappedWithListeners) } func TestCommit(t *testing.T) { diff --git a/store/mem/store.go b/store/mem/store.go index 06d7b63f5506..98d314154c98 100644 --- a/store/mem/store.go +++ b/store/mem/store.go @@ -8,7 +8,11 @@ import ( pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types" "github.com/cosmos/cosmos-sdk/store/cachekv" "github.com/cosmos/cosmos-sdk/store/dbadapter" +<<<<<<< HEAD "github.com/cosmos/cosmos-sdk/store/listenkv" +======= + pruningtypes "github.com/cosmos/cosmos-sdk/store/pruning/types" +>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516)) "github.com/cosmos/cosmos-sdk/store/tracekv" "github.com/cosmos/cosmos-sdk/store/types" ) @@ -47,11 +51,6 @@ func (s Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.Cach return cachekv.NewStore(tracekv.NewStore(s, w, tc)) } -// CacheWrapWithListeners implements the CacheWrapper interface. -func (s Store) CacheWrapWithListeners(storeKey types.StoreKey, listeners []types.WriteListener) types.CacheWrap { - return cachekv.NewStore(listenkv.NewStore(s, storeKey, listeners)) -} - // Commit performs a no-op as entries are persistent between commitments. 
func (s *Store) Commit() (id types.CommitID) { return } diff --git a/store/prefix/store.go b/store/prefix/store.go index 295278a0a853..4f9d5a75e087 100644 --- a/store/prefix/store.go +++ b/store/prefix/store.go @@ -6,7 +6,6 @@ import ( "io" "github.com/cosmos/cosmos-sdk/store/cachekv" - "github.com/cosmos/cosmos-sdk/store/listenkv" "github.com/cosmos/cosmos-sdk/store/tracekv" "github.com/cosmos/cosmos-sdk/store/types" ) @@ -58,11 +57,6 @@ func (s Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.Cach return cachekv.NewStore(tracekv.NewStore(s, w, tc)) } -// CacheWrapWithListeners implements the CacheWrapper interface. -func (s Store) CacheWrapWithListeners(storeKey types.StoreKey, listeners []types.WriteListener) types.CacheWrap { - return cachekv.NewStore(listenkv.NewStore(s, storeKey, listeners)) -} - // Implements KVStore func (s Store) Get(key []byte) []byte { res := s.parent.Get(s.key(key)) diff --git a/store/prefix/store_test.go b/store/prefix/store_test.go index 25e07cbb1ae3..8da075dc9031 100644 --- a/store/prefix/store_test.go +++ b/store/prefix/store_test.go @@ -438,7 +438,4 @@ func TestCacheWraps(t *testing.T) { cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil) require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace) - - cacheWrappedWithListeners := store.CacheWrapWithListeners(nil, nil) - require.IsType(t, &cachekv.Store{}, cacheWrappedWithListeners) } diff --git a/store/rootmulti/store.go b/store/rootmulti/store.go index b6a7eeab4ab1..63865479953a 100644 --- a/store/rootmulti/store.go +++ b/store/rootmulti/store.go @@ -244,7 +244,7 @@ func (rs *Store) loadVersion(ver int64, upgrades *types.StoreUpgrades) error { // If it was deleted, remove all data if upgrades.IsDeleted(key.Name()) { - if err := deleteKVStore(store.(types.KVStore)); err != nil { + if err := deleteKVStore(types.KVStore(store)); err != nil { return errors.Wrapf(err, "failed to delete store %s", key.Name()) } rs.removalMap[key] = true @@ -262,7 +262,7 @@ func (rs *Store) loadVersion(ver int64, upgrades *types.StoreUpgrades) error { } // move all data - if err := moveKVStoreData(oldStore.(types.KVStore), store.(types.KVStore)); err != nil { + if err := moveKVStoreData(types.KVStore(oldStore), types.KVStore(store)); err != nil { return errors.Wrapf(err, "failed to move store %s -> %s", oldName, key.Name()) } @@ -457,19 +457,20 @@ func (rs *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.Cac return rs.CacheWrap() } -// CacheWrapWithListeners implements the CacheWrapper interface. -func (rs *Store) CacheWrapWithListeners(_ types.StoreKey, _ []types.WriteListener) types.CacheWrap { - return rs.CacheWrap() -} - // CacheMultiStore creates ephemeral branch of the multi-store and returns a CacheMultiStore. // It implements the MultiStore interface. func (rs *Store) CacheMultiStore() types.CacheMultiStore { stores := make(map[types.StoreKey]types.CacheWrapper) for k, v := range rs.stores { - stores[k] = v + store := types.KVStore(v) + // Wire the listenkv.Store to allow listeners to observe the writes from the cache store, + // set same listeners on cache store will observe duplicated writes. 
+ if rs.ListeningEnabled(k) { + store = listenkv.NewStore(store, k, rs.listeners[k]) + } + stores[k] = store } - return cachemulti.NewStore(rs.db, stores, rs.keysByName, rs.traceWriter, rs.getTracingContext(), rs.listeners) + return cachemulti.NewStore(rs.db, stores, rs.keysByName, rs.traceWriter, rs.getTracingContext()) } // CacheMultiStoreWithVersion is analogous to CacheMultiStore except that it @@ -479,6 +480,7 @@ func (rs *Store) CacheMultiStore() types.CacheMultiStore { func (rs *Store) CacheMultiStoreWithVersion(version int64) (types.CacheMultiStore, error) { cachedStores := make(map[types.StoreKey]types.CacheWrapper) for key, store := range rs.stores { + var cacheStore types.KVStore switch store.GetStoreType() { case types.StoreTypeIAVL: // If the store is wrapped with an inter-block cache, we must first unwrap @@ -487,19 +489,25 @@ func (rs *Store) CacheMultiStoreWithVersion(version int64) (types.CacheMultiStor // Attempt to lazy-load an already saved IAVL store version. If the // version does not exist or is pruned, an error should be returned. - iavlStore, err := store.(*iavl.Store).GetImmutable(version) + var err error + cacheStore, err = store.(*iavl.Store).GetImmutable(version) if err != nil { return nil, err } - - cachedStores[key] = iavlStore - default: - cachedStores[key] = store + cacheStore = store + } + + // Wire the listenkv.Store to allow listeners to observe the writes from the cache store, + // set same listeners on cache store will observe duplicated writes. + if rs.ListeningEnabled(key) { + cacheStore = listenkv.NewStore(cacheStore, key, rs.listeners[key]) } + + cachedStores[key] = cacheStore } - return cachemulti.NewStore(rs.db, cachedStores, rs.keysByName, rs.traceWriter, rs.getTracingContext(), rs.listeners), nil + return cachemulti.NewStore(rs.db, cachedStores, rs.keysByName, rs.traceWriter, rs.getTracingContext()), nil } // GetStore returns a mounted Store for a given StoreKey. 
If the StoreKey does @@ -528,7 +536,7 @@ func (rs *Store) GetKVStore(key types.StoreKey) types.KVStore { if s == nil { panic(fmt.Sprintf("store does not exist for key: %s", key.Name())) } - store := s.(types.KVStore) + store := types.KVStore(s) if rs.TracingEnabled() { store = tracekv.NewStore(store, rs.traceWriter, rs.getTracingContext()) diff --git a/store/rootmulti/store_test.go b/store/rootmulti/store_test.go index 237a3f24d353..26204bceb21b 100644 --- a/store/rootmulti/store_test.go +++ b/store/rootmulti/store_test.go @@ -741,9 +741,6 @@ func TestCacheWraps(t *testing.T) { cacheWrappedWithTrace := multi.CacheWrapWithTrace(nil, nil) require.IsType(t, cachemulti.Store{}, cacheWrappedWithTrace) - - cacheWrappedWithListeners := multi.CacheWrapWithListeners(nil, nil) - require.IsType(t, cachemulti.Store{}, cacheWrappedWithListeners) } func TestTraceConcurrency(t *testing.T) { diff --git a/store/streaming/constructor.go b/store/streaming/constructor.go index e576f84b83d1..849a220dc393 100644 --- a/store/streaming/constructor.go +++ b/store/streaming/constructor.go @@ -2,20 +2,32 @@ package streaming import ( "fmt" + "os" + "path" "strings" "sync" "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/cosmos/cosmos-sdk/client/flags" "github.com/cosmos/cosmos-sdk/codec" serverTypes "github.com/cosmos/cosmos-sdk/server/types" "github.com/cosmos/cosmos-sdk/store/streaming/file" "github.com/cosmos/cosmos-sdk/store/types" +<<<<<<< HEAD +======= + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/tendermint/tendermint/libs/log" +>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516)) "github.com/spf13/cast" ) // ServiceConstructor is used to construct a streaming service +<<<<<<< HEAD type ServiceConstructor func(opts serverTypes.AppOptions, keys []types.StoreKey, marshaller codec.BinaryCodec) (baseapp.StreamingService, error) +======= +type ServiceConstructor func(serverTypes.AppOptions, []types.StoreKey, codec.BinaryCodec, log.Logger) (baseapp.StreamingService, error) +>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516)) // ServiceType enum for specifying the type of StreamingService type ServiceType int @@ -26,7 +38,23 @@ const ( // add more in the future ) +<<<<<<< HEAD // ServiceTypeFromString returns the streaming.ServiceType corresponding to the provided name +======= +// Streaming option keys +const ( + OptStreamersFilePrefix = "streamers.file.prefix" + OptStreamersFileWriteDir = "streamers.file.write_dir" + OptStreamersFileOutputMetadata = "streamers.file.output-metadata" + OptStreamersFileStopNodeOnError = "streamers.file.stop-node-on-error" + OptStreamersFileFsync = "streamers.file.fsync" + + OptStoreStreamers = "store.streamers" +) + +// ServiceTypeFromString returns the streaming.ServiceType corresponding to the +// provided name. 
+>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516)) func ServiceTypeFromString(name string) ServiceType { switch strings.ToLower(name) { case "file", "f": @@ -63,6 +91,7 @@ func NewServiceConstructor(name string) (ServiceConstructor, error) { return nil, fmt.Errorf("streaming service constructor of type %s not found", ssType.String()) } +<<<<<<< HEAD // NewFileStreamingService is the streaming.ServiceConstructor function for creating a FileStreamingService func NewFileStreamingService(opts serverTypes.AppOptions, keys []types.StoreKey, marshaller codec.BinaryCodec) (baseapp.StreamingService, error) { filePrefix := cast.ToString(opts.Get("streamers.file.prefix")) @@ -73,6 +102,49 @@ func NewFileStreamingService(opts serverTypes.AppOptions, keys []types.StoreKey, // LoadStreamingServices is a function for loading StreamingServices onto the BaseApp using the provided AppOptions, codec, and keys // It returns the WaitGroup and quit channel used to synchronize with the streaming services and any error that occurs during the setup func LoadStreamingServices(bApp *baseapp.BaseApp, appOpts serverTypes.AppOptions, appCodec codec.BinaryCodec, keys map[string]*types.KVStoreKey) ([]baseapp.StreamingService, *sync.WaitGroup, error) { +======= +// NewFileStreamingService is the streaming.ServiceConstructor function for +// creating a FileStreamingService. +func NewFileStreamingService( + opts serverTypes.AppOptions, + keys []types.StoreKey, + marshaller codec.BinaryCodec, + logger log.Logger, +) (baseapp.StreamingService, error) { + homePath := cast.ToString(opts.Get(flags.FlagHome)) + filePrefix := cast.ToString(opts.Get(OptStreamersFilePrefix)) + fileDir := cast.ToString(opts.Get(OptStreamersFileWriteDir)) + outputMetadata := cast.ToBool(opts.Get(OptStreamersFileOutputMetadata)) + stopNodeOnErr := cast.ToBool(opts.Get(OptStreamersFileStopNodeOnError)) + fsync := cast.ToBool(opts.Get(OptStreamersFileFsync)) + + // relative path is based on node home directory. + if !path.IsAbs(fileDir) { + fileDir = path.Join(homePath, fileDir) + } + + // try to create output directory if not exists. + if _, err := os.Stat(fileDir); os.IsNotExist(err) { + if err = os.MkdirAll(fileDir, os.ModePerm); err != nil { + return nil, err + } + } + + return file.NewStreamingService(fileDir, filePrefix, keys, marshaller, logger, outputMetadata, stopNodeOnErr, fsync) +} + +// LoadStreamingServices is a function for loading StreamingServices onto the +// BaseApp using the provided AppOptions, codec, and keys. It returns the +// WaitGroup and quit channel used to synchronize with the streaming services +// and any error that occurs during the setup. 
+func LoadStreamingServices( + bApp *baseapp.BaseApp, + appOpts serverTypes.AppOptions, + appCodec codec.BinaryCodec, + logger log.Logger, + keys map[string]*types.KVStoreKey, +) ([]baseapp.StreamingService, *sync.WaitGroup, error) { +>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516)) // waitgroup and quit channel for optional shutdown coordination of the streaming service(s) wg := new(sync.WaitGroup) // configure state listening capabilities using AppOptions @@ -107,8 +179,15 @@ func LoadStreamingServices(bApp *baseapp.BaseApp, appOpts serverTypes.AppOptions } return nil, nil, err } +<<<<<<< HEAD // generate the streaming service using the constructor, appOptions, and the StoreKeys we want to expose streamingService, err := constructor(appOpts, exposeStoreKeys, appCodec) +======= + + // Generate the streaming service using the constructor, appOptions, and the + // StoreKeys we want to expose. + streamingService, err := constructor(appOpts, exposeStoreKeys, appCodec, logger) +>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516)) if err != nil { // close any services we may have already spun up before hitting the error on this one for _, activeStreamer := range activeStreamers { diff --git a/store/streaming/constructor_test.go b/store/streaming/constructor_test.go index 1b0479cc7d51..06417968a24c 100644 --- a/store/streaming/constructor_test.go +++ b/store/streaming/constructor_test.go @@ -20,7 +20,13 @@ import ( type fakeOptions struct{} -func (f *fakeOptions) Get(string) interface{} { return nil } +func (f *fakeOptions) Get(key string) interface{} { + if key == "streamers.file.write_dir" { + return "data/file_streamer" + + } + return nil +} var ( mockOptions = new(fakeOptions) @@ -38,7 +44,7 @@ func TestStreamingServiceConstructor(t *testing.T) { var expectedType streaming.ServiceConstructor require.IsType(t, expectedType, constructor) - serv, err := constructor(mockOptions, mockKeys, testMarshaller) + serv, err := constructor(mockOptions, mockKeys, testMarshaller, log.NewNopLogger()) require.Nil(t, err) require.IsType(t, &file.StreamingService{}, serv) listeners := serv.Listeners() @@ -76,7 +82,7 @@ func TestLoadStreamingServices(t *testing.T) { for name, tc := range testCases { t.Run(name, func(t *testing.T) { - activeStreamers, _, err := streaming.LoadStreamingServices(bApp, tc.appOpts, encCdc.Codec, keys) + activeStreamers, _, err := streaming.LoadStreamingServices(bApp, tc.appOpts, encCdc.Codec, log.NewNopLogger(), keys) require.NoError(t, err) require.Equal(t, tc.activeStreamersLen, len(activeStreamers)) }) @@ -93,6 +99,8 @@ func (ao streamingAppOptions) Get(o string) interface{} { return []string{"file"} case "streamers.file.keys": return ao.keys + case "streamers.file.write_dir": + return "data/file_streamer" default: return nil } diff --git a/store/streaming/file/README.md b/store/streaming/file/README.md index f5e0dab86a3f..0c34de7f3bea 100644 --- a/store/streaming/file/README.md +++ b/store/streaming/file/README.md @@ -25,42 +25,67 @@ We turn the service on by adding its name, "file", to `store.streamers`- the lis In `streamers.file` we include three configuration parameters for the file streaming service: -1. `streamers.x.keys` contains the list of `StoreKey` names for the KVStores to expose using this service. -In order to expose *all* KVStores, we can include `*` in this list. An empty list is equivalent to turning the service off. +1. 
`streamers.file.keys` contains the list of `StoreKey` names for the KVStores to expose using this service.
+   In order to expose *all* KVStores, we can include `*` in this list. An empty list is equivalent to turning the service off.
 2. `streamers.file.write_dir` contains the path to the directory to write the files to.
 3. `streamers.file.prefix` contains an optional prefix to prepend to the output files to prevent potential collisions
-with other App `StreamingService` output files.
+   with other App `StreamingService` output files.
+4. `streamers.file.output-metadata` specifies whether to output the metadata file; otherwise only the data file is written.
+5. `streamers.file.stop-node-on-error` specifies whether to propagate streamer errors to the consensus state machine; this is necessary for data integrity when the node restarts.
+6. `streamers.file.fsync` specifies whether to call fsync after writing the files; this is necessary for data integrity in case of a system crash, but slows down the commit time.

 ### Encoding

-For each pair of `BeginBlock` requests and responses, a file is created and named `block-{N}-begin`, where N is the block number.
-At the head of this file the length-prefixed protobuf encoded `BeginBlock` request is written.
-At the tail of this file the length-prefixed protobuf encoded `BeginBlock` response is written.
-In between these two encoded messages, the state changes that occurred due to the `BeginBlock` request are written chronologically as
-a series of length-prefixed protobuf encoded `StoreKVPair`s representing `Set` and `Delete` operations within the KVStores the service
-is configured to listen to.
-
-For each pair of `DeliverTx` requests and responses, a file is created and named `block-{N}-tx-{M}` where N is the block number and M
-is the tx number in the block (i.e. 0, 1, 2...).
-At the head of this file the length-prefixed protobuf encoded `DeliverTx` request is written.
-At the tail of this file the length-prefixed protobuf encoded `DeliverTx` response is written.
-In between these two encoded messages, the state changes that occurred due to the `DeliverTx` request are written chronologically as
-a series of length-prefixed protobuf encoded `StoreKVPair`s representing `Set` and `Delete` operations within the KVStores the service
-is configured to listen to.
-
-For each pair of `EndBlock` requests and responses, a file is created and named `block-{N}-end`, where N is the block number.
-At the head of this file the length-prefixed protobuf encoded `EndBlock` request is written.
-At the tail of this file the length-prefixed protobuf encoded `EndBlock` response is written.
-In between these two encoded messages, the state changes that occurred due to the `EndBlock` request are written chronologically as
-a series of length-prefixed protobuf encoded `StoreKVPair`s representing `Set` and `Delete` operations within the KVStores the service
-is configured to listen to.
+For each block, two files are created and named `block-{N}-meta` and `block-{N}-data`, where `N` is the block number.
+
+The meta file contains the protobuf encoded message `BlockMetadata`, which contains the ABCI event requests and responses of the block:
+
+```protobuf
+message BlockMetadata {
+    message DeliverTx {
+        tendermint.abci.RequestDeliverTx request = 1;
+        tendermint.abci.ResponseDeliverTx response = 2;
+    }
+    tendermint.abci.RequestBeginBlock request_begin_block = 1;
+    tendermint.abci.ResponseBeginBlock response_begin_block = 2;
+    repeated DeliverTx deliver_txs = 3;
+    tendermint.abci.RequestEndBlock request_end_block = 4;
+    tendermint.abci.ResponseEndBlock response_end_block = 5;
+    tendermint.abci.ResponseCommit response_commit = 6;
+}
+```
+
+The data file contains a series of length-prefixed protobuf encoded `StoreKVPair`s representing the `Set` and `Delete` operations within the KVStores during the execution of the block.
+
+Both the meta and data files are prefixed with the length of their content so that consumers can detect incomplete files; the length is encoded as 8 big-endian bytes.
+
+The files are written at the ABCI commit event. By default, streamer errors are propagated and interrupt the consensus state machine, but fsync is not called; this gives good performance while accepting the risk of losing data in the rare event of a system crash.

 ### Decoding

-To decode the files written in the above format we read all the bytes from a given file into memory and segment them into proto
-messages based on the length-prefixing of each message. Once segmented, it is known that the first message is the ABCI request,
-the last message is the ABCI response, and that every message in between is a `StoreKVPair`. This enables us to decode each segment into
-the appropriate message type.
+The pseudo-code for decoding is like this:
-
-The type of ABCI req/res, the block height, and the transaction index (where relevant) is known
-from the file name, and the KVStore each `StoreKVPair` originates from is known since the `StoreKey` is included as a field in the proto message.
+```python +def decode_meta_file(file): + bz = file.read(8) + if len(bz) < 8: + raise "incomplete file exception" + size = int.from_bytes(bz, 'big') + + if file.size != size + 8: + raise "incomplete file exception" + + return decode_protobuf_message(BlockMetadata, file) + +def decode_data_file(file): + bz = file.read(8) + if len(bz) < 8: + raise "incomplete file exception" + size = int.from_bytes(bz, 'big') + + if file.size != size + 8: + raise "incomplete file exception" + + while not file.eof(): + yield decode_length_prefixed_protobuf_message(StoreKVStore, file) +``` diff --git a/store/streaming/file/service.go b/store/streaming/file/service.go index b826c7734ae7..a4125ffef692 100644 --- a/store/streaming/file/service.go +++ b/store/streaming/file/service.go @@ -1,65 +1,64 @@ package file import ( +<<<<<<< HEAD "errors" +======= + "bytes" + "context" +>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516)) "fmt" + "io" "os" "path" - "path/filepath" + "sort" "sync" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" "github.com/cosmos/cosmos-sdk/baseapp" "github.com/cosmos/cosmos-sdk/codec" "github.com/cosmos/cosmos-sdk/store/types" sdk "github.com/cosmos/cosmos-sdk/types" +<<<<<<< HEAD +======= + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516)) ) var _ baseapp.StreamingService = &StreamingService{} // StreamingService is a concrete implementation of StreamingService that writes state changes out to files type StreamingService struct { - listeners map[types.StoreKey][]types.WriteListener // the listeners that will be initialized with BaseApp - srcChan <-chan []byte // the channel that all the WriteListeners write their data out to - filePrefix string // optional prefix for each of the generated files - writeDir string // directory to write files into - codec codec.BinaryCodec // marshaller used for re-marshalling the ABCI messages to write them out to the destination files - stateCache [][]byte // cache the protobuf binary encoded StoreKVPairs in the order they are received - stateCacheLock *sync.Mutex // mutex for the state cache - currentBlockNumber int64 // the current block number - currentTxIndex int64 // the index of the current tx - quitChan chan struct{} // channel to synchronize closure -} + storeListeners []*types.MemoryListener // a series of KVStore listeners for each KVStore + filePrefix string // optional prefix for each of the generated files + writeDir string // directory to write files into + codec codec.BinaryCodec // marshaller used for re-marshalling the ABCI messages to write them out to the destination files + logger log.Logger -// IntermediateWriter is used so that we do not need to update the underlying io.Writer -// inside the StoreKVPairWriteListener everytime we begin writing to a new file -type IntermediateWriter struct { - outChan chan<- []byte -} - -// NewIntermediateWriter create an instance of an IntermediateWriter that sends to the provided channel -func NewIntermediateWriter(outChan chan<- []byte) *IntermediateWriter { - return &IntermediateWriter{ - outChan: outChan, - } -} - -// Write satisfies io.Writer -func (iw *IntermediateWriter) Write(b []byte) (int, error) { - iw.outChan <- b - return len(b), nil + currentBlockNumber int64 + blockMetadata types.BlockMetadata + // if write the metadata file, otherwise only data file is outputted. 
+ outputMetadata bool + // if true, when commit failed it will panic and stop the consensus state machine to ensure the + // eventual consistency of the output, otherwise the error is ignored and have the risk of lossing data. + stopNodeOnErr bool + // if true, the file.Sync() is called to make sure the data is persisted onto disk, otherwise it risks lossing data when system crash. + fsync bool } // NewStreamingService creates a new StreamingService for the provided writeDir, (optional) filePrefix, and storeKeys -func NewStreamingService(writeDir, filePrefix string, storeKeys []types.StoreKey, c codec.BinaryCodec) (*StreamingService, error) { - listenChan := make(chan []byte) - iw := NewIntermediateWriter(listenChan) - listener := types.NewStoreKVPairWriteListener(iw, c) - listeners := make(map[types.StoreKey][]types.WriteListener, len(storeKeys)) +func NewStreamingService(writeDir, filePrefix string, storeKeys []types.StoreKey, c codec.BinaryCodec, logger log.Logger, outputMetadata bool, stopNodeOnErr bool, fsync bool) (*StreamingService, error) { + // sort storeKeys for deterministic output + sort.SliceStable(storeKeys, func(i, j int) bool { + return storeKeys[i].Name() < storeKeys[j].Name() + }) + + listeners := make([]*types.MemoryListener, len(storeKeys)) // in this case, we are using the same listener for each Store - for _, key := range storeKeys { - listeners[key] = append(listeners[key], listener) + for i, key := range storeKeys { + listeners[i] = types.NewMemoryListener(key) } // check that the writeDir exists and is writable so that we can catch the error here at initialization if it is not // we don't open a dstFile until we receive our first ABCI message @@ -67,13 +66,14 @@ func NewStreamingService(writeDir, filePrefix string, storeKeys []types.StoreKey return nil, err } return &StreamingService{ - listeners: listeners, - srcChan: listenChan, + storeListeners: listeners, filePrefix: filePrefix, writeDir: writeDir, codec: c, - stateCache: make([][]byte, 0), - stateCacheLock: new(sync.Mutex), + logger: logger, + outputMetadata: outputMetadata, + stopNodeOnErr: stopNodeOnErr, + fsync: fsync, }, nil } @@ -81,12 +81,17 @@ func NewStreamingService(writeDir, filePrefix string, storeKeys []types.StoreKey // It returns the StreamingService's underlying WriteListeners // Use for registering the underlying WriteListeners with the BaseApp func (fss *StreamingService) Listeners() map[types.StoreKey][]types.WriteListener { - return fss.listeners + listeners := make(map[types.StoreKey][]types.WriteListener, len(fss.storeListeners)) + for _, listener := range fss.storeListeners { + listeners[listener.StoreKey()] = []types.WriteListener{listener} + } + return listeners } // ListenBeginBlock satisfies the baseapp.ABCIListener interface // It writes the received BeginBlock request and response and the resulting state changes // out to a file as described in the above the naming schema +<<<<<<< HEAD func (fss *StreamingService) ListenBeginBlock(ctx sdk.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) error { // generate the new file dstFile, err := fss.openBeginBlockFile(req) @@ -133,11 +138,19 @@ func (fss *StreamingService) openBeginBlockFile(req abci.RequestBeginBlock) (*os fileName = fmt.Sprintf("%s-%s", fss.filePrefix, fileName) } return os.OpenFile(filepath.Join(fss.writeDir, fileName), os.O_CREATE|os.O_WRONLY, 0o600) +======= +func (fss *StreamingService) ListenBeginBlock(ctx context.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) (rerr error) { + 
fss.blockMetadata.RequestBeginBlock = &req + fss.blockMetadata.ResponseBeginBlock = &res + fss.currentBlockNumber = req.Header.Height + return nil +>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516)) } // ListenDeliverTx satisfies the baseapp.ABCIListener interface // It writes the received DeliverTx request and response and the resulting state changes // out to a file as described in the above the naming schema +<<<<<<< HEAD func (fss *StreamingService) ListenDeliverTx(ctx sdk.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error { // generate the new file dstFile, err := fss.openDeliverTxFile() @@ -146,21 +159,35 @@ func (fss *StreamingService) ListenDeliverTx(ctx sdk.Context, req abci.RequestDe } // write req to file lengthPrefixedReqBytes, err := fss.codec.MarshalLengthPrefixed(&req) +======= +func (fss *StreamingService) ListenDeliverTx(ctx context.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) (rerr error) { + fss.blockMetadata.DeliverTxs = append(fss.blockMetadata.DeliverTxs, &types.BlockMetadata_DeliverTx{ + Request: &req, + Response: &res, + }) + return nil +} + +// ListenEndBlock satisfies the baseapp.ABCIListener interface +// It writes the received EndBlock request and response and the resulting state changes +// out to a file as described in the above the naming schema +func (fss *StreamingService) ListenEndBlock(ctx context.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) (rerr error) { + fss.blockMetadata.RequestEndBlock = &req + fss.blockMetadata.ResponseEndBlock = &res + return nil +} + +// ListenEndBlock satisfies the baseapp.ABCIListener interface +func (fss *StreamingService) ListenCommit(ctx context.Context, res abci.ResponseCommit) error { + err := fss.doListenCommit(ctx, res) +>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516)) if err != nil { - return err - } - if _, err = dstFile.Write(lengthPrefixedReqBytes); err != nil { - return err - } - // write all state changes cached for this stage to file - fss.stateCacheLock.Lock() - for _, stateChange := range fss.stateCache { - if _, err = dstFile.Write(stateChange); err != nil { - fss.stateCache = nil - fss.stateCacheLock.Unlock() + fss.logger.Error("Commit listening hook failed", "height", fss.currentBlockNumber, "err", err) + if fss.stopNodeOnErr { return err } } +<<<<<<< HEAD // reset cache fss.stateCache = nil fss.stateCacheLock.Unlock() @@ -174,17 +201,23 @@ func (fss *StreamingService) ListenDeliverTx(ctx sdk.Context, req abci.RequestDe } // close file return dstFile.Close() +======= + return nil +>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516)) } -func (fss *StreamingService) openDeliverTxFile() (*os.File, error) { - fileName := fmt.Sprintf("block-%d-tx-%d", fss.currentBlockNumber, fss.currentTxIndex) +func (fss *StreamingService) doListenCommit(ctx context.Context, res abci.ResponseCommit) (err error) { + fss.blockMetadata.ResponseCommit = &res + + // write to target files, the file size is written at the beginning, which can be used to detect completeness. 
+ metaFileName := fmt.Sprintf("block-%d-meta", fss.currentBlockNumber) + dataFileName := fmt.Sprintf("block-%d-data", fss.currentBlockNumber) if fss.filePrefix != "" { - fileName = fmt.Sprintf("%s-%s", fss.filePrefix, fileName) + metaFileName = fmt.Sprintf("%s-%s", fss.filePrefix, metaFileName) + dataFileName = fmt.Sprintf("%s-%s", fss.filePrefix, dataFileName) } - fss.currentTxIndex++ - return os.OpenFile(filepath.Join(fss.writeDir, fileName), os.O_CREATE|os.O_WRONLY, 0o600) -} +<<<<<<< HEAD // ListenEndBlock satisfies the baseapp.ABCIListener interface // It writes the received EndBlock request and response and the resulting state changes // out to a file as described in the above the naming schema @@ -208,62 +241,56 @@ func (fss *StreamingService) ListenEndBlock(ctx sdk.Context, req abci.RequestEnd if _, err = dstFile.Write(stateChange); err != nil { fss.stateCache = nil fss.stateCacheLock.Unlock() +======= + if fss.outputMetadata { + bz, err := fss.codec.Marshal(&fss.blockMetadata) + if err != nil { + return err + } + if err := writeLengthPrefixedFile(path.Join(fss.writeDir, metaFileName), bz, fss.fsync); err != nil { +>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516)) return err } } - // reset cache - fss.stateCache = nil - fss.stateCacheLock.Unlock() - // write res to file - lengthPrefixedResBytes, err := fss.codec.MarshalLengthPrefixed(&res) - if err != nil { + + var buf bytes.Buffer + if err := fss.writeBlockData(&buf); err != nil { return err } +<<<<<<< HEAD if _, err = dstFile.Write(lengthPrefixedResBytes); err != nil { return err } // close file return dstFile.Close() +======= + return writeLengthPrefixedFile(path.Join(fss.writeDir, dataFileName), buf.Bytes(), fss.fsync) +>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516)) } -func (fss *StreamingService) openEndBlockFile() (*os.File, error) { - fileName := fmt.Sprintf("block-%d-end", fss.currentBlockNumber) - if fss.filePrefix != "" { - fileName = fmt.Sprintf("%s-%s", fss.filePrefix, fileName) +func (fss *StreamingService) writeBlockData(writer io.Writer) error { + for _, listener := range fss.storeListeners { + cache := listener.PopStateCache() + for i := range cache { + bz, err := fss.codec.MarshalLengthPrefixed(&cache[i]) + if err != nil { + return err + } + if _, err = writer.Write(bz); err != nil { + return err + } + } } - return os.OpenFile(filepath.Join(fss.writeDir, fileName), os.O_CREATE|os.O_WRONLY, 0o600) + return nil } // Stream satisfies the baseapp.StreamingService interface -// It spins up a goroutine select loop which awaits length-prefixed binary encoded KV pairs -// and caches them in the order they were received -// returns an error if it is called twice func (fss *StreamingService) Stream(wg *sync.WaitGroup) error { - if fss.quitChan != nil { - return errors.New("`Stream` has already been called. 
The stream needs to be closed before it can be started again") - } - fss.quitChan = make(chan struct{}) - wg.Add(1) - go func() { - defer wg.Done() - for { - select { - case <-fss.quitChan: - fss.quitChan = nil - return - case by := <-fss.srcChan: - fss.stateCacheLock.Lock() - fss.stateCache = append(fss.stateCache, by) - fss.stateCacheLock.Unlock() - } - } - }() return nil } // Close satisfies the io.Closer interface, which satisfies the baseapp.StreamingService interface func (fss *StreamingService) Close() error { - close(fss.quitChan) return nil } @@ -276,3 +303,32 @@ func isDirWriteable(dir string) error { } return os.Remove(f) } + +func writeLengthPrefixedFile(path string, data []byte, fsync bool) (err error) { + var f *os.File + f, err = os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600) + if err != nil { + return sdkerrors.Wrapf(err, "open file failed: %s", path) + } + defer func() { + // avoid overriding the real error with file close error + if err1 := f.Close(); err1 != nil && err == nil { + err = sdkerrors.Wrapf(err, "close file failed: %s", path) + } + }() + _, err = f.Write(sdk.Uint64ToBigEndian(uint64(len(data)))) + if err != nil { + return sdkerrors.Wrapf(err, "write length prefix failed: %s", path) + } + _, err = f.Write(data) + if err != nil { + return sdkerrors.Wrapf(err, "write block data failed: %s", path) + } + if fsync { + err = f.Sync() + if err != nil { + return sdkerrors.Wrapf(err, "fsync failed: %s", path) + } + } + return +} diff --git a/store/streaming/file/service_test.go b/store/streaming/file/service_test.go index 52f477ed6a00..7ee15d3fbb16 100644 --- a/store/streaming/file/service_test.go +++ b/store/streaming/file/service_test.go @@ -2,12 +2,21 @@ package file import ( "encoding/binary" + "errors" "fmt" "os" "path/filepath" "sync" "testing" +<<<<<<< HEAD +======= + "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + +>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516)) "github.com/cosmos/cosmos-sdk/codec" codecTypes "github.com/cosmos/cosmos-sdk/codec/types" "github.com/cosmos/cosmos-sdk/store/types" @@ -56,6 +65,10 @@ var ( ConsensusParamUpdates: &abci.ConsensusParams{}, ValidatorUpdates: []abci.ValidatorUpdate{}, } + testCommitRes = abci.ResponseCommit{ + Data: []byte{1}, + RetainHeight: 0, + } mockTxBytes1 = []byte{9, 8, 7, 6, 5, 4, 3, 2, 1} testDeliverTxReq1 = abci.RequestDeliverTx{ Tx: mockTxBytes1, @@ -104,25 +117,6 @@ var ( mockValue3 = []byte{5, 4, 3} ) -func TestIntermediateWriter(t *testing.T) { - outChan := make(chan []byte, 0) - iw := NewIntermediateWriter(outChan) - require.IsType(t, &IntermediateWriter{}, iw) - testBytes := []byte{1, 2, 3, 4, 5} - var length int - var err error - waitChan := make(chan struct{}, 0) - go func() { - length, err = iw.Write(testBytes) - waitChan <- struct{}{} - }() - receivedBytes := <-outChan - <-waitChan - require.Equal(t, len(testBytes), length) - require.Equal(t, testBytes, receivedBytes) - require.Nil(t, err) -} - func TestFileStreamingService(t *testing.T) { if os.Getenv("CI") != "" { t.Skip("Skipping TestFileStreamingService in CI environment") @@ -132,29 +126,23 @@ func TestFileStreamingService(t *testing.T) { defer os.RemoveAll(testDir) testKeys := []types.StoreKey{mockStoreKey1, mockStoreKey2} - testStreamingService, err = NewStreamingService(testDir, testPrefix, testKeys, testMarshaller) + testStreamingService, err = 
NewStreamingService(testDir, testPrefix, testKeys, testMarshaller, log.NewNopLogger(), true, false, false) require.Nil(t, err) require.IsType(t, &StreamingService{}, testStreamingService) require.Equal(t, testPrefix, testStreamingService.filePrefix) require.Equal(t, testDir, testStreamingService.writeDir) require.Equal(t, testMarshaller, testStreamingService.codec) - testListener1 = testStreamingService.listeners[mockStoreKey1][0] - testListener2 = testStreamingService.listeners[mockStoreKey2][0] + testListener1 = testStreamingService.storeListeners[0] + testListener2 = testStreamingService.storeListeners[1] wg := new(sync.WaitGroup) testStreamingService.Stream(wg) - testListenBeginBlock(t) - testListenDeliverTx1(t) - testListenDeliverTx2(t) - testListenEndBlock(t) + testListenBlock(t) testStreamingService.Close() wg.Wait() } -func testListenBeginBlock(t *testing.T) { - expectedBeginBlockReqBytes, err := testMarshaller.Marshal(&testBeginBlockReq) - require.Nil(t, err) - expectedBeginBlockResBytes, err := testMarshaller.Marshal(&testBeginBlockRes) - require.Nil(t, err) +func testListenBlock(t *testing.T) { + var expectKVPairsStore1, expectKVPairsStore2 [][]byte // write state changes testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false) @@ -183,162 +171,102 @@ func testListenBeginBlock(t *testing.T) { Delete: false, }) require.Nil(t, err) + expectKVPairsStore1 = append(expectKVPairsStore1, expectedKVPair1, expectedKVPair3) + expectKVPairsStore2 = append(expectKVPairsStore2, expectedKVPair2) // send the ABCI messages err = testStreamingService.ListenBeginBlock(emptyContext, testBeginBlockReq, testBeginBlockRes) require.Nil(t, err) - // load the file, checking that it was created with the expected name - fileName := fmt.Sprintf("%s-block-%d-begin", testPrefix, testBeginBlockReq.GetHeader().Height) - fileBytes, err := readInFile(fileName) - require.Nil(t, err) - - // segment the file into the separate gRPC messages and check the correctness of each - segments, err := segmentBytes(fileBytes) - require.Nil(t, err) - require.Equal(t, 5, len(segments)) - require.Equal(t, expectedBeginBlockReqBytes, segments[0]) - require.Equal(t, expectedKVPair1, segments[1]) - require.Equal(t, expectedKVPair2, segments[2]) - require.Equal(t, expectedKVPair3, segments[3]) - require.Equal(t, expectedBeginBlockResBytes, segments[4]) -} - -func testListenDeliverTx1(t *testing.T) { - expectedDeliverTxReq1Bytes, err := testMarshaller.Marshal(&testDeliverTxReq1) - require.Nil(t, err) - expectedDeliverTxRes1Bytes, err := testMarshaller.Marshal(&testDeliverTxRes1) - require.Nil(t, err) - // write state changes testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false) testListener2.OnWrite(mockStoreKey2, mockKey2, mockValue2, false) - testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false) + testListener2.OnWrite(mockStoreKey2, mockKey3, mockValue3, false) // expected KV pairs - expectedKVPair1, err := testMarshaller.Marshal(&types.StoreKVPair{ + expectedKVPair1, err = testMarshaller.Marshal(&types.StoreKVPair{ StoreKey: mockStoreKey1.Name(), Key: mockKey1, Value: mockValue1, Delete: false, }) require.Nil(t, err) - expectedKVPair2, err := testMarshaller.Marshal(&types.StoreKVPair{ + expectedKVPair2, err = testMarshaller.Marshal(&types.StoreKVPair{ StoreKey: mockStoreKey2.Name(), Key: mockKey2, Value: mockValue2, Delete: false, }) require.Nil(t, err) - expectedKVPair3, err := testMarshaller.Marshal(&types.StoreKVPair{ + expectedKVPair3, err = testMarshaller.Marshal(&types.StoreKVPair{ StoreKey: 
mockStoreKey2.Name(), Key: mockKey3, Value: mockValue3, Delete: false, }) require.Nil(t, err) + expectKVPairsStore1 = append(expectKVPairsStore1, expectedKVPair1) + expectKVPairsStore2 = append(expectKVPairsStore2, expectedKVPair2, expectedKVPair3) // send the ABCI messages err = testStreamingService.ListenDeliverTx(emptyContext, testDeliverTxReq1, testDeliverTxRes1) require.Nil(t, err) - // load the file, checking that it was created with the expected name - fileName := fmt.Sprintf("%s-block-%d-tx-%d", testPrefix, testBeginBlockReq.GetHeader().Height, 0) - fileBytes, err := readInFile(fileName) - require.Nil(t, err) - - // segment the file into the separate gRPC messages and check the correctness of each - segments, err := segmentBytes(fileBytes) - require.Nil(t, err) - require.Equal(t, 5, len(segments)) - require.Equal(t, expectedDeliverTxReq1Bytes, segments[0]) - require.Equal(t, expectedKVPair1, segments[1]) - require.Equal(t, expectedKVPair2, segments[2]) - require.Equal(t, expectedKVPair3, segments[3]) - require.Equal(t, expectedDeliverTxRes1Bytes, segments[4]) -} - -func testListenDeliverTx2(t *testing.T) { - expectedDeliverTxReq2Bytes, err := testMarshaller.Marshal(&testDeliverTxReq2) - require.Nil(t, err) - expectedDeliverTxRes2Bytes, err := testMarshaller.Marshal(&testDeliverTxRes2) - require.Nil(t, err) - // write state changes - testListener1.OnWrite(mockStoreKey2, mockKey1, mockValue1, false) - testListener2.OnWrite(mockStoreKey1, mockKey2, mockValue2, false) - testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false) + testListener2.OnWrite(mockStoreKey2, mockKey1, mockValue1, false) + testListener1.OnWrite(mockStoreKey1, mockKey2, mockValue2, false) + testListener2.OnWrite(mockStoreKey2, mockKey3, mockValue3, false) // expected KV pairs - expectedKVPair1, err := testMarshaller.Marshal(&types.StoreKVPair{ + expectedKVPair1, err = testMarshaller.Marshal(&types.StoreKVPair{ StoreKey: mockStoreKey2.Name(), Key: mockKey1, Value: mockValue1, Delete: false, }) require.Nil(t, err) - expectedKVPair2, err := testMarshaller.Marshal(&types.StoreKVPair{ + expectedKVPair2, err = testMarshaller.Marshal(&types.StoreKVPair{ StoreKey: mockStoreKey1.Name(), Key: mockKey2, Value: mockValue2, Delete: false, }) require.Nil(t, err) - expectedKVPair3, err := testMarshaller.Marshal(&types.StoreKVPair{ + expectedKVPair3, err = testMarshaller.Marshal(&types.StoreKVPair{ StoreKey: mockStoreKey2.Name(), Key: mockKey3, Value: mockValue3, Delete: false, }) require.Nil(t, err) + expectKVPairsStore1 = append(expectKVPairsStore1, expectedKVPair2) + expectKVPairsStore2 = append(expectKVPairsStore2, expectedKVPair1, expectedKVPair3) // send the ABCI messages err = testStreamingService.ListenDeliverTx(emptyContext, testDeliverTxReq2, testDeliverTxRes2) require.Nil(t, err) - // load the file, checking that it was created with the expected name - fileName := fmt.Sprintf("%s-block-%d-tx-%d", testPrefix, testBeginBlockReq.GetHeader().Height, 1) - fileBytes, err := readInFile(fileName) - require.Nil(t, err) - - // segment the file into the separate gRPC messages and check the correctness of each - segments, err := segmentBytes(fileBytes) - require.Nil(t, err) - require.Equal(t, 5, len(segments)) - require.Equal(t, expectedDeliverTxReq2Bytes, segments[0]) - require.Equal(t, expectedKVPair1, segments[1]) - require.Equal(t, expectedKVPair2, segments[2]) - require.Equal(t, expectedKVPair3, segments[3]) - require.Equal(t, expectedDeliverTxRes2Bytes, segments[4]) -} - -func testListenEndBlock(t *testing.T) { - 
expectedEndBlockReqBytes, err := testMarshaller.Marshal(&testEndBlockReq) - require.Nil(t, err) - expectedEndBlockResBytes, err := testMarshaller.Marshal(&testEndBlockRes) - require.Nil(t, err) - // write state changes testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false) - testListener2.OnWrite(mockStoreKey1, mockKey2, mockValue2, false) - testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false) + testListener1.OnWrite(mockStoreKey1, mockKey2, mockValue2, false) + testListener2.OnWrite(mockStoreKey2, mockKey3, mockValue3, false) // expected KV pairs - expectedKVPair1, err := testMarshaller.Marshal(&types.StoreKVPair{ + expectedKVPair1, err = testMarshaller.Marshal(&types.StoreKVPair{ StoreKey: mockStoreKey1.Name(), Key: mockKey1, Value: mockValue1, Delete: false, }) require.Nil(t, err) - expectedKVPair2, err := testMarshaller.Marshal(&types.StoreKVPair{ + expectedKVPair2, err = testMarshaller.Marshal(&types.StoreKVPair{ StoreKey: mockStoreKey1.Name(), Key: mockKey2, Value: mockValue2, Delete: false, }) require.Nil(t, err) - expectedKVPair3, err := testMarshaller.Marshal(&types.StoreKVPair{ + expectedKVPair3, err = testMarshaller.Marshal(&types.StoreKVPair{ StoreKey: mockStoreKey2.Name(), Key: mockKey3, Value: mockValue3, @@ -346,29 +274,57 @@ func testListenEndBlock(t *testing.T) { }) require.Nil(t, err) + expectKVPairsStore1 = append(expectKVPairsStore1, expectedKVPair1, expectedKVPair2) + expectKVPairsStore2 = append(expectKVPairsStore2, expectedKVPair3) + // send the ABCI messages err = testStreamingService.ListenEndBlock(emptyContext, testEndBlockReq, testEndBlockRes) require.Nil(t, err) + err = testStreamingService.ListenCommit(emptyContext, testCommitRes) + require.Nil(t, err) + // load the file, checking that it was created with the expected name - fileName := fmt.Sprintf("%s-block-%d-end", testPrefix, testEndBlockReq.Height) - fileBytes, err := readInFile(fileName) + metaFileName := fmt.Sprintf("%s-block-%d-meta", testPrefix, testBeginBlockReq.GetHeader().Height) + dataFileName := fmt.Sprintf("%s-block-%d-data", testPrefix, testBeginBlockReq.GetHeader().Height) + metaFileBytes, err := readInFile(metaFileName) + dataFileBytes, err := readInFile(dataFileName) + require.Nil(t, err) + + metadata := types.BlockMetadata{ + RequestBeginBlock: &testBeginBlockReq, + ResponseBeginBlock: &testBeginBlockRes, + RequestEndBlock: &testEndBlockReq, + ResponseEndBlock: &testEndBlockRes, + ResponseCommit: &testCommitRes, + DeliverTxs: []*types.BlockMetadata_DeliverTx{ + {Request: &testDeliverTxReq1, Response: &testDeliverTxRes1}, + {Request: &testDeliverTxReq2, Response: &testDeliverTxRes2}, + }, + } + expectedMetadataBytes, err := testMarshaller.Marshal(&metadata) require.Nil(t, err) + require.Equal(t, expectedMetadataBytes, metaFileBytes) // segment the file into the separate gRPC messages and check the correctness of each - segments, err := segmentBytes(fileBytes) + segments, err := segmentBytes(dataFileBytes) require.Nil(t, err) - require.Equal(t, 5, len(segments)) - require.Equal(t, expectedEndBlockReqBytes, segments[0]) - require.Equal(t, expectedKVPair1, segments[1]) - require.Equal(t, expectedKVPair2, segments[2]) - require.Equal(t, expectedKVPair3, segments[3]) - require.Equal(t, expectedEndBlockResBytes, segments[4]) + require.Equal(t, len(expectKVPairsStore1)+len(expectKVPairsStore2), len(segments)) + require.Equal(t, expectKVPairsStore1, segments[:len(expectKVPairsStore1)]) + require.Equal(t, expectKVPairsStore2, segments[len(expectKVPairsStore1):]) } func 
readInFile(name string) ([]byte, error) { path := filepath.Join(testDir, name) - return os.ReadFile(path) + bz, err := os.ReadFile(path) + if err != nil { + return nil, err + } + size := sdk.BigEndianToUint64(bz[:8]) + if len(bz) != int(size)+8 { + return nil, errors.New("incomplete file ") + } + return bz[8:], nil } // segmentBytes returns all of the protobuf messages contained in the byte array as an array of byte arrays diff --git a/store/tracekv/store.go b/store/tracekv/store.go index 91f3c657682c..caf871552f56 100644 --- a/store/tracekv/store.go +++ b/store/tracekv/store.go @@ -173,11 +173,6 @@ func (tkv *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.Ca panic("cannot CacheWrapWithTrace a TraceKVStore") } -// CacheWrapWithListeners implements the CacheWrapper interface. -func (tkv *Store) CacheWrapWithListeners(_ types.StoreKey, _ []types.WriteListener) types.CacheWrap { - panic("cannot CacheWrapWithListeners a TraceKVStore") -} - // writeOperation writes a KVStore operation to the underlying io.Writer as // JSON-encoded data where the key/value pair is base64 encoded. func writeOperation(w io.Writer, op operation, tc types.TraceContext, key, value []byte) { diff --git a/store/tracekv/store_test.go b/store/tracekv/store_test.go index 1b81e89bafd2..b7a3b13ea964 100644 --- a/store/tracekv/store_test.go +++ b/store/tracekv/store_test.go @@ -290,8 +290,3 @@ func TestTraceKVStoreCacheWrapWithTrace(t *testing.T) { store := newEmptyTraceKVStore(nil) require.Panics(t, func() { store.CacheWrapWithTrace(nil, nil) }) } - -func TestTraceKVStoreCacheWrapWithListeners(t *testing.T) { - store := newEmptyTraceKVStore(nil) - require.Panics(t, func() { store.CacheWrapWithListeners(nil, nil) }) -} diff --git a/store/types/listening.go b/store/types/listening.go index 02cde4c715c7..5f21689449fd 100644 --- a/store/types/listening.go +++ b/store/types/listening.go @@ -45,3 +45,37 @@ func (wl *StoreKVPairWriteListener) OnWrite(storeKey StoreKey, key []byte, value } return nil } + +// MemoryListener listens to the state writes and accumulate the records in memory. +type MemoryListener struct { + key StoreKey + stateCache []StoreKVPair +} + +// NewMemoryListener creates a listener that accumulate the state writes in memory. 
+func NewMemoryListener(key StoreKey) *MemoryListener { + return &MemoryListener{key: key} +} + +// OnWrite implements WriteListener interface +func (fl *MemoryListener) OnWrite(storeKey StoreKey, key []byte, value []byte, delete bool) error { + fl.stateCache = append(fl.stateCache, StoreKVPair{ + StoreKey: storeKey.Name(), + Delete: delete, + Key: key, + Value: value, + }) + return nil +} + +// PopStateCache returns the current state caches and set to nil +func (fl *MemoryListener) PopStateCache() []StoreKVPair { + res := fl.stateCache + fl.stateCache = nil + return res +} + +// StoreKey returns the storeKey it listens to +func (fl *MemoryListener) StoreKey() StoreKey { + return fl.key +} diff --git a/store/types/listening.pb.go b/store/types/listening.pb.go index 47d5a23a8367..06db37a1b030 100644 --- a/store/types/listening.pb.go +++ b/store/types/listening.pb.go @@ -5,7 +5,12 @@ package types import ( fmt "fmt" +<<<<<<< HEAD proto "github.com/gogo/protobuf/proto" +======= + proto "github.com/cosmos/gogoproto/proto" + types "github.com/tendermint/tendermint/abci/types" +>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516)) io "io" math "math" math_bits "math/bits" @@ -95,8 +100,149 @@ func (m *StoreKVPair) GetValue() []byte { return nil } +// BlockMetadata contains all the abci event data of a block +// the file streamer dump them into files together with the state changes. +type BlockMetadata struct { + RequestBeginBlock *types.RequestBeginBlock `protobuf:"bytes,1,opt,name=request_begin_block,json=requestBeginBlock,proto3" json:"request_begin_block,omitempty"` + ResponseBeginBlock *types.ResponseBeginBlock `protobuf:"bytes,2,opt,name=response_begin_block,json=responseBeginBlock,proto3" json:"response_begin_block,omitempty"` + DeliverTxs []*BlockMetadata_DeliverTx `protobuf:"bytes,3,rep,name=deliver_txs,json=deliverTxs,proto3" json:"deliver_txs,omitempty"` + RequestEndBlock *types.RequestEndBlock `protobuf:"bytes,4,opt,name=request_end_block,json=requestEndBlock,proto3" json:"request_end_block,omitempty"` + ResponseEndBlock *types.ResponseEndBlock `protobuf:"bytes,5,opt,name=response_end_block,json=responseEndBlock,proto3" json:"response_end_block,omitempty"` + ResponseCommit *types.ResponseCommit `protobuf:"bytes,6,opt,name=response_commit,json=responseCommit,proto3" json:"response_commit,omitempty"` +} + +func (m *BlockMetadata) Reset() { *m = BlockMetadata{} } +func (m *BlockMetadata) String() string { return proto.CompactTextString(m) } +func (*BlockMetadata) ProtoMessage() {} +func (*BlockMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_a5d350879fe4fecd, []int{1} +} +func (m *BlockMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlockMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockMetadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BlockMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockMetadata.Merge(m, src) +} +func (m *BlockMetadata) XXX_Size() int { + return m.Size() +} +func (m *BlockMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_BlockMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockMetadata proto.InternalMessageInfo + +func (m *BlockMetadata) GetRequestBeginBlock() *types.RequestBeginBlock { + if m != nil { + return m.RequestBeginBlock + } + return nil +} + +func (m *BlockMetadata) 
GetResponseBeginBlock() *types.ResponseBeginBlock { + if m != nil { + return m.ResponseBeginBlock + } + return nil +} + +func (m *BlockMetadata) GetDeliverTxs() []*BlockMetadata_DeliverTx { + if m != nil { + return m.DeliverTxs + } + return nil +} + +func (m *BlockMetadata) GetRequestEndBlock() *types.RequestEndBlock { + if m != nil { + return m.RequestEndBlock + } + return nil +} + +func (m *BlockMetadata) GetResponseEndBlock() *types.ResponseEndBlock { + if m != nil { + return m.ResponseEndBlock + } + return nil +} + +func (m *BlockMetadata) GetResponseCommit() *types.ResponseCommit { + if m != nil { + return m.ResponseCommit + } + return nil +} + +// DeliverTx encapulate deliver tx request and response. +type BlockMetadata_DeliverTx struct { + Request *types.RequestDeliverTx `protobuf:"bytes,1,opt,name=request,proto3" json:"request,omitempty"` + Response *types.ResponseDeliverTx `protobuf:"bytes,2,opt,name=response,proto3" json:"response,omitempty"` +} + +func (m *BlockMetadata_DeliverTx) Reset() { *m = BlockMetadata_DeliverTx{} } +func (m *BlockMetadata_DeliverTx) String() string { return proto.CompactTextString(m) } +func (*BlockMetadata_DeliverTx) ProtoMessage() {} +func (*BlockMetadata_DeliverTx) Descriptor() ([]byte, []int) { + return fileDescriptor_a5d350879fe4fecd, []int{1, 0} +} +func (m *BlockMetadata_DeliverTx) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlockMetadata_DeliverTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockMetadata_DeliverTx.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BlockMetadata_DeliverTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockMetadata_DeliverTx.Merge(m, src) +} +func (m *BlockMetadata_DeliverTx) XXX_Size() int { + return m.Size() +} +func (m *BlockMetadata_DeliverTx) XXX_DiscardUnknown() { + xxx_messageInfo_BlockMetadata_DeliverTx.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockMetadata_DeliverTx proto.InternalMessageInfo + +func (m *BlockMetadata_DeliverTx) GetRequest() *types.RequestDeliverTx { + if m != nil { + return m.Request + } + return nil +} + +func (m *BlockMetadata_DeliverTx) GetResponse() *types.ResponseDeliverTx { + if m != nil { + return m.Response + } + return nil +} + func init() { proto.RegisterType((*StoreKVPair)(nil), "cosmos.base.store.v1beta1.StoreKVPair") + proto.RegisterType((*BlockMetadata)(nil), "cosmos.base.store.v1beta1.BlockMetadata") + proto.RegisterType((*BlockMetadata_DeliverTx)(nil), "cosmos.base.store.v1beta1.BlockMetadata.DeliverTx") } func init() { @@ -104,21 +250,37 @@ func init() { } var fileDescriptor_a5d350879fe4fecd = []byte{ - // 224 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4c, 0xce, 0x2f, 0xce, - 0xcd, 0x2f, 0xd6, 0x4f, 0x4a, 0x2c, 0x4e, 0xd5, 0x2f, 0x2e, 0xc9, 0x2f, 0x4a, 0xd5, 0x2f, 0x33, - 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd4, 0xcf, 0xc9, 0x2c, 0x2e, 0x49, 0xcd, 0xcb, 0xcc, 0x4b, 0xd7, - 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x84, 0x28, 0xd5, 0x03, 0x29, 0xd5, 0x03, 0x2b, 0xd5, - 0x83, 0x2a, 0x55, 0xca, 0xe2, 0xe2, 0x0e, 0x06, 0x09, 0x78, 0x87, 0x05, 0x24, 0x66, 0x16, 0x09, - 0x49, 0x73, 0x71, 0x82, 0xe5, 0xe3, 0xb3, 0x53, 0x2b, 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, - 0x38, 0xc0, 0x02, 0xde, 0xa9, 0x95, 0x42, 0x62, 0x5c, 0x6c, 0x29, 0xa9, 0x39, 0xa9, 0x25, 0xa9, - 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0x1c, 
0x41, 0x50, 0x9e, 0x90, 0x00, 0x17, 0x33, 0x48, 0x39, 0xb3, - 0x02, 0xa3, 0x06, 0x4f, 0x10, 0x88, 0x29, 0x24, 0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, - 0xc1, 0x02, 0x16, 0x83, 0x70, 0x9c, 0x9c, 0x4e, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, - 0xc1, 0x23, 0x39, 0xc6, 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, - 0x21, 0x4a, 0x23, 0x3d, 0xb3, 0x24, 0xa3, 0x34, 0x49, 0x2f, 0x39, 0x3f, 0x57, 0x1f, 0xea, 0x2d, - 0x08, 0xa5, 0x5b, 0x9c, 0x92, 0x0d, 0xf5, 0x5c, 0x49, 0x65, 0x41, 0x6a, 0x71, 0x12, 0x1b, 0xd8, - 0x47, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2b, 0xe0, 0xb3, 0x51, 0xfe, 0x00, 0x00, 0x00, + // 473 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x93, 0x4f, 0x6f, 0xd3, 0x40, + 0x10, 0xc5, 0xe3, 0xa6, 0x09, 0xc9, 0x04, 0x68, 0x59, 0x2a, 0x64, 0x5a, 0xc9, 0xb8, 0xe1, 0x62, + 0x0e, 0xac, 0xd5, 0x70, 0x44, 0xe2, 0x10, 0x40, 0x42, 0x2a, 0x08, 0xe4, 0x02, 0x07, 0x2e, 0x96, + 0xff, 0x8c, 0xc2, 0x12, 0xdb, 0x1b, 0x76, 0x37, 0x51, 0x73, 0xe6, 0xc2, 0x91, 0x8f, 0xc5, 0xb1, + 0x47, 0x8e, 0x28, 0xf9, 0x22, 0xc8, 0x6b, 0xc7, 0xa9, 0x43, 0x7d, 0xca, 0xee, 0xe4, 0xbd, 0x9f, + 0xdf, 0xcc, 0x6a, 0xe0, 0x49, 0xc4, 0x65, 0xca, 0xa5, 0x1b, 0x06, 0x12, 0x5d, 0xa9, 0xb8, 0x40, + 0x77, 0x71, 0x16, 0xa2, 0x0a, 0xce, 0xdc, 0x84, 0x49, 0x85, 0x19, 0xcb, 0x26, 0x74, 0x26, 0xb8, + 0xe2, 0xe4, 0x61, 0x21, 0xa5, 0xb9, 0x94, 0x6a, 0x29, 0x2d, 0xa5, 0xc7, 0x27, 0x0a, 0xb3, 0x18, + 0x45, 0xca, 0x32, 0xe5, 0x06, 0x61, 0xc4, 0x5c, 0xb5, 0x9c, 0xa1, 0x2c, 0x7c, 0xc3, 0x6f, 0x30, + 0xb8, 0xc8, 0xd5, 0xe7, 0x9f, 0x3f, 0x04, 0x4c, 0x90, 0x13, 0xe8, 0x6b, 0xb3, 0x3f, 0xc5, 0xa5, + 0x69, 0xd8, 0x86, 0xd3, 0xf7, 0x7a, 0xba, 0x70, 0x8e, 0x4b, 0xf2, 0x00, 0xba, 0x31, 0x26, 0xa8, + 0xd0, 0xdc, 0xb3, 0x0d, 0xa7, 0xe7, 0x95, 0x37, 0x72, 0x08, 0xed, 0x5c, 0xde, 0xb6, 0x0d, 0xe7, + 0xb6, 0x97, 0x1f, 0xc9, 0x11, 0x74, 0x16, 0x41, 0x32, 0x47, 0x73, 0x5f, 0xd7, 0x8a, 0xcb, 0xf0, + 0x47, 0x07, 0xee, 0x8c, 0x13, 0x1e, 0x4d, 0xdf, 0xa1, 0x0a, 0xe2, 0x40, 0x05, 0xc4, 0x83, 0xfb, + 0x02, 0xbf, 0xcf, 0x51, 0x2a, 0x3f, 0xc4, 0x09, 0xcb, 0xfc, 0x30, 0xff, 0x5b, 0x7f, 0x78, 0x30, + 0x1a, 0xd2, 0x6d, 0x70, 0x9a, 0x07, 0xa7, 0x5e, 0xa1, 0x1d, 0xe7, 0x52, 0x0d, 0xf2, 0xee, 0x89, + 0xdd, 0x12, 0xf9, 0x04, 0x47, 0x02, 0xe5, 0x8c, 0x67, 0x12, 0x6b, 0xd0, 0x3d, 0x0d, 0x7d, 0x7c, + 0x03, 0xb4, 0x10, 0x5f, 0xa3, 0x12, 0xf1, 0x5f, 0x8d, 0x5c, 0xc0, 0x20, 0xc6, 0x84, 0x2d, 0x50, + 0xf8, 0xea, 0x52, 0x9a, 0x6d, 0xbb, 0xed, 0x0c, 0x46, 0x23, 0xda, 0x38, 0x76, 0x5a, 0xeb, 0x94, + 0xbe, 0x2a, 0xbc, 0x1f, 0x2f, 0x3d, 0x88, 0x37, 0x47, 0x49, 0xde, 0xc2, 0xa6, 0x01, 0x1f, 0xb3, + 0xb8, 0x0c, 0xba, 0xaf, 0x83, 0xda, 0x4d, 0xdd, 0xbf, 0xce, 0xe2, 0x22, 0xe5, 0x81, 0xa8, 0x17, + 0xc8, 0x7b, 0xa8, 0x82, 0x5f, 0xc3, 0x75, 0x34, 0xee, 0xb4, 0xb1, 0xef, 0x8a, 0x77, 0x28, 0x76, + 0x2a, 0xe4, 0x0d, 0x1c, 0x54, 0xc0, 0x88, 0xa7, 0x29, 0x53, 0x66, 0x57, 0xd3, 0x1e, 0x35, 0xd2, + 0x5e, 0x6a, 0x99, 0x77, 0x57, 0xd4, 0xee, 0xc7, 0x3f, 0x0d, 0xe8, 0x57, 0x23, 0x20, 0xcf, 0xe1, + 0x56, 0x99, 0xbd, 0x7c, 0xea, 0xd3, 0xa6, 0x66, 0xb7, 0x63, 0xdb, 0x38, 0xc8, 0x0b, 0xe8, 0x6d, + 0xe0, 0xe5, 0x9b, 0x0e, 0x1b, 0xd3, 0x6c, 0xed, 0x95, 0x67, 0x3c, 0xfe, 0xbd, 0xb2, 0x8c, 0xab, + 0x95, 0x65, 0xfc, 0x5d, 0x59, 0xc6, 0xaf, 0xb5, 0xd5, 0xba, 0x5a, 0x5b, 0xad, 0x3f, 0x6b, 0xab, + 0xf5, 0xc5, 0x99, 0x30, 0xf5, 0x75, 0x1e, 0xd2, 0x88, 0xa7, 0x6e, 0xb9, 0x79, 0xc5, 0xcf, 0x53, + 0x19, 0x4f, 0xcb, 0xfd, 0xd3, 0xbb, 0x13, 0x76, 0xf5, 0xf2, 0x3c, 0xfb, 0x17, 0x00, 0x00, 0xff, + 0xff, 0x69, 
0x5c, 0x8f, 0x23, 0xa1, 0x03, 0x00, 0x00, } func (m *StoreKVPair) Marshal() (dAtA []byte, err error) { @@ -175,6 +337,150 @@ func (m *StoreKVPair) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *BlockMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockMetadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ResponseCommit != nil { + { + size, err := m.ResponseCommit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintListening(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.ResponseEndBlock != nil { + { + size, err := m.ResponseEndBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintListening(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.RequestEndBlock != nil { + { + size, err := m.RequestEndBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintListening(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.DeliverTxs) > 0 { + for iNdEx := len(m.DeliverTxs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DeliverTxs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintListening(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.ResponseBeginBlock != nil { + { + size, err := m.ResponseBeginBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintListening(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.RequestBeginBlock != nil { + { + size, err := m.RequestBeginBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintListening(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BlockMetadata_DeliverTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockMetadata_DeliverTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BlockMetadata_DeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Response != nil { + { + size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintListening(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Request != nil { + { + size, err := m.Request.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintListening(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func encodeVarintListening(dAtA []byte, offset int, v uint64) int { offset -= sovListening(v) base := offset @@ -210,6 +516,58 @@ func (m *StoreKVPair) Size() (n int) { return n } +func (m *BlockMetadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RequestBeginBlock != nil { + l = m.RequestBeginBlock.Size() + n += 1 + l + sovListening(uint64(l)) + } + 
if m.ResponseBeginBlock != nil { + l = m.ResponseBeginBlock.Size() + n += 1 + l + sovListening(uint64(l)) + } + if len(m.DeliverTxs) > 0 { + for _, e := range m.DeliverTxs { + l = e.Size() + n += 1 + l + sovListening(uint64(l)) + } + } + if m.RequestEndBlock != nil { + l = m.RequestEndBlock.Size() + n += 1 + l + sovListening(uint64(l)) + } + if m.ResponseEndBlock != nil { + l = m.ResponseEndBlock.Size() + n += 1 + l + sovListening(uint64(l)) + } + if m.ResponseCommit != nil { + l = m.ResponseCommit.Size() + n += 1 + l + sovListening(uint64(l)) + } + return n +} + +func (m *BlockMetadata_DeliverTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Request != nil { + l = m.Request.Size() + n += 1 + l + sovListening(uint64(l)) + } + if m.Response != nil { + l = m.Response.Size() + n += 1 + l + sovListening(uint64(l)) + } + return n +} + func sovListening(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -386,6 +744,392 @@ func (m *StoreKVPair) Unmarshal(dAtA []byte) error { } return nil } +func (m *BlockMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowListening + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestBeginBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowListening + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthListening + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthListening + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequestBeginBlock == nil { + m.RequestBeginBlock = &types.RequestBeginBlock{} + } + if err := m.RequestBeginBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseBeginBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowListening + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthListening + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthListening + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResponseBeginBlock == nil { + m.ResponseBeginBlock = &types.ResponseBeginBlock{} + } + if err := m.ResponseBeginBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeliverTxs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowListening + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthListening + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthListening + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeliverTxs = append(m.DeliverTxs, &BlockMetadata_DeliverTx{}) + if err := m.DeliverTxs[len(m.DeliverTxs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestEndBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowListening + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthListening + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthListening + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequestEndBlock == nil { + m.RequestEndBlock = &types.RequestEndBlock{} + } + if err := m.RequestEndBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseEndBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowListening + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthListening + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthListening + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResponseEndBlock == nil { + m.ResponseEndBlock = &types.ResponseEndBlock{} + } + if err := m.ResponseEndBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseCommit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowListening + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthListening + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthListening + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResponseCommit == nil { + m.ResponseCommit = &types.ResponseCommit{} + } + if err := m.ResponseCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipListening(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthListening + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockMetadata_DeliverTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowListening + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if 
b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeliverTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeliverTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowListening + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthListening + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthListening + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Request == nil { + m.Request = &types.RequestDeliverTx{} + } + if err := m.Request.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowListening + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthListening + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthListening + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Response == nil { + m.Response = &types.ResponseDeliverTx{} + } + if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipListening(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthListening + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipListening(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/store/types/store.go b/store/types/store.go index e3c0f0822ad2..5bf0c0c07f6f 100644 --- a/store/types/store.go +++ b/store/types/store.go @@ -128,12 +128,17 @@ type MultiStore interface { // tracing operations. The modified MultiStore is returned. SetTracingContext(TraceContext) MultiStore +<<<<<<< HEAD // ListeningEnabled returns if listening is enabled for the KVStore belonging the provided StoreKey ListeningEnabled(key StoreKey) bool // AddListeners adds WriteListeners for the KVStore belonging to the provided StoreKey // It appends the listeners to a current set, if one already exists AddListeners(key StoreKey, listeners []WriteListener) +======= + // LatestVersion returns the latest version in the store + LatestVersion() int64 +>>>>>>> 1f91ee2ee (fix: state listener observe writes at wrong time (#13516)) } // From MultiStore.CacheMultiStore().... @@ -194,6 +199,13 @@ type CommitMultiStore interface { // RollbackToVersion rollback the db to specific version(height). 
RollbackToVersion(version int64) error + + // ListeningEnabled returns if listening is enabled for the KVStore belonging the provided StoreKey + ListeningEnabled(key StoreKey) bool + + // AddListeners adds WriteListeners for the KVStore belonging to the provided StoreKey + // It appends the listeners to a current set, if one already exists + AddListeners(key StoreKey, listeners []WriteListener) } //---------subsp------------------------------- @@ -270,9 +282,6 @@ type CacheWrap interface { // CacheWrapWithTrace recursively wraps again with tracing enabled. CacheWrapWithTrace(w io.Writer, tc TraceContext) CacheWrap - - // CacheWrapWithListeners recursively wraps again with listening enabled - CacheWrapWithListeners(storeKey StoreKey, listeners []WriteListener) CacheWrap } type CacheWrapper interface { @@ -281,9 +290,6 @@ type CacheWrapper interface { // CacheWrapWithTrace branches a store with tracing enabled. CacheWrapWithTrace(w io.Writer, tc TraceContext) CacheWrap - - // CacheWrapWithListeners recursively wraps again with listening enabled - CacheWrapWithListeners(storeKey StoreKey, listeners []WriteListener) CacheWrap } func (cid CommitID) IsZero() bool {